-rw-r--r--  .mailmap | 2
-rw-r--r--  CREDITS | 1
-rw-r--r--  Documentation/admin-guide/cgroup-v2.rst | 28
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 2
-rw-r--r--  Documentation/bpf/bpf_licensing.rst | 92
-rw-r--r--  Documentation/bpf/index.rst | 9
-rw-r--r--  Documentation/core-api/irq/irq-domain.rst | 5
-rw-r--r--  Documentation/devicetree/bindings/arm/tegra.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml | 46
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/ovti,ov5647.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/ovti,ov9282.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/net/asix,ax88796c.yaml | 73
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/dsa.yaml | 12
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/marvell.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml | 43
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/qca8k.txt | 215
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/qca8k.yaml | 362
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/realtek-smi.txt | 87
-rw-r--r--  Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/net/renesas,ether.yaml | 17
-rw-r--r--  Documentation/devicetree/bindings/net/renesas,etheravb.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/net/snps,dwmac.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml | 89
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.yaml | 2
-rw-r--r--  Documentation/filesystems/ntfs3.rst | 141
-rw-r--r--  Documentation/gpu/amdgpu.rst | 4
-rw-r--r--  Documentation/gpu/drm-internals.rst | 9
-rw-r--r--  Documentation/hwmon/k10temp.rst | 17
-rw-r--r--  Documentation/networking/device_drivers/ethernet/intel/ice.rst | 2
-rw-r--r--  Documentation/networking/devlink/devlink-region.rst | 4
-rw-r--r--  Documentation/networking/devlink/ice.rst | 13
-rw-r--r--  Documentation/networking/devlink/iosm.rst | 32
-rw-r--r--  Documentation/networking/dsa/sja1105.rst | 2
-rw-r--r--  Documentation/networking/ethtool-netlink.rst | 81
-rw-r--r--  Documentation/networking/ip-sysctl.rst | 8
-rw-r--r--  Documentation/networking/ipvs-sysctl.rst | 11
-rw-r--r--  Documentation/networking/mctp.rst | 69
-rw-r--r--  Documentation/userspace-api/vduse.rst | 2
-rw-r--r--  MAINTAINERS | 99
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/Kconfig | 3
-rw-r--r--  arch/alpha/include/asm/asm-prototypes.h | 1
-rw-r--r--  arch/alpha/include/asm/jensen.h | 8
-rw-r--r--  arch/alpha/include/uapi/asm/socket.h | 2
-rw-r--r--  arch/alpha/kernel/sys_jensen.c | 10
-rw-r--r--  arch/alpha/lib/Makefile | 1
-rw-r--r--  arch/alpha/lib/udiv-qrnnd.S (renamed from arch/alpha/math-emu/qrnnd.S) | 2
-rw-r--r--  arch/alpha/math-emu/Makefile | 2
-rw-r--r--  arch/alpha/math-emu/math.c | 2
-rw-r--r--  arch/arc/include/asm/pgtable.h | 5
-rw-r--r--  arch/arm/Kconfig | 2
-rw-r--r--  arch/arm/boot/dts/at91-sama5d27_som1_ek.dts | 1
-rw-r--r--  arch/arm/boot/dts/at91-sama7g5ek.dts | 45
-rw-r--r--  arch/arm/boot/dts/bcm2711-rpi-4-b.dts | 11
-rw-r--r--  arch/arm/boot/dts/bcm2711.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/bcm2835-common.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/bcm283x.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/imx53-m53menlo.dts | 4
-rw-r--r--  arch/arm/boot/dts/imx6dl-yapp4-common.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/imx6qdl-pico.dtsi | 11
-rw-r--r--  arch/arm/boot/dts/imx6sx-sdb.dts | 4
-rw-r--r--  arch/arm/boot/dts/imx6ul-14x14-evk.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap3430-sdp.dts | 2
-rw-r--r--  arch/arm/boot/dts/qcom-apq8064.dtsi | 15
-rw-r--r--  arch/arm/boot/dts/sama7g5.dtsi | 39
-rw-r--r--  arch/arm/boot/dts/spear3xx.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/vexpress-v2m-rs1.dtsi | 67
-rw-r--r--  arch/arm/boot/dts/vexpress-v2m.dtsi | 65
-rw-r--r--  arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts | 57
-rw-r--r--  arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts | 57
-rw-r--r--  arch/arm/boot/dts/vexpress-v2p-ca5s.dts | 57
-rw-r--r--  arch/arm/boot/dts/vexpress-v2p-ca9.dts | 58
-rw-r--r--  arch/arm/common/sharpsl_param.c | 4
-rw-r--r--  arch/arm/configs/gemini_defconfig | 1
-rw-r--r--  arch/arm/configs/imx_v6_v7_defconfig | 1
-rw-r--r--  arch/arm/configs/multi_v7_defconfig | 4
-rw-r--r--  arch/arm/configs/oxnas_v6_defconfig | 1
-rw-r--r--  arch/arm/configs/shmobile_defconfig | 1
-rw-r--r--  arch/arm/kernel/signal.c | 1
-rw-r--r--  arch/arm/mach-at91/pm.c | 130
-rw-r--r--  arch/arm/mach-at91/pm_suspend.S | 42
-rw-r--r--  arch/arm/mach-dove/include/mach/uncompress.h | 4
-rw-r--r--  arch/arm/mach-imx/mach-imx6q.c | 3
-rw-r--r--  arch/arm/mach-imx/pm-imx6.c | 2
-rw-r--r--  arch/arm/mach-imx/src.c | 40
-rw-r--r--  arch/arm/mach-omap1/include/mach/memory.h | 12
-rw-r--r--  arch/arm/mach-omap1/usb.c | 116
-rw-r--r--  arch/arm/mach-omap2/Kconfig | 1
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 2
-rw-r--r--  arch/arm/net/bpf_jit_32.c | 19
-rw-r--r--  arch/arm64/Kconfig | 2
-rw-r--r--  arch/arm64/boot/dts/arm/foundation-v8.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/arm/fvp-base-revc.dts | 23
-rw-r--r--  arch/arm64/boot/dts/arm/juno-base.dtsi | 12
-rw-r--r--  arch/arm64/boot/dts/arm/juno-motherboard.dtsi | 21
-rw-r--r--  arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts | 11
-rw-r--r--  arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi | 20
-rw-r--r--  arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts | 57
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-evk.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-som.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mq-evk.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/ipq8074.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/pm8150.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/qrb5165-rb5.dts | 10
-rw-r--r--  arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi | 9
-rw-r--r--  arch/arm64/boot/dts/qcom/sc7280.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm630.dtsi | 15
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm845.dtsi | 21
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts | 34
-rw-r--r--  arch/arm64/configs/defconfig | 1
-rw-r--r--  arch/arm64/include/asm/acpi.h | 3
-rw-r--r--  arch/arm64/include/asm/assembler.h | 5
-rw-r--r--  arch/arm64/include/asm/mte.h | 6
-rw-r--r--  arch/arm64/include/asm/string.h | 2
-rw-r--r--  arch/arm64/kernel/acpi.c | 19
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 8
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 2
-rw-r--r--  arch/arm64/kernel/mte.c | 10
-rw-r--r--  arch/arm64/kernel/process.c | 3
-rw-r--r--  arch/arm64/kernel/signal.c | 4
-rw-r--r--  arch/arm64/kvm/hyp/include/nvhe/gfp.h | 1
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/Makefile | 2
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/mem_protect.c | 13
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/page_alloc.c | 15
-rw-r--r--  arch/arm64/kvm/mmu.c | 6
-rw-r--r--  arch/arm64/kvm/perf.c | 3
-rw-r--r--  arch/arm64/kvm/pmu-emul.c | 9
-rw-r--r--  arch/arm64/lib/strcmp.S | 2
-rw-r--r--  arch/arm64/lib/strncmp.S | 2
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 2
-rw-r--r--  arch/csky/Kconfig | 3
-rw-r--r--  arch/csky/include/asm/bitops.h | 1
-rw-r--r--  arch/csky/kernel/ptrace.c | 3
-rw-r--r--  arch/csky/kernel/signal.c | 8
-rw-r--r--  arch/ia64/Kconfig | 2
-rw-r--r--  arch/m68k/68000/entry.S | 4
-rw-r--r--  arch/m68k/Kconfig | 1
-rw-r--r--  arch/m68k/coldfire/entry.S | 4
-rw-r--r--  arch/m68k/emu/nfeth.c | 2
-rw-r--r--  arch/m68k/include/asm/processor.h | 31
-rw-r--r--  arch/m68k/include/asm/segment.h | 59
-rw-r--r--  arch/m68k/include/asm/thread_info.h | 3
-rw-r--r--  arch/m68k/include/asm/tlbflush.h | 11
-rw-r--r--  arch/m68k/include/asm/traps.h | 4
-rw-r--r--  arch/m68k/include/asm/uaccess.h | 215
-rw-r--r--  arch/m68k/kernel/asm-offsets.c | 2
-rw-r--r--  arch/m68k/kernel/entry.S | 58
-rw-r--r--  arch/m68k/kernel/process.c | 4
-rw-r--r--  arch/m68k/kernel/signal.c | 199
-rw-r--r--  arch/m68k/kernel/traps.c | 13
-rw-r--r--  arch/m68k/mac/misc.c | 1
-rw-r--r--  arch/m68k/mm/cache.c | 25
-rw-r--r--  arch/m68k/mm/init.c | 6
-rw-r--r--  arch/m68k/mm/kmap.c | 1
-rw-r--r--  arch/m68k/mm/memory.c | 1
-rw-r--r--  arch/m68k/mm/motorola.c | 2
-rw-r--r--  arch/m68k/sun3/config.c | 3
-rw-r--r--  arch/m68k/sun3/mmu_emu.c | 6
-rw-r--r--  arch/m68k/sun3/sun3ints.c | 1
-rw-r--r--  arch/m68k/sun3x/prom.c | 1
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mips/include/asm/mips-cps.h | 23
-rw-r--r--  arch/mips/include/uapi/asm/socket.h | 2
-rw-r--r--  arch/mips/kernel/signal.c | 4
-rw-r--r--  arch/mips/net/bpf_jit.c | 57
-rw-r--r--  arch/nios2/Kconfig.debug | 3
-rw-r--r--  arch/nios2/include/asm/irqflags.h | 4
-rw-r--r--  arch/nios2/include/asm/registers.h | 2
-rw-r--r--  arch/nios2/kernel/setup.c | 2
-rw-r--r--  arch/parisc/Kconfig | 2
-rw-r--r--  arch/parisc/include/uapi/asm/socket.h | 2
-rw-r--r--  arch/parisc/lib/iomap.c | 4
-rw-r--r--  arch/powerpc/boot/dts/fsl/t1023rdb.dts | 2
-rw-r--r--  arch/powerpc/include/asm/book3s/32/kup.h | 8
-rw-r--r--  arch/powerpc/include/asm/code-patching.h | 1
-rw-r--r--  arch/powerpc/include/asm/interrupt.h | 18
-rw-r--r--  arch/powerpc/include/asm/security_features.h | 5
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c | 9
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 25
-rw-r--r--  arch/powerpc/kernel/idle_book3s.S | 10
-rw-r--r--  arch/powerpc/kernel/interrupt.c | 43
-rw-r--r--  arch/powerpc/kernel/interrupt_64.S | 41
-rw-r--r--  arch/powerpc/kernel/irq.c | 6
-rw-r--r--  arch/powerpc/kernel/mce.c | 17
-rw-r--r--  arch/powerpc/kernel/security.c | 5
-rw-r--r--  arch/powerpc/kernel/signal.c | 4
-rw-r--r--  arch/powerpc/kernel/smp.c | 2
-rw-r--r--  arch/powerpc/kernel/traps.c | 43
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 64
-rw-r--r--  arch/powerpc/lib/code-patching.c | 7
-rw-r--r--  arch/powerpc/net/bpf_jit.h | 33
-rw-r--r--  arch/powerpc/net/bpf_jit64.h | 8
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c | 6
-rw-r--r--  arch/powerpc/net/bpf_jit_comp32.c | 16
-rw-r--r--  arch/powerpc/net/bpf_jit_comp64.c | 100
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c | 15
-rw-r--r--  arch/powerpc/sysdev/xics/xics-common.c | 4
-rw-r--r--  arch/powerpc/sysdev/xive/common.c | 3
-rw-r--r--  arch/riscv/Kconfig | 2
-rw-r--r--  arch/riscv/include/asm/syscall.h | 1
-rw-r--r--  arch/riscv/include/asm/vdso.h | 18
-rw-r--r--  arch/riscv/include/uapi/asm/unistd.h | 3
-rw-r--r--  arch/riscv/kernel/syscall_table.c | 1
-rw-r--r--  arch/riscv/kernel/vdso.c | 53
-rw-r--r--  arch/riscv/kernel/vdso/vdso.lds.S | 3
-rw-r--r--  arch/riscv/mm/cacheflush.c | 2
-rw-r--r--  arch/s390/Kconfig | 10
-rw-r--r--  arch/s390/Makefile | 7
-rw-r--r--  arch/s390/configs/debug_defconfig | 8
-rw-r--r--  arch/s390/configs/defconfig | 5
-rw-r--r--  arch/s390/include/asm/ccwgroup.h | 2
-rw-r--r--  arch/s390/include/asm/pci.h | 2
-rw-r--r--  arch/s390/kvm/gaccess.c | 12
-rw-r--r--  arch/s390/kvm/intercept.c | 4
-rw-r--r--  arch/s390/kvm/interrupt.c | 4
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 2
-rw-r--r--  arch/s390/kvm/kvm-s390.h | 2
-rw-r--r--  arch/s390/lib/string.c | 15
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 72
-rw-r--r--  arch/s390/pci/pci.c | 45
-rw-r--r--  arch/s390/pci/pci_event.c | 4
-rw-r--r--  arch/s390/pci/pci_mmio.c | 4
-rw-r--r--  arch/sh/boot/Makefile | 16
-rw-r--r--  arch/sh/include/asm/pgtable-3level.h | 2
-rw-r--r--  arch/sparc/include/uapi/asm/socket.h | 3
-rw-r--r--  arch/sparc/kernel/ioport.c | 4
-rw-r--r--  arch/sparc/lib/iomap.c | 2
-rw-r--r--  arch/x86/Kconfig | 11
-rw-r--r--  arch/x86/Makefile_32.cpu | 12
-rw-r--r--  arch/x86/crypto/sm4-aesni-avx-asm_64.S | 5
-rw-r--r--  arch/x86/events/core.c | 1
-rw-r--r--  arch/x86/events/intel/core.c | 1
-rw-r--r--  arch/x86/events/msr.c | 1
-rw-r--r--  arch/x86/hyperv/hv_apic.c | 20
-rw-r--r--  arch/x86/include/asm/entry-common.h | 2
-rw-r--r--  arch/x86/include/asm/kvm_page_track.h | 2
-rw-r--r--  arch/x86/include/asm/kvmclock.h | 14
-rw-r--r--  arch/x86/include/asm/pkeys.h | 2
-rw-r--r--  arch/x86/include/asm/special_insns.h | 2
-rw-r--r--  arch/x86/include/asm/xen/pci.h | 11
-rw-r--r--  arch/x86/include/asm/xen/swiotlb-xen.h | 6
-rw-r--r--  arch/x86/kernel/cpu/common.c | 1
-rw-r--r--  arch/x86/kernel/cpu/mce/core.c | 43
-rw-r--r--  arch/x86/kernel/cpu/resctrl/core.c | 6
-rw-r--r--  arch/x86/kernel/early-quirks.c | 6
-rw-r--r--  arch/x86/kernel/fpu/signal.c | 11
-rw-r--r--  arch/x86/kernel/hpet.c | 81
-rw-r--r--  arch/x86/kernel/kvmclock.c | 13
-rw-r--r--  arch/x86/kernel/setup.c | 26
-rw-r--r--  arch/x86/kernel/sev-shared.c | 2
-rw-r--r--  arch/x86/kvm/cpuid.c | 4
-rw-r--r--  arch/x86/kvm/emulate.c | 3
-rw-r--r--  arch/x86/kvm/hyperv.c | 7
-rw-r--r--  arch/x86/kvm/hyperv.h | 2
-rw-r--r--  arch/x86/kvm/ioapic.c | 10
-rw-r--r--  arch/x86/kvm/lapic.c | 20
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 17
-rw-r--r--  arch/x86/kvm/mmu/page_track.c | 4
-rw-r--r--  arch/x86/kvm/mmu/paging_tmpl.h | 46
-rw-r--r--  arch/x86/kvm/svm/nested.c | 10
-rw-r--r--  arch/x86/kvm/svm/sev.c | 99
-rw-r--r--  arch/x86/kvm/svm/svm.c | 137
-rw-r--r--  arch/x86/kvm/svm/svm.h | 5
-rw-r--r--  arch/x86/kvm/vmx/evmcs.c | 12
-rw-r--r--  arch/x86/kvm/vmx/nested.c | 24
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 54
-rw-r--r--  arch/x86/kvm/vmx/vmx.h | 5
-rw-r--r--  arch/x86/kvm/x86.c | 31
-rw-r--r--  arch/x86/lib/insn.c | 4
-rw-r--r--  arch/x86/mm/fault.c | 26
-rw-r--r--  arch/x86/mm/init_64.c | 6
-rw-r--r--  arch/x86/mm/pat/memtype.c | 7
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 66
-rw-r--r--  arch/x86/pci/xen.c | 15
-rw-r--r--  arch/x86/platform/olpc/olpc.c | 2
-rw-r--r--  arch/x86/platform/pvh/enlighten.c | 12
-rw-r--r--  arch/x86/xen/Kconfig | 19
-rw-r--r--  arch/x86/xen/Makefile | 2
-rw-r--r--  arch/x86/xen/enlighten.c | 54
-rw-r--r--  arch/x86/xen/enlighten_pv.c | 57
-rw-r--r--  arch/x86/xen/enlighten_pvh.c | 10
-rw-r--r--  arch/x86/xen/mmu_pv.c | 9
-rw-r--r--  arch/x86/xen/pci-swiotlb-xen.c | 4
-rw-r--r--  arch/x86/xen/smp_pv.c | 4
-rw-r--r--  arch/x86/xen/xen-ops.h | 5
-rw-r--r--  arch/xtensa/include/asm/kmem_layout.h | 2
-rw-r--r--  arch/xtensa/kernel/irq.c | 2
-rw-r--r--  arch/xtensa/kernel/setup.c | 12
-rw-r--r--  arch/xtensa/mm/mmu.c | 2
-rw-r--r--  arch/xtensa/platforms/iss/network.c | 2
-rw-r--r--  arch/xtensa/platforms/xtfpga/setup.c | 12
-rw-r--r--  block/bdev.c | 2
-rw-r--r--  block/bfq-cgroup.c | 6
-rw-r--r--  block/bfq-iosched.c | 16
-rw-r--r--  block/bio.c | 2
-rw-r--r--  block/blk-cgroup.c | 18
-rw-r--r--  block/blk-core.c | 148
-rw-r--r--  block/blk-integrity.c | 9
-rw-r--r--  block/blk-mq-debugfs.c | 1
-rw-r--r--  block/blk-mq-tag.c | 2
-rw-r--r--  block/blk-mq.c | 9
-rw-r--r--  block/blk.h | 2
-rw-r--r--  block/bsg.c | 23
-rw-r--r--  block/fops.c | 21
-rw-r--r--  block/genhd.c | 24
-rw-r--r--  block/kyber-iosched.c | 10
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/acpi/arm64/gtdt.c | 2
-rw-r--r--  drivers/acpi/nfit/core.c | 12
-rw-r--r--  drivers/acpi/osl.c | 23
-rw-r--r--  drivers/acpi/tables.c | 3
-rw-r--r--  drivers/acpi/x86/s2idle.c | 3
-rw-r--r--  drivers/android/binder.c | 58
-rw-r--r--  drivers/android/binder_internal.h | 2
-rw-r--r--  drivers/ata/libahci_platform.c | 5
-rw-r--r--  drivers/ata/pata_legacy.c | 6
-rw-r--r--  drivers/base/core.c | 93
-rw-r--r--  drivers/base/power/trace.c | 10
-rw-r--r--  drivers/base/property.c | 63
-rw-r--r--  drivers/base/swnode.c | 3
-rw-r--r--  drivers/base/test/Makefile | 2
-rw-r--r--  drivers/bcma/main.c | 2
-rw-r--r--  drivers/block/brd.c | 44
-rw-r--r--  drivers/block/nbd.c | 29
-rw-r--r--  drivers/block/rnbd/rnbd-clt-sysfs.c | 4
-rw-r--r--  drivers/block/virtio_blk.c | 37
-rw-r--r--  drivers/bluetooth/btintel.c | 239
-rw-r--r--  drivers/bluetooth/btintel.h | 11
-rw-r--r--  drivers/bluetooth/btmrvl_main.c | 6
-rw-r--r--  drivers/bluetooth/btmtkuart.c | 13
-rw-r--r--  drivers/bluetooth/btrsi.c | 1
-rw-r--r--  drivers/bluetooth/btrtl.c | 26
-rw-r--r--  drivers/bluetooth/btusb.c | 64
-rw-r--r--  drivers/bluetooth/hci_h5.c | 35
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 3
-rw-r--r--  drivers/bluetooth/hci_qca.c | 5
-rw-r--r--  drivers/bluetooth/hci_vhci.c | 122
-rw-r--r--  drivers/bus/Kconfig | 12
-rw-r--r--  drivers/bus/Makefile | 2
-rw-r--r--  drivers/bus/simple-pm-bus.c | 42
-rw-r--r--  drivers/bus/ti-sysc.c | 4
-rw-r--r--  drivers/clk/qcom/Kconfig | 1
-rw-r--r--  drivers/clk/qcom/gcc-sm6115.c | 2
-rw-r--r--  drivers/clk/renesas/r9a07g044-cpg.c | 2
-rw-r--r--  drivers/clk/renesas/rzg2l-cpg.c | 2
-rw-r--r--  drivers/clk/socfpga/clk-agilex.c | 9
-rw-r--r--  drivers/comedi/comedi_fops.c | 1
-rw-r--r--  drivers/cpufreq/cpufreq_governor_attr_set.c | 2
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 22
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 14
-rw-r--r--  drivers/edac/armada_xp_edac.c | 2
-rw-r--r--  drivers/edac/dmc520_edac.c | 2
-rw-r--r--  drivers/edac/synopsys_edac.c | 2
-rw-r--r--  drivers/firmware/Kconfig | 5
-rw-r--r--  drivers/firmware/arm_ffa/bus.c | 10
-rw-r--r--  drivers/firmware/arm_scmi/Kconfig | 2
-rw-r--r--  drivers/firmware/arm_scmi/virtio.c | 44
-rw-r--r--  drivers/firmware/efi/cper.c | 4
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 2
-rw-r--r--  drivers/firmware/efi/runtime-wrappers.c | 2
-rw-r--r--  drivers/fpga/dfl.c | 14
-rw-r--r--  drivers/fpga/ice40-spi.c | 7
-rw-r--r--  drivers/fpga/machxo2-spi.c | 6
-rw-r--r--  drivers/gpio/gpio-74x164.c | 8
-rw-r--r--  drivers/gpio/gpio-aspeed-sgpio.c | 2
-rw-r--r--  drivers/gpio/gpio-mockup.c | 21
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 27
-rw-r--r--  drivers/gpio/gpio-rockchip.c | 26
-rw-r--r--  drivers/gpio/gpio-uniphier.c | 4
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 111
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 55
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c | 66
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_asic_id.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h | 27
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h | 86
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_types.h | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/si_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c | 481
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 28
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 16
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 12
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 21
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 15
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 18
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 15
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 43
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 8
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_scaler.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/hyperv/hyperv_drm.h | 1
-rw-r--r--  drivers/gpu/drm/hyperv/hyperv_drm_modeset.c | 1
-rw-r--r--  drivers/gpu/drm/hyperv/hyperv_drm_proto.c | 54
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_acpi.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 19
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 12
-rw-r--r--  drivers/gpu/drm/kmb/kmb_crtc.c | 41
-rw-r--r--  drivers/gpu/drm/kmb/kmb_drv.c | 10
-rw-r--r--  drivers/gpu/drm/kmb/kmb_drv.h | 13
-rw-r--r--  drivers/gpu/drm/kmb/kmb_dsi.c | 25
-rw-r--r--  drivers/gpu/drm/kmb/kmb_dsi.h | 2
-rw-r--r--  drivers/gpu/drm/kmb/kmb_plane.c | 122
-rw-r--r--  drivers/gpu/drm/kmb/kmb_plane.h | 11
-rw-r--r--  drivers/gpu/drm/kmb/kmb_regs.h | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 157
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 9
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 9
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 3
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 53
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 11
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 16
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 10
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c | 30
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | 4
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_ctrl.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 15
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 47
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 70
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu_devfreq.c | 9
-rw-r--r--  drivers/gpu/drm/msm/msm_submitqueue.c | 72
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_drv.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/crc.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/class.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c | 311
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c | 7
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-abt-y030xx067a.c | 4
-rw-r--r--  drivers/gpu/drm/panel/panel-ilitek-ili9881c.c | 12
-rw-r--r--  drivers/gpu/drm/r128/ati_pcigart.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 16
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_lvds.c | 11
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_lvds.h | 5
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 26
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c | 7
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | 97
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 3
-rw-r--r--  drivers/gpu/drm/tegra/dc.h | 6
-rw-r--r--  drivers/gpu/drm/tegra/uapi.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 92
-rw-r--r--  drivers/gpu/host1x/fence.c | 6
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_pcie.c | 8
-rw-r--r--  drivers/hid/hid-apple.c | 7
-rw-r--r--  drivers/hid/hid-betopff.c | 13
-rw-r--r--  drivers/hid/hid-u2fzero.c | 4
-rw-r--r--  drivers/hid/wacom_wac.c | 8
-rw-r--r--  drivers/hsi/clients/ssi_protocol.c | 4
-rw-r--r--  drivers/hwmon/k10temp.c | 6
-rw-r--r--  drivers/hwmon/ltc2947-core.c | 8
-rw-r--r--  drivers/hwmon/mlxreg-fan.c | 12
-rw-r--r--  drivers/hwmon/occ/common.c | 17
-rw-r--r--  drivers/hwmon/pmbus/ibm-cffps.c | 10
-rw-r--r--  drivers/hwmon/pmbus/mp2975.c | 2
-rw-r--r--  drivers/hwmon/tmp421.c | 71
-rw-r--r--  drivers/hwmon/w83791d.c | 29
-rw-r--r--  drivers/hwmon/w83792d.c | 28
-rw-r--r--  drivers/hwmon/w83793.c | 26
-rw-r--r--  drivers/hwtracing/coresight/coresight-syscfg.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-mlxcpld.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-mt65xx.c | 11
-rw-r--r--  drivers/i2c/i2c-core-acpi.c | 1
-rw-r--r--  drivers/iio/accel/fxls8962af-core.c | 2
-rw-r--r--  drivers/iio/adc/ad7192.c | 1
-rw-r--r--  drivers/iio/adc/ad7780.c | 2
-rw-r--r--  drivers/iio/adc/ad7793.c | 2
-rw-r--r--  drivers/iio/adc/aspeed_adc.c | 1
-rw-r--r--  drivers/iio/adc/max1027.c | 3
-rw-r--r--  drivers/iio/adc/mt6577_auxadc.c | 8
-rw-r--r--  drivers/iio/adc/rzg2l_adc.c | 6
-rw-r--r--  drivers/iio/adc/ti-adc128s052.c | 6
-rw-r--r--  drivers/iio/common/ssp_sensors/ssp_spi.c | 11
-rw-r--r--  drivers/iio/dac/ti-dac5571.c | 1
-rw-r--r--  drivers/iio/imu/adis16475.c | 3
-rw-r--r--  drivers/iio/imu/adis16480.c | 14
-rw-r--r--  drivers/iio/light/opt3001.c | 6
-rw-r--r--  drivers/iio/test/Makefile | 1
-rw-r--r--  drivers/infiniband/core/cma.c | 51
-rw-r--r--  drivers/infiniband/core/cma_priv.h | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/ipoib_tx.c | 8
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c | 31
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 13
-rw-r--r--  drivers/infiniband/hw/irdma/cm.c | 4
-rw-r--r--  drivers/infiniband/hw/irdma/hw.c | 14
-rw-r--r--  drivers/infiniband/hw/irdma/i40iw_if.c | 2
-rw-r--r--  drivers/infiniband/hw/irdma/main.h | 1
-rw-r--r--  drivers/infiniband/hw/irdma/user.h | 2
-rw-r--r--  drivers/infiniband/hw/irdma/utils.c | 2
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c | 9
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 1
-rw-r--r--  drivers/infiniband/hw/qedr/main.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_sysfs.c | 2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib.h | 2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_main.c | 2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 16
-rw-r--r--  drivers/input/joystick/xpad.c | 2
-rw-r--r--  drivers/input/keyboard/snvs_pwrkey.c | 29
-rw-r--r--  drivers/input/touchscreen.c | 42
-rw-r--r--  drivers/input/touchscreen/resistive-adc-touch.c | 29
-rw-r--r--  drivers/interconnect/qcom/sdm660.c | 25
-rw-r--r--  drivers/iommu/Kconfig | 11
-rw-r--r--  drivers/iommu/apple-dart.c | 56
-rw-r--r--  drivers/iommu/arm/arm-smmu/Makefile | 3
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-impl.c | 3
-rw-r--r--  drivers/iommu/intel/dmar.c | 6
-rw-r--r--  drivers/ipack/devices/ipoctal.c | 63
-rw-r--r--  drivers/irqchip/Kconfig | 1
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c | 4
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 2
-rw-r--r--  drivers/irqchip/irq-gic.c | 52
-rw-r--r--  drivers/irqchip/irq-mbigen.c | 6
-rw-r--r--  drivers/irqchip/irq-renesas-rza1.c | 12
-rw-r--r--  drivers/isdn/capi/kcapi.c | 5
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcpci.c | 8
-rw-r--r--  drivers/isdn/hardware/mISDN/netjet.c | 2
-rw-r--r--  drivers/mcb/mcb-core.c | 12
-rw-r--r--  drivers/md/dm-clone-target.c | 2
-rw-r--r--  drivers/md/dm-rq.c | 8
-rw-r--r--  drivers/md/dm-verity-target.c | 15
-rw-r--r--  drivers/md/dm.c | 17
-rw-r--r--  drivers/md/md.c | 5
-rw-r--r--  drivers/media/platform/Kconfig | 2
-rw-r--r--  drivers/media/platform/s5p-jpeg/jpeg-core.c | 18
-rw-r--r--  drivers/media/platform/s5p-jpeg/jpeg-core.h | 28
-rw-r--r--  drivers/media/rc/ir_toy.c | 21
-rw-r--r--  drivers/misc/Kconfig | 1
-rw-r--r--  drivers/misc/bcm-vk/bcm_vk_tty.c | 6
-rw-r--r--  drivers/misc/cb710/sgbuf2.c | 2
-rw-r--r--  drivers/misc/eeprom/at25.c | 8
-rw-r--r--  drivers/misc/eeprom/eeprom_93xx46.c | 18
-rw-r--r--  drivers/misc/fastrpc.c | 2
-rw-r--r--  drivers/misc/gehc-achc.c | 1
-rw-r--r--  drivers/misc/genwqe/card_base.c | 2
-rw-r--r--  drivers/misc/habanalabs/common/command_submission.c | 104
-rw-r--r--  drivers/misc/habanalabs/common/hw_queue.c | 9
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi.c | 11
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi_security.c | 115
-rw-r--r--  drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h | 2
-rw-r--r--  drivers/misc/mei/hbm.c | 12
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 1
-rw-r--r--  drivers/misc/mei/pci-me.c | 1
-rw-r--r--  drivers/mmc/host/Kconfig | 2
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 15
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 73
-rw-r--r--  drivers/mmc/host/renesas_sdhi_core.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-of-at91.c | 22
-rw-r--r--  drivers/mtd/nand/raw/qcom_nandc.c | 8
-rw-r--r--  drivers/net/appletalk/cops.c | 2
-rw-r--r--  drivers/net/appletalk/ltpc.c | 3
-rw-r--r--  drivers/net/arcnet/arc-rimi.c | 5
-rw-r--r--  drivers/net/arcnet/arcdevice.h | 5
-rw-r--r--  drivers/net/arcnet/com20020-isa.c | 2
-rw-r--r--  drivers/net/arcnet/com20020-pci.c | 2
-rw-r--r--  drivers/net/arcnet/com20020.c | 4
-rw-r--r--  drivers/net/arcnet/com20020_cs.c | 2
-rw-r--r--  drivers/net/arcnet/com90io.c | 2
-rw-r--r--  drivers/net/arcnet/com90xx.c | 3
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 4
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c | 14
-rw-r--r--  drivers/net/can/rcar/rcar_can.c | 20
-rw-r--r--  drivers/net/can/sja1000/peak_pci.c | 9
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 8
-rw-r--r--  drivers/net/dsa/Kconfig | 1
-rw-r--r--  drivers/net/dsa/Makefile | 2
-rw-r--r--  drivers/net/dsa/b53/b53_mdio.c | 21
-rw-r--r--  drivers/net/dsa/b53/b53_mmap.c | 13
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 5
-rw-r--r--  drivers/net/dsa/b53/b53_spi.c | 13
-rw-r--r--  drivers/net/dsa/b53/b53_srab.c | 21
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 14
-rw-r--r--  drivers/net/dsa/dsa_loop.c | 22
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek.c | 16
-rw-r--r--  drivers/net/dsa/lan9303-core.c | 6
-rw-r--r--  drivers/net/dsa/lan9303.h | 1
-rw-r--r--  drivers/net/dsa/lan9303_i2c.c | 24
-rw-r--r--  drivers/net/dsa/lan9303_mdio.c | 15
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 20
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_spi.c | 11
-rw-r--r--  drivers/net/dsa/microchip/ksz8863_smi.c | 13
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_i2c.c | 14
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_spi.c | 8
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 4
-rw-r--r--  drivers/net/dsa/mt7530.c | 26
-rw-r--r--  drivers/net/dsa/mv88e6060.c | 18
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 180
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 10
-rw-r--r--  drivers/net/dsa/mv88e6xxx/devlink.c | 73
-rw-r--r--  drivers/net/dsa/mv88e6xxx/devlink.h | 6
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.c | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 23
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.h | 2
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 155
-rw-r--r--  drivers/net/dsa/ocelot/felix.h | 3
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 22
-rw-r--r--  drivers/net/dsa/ocelot/seville_vsc9953.c | 20
-rw-r--r--  drivers/net/dsa/qca/ar9331.c | 18
-rw-r--r--  drivers/net/dsa/qca8k.c | 453
-rw-r--r--  drivers/net/dsa/qca8k.h | 35
-rw-r--r--  drivers/net/dsa/realtek-smi-core.c | 26
-rw-r--r--  drivers/net/dsa/realtek-smi-core.h | 4
-rw-r--r--  drivers/net/dsa/rtl8365mb.c | 1982
-rw-r--r--  drivers/net/dsa/rtl8366.c | 96
-rw-r--r--  drivers/net/dsa/rtl8366rb.c | 301
-rw-r--r--  drivers/net/dsa/sja1105/sja1105.h | 27
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_clocking.c | 37
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_devlink.c | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_flower.c | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 158
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_mdio.c | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.c | 45
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.h | 19
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_spi.c | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.c | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.h | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_vl.c | 17
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_vl.h | 2
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-core.c | 6
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-platform.c | 22
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-spi.c | 22
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx.h | 1
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x.c | 6
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x.h | 1
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x_i2c.c | 18
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x_mdio.c | 18
-rw-r--r--  drivers/net/ethernet/3com/3c509.c | 2
-rw-r--r--  drivers/net/ethernet/3com/3c515.c | 5
-rw-r--r--  drivers/net/ethernet/3com/3c574_cs.c | 11
-rw-r--r--  drivers/net/ethernet/3com/3c589_cs.c | 10
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 4
-rw-r--r--  drivers/net/ethernet/8390/apne.c | 3
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 12
-rw-r--r--  drivers/net/ethernet/8390/axnet_cs.c | 7
-rw-r--r--  drivers/net/ethernet/8390/mcf8390.c | 3
-rw-r--r--  drivers/net/ethernet/8390/ne.c | 4
-rw-r--r--  drivers/net/ethernet/8390/ne2k-pci.c | 2
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c | 22
-rw-r--r--  drivers/net/ethernet/8390/stnic.c | 5
-rw-r--r--  drivers/net/ethernet/8390/zorro8390.c | 3
-rw-r--r--  drivers/net/ethernet/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/actions/owl-emac.c | 6
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 14
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 8
-rw-r--r--  drivers/net/ethernet/agere/et131x.c | 4
-rw-r--r--  drivers/net/ethernet/alacritech/slicoss.c | 4
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 4
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 20
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 4
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 2
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 6
-rw-r--r--  drivers/net/ethernet/amd/atarilance.c | 4
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 2
-rw-r--r--  drivers/net/ethernet/amd/nmclan_cs.c | 5
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 15
-rw-r--r--  drivers/net/ethernet/amd/sun3lance.c | 4
-rw-r--r--  drivers/net/ethernet/amd/sunlance.c | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/mac.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/main.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/apple/bmac.c | 15
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_macsec.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c | 2
-rw-r--r--  drivers/net/ethernet/arc/Kconfig | 5
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 4
-rw-r--r--  drivers/net/ethernet/asix/Kconfig | 35
-rw-r--r--  drivers/net/ethernet/asix/Makefile | 6
-rw-r--r--  drivers/net/ethernet/asix/ax88796c_ioctl.c | 239
-rw-r--r--  drivers/net/ethernet/asix/ax88796c_ioctl.h | 26
-rw-r--r--  drivers/net/ethernet/asix/ax88796c_main.c | 1163
-rw-r--r--  drivers/net/ethernet/asix/ax88796c_main.h | 568
-rw-r--r--  drivers/net/ethernet/asix/ax88796c_spi.c | 115
-rw-r--r--  drivers/net/ethernet/asix/ax88796c_spi.h | 69
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 6
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atlx.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bcm4908_enet.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma.c | 39
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 23
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 41
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 13
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 74
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 8
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 155
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 60
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 5
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 11
-rw-r--r--  drivers/net/ethernet/cadence/macb_ptp.c | 13
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 8
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c | 3
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 23
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 9
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/gmac.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/pm3393.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/subr.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/vsc7326.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/common.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/t3_hw.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h | 2
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.c | 13
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c | 2
-rw-r--r--  drivers/net/ethernet/cirrus/mac89x0.c | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 9
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_pp.c | 2
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 6
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 9
-rw-r--r--  drivers/net/ethernet/dec/tulip/de2104x.c | 15
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c | 35
-rw-r--r--  drivers/net/ethernet/dec/tulip/dmfe.c | 9
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 45
-rw-r--r--  drivers/net/ethernet/dec/tulip/uli526x.c | 11
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 6
-rw-r--r--  drivers/net/ethernet/dec/tulip/xircom_cb.c | 4
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 5
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 6
-rw-r--r--  drivers/net/ethernet/dnet.c | 8
-rw-r--r--  drivers/net/ethernet/ec_bhf.c | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 7
-rw-r--r--  drivers/net/ethernet/ethoc.c | 16
-rw-r--r--  drivers/net/ethernet/ezchip/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c | 4
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 9
-rw-r--r--  drivers/net/ethernet/fealnx.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c | 14
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 24
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 7
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 58
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 333
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 6
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ierb.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ierb.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 20
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 18
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_vf.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_tgec.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_tgec.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 4
-rw-r--r--  drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 14
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h | 33
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c | 2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.h | 1
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 146
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx.c | 106
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx.c | 117
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx_dqo.c | 84
-rw-r--r--  drivers/net/ethernet/google/gve/gve_utils.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hisi_femac.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c | 21
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 74
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 57
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 21
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 38
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 28
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 110
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 37
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 60
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 7
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 6
-rw-r--r--  drivers/net/ethernet/i825xx/sun3_82586.c | 7
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 14
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 46
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 13
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 15
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 26
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 31
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 50
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 52
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 12
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 195
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 206
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 92
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 121
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 131
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c | 225
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.h | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 216
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 32
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 192
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devids.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c | 249
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.c | 649
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.h | 83
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 235
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 279
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fltr.c | 80
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fltr.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 43
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 856
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 36
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 1387
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_protocol_type.h | 169
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 387
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 24
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 151
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.c | 386
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 197
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 2473
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 149
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c | 1056
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.h | 152
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_trace.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 326
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 147
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 102
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.h | 14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 19
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 403
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 74
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 158
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.h | 20
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_hw.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_hw.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_hw.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 23
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 62
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6
-rw-r--r--  drivers/net/ethernet/jme.c | 4
-rw-r--r--  drivers/net/ethernet/korina.c | 4
-rw-r--r--  drivers/net/ethernet/lantiq_xrx200.c | 2
-rw-r--r--  drivers/net/ethernet/litex/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/litex/litex_liteeth.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 16
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 19
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 76
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h | 572
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.c | 133
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.c | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 22
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c | 466
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 31
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 96
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 18
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 52
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c | 15
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 234
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 133
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 273
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_devlink.c | 29
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_devlink.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c | 17
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 18
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 13
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 3
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_star_emac.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 90
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 102
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 348
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 92
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 396
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 231
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 66
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag.c) | 106
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag.h) | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c) | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h) | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 611
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c | 162
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 87
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 212
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c193
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h56
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h180
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c432
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c540
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c186
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h1
-rw-r--r--drivers/net/ethernet/micrel/Makefile6
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c15
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c22
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c4
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c16
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c7
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c10
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c11
-rw-r--r--drivers/net/ethernet/microchip/encx24j600_hw.h4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c91
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c6
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c4
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c6
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c2
-rw-r--r--drivers/net/ethernet/mscc/Kconfig2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c402
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_devlink.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c125
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mrp.c10
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c22
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c4
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c6
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c9
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c6
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c11
-rw-r--r--drivers/net/ethernet/neterion/s2io.c8
-rw-r--r--drivers/net/ethernet/neterion/s2io.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/qdisc.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c19
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c2
-rw-r--r--drivers/net/ethernet/ni/nixge.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c51
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c10
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c4
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c5
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c6
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c48
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.c5
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c38
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c266
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h49
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c92
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c244
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c130
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c14
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h44
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h143
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h1491
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.h11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c1389
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.h7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c124
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h347
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c25
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h12339
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h222
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c405
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c98
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.h60
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h286
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h500
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h135
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c167
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h131
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c66
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h765
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h2474
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.h7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h95
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_selftest.h30
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h223
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c63
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c201
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h138
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h311
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c6
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c21
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c5
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c24
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c7
-rw-r--r--drivers/net/ethernet/realtek/8139too.c7
-rw-r--r--drivers/net/ethernet/realtek/atp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c3
-rw-r--r--drivers/net/ethernet/renesas/ravb.h52
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c728
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c18
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c10
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c3
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c9
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c2
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.h6
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c4
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c10
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c6
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c37
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/ptp.c4
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c4
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.h2
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c4
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/silan/sc92031.c14
-rw-r--r--drivers/net/ethernet/sis/sis190.c10
-rw-r--r--drivers/net/ethernet/sis/sis900.c19
-rw-r--r--drivers/net/ethernet/smsc/epic100.c4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c15
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c22
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c26
-rw-r--r--drivers/net/ethernet/socionext/netsec.c25
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c8
-rw-r--r--drivers/net/ethernet/sun/Kconfig1
-rw-r--r--drivers/net/ethernet/sun/cassini.c7
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c7
-rw-r--r--drivers/net/ethernet/sun/niu.c46
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c6
-rw-r--r--drivers/net/ethernet/sun/sungem.c15
-rw-r--r--drivers/net/ethernet/sun/sunhme.c23
-rw-r--r--drivers/net/ethernet/sun/sunqe.c4
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c4
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac.h2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c8
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c19
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c11
-rw-r--r--drivers/net/ethernet/ti/cpts.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c8
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c8
-rw-r--r--drivers/net/ethernet/ti/tlan.c14
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c11
-rw-r--r--drivers/net/ethernet/via/via-rhine.c4
-rw-r--r--drivers/net/ethernet/via/via-velocity.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5100-spi.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c11
-rw-r--r--drivers/net/ethernet/wiznet/w5100.h2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c11
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c14
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c7
-rw-r--r--drivers/net/fddi/defxx.c12
-rw-r--r--drivers/net/fddi/defza.c2
-rw-r--r--drivers/net/fddi/skfp/h/smc.h2
-rw-r--r--drivers/net/fddi/skfp/skfddi.c9
-rw-r--r--drivers/net/fddi/skfp/smtinit.c4
-rw-r--r--drivers/net/fjes/fjes_hw.c3
-rw-r--r--drivers/net/fjes/fjes_hw.h2
-rw-r--r--drivers/net/fjes/fjes_main.c14
-rw-r--r--drivers/net/gtp.c2
-rw-r--r--drivers/net/hamradio/6pack.c6
-rw-r--r--drivers/net/hamradio/Kconfig1
-rw-r--r--drivers/net/hamradio/baycom_epp.c10
-rw-r--r--drivers/net/hamradio/bpqether.c7
-rw-r--r--drivers/net/hamradio/dmascc.c11
-rw-r--r--drivers/net/hamradio/hdlcdrv.c4
-rw-r--r--drivers/net/hamradio/mkiss.c6
-rw-r--r--drivers/net/hamradio/scc.c7
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/hippi/rrunner.c6
-rw-r--r--drivers/net/hyperv/netvsc_drv.c6
-rw-r--r--drivers/net/ieee802154/ca8210.c2
-rw-r--r--drivers/net/ifb.c3
-rw-r--r--drivers/net/ipa/Kconfig1
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/ipvlan/ipvtap.c2
-rw-r--r--drivers/net/macsec.c2
-rw-r--r--drivers/net/macvlan.c4
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c6
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c15
-rw-r--r--drivers/net/mhi_net.c6
-rw-r--r--drivers/net/net_failover.c3
-rw-r--r--drivers/net/netdevsim/dev.c10
-rw-r--r--drivers/net/ntb_netdev.c2
-rw-r--r--drivers/net/pcs/pcs-xpcs-nxp.c2
-rw-r--r--drivers/net/pcs/pcs-xpcs.c45
-rw-r--r--drivers/net/phy/at803x.c150
-rw-r--r--drivers/net/phy/bcm7xxx.c114
-rw-r--r--drivers/net/phy/dp83867.c19
-rw-r--r--drivers/net/phy/marvell10g.c107
-rw-r--r--drivers/net/phy/mdio_bus.c39
-rw-r--r--drivers/net/phy/mdio_device.c11
-rw-r--r--drivers/net/phy/micrel.c107
-rw-r--r--drivers/net/phy/mscc/mscc_main.c2
-rw-r--r--drivers/net/phy/mxl-gpy.c23
-rw-r--r--drivers/net/phy/phy_device.c13
-rw-r--r--drivers/net/phy/phylink.c84
-rw-r--r--drivers/net/phy/realtek.c8
-rw-r--r--drivers/net/phy/sfp.c2
-rw-r--r--drivers/net/plip/plip.c8
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/rionet.c14
-rw-r--r--drivers/net/sb1000.c12
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/usb/Kconfig5
-rw-r--r--drivers/net/usb/aqc111.c4
-rw-r--r--drivers/net/usb/asix_common.c2
-rw-r--r--drivers/net/usb/asix_devices.c2
-rw-r--r--drivers/net/usb/ax88172a.c2
-rw-r--r--drivers/net/usb/ax88179_178a.c12
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/cdc-phonet.c4
-rw-r--r--drivers/net/usb/ch9200.c4
-rw-r--r--drivers/net/usb/cx82310_eth.c5
-rw-r--r--drivers/net/usb/dm9601.c7
-rw-r--r--drivers/net/usb/hso.c12
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/kalmia.c2
-rw-r--r--drivers/net/usb/kaweth.c3
-rw-r--r--drivers/net/usb/lan78xx.c4
-rw-r--r--drivers/net/usb/mcs7830.c9
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/usb/r8152.c20
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/usb/rtl8150.c4
-rw-r--r--drivers/net/usb/sierra_net.c6
-rw-r--r--drivers/net/usb/smsc75xx.c9
-rw-r--r--drivers/net/usb/smsc95xx.c12
-rw-r--r--drivers/net/usb/sr9700.c9
-rw-r--r--drivers/net/usb/sr9800.c7
-rw-r--r--drivers/net/usb/usbnet.c10
-rw-r--r--drivers/net/virtio_net.c20
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c4
-rw-r--r--drivers/net/vrf.c4
-rw-r--r--drivers/net/vxlan.c2
-rw-r--r--drivers/net/wan/hdlc_fr.c4
-rw-r--r--drivers/net/wan/lapbether.c2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c31
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c58
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h49
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c16
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c25
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c4344
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h226
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c243
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c23
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c45
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h13
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c1443
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c350
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h18
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c18
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c42
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h11
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c152
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h107
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig4
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c105
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c56
-rw-r--r--drivers/net/wireless/ath/spectral_common.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h6
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c11
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c99
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c29
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c370
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c11
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c31
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c91
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c134
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_if.h7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c46
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c54
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h24
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c22
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h49
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c119
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.c753
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c19
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c46
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c47
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c10
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c74
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c16
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c24
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h11
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h15
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c88
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.h44
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_flash.c42
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c4
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/net/xen-netback/netback.c4
-rw-r--r--drivers/net/xen-netfront.c4
-rw-r--r--drivers/nfc/microread/i2c.c4
-rw-r--r--drivers/nfc/microread/mei.c2
-rw-r--r--drivers/nfc/pn533/i2c.c2
-rw-r--r--drivers/nfc/pn533/pn533.c2
-rw-r--r--drivers/nfc/pn533/pn533.h4
-rw-r--r--drivers/nfc/pn533/uart.c4
-rw-r--r--drivers/nfc/pn533/usb.c2
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c29
-rw-r--r--drivers/nfc/s3fwrn5/nci.c18
-rw-r--r--drivers/nfc/st-nci/i2c.c4
-rw-r--r--drivers/nfc/st-nci/ndlc.c4
-rw-r--r--drivers/nfc/st-nci/se.c6
-rw-r--r--drivers/nfc/st-nci/spi.c5
-rw-r--r--drivers/nfc/st21nfca/i2c.c4
-rw-r--r--drivers/nfc/st21nfca/se.c4
-rw-r--r--drivers/nfc/st95hf/core.c6
-rw-r--r--drivers/nfc/trf7970a.c8
-rw-r--r--drivers/nvdimm/pmem.c5
-rw-r--r--drivers/nvme/host/core.c75
-rw-r--r--drivers/nvme/host/fc.c18
-rw-r--r--drivers/nvme/host/multipath.c9
-rw-r--r--drivers/nvme/host/nvme.h6
-rw-r--r--drivers/nvme/host/pci.c5
-rw-r--r--drivers/nvme/host/rdma.c16
-rw-r--r--drivers/nvme/host/tcp.c33
-rw-r--r--drivers/nvme/target/configfs.c2
-rw-r--r--drivers/nvmem/Kconfig1
-rw-r--r--drivers/nvmem/core.c3
-rw-r--r--drivers/of/Kconfig4
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/base.c1
-rw-r--r--drivers/of/device.c6
-rw-r--r--drivers/of/of_reserved_mem.c2
-rw-r--r--drivers/of/property.c2
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/controller/pci-hyperv.c13
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c9
-rw-r--r--drivers/pci/msi.c18
-rw-r--r--drivers/pci/pci-acpi.c5
-rw-r--r--drivers/pci/quirks.c9
-rw-r--r--drivers/pci/vpd.c36
-rw-r--r--drivers/pcmcia/pcmcia_cis.c5
-rw-r--r--drivers/perf/arm_pmu.c2
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c19
-rw-r--r--drivers/pinctrl/pinctrl-amd.h1
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c67
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h10
-rw-r--r--drivers/pinctrl/qcom/Kconfig3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c37
-rw-r--r--drivers/platform/mellanox/mlxreg-io.c4
-rw-r--r--drivers/platform/x86/amd-pmc.c3
-rw-r--r--drivers/platform/x86/dell/Kconfig2
-rw-r--r--drivers/platform/x86/gigabyte-wmi.c2
-rw-r--r--drivers/platform/x86/intel/hid.c27
-rw-r--r--drivers/platform/x86/intel/int1092/intel_sar.c23
-rw-r--r--drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c2
-rw-r--r--drivers/platform/x86/intel/punit_ipc.c3
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c6
-rw-r--r--drivers/platform/x86/lg-laptop.c2
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c54
-rw-r--r--drivers/ptp/Kconfig1
-rw-r--r--drivers/ptp/idt8a340_reg.h783
-rw-r--r--drivers/ptp/ptp_clock.c16
-rw-r--r--drivers/ptp/ptp_clockmatrix.c772
-rw-r--r--drivers/ptp/ptp_clockmatrix.h117
-rw-r--r--drivers/ptp/ptp_kvm_x86.c13
-rw-r--r--drivers/ptp/ptp_ocp.c6
-rw-r--r--drivers/ptp/ptp_pch.c1
-rw-r--r--drivers/regulator/max14577-regulator.c2
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c2
-rw-r--r--drivers/s390/char/sclp_early.c3
-rw-r--r--drivers/s390/cio/blacklist.c8
-rw-r--r--drivers/s390/cio/ccwgroup.c10
-rw-r--r--drivers/s390/cio/css.c40
-rw-r--r--drivers/s390/cio/css.h10
-rw-r--r--drivers/s390/crypto/ap_bus.c3
-rw-r--r--drivers/s390/crypto/ap_queue.c4
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c4
-rw-r--r--drivers/s390/net/lcs.c2
-rw-r--r--drivers/s390/net/qeth_core.h1
-rw-r--r--drivers/s390/net/qeth_core_main.c26
-rw-r--r--drivers/s390/net/qeth_l2_main.c7
-rw-r--r--drivers/s390/net/qeth_l3_main.c4
-rw-r--r--drivers/scsi/arm/Kconfig11
-rw-r--r--drivers/scsi/arm/acornscsi.c103
-rw-r--r--drivers/scsi/arm/fas216.c31
-rw-r--r--drivers/scsi/arm/queue.c2
-rw-r--r--drivers/scsi/csiostor/csio_init.c1
-rw-r--r--drivers/scsi/elx/efct/efct_lio.c4
-rw-r--r--drivers/scsi/elx/efct/efct_scsi.c3
-rw-r--r--drivers/scsi/elx/libefc/efc_device.c7
-rw-r--r--drivers/scsi/elx/libefc/efc_fabric.c3
-rw-r--r--drivers/scsi/libiscsi.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c16
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c3
-rw-r--r--drivers/scsi/ncr53c8xx.c23
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.c8
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.h2
-rw-r--r--drivers/scsi/qedf/qedf.h4
-rw-r--r--drivers/scsi/qedf/qedf_els.c2
-rw-r--r--drivers/scsi/qedf/qedf_io.c12
-rw-r--r--drivers/scsi/qedf/qedf_main.c10
-rw-r--r--drivers/scsi/qedi/qedi_debugfs.c4
-rw-r--r--drivers/scsi/qedi/qedi_fw.c40
-rw-r--r--drivers/scsi/qedi/qedi_fw_api.c22
-rw-r--r--drivers/scsi/qedi/qedi_fw_iscsi.h2
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h2
-rw-r--r--drivers/scsi/qedi/qedi_main.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c8
-rw-r--r--drivers/scsi/sd.c14
-rw-r--r--drivers/scsi/sd_zbc.c8
-rw-r--r--drivers/scsi/ses.c24
-rw-r--r--drivers/scsi/sr_ioctl.c2
-rw-r--r--drivers/scsi/st.c1
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c78
-rw-r--r--drivers/scsi/ufs/ufshcd.c171
-rw-r--r--drivers/scsi/ufs/ufshcd.h6
-rw-r--r--drivers/scsi/ufs/ufshpb.c8
-rw-r--r--drivers/scsi/virtio_scsi.c4
-rw-r--r--drivers/soc/canaan/Kconfig1
-rw-r--r--drivers/soc/fsl/Kconfig1
-rw-r--r--drivers/soc/fsl/dpio/dpio-cmd.h3
-rw-r--r--drivers/soc/fsl/dpio/dpio-driver.c1
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c117
-rw-r--r--drivers/soc/fsl/dpio/dpio.c1
-rw-r--r--drivers/soc/fsl/dpio/dpio.h2
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c58
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.h13
-rw-r--r--drivers/soc/qcom/mdt_loader.c2
-rw-r--r--drivers/soc/qcom/socinfo.c2
-rw-r--r--drivers/soc/ti/omap_prm.c27
-rw-r--r--drivers/spi/spi-atmel.c4
-rw-r--r--drivers/spi/spi-bcm-qspi.c77
-rw-r--r--drivers/spi/spi-mt65xx.c64
-rw-r--r--drivers/spi/spi-mux.c7
-rw-r--r--drivers/spi/spi-nxp-fspi.c26
-rw-r--r--drivers/spi/spi-rockchip.c6
-rw-r--r--drivers/spi/spi-tegra20-slink.c5
-rw-r--r--drivers/spi/spi.c35
-rw-r--r--drivers/spi/spidev.c14
-rw-r--r--drivers/staging/greybus/uart.c62
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c2
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c2
-rw-r--r--drivers/staging/qlge/qlge_main.c8
-rw-r--r--drivers/staging/r8188eu/hal/hal_intf.c2
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c2
-rw-r--r--drivers/target/target_core_configfs.c32
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/tee/optee/core.c3
-rw-r--r--drivers/tee/optee/device.c22
-rw-r--r--drivers/tee/optee/optee_private.h1
-rw-r--r--drivers/tee/optee/shm_pool.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c5
-rw-r--r--drivers/thermal/qcom/tsens.c4
-rw-r--r--drivers/thermal/thermal_core.c7
-rw-r--r--drivers/thunderbolt/Makefile1
-rw-r--r--drivers/tty/hvc/hvc_xen.c13
-rw-r--r--drivers/tty/serial/8250/8250_omap.c2
-rw-r--r--drivers/tty/serial/8250/Kconfig8
-rw-r--r--drivers/tty/serial/mvebu-uart.c2
-rw-r--r--drivers/tty/synclink_gt.c44
-rw-r--r--drivers/tty/tty_ldisc.c1
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c14
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c15
-rw-r--r--drivers/usb/class/cdc-acm.c15
-rw-r--r--drivers/usb/class/cdc-acm.h2
-rw-r--r--drivers/usb/class/cdc-wdm.c6
-rw-r--r--drivers/usb/common/Kconfig3
-rw-r--r--drivers/usb/core/hcd.c68
-rw-r--r--drivers/usb/dwc2/gadget.c193
-rw-r--r--drivers/usb/dwc2/hcd.c4
-rw-r--r--drivers/usb/dwc3/core.c30
-rw-r--r--drivers/usb/dwc3/gadget.c2
-rw-r--r--drivers/usb/gadget/function/f_phonet.c5
-rw-r--r--drivers/usb/gadget/function/f_uac2.c33
-rw-r--r--drivers/usb/gadget/function/u_audio.c13
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c2
-rw-r--r--drivers/usb/host/bcma-hcd.c5
-rw-r--r--drivers/usb/host/ehci-hcd.c75
-rw-r--r--drivers/usb/host/ohci-omap.c72
-rw-r--r--drivers/usb/host/xhci-dbgtty.c28
-rw-r--r--drivers/usb/host/xhci-pci.c6
-rw-r--r--drivers/usb/host/xhci-ring.c39
-rw-r--r--drivers/usb/host/xhci-tegra.c12
-rw-r--r--drivers/usb/host/xhci.c6
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/musb/musb_dsps.c4
-rw-r--r--drivers/usb/musb/tusb6010.c1
-rw-r--r--drivers/usb/serial/cp210x.c38
-rw-r--r--drivers/usb/serial/mos7840.c2
-rw-r--r--drivers/usb/serial/option.c19
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/storage/unusual_devs.h9
-rw-r--r--drivers/usb/storage/unusual_uas.h2
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c1
-rw-r--r--drivers/usb/typec/tipd/core.c8
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c5
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c10
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c2
-rw-r--r--drivers/vhost/vdpa.c12
-rw-r--r--drivers/video/fbdev/Kconfig9
-rw-r--r--drivers/video/fbdev/gbefb.c2
-rw-r--r--drivers/virtio/virtio.c18
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/xen/Kconfig7
-rw-r--r--drivers/xen/balloon.c71
-rw-r--r--drivers/xen/gntdev.c8
-rw-r--r--drivers/xen/privcmd.c18
-rw-r--r--drivers/xen/swiotlb-xen.c44
-rw-r--r--fs/9p/cache.c8
-rw-r--r--fs/9p/fid.c14
-rw-r--r--fs/9p/v9fs.c8
-rw-r--r--fs/9p/vfs_addr.c14
-rw-r--r--fs/9p/vfs_file.c33
-rw-r--r--fs/9p/vfs_inode.c24
-rw-r--r--fs/9p/vfs_inode_dotl.c11
-rw-r--r--fs/afs/callback.c44
-rw-r--r--fs/afs/cell.c2
-rw-r--r--fs/afs/dir.c57
-rw-r--r--fs/afs/dir_edit.c4
-rw-r--r--fs/afs/dir_silly.c4
-rw-r--r--fs/afs/file.c86
-rw-r--r--fs/afs/fs_probe.c8
-rw-r--r--fs/afs/fsclient.c31
-rw-r--r--fs/afs/inode.c98
-rw-r--r--fs/afs/internal.h21
-rw-r--r--fs/afs/protocol_afs.h15
-rw-r--r--fs/afs/protocol_yfs.h6
-rw-r--r--fs/afs/rotate.c1
-rw-r--r--fs/afs/server.c2
-rw-r--r--fs/afs/super.c1
-rw-r--r--fs/afs/write.c32
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/btrfs/ctree.h2
-rw-r--r--fs/btrfs/dir-item.c48
-rw-r--r--fs/btrfs/extent-tree.c1
-rw-r--r--fs/btrfs/file-item.c13
-rw-r--r--fs/btrfs/file.c19
-rw-r--r--fs/btrfs/space-info.c5
-rw-r--r--fs/btrfs/tree-log.c79
-rw-r--r--fs/btrfs/verity.c6
-rw-r--r--fs/btrfs/volumes.c13
-rw-r--r--fs/buffer.c8
-rw-r--r--fs/ceph/caps.c16
-rw-r--r--fs/ceph/file.c1
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/ceph/mds_client.c17
-rw-r--r--fs/ceph/super.c17
-rw-r--r--fs/ceph/super.h3
-rw-r--r--fs/cifs/cache.c2
-rw-r--r--fs/cifs/cifs_debug.c1
-rw-r--r--fs/cifs/cifs_fs_sb.h1
-rw-r--r--fs/cifs/cifs_ioctl.h1
-rw-r--r--fs/cifs/cifs_spnego.c2
-rw-r--r--fs/cifs/cifs_spnego.h2
-rw-r--r--fs/cifs/cifs_unicode.c1
-rw-r--r--fs/cifs/cifsacl.c1
-rw-r--r--fs/cifs/cifsacl.h1
-rw-r--r--fs/cifs/cifsencrypt.c1
-rw-r--r--fs/cifs/cifsfs.c1
-rw-r--r--fs/cifs/cifsfs.h1
-rw-r--r--fs/cifs/cifsglob.h2
-rw-r--r--fs/cifs/cifspdu.h1
-rw-r--r--fs/cifs/cifsproto.h4
-rw-r--r--fs/cifs/cifssmb.c1
-rw-r--r--fs/cifs/connect.c18
-rw-r--r--fs/cifs/dir.c1
-rw-r--r--fs/cifs/dns_resolve.c1
-rw-r--r--fs/cifs/dns_resolve.h4
-rw-r--r--fs/cifs/export.c1
-rw-r--r--fs/cifs/file.c7
-rw-r--r--fs/cifs/fscache.c2
-rw-r--r--fs/cifs/fscache.h2
-rw-r--r--fs/cifs/inode.c7
-rw-r--r--fs/cifs/ioctl.c3
-rw-r--r--fs/cifs/link.c1
-rw-r--r--fs/cifs/misc.c59
-rw-r--r--fs/cifs/netmisc.c1
-rw-r--r--fs/cifs/ntlmssp.h1
-rw-r--r--fs/cifs/readdir.c1
-rw-r--r--fs/cifs/rfc1002pdu.h1
-rw-r--r--fs/cifs/sess.c1
-rw-r--r--fs/cifs/smb2file.c1
-rw-r--r--fs/cifs/smb2glob.h1
-rw-r--r--fs/cifs/smb2inode.c1
-rw-r--r--fs/cifs/smb2misc.c1
-rw-r--r--fs/cifs/smb2pdu.c5
-rw-r--r--fs/cifs/smb2pdu.h1
-rw-r--r--fs/cifs/smb2proto.h1
-rw-r--r--fs/cifs/smb2status.h1
-rw-r--r--fs/cifs/smb2transport.c1
-rw-r--r--fs/cifs/smberr.h1
-rw-r--r--fs/cifs/transport.c1
-rw-r--r--fs/cifs/winucase.c1
-rw-r--r--fs/cifs/xattr.c1
-rw-r--r--fs/debugfs/inode.c2
-rw-r--r--fs/erofs/inode.c2
-rw-r--r--fs/erofs/zmap.c3
-rw-r--r--fs/ext2/balloc.c14
-rw-r--r--fs/ext4/dir.c6
-rw-r--r--fs/ext4/ext4.h3
-rw-r--r--fs/ext4/extents.c19
-rw-r--r--fs/ext4/fast_commit.c6
-rw-r--r--fs/ext4/inline.c150
-rw-r--r--fs/ext4/inode.c176
-rw-r--r--fs/ext4/super.c21
-rw-r--r--fs/fscache/object.c2
-rw-r--r--fs/fscache/operation.c3
-rw-r--r--fs/inode.c6
-rw-r--r--fs/io-wq.c31
-rw-r--r--fs/io_uring.c307
-rw-r--r--fs/kernel_read_file.c2
-rw-r--r--fs/kernfs/dir.c18
-rw-r--r--fs/ksmbd/auth.c205
-rw-r--r--fs/ksmbd/connection.c10
-rw-r--r--fs/ksmbd/crypto_ctx.c16
-rw-r--r--fs/ksmbd/crypto_ctx.h8
-rw-r--r--fs/ksmbd/glob.h2
-rw-r--r--fs/ksmbd/misc.c21
-rw-r--r--fs/ksmbd/misc.h4
-rw-r--r--fs/ksmbd/oplock.c41
-rw-r--r--fs/ksmbd/server.c3
-rw-r--r--fs/ksmbd/smb2misc.c98
-rw-r--r--fs/ksmbd/smb2ops.c5
-rw-r--r--fs/ksmbd/smb2pdu.c426
-rw-r--r--fs/ksmbd/smb2pdu.h10
-rw-r--r--fs/ksmbd/smb_common.c72
-rw-r--r--fs/ksmbd/smb_common.h11
-rw-r--r--fs/ksmbd/smbacl.c21
-rw-r--r--fs/ksmbd/transport_rdma.c1
-rw-r--r--fs/ksmbd/transport_tcp.c4
-rw-r--r--fs/ksmbd/vfs.c172
-rw-r--r--fs/ksmbd/vfs.h9
-rw-r--r--fs/lockd/svcxdr.h13
-rw-r--r--fs/netfs/read_helper.c2
-rw-r--r--fs/nfs_common/grace.c1
-rw-r--r--fs/nfsd/filecache.c2
-rw-r--r--fs/nfsd/nfs4state.c16
-rw-r--r--fs/nfsd/nfs4xdr.c19
-rw-r--r--fs/nfsd/nfsctl.c7
-rw-r--r--fs/ntfs3/attrib.c20
-rw-r--r--fs/ntfs3/attrlist.c9
-rw-r--r--fs/ntfs3/bitfunc.c10
-rw-r--r--fs/ntfs3/bitmap.c14
-rw-r--r--fs/ntfs3/debug.h3
-rw-r--r--fs/ntfs3/dir.c30
-rw-r--r--fs/ntfs3/file.c12
-rw-r--r--fs/ntfs3/frecord.c55
-rw-r--r--fs/ntfs3/fslog.c12
-rw-r--r--fs/ntfs3/fsntfs.c77
-rw-r--r--fs/ntfs3/index.c160
-rw-r--r--fs/ntfs3/inode.c159
-rw-r--r--fs/ntfs3/lib/decompress_common.h5
-rw-r--r--fs/ntfs3/lib/lib.h6
-rw-r--r--fs/ntfs3/lznt.c12
-rw-r--r--fs/ntfs3/namei.c24
-rw-r--r--fs/ntfs3/ntfs.h20
-rw-r--r--fs/ntfs3/ntfs_fs.h67
-rw-r--r--fs/ntfs3/record.c3
-rw-r--r--fs/ntfs3/run.c2
-rw-r--r--fs/ntfs3/super.c651
-rw-r--r--fs/ntfs3/upcase.c8
-rw-r--r--fs/ntfs3/xattr.c249
-rw-r--r--fs/ocfs2/alloc.c46
-rw-r--r--fs/ocfs2/dlmglue.c3
-rw-r--r--fs/ocfs2/super.c14
-rw-r--r--fs/overlayfs/dir.c10
-rw-r--r--fs/overlayfs/file.c15
-rw-r--r--fs/qnx4/dir.c36
-rw-r--r--fs/smbfs_common/smbfsctl.h2
-rw-r--r--fs/userfaultfd.c12
-rw-r--r--fs/vboxsf/super.c12
-rw-r--r--fs/verity/enable.c2
-rw-r--r--fs/verity/open.c2
-rw-r--r--include/acpi/acpi_io.h8
-rw-r--r--include/asm-generic/io.h28
-rw-r--r--include/asm-generic/iomap.h10
-rw-r--r--include/asm-generic/pci_iomap.h3
-rw-r--r--include/kunit/test.h6
-rw-r--r--include/kvm/arm_pmu.h3
-rw-r--r--include/linux/arm-smccc.h10
-rw-r--r--include/linux/bpf.h10
-rw-r--r--include/linux/brcmphy.h2
-rw-r--r--include/linux/buffer_head.h4
-rw-r--r--include/linux/cpuhotplug.h4
-rw-r--r--include/linux/cpumask.h7
-rw-r--r--include/linux/dsa/8021q.h5
-rw-r--r--include/linux/dsa/mv88e6xxx.h13
-rw-r--r--include/linux/dsa/ocelot.h55
-rw-r--r--include/linux/dsa/sja1105.h41
-rw-r--r--include/linux/elfcore.h2
-rw-r--r--include/linux/etherdevice.h39
-rw-r--r--include/linux/ethtool.h23
-rw-r--r--include/linux/filter.h7
-rw-r--r--include/linux/fwnode.h11
-rw-r--r--include/linux/genhd.h1
-rw-r--r--include/linux/ieee80211.h1
-rw-r--r--include/linux/irqdomain.h2
-rw-r--r--include/linux/kvm_host.h6
-rw-r--r--include/linux/mdio.h5
-rw-r--r--include/linux/memory.h5
-rw-r--r--include/linux/mfd/idt8a340_reg.h31
-rw-r--r--include/linux/micrel_phy.h1
-rw-r--r--include/linux/migrate.h6
-rw-r--r--include/linux/mlx4/device.h2
-rw-r--r--include/linux/mlx4/driver.h22
-rw-r--r--include/linux/mlx5/device.h19
-rw-r--r--include/linux/mlx5/driver.h28
-rw-r--r--include/linux/mlx5/eq.h1
-rw-r--r--include/linux/mlx5/eswitch.h9
-rw-r--r--include/linux/mlx5/fs.h9
-rw-r--r--include/linux/mlx5/mlx5_ifc.h348
-rw-r--r--include/linux/mm_types.h13
-rw-r--r--include/linux/netdevice.h17
-rw-r--r--include/linux/netfilter_arp/arp_tables.h5
-rw-r--r--include/linux/netfilter_bridge/ebtables.h5
-rw-r--r--include/linux/netfilter_ingress.h58
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h6
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h5
-rw-r--r--include/linux/netfilter_netdev.h146
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/nvmem-consumer.h14
-rw-r--r--include/linux/of_net.h8
-rw-r--r--include/linux/packing.h2
-rw-r--r--include/linux/perf/arm_pmu.h6
-rw-r--r--include/linux/perf_event.h4
-rw-r--r--include/linux/phylink.h1
-rw-r--r--include/linux/pkeys.h2
-rw-r--r--include/linux/platform_data/brcmfmac.h2
-rw-r--r--include/linux/platform_data/usb-omap1.h2
-rw-r--r--include/linux/property.h5
-rw-r--r--include/linux/qcom_scm.h71
-rw-r--r--include/linux/qed/common_hsi.h141
-rw-r--r--include/linux/qed/eth_common.h1
-rw-r--r--include/linux/qed/fcoe_common.h362
-rw-r--r--include/linux/qed/iscsi_common.h360
-rw-r--r--include/linux/qed/nvmetcp_common.h18
-rw-r--r--include/linux/qed/qed_chain.h97
-rw-r--r--include/linux/qed/qed_eth_if.h2
-rw-r--r--include/linux/qed/qed_if.h265
-rw-r--r--include/linux/qed/qed_iscsi_if.h2
-rw-r--r--include/linux/qed/qed_ll2_if.h42
-rw-r--r--include/linux/qed/qed_nvmetcp_if.h17
-rw-r--r--include/linux/qed/qed_rdma_if.h3
-rw-r--r--include/linux/qed/rdma_common.h1
-rw-r--r--include/linux/sched.h3
-rw-r--r--include/linux/secretmem.h2
-rw-r--r--include/linux/skbuff.h4
-rw-r--r--include/linux/soc/marvell/octeontx2/asm.h15
-rw-r--r--include/linux/spi/spi.h3
-rw-r--r--include/linux/trace_recursion.h49
-rw-r--r--include/linux/tracehook.h2
-rw-r--r--include/linux/u64_stats_sync.h10
-rw-r--r--include/linux/uio.h21
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/linux/user_namespace.h2
-rw-r--r--include/linux/workqueue.h5
-rw-r--r--include/net/act_api.h10
-rw-r--r--include/net/ax25.h13
-rw-r--r--include/net/bluetooth/bluetooth.h90
-rw-r--r--include/net/bluetooth/hci.h117
-rw-r--r--include/net/bluetooth/hci_core.h75
-rw-r--r--include/net/codel.h5
-rw-r--r--include/net/codel_impl.h18
-rw-r--r--include/net/datalink.h2
-rw-r--r--include/net/devlink.h85
-rw-r--r--include/net/dn.h2
-rw-r--r--include/net/dsa.h52
-rw-r--r--include/net/gen_stats.h59
-rw-r--r--include/net/inet_connection_sock.h2
-rw-r--r--include/net/inet_ecn.h17
-rw-r--r--include/net/ioam6.h3
-rw-r--r--include/net/ip.h6
-rw-r--r--include/net/ip_fib.h2
-rw-r--r--include/net/ip_vs.h11
-rw-r--r--include/net/llc.h2
-rw-r--r--include/net/llc_if.h3
-rw-r--r--include/net/mac80211.h8
-rw-r--r--include/net/mctp.h58
-rw-r--r--include/net/mctpdevice.h5
-rw-r--r--include/net/ndisc.h2
-rw-r--r--include/net/neighbour.h34
-rw-r--r--include/net/netfilter/ipv6/nf_defrag_ipv6.h1
-rw-r--r--include/net/netfilter/nf_tables.h2
-rw-r--r--include/net/netfilter/xt_rateest.h2
-rw-r--r--include/net/netns/netfilter.h6
-rw-r--r--include/net/nexthop.h2
-rw-r--r--include/net/page_pool.h12
-rw-r--r--include/net/pkt_cls.h6
-rw-r--r--include/net/pkt_sched.h1
-rw-r--r--include/net/rose.h8
-rw-r--r--include/net/sch_generic.h78
-rw-r--r--include/net/sctp/sm.h6
-rw-r--r--include/net/sock.h105
-rw-r--r--include/net/tcp.h46
-rw-r--r--include/net/tls.h3
-rw-r--r--include/net/xdp.h8
-rw-r--r--include/net/xdp_sock_drv.h22
-rw-r--r--include/net/xsk_buff_pool.h48
-rw-r--r--include/scsi/scsi_device.h1
-rw-r--r--include/soc/fsl/dpaa2-io.h9
-rw-r--r--include/soc/mscc/ocelot.h79
-rw-r--r--include/soc/mscc/ocelot_ptp.h3
-rw-r--r--include/soc/mscc/ocelot_vcap.h14
-rw-r--r--include/sound/hda_codec.h1
-rw-r--r--include/sound/rawmidi.h1
-rw-r--r--include/trace/events/afs.h8
-rw-r--r--include/trace/events/cachefiles.h6
-rw-r--r--include/trace/events/devlink.h72
-rw-r--r--include/trace/events/erofs.h6
-rw-r--r--include/trace/events/kyber.h19
-rw-r--r--include/trace/events/mctp.h75
-rw-r--r--include/uapi/asm-generic/socket.h2
-rw-r--r--include/uapi/linux/android/binder.h7
-rw-r--r--include/uapi/linux/bpf.h16
-rw-r--r--include/uapi/linux/cifs/cifs_mount.h1
-rw-r--r--include/uapi/linux/devlink.h2
-rw-r--r--include/uapi/linux/ethtool.h29
-rw-r--r--include/uapi/linux/ethtool_netlink.h17
-rw-r--r--include/uapi/linux/hyperv.h2
-rw-r--r--include/uapi/linux/if_ether.h1
-rw-r--r--include/uapi/linux/io_uring.h8
-rw-r--r--include/uapi/linux/ioam6_iptunnel.h29
-rw-r--r--include/uapi/linux/mctp.h7
-rw-r--r--include/uapi/linux/neighbour.h35
-rw-r--r--include/uapi/linux/netfilter.h1
-rw-r--r--include/uapi/linux/pkt_sched.h2
-rw-r--r--include/uapi/linux/smc.h17
-rw-r--r--include/uapi/linux/vm_sockets.h13
-rw-r--r--include/uapi/linux/xfrm.h15
-rw-r--r--include/uapi/misc/habanalabs.h6
-rw-r--r--include/uapi/sound/asound.h1
-rw-r--r--include/xen/xen-ops.h27
-rw-r--r--init/do_mounts.c30
-rw-r--r--init/main.c7
-rw-r--r--kernel/auditsc.c2
-rw-r--r--kernel/bpf/arraymap.c7
-rw-r--r--kernel/bpf/bpf_struct_ops.c7
-rw-r--r--kernel/bpf/core.c7
-rw-r--r--kernel/bpf/hashtab.c13
-rw-r--r--kernel/bpf/helpers.c11
-rw-r--r--kernel/bpf/stackmap.c3
-rw-r--r--kernel/bpf/verifier.c123
-rw-r--r--kernel/cgroup/cgroup.c17
-rw-r--r--kernel/cgroup/cpuset.c56
-rw-r--r--kernel/cred.c9
-rw-r--r--kernel/dma/debug.c39
-rw-r--r--kernel/dma/debug.h24
-rw-r--r--kernel/dma/mapping.c27
-rw-r--r--kernel/entry/common.c4
-rw-r--r--kernel/events/core.c36
-rw-r--r--kernel/irq/irqdomain.c2
-rw-r--r--kernel/locking/rwbase_rt.c65
-rw-r--r--kernel/module.c2
-rw-r--r--kernel/rseq.c14
-rw-r--r--kernel/sched/debug.c8
-rw-r--r--kernel/sched/fair.c6
-rw-r--r--kernel/signal.c25
-rw-r--r--kernel/time/posix-cpu-timers.c3
-rw-r--r--kernel/trace/blktrace.c8
-rw-r--r--kernel/trace/bpf_trace.c54
-rw-r--r--kernel/trace/ftrace.c4
-rw-r--r--kernel/trace/trace.c11
-rw-r--r--kernel/trace/trace_eprobe.c61
-rw-r--r--kernel/trace/trace_events_hist.c2
-rw-r--r--kernel/ucount.c49
-rw-r--r--kernel/workqueue.c18
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/Kconfig.kasan2
-rw-r--r--lib/Makefile2
-rw-r--r--lib/iov_iter.c36
-rw-r--r--lib/kunit/executor_test.c4
-rw-r--r--lib/packing.c2
-rw-r--r--lib/pci_iomap.c43
-rw-r--r--lib/test_bpf.c6031
-rw-r--r--lib/zlib_inflate/inffast.c13
-rw-r--r--mm/damon/dbgfs-test.h16
-rw-r--r--mm/debug.c4
-rw-r--r--mm/huge_memory.c6
-rw-r--r--mm/memblock.c5
-rw-r--r--mm/memcontrol.c10
-rw-r--r--mm/memory-failure.c12
-rw-r--r--mm/memory.c1
-rw-r--r--mm/mempolicy.c16
-rw-r--r--mm/migrate.c62
-rw-r--r--mm/page_ext.c4
-rw-r--r--mm/shmem.c4
-rw-r--r--mm/slab.c4
-rw-r--r--mm/slub.c31
-rw-r--r--mm/swap.c19
-rw-r--r--mm/util.c4
-rw-r--r--mm/workingset.c1
-rw-r--r--net/802/hippi.c2
-rw-r--r--net/802/p8022.c2
-rw-r--r--net/802/psnap.c2
-rw-r--r--net/8021q/vlan_dev.c6
-rw-r--r--net/Kconfig2
-rw-r--r--net/atm/br2684.c2
-rw-r--r--net/atm/lec.c3
-rw-r--r--net/ax25/af_ax25.c2
-rw-r--r--net/ax25/ax25_dev.c2
-rw-r--r--net/ax25/ax25_iface.c6
-rw-r--r--net/ax25/ax25_in.c4
-rw-r--r--net/ax25/ax25_out.c2
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c14
-rw-r--r--net/batman-adv/multicast.c2
-rw-r--r--net/batman-adv/routing.c3
-rw-r--r--net/batman-adv/soft-interface.c2
-rw-r--r--net/batman-adv/tp_meter.c2
-rw-r--r--net/batman-adv/tvlv.c4
-rw-r--r--net/batman-adv/tvlv.h4
-rw-r--r--net/bluetooth/Makefile3
-rw-r--r--net/bluetooth/eir.c335
-rw-r--r--net/bluetooth/eir.h72
-rw-r--r--net/bluetooth/hci_codec.c238
-rw-r--r--net/bluetooth/hci_codec.h7
-rw-r--r--net/bluetooth/hci_conn.c168
-rw-r--r--net/bluetooth/hci_core.c320
-rw-r--r--net/bluetooth/hci_debugfs.c123
-rw-r--r--net/bluetooth/hci_debugfs.h5
-rw-r--r--net/bluetooth/hci_event.c135
-rw-r--r--net/bluetooth/hci_request.c478
-rw-r--r--net/bluetooth/hci_request.h25
-rw-r--r--net/bluetooth/hci_sock.c214
-rw-r--r--net/bluetooth/l2cap_core.c2
-rw-r--r--net/bluetooth/l2cap_sock.c10
-rw-r--r--net/bluetooth/mgmt.c445
-rw-r--r--net/bluetooth/msft.c172
-rw-r--r--net/bluetooth/msft.h9
-rw-r--r--net/bluetooth/rfcomm/core.c50
-rw-r--r--net/bluetooth/rfcomm/sock.c46
-rw-r--r--net/bluetooth/sco.c209
-rw-r--r--net/bpf/test_run.c20
-rw-r--r--net/bridge/br.c4
-rw-r--r--net/bridge/br_fdb.c6
-rw-r--r--net/bridge/br_if.c2
-rw-r--r--net/bridge/br_ioctl.c10
-rw-r--r--net/bridge/br_mdb.c4
-rw-r--r--net/bridge/br_multicast.c6
-rw-r--r--net/bridge/br_netfilter_hooks.c2
-rw-r--r--net/bridge/br_netlink.c7
-rw-r--r--net/bridge/br_private.h6
-rw-r--r--net/bridge/br_stp_if.c2
-rw-r--r--net/bridge/netfilter/ebtable_broute.c2
-rw-r--r--net/bridge/netfilter/ebtable_filter.c13
-rw-r--r--net/bridge/netfilter/ebtable_nat.c12
-rw-r--r--net/bridge/netfilter/ebtables.c10
-rw-r--r--net/can/isotp.c51
-rw-r--r--net/can/j1939/j1939-priv.h1
-rw-r--r--net/can/j1939/main.c7
-rw-r--r--net/can/j1939/transport.c14
-rw-r--r--net/core/Makefile1
-rw-r--r--net/core/dev.c101
-rw-r--r--net/core/dev_addr_lists.c6
-rw-r--r--net/core/devlink.c416
-rw-r--r--net/core/flow_dissector.c3
-rw-r--r--net/core/gen_estimator.c52
-rw-r--r--net/core/gen_stats.c186
-rw-r--r--net/core/neighbour.c204
-rw-r--r--net/core/net-procfs.c24
-rw-r--r--net/core/net-sysfs.c57
-rw-r--r--net/core/of_net.c (renamed from drivers/of/of_net.c)25
-rw-r--r--net/core/page_pool.c10
-rw-r--r--net/core/rtnetlink.c7
-rw-r--r--net/core/sock.c187
-rw-r--r--net/core/stream.c5
-rw-r--r--net/dccp/dccp.h2
-rw-r--r--net/dccp/proto.c14
-rw-r--r--net/dsa/Kconfig25
-rw-r--r--net/dsa/Makefile3
-rw-r--r--net/dsa/dsa.c22
-rw-r--r--net/dsa/dsa2.c184
-rw-r--r--net/dsa/port.c21
-rw-r--r--net/dsa/slave.c40
-rw-r--r--net/dsa/switch.c171
-rw-r--r--net/dsa/tag_8021q.c114
-rw-r--r--net/dsa/tag_dsa.c30
-rw-r--r--net/dsa/tag_ksz.c1
-rw-r--r--net/dsa/tag_ocelot.c42
-rw-r--r--net/dsa/tag_ocelot_8021q.c44
-rw-r--r--net/dsa/tag_rtl8_4.c178
-rw-r--r--net/dsa/tag_sja1105.c52
-rw-r--r--net/ethernet/eth.c102
-rw-r--r--net/ethtool/Makefile2
-rw-r--r--net/ethtool/ioctl.c12
-rw-r--r--net/ethtool/module.c180
-rw-r--r--net/ethtool/netlink.c19
-rw-r--r--net/ethtool/netlink.h4
-rw-r--r--net/hsr/hsr_device.c2
-rw-r--r--net/hsr/hsr_main.c2
-rw-r--r--net/ieee802154/6lowpan/core.c2
-rw-r--r--net/ipv4/af_inet.c12
-rw-r--r--net/ipv4/datagram.c1
-rw-r--r--net/ipv4/fib_notifier.c1
-rw-r--r--net/ipv4/fib_semantics.c16
-rw-r--r--net/ipv4/icmp.c23
-rw-r--r--net/ipv4/inet_connection_sock.c4
-rw-r--r--net/ipv4/inet_hashtables.c6
-rw-r--r--net/ipv4/ip_gre.c2
-rw-r--r--net/ipv4/ip_tunnel.c2
-rw-r--r--net/ipv4/ip_vti.c2
-rw-r--r--net/ipv4/ipip.c2
-rw-r--r--net/ipv4/netfilter/arp_tables.c7
-rw-r--r--net/ipv4/netfilter/arptable_filter.c10
-rw-r--r--net/ipv4/netfilter/ip_tables.c7
-rw-r--r--net/ipv4/netfilter/iptable_filter.c9
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c8
-rw-r--r--net/ipv4/netfilter/iptable_nat.c15
-rw-r--r--net/ipv4/netfilter/iptable_raw.c12
-rw-r--r--net/ipv4/netfilter/iptable_security.c9
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c30
-rw-r--r--net/ipv4/nexthop.c21
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c12
-rw-r--r--net/ipv4/tcp.c76
-rw-r--r--net/ipv4/tcp_input.c37
-rw-r--r--net/ipv4/tcp_ipv4.c51
-rw-r--r--net/ipv4/tcp_nv.c1
-rw-r--r--net/ipv4/tcp_output.c5
-rw-r--r--net/ipv4/tcp_rate.c6
-rw-r--r--net/ipv4/udp.c13
-rw-r--r--net/ipv6/Kconfig6
-rw-r--r--net/ipv6/Makefile11
-rw-r--r--net/ipv6/addrconf.c4
-rw-r--r--net/ipv6/exthdrs.c2
-rw-r--r--net/ipv6/ila/ila_xlat.c6
-rw-r--r--net/ipv6/inet6_hashtables.c2
-rw-r--r--net/ipv6/ioam6.c81
-rw-r--r--net/ipv6/ioam6_iptunnel.c306
-rw-r--r--net/ipv6/ip6_gre.c4
-rw-r--r--net/ipv6/ip6_output.c3
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/ip6_vti.c2
-rw-r--r--net/ipv6/ndisc.c4
-rw-r--r--net/ipv6/netfilter/ip6_tables.c7
-rw-r--r--net/ipv6/netfilter/ip6t_rt.c48
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c10
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c8
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c15
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c10
-rw-r--r--net/ipv6/netfilter/ip6table_security.c9
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c2
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c25
-rw-r--r--net/ipv6/route.c5
-rw-r--r--net/ipv6/seg6.c8
-rw-r--r--net/ipv6/seg6_hmac.c4
-rw-r--r--net/ipv6/sit.c4
-rw-r--r--net/ipv6/tcp_ipv6.c21
-rw-r--r--net/ipv6/udp.c5
-rw-r--r--net/llc/llc_c_ac.c2
-rw-r--r--net/llc/llc_if.c2
-rw-r--r--net/llc/llc_output.c2
-rw-r--r--net/llc/llc_proc.c2
-rw-r--r--net/mac80211/mesh_pathtbl.c5
-rw-r--r--net/mac80211/mesh_ps.c3
-rw-r--r--net/mac80211/rate.c4
-rw-r--r--net/mac80211/rx.c3
-rw-r--r--net/mac80211/sta_info.c2
-rw-r--r--net/mac80211/tx.c12
-rw-r--r--net/mac80211/wpa.c6
-rw-r--r--net/mac802154/iface.c17
-rw-r--r--net/mctp/Kconfig5
-rw-r--r--net/mctp/Makefile3
-rw-r--r--net/mctp/af_mctp.c66
-rw-r--r--net/mctp/device.c53
-rw-r--r--net/mctp/neigh.c4
-rw-r--r--net/mctp/route.c199
-rw-r--r--net/mctp/test/route-test.c544
-rw-r--r--net/mctp/test/utils.c67
-rw-r--r--net/mctp/test/utils.h20
-rw-r--r--net/mptcp/mib.c17
-rw-r--r--net/mptcp/mptcp_diag.c2
-rw-r--r--net/mptcp/options.c15
-rw-r--r--net/mptcp/pm_netlink.c13
-rw-r--r--net/mptcp/protocol.c269
-rw-r--r--net/mptcp/protocol.h6
-rw-r--r--net/mptcp/sockopt.c3
-rw-r--r--net/mptcp/subflow.c2
-rw-r--r--net/mptcp/syncookies.c13
-rw-r--r--net/mptcp/token.c11
-rw-r--r--net/mptcp/token_test.c14
-rw-r--r--net/netfilter/Kconfig13
-rw-r--r--net/netfilter/core.c38
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h4
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c166
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c13
-rw-r--r--net/netfilter/ipvs/ip_vs_est.c5
-rw-r--r--net/netfilter/nf_conntrack_core.c154
-rw-r--r--net/netfilter/nf_nat_core.c17
-rw-r--r--net/netfilter/nf_nat_masquerade.c168
-rw-r--r--net/netfilter/nf_tables_api.c121
-rw-r--r--net/netfilter/nfnetlink_hook.c16
-rw-r--r--net/netfilter/nft_chain_filter.c13
-rw-r--r--net/netfilter/nft_compat.c17
-rw-r--r--net/netfilter/nft_dynset.c11
-rw-r--r--net/netfilter/nft_quota.c2
-rw-r--r--net/netfilter/xt_IDLETIMER.c2
-rw-r--r--net/netfilter/xt_LOG.c10
-rw-r--r--net/netfilter/xt_NFLOG.c10
-rw-r--r--net/netfilter/xt_RATEEST.c7
-rw-r--r--net/netlink/af_netlink.c37
-rw-r--r--net/netrom/af_netrom.c4
-rw-r--r--net/netrom/nr_dev.c8
-rw-r--r--net/netrom/nr_route.c4
-rw-r--r--net/nfc/af_nfc.c3
-rw-r--r--net/nfc/digital_core.c9
-rw-r--r--net/nfc/digital_technology.c8
-rw-r--r--net/nfc/hci/command.c16
-rw-r--r--net/nfc/hci/llc_shdlc.c12
-rw-r--r--net/nfc/llcp_commands.c8
-rw-r--r--net/nfc/llcp_core.c5
-rw-r--r--net/nfc/nci/core.c4
-rw-r--r--net/nfc/nci/hci.c4
-rw-r--r--net/nfc/nci/ntf.c9
-rw-r--r--net/nfc/nci/rsp.c2
-rw-r--r--net/nfc/nci/uart.c16
-rw-r--r--net/packet/af_packet.c35
-rw-r--r--net/qrtr/Makefile3
-rw-r--r--net/qrtr/af_qrtr.c (renamed from net/qrtr/qrtr.c)0
-rw-r--r--net/rose/af_rose.c5
-rw-r--r--net/rose/rose_dev.c8
-rw-r--r--net/rose/rose_link.c8
-rw-r--r--net/rose/rose_route.c10
-rw-r--r--net/rxrpc/rtt.c2
-rw-r--r--net/sched/act_api.c21
-rw-r--r--net/sched/act_bpf.c2
-rw-r--r--net/sched/act_ct.c2
-rw-r--r--net/sched/act_ife.c4
-rw-r--r--net/sched/act_mpls.c2
-rw-r--r--net/sched/act_police.c4
-rw-r--r--net/sched/act_sample.c2
-rw-r--r--net/sched/act_simple.c3
-rw-r--r--net/sched/act_skbedit.c2
-rw-r--r--net/sched/act_skbmod.c2
-rw-r--r--net/sched/cls_flower.c6
-rw-r--r--net/sched/sch_api.c31
-rw-r--r--net/sched/sch_atm.c6
-rw-r--r--net/sched/sch_cbq.c15
-rw-r--r--net/sched/sch_drr.c13
-rw-r--r--net/sched/sch_ets.c17
-rw-r--r--net/sched/sch_fifo.c3
-rw-r--r--net/sched/sch_fq_codel.c20
-rw-r--r--net/sched/sch_generic.c13
-rw-r--r--net/sched/sch_gred.c15
-rw-r--r--net/sched/sch_hfsc.c11
-rw-r--r--net/sched/sch_htb.c43
-rw-r--r--net/sched/sch_mq.c30
-rw-r--r--net/sched/sch_mqprio.c57
-rw-r--r--net/sched/sch_multiq.c3
-rw-r--r--net/sched/sch_netem.c2
-rw-r--r--net/sched/sch_prio.c4
-rw-r--r--net/sched/sch_qfq.c13
-rw-r--r--net/sched/sch_taprio.c6
-rw-r--r--net/sched/sch_tbf.c16
-rw-r--r--net/sctp/input.c2
-rw-r--r--net/sctp/sm_make_chunk.c2
-rw-r--r--net/smc/af_smc.c431
-rw-r--r--net/smc/smc.h20
-rw-r--r--net/smc/smc_cdc.c7
-rw-r--r--net/smc/smc_clc.c150
-rw-r--r--net/smc/smc_clc.h55
-rw-r--r--net/smc/smc_core.c195
-rw-r--r--net/smc/smc_core.h50
-rw-r--r--net/smc/smc_ib.c160
-rw-r--r--net/smc/smc_ib.h16
-rw-r--r--net/smc/smc_llc.c684
-rw-r--r--net/smc/smc_llc.h12
-rw-r--r--net/smc/smc_pnet.c41
-rw-r--r--net/smc/smc_tx.c22
-rw-r--r--net/smc/smc_wr.c237
-rw-r--r--net/smc/smc_wr.h22
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c2
-rw-r--r--net/tipc/bearer.c4
-rw-r--r--net/tipc/bearer.h2
-rw-r--r--net/tipc/eth_media.c2
-rw-r--r--net/tipc/ib_media.c2
-rw-r--r--net/tls/tls_sw.c20
-rw-r--r--net/unix/af_unix.c94
-rw-r--r--net/vmw_vsock/af_vsock.c80
-rw-r--r--net/xdp/xsk.c15
-rw-r--r--net/xdp/xsk_buff_pool.c132
-rw-r--r--net/xdp/xsk_queue.h12
-rw-r--r--net/xfrm/xfrm_user.c67
-rw-r--r--samples/bpf/Makefile17
-rw-r--r--samples/bpf/bpf_insn.h2
-rw-r--r--samples/bpf/xdp_redirect_map_multi.bpf.c5
-rw-r--r--samples/bpf/xdp_router_ipv4_user.c39
-rw-r--r--scripts/Makefile.clang5
-rw-r--r--scripts/Makefile.gcc-plugins4
-rw-r--r--scripts/Makefile.kasan3
-rw-r--r--scripts/Makefile.modpost2
-rwxr-xr-xscripts/checkkconfigsymbols.py11
-rwxr-xr-xscripts/checksyscalls.sh6
-rwxr-xr-xscripts/clang-tools/gen_compile_commands.py1
-rwxr-xr-xscripts/recordmcount.pl2
-rw-r--r--scripts/sorttable.c4
-rw-r--r--security/keys/process_keys.c8
-rw-r--r--security/selinux/hooks.c4
-rw-r--r--security/selinux/nlmsgtab.c4
-rw-r--r--security/smack/smack_lsm.c4
-rw-r--r--sound/core/pcm_compat.c72
-rw-r--r--sound/core/rawmidi.c9
-rw-r--r--sound/core/seq_device.c8
-rw-r--r--sound/drivers/pcsp/pcsp_lib.c2
-rw-r--r--sound/firewire/motu/amdtp-motu.c7
-rw-r--r--sound/firewire/oxfw/oxfw.c13
-rw-r--r--sound/hda/hdac_controller.c5
-rw-r--r--sound/pci/hda/hda_bind.c20
-rw-r--r--sound/pci/hda/hda_codec.c1
-rw-r--r--sound/pci/hda/hda_controller.c24
-rw-r--r--sound/pci/hda/hda_controller.h2
-rw-r--r--sound/pci/hda/hda_intel.c41
-rw-r--r--sound/pci/hda/hda_intel.h4
-rw-r--r--sound/pci/hda/patch_cs8409.c3
-rw-r--r--sound/pci/hda/patch_realtek.c238
-rw-r--r--sound/pci/pcxhr/pcxhr_core.c2
-rw-r--r--sound/soc/codecs/Kconfig1
-rw-r--r--sound/soc/codecs/cs42l42.c16
-rw-r--r--sound/soc/codecs/cs4341.c7
-rw-r--r--sound/soc/codecs/nau8824.c4
-rw-r--r--sound/soc/codecs/pcm179x-spi.c1
-rw-r--r--sound/soc/codecs/pcm512x.c2
-rw-r--r--sound/soc/codecs/wcd938x.c6
-rw-r--r--sound/soc/codecs/wm8960.c13
-rw-r--r--sound/soc/fsl/fsl_esai.c16
-rw-r--r--sound/soc/fsl/fsl_micfil.c15
-rw-r--r--sound/soc/fsl/fsl_sai.c14
-rw-r--r--sound/soc/fsl/fsl_spdif.c14
-rw-r--r--sound/soc/fsl/fsl_xcvr.c32
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c37
-rw-r--r--sound/soc/intel/boards/sof_sdw.c5
-rw-r--r--sound/soc/mediatek/Kconfig3
-rw-r--r--sound/soc/mediatek/common/mtk-afe-fe-dai.c19
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c7
-rw-r--r--sound/soc/soc-core.c1
-rw-r--r--sound/soc/soc-dapm.c13
-rw-r--r--sound/soc/sof/core.c4
-rw-r--r--sound/soc/sof/imx/imx8.c9
-rw-r--r--sound/soc/sof/imx/imx8m.c9
-rw-r--r--sound/soc/sof/loader.c8
-rw-r--r--sound/soc/sof/trace.c1
-rw-r--r--sound/soc/sof/xtensa/core.c4
-rw-r--r--sound/usb/card.c18
-rw-r--r--sound/usb/mixer.c33
-rw-r--r--sound/usb/mixer.h3
-rw-r--r--sound/usb/mixer_quirks.c2
-rw-r--r--sound/usb/mixer_scarlett_gen2.c2
-rw-r--r--sound/usb/quirks-table.h74
-rw-r--r--sound/usb/quirks.c11
-rw-r--r--tools/arch/x86/include/uapi/asm/unistd_32.h (renamed from tools/arch/x86/include/asm/unistd_32.h)0
-rw-r--r--tools/arch/x86/include/uapi/asm/unistd_64.h (renamed from tools/arch/x86/include/asm/unistd_64.h)3
-rw-r--r--tools/arch/x86/lib/insn.c4
-rw-r--r--tools/bpf/bpftool/feature.c1
-rw-r--r--tools/bpf/bpftool/gen.c5
-rw-r--r--tools/include/uapi/linux/bpf.h16
-rw-r--r--tools/include/uapi/sound/asound.h1
-rwxr-xr-xtools/kvm/kvm_stat/kvm_stat2
-rw-r--r--tools/lib/bpf/bpf_helpers.h51
-rw-r--r--tools/lib/bpf/gen_loader.c7
-rw-r--r--tools/lib/bpf/libbpf.c849
-rw-r--r--tools/lib/bpf/libbpf.h67
-rw-r--r--tools/lib/bpf/libbpf_internal.h7
-rw-r--r--tools/lib/bpf/libbpf_legacy.h9
-rw-r--r--tools/lib/bpf/linker.c8
-rw-r--r--tools/lib/bpf/skel_internal.h6
-rw-r--r--tools/lib/bpf/strset.c1
-rw-r--r--tools/lib/perf/evsel.c64
-rw-r--r--tools/lib/perf/tests/test-evlist.c6
-rw-r--r--tools/lib/perf/tests/test-evsel.c7
-rw-r--r--tools/objtool/arch/x86/decode.c2
-rw-r--r--tools/objtool/check.c16
-rw-r--r--tools/objtool/elf.c70
-rw-r--r--tools/objtool/include/objtool/elf.h1
-rw-r--r--tools/objtool/orc_gen.c2
-rw-r--r--tools/objtool/special.c22
-rw-r--r--tools/perf/Documentation/jitdump-specification.txt2
-rw-r--r--tools/perf/Documentation/perf-c2c.txt2
-rw-r--r--tools/perf/Documentation/perf-intel-pt.txt2
-rw-r--r--tools/perf/Documentation/perf-lock.txt2
-rw-r--r--tools/perf/Documentation/perf-script-perl.txt2
-rw-r--r--tools/perf/Documentation/perf-script-python.txt2
-rw-r--r--tools/perf/Documentation/perf-stat.txt2
-rw-r--r--tools/perf/Documentation/topdown.txt2
-rw-r--r--tools/perf/Makefile.config2
-rw-r--r--tools/perf/Makefile.perf2
-rw-r--r--tools/perf/arch/arm/util/auxtrace.c8
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c24
-rw-r--r--tools/perf/arch/arm/util/perf_regs.c2
-rw-r--r--tools/perf/arch/arm/util/pmu.c2
-rw-r--r--tools/perf/arch/arm/util/unwind-libdw.c6
-rw-r--r--tools/perf/arch/arm/util/unwind-libunwind.c4
-rw-r--r--tools/perf/arch/x86/util/iostat.c2
-rw-r--r--tools/perf/builtin-script.c24
-rw-r--r--tools/perf/builtin-stat.c2
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/other.json2
-rw-r--r--tools/perf/pmu-events/jevents.c2
-rw-r--r--tools/perf/tests/attr/test-stat-default97
-rw-r--r--tools/perf/tests/attr/test-stat-detailed-1113
-rw-r--r--tools/perf/tests/attr/test-stat-detailed-2137
-rw-r--r--tools/perf/tests/attr/test-stat-detailed-3145
-rw-r--r--tools/perf/tests/code-reading.c4
-rw-r--r--tools/perf/tests/dwarf-unwind.c39
-rw-r--r--tools/perf/ui/browser.c33
-rw-r--r--tools/perf/ui/browser.h2
-rw-r--r--tools/perf/ui/browsers/annotate.c24
-rw-r--r--tools/perf/util/bpf-event.c3
-rw-r--r--tools/perf/util/config.c2
-rw-r--r--tools/perf/util/machine.c1
-rw-r--r--tools/perf/util/session.c4
-rwxr-xr-xtools/testing/kunit/kunit.py24
-rwxr-xr-xtools/testing/kunit/kunit_tool_test.py8
-rw-r--r--tools/testing/selftests/arm64/signal/test_signals_utils.c7
-rw-r--r--tools/testing/selftests/bpf/Makefile6
-rw-r--r--tools/testing/selftests/bpf/README.rst13
-rw-r--r--tools/testing/selftests/bpf/prog_tests/attach_probe.c24
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dump.c27
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/probe_user.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reference_tracking.c52
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_assign.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_multi.c30
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tailcalls.c58
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trace_printk.c24
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trace_vprintk.c68
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdpwall.c15
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_flow.c3
-rw-r--r--tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c4
-rw-r--r--tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c4
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_array_map_elem.c2
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c2
-rw-r--r--tools/testing/selftests/bpf/progs/kfree_skb.c4
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_test.c4
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c2
-rw-r--r--tools/testing/selftests/bpf/progs/perf_event_stackmap.c4
-rw-r--r--tools/testing/selftests/bpf/progs/skb_pkt_end.c2
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c12
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_multi.c5
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall1.c7
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall2.c23
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall3.c7
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall4.c7
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall5.c7
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall6.c6
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c7
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c7
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c11
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c15
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_map_in_map.c14
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_cgroup_link.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_check_mtu.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_data.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func1.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func3.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func5.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func6.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func7.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_in_map.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_perf_buffer.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_access.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_md_access.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_probe_user.c28
-rw-r--r--tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_assign.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup.c44
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c37
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_listen.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_update.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_map.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_bpf.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_neigh.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_peer.c10
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_link.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_loop.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_noinline.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c4
-rw-r--r--tools/testing/selftests/bpf/progs/trace_vprintk.c33
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_dummy.c2
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c4
-rw-r--r--tools/testing/selftests/bpf/progs/xdping_kern.c4
-rw-r--r--tools/testing/selftests/bpf/progs/xdpwall.c365
-rw-r--r--tools/testing/selftests/bpf/test_bpftool.py22
-rwxr-xr-xtools/testing/selftests/bpf/test_lwt_ip_encap.sh13
-rwxr-xr-xtools/testing/selftests/bpf/test_tcp_check_syncookie.sh4
-rwxr-xr-xtools/testing/selftests/bpf/test_tunnel.sh5
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_meta.sh5
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_redirect.sh4
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_redirect_multi.sh2
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_veth.sh4
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_vlan.sh7
-rw-r--r--tools/testing/selftests/bpf/verifier/spill_fill.c161
-rw-r--r--tools/testing/selftests/bpf/xdping.c5
-rw-r--r--tools/testing/selftests/bpf/xdpxceiver.c133
-rw-r--r--tools/testing/selftests/bpf/xdpxceiver.h11
-rw-r--r--tools/testing/selftests/drivers/dma-buf/udmabuf.c5
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh50
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_offload.sh276
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh127
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh64
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh8
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/devlink_trap_tunnel_ipip6.sh250
-rwxr-xr-xtools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh52
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc54
-rw-r--r--tools/testing/selftests/kvm/.gitignore2
-rw-r--r--tools/testing/selftests/kvm/Makefile4
-rw-r--r--tools/testing/selftests/kvm/access_tracking_perf_test.c6
-rw-r--r--tools/testing/selftests/kvm/demand_paging_test.c15
-rw-r--r--tools/testing/selftests/kvm/dirty_log_perf_test.c62
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h7
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h34
-rw-r--r--tools/testing/selftests/kvm/kvm_page_table_test.c7
-rw-r--r--tools/testing/selftests/kvm/lib/test_util.c39
-rw-r--r--tools/testing/selftests/kvm/rseq_test.c286
-rw-r--r--tools/testing/selftests/kvm/steal_time.c20
-rw-r--r--tools/testing/selftests/kvm/x86_64/mmio_warning_test.c3
-rw-r--r--tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c128
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c15
-rw-r--r--tools/testing/selftests/lib.mk1
-rw-r--r--tools/testing/selftests/net/af_unix/Makefile5
-rw-r--r--tools/testing/selftests/net/af_unix/test_unix_oob.c5
-rw-r--r--tools/testing/selftests/net/config1
-rwxr-xr-xtools/testing/selftests/net/fcnal-test.sh60
-rwxr-xr-xtools/testing/selftests/net/fib_nexthops.sh1
-rw-r--r--tools/testing/selftests/net/forwarding/Makefile1
-rw-r--r--tools/testing/selftests/net/forwarding/forwarding.config.sample6
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh172
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_flat.sh65
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_flat_key.sh65
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_flat_keys.sh65
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_hier.sh65
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_hier_key.sh65
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_hier_keys.sh65
-rw-r--r--tools/testing/selftests/net/forwarding/ip6gre_lib.sh438
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh8
-rw-r--r--tools/testing/selftests/net/forwarding/tc_common.sh10
-rwxr-xr-xtools/testing/selftests/net/ioam6.sh228
-rw-r--r--tools/testing/selftests/net/ioam6_parser.c164
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh5
-rwxr-xr-xtools/testing/selftests/net/mptcp/pm_netlink.sh6
-rw-r--r--tools/testing/selftests/net/nettest.c28
-rw-r--r--tools/testing/selftests/net/tls.c28
-rwxr-xr-xtools/testing/selftests/netfilter/nft_flowtable.sh1
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat.sh145
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat_zones.sh309
-rwxr-xr-xtools/testing/selftests/netfilter/nft_zones_many.sh156
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-syscall-asm.S37
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-syscall.c36
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c23
-rw-r--r--tools/testing/vsock/vsock_diag_test.c2
-rw-r--r--tools/usb/testusb.c14
-rw-r--r--tools/vm/page-types.c2
-rw-r--r--virt/kvm/kvm_main.c68
2806 files changed, 84099 insertions, 35429 deletions
diff --git a/.mailmap b/.mailmap
index 6e849110cb4e..90e614d2bf7e 100644
--- a/.mailmap
+++ b/.mailmap
@@ -33,6 +33,8 @@ Al Viro <[email protected]>
Andreas Herrmann <[email protected]>
+Andrej Shadura <[email protected]>
Andrew Morton <[email protected]>
diff --git a/CREDITS b/CREDITS
index 7ef7b136e71d..d8f63e8329e8 100644
--- a/CREDITS
+++ b/CREDITS
@@ -971,6 +971,7 @@ D: PowerPC
N: Daniel Drake
D: USBAT02 CompactFlash support in usb-storage
+D: ZD1211RW wireless driver
S: UK
N: Oleg Drokin
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index babbe04c8d37..4d8c27eca96b 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1226,7 +1226,7 @@ PAGE_SIZE multiple when read back.
Note that all fields in this file are hierarchical and the
file modified event can be generated due to an event down the
- hierarchy. For for the local events at the cgroup level see
+ hierarchy. For the local events at the cgroup level see
memory.events.local.
low
@@ -2170,19 +2170,19 @@ existing device files.
Cgroup v2 device controller has no interface files and is implemented
on top of cgroup BPF. To control access to device files, a user may
-create bpf programs of the BPF_CGROUP_DEVICE type and attach them
-to cgroups. On an attempt to access a device file, corresponding
-BPF programs will be executed, and depending on the return value
-the attempt will succeed or fail with -EPERM.
-
-A BPF_CGROUP_DEVICE program takes a pointer to the bpf_cgroup_dev_ctx
-structure, which describes the device access attempt: access type
-(mknod/read/write) and device (type, major and minor numbers).
-If the program returns 0, the attempt fails with -EPERM, otherwise
-it succeeds.
-
-An example of BPF_CGROUP_DEVICE program may be found in the kernel
-source tree in the tools/testing/selftests/bpf/progs/dev_cgroup.c file.
+create bpf programs of type BPF_PROG_TYPE_CGROUP_DEVICE and attach
+them to cgroups with the BPF_CGROUP_DEVICE flag. On an attempt to access a
+device file, corresponding BPF programs will be executed, and depending
+on the return value the attempt will succeed or fail with -EPERM.
+
+A BPF_PROG_TYPE_CGROUP_DEVICE program takes a pointer to the
+bpf_cgroup_dev_ctx structure, which describes the device access attempt:
+access type (mknod/read/write) and device (type, major and minor numbers).
+If the program returns 0, the attempt fails with -EPERM, otherwise it
+succeeds.
+
+An example of a BPF_PROG_TYPE_CGROUP_DEVICE program may be found in
+tools/testing/selftests/bpf/progs/dev_cgroup.c in the kernel source tree.
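As an illustration only (not the selftest itself), a minimal program of this
type could look like the following sketch; the section name follows libbpf
conventions, and the choice of char device 1:3 (/dev/null) is an assumed
example::

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("cgroup/dev")
    int allow_null_only(struct bpf_cgroup_dev_ctx *ctx)
    {
            /* The lower 16 bits of access_type encode the device type. */
            short type = ctx->access_type & 0xFFFF;

            /* Permit the char device 1:3 (/dev/null) only; returning 0
             * makes the access attempt fail with -EPERM. */
            if (type == BPF_DEVCG_DEV_CHAR && ctx->major == 1 && ctx->minor == 3)
                    return 1;
            return 0;
    }

    char _license[] SEC("license") = "GPL";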
RDMA
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 91ba391f9b32..43dc35fe5bc0 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1266,7 +1266,7 @@
The VGA and EFI output is eventually overwritten by
the real console.
- The xen output can only be used by Xen PV guests.
+ The xen option can only be used in Xen domains.
The sclp output can only be used on s390.
diff --git a/Documentation/bpf/bpf_licensing.rst b/Documentation/bpf/bpf_licensing.rst
new file mode 100644
index 000000000000..b19c433f41d2
--- /dev/null
+++ b/Documentation/bpf/bpf_licensing.rst
@@ -0,0 +1,92 @@
+=============
+BPF licensing
+=============
+
+Background
+==========
+
+* Classic BPF was BSD licensed
+
+"BPF" was originally introduced as BSD Packet Filter in
+http://www.tcpdump.org/papers/bpf-usenix93.pdf. The corresponding instruction
+set and its implementation came from BSD with a BSD license. That original
+instruction set is now known as "classic BPF".
+
+However, an instruction set is a specification for machine-language
+interaction, similar to a programming language. It is not code itself.
+Therefore, the application of a BSD license may be misleading in this
+context, as the instruction set may enjoy no copyright protection.
+
+* eBPF (extended BPF) instruction set continues to be BSD
+
+In 2014, the classic BPF instruction set was significantly extended. We
+typically refer to this instruction set as eBPF to disambiguate it from cBPF.
+The eBPF instruction set is still BSD licensed.
+
+Implementations of eBPF
+=======================
+
+Using the eBPF instruction set requires implementing code in both kernel space
+and user space.
+
+In Linux Kernel
+---------------
+
+The reference implementations of the eBPF interpreter and various just-in-time
+compilers are part of Linux and are GPLv2 licensed. The implementation of
+eBPF helper functions is also GPLv2 licensed. Interpreters, JITs, helpers,
+and verifiers are collectively called the eBPF runtime.
+
+In User Space
+-------------
+
+There are also implementations of the eBPF runtime (interpreter, JITs, helper
+functions) under
+Apache2 (https://github.com/iovisor/ubpf),
+MIT (https://github.com/qmonnet/rbpf), and
+BSD (https://github.com/DPDK/dpdk/blob/main/lib/librte_bpf).
+
+In HW
+-----
+
+The HW can choose to execute eBPF instructions natively and provide the eBPF
+runtime in HW, or via firmware implemented under a proprietary license.
+
+In other operating systems
+--------------------------
+
+Implementations of the eBPF instruction set and runtime in other kernels or
+in user space can have proprietary licenses.
+
+Using BPF programs in the Linux kernel
+======================================
+
+The Linux kernel (while being GPLv2) allows linking of proprietary kernel modules
+under these rules:
+Documentation/process/license-rules.rst
+
+When a kernel module is loaded, the Linux kernel checks which functions it
+intends to use. If any function is marked as "GPL only," the corresponding
+module or program has to have a GPL-compatible license.
+
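As a hypothetical kernel-side sketch of that mechanism (the helper name is
made up purely for illustration)::

    #include <linux/module.h>
    #include <linux/export.h>

    int my_gpl_only_helper(void)
    {
            return 0;
    }
    /* Only modules whose MODULE_LICENSE() string is GPL compatible may
     * link against a symbol exported with EXPORT_SYMBOL_GPL(). */
    EXPORT_SYMBOL_GPL(my_gpl_only_helper);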
+Loading a BPF program into the Linux kernel is similar to loading a kernel
+module. BPF is loaded at run time and not statically linked to the Linux
+kernel. BPF program loading follows the same license checking rules as kernel
+modules. BPF programs can be proprietary if they don't use "GPL only" BPF
+helper functions.
+
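A minimal sketch of how this looks from the BPF program side (the tracepoint
and message are assumptions for illustration); bpf_trace_printk() is one of
the "GPL only" helpers, so a non-GPL license string here would make the load
fail::

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("tracepoint/syscalls/sys_enter_execve")
    int trace_execve(void *ctx)
    {
            char msg[] = "execve\n";

            /* GPL-only helper: usable because of the license below. */
            bpf_trace_printk(msg, sizeof(msg));
            return 0;
    }

    /* The kernel reads this section at load time to check the license. */
    char _license[] SEC("license") = "GPL";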
+Further, some BPF program types - Linux Security Modules (LSM) and TCP
+Congestion Control (struct_ops), as of Aug 2021 - are required to be GPL
+compatible even if they don't use "GPL only" helper functions directly. The
+registration step of LSM and TCP congestion control modules of the Linux
+kernel is done through EXPORT_SYMBOL_GPL kernel functions. In that sense LSM
+and struct_ops BPF programs are implicitly calling "GPL only" functions.
+The same restriction applies to BPF programs that call kernel functions
+directly via the unstable interface also known as "kfunc".
+
+Packaging BPF programs with user space applications
+====================================================
+
+Generally, proprietary-licensed applications and GPL licensed BPF programs
+written for the Linux kernel in the same package can co-exist because they are
+separate executable processes. This applies to both cBPF and eBPF programs.
diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst
index 1ceb5d704a97..37f273a7e8b6 100644
--- a/Documentation/bpf/index.rst
+++ b/Documentation/bpf/index.rst
@@ -82,6 +82,15 @@ Testing and debugging BPF
s390
+Licensing
+=========
+
+.. toctree::
+ :maxdepth: 1
+
+ bpf_licensing
+
+
Other
=====
diff --git a/Documentation/core-api/irq/irq-domain.rst b/Documentation/core-api/irq/irq-domain.rst
index 6979b4af2c1f..9c0e8758037a 100644
--- a/Documentation/core-api/irq/irq-domain.rst
+++ b/Documentation/core-api/irq/irq-domain.rst
@@ -175,9 +175,10 @@ for IRQ numbers that are passed to struct device registrations. In that
case the Linux IRQ numbers cannot be dynamically assigned and the legacy
mapping should be used.
-As the name implies, the *_legacy() functions are deprecated and only
+As the name implies, the \*_legacy() functions are deprecated and only
exist to ease the support of ancient platforms. No new users should be
-added.
+added. The same goes for the \*_simple() functions when their use results
+in the legacy behaviour.
The legacy map assumes a contiguous range of IRQ numbers has already
been allocated for the controller and that the IRQ number can be
diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml
index b962fa6d649c..d79d36ac0c44 100644
--- a/Documentation/devicetree/bindings/arm/tegra.yaml
+++ b/Documentation/devicetree/bindings/arm/tegra.yaml
@@ -54,7 +54,7 @@ properties:
- const: toradex,apalis_t30
- const: nvidia,tegra30
- items:
- - const: toradex,apalis_t30-eval-v1.1
+ - const: toradex,apalis_t30-v1.1-eval
- const: toradex,apalis_t30-eval
- const: toradex,apalis_t30-v1.1
- const: toradex,apalis_t30
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
index 07b20383cbca..b446d0f0f1b4 100644
--- a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
@@ -50,7 +50,6 @@ properties:
data-lanes:
description: array of physical DSI data lane indexes.
minItems: 1
- maxItems: 4
items:
- const: 1
- const: 2
@@ -71,7 +70,6 @@ properties:
data-lanes:
description: array of physical DSI data lane indexes.
minItems: 1
- maxItems: 4
items:
- const: 1
- const: 2
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
index 1c2daf7c24cc..911564468c5e 100644
--- a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
@@ -18,7 +18,7 @@ properties:
const: ti,sn65dsi86
reg:
- const: 0x2d
+ enum: [ 0x2c, 0x2d ]
enable-gpios:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
index fbb59c9ddda6..78044c340e20 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
@@ -9,7 +9,7 @@ function block.
All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node.
For a description of the MMSYS_CONFIG binding, see
-Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt.
+Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
DISP function blocks
====================
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
index 2ed010f91e2d..20ce88ab4b3a 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
@@ -22,7 +22,7 @@ properties:
items:
- enum:
# ili9341 240*320 Color on stm32f429-disco board
- - st,sf-tc240t-9370-t
+ - st,sf-tc240t-9370-t
- const: ilitek,ili9341
reg: true
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml b/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
index 29de7807df54..bcd41e491f1d 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
@@ -31,11 +31,11 @@ properties:
clocks:
minItems: 1
- maxItems: 3
+ maxItems: 7
clock-names:
minItems: 1
- maxItems: 3
+ maxItems: 7
required:
- compatible
@@ -72,6 +72,32 @@ allOf:
contains:
enum:
- qcom,sdm660-a2noc
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Bus Clock.
+ - description: Bus A Clock.
+ - description: IPA Clock.
+ - description: UFS AXI Clock.
+ - description: Aggregate2 UFS AXI Clock.
+ - description: Aggregate2 USB3 AXI Clock.
+ - description: Config NoC USB2 AXI Clock.
+ clock-names:
+ items:
+ - const: bus
+ - const: bus_a
+ - const: ipa
+ - const: ufs_axi
+ - const: aggre2_ufs_axi
+ - const: aggre2_usb3_axi
+ - const: cfg_noc_usb2_axi
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,sdm660-bimc
- qcom,sdm660-cnoc
- qcom,sdm660-gnoc
@@ -91,6 +117,7 @@ examples:
- |
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/clock/qcom,mmcc-sdm660.h>
+ #include <dt-bindings/clock/qcom,gcc-sdm660.h>
bimc: interconnect@1008000 {
compatible = "qcom,sdm660-bimc";
@@ -123,9 +150,20 @@ examples:
compatible = "qcom,sdm660-a2noc";
reg = <0x01704000 0xc100>;
#interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
+ clock-names = "bus",
+ "bus_a",
+ "ipa",
+ "ufs_axi",
+ "aggre2_ufs_axi",
+ "aggre2_usb3_axi",
+ "cfg_noc_usb2_axi";
clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>,
- <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>;
+ <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>,
+ <&rpmcc RPM_SMD_IPA_CLK>,
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+ <&gcc GCC_CFG_NOC_USB2_AXI_CLK>;
};
mnoc: interconnect@1745000 {
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov5647.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov5647.yaml
index 3e5d82df90a2..a2abed06a099 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov5647.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov5647.yaml
@@ -31,7 +31,7 @@ properties:
maxItems: 1
port:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
additionalProperties: false
properties:
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov9282.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov9282.yaml
index ad42992c6da3..bf115ab9d926 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov9282.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov9282.yaml
@@ -38,7 +38,7 @@ properties:
port:
additionalProperties: false
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
properties:
endpoint:
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
index 881f79532501..cf2ca2702cc9 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
@@ -38,7 +38,7 @@ properties:
port:
additionalProperties: false
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
properties:
endpoint:
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml
index 1edeabf39e6a..afcf70947f7e 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml
@@ -38,7 +38,7 @@ properties:
port:
additionalProperties: false
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
properties:
endpoint:
diff --git a/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml b/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
index e6c9a2f77cc7..f300ced4cdf3 100644
--- a/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
+++ b/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
@@ -20,9 +20,7 @@ properties:
- snps,dwcmshc-sdhci
reg:
- minItems: 1
- items:
- - description: Offset and length of the register set for the device
+ maxItems: 1
interrupts:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/net/asix,ax88796c.yaml b/Documentation/devicetree/bindings/net/asix,ax88796c.yaml
new file mode 100644
index 000000000000..699ebf452479
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/asix,ax88796c.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/asix,ax88796c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ASIX AX88796C SPI Ethernet Adapter
+
+maintainers:
+ - Łukasz Stelmach <[email protected]>
+
+description: |
+ ASIX AX88796C is an Ethernet controller with a built-in PHY. This
+ binding describes the SPI mode of the chip.
+
+ The node for this driver must be a child node of an SPI controller,
+ hence all mandatory properties described in
+ ../spi/spi-controller.yaml must be specified.
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+properties:
+ compatible:
+ const: asix,ax88796c
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 40000000
+
+ interrupts:
+ maxItems: 1
+
+ reset-gpios:
+ description:
+ A GPIO line handling reset of the chip. As the line is active low,
+ it should be marked GPIO_ACTIVE_LOW.
+ maxItems: 1
+
+ local-mac-address: true
+
+ mac-address: true
+
+required:
+ - compatible
+ - reg
+ - spi-max-frequency
+ - interrupts
+ - reset-gpios
+
+additionalProperties: false
+
+examples:
+ # Artik5 eval board
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+ spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet@0 {
+ compatible = "asix,ax88796c";
+ reg = <0x0>;
+ local-mac-address = [00 00 00 00 00 00]; /* Filled in by a bootloader */
+ interrupt-parent = <&gpx2>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ spi-max-frequency = <40000000>;
+ reset-gpios = <&gpe0 2 GPIO_ACTIVE_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.yaml b/Documentation/devicetree/bindings/net/dsa/dsa.yaml
index 16aa192c118e..2ad7f79ad371 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.yaml
@@ -46,6 +46,9 @@ patternProperties:
type: object
description: Ethernet switch ports
+ allOf:
+ - $ref: "http://devicetree.org/schemas/net/ethernet-controller.yaml#"
+
properties:
reg:
description: Port number
@@ -73,11 +76,14 @@ patternProperties:
dsa-tag-protocol:
description:
Instead of the default, the switch will use this tag protocol if
- possible. Useful when a device supports multiple protcols and
+ possible. Useful when a device supports multiple protocols and
the default is incompatible with the Ethernet device.
enum:
- dsa
- edsa
+ - ocelot
+ - ocelot-8021q
+ - seville
phy-handle: true
@@ -91,6 +97,10 @@ patternProperties:
managed: true
+ rx-internal-delay-ps: true
+
+ tx-internal-delay-ps: true
+
required:
- reg
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
index 30c11fea491b..2363b412410c 100644
--- a/Documentation/devicetree/bindings/net/dsa/marvell.txt
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -83,7 +83,7 @@ Example:
#interrupt-cells = <2>;
switch0: switch@0 {
- compatible = "marvell,mv88e6390";
+ compatible = "marvell,mv88e6190";
reg = <0>;
reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
diff --git a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
index f978f8719d8e..24cd733c11d1 100644
--- a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
@@ -74,10 +74,42 @@ properties:
- compatible
- reg
+patternProperties:
+ "^(ethernet-)?ports$":
+ patternProperties:
+ "^(ethernet-)?port@[0-9]+$":
+ allOf:
+ - if:
+ properties:
+ phy-mode:
+ contains:
+ enum:
+ - rgmii
+ - rgmii-rxid
+ - rgmii-txid
+ - rgmii-id
+ then:
+ properties:
+ rx-internal-delay-ps:
+ $ref: "#/$defs/internal-delay-ps"
+ tx-internal-delay-ps:
+ $ref: "#/$defs/internal-delay-ps"
+
required:
- compatible
- reg
+$defs:
+ internal-delay-ps:
+ description:
+ Disable tunable delay lines using 0 ps, or enable them and select
+ the phase between 1640 ps (73.8 degree shift at 1Gbps) and 2260 ps
+ (101.7 degree shift) in increments of 0.9 degrees (20 ps).
+ enum:
+ [0, 1640, 1660, 1680, 1700, 1720, 1740, 1760, 1780, 1800, 1820, 1840,
+ 1860, 1880, 1900, 1920, 1940, 1960, 1980, 2000, 2020, 2040, 2060, 2080,
+ 2100, 2120, 2140, 2160, 2180, 2200, 2220, 2240, 2260]
+
unevaluatedProperties: false
examples:
@@ -97,29 +129,40 @@ examples:
port@0 {
phy-handle = <&rgmii_phy6>;
phy-mode = "rgmii-id";
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <0>;
reg = <0>;
};
port@1 {
phy-handle = <&rgmii_phy3>;
phy-mode = "rgmii-id";
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <0>;
reg = <1>;
};
port@2 {
phy-handle = <&rgmii_phy4>;
phy-mode = "rgmii-id";
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <0>;
reg = <2>;
};
port@3 {
+ phy-handle = <&rgmii_phy4>;
phy-mode = "rgmii-id";
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <0>;
reg = <3>;
};
port@4 {
ethernet = <&enet2>;
phy-mode = "rgmii";
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <0>;
reg = <4>;
fixed-link {
diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.txt b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
deleted file mode 100644
index 8c73f67c43ca..000000000000
--- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt
+++ /dev/null
@@ -1,215 +0,0 @@
-* Qualcomm Atheros QCA8xxx switch family
-
-Required properties:
-
-- compatible: should be one of:
- "qca,qca8327"
- "qca,qca8334"
- "qca,qca8337"
-
-- #size-cells: must be 0
-- #address-cells: must be 1
-
-Optional properties:
-
-- reset-gpios: GPIO to be used to reset the whole device
-
-Subnodes:
-
-The integrated switch subnode should be specified according to the binding
-described in dsa/dsa.txt. If the QCA8K switch is connect to a SoC's external
-mdio-bus each subnode describing a port needs to have a valid phandle
-referencing the internal PHY it is connected to. This is because there's no
-N:N mapping of port and PHY id.
-To declare the internal mdio-bus configuration, declare a mdio node in the
-switch node and declare the phandle for the port referencing the internal
-PHY is connected to. In this config a internal mdio-bus is registered and
-the mdio MASTER is used as communication.
-
-Don't use mixed external and internal mdio-bus configurations, as this is
-not supported by the hardware.
-
-The CPU port of this switch is always port 0.
-
-A CPU port node has the following optional node:
-
-- fixed-link : Fixed-link subnode describing a link to a non-MDIO
- managed entity. See
- Documentation/devicetree/bindings/net/fixed-link.txt
- for details.
-
-For QCA8K the 'fixed-link' sub-node supports only the following properties:
-
-- 'speed' (integer, mandatory), to indicate the link speed. Accepted
- values are 10, 100 and 1000
-- 'full-duplex' (boolean, optional), to indicate that full duplex is
- used. When absent, half duplex is assumed.
-
-Examples:
-
-for the external mdio-bus configuration:
-
- &mdio0 {
- phy_port1: phy@0 {
- reg = <0>;
- };
-
- phy_port2: phy@1 {
- reg = <1>;
- };
-
- phy_port3: phy@2 {
- reg = <2>;
- };
-
- phy_port4: phy@3 {
- reg = <3>;
- };
-
- phy_port5: phy@4 {
- reg = <4>;
- };
-
- switch@10 {
- compatible = "qca,qca8337";
- #address-cells = <1>;
- #size-cells = <0>;
-
- reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>;
- reg = <0x10>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- label = "cpu";
- ethernet = <&gmac1>;
- phy-mode = "rgmii";
- fixed-link {
- speed = 1000;
- full-duplex;
- };
- };
-
- port@1 {
- reg = <1>;
- label = "lan1";
- phy-handle = <&phy_port1>;
- };
-
- port@2 {
- reg = <2>;
- label = "lan2";
- phy-handle = <&phy_port2>;
- };
-
- port@3 {
- reg = <3>;
- label = "lan3";
- phy-handle = <&phy_port3>;
- };
-
- port@4 {
- reg = <4>;
- label = "lan4";
- phy-handle = <&phy_port4>;
- };
-
- port@5 {
- reg = <5>;
- label = "wan";
- phy-handle = <&phy_port5>;
- };
- };
- };
- };
-
-for the internal master mdio-bus configuration:
-
- &mdio0 {
- switch@10 {
- compatible = "qca,qca8337";
- #address-cells = <1>;
- #size-cells = <0>;
-
- reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>;
- reg = <0x10>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- label = "cpu";
- ethernet = <&gmac1>;
- phy-mode = "rgmii";
- fixed-link {
- speed = 1000;
- full-duplex;
- };
- };
-
- port@1 {
- reg = <1>;
- label = "lan1";
- phy-mode = "internal";
- phy-handle = <&phy_port1>;
- };
-
- port@2 {
- reg = <2>;
- label = "lan2";
- phy-mode = "internal";
- phy-handle = <&phy_port2>;
- };
-
- port@3 {
- reg = <3>;
- label = "lan3";
- phy-mode = "internal";
- phy-handle = <&phy_port3>;
- };
-
- port@4 {
- reg = <4>;
- label = "lan4";
- phy-mode = "internal";
- phy-handle = <&phy_port4>;
- };
-
- port@5 {
- reg = <5>;
- label = "wan";
- phy-mode = "internal";
- phy-handle = <&phy_port5>;
- };
- };
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
-
- phy_port1: phy@0 {
- reg = <0>;
- };
-
- phy_port2: phy@1 {
- reg = <1>;
- };
-
- phy_port3: phy@2 {
- reg = <2>;
- };
-
- phy_port4: phy@3 {
- reg = <3>;
- };
-
- phy_port5: phy@4 {
- reg = <4>;
- };
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.yaml b/Documentation/devicetree/bindings/net/dsa/qca8k.yaml
new file mode 100644
index 000000000000..48de0ace265d
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.yaml
@@ -0,0 +1,362 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/dsa/qca8k.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Atheros QCA83xx switch family
+
+maintainers:
+ - John Crispin <[email protected]>
+
+description:
+ If the QCA8K switch is connected to an SoC's external mdio-bus, each subnode
+ describing a port needs to have a valid phandle referencing the internal PHY
+ it is connected to. This is because there is no N:N mapping of port and PHY
+ ID. To declare the internal mdio-bus configuration, declare an MDIO node in
+ the switch node and declare the phandle for the port, referencing the internal
+ PHY it is connected to. In this config, an internal mdio-bus is registered and
+ the MDIO master is used for communication. Mixed external and internal
+ mdio-bus configurations are not supported by the hardware.
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - qca,qca8327
+ - qca,qca8328
+ - qca,qca8334
+ - qca,qca8337
+ description: |
+ qca,qca8328: referenced as AR8328(N)-AK1(A/B) QFN 176 pin package
+ qca,qca8327: referenced as AR8327(N)-AL1A DR-QFN 148 pin package
+ qca,qca8334: referenced as QCA8334-AL3C QFN 88 pin package
+ qca,qca8337: referenced as QCA8337N-AL3(B/C) DR-QFN 148 pin package
+
+ reg:
+ maxItems: 1
+
+ reset-gpios:
+ description:
+ GPIO to be used to reset the whole device
+ maxItems: 1
+
+ qca,ignore-power-on-sel:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Ignore power-on pin strapping to configure LED open-drain or EEPROM
+ presence. This is needed for devices with incorrect configuration or when
+ the OEM has decided not to use pin strapping and falls back to SW regs.
+
+ qca,led-open-drain:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set LEDs to open-drain mode. This requires the qca,ignore-power-on-sel to
+ be set, otherwise the driver will fail at probe. This is required if the
+ OEM does not use pin strapping to set this mode and prefers to set it
+ using SW regs. The pin strappings related to LED open-drain mode are
+ B68 on the QCA832x and B49 on the QCA833x.
+
+ mdio:
+ type: object
+ description: The qca8k switch has an internal mdio bus to access the
+ switch ports. If this node is not present, the legacy mapping is used
+ and internal mdio access is used. With the legacy mapping, the reg
+ corresponding to the internal mdio bus is the switch reg with an
+ offset of -1.
+
+ properties:
+ '#address-cells':
+ const: 1
+ '#size-cells':
+ const: 0
+
+ patternProperties:
+ "^(ethernet-)?phy@[0-4]$":
+ type: object
+
+ allOf:
+ - $ref: "http://devicetree.org/schemas/net/mdio.yaml#"
+
+ properties:
+ reg:
+ maxItems: 1
+
+ required:
+ - reg
+
+patternProperties:
+ "^(ethernet-)?ports$":
+ type: object
+ properties:
+ '#address-cells':
+ const: 1
+ '#size-cells':
+ const: 0
+
+ patternProperties:
+ "^(ethernet-)?port@[0-6]$":
+ type: object
+ description: Ethernet switch ports
+
+ properties:
+ reg:
+ description: Port number
+
+ label:
+ description:
+ Describes the label associated with this port, which will become
+ the netdev name
+ $ref: /schemas/types.yaml#/definitions/string
+
+ link:
+ description:
+ Should be a list of phandles to other switches' DSA ports. This
+ port is used as the outgoing port towards the phandle ports. The
+ full routing information must be given, not just the one-hop
+ routes to neighbouring switches.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+
+ ethernet:
+ description:
+ Should be a phandle to a valid Ethernet device node. This host
+ device is what the switch port is connected to
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ phy-handle: true
+
+ phy-mode: true
+
+ fixed-link: true
+
+ mac-address: true
+
+ sfp: true
+
+ qca,sgmii-rxclk-falling-edge:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set the receive clock phase to falling edge. Most commonly used on
+ the QCA8327 with CPU port 0 set to SGMII.
+
+ qca,sgmii-txclk-falling-edge:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set the transmit clock phase to falling edge.
+
+ qca,sgmii-enable-pll:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ For SGMII CPU port, explicitly enable PLL, TX and RX chain along with
+ Signal Detection. On the QCA8327 this should not be enabled, otherwise
+ the SGMII port will not initialize. When used on the QCA8337, revision 3
+ or greater, a warning will be displayed. When the CPU port is set to
+ SGMII on the QCA8337, it is advised to set this unless a communication
+ issue is observed.
+
+ required:
+ - reg
+
+ additionalProperties: false
+
+oneOf:
+ - required:
+ - ports
+ - required:
+ - ethernet-ports
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: true
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ external_phy_port1: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ external_phy_port2: ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ external_phy_port3: ethernet-phy@2 {
+ reg = <2>;
+ };
+
+ external_phy_port4: ethernet-phy@3 {
+ reg = <3>;
+ };
+
+ external_phy_port5: ethernet-phy@4 {
+ reg = <4>;
+ };
+
+ switch@10 {
+ compatible = "qca,qca8337";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>;
+ reg = <0x10>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "cpu";
+ ethernet = <&gmac1>;
+ phy-mode = "rgmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ phy-handle = <&external_phy_port1>;
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ phy-handle = <&external_phy_port2>;
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ phy-handle = <&external_phy_port3>;
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan4";
+ phy-handle = <&external_phy_port4>;
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "wan";
+ phy-handle = <&external_phy_port5>;
+ };
+ };
+ };
+ };
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch@10 {
+ compatible = "qca,qca8337";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>;
+ reg = <0x10>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "cpu";
+ ethernet = <&gmac1>;
+ phy-mode = "rgmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ phy-mode = "internal";
+ phy-handle = <&internal_phy_port1>;
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ phy-mode = "internal";
+ phy-handle = <&internal_phy_port2>;
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ phy-mode = "internal";
+ phy-handle = <&internal_phy_port3>;
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan4";
+ phy-mode = "internal";
+ phy-handle = <&internal_phy_port4>;
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "wan";
+ phy-mode = "internal";
+ phy-handle = <&internal_phy_port5>;
+ };
+
+ port@6 {
+ reg = <0>;
+ label = "cpu";
+ ethernet = <&gmac1>;
+ phy-mode = "sgmii";
+
+ qca,sgmii-rxclk-falling-edge;
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ internal_phy_port1: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ internal_phy_port2: ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ internal_phy_port3: ethernet-phy@2 {
+ reg = <2>;
+ };
+
+ internal_phy_port4: ethernet-phy@3 {
+ reg = <3>;
+ };
+
+ internal_phy_port5: ethernet-phy@4 {
+ reg = <4>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt b/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
index b6ae8541bd55..7959ec237983 100644
--- a/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
+++ b/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
@@ -9,6 +9,7 @@ SMI-based Realtek devices.
Required properties:
- compatible: must be exactly one of:
+ "realtek,rtl8365mb" (4+1 ports)
"realtek,rtl8366"
"realtek,rtl8366rb" (4+1 ports)
"realtek,rtl8366s" (4+1 ports)
@@ -62,6 +63,8 @@ and subnodes of DSA switches.
Examples:
+An example for the RTL8366RB:
+
switch {
compatible = "realtek,rtl8366rb";
/* 22 = MDIO (has input reads), 21 = MDC (clock, output only) */
@@ -151,3 +154,87 @@ switch {
};
};
};
+
+An example for the RTL8365MB-VC:
+
+switch {
+ compatible = "realtek,rtl8365mb";
+ mdc-gpios = <&gpio1 16 GPIO_ACTIVE_HIGH>;
+ mdio-gpios = <&gpio1 17 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+
+ switch_intc: interrupt-controller {
+ interrupt-parent = <&gpio5>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ port@0 {
+ reg = <0>;
+ label = "swp0";
+ phy-handle = <&ethphy0>;
+ };
+ port@1 {
+ reg = <1>;
+ label = "swp1";
+ phy-handle = <&ethphy1>;
+ };
+ port@2 {
+ reg = <2>;
+ label = "swp2";
+ phy-handle = <&ethphy2>;
+ };
+ port@3 {
+ reg = <3>;
+ label = "swp3";
+ phy-handle = <&ethphy3>;
+ };
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&fec1>;
+ phy-mode = "rgmii";
+ tx-internal-delay-ps = <2000>;
+ rx-internal-delay-ps = <2000>;
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+
+ mdio {
+ compatible = "realtek,smi-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: phy@0 {
+ reg = <0>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <0>;
+ };
+ ethphy1: phy@1 {
+ reg = <1>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <1>;
+ };
+ ethphy2: phy@2 {
+ reg = <2>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <2>;
+ };
+ ethphy3: phy@3 {
+ reg = <3>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <3>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
index 5629b2e4ccf8..ee4afe361fac 100644
--- a/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
+++ b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
@@ -34,7 +34,6 @@ properties:
clocks:
minItems: 3
- maxItems: 5
items:
- description: MAC host clock
- description: MAC apb clock
diff --git a/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml b/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml
index 948677ade6d1..d7748dd33199 100644
--- a/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml
@@ -51,6 +51,9 @@ examples:
switch@10 {
compatible = "qca,qca8337";
reg = <0x10>;
- /* ... */
+
+ ports {
+ /* ... */
+ };
};
};
diff --git a/Documentation/devicetree/bindings/net/renesas,ether.yaml b/Documentation/devicetree/bindings/net/renesas,ether.yaml
index c101a1ec846e..06b38c9bc6ec 100644
--- a/Documentation/devicetree/bindings/net/renesas,ether.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,ether.yaml
@@ -100,15 +100,18 @@ additionalProperties: false
examples:
# Lager board
- |
- #include <dt-bindings/clock/r8a7790-clock.h>
- #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/r8a7790-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7790-sysc.h>
+ #include <dt-bindings/gpio/gpio.h>
ethernet@ee700000 {
compatible = "renesas,ether-r8a7790", "renesas,rcar-gen2-ether";
reg = <0xee700000 0x400>;
- interrupt-parent = <&gic>;
- interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp8_clks R8A7790_CLK_ETHER>;
+ interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 813>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 813>;
phy-mode = "rmii";
phy-handle = <&phy1>;
renesas,ether-link-active-low;
@@ -116,8 +119,12 @@ examples:
#size-cells = <0>;
phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-id0022.1537",
+ "ethernet-phy-ieee802.3-c22";
reg = <1>;
interrupt-parent = <&irqc0>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ micrel,led-mode = <1>;
+ reset-gpios = <&gpio5 31 GPIO_ACTIVE_LOW>;
};
};
diff --git a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
index 4c927d2c17d3..bda821065a2b 100644
--- a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
@@ -287,6 +287,7 @@ examples:
"ch13", "ch14", "ch15", "ch16", "ch17", "ch18",
"ch19", "ch20", "ch21", "ch22", "ch23", "ch24";
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
iommus = <&ipmmu_ds0 16>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
resets = <&cpg 812>;
@@ -298,6 +299,8 @@ examples:
#size-cells = <0>;
phy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-id0022.1622",
+ "ethernet-phy-ieee802.3-c22";
rxc-skew-ps = <1500>;
reg = <0>;
interrupt-parent = <&gpio2>;
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index 42689b7d03a2..c115c95ee584 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -21,6 +21,7 @@ select:
contains:
enum:
- snps,dwmac
+ - snps,dwmac-3.40a
- snps,dwmac-3.50a
- snps,dwmac-3.610
- snps,dwmac-3.70a
@@ -76,6 +77,7 @@ properties:
- rockchip,rk3399-gmac
- rockchip,rv1108-gmac
- snps,dwmac
+ - snps,dwmac-3.40a
- snps,dwmac-3.50a
- snps,dwmac-3.610
- snps,dwmac-3.70a
diff --git a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
index 8a03a24a2019..6bc61c42418f 100644
--- a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
+++ b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
@@ -24,6 +24,7 @@ properties:
- socionext,uniphier-ld11-ave4
- socionext,uniphier-ld20-ave4
- socionext,uniphier-pxs3-ave4
+ - socionext,uniphier-nx1-ave4
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
index 2911e565b260..acea1cd444fd 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
@@ -41,7 +41,6 @@ properties:
- description: builtin MSI controller.
interrupt-names:
- minItems: 1
items:
- const: msi
diff --git a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
index ca91201a9926..d7e08b03e204 100644
--- a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
+++ b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
@@ -171,7 +171,7 @@ examples:
cs-gpios = <&gpio0 13 0>,
<&gpio0 14 0>;
rx-sample-delay-ns = <3>;
- spi-flash@1 {
+ flash@1 {
compatible = "spi-nand";
reg = <1>;
rx-sample-delay-ns = <7>;
diff --git a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
new file mode 100644
index 000000000000..b9ca8ef4f2be
--- /dev/null
+++ b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/samsung,exynos-ufs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC series UFS host controller Device Tree Bindings
+
+maintainers:
+ - Alim Akhtar <[email protected]>
+
+description: |
+ Each Samsung UFS host controller instance should have its own node.
+ This binding defines Samsung-specific bindings other than what is used
+ in the common ufshcd bindings
+ [1] Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+
+properties:
+
+ compatible:
+ enum:
+ - samsung,exynos7-ufs
+
+ reg:
+ items:
+ - description: HCI register
+ - description: vendor specific register
+ - description: unipro register
+ - description: UFS protector register
+
+ reg-names:
+ items:
+ - const: hci
+ - const: vs_hci
+ - const: unipro
+ - const: ufsp
+
+ clocks:
+ items:
+ - description: ufs link core clock
+ - description: unipro main clock
+
+ clock-names:
+ items:
+ - const: core_clk
+ - const: sclk_unipro_main
+
+ interrupts:
+ maxItems: 1
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ const: ufs-phy
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - phys
+ - phy-names
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/exynos7-clk.h>
+
+ ufs: ufs@15570000 {
+ compatible = "samsung,exynos7-ufs";
+ reg = <0x15570000 0x100>,
+ <0x15570100 0x100>,
+ <0x15571000 0x200>,
+ <0x15572000 0x300>;
+ reg-names = "hci", "vs_hci", "unipro", "ufsp";
+ interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock_fsys1 ACLK_UFS20_LINK>,
+ <&clock_fsys1 SCLK_UFSUNIPRO20_USER>;
+ clock-names = "core_clk", "sclk_unipro_main";
+ pinctrl-names = "default";
+ pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
+ phys = <&ufs_phy>;
+ phy-names = "ufs-phy";
+ };
+...
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index a867f7102c35..e2eb0c738f3a 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -131,6 +131,8 @@ patternProperties:
description: Asahi Kasei Corp.
"^asc,.*":
description: All Sensors Corporation
+ "^asix,.*":
+ description: ASIX Electronics Corporation
"^aspeed,.*":
description: ASPEED Technology Inc.
"^asus,.*":
diff --git a/Documentation/filesystems/ntfs3.rst b/Documentation/filesystems/ntfs3.rst
index ffe9ea0c1499..d67ccd22c63b 100644
--- a/Documentation/filesystems/ntfs3.rst
+++ b/Documentation/filesystems/ntfs3.rst
@@ -4,103 +4,112 @@
NTFS3
=====
-
Summary and Features
====================
-NTFS3 is fully functional NTFS Read-Write driver. The driver works with
-NTFS versions up to 3.1, normal/compressed/sparse files
-and journal replaying. File system type to use on mount is 'ntfs3'.
+NTFS3 is a fully functional NTFS read-write driver. The driver works with NTFS
+versions up to 3.1. The file system type to use on mount is *ntfs3*.
- This driver implements NTFS read/write support for normal, sparse and
compressed files.
-- Supports native journal replaying;
-- Supports extended attributes
- Predefined extended attributes:
- - 'system.ntfs_security' gets/sets security
- descriptor (SECURITY_DESCRIPTOR_RELATIVE)
- - 'system.ntfs_attrib' gets/sets ntfs file/dir attributes.
- Note: applied to empty files, this allows to switch type between
- sparse(0x200), compressed(0x800) and normal;
+- Supports native journal replaying.
- Supports NFS export of mounted NTFS volumes.
+- Supports extended attributes. Predefined extended attributes:
+
+ - *system.ntfs_security* gets/sets security
+
+ Descriptor: SECURITY_DESCRIPTOR_RELATIVE
+
+ - *system.ntfs_attrib* gets/sets ntfs file/dir attributes.
+
+   Note: Applied to empty files, this allows switching the type between
+   sparse(0x200), compressed(0x800) and normal.
Mount Options
=============
The list below describes mount options supported by NTFS3 driver in addition to
-generic ones.
+generic ones. Every mount option can also be used with a **no** prefix to
+negate it; where an entry in this table is listed with the **no** prefix, the
+default is the variant without **no**.
-===============================================================================
+.. flat-table::
+ :widths: 1 5
+ :fill-cells:
-nls=name This option informs the driver how to interpret path
- strings and translate them to Unicode and back. If
- this option is not set, the default codepage will be
- used (CONFIG_NLS_DEFAULT).
- Examples:
- 'nls=utf8'
+ * - iocharset=name
+ - This option informs the driver how to interpret path strings and
+ translate them to Unicode and back. If this option is not set, the
+ default codepage will be used (CONFIG_NLS_DEFAULT).
-uid=
-gid=
-umask= Controls the default permissions for files/directories created
- after the NTFS volume is mounted.
+ Example: iocharset=utf8
-fmask=
-dmask= Instead of specifying umask which applies both to
- files and directories, fmask applies only to files and
- dmask only to directories.
+ * - uid=
+ - :rspan:`1`
+ * - gid=
-nohidden Files with the Windows-specific HIDDEN (FILE_ATTRIBUTE_HIDDEN)
- attribute will not be shown under Linux.
+ * - umask=
+ - Controls the default permissions for files/directories created after
+ the NTFS volume is mounted.
-sys_immutable Files with the Windows-specific SYSTEM
- (FILE_ATTRIBUTE_SYSTEM) attribute will be marked as system
- immutable files.
+ * - dmask=
+ - :rspan:`1` Instead of specifying umask which applies both to files and
+ directories, fmask applies only to files and dmask only to directories.
+ * - fmask=
-discard Enable support of the TRIM command for improved performance
- on delete operations, which is recommended for use with the
- solid-state drives (SSD).
+ * - noacsrules
+ - "No access rules" mount option sets access rights for files/folders to
+ 777 and owner/group to root. This mount option absorbs all other
+ permissions.
-force Forces the driver to mount partitions even if 'dirty' flag
- (volume dirty) is set. Not recommended for use.
+ - Permissions change for files/folders will be reported as successful,
+ but they will remain 777.
-sparse Create new files as "sparse".
+       - Owner/group change will be reported as successful, but they will stay
+ as root.
-showmeta Use this parameter to show all meta-files (System Files) on
- a mounted NTFS partition.
- By default, all meta-files are hidden.
+ * - nohidden
+ - Files with the Windows-specific HIDDEN (FILE_ATTRIBUTE_HIDDEN) attribute
+ will not be shown under Linux.
-prealloc Preallocate space for files excessively when file size is
- increasing on writes. Decreases fragmentation in case of
- parallel write operations to different files.
+ * - sys_immutable
+ - Files with the Windows-specific SYSTEM (FILE_ATTRIBUTE_SYSTEM) attribute
+ will be marked as system immutable files.
-no_acs_rules "No access rules" mount option sets access rights for
- files/folders to 777 and owner/group to root. This mount
- option absorbs all other permissions:
- - permissions change for files/folders will be reported
- as successful, but they will remain 777;
- - owner/group change will be reported as successful, but
- they will stay as root
+ * - discard
+ - Enable support of the TRIM command for improved performance on delete
+ operations, which is recommended for use with the solid-state drives
+ (SSD).
-acl Support POSIX ACLs (Access Control Lists). Effective if
- supported by Kernel. Not to be confused with NTFS ACLs.
- The option specified as acl enables support for POSIX ACLs.
+ * - force
+     - Forces the driver to mount partitions even if the volume is marked
+       dirty.
+ Not recommended for use.
-noatime All files and directories will not update their last access
- time attribute if a partition is mounted with this parameter.
- This option can speed up file system operation.
+ * - sparse
+ - Create new files as sparse.
-===============================================================================
+ * - showmeta
+ - Use this parameter to show all meta-files (System Files) on a mounted
+ NTFS partition. By default, all meta-files are hidden.
-ToDo list
-=========
+ * - prealloc
+ - Preallocate space for files excessively when file size is increasing on
+ writes. Decreases fragmentation in case of parallel write operations to
+ different files.
-- Full journaling support (currently journal replaying is supported) over JBD.
+ * - acl
+ - Support POSIX ACLs (Access Control Lists). Effective if supported by
+ Kernel. Not to be confused with NTFS ACLs. The option specified as acl
+ enables support for POSIX ACLs.
+Todo list
+=========
+- Full journaling support over JBD. Currently journal replaying is supported,
+  which is not necessarily as effective as JBD would be.
References
==========
-https://www.paragon-software.com/home/ntfs-linux-professional/
- - Commercial version of the NTFS driver for Linux.
+- Commercial version of the NTFS driver for Linux.
+ https://www.paragon-software.com/home/ntfs-linux-professional/
- - Direct e-mail address for feedback and requests on the NTFS3 implementation.
+- Direct e-mail address for feedback and requests on the NTFS3 implementation.
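
A minimal usage sketch for the table above, assuming an NTFS volume on
/dev/sdb1 and a mount point of /mnt (both hypothetical)::

    # mount -t ntfs3 -o iocharset=utf8,uid=1000,gid=1000,umask=022 /dev/sdb1 /mnt
    # mount -t ntfs3 -o nohidden,showmeta /dev/sdb1 /mnt

Options combine in the usual mount(8) fashion.
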
diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst
index 364680cdad2e..8ba72e898099 100644
--- a/Documentation/gpu/amdgpu.rst
+++ b/Documentation/gpu/amdgpu.rst
@@ -300,8 +300,8 @@ pcie_replay_count
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
:doc: pcie_replay_count
-+GPU SmartShift Information
-============================
+GPU SmartShift Information
+==========================
GPU SmartShift information via sysfs
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
index 06af044c882f..607f78f0f189 100644
--- a/Documentation/gpu/drm-internals.rst
+++ b/Documentation/gpu/drm-internals.rst
@@ -111,15 +111,6 @@ Component Helper Usage
.. kernel-doc:: drivers/gpu/drm/drm_drv.c
:doc: component helper usage recommendations
-IRQ Helper Library
-~~~~~~~~~~~~~~~~~~
-
-.. kernel-doc:: drivers/gpu/drm/drm_irq.c
- :doc: irq helpers
-
-.. kernel-doc:: drivers/gpu/drm/drm_irq.c
- :export:
-
Memory Manager Initialization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/hwmon/k10temp.rst b/Documentation/hwmon/k10temp.rst
index 8557e26281c3..91b99adc6c48 100644
--- a/Documentation/hwmon/k10temp.rst
+++ b/Documentation/hwmon/k10temp.rst
@@ -132,20 +132,3 @@ On Family 17h and Family 18h CPUs, additional temperature sensors may report
Core Complex Die (CCD) temperatures. Up to 8 such temperatures are reported
as temp{3..10}_input, labeled Tccd{1..8}. Actual support depends on the CPU
variant.
-
-Various Family 17h and 18h CPUs report voltage and current telemetry
-information. The following attributes may be reported.
-
-Attribute Label Description
-=============== ======= ================
-in0_input Vcore Core voltage
-in1_input Vsoc SoC voltage
-curr1_input Icore Core current
-curr2_input Isoc SoC current
-=============== ======= ================
-
-Current values are raw (unscaled) as reported by the CPU. Core current is
-reported as multiples of 1A / LSB. SoC is reported as multiples of 0.25A
-/ LSB. The real current is board specific. Reported currents should be seen
-as rough guidance, and should be scaled using sensors3.conf as appropriate
-for a given board.
diff --git a/Documentation/networking/device_drivers/ethernet/intel/ice.rst b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
index e7d9cbff771b..67b7a701ce9e 100644
--- a/Documentation/networking/device_drivers/ethernet/intel/ice.rst
+++ b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
@@ -851,7 +851,7 @@ NOTES:
- 0x88A8 traffic will not be received unless VLAN stripping is disabled with
the following command::
- # ethool -K <ethX> rxvlan off
+ # ethtool -K <ethX> rxvlan off
- 0x88A8/0x8100 double VLANs cannot be used with 0x8100 or 0x8100/0x8100 VLANS
configured on the same port. 0x88a8/0x8100 traffic will not be received if
diff --git a/Documentation/networking/devlink/devlink-region.rst b/Documentation/networking/devlink/devlink-region.rst
index 58fe95e9a49d..f06dca9a1eb6 100644
--- a/Documentation/networking/devlink/devlink-region.rst
+++ b/Documentation/networking/devlink/devlink-region.rst
@@ -44,8 +44,8 @@ example usage
# Show all of the exposed regions with region sizes:
$ devlink region show
- pci/0000:00:05.0/cr-space: size 1048576 snapshot [1 2]
- pci/0000:00:05.0/fw-health: size 64 snapshot [1 2]
+ pci/0000:00:05.0/cr-space: size 1048576 snapshot [1 2] max 8
+ pci/0000:00:05.0/fw-health: size 64 snapshot [1 2] max 8
# Delete a snapshot using:
$ devlink region del pci/0000:00:05.0/cr-space snapshot 1
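
The ``max`` value shown above is the per-region snapshot limit; once that many
snapshots exist, requesting another one fails until a snapshot is deleted. A
short sketch, reusing the region names from this example::

    $ devlink region new pci/0000:00:05.0/cr-space snapshot 3
    $ devlink region dump pci/0000:00:05.0/cr-space snapshot 3
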
diff --git a/Documentation/networking/devlink/ice.rst b/Documentation/networking/devlink/ice.rst
index a432dc419fa4..59c78e9717d2 100644
--- a/Documentation/networking/devlink/ice.rst
+++ b/Documentation/networking/devlink/ice.rst
@@ -30,10 +30,11 @@ The ``ice`` driver reports the following versions
PHY, link, etc.
* - ``fw.mgmt.api``
- running
- - 1.5
- - 2-digit version number of the API exported over the AdminQ by the
- management firmware. Used by the driver to identify what commands
- are supported.
+ - 1.5.1
+ - 3-digit version number (major.minor.patch) of the API exported over
+ the AdminQ by the management firmware. Used by the driver to
+ identify what commands are supported. Historical versions of the
+ kernel only displayed a 2-digit version number (major.minor).
* - ``fw.mgmt.build``
- running
- 0x305d955f
@@ -141,6 +142,10 @@ Users can request an immediate capture of a snapshot via the
.. code:: shell
+ $ devlink region show
+ pci/0000:01:00.0/nvm-flash: size 10485760 snapshot [] max 1
+ pci/0000:01:00.0/device-caps: size 4096 snapshot [] max 10
+
$ devlink region new pci/0000:01:00.0/nvm-flash snapshot 1
$ devlink region dump pci/0000:01:00.0/nvm-flash snapshot 1
diff --git a/Documentation/networking/devlink/iosm.rst b/Documentation/networking/devlink/iosm.rst
index dd460c1c15cd..6136181339aa 100644
--- a/Documentation/networking/devlink/iosm.rst
+++ b/Documentation/networking/devlink/iosm.rst
@@ -26,23 +26,6 @@ The ``iosm`` driver implements the following driver-specific parameters.
the device during firmware flashing.
If set, Full nand erase command will be sent to the device. By default,
only conditional erase support is enabled.
- * - ``download_region``
- - u8
- - runtime
- - download_region parameter is used to identify if we are flashing the
- loadmap/region file during the firmware flashing.
- * - ``address``
- - u32
- - runtime
- - address parameter is used to send the address information of the
- loadmap/region file which is required during the firmware flashing
- process. Each region file has be flashed to its respective flash address.
- * - ``region_count``
- - u8
- - runtime
- - region_count parameter is used to inform the driver on how many total
- loadmap/region files are present in modem firmware image that has to be
- flashed.
Flash Update
@@ -87,7 +70,7 @@ Flash Commands:
1) When the modem is in the Boot ROM stage, the user can use the below command
to inject the PSI RAM image using the devlink flash command.
-$ devlink dev flash pci/0000:02:00.0 file <PSI_RAM_File_name> component PSI
+$ devlink dev flash pci/0000:02:00.0 file <PSI_RAM_File_name>
2) If the user wants to do a full erase, the below command needs to be issued to
set the erase full flash param (to be set only if a full erase is required).
@@ -95,22 +78,19 @@ erase full flash param (To be set only if full erase required).
$ devlink dev param set pci/0000:02:00.0 name erase_full_flash value true cmode runtime
3) Inject EBL after the modem is in PSI stage.
-$ devlink dev flash pci/0000:02:00.0 file <EBL_File_name> component EBL
+
+$ devlink dev flash pci/0000:02:00.0 file <EBL_File_name>
4) Once EBL is injected successfully, the actual firmware flashing takes
place. Below is the sequence of commands used for each of the firmware images.
a) Flash secure bin file.
-$ devlink dev flash pci/0000:02:00.0 file <Secure_bin_file_name> component FLS
-
-b) Flashing the Loadmap/Region file
-$ devlink dev param set pci/0000:02:00.0 name region_count value 1 cmode runtime
-$ devlink dev param set pci/0000:02:00.0 name download_region value true cmode runtime
+$ devlink dev flash pci/0000:02:00.0 file <Secure_bin_file_name>
-$ devlink dev param set pci/0000:02:00.0 name address value <Nand_address> cmode runtime
+b) Flashing the Loadmap/Region file
-$ devlink dev flash pci/0000:02:00.0 file <Load_map_file_name> component FLS
+$ devlink dev flash pci/0000:02:00.0 file <Load_map_file_name>
Regions
=======
diff --git a/Documentation/networking/dsa/sja1105.rst b/Documentation/networking/dsa/sja1105.rst
index 564caeebe2b2..29b1bae0cf00 100644
--- a/Documentation/networking/dsa/sja1105.rst
+++ b/Documentation/networking/dsa/sja1105.rst
@@ -296,7 +296,7 @@ not available.
Device Tree bindings and board design
=====================================
-This section references ``Documentation/devicetree/bindings/net/dsa/sja1105.txt``
+This section references ``Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml``
and aims to showcase some potential switch caveats.
RMII PHY role and out-of-band signaling
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index d9b55b7a1a4d..7b598c7e3912 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -41,6 +41,11 @@ In the message structure descriptions below, if an attribute name is suffixed
with "+", parent nest can contain multiple attributes of the same type. This
implements an array of entries.
+Attributes that need to be filled in by device drivers and that are dumped to
+user space based on whether they are valid or not should not use zero as a
+valid value. This avoids the need to explicitly signal the validity of the
+attribute in the device driver API.
+
Request header
==============
@@ -179,7 +184,7 @@ according to message purpose:
Userspace to kernel:
- ===================================== ================================
+ ===================================== =================================
``ETHTOOL_MSG_STRSET_GET`` get string set
``ETHTOOL_MSG_LINKINFO_GET`` get link settings
``ETHTOOL_MSG_LINKINFO_SET`` set link settings
@@ -213,7 +218,9 @@ Userspace to kernel:
``ETHTOOL_MSG_MODULE_EEPROM_GET`` read SFP module EEPROM
``ETHTOOL_MSG_STATS_GET`` get standard statistics
``ETHTOOL_MSG_PHC_VCLOCKS_GET`` get PHC virtual clocks info
- ===================================== ================================
+ ``ETHTOOL_MSG_MODULE_SET`` set transceiver module parameters
+ ``ETHTOOL_MSG_MODULE_GET`` get transceiver module parameters
+ ===================================== =================================
Kernel to userspace:
@@ -252,6 +259,7 @@ Kernel to userspace:
``ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY`` read SFP module EEPROM
``ETHTOOL_MSG_STATS_GET_REPLY`` standard statistics
``ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY`` PHC virtual clocks info
+ ``ETHTOOL_MSG_MODULE_GET_REPLY`` transceiver module parameters
======================================== =================================
``GET`` requests are sent by userspace applications to retrieve device
@@ -520,6 +528,8 @@ Link extended states:
power required from cable or module
``ETHTOOL_LINK_EXT_STATE_OVERHEAT`` The module is overheated
+
+ ``ETHTOOL_LINK_EXT_STATE_MODULE`` Transceiver module issue
================================================ ============================================
Link extended substates:
@@ -613,6 +623,14 @@ Link extended substates:
``ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE`` Cable test failure
=================================================== ============================================
+ Transceiver module issue substates:
+
+ =================================================== ============================================
+ ``ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY`` The CMIS Module State Machine did not reach
+ the ModuleReady state. For example, if the
+ module is stuck at ModuleFault state
+ =================================================== ============================================
+
DEBUG_GET
=========
@@ -1521,6 +1539,63 @@ Kernel response contents:
``ETHTOOL_A_PHC_VCLOCKS_INDEX`` s32 PHC index array
==================================== ====== ==========================
+MODULE_GET
+==========
+
+Gets transceiver module parameters.
+
+Request contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_MODULE_HEADER`` nested request header
+ ===================================== ====== ==========================
+
+Kernel response contents:
+
+ ====================================== ====== ==========================
+ ``ETHTOOL_A_MODULE_HEADER`` nested reply header
+ ``ETHTOOL_A_MODULE_POWER_MODE_POLICY`` u8 power mode policy
+ ``ETHTOOL_A_MODULE_POWER_MODE`` u8 operational power mode
+ ====================================== ====== ==========================
+
+The optional ``ETHTOOL_A_MODULE_POWER_MODE_POLICY`` attribute encodes the
+transceiver module power mode policy enforced by the host. The default policy
+is driver-dependent, but "auto" is the recommended default and it should be
+implemented by new drivers and drivers where conformance to a legacy behavior
+is not critical.
+
+The optional ``ETHTOOL_A_MODULE_POWER_MODE`` attribute encodes the operational
+power mode of the transceiver module. It is only reported when a module is
+plugged in. Possible values are:
+
+.. kernel-doc:: include/uapi/linux/ethtool.h
+ :identifiers: ethtool_module_power_mode
+
+MODULE_SET
+==========
+
+Sets transceiver module parameters.
+
+Request contents:
+
+ ====================================== ====== ==========================
+ ``ETHTOOL_A_MODULE_HEADER`` nested request header
+ ``ETHTOOL_A_MODULE_POWER_MODE_POLICY`` u8 power mode policy
+ ====================================== ====== ==========================
+
+When set, the optional ``ETHTOOL_A_MODULE_POWER_MODE_POLICY`` attribute is used
+to set the transceiver module power policy enforced by the host. Possible
+values are:
+
+.. kernel-doc:: include/uapi/linux/ethtool.h
+ :identifiers: ethtool_module_power_mode_policy
+
+For SFF-8636 modules, low power mode is forced by the host according to table
+6-10 in revision 2.10a of the specification.
+
+For CMIS modules, low power mode is forced by the host according to table 6-12
+in revision 5.0 of the specification.
+
Request translation
===================
@@ -1620,4 +1695,6 @@ are netlink only.
n/a ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT``
n/a ``ETHTOOL_MSG_TUNNEL_INFO_GET``
n/a ``ETHTOOL_MSG_PHC_VCLOCKS_GET``
+ n/a ``ETHTOOL_MSG_MODULE_GET``
+ n/a ``ETHTOOL_MSG_MODULE_SET``
=================================== =====================================
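
Assuming an ethtool(8) userspace recent enough to implement these messages, the
two requests map to the following commands (interface name hypothetical)::

    $ ethtool --show-module eth0                        # ETHTOOL_MSG_MODULE_GET
    $ ethtool --set-module eth0 power-mode-policy auto  # ETHTOOL_MSG_MODULE_SET
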
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index d91ab28718d4..16b8bf72feaf 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -989,14 +989,6 @@ tcp_challenge_ack_limit - INTEGER
in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks)
Default: 1000
-tcp_rx_skb_cache - BOOLEAN
- Controls a per TCP socket cache of one skb, that might help
- performance of some workloads. This might be dangerous
- on systems with a lot of TCP sockets, since it increases
- memory usage.
-
- Default: 0 (disabled)
-
UDP variables
=============
diff --git a/Documentation/networking/ipvs-sysctl.rst b/Documentation/networking/ipvs-sysctl.rst
index 2afccc63856e..95ef56d62077 100644
--- a/Documentation/networking/ipvs-sysctl.rst
+++ b/Documentation/networking/ipvs-sysctl.rst
@@ -300,3 +300,14 @@ sync_version - INTEGER
Kernels with this sync_version entry are able to receive messages
of both version 1 and version 2 of the synchronisation protocol.
+
+run_estimation - BOOLEAN
+ 0 - disabled
+ not 0 - enabled (default)
+
+	If disabled, the estimation will be stopped, and you will not see
+	any updates to the speed estimation data.
+
+	You can always re-enable estimation by setting this value to 1.
+	Be careful, though: the first estimation after re-enabling is not
+	accurate.
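
IPVS sysctls are exposed under ``net.ipv4.vs``; a quick sketch of toggling the
estimator as described above::

    $ sysctl net.ipv4.vs.run_estimation
    $ sysctl -w net.ipv4.vs.run_estimation=0
    $ sysctl -w net.ipv4.vs.run_estimation=1
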
diff --git a/Documentation/networking/mctp.rst b/Documentation/networking/mctp.rst
index 6100cdc220f6..46f74bffce0f 100644
--- a/Documentation/networking/mctp.rst
+++ b/Documentation/networking/mctp.rst
@@ -59,11 +59,11 @@ specified with a ``sockaddr`` type, with a single-byte endpoint address:
};
struct sockaddr_mctp {
- unsigned short int smctp_family;
- int smctp_network;
- struct mctp_addr smctp_addr;
- __u8 smctp_type;
- __u8 smctp_tag;
+ __kernel_sa_family_t smctp_family;
+ unsigned int smctp_network;
+ struct mctp_addr smctp_addr;
+ __u8 smctp_type;
+ __u8 smctp_tag;
};
#define MCTP_NET_ANY 0x0
@@ -211,3 +211,62 @@ remote address is already known, or the message does not require a reply.
Like the send calls, sockets will only receive responses to requests they have
sent (TO=1) and may only respond (TO=0) to requests they have received.
+
+Kernel internals
+================
+
+There are a few possible packet flows in the MCTP stack:
+
+1. local TX to remote endpoint, message <= MTU::
+
+ sendmsg()
+ -> mctp_local_output()
+ : route lookup
+ -> rt->output() (== mctp_route_output)
+ -> dev_queue_xmit()
+
+2. local TX to remote endpoint, message > MTU::
+
+ sendmsg()
+ -> mctp_local_output()
+ -> mctp_do_fragment_route()
+ : creates packet-sized skbs. For each new skb:
+ -> rt->output() (== mctp_route_output)
+ -> dev_queue_xmit()
+
+3. remote TX to local endpoint, single-packet message::
+
+ mctp_pkttype_receive()
+ : route lookup
+ -> rt->output() (== mctp_route_input)
+ : sk_key lookup
+ -> sock_queue_rcv_skb()
+
+4. remote TX to local endpoint, multiple-packet message::
+
+ mctp_pkttype_receive()
+ : route lookup
+ -> rt->output() (== mctp_route_input)
+ : sk_key lookup
+ : stores skb in struct sk_key->reasm_head
+
+ mctp_pkttype_receive()
+ : route lookup
+ -> rt->output() (== mctp_route_input)
+ : sk_key lookup
+ : finds existing reassembly in sk_key->reasm_head
+ : appends new fragment
+ -> sock_queue_rcv_skb()
+
+Key refcounts
+-------------
+
+ * keys are refed by:
+
+ - a skb: during route output, stored in ``skb->cb``.
+
+ - netns and sock lists.
+
+ * keys can be associated with a device, in which case they hold a
+ reference to the dev (set through ``key->dev``, counted through
+ ``dev->key_count``). Multiple keys can reference the device.
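
For exercising the flows above from userspace, link, address and route setup
can be done with the ``mctp`` utility; the tool invocations and interface name
below are assumptions for illustration, not part of this patch::

    $ mctp link set mctpi2c0 up
    $ mctp addr add 8 dev mctpi2c0
    $ mctp route add 9 via mctpi2c0

A socket using the ``sockaddr_mctp`` layout shown earlier can then exchange
messages over the configured route.
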
diff --git a/Documentation/userspace-api/vduse.rst b/Documentation/userspace-api/vduse.rst
index 42ef59ea5314..bdb880e01132 100644
--- a/Documentation/userspace-api/vduse.rst
+++ b/Documentation/userspace-api/vduse.rst
@@ -18,7 +18,7 @@ types can be added after the security issue of corresponding device driver
is clarified or fixed in the future.
Create/Destroy VDUSE devices
-------------------------
+----------------------------
VDUSE devices are created as follows:
diff --git a/MAINTAINERS b/MAINTAINERS
index eeb4c70b3d5b..4bd5819869ca 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -414,7 +414,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
F: drivers/acpi/pmic/
ACPI THERMAL DRIVER
-M: Zhang Rui <[email protected]>
+M: Rafael J. Wysocki <[email protected]>
+R: Zhang Rui <[email protected]>
S: Supported
W: https://01.org/linux-acpi
@@ -810,7 +811,7 @@ F: Documentation/devicetree/bindings/dma/altr,msgdma.yaml
F: drivers/dma/altera-msgdma.c
ALTERA PIO DRIVER
-M: Joyce Ooi <[email protected]>
+M: Mun Yew Tham <[email protected]>
S: Maintained
F: drivers/gpio/gpio-altera.c
@@ -977,12 +978,12 @@ L: [email protected]
S: Maintained
F: drivers/platform/x86/amd-pmc.*
-AMD POWERPLAY
+AMD POWERPLAY AND SWSMU
M: Evan Quan <[email protected]>
S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
-F: drivers/gpu/drm/amd/pm/powerplay/
+F: drivers/gpu/drm/amd/pm/
AMD PTDMA DRIVER
M: Sanjay R Mehta <[email protected]>
@@ -1275,6 +1276,7 @@ F: drivers/input/mouse/bcm5974.c
APPLE DART IOMMU DRIVER
M: Sven Peter <[email protected]>
+R: Alyssa Rosenzweig <[email protected]>
S: Maintained
F: Documentation/devicetree/bindings/iommu/apple,dart.yaml
@@ -1711,6 +1713,8 @@ F: drivers/*/*alpine*
ARM/APPLE MACHINE SUPPORT
M: Hector Martin <[email protected]>
+M: Sven Peter <[email protected]>
+R: Alyssa Rosenzweig <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Maintained
W: https://asahilinux.org
@@ -2236,6 +2240,7 @@ F: arch/arm/mach-pxa/mioa701.c
ARM/MStar/Sigmastar Armv7 SoC support
M: Daniel Palmer <[email protected]>
+M: Romain Perier <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Maintained
W: http://linux-chenxing.org/
@@ -2712,6 +2717,7 @@ F: drivers/power/reset/keystone-reset.c
ARM/TEXAS INSTRUMENTS K3 ARCHITECTURE
M: Nishanth Menon <[email protected]>
+M: Vignesh Raghavendra <[email protected]>
M: Tero Kristo <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Supported
@@ -2804,9 +2810,8 @@ F: arch/arm/mach-pxa/include/mach/vpac270.h
F: arch/arm/mach-pxa/vpac270.c
ARM/VT8500 ARM ARCHITECTURE
-M: Tony Prisk <[email protected]>
L: [email protected] (moderated for non-subscribers)
-S: Maintained
+S: Orphan
F: Documentation/devicetree/bindings/i2c/i2c-wmt.txt
F: arch/arm/mach-vt8500/
F: drivers/clocksource/timer-vt8500.c
@@ -2894,6 +2899,12 @@ S: Maintained
F: Documentation/hwmon/asc7621.rst
F: drivers/hwmon/asc7621.c
+ASIX AX88796C SPI ETHERNET ADAPTER
+M: Łukasz Stelmach <[email protected]>
+S: Maintained
+F: Documentation/devicetree/bindings/net/asix,ax88796c.yaml
+F: drivers/net/ethernet/asix/ax88796c_*
+
ASPEED PINCTRL DRIVERS
M: Andrew Jeffery <[email protected]>
L: [email protected] (moderated for non-subscribers)
@@ -2962,7 +2973,7 @@ F: crypto/async_tx/
F: include/linux/async_tx.h
AT24 EEPROM DRIVER
-M: Bartosz Golaszewski <[email protected]>
+M: Bartosz Golaszewski <[email protected]>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
@@ -3385,9 +3396,11 @@ F: Documentation/networking/filter.rst
F: Documentation/userspace-api/ebpf/
F: arch/*/net/*
F: include/linux/bpf*
+F: include/linux/btf*
F: include/linux/filter.h
F: include/trace/events/xdp.h
F: include/uapi/linux/bpf*
+F: include/uapi/linux/btf*
F: include/uapi/linux/filter.h
F: kernel/bpf/
F: kernel/trace/bpf_trace.c
@@ -3821,7 +3834,6 @@ F: drivers/scsi/mpi3mr/
BROADCOM NETXTREME-E ROCE DRIVER
M: Selvin Xavier <[email protected]>
-M: Naresh Kumar PBS <[email protected]>
S: Supported
W: http://www.broadcom.com
@@ -4656,7 +4668,7 @@ W: http://linux-cifs.samba.org/
T: git git://git.samba.org/sfrench/cifs-2.6.git
F: Documentation/admin-guide/cifs/
F: fs/cifs/
-F: fs/cifs_common/
+F: fs/smbfs_common/
COMPACTPCI HOTPLUG CORE
M: Scott Murray <[email protected]>
@@ -7006,7 +7018,6 @@ F: drivers/net/mdio/fwnode_mdio.c
F: drivers/net/mdio/of_mdio.c
F: drivers/net/pcs/
F: drivers/net/phy/
-F: drivers/of/of_net.c
F: include/dt-bindings/net/qca-ar803x.h
F: include/linux/*mdio*.h
F: include/linux/mdio/*.h
@@ -7018,6 +7029,7 @@ F: include/linux/platform_data/mdio-gpio.h
F: include/trace/events/mdio.h
F: include/uapi/linux/mdio.h
F: include/uapi/linux/mii.h
+F: net/core/of_net.c
EXFAT FILE SYSTEM
M: Namjae Jeon <[email protected]>
@@ -7337,10 +7349,11 @@ F: include/uapi/linux/fpga-dfl.h
FPGA MANAGER FRAMEWORK
M: Moritz Fischer <[email protected]>
+M: Wu Hao <[email protected]>
+M: Xu Yilun <[email protected]>
R: Tom Rix <[email protected]>
S: Maintained
-W: http://www.rocketboards.org
Q: http://patchwork.kernel.org/project/linux-fpga/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mdf/linux-fpga.git
F: Documentation/devicetree/bindings/fpga/
@@ -7434,7 +7447,7 @@ FREESCALE IMX / MXC FEC DRIVER
M: Joakim Zhang <[email protected]>
S: Maintained
-F: Documentation/devicetree/bindings/net/fsl-fec.txt
+F: Documentation/devicetree/bindings/net/fsl,fec.yaml
F: drivers/net/ethernet/freescale/fec.h
F: drivers/net/ethernet/freescale/fec_main.c
F: drivers/net/ethernet/freescale/fec_ptp.c
@@ -7986,7 +7999,7 @@ F: include/linux/gpio/regmap.h
GPIO SUBSYSTEM
M: Linus Walleij <[email protected]>
-M: Bartosz Golaszewski <[email protected]>
+M: Bartosz Golaszewski <[email protected]>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
@@ -8608,9 +8621,8 @@ F: Documentation/devicetree/bindings/iio/humidity/st,hts221.yaml
F: drivers/iio/humidity/hts221*
HUAWEI ETHERNET DRIVER
-M: Bin Luo <[email protected]>
-S: Supported
+S: Orphan
F: Documentation/networking/device_drivers/ethernet/huawei/hinic.rst
F: drivers/net/ethernet/huawei/hinic/
@@ -9302,7 +9314,7 @@ S: Maintained
F: drivers/platform/x86/intel/atomisp2/led.c
INTEL BIOS SAR INT1092 DRIVER
-M: Shravan S <[email protected]>
+M: Shravan Sudhakar <[email protected]>
M: Intel Corporation <[email protected]>
S: Maintained
@@ -9624,7 +9636,7 @@ F: include/uapi/linux/isst_if.h
F: tools/power/x86/intel-speed-select/
INTEL STRATIX10 FIRMWARE DRIVERS
-M: Richard Gong <[email protected]>
+M: Dinh Nguyen <[email protected]>
S: Maintained
F: Documentation/ABI/testing/sysfs-devices-platform-stratix10-rsu
@@ -10194,8 +10206,8 @@ M: Hyunchul Lee <[email protected]>
S: Maintained
T: git git://git.samba.org/ksmbd.git
-F: fs/cifs_common/
F: fs/ksmbd/
+F: fs/smbfs_common/
KERNEL UNIT TESTING FRAMEWORK (KUnit)
M: Brendan Higgins <[email protected]>
@@ -10274,7 +10286,6 @@ KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
M: Christian Borntraeger <[email protected]>
M: Janosch Frank <[email protected]>
R: David Hildenbrand <[email protected]>
-R: Cornelia Huck <[email protected]>
R: Claudio Imbrenda <[email protected]>
S: Supported
@@ -11148,6 +11159,7 @@ S: Maintained
F: Documentation/devicetree/bindings/net/dsa/marvell.txt
F: Documentation/networking/devlink/mv88e6xxx.rst
F: drivers/net/dsa/mv88e6xxx/
+F: include/linux/dsa/mv88e6xxx.h
F: include/linux/platform_data/mv88e6xxx.h
MARVELL ARMADA 3700 PHY DRIVERS
@@ -11367,7 +11379,7 @@ F: Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml
F: drivers/iio/proximity/mb1232.c
MAXIM MAX77650 PMIC MFD DRIVER
-M: Bartosz Golaszewski <[email protected]>
+M: Bartosz Golaszewski <[email protected]>
S: Maintained
F: Documentation/devicetree/bindings/*/*max77650.yaml
@@ -13255,9 +13267,9 @@ F: Documentation/scsi/NinjaSCSI.rst
F: drivers/scsi/nsp32*
NIOS2 ARCHITECTURE
-M: Ley Foon Tan <[email protected]>
+M: Dinh Nguyen <[email protected]>
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
F: arch/nios2/
NITRO ENCLAVES (NE)
@@ -14342,7 +14354,8 @@ F: Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
F: drivers/pci/controller/pci-ixp4xx.c
PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
-M: Jonathan Derrick <[email protected]>
+M: Nirmal Patel <[email protected]>
+R: Jonathan Derrick <[email protected]>
S: Supported
F: drivers/pci/controller/vmd.c
@@ -16295,6 +16308,7 @@ S390
M: Heiko Carstens <[email protected]>
M: Vasily Gorbik <[email protected]>
M: Christian Borntraeger <[email protected]>
+R: Alexander Gordeev <[email protected]>
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -16373,7 +16387,6 @@ F: drivers/s390/crypto/vfio_ap_ops.c
F: drivers/s390/crypto/vfio_ap_private.h
S390 VFIO-CCW DRIVER
-M: Cornelia Huck <[email protected]>
M: Eric Farman <[email protected]>
M: Matthew Rosato <[email protected]>
R: Halil Pasic <[email protected]>
@@ -16650,13 +16663,6 @@ M: Lubomir Rintel <[email protected]>
S: Supported
F: drivers/char/pcmcia/scr24x_cs.c
-SCSI CDROM DRIVER
-M: Jens Axboe <[email protected]>
-S: Maintained
-W: http://www.kernel.dk
-F: drivers/scsi/sr*
-
SCSI RDMA PROTOCOL (SRP) INITIATOR
M: Bart Van Assche <[email protected]>
@@ -16955,7 +16961,6 @@ F: drivers/misc/sgi-xp/
SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
M: Karsten Graul <[email protected]>
-M: Guvenc Gulce <[email protected]>
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -17800,7 +17805,6 @@ F: drivers/staging/nvec/
STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON)
M: Jens Frederich <[email protected]>
-M: Daniel Drake <[email protected]>
M: Jon Nettleton <[email protected]>
S: Maintained
W: http://wiki.laptop.org/go/DCON
@@ -17891,7 +17895,8 @@ M: Olivier Moysan <[email protected]>
M: Arnaud Pouliquen <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Maintained
-F: Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
+F: Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+F: Documentation/devicetree/bindings/sound/st,stm32-*.yaml
F: sound/soc/stm/
STM32 TIMER/LPTIMER DRIVERS
@@ -17968,10 +17973,11 @@ F: Documentation/admin-guide/svga.rst
F: arch/x86/boot/video*
SWIOTLB SUBSYSTEM
-M: Konrad Rzeszutek Wilk <[email protected]>
+M: Christoph Hellwig <[email protected]>
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
+W: http://git.infradead.org/users/hch/dma-mapping.git
+T: git git://git.infradead.org/users/hch/dma-mapping.git
F: arch/*/kernel/pci-swiotlb.c
F: include/linux/swiotlb.h
F: kernel/dma/swiotlb.c
@@ -17987,7 +17993,7 @@ F: net/switchdev/
SY8106A REGULATOR DRIVER
M: Icenowy Zheng <[email protected]>
S: Maintained
-F: Documentation/devicetree/bindings/regulator/sy8106a-regulator.txt
+F: Documentation/devicetree/bindings/regulator/silergy,sy8106a.yaml
F: drivers/regulator/sy8106a-regulator.c
SYNC FILE FRAMEWORK
@@ -18554,13 +18560,14 @@ T: git git://linuxtv.org/media_tree.git
F: drivers/media/radio/radio-raremono.c
THERMAL
-M: Zhang Rui <[email protected]>
+M: Rafael J. Wysocki <[email protected]>
M: Daniel Lezcano <[email protected]>
R: Amit Kucheria <[email protected]>
+R: Zhang Rui <[email protected]>
S: Supported
Q: https://patchwork.kernel.org/project/linux-pm/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/thermal/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git thermal
F: Documentation/devicetree/bindings/thermal/
F: drivers/thermal/
F: include/linux/cpu_cooling.h
@@ -18689,7 +18696,7 @@ F: include/linux/clk/ti.h
TI DAVINCI MACHINE SUPPORT
M: Sekhar Nori <[email protected]>
-R: Bartosz Golaszewski <[email protected]>
+R: Bartosz Golaszewski <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
@@ -19288,13 +19295,12 @@ S: Maintained
F: drivers/usb/misc/chaoskey.c
USB CYPRESS C67X00 DRIVER
-M: Peter Korsgaard <[email protected]>
-S: Maintained
+S: Orphan
F: drivers/usb/c67x00/
USB DAVICOM DM9601 DRIVER
-M: Peter Korsgaard <[email protected]>
+M: Peter Korsgaard <[email protected]>
S: Maintained
W: http://www.linux-usb.org/usbnet
@@ -20474,7 +20480,6 @@ F: samples/bpf/xdpsock*
F: tools/lib/bpf/xsk*
XEN BLOCK SUBSYSTEM
-M: Konrad Rzeszutek Wilk <[email protected]>
M: Roger Pau Monné <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Supported
@@ -20522,7 +20527,7 @@ S: Supported
F: drivers/net/xen-netback/*
XEN PCI SUBSYSTEM
-M: Konrad Rzeszutek Wilk <[email protected]>
+M: Juergen Gross <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Supported
F: arch/x86/pci/*xen*
@@ -20545,7 +20550,8 @@ S: Supported
F: sound/xen/*
XEN SWIOTLB SUBSYSTEM
-M: Konrad Rzeszutek Wilk <[email protected]>
+M: Juergen Gross <[email protected]>
+M: Stefano Stabellini <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Supported
@@ -20704,7 +20710,6 @@ S: Maintained
F: mm/zbud.c
ZD1211RW WIRELESS DRIVER
-M: Daniel Drake <[email protected]>
M: Ulrich Kunitz <[email protected]>
L: [email protected] (subscribers-only)
diff --git a/Makefile b/Makefile
index 34a0afc3a8eb..91297670da8e 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc6
NAME = Opossums on Parade
# *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 02e5b673bdad..4e87783c90ad 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -20,7 +20,7 @@ config ALPHA
select NEED_SG_DMA_LENGTH
select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
- select GENERIC_PCI_IOMAP if PCI
+ select GENERIC_PCI_IOMAP
select AUTO_IRQ_AFFINITY if SMP
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
@@ -199,7 +199,6 @@ config ALPHA_EIGER
config ALPHA_JENSEN
bool "Jensen"
- depends on BROKEN
select HAVE_EISA
help
DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
diff --git a/arch/alpha/include/asm/asm-prototypes.h b/arch/alpha/include/asm/asm-prototypes.h
index b34cc1f06720..c8ae46fc2e74 100644
--- a/arch/alpha/include/asm/asm-prototypes.h
+++ b/arch/alpha/include/asm/asm-prototypes.h
@@ -16,3 +16,4 @@ extern void __divlu(void);
extern void __remlu(void);
extern void __divqu(void);
extern void __remqu(void);
+extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, unsigned long, unsigned long);
diff --git a/arch/alpha/include/asm/jensen.h b/arch/alpha/include/asm/jensen.h
index 916895155a88..1c4131453db2 100644
--- a/arch/alpha/include/asm/jensen.h
+++ b/arch/alpha/include/asm/jensen.h
@@ -111,18 +111,18 @@ __EXTERN_INLINE void jensen_set_hae(unsigned long addr)
* convinced that I need one of the newer machines.
*/
-static inline unsigned int jensen_local_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_local_inb(unsigned long addr)
{
return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
}
-static inline void jensen_local_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_local_outb(u8 b, unsigned long addr)
{
*(vuip)((addr << 9) + EISA_VL82C106) = b;
mb();
}
-static inline unsigned int jensen_bus_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_bus_inb(unsigned long addr)
{
long result;
@@ -131,7 +131,7 @@ static inline unsigned int jensen_bus_inb(unsigned long addr)
return __kernel_extbl(result, addr & 3);
}
-static inline void jensen_bus_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_bus_outb(u8 b, unsigned long addr)
{
jensen_set_hae(0);
*(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 1dd9baf4a6c2..284d28755b8d 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -131,6 +131,8 @@
#define SO_BUF_LOCK 72
+#define SO_RESERVE_MEM 73
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index e5d870ff225f..5c9c88428124 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -7,6 +7,11 @@
*
* Code supporting the Jensen.
*/
+#define __EXTERN_INLINE
+#include <asm/io.h>
+#include <asm/jensen.h>
+#undef __EXTERN_INLINE
+
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -17,11 +22,6 @@
#include <asm/ptrace.h>
-#define __EXTERN_INLINE inline
-#include <asm/io.h>
-#include <asm/jensen.h>
-#undef __EXTERN_INLINE
-
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 854d5e79979e..1cc74f7b50ef 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -14,6 +14,7 @@ ev6-$(CONFIG_ALPHA_EV6) := ev6-
ev67-$(CONFIG_ALPHA_EV67) := ev67-
lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
+ udiv-qrnnd.o \
udelay.o \
$(ev6-y)memset.o \
$(ev6-y)memcpy.o \
diff --git a/arch/alpha/math-emu/qrnnd.S b/arch/alpha/lib/udiv-qrnnd.S
index d6373ec1bff9..b887aa5428e5 100644
--- a/arch/alpha/math-emu/qrnnd.S
+++ b/arch/alpha/lib/udiv-qrnnd.S
@@ -25,6 +25,7 @@
# along with GCC; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA.
+#include <asm/export.h>
.set noreorder
.set noat
@@ -161,3 +162,4 @@ $Odd:
ret $31,($26),1
.end __udiv_qrnnd
+EXPORT_SYMBOL(__udiv_qrnnd)
diff --git a/arch/alpha/math-emu/Makefile b/arch/alpha/math-emu/Makefile
index 6eda0973e183..3206402a8792 100644
--- a/arch/alpha/math-emu/Makefile
+++ b/arch/alpha/math-emu/Makefile
@@ -7,4 +7,4 @@ ccflags-y := -w
obj-$(CONFIG_MATHEMU) += math-emu.o
-math-emu-objs := math.o qrnnd.o
+math-emu-objs := math.o
diff --git a/arch/alpha/math-emu/math.c b/arch/alpha/math-emu/math.c
index f7cef66af88d..4212258f3cfd 100644
--- a/arch/alpha/math-emu/math.c
+++ b/arch/alpha/math-emu/math.c
@@ -403,5 +403,3 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
egress:
return si_code;
}
-
-EXPORT_SYMBOL(__udiv_qrnnd);
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 9320b04c04bf..4cf45a99fd79 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -26,11 +26,6 @@ extern char empty_zero_page[PAGE_SIZE];
extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
-/* Macro to mark a page protection as uncacheable */
-#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
-
-extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
-
/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index fc196421b2ce..59baf6c132a7 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1989,8 +1989,6 @@ config ARCH_HIBERNATION_POSSIBLE
endmenu
-source "drivers/firmware/Kconfig"
-
if CRYPTO
source "arch/arm/crypto/Kconfig"
endif
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
index 614999dcb990..cd4672501add 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
@@ -71,7 +71,6 @@
isc: isc@f0008000 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_isc_base &pinctrl_isc_data_8bit &pinctrl_isc_data_9_10 &pinctrl_isc_data_11_12>;
- status = "okay";
};
qspi1: spi@f0024000 {
diff --git a/arch/arm/boot/dts/at91-sama7g5ek.dts b/arch/arm/boot/dts/at91-sama7g5ek.dts
index 4cbed98cc2f4..f3d6aaa3a78d 100644
--- a/arch/arm/boot/dts/at91-sama7g5ek.dts
+++ b/arch/arm/boot/dts/at91-sama7g5ek.dts
@@ -196,11 +196,13 @@
regulator-state-standby {
regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1350000>;
regulator-mode = <4>;
};
regulator-state-mem {
regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1350000>;
regulator-mode = <4>;
};
};
@@ -353,7 +355,10 @@
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_gmac0_default &pinctrl_gmac0_txck_default &pinctrl_gmac0_phy_irq>;
+ pinctrl-0 = <&pinctrl_gmac0_default
+ &pinctrl_gmac0_mdio_default
+ &pinctrl_gmac0_txck_default
+ &pinctrl_gmac0_phy_irq>;
phy-mode = "rgmii-id";
status = "okay";
@@ -368,7 +373,9 @@
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_gmac1_default &pinctrl_gmac1_phy_irq>;
+ pinctrl-0 = <&pinctrl_gmac1_default
+ &pinctrl_gmac1_mdio_default
+ &pinctrl_gmac1_phy_irq>;
phy-mode = "rmii";
status = "okay";
@@ -423,14 +430,20 @@
<PIN_PA15__G0_TXEN>,
<PIN_PA30__G0_RXCK>,
<PIN_PA18__G0_RXDV>,
- <PIN_PA22__G0_MDC>,
- <PIN_PA23__G0_MDIO>,
<PIN_PA25__G0_125CK>;
+ slew-rate = <0>;
+ bias-disable;
+ };
+
+ pinctrl_gmac0_mdio_default: gmac0_mdio_default {
+ pinmux = <PIN_PA22__G0_MDC>,
+ <PIN_PA23__G0_MDIO>;
bias-disable;
};
pinctrl_gmac0_txck_default: gmac0_txck_default {
pinmux = <PIN_PA24__G0_TXCK>;
+ slew-rate = <0>;
bias-pull-up;
};
@@ -447,8 +460,13 @@
<PIN_PD25__G1_RX0>,
<PIN_PD26__G1_RX1>,
<PIN_PD27__G1_RXER>,
- <PIN_PD24__G1_RXDV>,
- <PIN_PD28__G1_MDC>,
+ <PIN_PD24__G1_RXDV>;
+ slew-rate = <0>;
+ bias-disable;
+ };
+
+ pinctrl_gmac1_mdio_default: gmac1_mdio_default {
+ pinmux = <PIN_PD28__G1_MDC>,
<PIN_PD29__G1_MDIO>;
bias-disable;
};
@@ -540,6 +558,7 @@
<PIN_PA8__SDMMC0_DAT5>,
<PIN_PA9__SDMMC0_DAT6>,
<PIN_PA10__SDMMC0_DAT7>;
+ slew-rate = <0>;
bias-pull-up;
};
@@ -547,6 +566,7 @@
pinmux = <PIN_PA0__SDMMC0_CK>,
<PIN_PA2__SDMMC0_RSTN>,
<PIN_PA11__SDMMC0_DS>;
+ slew-rate = <0>;
bias-pull-up;
};
};
@@ -558,6 +578,7 @@
<PIN_PC0__SDMMC1_DAT1>,
<PIN_PC1__SDMMC1_DAT2>,
<PIN_PC2__SDMMC1_DAT3>;
+ slew-rate = <0>;
bias-pull-up;
};
@@ -566,6 +587,7 @@
<PIN_PB28__SDMMC1_RSTN>,
<PIN_PC5__SDMMC1_1V8SEL>,
<PIN_PC4__SDMMC1_CD>;
+ slew-rate = <0>;
bias-pull-up;
};
};
@@ -577,11 +599,13 @@
<PIN_PD6__SDMMC2_DAT1>,
<PIN_PD7__SDMMC2_DAT2>,
<PIN_PD8__SDMMC2_DAT3>;
+ slew-rate = <0>;
bias-pull-up;
};
ck {
pinmux = <PIN_PD4__SDMMC2_CK>;
+ slew-rate = <0>;
bias-pull-up;
};
};
@@ -634,6 +658,15 @@
pinctrl-0 = <&pinctrl_sdmmc2_default>;
};
+&shdwc {
+ atmel,shdwc-debouncer = <976>;
+ status = "okay";
+
+ input@0 {
+ reg = <0>;
+ };
+};
+
&spdifrx {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spdifrx_default>;
diff --git a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
index f24bdd0870a5..72ce80fbf266 100644
--- a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
+++ b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
@@ -40,8 +40,8 @@
regulator-always-on;
regulator-settling-time-us = <5000>;
gpios = <&expgpio 4 GPIO_ACTIVE_HIGH>;
- states = <1800000 0x1
- 3300000 0x0>;
+ states = <1800000 0x1>,
+ <3300000 0x0>;
status = "okay";
};
@@ -217,15 +217,16 @@
};
&pcie0 {
- pci@1,0 {
+ pci@0,0 {
+ device_type = "pci";
#address-cells = <3>;
#size-cells = <2>;
ranges;
reg = <0 0 0 0 0>;
- usb@1,0 {
- reg = <0x10000 0 0 0 0>;
+ usb@0,0 {
+ reg = <0 0 0 0 0>;
resets = <&reset RASPBERRYPI_FIRMWARE_RESET_ID_USB>;
};
};
diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi
index b8a4096192aa..3b60297af7f6 100644
--- a/arch/arm/boot/dts/bcm2711.dtsi
+++ b/arch/arm/boot/dts/bcm2711.dtsi
@@ -300,6 +300,14 @@
status = "disabled";
};
+ vec: vec@7ec13000 {
+ compatible = "brcm,bcm2711-vec";
+ reg = <0x7ec13000 0x1000>;
+ clocks = <&clocks BCM2835_CLOCK_VEC>;
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
dvp: clock@7ef00000 {
compatible = "brcm,brcm2711-dvp";
reg = <0x7ef00000 0x10>;
@@ -532,8 +540,8 @@
compatible = "brcm,genet-mdio-v5";
reg = <0xe14 0x8>;
reg-names = "mdio";
- #address-cells = <0x0>;
- #size-cells = <0x1>;
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
};
};
};
diff --git a/arch/arm/boot/dts/bcm2835-common.dtsi b/arch/arm/boot/dts/bcm2835-common.dtsi
index 4119271c979d..c25e797b9060 100644
--- a/arch/arm/boot/dts/bcm2835-common.dtsi
+++ b/arch/arm/boot/dts/bcm2835-common.dtsi
@@ -106,6 +106,14 @@
status = "okay";
};
+ vec: vec@7e806000 {
+ compatible = "brcm,bcm2835-vec";
+ reg = <0x7e806000 0x1000>;
+ clocks = <&clocks BCM2835_CLOCK_VEC>;
+ interrupts = <2 27>;
+ status = "disabled";
+ };
+
pixelvalve@7e807000 {
compatible = "brcm,bcm2835-pixelvalve2";
reg = <0x7e807000 0x100>;
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 0f3be55201a5..a3e06b680947 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -464,14 +464,6 @@
status = "disabled";
};
- vec: vec@7e806000 {
- compatible = "brcm,bcm2835-vec";
- reg = <0x7e806000 0x1000>;
- clocks = <&clocks BCM2835_CLOCK_VEC>;
- interrupts = <2 27>;
- status = "disabled";
- };
-
usb: usb@7e980000 {
compatible = "brcm,bcm2835-usb";
reg = <0x7e980000 0x10000>;
diff --git a/arch/arm/boot/dts/imx53-m53menlo.dts b/arch/arm/boot/dts/imx53-m53menlo.dts
index d3082b9774e4..4f88e96d81dd 100644
--- a/arch/arm/boot/dts/imx53-m53menlo.dts
+++ b/arch/arm/boot/dts/imx53-m53menlo.dts
@@ -56,6 +56,7 @@
panel {
compatible = "edt,etm0700g0dh6";
pinctrl-0 = <&pinctrl_display_gpio>;
+ pinctrl-names = "default";
enable-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>;
port {
@@ -76,8 +77,7 @@
regulator-name = "vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
- gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>;
- enable-active-high;
+ gpio = <&gpio1 2 0>;
};
};
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
index cb8b539eb29d..e5c4dc65fbab 100644
--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
@@ -5,6 +5,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
#include <dt-bindings/pwm/pwm.h>
/ {
@@ -277,6 +278,7 @@
led-cur = /bits/ 8 <0x20>;
max-cur = /bits/ 8 <0x60>;
reg = <0>;
+ color = <LED_COLOR_ID_RED>;
};
chan@1 {
@@ -284,6 +286,7 @@
led-cur = /bits/ 8 <0x20>;
max-cur = /bits/ 8 <0x60>;
reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
};
chan@2 {
@@ -291,6 +294,7 @@
led-cur = /bits/ 8 <0x20>;
max-cur = /bits/ 8 <0x60>;
reg = <2>;
+ color = <LED_COLOR_ID_BLUE>;
};
chan@3 {
@@ -298,6 +302,7 @@
led-cur = /bits/ 8 <0x0>;
max-cur = /bits/ 8 <0x0>;
reg = <3>;
+ color = <LED_COLOR_ID_WHITE>;
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-pico.dtsi b/arch/arm/boot/dts/imx6qdl-pico.dtsi
index 5de4ccb97916..f7a56d6b160c 100644
--- a/arch/arm/boot/dts/imx6qdl-pico.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-pico.dtsi
@@ -176,7 +176,18 @@
pinctrl-0 = <&pinctrl_enet>;
phy-mode = "rgmii-id";
phy-reset-gpios = <&gpio1 26 GPIO_ACTIVE_LOW>;
+ phy-handle = <&phy>;
status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy: ethernet-phy@1 {
+ reg = <1>;
+ qca,clk-out-frequency = <125000000>;
+ };
+ };
};
&hdmi {
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 5a63ca615722..99f4cf777a38 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -114,7 +114,7 @@
compatible = "micron,n25q256a", "jedec,spi-nor";
spi-max-frequency = <29000000>;
spi-rx-bus-width = <4>;
- spi-tx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
reg = <0>;
};
@@ -124,7 +124,7 @@
compatible = "micron,n25q256a", "jedec,spi-nor";
spi-max-frequency = <29000000>;
spi-rx-bus-width = <4>;
- spi-tx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
reg = <2>;
};
};
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
index 779cc536566d..a3fde3316c73 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
@@ -292,7 +292,7 @@
compatible = "micron,n25q256a", "jedec,spi-nor";
spi-max-frequency = <29000000>;
spi-rx-bus-width = <4>;
- spi-tx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
reg = <0>;
};
};
diff --git a/arch/arm/boot/dts/omap3430-sdp.dts b/arch/arm/boot/dts/omap3430-sdp.dts
index c5b903718414..7d530ae3483b 100644
--- a/arch/arm/boot/dts/omap3430-sdp.dts
+++ b/arch/arm/boot/dts/omap3430-sdp.dts
@@ -101,7 +101,7 @@
nand@1,0 {
compatible = "ti,omap2-nand";
- reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
+ reg = <1 0 4>; /* CS1, offset 0, IO size 4 */
interrupt-parent = <&gpmc>;
interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
<1 IRQ_TYPE_NONE>; /* termcount */
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index 0b2bed6e7adf..d1c1c6aab2b8 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -198,7 +198,7 @@
clock-frequency = <19200000>;
};
- pxo_board {
+ pxo_board: pxo_board {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <27000000>;
@@ -1148,22 +1148,21 @@
};
gpu: adreno-3xx@4300000 {
- compatible = "qcom,adreno-3xx";
+ compatible = "qcom,adreno-320.2", "qcom,adreno";
reg = <0x04300000 0x20000>;
reg-names = "kgsl_3d0_reg_memory";
interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "kgsl_3d0_irq";
clock-names =
- "core_clk",
- "iface_clk",
- "mem_clk",
- "mem_iface_clk";
+ "core",
+ "iface",
+ "mem",
+ "mem_iface";
clocks =
<&mmcc GFX3D_CLK>,
<&mmcc GFX3D_AHB_CLK>,
<&mmcc GFX3D_AXI_CLK>,
<&mmcc MMSS_IMEM_AHB_CLK>;
- qcom,chipid = <0x03020002>;
iommus = <&gfx3d 0
&gfx3d 1
@@ -1306,7 +1305,7 @@
reg-names = "dsi_pll", "dsi_phy", "dsi_phy_regulator";
clock-names = "iface_clk", "ref";
clocks = <&mmcc DSI_M_AHB_CLK>,
- <&cxo_board>;
+ <&pxo_board>;
};
diff --git a/arch/arm/boot/dts/sama7g5.dtsi b/arch/arm/boot/dts/sama7g5.dtsi
index cc6be6db7b80..6c58c151c6d9 100644
--- a/arch/arm/boot/dts/sama7g5.dtsi
+++ b/arch/arm/boot/dts/sama7g5.dtsi
@@ -75,6 +75,17 @@
#size-cells = <1>;
ranges;
+ securam: securam@e0000000 {
+ compatible = "microchip,sama7g5-securam", "atmel,sama5d2-securam", "mmio-sram";
+ reg = <0xe0000000 0x4000>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 18>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0xe0000000 0x4000>;
+ no-memory-wc;
+ status = "okay";
+ };
+
secumod: secumod@e0004000 {
compatible = "microchip,sama7g5-secumod", "atmel,sama5d2-secumod", "syscon";
reg = <0xe0004000 0x4000>;
@@ -111,6 +122,17 @@
clock-names = "td_slck", "md_slck", "main_xtal";
};
+ shdwc: shdwc@e001d010 {
+ compatible = "microchip,sama7g5-shdwc", "syscon";
+ reg = <0xe001d010 0x10>;
+ clocks = <&clk32k 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ atmel,wakeup-rtc-timer;
+ atmel,wakeup-rtt-timer;
+ status = "disabled";
+ };
+
rtt: rtt@e001d020 {
compatible = "microchip,sama7g5-rtt", "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
reg = <0xe001d020 0x30>;
@@ -137,6 +159,11 @@
clocks = <&clk32k 0>;
};
+ chipid@e0020000 {
+ compatible = "microchip,sama7g5-chipid";
+ reg = <0xe0020000 0x8>;
+ };
+
sdmmc0: mmc@e1204000 {
compatible = "microchip,sama7g5-sdhci", "microchip,sam9x60-sdhci";
reg = <0xe1204000 0x4000>;
@@ -515,6 +542,18 @@
};
};
+ uddrc: uddrc@e3800000 {
+ compatible = "microchip,sama7g5-uddrc";
+ reg = <0xe3800000 0x4000>;
+ status = "okay";
+ };
+
+ ddr3phy: ddr3phy@e3804000 {
+ compatible = "microchip,sama7g5-ddr3phy";
+ reg = <0xe3804000 0x1000>;
+ status = "okay";
+ };
+
gic: interrupt-controller@e8c11000 {
compatible = "arm,cortex-a7-gic";
#interrupt-cells = <3>;
diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi
index f266b7b03482..cc88ebe7a60c 100644
--- a/arch/arm/boot/dts/spear3xx.dtsi
+++ b/arch/arm/boot/dts/spear3xx.dtsi
@@ -47,7 +47,7 @@
};
gmac: eth@e0800000 {
- compatible = "st,spear600-gmac";
+ compatible = "snps,dwmac-3.40a";
reg = <0xe0800000 0x8000>;
interrupts = <23 22>;
interrupt-names = "macirq", "eth_wake_irq";
diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
index 2ad9fd7c94ec..8af4b77fe655 100644
--- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
@@ -17,6 +17,7 @@
* TAKE CARE WHEN MAINTAINING THIS FILE TO PROPAGATE ANY RELEVANT
* CHANGES TO vexpress-v2m.dtsi!
*/
+#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
v2m_fixed_3v3: fixed-regulator-0 {
@@ -101,16 +102,68 @@
};
bus@8000000 {
- motherboard-bus {
- model = "V2M-P1";
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 63>;
+ interrupt-map = <0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <0 1 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <0 2 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <0 3 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <0 4 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <0 5 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <0 6 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <0 7 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <0 8 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <0 9 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+ <0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+ <0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+ <0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ <0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+ <0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+ <0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+ <0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+ <0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+ <0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ <0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+
+ motherboard-bus@8000000 {
arm,hbi = <0x190>;
arm,vexpress,site = <0>;
- arm,v2m-memory-map = "rs1";
compatible = "arm,vexpress,v2m-p1", "simple-bus";
#address-cells = <2>; /* SMB chipselect number and offset */
#size-cells = <1>;
- #interrupt-cells = <1>;
- ranges;
+ ranges = <0 0 0x08000000 0x04000000>,
+ <1 0 0x14000000 0x04000000>,
+ <2 0 0x18000000 0x04000000>,
+ <3 0 0x1c000000 0x04000000>,
+ <4 0 0x0c000000 0x04000000>,
+ <5 0 0x10000000 0x04000000>;
nor_flash: flash@0 {
compatible = "arm,vexpress-flash", "cfi-flash";
@@ -215,7 +268,7 @@
clock-names = "apb_pclk";
};
- mmci@50000 {
+ mmc@50000 {
compatible = "arm,pl180", "arm,primecell";
reg = <0x050000 0x1000>;
interrupts = <9>, <10>;
@@ -275,7 +328,7 @@
clock-names = "uartclk", "apb_pclk";
};
- wdt@f0000 {
+ watchdog@f0000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0f0000 0x1000>;
interrupts = <0>;
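[Note: the vexpress hunks above replace an empty "ranges" on the motherboard
bus with explicit <chipselect, offset> windows. As an illustration of how
those windows compose, the sketch below (plain C, not kernel code; the table
values are copied from the motherboard-bus@8000000 hunk, and which chipselect
a given peripheral sits on comes from its parent node in the DTS) maps a
child address onto a CPU physical address.]

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct v2m_range {
	uint32_t cs;	/* SMB chipselect number (first child address cell) */
	uint32_t base;	/* CPU physical base of the window */
	uint32_t size;	/* window size */
};

/* The six windows from the motherboard-bus@8000000 "ranges" above. */
static const struct v2m_range rs1_ranges[] = {
	{ 0, 0x08000000, 0x04000000 },
	{ 1, 0x14000000, 0x04000000 },
	{ 2, 0x18000000, 0x04000000 },
	{ 3, 0x1c000000, 0x04000000 },
	{ 4, 0x0c000000, 0x04000000 },
	{ 5, 0x10000000, 0x04000000 },
};

static int v2m_translate(uint32_t cs, uint32_t offset, uint32_t *phys)
{
	for (size_t i = 0; i < sizeof(rs1_ranges) / sizeof(rs1_ranges[0]); i++) {
		if (rs1_ranges[i].cs == cs && offset < rs1_ranges[i].size) {
			*phys = rs1_ranges[i].base + offset;
			return 0;
		}
	}
	return -1;	/* no window: the address would not translate */
}

int main(void)
{
	uint32_t phys;

	/* Illustration only: a peripheral at offset 0x50000 on chipselect 3. */
	if (!v2m_translate(3, 0x50000, &phys))
		printf("cs 3 + 0x50000 -> 0x%08x\n", phys);	/* 0x1c050000 */
	return 0;
}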
diff --git a/arch/arm/boot/dts/vexpress-v2m.dtsi b/arch/arm/boot/dts/vexpress-v2m.dtsi
index ec13ceb9ed36..f434fe5cf4a1 100644
--- a/arch/arm/boot/dts/vexpress-v2m.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m.dtsi
@@ -17,18 +17,73 @@
* TAKE CARE WHEN MAINTAINING THIS FILE TO PROPAGATE ANY RELEVANT
* CHANGES TO vexpress-v2m-rs1.dtsi!
*/
+#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
- bus@4000000 {
- motherboard {
- model = "V2M-P1";
+ bus@40000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x40000000 0x40000000 0x10000000>,
+ <0x10000000 0x10000000 0x00020000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 63>;
+ interrupt-map = <0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <0 1 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <0 2 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <0 3 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <0 4 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <0 5 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <0 6 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <0 7 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <0 8 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <0 9 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+ <0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+ <0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+ <0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ <0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+ <0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+ <0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+ <0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+ <0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+ <0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ <0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+
+ motherboard-bus@40000000 {
arm,hbi = <0x190>;
arm,vexpress,site = <0>;
compatible = "arm,vexpress,v2m-p1", "simple-bus";
#address-cells = <2>; /* SMB chipselect number and offset */
#size-cells = <1>;
- #interrupt-cells = <1>;
- ranges;
+ ranges = <0 0 0x40000000 0x04000000>,
+ <1 0 0x44000000 0x04000000>,
+ <2 0 0x48000000 0x04000000>,
+ <3 0 0x4c000000 0x04000000>,
+ <7 0 0x10000000 0x00020000>;
flash@0,00000000 {
compatible = "arm,vexpress-flash", "cfi-flash";
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
index e63c5c0bfb43..679537e17ff5 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
@@ -237,62 +237,7 @@
};
bus@8000000 {
- compatible = "simple-bus";
-
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0 0 0 0x08000000 0x04000000>,
- <1 0 0 0x14000000 0x04000000>,
- <2 0 0 0x18000000 0x04000000>,
- <3 0 0 0x1c000000 0x04000000>,
- <4 0 0 0x0c000000 0x04000000>,
- <5 0 0 0x10000000 0x04000000>;
-
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 63>;
- interrupt-map = <0 0 0 &gic 0 0 4>,
- <0 0 1 &gic 0 1 4>,
- <0 0 2 &gic 0 2 4>,
- <0 0 3 &gic 0 3 4>,
- <0 0 4 &gic 0 4 4>,
- <0 0 5 &gic 0 5 4>,
- <0 0 6 &gic 0 6 4>,
- <0 0 7 &gic 0 7 4>,
- <0 0 8 &gic 0 8 4>,
- <0 0 9 &gic 0 9 4>,
- <0 0 10 &gic 0 10 4>,
- <0 0 11 &gic 0 11 4>,
- <0 0 12 &gic 0 12 4>,
- <0 0 13 &gic 0 13 4>,
- <0 0 14 &gic 0 14 4>,
- <0 0 15 &gic 0 15 4>,
- <0 0 16 &gic 0 16 4>,
- <0 0 17 &gic 0 17 4>,
- <0 0 18 &gic 0 18 4>,
- <0 0 19 &gic 0 19 4>,
- <0 0 20 &gic 0 20 4>,
- <0 0 21 &gic 0 21 4>,
- <0 0 22 &gic 0 22 4>,
- <0 0 23 &gic 0 23 4>,
- <0 0 24 &gic 0 24 4>,
- <0 0 25 &gic 0 25 4>,
- <0 0 26 &gic 0 26 4>,
- <0 0 27 &gic 0 27 4>,
- <0 0 28 &gic 0 28 4>,
- <0 0 29 &gic 0 29 4>,
- <0 0 30 &gic 0 30 4>,
- <0 0 31 &gic 0 31 4>,
- <0 0 32 &gic 0 32 4>,
- <0 0 33 &gic 0 33 4>,
- <0 0 34 &gic 0 34 4>,
- <0 0 35 &gic 0 35 4>,
- <0 0 36 &gic 0 36 4>,
- <0 0 37 &gic 0 37 4>,
- <0 0 38 &gic 0 38 4>,
- <0 0 39 &gic 0 39 4>,
- <0 0 40 &gic 0 40 4>,
- <0 0 41 &gic 0 41 4>,
- <0 0 42 &gic 0 42 4>;
+ ranges = <0x8000000 0 0x8000000 0x18000000>;
};
site2: hsb@40000000 {
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index 012d40a7228c..511e87cc2bc5 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -609,62 +609,7 @@
};
smb: bus@8000000 {
- compatible = "simple-bus";
-
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0 0 0 0x08000000 0x04000000>,
- <1 0 0 0x14000000 0x04000000>,
- <2 0 0 0x18000000 0x04000000>,
- <3 0 0 0x1c000000 0x04000000>,
- <4 0 0 0x0c000000 0x04000000>,
- <5 0 0 0x10000000 0x04000000>;
-
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 63>;
- interrupt-map = <0 0 0 &gic 0 0 4>,
- <0 0 1 &gic 0 1 4>,
- <0 0 2 &gic 0 2 4>,
- <0 0 3 &gic 0 3 4>,
- <0 0 4 &gic 0 4 4>,
- <0 0 5 &gic 0 5 4>,
- <0 0 6 &gic 0 6 4>,
- <0 0 7 &gic 0 7 4>,
- <0 0 8 &gic 0 8 4>,
- <0 0 9 &gic 0 9 4>,
- <0 0 10 &gic 0 10 4>,
- <0 0 11 &gic 0 11 4>,
- <0 0 12 &gic 0 12 4>,
- <0 0 13 &gic 0 13 4>,
- <0 0 14 &gic 0 14 4>,
- <0 0 15 &gic 0 15 4>,
- <0 0 16 &gic 0 16 4>,
- <0 0 17 &gic 0 17 4>,
- <0 0 18 &gic 0 18 4>,
- <0 0 19 &gic 0 19 4>,
- <0 0 20 &gic 0 20 4>,
- <0 0 21 &gic 0 21 4>,
- <0 0 22 &gic 0 22 4>,
- <0 0 23 &gic 0 23 4>,
- <0 0 24 &gic 0 24 4>,
- <0 0 25 &gic 0 25 4>,
- <0 0 26 &gic 0 26 4>,
- <0 0 27 &gic 0 27 4>,
- <0 0 28 &gic 0 28 4>,
- <0 0 29 &gic 0 29 4>,
- <0 0 30 &gic 0 30 4>,
- <0 0 31 &gic 0 31 4>,
- <0 0 32 &gic 0 32 4>,
- <0 0 33 &gic 0 33 4>,
- <0 0 34 &gic 0 34 4>,
- <0 0 35 &gic 0 35 4>,
- <0 0 36 &gic 0 36 4>,
- <0 0 37 &gic 0 37 4>,
- <0 0 38 &gic 0 38 4>,
- <0 0 39 &gic 0 39 4>,
- <0 0 40 &gic 0 40 4>,
- <0 0 41 &gic 0 41 4>,
- <0 0 42 &gic 0 42 4>;
+ ranges = <0x8000000 0 0x8000000 0x18000000>;
};
site2: hsb@40000000 {
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
index 7aa64ae25779..3b88209bacea 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
@@ -207,62 +207,7 @@
};
smb: bus@8000000 {
- compatible = "simple-bus";
-
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0 0 0x08000000 0x04000000>,
- <1 0 0x14000000 0x04000000>,
- <2 0 0x18000000 0x04000000>,
- <3 0 0x1c000000 0x04000000>,
- <4 0 0x0c000000 0x04000000>,
- <5 0 0x10000000 0x04000000>;
-
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 63>;
- interrupt-map = <0 0 0 &gic 0 0 4>,
- <0 0 1 &gic 0 1 4>,
- <0 0 2 &gic 0 2 4>,
- <0 0 3 &gic 0 3 4>,
- <0 0 4 &gic 0 4 4>,
- <0 0 5 &gic 0 5 4>,
- <0 0 6 &gic 0 6 4>,
- <0 0 7 &gic 0 7 4>,
- <0 0 8 &gic 0 8 4>,
- <0 0 9 &gic 0 9 4>,
- <0 0 10 &gic 0 10 4>,
- <0 0 11 &gic 0 11 4>,
- <0 0 12 &gic 0 12 4>,
- <0 0 13 &gic 0 13 4>,
- <0 0 14 &gic 0 14 4>,
- <0 0 15 &gic 0 15 4>,
- <0 0 16 &gic 0 16 4>,
- <0 0 17 &gic 0 17 4>,
- <0 0 18 &gic 0 18 4>,
- <0 0 19 &gic 0 19 4>,
- <0 0 20 &gic 0 20 4>,
- <0 0 21 &gic 0 21 4>,
- <0 0 22 &gic 0 22 4>,
- <0 0 23 &gic 0 23 4>,
- <0 0 24 &gic 0 24 4>,
- <0 0 25 &gic 0 25 4>,
- <0 0 26 &gic 0 26 4>,
- <0 0 27 &gic 0 27 4>,
- <0 0 28 &gic 0 28 4>,
- <0 0 29 &gic 0 29 4>,
- <0 0 30 &gic 0 30 4>,
- <0 0 31 &gic 0 31 4>,
- <0 0 32 &gic 0 32 4>,
- <0 0 33 &gic 0 33 4>,
- <0 0 34 &gic 0 34 4>,
- <0 0 35 &gic 0 35 4>,
- <0 0 36 &gic 0 36 4>,
- <0 0 37 &gic 0 37 4>,
- <0 0 38 &gic 0 38 4>,
- <0 0 39 &gic 0 39 4>,
- <0 0 40 &gic 0 40 4>,
- <0 0 41 &gic 0 41 4>,
- <0 0 42 &gic 0 42 4>;
+ ranges = <0 0x8000000 0x18000000>;
};
site2: hsb@40000000 {
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca9.dts b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
index 4c5847955856..5916e4877eac 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
@@ -295,64 +295,6 @@
};
};
- smb: bus@4000000 {
- compatible = "simple-bus";
-
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0 0 0x40000000 0x04000000>,
- <1 0 0x44000000 0x04000000>,
- <2 0 0x48000000 0x04000000>,
- <3 0 0x4c000000 0x04000000>,
- <7 0 0x10000000 0x00020000>;
-
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 63>;
- interrupt-map = <0 0 0 &gic 0 0 4>,
- <0 0 1 &gic 0 1 4>,
- <0 0 2 &gic 0 2 4>,
- <0 0 3 &gic 0 3 4>,
- <0 0 4 &gic 0 4 4>,
- <0 0 5 &gic 0 5 4>,
- <0 0 6 &gic 0 6 4>,
- <0 0 7 &gic 0 7 4>,
- <0 0 8 &gic 0 8 4>,
- <0 0 9 &gic 0 9 4>,
- <0 0 10 &gic 0 10 4>,
- <0 0 11 &gic 0 11 4>,
- <0 0 12 &gic 0 12 4>,
- <0 0 13 &gic 0 13 4>,
- <0 0 14 &gic 0 14 4>,
- <0 0 15 &gic 0 15 4>,
- <0 0 16 &gic 0 16 4>,
- <0 0 17 &gic 0 17 4>,
- <0 0 18 &gic 0 18 4>,
- <0 0 19 &gic 0 19 4>,
- <0 0 20 &gic 0 20 4>,
- <0 0 21 &gic 0 21 4>,
- <0 0 22 &gic 0 22 4>,
- <0 0 23 &gic 0 23 4>,
- <0 0 24 &gic 0 24 4>,
- <0 0 25 &gic 0 25 4>,
- <0 0 26 &gic 0 26 4>,
- <0 0 27 &gic 0 27 4>,
- <0 0 28 &gic 0 28 4>,
- <0 0 29 &gic 0 29 4>,
- <0 0 30 &gic 0 30 4>,
- <0 0 31 &gic 0 31 4>,
- <0 0 32 &gic 0 32 4>,
- <0 0 33 &gic 0 33 4>,
- <0 0 34 &gic 0 34 4>,
- <0 0 35 &gic 0 35 4>,
- <0 0 36 &gic 0 36 4>,
- <0 0 37 &gic 0 37 4>,
- <0 0 38 &gic 0 38 4>,
- <0 0 39 &gic 0 39 4>,
- <0 0 40 &gic 0 40 4>,
- <0 0 41 &gic 0 41 4>,
- <0 0 42 &gic 0 42 4>;
- };
-
site2: hsb@e0000000 {
compatible = "simple-bus";
#address-cells = <1>;
diff --git a/arch/arm/common/sharpsl_param.c b/arch/arm/common/sharpsl_param.c
index efeb5724d9e9..6237ede2f0c7 100644
--- a/arch/arm/common/sharpsl_param.c
+++ b/arch/arm/common/sharpsl_param.c
@@ -40,7 +40,9 @@ EXPORT_SYMBOL(sharpsl_param);
void sharpsl_save_param(void)
{
- memcpy(&sharpsl_param, param_start(PARAM_BASE), sizeof(struct sharpsl_param_info));
+ struct sharpsl_param_info *params = param_start(PARAM_BASE);
+
+ memcpy(&sharpsl_param, params, sizeof(*params));
if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
sharpsl_param.comadj=-1;
diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig
index d2d5f1cf815f..e6ff844821cf 100644
--- a/arch/arm/configs/gemini_defconfig
+++ b/arch/arm/configs/gemini_defconfig
@@ -76,6 +76,7 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_DRM=y
CONFIG_DRM_PANEL_ILITEK_IL9322=y
CONFIG_DRM_TVE200=y
+CONFIG_FB=y
CONFIG_LOGO=y
CONFIG_USB=y
CONFIG_USB_MON=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index ccee86d0045d..5e4128dadd8d 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -292,6 +292,7 @@ CONFIG_DRM_IMX_LDB=y
CONFIG_DRM_IMX_HDMI=y
CONFIG_DRM_ETNAVIV=y
CONFIG_DRM_MXSFB=y
+CONFIG_FB=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_L4F00242T03=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index ba67c4717dcc..33572998dbbe 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -197,7 +197,6 @@ CONFIG_PCI_EPF_TEST=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_OMAP_OCP2SCP=y
-CONFIG_SIMPLE_PM_BUS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
@@ -456,6 +455,7 @@ CONFIG_PINCTRL_STMFX=y
CONFIG_PINCTRL_PALMAS=y
CONFIG_PINCTRL_OWL=y
CONFIG_PINCTRL_S500=y
+CONFIG_PINCTRL_MSM=y
CONFIG_PINCTRL_APQ8064=y
CONFIG_PINCTRL_APQ8084=y
CONFIG_PINCTRL_IPQ8064=y
@@ -725,6 +725,7 @@ CONFIG_DRM_PL111=m
CONFIG_DRM_LIMA=m
CONFIG_DRM_PANFROST=m
CONFIG_DRM_ASPEED_GFX=m
+CONFIG_FB=y
CONFIG_FB_EFI=y
CONFIG_FB_WM8505=y
CONFIG_FB_SH_MOBILE_LCDC=y
@@ -1122,6 +1123,7 @@ CONFIG_PHY_DM816X_USB=m
CONFIG_OMAP_USB2=y
CONFIG_TI_PIPE3=y
CONFIG_TWL4030_USB=m
+CONFIG_RAS=y
CONFIG_NVMEM_IMX_OCOTP=y
CONFIG_ROCKCHIP_EFUSE=m
CONFIG_NVMEM_SUNXI_SID=y
diff --git a/arch/arm/configs/oxnas_v6_defconfig b/arch/arm/configs/oxnas_v6_defconfig
index cae0db6b4eaf..de37f7e90999 100644
--- a/arch/arm/configs/oxnas_v6_defconfig
+++ b/arch/arm/configs/oxnas_v6_defconfig
@@ -46,7 +46,6 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=64
-CONFIG_SIMPLE_PM_BUS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index d9a27e4e0914..18d2a960b2d2 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -40,7 +40,6 @@ CONFIG_PCI_RCAR_GEN2=y
CONFIG_PCIE_RCAR_HOST=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_SIMPLE_PM_BUS=y
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index d0a800be0486..a41e27ace391 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -628,7 +628,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
uprobe_notify_resume(regs);
} else {
tracehook_notify_resume(regs);
- rseq_handle_notify_resume(NULL, regs);
}
}
local_irq_disable();
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index d6cfe7c4bb00..8711d6824c1f 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -47,12 +47,26 @@ struct at91_pm_bu {
unsigned long ddr_phy_calibration[BACKUP_DDR_PHY_CALIBRATION];
};
+/*
+ * struct at91_pm_sfrbu_regs: registers mapping for SFRBU
+ * @pswbu: power switch BU control registers
+ */
+struct at91_pm_sfrbu_regs {
+ struct {
+ u32 key;
+ u32 ctrl;
+ u32 state;
+ u32 softsw;
+ } pswbu;
+};
+
/**
* struct at91_soc_pm - AT91 SoC power management data structure
* @config_shdwc_ws: wakeup sources configuration function for SHDWC
* @config_pmc_ws: wakeup sources configuration function for PMC
* @ws_ids: wakeup sources of_device_id array
* @data: PM data to be used on last phase of suspend
+ * @sfrbu_regs: SFRBU registers mapping
* @bu: backup unit mapped data (for backup mode)
* @memcs: memory chip select
*/
@@ -62,6 +76,7 @@ struct at91_soc_pm {
const struct of_device_id *ws_ids;
struct at91_pm_bu *bu;
struct at91_pm_data data;
+ struct at91_pm_sfrbu_regs sfrbu_regs;
void *memcs;
};
@@ -356,9 +371,36 @@ static int at91_suspend_finish(unsigned long val)
return 0;
}
+static void at91_pm_switch_ba_to_vbat(void)
+{
+ unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
+ unsigned int val;
+
+ /* Just for safety. */
+ if (!soc_pm.data.sfrbu)
+ return;
+
+ val = readl(soc_pm.data.sfrbu + offset);
+
+ /* Already on VBAT. */
+ if (!(val & soc_pm.sfrbu_regs.pswbu.state))
+ return;
+
+ val &= ~soc_pm.sfrbu_regs.pswbu.softsw;
+ val |= soc_pm.sfrbu_regs.pswbu.key | soc_pm.sfrbu_regs.pswbu.ctrl;
+ writel(val, soc_pm.data.sfrbu + offset);
+
+ /* Wait for update. */
+ val = readl(soc_pm.data.sfrbu + offset);
+ while (val & soc_pm.sfrbu_regs.pswbu.state)
+ val = readl(soc_pm.data.sfrbu + offset);
+}
+
static void at91_pm_suspend(suspend_state_t state)
{
if (soc_pm.data.mode == AT91_PM_BACKUP) {
+ at91_pm_switch_ba_to_vbat();
+
cpu_suspend(0, at91_suspend_finish);
/* The SRAM is lost between suspend cycles */
@@ -589,18 +631,22 @@ static const struct of_device_id ramc_phy_ids[] __initconst = {
{ /* Sentinel. */ },
};
-static __init void at91_dt_ramc(bool phy_mandatory)
+static __init int at91_dt_ramc(bool phy_mandatory)
{
struct device_node *np;
const struct of_device_id *of_id;
int idx = 0;
void *standby = NULL;
const struct ramc_info *ramc;
+ int ret;
for_each_matching_node_and_match(np, ramc_ids, &of_id) {
soc_pm.data.ramc[idx] = of_iomap(np, 0);
- if (!soc_pm.data.ramc[idx])
- panic(pr_fmt("unable to map ramc[%d] cpu registers\n"), idx);
+ if (!soc_pm.data.ramc[idx]) {
+ pr_err("unable to map ramc[%d] cpu registers\n", idx);
+ ret = -ENOMEM;
+ goto unmap_ramc;
+ }
ramc = of_id->data;
if (ramc) {
@@ -612,25 +658,42 @@ static __init void at91_dt_ramc(bool phy_mandatory)
idx++;
}
- if (!idx)
- panic(pr_fmt("unable to find compatible ram controller node in dtb\n"));
+ if (!idx) {
+ pr_err("unable to find compatible ram controller node in dtb\n");
+ ret = -ENODEV;
+ goto unmap_ramc;
+ }
/* Lookup for DDR PHY node, if any. */
for_each_matching_node_and_match(np, ramc_phy_ids, &of_id) {
soc_pm.data.ramc_phy = of_iomap(np, 0);
- if (!soc_pm.data.ramc_phy)
- panic(pr_fmt("unable to map ramc phy cpu registers\n"));
+ if (!soc_pm.data.ramc_phy) {
+ pr_err("unable to map ramc phy cpu registers\n");
+ ret = -ENOMEM;
+ goto unmap_ramc;
+ }
}
- if (phy_mandatory && !soc_pm.data.ramc_phy)
- panic(pr_fmt("DDR PHY is mandatory!\n"));
+ if (phy_mandatory && !soc_pm.data.ramc_phy) {
+ pr_err("DDR PHY is mandatory!\n");
+ ret = -ENODEV;
+ goto unmap_ramc;
+ }
if (!standby) {
pr_warn("ramc no standby function available\n");
- return;
+ return 0;
}
at91_cpuidle_device.dev.platform_data = standby;
+
+ return 0;
+
+unmap_ramc:
+ while (idx)
+ iounmap(soc_pm.data.ramc[--idx]);
+
+ return ret;
}
static void at91rm9200_idle(void)
@@ -1017,6 +1080,8 @@ static void __init at91_pm_init(void (*pm_idle)(void))
void __init at91rm9200_pm_init(void)
{
+ int ret;
+
if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
return;
@@ -1028,7 +1093,9 @@ void __init at91rm9200_pm_init(void)
soc_pm.data.standby_mode = AT91_PM_STANDBY;
soc_pm.data.suspend_mode = AT91_PM_ULP0;
- at91_dt_ramc(false);
+ ret = at91_dt_ramc(false);
+ if (ret)
+ return;
/*
* AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
@@ -1046,13 +1113,17 @@ void __init sam9x60_pm_init(void)
static const int iomaps[] __initconst = {
[AT91_PM_ULP1] = AT91_PM_IOMAP(SHDWC),
};
+ int ret;
if (!IS_ENABLED(CONFIG_SOC_SAM9X60))
return;
at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
- at91_dt_ramc(false);
+ ret = at91_dt_ramc(false);
+ if (ret)
+ return;
+
at91_pm_init(NULL);
soc_pm.ws_ids = sam9x60_ws_ids;
@@ -1061,6 +1132,8 @@ void __init sam9x60_pm_init(void)
void __init at91sam9_pm_init(void)
{
+ int ret;
+
if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
return;
@@ -1072,7 +1145,10 @@ void __init at91sam9_pm_init(void)
soc_pm.data.standby_mode = AT91_PM_STANDBY;
soc_pm.data.suspend_mode = AT91_PM_ULP0;
- at91_dt_ramc(false);
+ ret = at91_dt_ramc(false);
+ if (ret)
+ return;
+
at91_pm_init(at91sam9_idle);
}
@@ -1081,12 +1157,16 @@ void __init sama5_pm_init(void)
static const int modes[] __initconst = {
AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST,
};
+ int ret;
if (!IS_ENABLED(CONFIG_SOC_SAMA5))
return;
at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
- at91_dt_ramc(false);
+ ret = at91_dt_ramc(false);
+ if (ret)
+ return;
+
at91_pm_init(NULL);
}
@@ -1101,18 +1181,27 @@ void __init sama5d2_pm_init(void)
[AT91_PM_BACKUP] = AT91_PM_IOMAP(SHDWC) |
AT91_PM_IOMAP(SFRBU),
};
+ int ret;
if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
return;
at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
- at91_dt_ramc(false);
+ ret = at91_dt_ramc(false);
+ if (ret)
+ return;
+
at91_pm_init(NULL);
soc_pm.ws_ids = sama5d2_ws_ids;
soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws;
soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws;
+
+ soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
+ soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
+ soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
+ soc_pm.sfrbu_regs.pswbu.state = BIT(3);
}
void __init sama7_pm_init(void)
@@ -1127,18 +1216,27 @@ void __init sama7_pm_init(void)
[AT91_PM_BACKUP] = AT91_PM_IOMAP(SFRBU) |
AT91_PM_IOMAP(SHDWC),
};
+ int ret;
if (!IS_ENABLED(CONFIG_SOC_SAMA7))
return;
at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
- at91_dt_ramc(true);
+ ret = at91_dt_ramc(true);
+ if (ret)
+ return;
+
at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
at91_pm_init(NULL);
soc_pm.ws_ids = sama7g5_ws_ids;
soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
+
+ soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
+ soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
+ soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
+ soc_pm.sfrbu_regs.pswbu.state = BIT(2);
}
static int __init at91_pm_modes_select(char *str)
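[Note: for reference, a minimal user-space sketch of the PSWBU sequence that
at91_pm_switch_ba_to_vbat() performs above: clear the software-switch bit,
write the key together with the control bit, then poll the state bit until
the backup area reports it is on VBAT. The register is modelled as a plain
variable; bit values are the sama5d2 ones from this hunk (sama7 uses BIT(2)
for state).]

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define PSWBU_KEY	(0x4BD20Cu << 8)
#define PSWBU_CTRL	BIT(0)
#define PSWBU_SOFTSW	BIT(1)
#define PSWBU_STATE	BIT(3)	/* sama5d2; sama7 uses BIT(2) */

static uint32_t pswbu = PSWBU_STATE;	/* fake register: not yet on VBAT */

static uint32_t pswbu_read(void) { return pswbu; }

static void pswbu_write(uint32_t v)
{
	/* Model the hardware: a keyed ctrl write switches the supply. */
	if ((v & 0xffffff00u) == PSWBU_KEY && (v & PSWBU_CTRL) &&
	    !(v & PSWBU_SOFTSW))
		pswbu &= ~PSWBU_STATE;
}

int main(void)
{
	uint32_t val = pswbu_read();

	if (val & PSWBU_STATE) {	/* still on the switched supply */
		val &= ~PSWBU_SOFTSW;
		val |= PSWBU_KEY | PSWBU_CTRL;
		pswbu_write(val);
		while (pswbu_read() & PSWBU_STATE)
			;		/* wait for the switch to complete */
	}
	printf("backup area now on VBAT\n");
	return 0;
}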
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
index cbd61a3bcab1..fdb4f63ecde4 100644
--- a/arch/arm/mach-at91/pm_suspend.S
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -1014,31 +1014,55 @@ ENTRY(at91_pm_suspend_in_sram)
mov tmp1, #0
mcr p15, 0, tmp1, c7, c10, 4
+ /* Flush TLB. */
+ mov r4, #0
+ mcr p15, 0, r4, c8, c7, 0
+
+ ldr tmp1, [r0, #PM_DATA_PMC_MCKR_OFFSET]
+ str tmp1, .mckr_offset
+ ldr tmp1, [r0, #PM_DATA_PMC_VERSION]
+ str tmp1, .pmc_version
+ ldr tmp1, [r0, #PM_DATA_MEMCTRL]
+ str tmp1, .memtype
+ ldr tmp1, [r0, #PM_DATA_MODE]
+ str tmp1, .pm_mode
+
+ /*
+ * ldrne below are here to preload their address in the TLB as access
+ * to RAM may be limited while in self-refresh.
+ */
ldr tmp1, [r0, #PM_DATA_PMC]
str tmp1, .pmc_base
+ cmp tmp1, #0
+ ldrne tmp2, [tmp1, #0]
+
ldr tmp1, [r0, #PM_DATA_RAMC0]
str tmp1, .sramc_base
+ cmp tmp1, #0
+ ldrne tmp2, [tmp1, #0]
+
ldr tmp1, [r0, #PM_DATA_RAMC1]
str tmp1, .sramc1_base
+ cmp tmp1, #0
+ ldrne tmp2, [tmp1, #0]
+
+#ifndef CONFIG_SOC_SAM_V4_V5
+ /* ldrne below are here to preload their address in the TLB */
ldr tmp1, [r0, #PM_DATA_RAMC_PHY]
str tmp1, .sramc_phy_base
- ldr tmp1, [r0, #PM_DATA_MEMCTRL]
- str tmp1, .memtype
- ldr tmp1, [r0, #PM_DATA_MODE]
- str tmp1, .pm_mode
- ldr tmp1, [r0, #PM_DATA_PMC_MCKR_OFFSET]
- str tmp1, .mckr_offset
- ldr tmp1, [r0, #PM_DATA_PMC_VERSION]
- str tmp1, .pmc_version
- /* Both ldrne below are here to preload their address in the TLB */
+ cmp tmp1, #0
+ ldrne tmp2, [tmp1, #0]
+
ldr tmp1, [r0, #PM_DATA_SHDWC]
str tmp1, .shdwc
cmp tmp1, #0
ldrne tmp2, [tmp1, #0]
+
ldr tmp1, [r0, #PM_DATA_SFRBU]
str tmp1, .sfrbu
cmp tmp1, #0
ldrne tmp2, [tmp1, #0x10]
+#endif
/* Activate the self-refresh mode */
at91_sramc_self_refresh_ena
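[Note: in C terms, each cmp/ldrne pair added above amounts to the pattern
below: if the block was mapped, do one throwaway read so its translation is
resolved into the TLB before DRAM enters self-refresh. A sketch only; the
fake_reg stand-in is hypothetical.]

#include <stdint.h>

static void preload_tlb(const volatile uint32_t *base)
{
	if (base)
		(void)*base;	/* dummy read: only the TLB fill matters */
}

int main(void)
{
	static uint32_t fake_reg;	/* stands in for an ioremapped block */

	preload_tlb(&fake_reg);		/* mapped: the ldrne executes */
	preload_tlb(0);			/* unmapped: skipped, like ldrne */
	return 0;
}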
diff --git a/arch/arm/mach-dove/include/mach/uncompress.h b/arch/arm/mach-dove/include/mach/uncompress.h
index 7a4bd8838036..ddf873f35e2b 100644
--- a/arch/arm/mach-dove/include/mach/uncompress.h
+++ b/arch/arm/mach-dove/include/mach/uncompress.h
@@ -11,7 +11,7 @@
#define LSR_THRE 0x20
-static void putc(const char c)
+static inline void putc(const char c)
{
int i;
@@ -24,7 +24,7 @@ static void putc(const char c)
*UART_THR = c;
}
-static void flush(void)
+static inline void flush(void)
{
}
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 11dcc369ec14..c9d7c29d95e1 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -172,6 +172,9 @@ static void __init imx6q_init_machine(void)
imx_get_soc_revision());
imx6q_enet_phy_init();
+
+ of_platform_default_populate(NULL, NULL, NULL);
+
imx_anatop_init();
cpu_is_imx6q() ? imx6q_pm_init() : imx6dl_pm_init();
imx6q_1588_init();
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index 9244437cb1b9..f2ecca339910 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/genalloc.h>
+#include <linux/irqchip/arm-gic.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/of.h>
@@ -619,6 +620,7 @@ static void __init imx6_pm_common_init(const struct imx6_pm_socdata
static void imx6_pm_stby_poweroff(void)
{
+ gic_cpu_if_down(0);
imx6_set_lpm(STOP_POWER_OFF);
imx6q_suspend_finish(0);
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c
index 95fd1fbb0826..59a8e8cc4469 100644
--- a/arch/arm/mach-imx/src.c
+++ b/arch/arm/mach-imx/src.c
@@ -9,6 +9,7 @@
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/smp.h>
#include <asm/smp_plat.h>
@@ -81,11 +82,6 @@ static const struct reset_control_ops imx_src_ops = {
.reset = imx_src_reset_module,
};
-static struct reset_controller_dev imx_reset_controller = {
- .ops = &imx_src_ops,
- .nr_resets = ARRAY_SIZE(sw_reset_bits),
-};
-
static void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset)
{
writel_relaxed(enable, gpc_base + offset);
@@ -177,10 +173,6 @@ void __init imx_src_init(void)
src_base = of_iomap(np, 0);
WARN_ON(!src_base);
- imx_reset_controller.of_node = np;
- if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
- reset_controller_register(&imx_reset_controller);
-
/*
* force warm reset sources to generate cold reset
* for a more reliable restart
@@ -214,3 +206,33 @@ void __init imx7_src_init(void)
if (!gpc_base)
return;
}
+
+static const struct of_device_id imx_src_dt_ids[] = {
+ { .compatible = "fsl,imx51-src" },
+ { /* sentinel */ }
+};
+
+static int imx_src_probe(struct platform_device *pdev)
+{
+ struct reset_controller_dev *rcdev;
+
+ rcdev = devm_kzalloc(&pdev->dev, sizeof(*rcdev), GFP_KERNEL);
+ if (!rcdev)
+ return -ENOMEM;
+
+ rcdev->ops = &imx_src_ops;
+ rcdev->dev = &pdev->dev;
+ rcdev->of_node = pdev->dev.of_node;
+ rcdev->nr_resets = ARRAY_SIZE(sw_reset_bits);
+
+ return devm_reset_controller_register(&pdev->dev, rcdev);
+}
+
+static struct platform_driver imx_src_driver = {
+ .driver = {
+ .name = "imx-src",
+ .of_match_table = imx_src_dt_ids,
+ },
+ .probe = imx_src_probe,
+};
+builtin_platform_driver(imx_src_driver);
diff --git a/arch/arm/mach-omap1/include/mach/memory.h b/arch/arm/mach-omap1/include/mach/memory.h
index 36bc0000cb6a..ba3a350479c8 100644
--- a/arch/arm/mach-omap1/include/mach/memory.h
+++ b/arch/arm/mach-omap1/include/mach/memory.h
@@ -9,16 +9,4 @@
/* REVISIT: omap1 legacy drivers still rely on this */
#include <mach/soc.h>
-/*
- * Bus address is physical address, except for OMAP-1510 Local Bus.
- * OMAP-1510 bus address is translated into a Local Bus address if the
- * OMAP bus type is lbus. We do the address translation based on the
- * device overriding the defaults used in the dma-mapping API.
- */
-
-/*
- * OMAP-1510 Local Bus address offset
- */
-#define OMAP1510_LB_OFFSET UL(0x30000000)
-
#endif
diff --git a/arch/arm/mach-omap1/usb.c b/arch/arm/mach-omap1/usb.c
index 86d3b3c157af..e60831c82b78 100644
--- a/arch/arm/mach-omap1/usb.c
+++ b/arch/arm/mach-omap1/usb.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/dma-map-ops.h>
#include <linux/io.h>
+#include <linux/delay.h>
#include <asm/irq.h>
@@ -206,8 +207,6 @@ static inline void udc_device_init(struct omap_usb_config *pdata)
#endif
-#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
-
/* The dmamask must be set for OHCI to work */
static u64 ohci_dmamask = ~(u32)0;
@@ -236,20 +235,15 @@ static struct platform_device ohci_device = {
static inline void ohci_device_init(struct omap_usb_config *pdata)
{
+ if (!IS_ENABLED(CONFIG_USB_OHCI_HCD))
+ return;
+
if (cpu_is_omap7xx())
ohci_resources[1].start = INT_7XX_USB_HHC_1;
pdata->ohci_device = &ohci_device;
pdata->ocpi_enable = &ocpi_enable;
}
-#else
-
-static inline void ohci_device_init(struct omap_usb_config *pdata)
-{
-}
-
-#endif
-
#if defined(CONFIG_USB_OTG) && defined(CONFIG_ARCH_OMAP_OTG)
static struct resource otg_resources[] = {
@@ -534,33 +528,87 @@ bad:
}
#ifdef CONFIG_ARCH_OMAP15XX
+/* OMAP-1510 OHCI has its own MMU for DMA */
+#define OMAP1510_LB_MEMSIZE 32 /* Should be same as SDRAM size */
+#define OMAP1510_LB_CLOCK_DIV 0xfffec10c
+#define OMAP1510_LB_MMU_CTL 0xfffec208
+#define OMAP1510_LB_MMU_LCK 0xfffec224
+#define OMAP1510_LB_MMU_LD_TLB 0xfffec228
+#define OMAP1510_LB_MMU_CAM_H 0xfffec22c
+#define OMAP1510_LB_MMU_CAM_L 0xfffec230
+#define OMAP1510_LB_MMU_RAM_H 0xfffec234
+#define OMAP1510_LB_MMU_RAM_L 0xfffec238
-/* ULPD_DPLL_CTRL */
-#define DPLL_IOB (1 << 13)
-#define DPLL_PLL_ENABLE (1 << 4)
-#define DPLL_LOCK (1 << 0)
+/*
+ * Bus address is physical address, except for OMAP-1510 Local Bus.
+ * OMAP-1510 bus address is translated into a Local Bus address if the
+ * OMAP bus type is lbus.
+ */
+#define OMAP1510_LB_OFFSET UL(0x30000000)
-/* ULPD_APLL_CTRL */
-#define APLL_NDPLL_SWITCH (1 << 0)
+/*
+ * OMAP-1510 specific Local Bus clock on/off
+ */
+static int omap_1510_local_bus_power(int on)
+{
+ if (on) {
+ omap_writel((1 << 1) | (1 << 0), OMAP1510_LB_MMU_CTL);
+ udelay(200);
+ } else {
+ omap_writel(0, OMAP1510_LB_MMU_CTL);
+ }
-static int omap_1510_usb_ohci_notifier(struct notifier_block *nb,
- unsigned long event, void *data)
+ return 0;
+}
+
+/*
+ * OMAP-1510 specific Local Bus initialization
+ * NOTE: This assumes a 32MB memory size per OMAP1510_LB_MEMSIZE.
+ * See also arch/arm/mach-omap1/include/mach/memory.h for
+ * __virt_to_dma() and __dma_to_virt(), which need to match the
+ * physical Local Bus address below.
+ */
+static int omap_1510_local_bus_init(void)
{
- struct device *dev = data;
+ unsigned int tlb;
+ unsigned long lbaddr, physaddr;
+
+ omap_writel((omap_readl(OMAP1510_LB_CLOCK_DIV) & 0xfffffff8) | 0x4,
+ OMAP1510_LB_CLOCK_DIV);
+
+ /* Configure the Local Bus MMU table */
+ for (tlb = 0; tlb < OMAP1510_LB_MEMSIZE; tlb++) {
+ lbaddr = tlb * 0x00100000 + OMAP1510_LB_OFFSET;
+ physaddr = tlb * 0x00100000 + PHYS_OFFSET;
+ omap_writel((lbaddr & 0x0fffffff) >> 22, OMAP1510_LB_MMU_CAM_H);
+ omap_writel(((lbaddr & 0x003ffc00) >> 6) | 0xc,
+ OMAP1510_LB_MMU_CAM_L);
+ omap_writel(physaddr >> 16, OMAP1510_LB_MMU_RAM_H);
+ omap_writel((physaddr & 0x0000fc00) | 0x300, OMAP1510_LB_MMU_RAM_L);
+ omap_writel(tlb << 4, OMAP1510_LB_MMU_LCK);
+ omap_writel(0x1, OMAP1510_LB_MMU_LD_TLB);
+ }
- if (event != BUS_NOTIFY_ADD_DEVICE)
- return NOTIFY_DONE;
+ /* Enable the walking table */
+ omap_writel(omap_readl(OMAP1510_LB_MMU_CTL) | (1 << 3), OMAP1510_LB_MMU_CTL);
+ udelay(200);
- if (strncmp(dev_name(dev), "ohci", 4) == 0 &&
- dma_direct_set_offset(dev, PHYS_OFFSET, OMAP1510_LB_OFFSET,
- (u64)-1))
- WARN_ONCE(1, "failed to set DMA offset\n");
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block omap_1510_usb_ohci_nb = {
- .notifier_call = omap_1510_usb_ohci_notifier,
-};
+static void omap_1510_local_bus_reset(void)
+{
+ omap_1510_local_bus_power(1);
+ omap_1510_local_bus_init();
+}
+
+/* ULPD_DPLL_CTRL */
+#define DPLL_IOB (1 << 13)
+#define DPLL_PLL_ENABLE (1 << 4)
+#define DPLL_LOCK (1 << 0)
+
+/* ULPD_APLL_CTRL */
+#define APLL_NDPLL_SWITCH (1 << 0)
static void __init omap_1510_usb_init(struct omap_usb_config *config)
{
@@ -616,19 +664,19 @@ static void __init omap_1510_usb_init(struct omap_usb_config *config)
}
#endif
-#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
- if (config->register_host) {
+ if (IS_ENABLED(CONFIG_USB_OHCI_HCD) && config->register_host) {
int status;
- bus_register_notifier(&platform_bus_type,
- &omap_1510_usb_ohci_nb);
ohci_device.dev.platform_data = config;
+ dma_direct_set_offset(&ohci_device.dev, PHYS_OFFSET,
+ OMAP1510_LB_OFFSET, (u64)-1);
status = platform_device_register(&ohci_device);
if (status)
pr_debug("can't register OHCI device, %d\n", status);
/* hcd explicitly gates 48MHz */
+
+ config->lb_reset = omap_1510_local_bus_reset;
}
-#endif
}
#else
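[Note: to make the table programming above concrete, this small sketch
reproduces the CAM/RAM field arithmetic from omap_1510_local_bus_init() for
the first few 1 MiB pages. PHYS_OFFSET is assumed to be 0x10000000 here
purely for illustration.]

#include <stdint.h>
#include <stdio.h>

#define OMAP1510_LB_OFFSET	0x30000000u
#define PHYS_OFFSET		0x10000000u	/* assumption, see note */
#define OMAP1510_LB_MEMSIZE	32		/* MiB, same as SDRAM size */

int main(void)
{
	for (unsigned int tlb = 0; tlb < 4; tlb++) {	/* first 4 of 32 */
		uint32_t lbaddr = tlb * 0x00100000 + OMAP1510_LB_OFFSET;
		uint32_t physaddr = tlb * 0x00100000 + PHYS_OFFSET;
		uint32_t cam_h = (lbaddr & 0x0fffffff) >> 22;
		uint32_t cam_l = ((lbaddr & 0x003ffc00) >> 6) | 0xc;
		uint32_t ram_h = physaddr >> 16;
		uint32_t ram_l = (physaddr & 0x0000fc00) | 0x300;

		printf("tlb %2u: LB 0x%08x -> phys 0x%08x CAM 0x%x/0x%x RAM 0x%x/0x%x\n",
		       tlb, lbaddr, physaddr, cam_h, cam_l, ram_h, ram_l);
	}
	return 0;
}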
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 7f13adf26e61..02c253de9b6e 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -112,7 +112,6 @@ config ARCH_OMAP2PLUS
select PM_GENERIC_DOMAINS
select PM_GENERIC_DOMAINS_OF
select RESET_CONTROLLER
- select SIMPLE_PM_BUS
select SOC_BUS
select TI_SYSC
select OMAP_IRQCHIP
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 12b26e04686f..0c2936c7a379 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -3614,6 +3614,8 @@ int omap_hwmod_init_module(struct device *dev,
oh->flags |= HWMOD_SWSUP_SIDLE_ACT;
if (data->cfg->quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
oh->flags |= HWMOD_SWSUP_MSTANDBY;
+ if (data->cfg->quirks & SYSC_QUIRK_CLKDM_NOAUTO)
+ oh->flags |= HWMOD_CLKDM_NOAUTO;
error = omap_hwmod_check_module(dev, oh, data, sysc_fields,
rev_offs, sysc_offs, syss_offs,
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index a951276f0547..a903b26cde40 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -36,6 +36,10 @@
* +-----+
* |RSVD | JIT scratchpad
* current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
+ * | ... | caller-saved registers
+ * +-----+
+ * | ... | arguments passed on stack
* ARM_SP during call => +-----+
* | |
* | ... | Function call stack
* | |
@@ -63,6 +67,12 @@
*
* When popping registers off the stack at the end of a BPF function, we
* reference them via the current ARM_FP register.
+ *
+ * Some eBPF operations are implemented via a call to a helper function.
+ * Such calls are "invisible" in the eBPF code, so it is up to the calling
+ * program to preserve any caller-saved ARM registers during the call. The
+ * JIT emits code to push and pop those registers onto the stack, immediately
+ * above the callee stack frame.
*/
#define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
@@ -70,6 +80,8 @@
#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
#define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC)
+#define CALLER_MASK (1 << ARM_R0 | 1 << ARM_R1 | 1 << ARM_R2 | 1 << ARM_R3)
+
enum {
/* Stack layout - these are offsets from (top of stack - 4) */
BPF_R2_HI,
@@ -464,6 +476,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
{
+ const int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1);
const s8 *tmp = bpf2a32[TMP_REG_1];
#if __LINUX_ARM_ARCH__ == 7
@@ -495,11 +508,17 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
emit(ARM_MOV_R(ARM_R0, rm), ctx);
}
+ /* Push caller-saved registers on stack */
+ emit(ARM_PUSH(CALLER_MASK & ~exclude_mask), ctx);
+
/* Call appropriate function */
emit_mov_i(ARM_IP, op == BPF_DIV ?
(u32)jit_udiv32 : (u32)jit_mod32, ctx);
emit_blx_r(ARM_IP, ctx);
+ /* Restore caller-saved registers from stack */
+ emit(ARM_POP(CALLER_MASK & ~exclude_mask), ctx);
+
/* Save return value */
if (rd != ARM_R0)
emit(ARM_MOV_R(rd, ARM_R0), ctx);
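[Note: the net effect of the CALLER_MASK push/pop added around the div/mod
helper call can be checked with a few lines of plain C: r0/r1 are excluded
because they carry the operands and the result, so only r2 and r3 are saved.
Register numbers are assumed equal to their ARM encoding.]

#include <stdio.h>

#define BIT(n)	(1u << (n))
#define ARM_R0	0
#define ARM_R1	1
#define ARM_R2	2
#define ARM_R3	3

#define CALLER_MASK (BIT(ARM_R0) | BIT(ARM_R1) | BIT(ARM_R2) | BIT(ARM_R3))

int main(void)
{
	unsigned int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1);

	/* Mask actually emitted in ARM_PUSH()/ARM_POP() by emit_udivmod(). */
	printf("push/pop mask: 0x%x (r2, r3)\n", CALLER_MASK & ~exclude_mask);
	return 0;
}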
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5c7ae4c3954b..fee914c716aa 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1931,8 +1931,6 @@ source "drivers/cpufreq/Kconfig"
endmenu
-source "drivers/firmware/Kconfig"
-
source "drivers/acpi/Kconfig"
source "arch/arm64/kvm/Kconfig"
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
index 05ae893d1b2e..fbf13f7c2baf 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
@@ -115,7 +115,6 @@
bus@8000000 {
compatible = "arm,vexpress,v2m-p1", "simple-bus";
- arm,v2m-memory-map = "rs1";
#address-cells = <2>; /* SMB chipselect number and offset */
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
index b8a21092db4d..269b649934b5 100644
--- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts
+++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
@@ -192,32 +192,9 @@
remote-endpoint = <&clcd_pads>;
};
};
-
- panel-timing {
- clock-frequency = <63500127>;
- hactive = <1024>;
- hback-porch = <152>;
- hfront-porch = <48>;
- hsync-len = <104>;
- vactive = <768>;
- vback-porch = <23>;
- vfront-porch = <3>;
- vsync-len = <4>;
- };
};
bus@8000000 {
- compatible = "simple-bus";
-
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0 0 0 0x08000000 0x04000000>,
- <1 0 0 0x14000000 0x04000000>,
- <2 0 0 0x18000000 0x04000000>,
- <3 0 0 0x1c000000 0x04000000>,
- <4 0 0 0x0c000000 0x04000000>,
- <5 0 0 0x10000000 0x04000000>;
-
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 63>;
interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index 8e7a66943b01..6288e104a089 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -27,8 +27,6 @@
reg = <0x0 0x2b1f0000 0x0 0x1000>;
interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "mhu_lpri_rx",
- "mhu_hpri_rx";
#mbox-cells = <1>;
clocks = <&soc_refclk100mhz>;
clock-names = "apb_pclk";
@@ -804,16 +802,6 @@
};
bus@8000000 {
- compatible = "simple-bus";
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0 0 0 0x08000000 0x04000000>,
- <1 0 0 0x14000000 0x04000000>,
- <2 0 0 0x18000000 0x04000000>,
- <3 0 0 0x1c000000 0x04000000>,
- <4 0 0 0x0c000000 0x04000000>,
- <5 0 0 0x10000000 0x04000000>;
-
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 15>;
interrupt-map = <0 0 0 &gic 0 GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
index 40d95c58b55e..fefd2b5f0176 100644
--- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
@@ -92,16 +92,23 @@
};
bus@8000000 {
- motherboard-bus {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0x8000000 0 0x8000000 0x18000000>;
+
+ motherboard-bus@8000000 {
compatible = "arm,vexpress,v2p-p1", "simple-bus";
#address-cells = <2>; /* SMB chipselect number and offset */
#size-cells = <1>;
- #interrupt-cells = <1>;
- ranges;
- model = "V2M-Juno";
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
arm,hbi = <0x252>;
arm,vexpress,site = <0>;
- arm,v2m-memory-map = "rs1";
flash@0 {
/* 2 * 32MiB NOR Flash memory mounted on CS0 */
@@ -218,7 +225,7 @@
};
};
- mmci@50000 {
+ mmc@50000 {
compatible = "arm,pl180", "arm,primecell";
reg = <0x050000 0x1000>;
interrupts = <5>;
@@ -246,7 +253,7 @@
clock-names = "KMIREFCLK", "apb_pclk";
};
- wdt@f0000 {
+ watchdog@f0000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0f0000 0x10000>;
interrupts = <7>;
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
index 3050f45bade4..258991ad7cc0 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
@@ -133,17 +133,6 @@
};
bus@8000000 {
- compatible = "simple-bus";
-
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0 0 0 0x08000000 0x04000000>,
- <1 0 0 0x14000000 0x04000000>,
- <2 0 0 0x18000000 0x04000000>,
- <3 0 0 0x1c000000 0x04000000>,
- <4 0 0 0x0c000000 0x04000000>,
- <5 0 0 0x10000000 0x04000000>;
-
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 63>;
interrupt-map = <0 0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi
index b917d9d3f1c4..33182d9e5826 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi
@@ -6,7 +6,7 @@
*/
/ {
bus@8000000 {
- motherboard-bus {
+ motherboard-bus@8000000 {
arm,v2m-memory-map = "rs2";
iofpga-bus@300000000 {
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
index 4c4a381d2c75..5f6cab668aa0 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
@@ -77,13 +77,21 @@
};
bus@8000000 {
- motherboard-bus {
- arm,v2m-memory-map = "rs1";
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0x8000000 0 0x8000000 0x18000000>;
+
+ motherboard-bus@8000000 {
compatible = "arm,vexpress,v2m-p1", "simple-bus";
#address-cells = <2>; /* SMB chipselect number and offset */
#size-cells = <1>;
- #interrupt-cells = <1>;
- ranges;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
flash@0 {
compatible = "arm,vexpress-flash", "cfi-flash";
@@ -130,7 +138,7 @@
clock-names = "apb_pclk";
};
- mmci@50000 {
+ mmc@50000 {
compatible = "arm,pl180", "arm,primecell";
reg = <0x050000 0x1000>;
interrupts = <9>, <10>;
@@ -190,7 +198,7 @@
clock-names = "uartclk", "apb_pclk";
};
- wdt@f0000 {
+ watchdog@f0000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0f0000 0x1000>;
interrupts = <0>;
diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
index d859914500a7..5b6d9d8e934d 100644
--- a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
+++ b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
@@ -145,61 +145,6 @@
};
smb: bus@8000000 {
- compatible = "simple-bus";
-
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0 0 0 0x08000000 0x04000000>,
- <1 0 0 0x14000000 0x04000000>,
- <2 0 0 0x18000000 0x04000000>,
- <3 0 0 0x1c000000 0x04000000>,
- <4 0 0 0x0c000000 0x04000000>,
- <5 0 0 0x10000000 0x04000000>;
-
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 63>;
- interrupt-map = <0 0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 1 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 2 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 3 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 4 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 5 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 6 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 7 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 8 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 9 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ ranges = <0x8000000 0 0x8000000 0x18000000>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index 343ecf0e8973..06b36cc65865 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -405,9 +405,9 @@
interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <0>; /* fixed up by bootloader */
clocks = <&clockgen QORIQ_CLK_HWACCEL 1>;
- voltage-ranges = <1800 1800 3300 3300>;
+ voltage-ranges = <1800 1800>;
sdhci,auto-cmd12;
- broken-cd;
+ non-removable;
little-endian;
bus-width = <4>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
index 988f8ab679ad..40f5e7a3b064 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
@@ -91,7 +91,7 @@
#size-cells = <1>;
compatible = "jedec,spi-nor";
spi-max-frequency = <80000000>;
- spi-tx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
index 4e2820d19244..a2b24d4d4e3e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
@@ -48,7 +48,7 @@
#size-cells = <1>;
compatible = "jedec,spi-nor";
spi-max-frequency = <80000000>;
- spi-tx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-som.dtsi
index d0456daefda8..9db9b90bf2bc 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-som.dtsi
@@ -102,6 +102,7 @@
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <950000>;
regulator-boot-on;
+ regulator-always-on;
regulator-ramp-delay = <3125>;
nxp,dvs-run-voltage = <950000>;
nxp,dvs-standby-voltage = <850000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
index 05cb60991fb9..d52686f4c059 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
@@ -647,7 +647,7 @@
pinctrl_hog: hoggrp {
fsl,pins = <
MX8MM_IOMUXC_NAND_CE0_B_GPIO3_IO1 0x40000159 /* M2_GDIS# */
- MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x40000041 /* M2_RST# */
+ MX8MM_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x40000041 /* M2_RST# */
MX8MM_IOMUXC_NAND_DATA01_GPIO3_IO7 0x40000119 /* M2_OFF# */
MX8MM_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x40000159 /* M2_WDIS# */
MX8MM_IOMUXC_SAI1_TXD2_GPIO4_IO14 0x40000041 /* AMP GPIO1 */
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
index 54eaf3d6055b..3b2d627a0342 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
@@ -101,7 +101,7 @@
#size-cells = <1>;
compatible = "jedec,spi-nor";
spi-max-frequency = <80000000>;
- spi-tx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
index e77db4996e58..236f425e1570 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
@@ -633,7 +633,7 @@
pinctrl_hog: hoggrp {
fsl,pins = <
MX8MN_IOMUXC_NAND_CE0_B_GPIO3_IO1 0x40000159 /* M2_GDIS# */
- MX8MN_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x40000041 /* M2_RST# */
+ MX8MN_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x40000041 /* M2_RST# */
MX8MN_IOMUXC_NAND_DATA01_GPIO3_IO7 0x40000119 /* M2_OFF# */
MX8MN_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x40000159 /* M2_WDIS# */
MX8MN_IOMUXC_SAI2_RXFS_GPIO4_IO21 0x40000041 /* APP GPIO1 */
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
index aa78e0d8c72b..fc178eebf8aa 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
@@ -74,7 +74,7 @@
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <80000000>;
- spi-tx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
index 49f9db971f3b..b83df77195ec 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
@@ -337,6 +337,8 @@
#size-cells = <1>;
compatible = "micron,n25q256a", "jedec,spi-nor";
spi-max-frequency = <29000000>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts b/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
index f593e4ff62e1..564746d5000d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
@@ -281,7 +281,7 @@
#address-cells = <1>;
#size-cells = <1>;
reg = <0>;
- spi-tx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
m25p,fast-read;
spi-max-frequency = <50000000>;
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
index a620ac0d0b19..db333001df4d 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
@@ -487,7 +487,6 @@
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
phys = <&qusb_phy_0>, <&usb0_ssphy>;
phy-names = "usb2-phy", "usb3-phy";
- tx-fifo-resize;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
snps,dis_u2_susphy_quirk;
@@ -528,7 +527,6 @@
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
phys = <&qusb_phy_1>, <&usb1_ssphy>;
phy-names = "usb2-phy", "usb3-phy";
- tx-fifo-resize;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
snps,dis_u2_susphy_quirk;
diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
index c566a64b1373..0df76f7b1cc1 100644
--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
@@ -48,8 +48,10 @@
#size-cells = <0>;
pon: power-on@800 {
- compatible = "qcom,pm8916-pon";
+ compatible = "qcom,pm8998-pon";
reg = <0x0800>;
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
pon_pwrkey: pwrkey {
compatible = "qcom,pm8941-pwrkey";
diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
index 8ac96f8e79d4..28d5b5528516 100644
--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
@@ -804,6 +804,16 @@
};
};
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ status = "okay";
+
+ linux,code = <KEY_VOLUMEDOWN>;
+};
+
&qupv3_id_0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
index 0f2b3c00e434..70c88c37de32 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
@@ -273,7 +273,6 @@
"Headphone Jack", "HPOL",
"Headphone Jack", "HPOR";
- #sound-dai-cells = <0>;
#address-cells = <1>;
#size-cells = <0>;
@@ -301,11 +300,11 @@
};
};
- dai-link@2 {
+ dai-link@5 {
link-name = "MultiMedia2";
- reg = <2>;
+ reg = <LPASS_DP_RX>;
cpu {
- sound-dai = <&lpass_cpu 2>;
+ sound-dai = <&lpass_cpu LPASS_DP_RX>;
};
codec {
@@ -782,7 +781,7 @@ hp_i2c: &i2c9 {
qcom,playback-sd-lines = <0>;
};
- hdmi-primary@0 {
+ hdmi@5 {
reg = <LPASS_DP_RX>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index 53a21d086178..fd78f16181dd 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -1850,9 +1850,9 @@
cpufreq_hw: cpufreq@18591000 {
compatible = "qcom,cpufreq-epss";
- reg = <0 0x18591100 0 0x900>,
- <0 0x18592100 0 0x900>,
- <0 0x18593100 0 0x900>;
+ reg = <0 0x18591000 0 0x1000>,
+ <0 0x18592000 0 0x1000>,
+ <0 0x18593000 0 0x1000>;
clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
clock-names = "xo", "alternate";
#freq-domain-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
index 9153e6616ba4..9c7f87e42fcc 100644
--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
@@ -654,9 +654,20 @@
compatible = "qcom,sdm660-a2noc";
reg = <0x01704000 0xc100>;
#interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
+ clock-names = "bus",
+ "bus_a",
+ "ipa",
+ "ufs_axi",
+ "aggre2_ufs_axi",
+ "aggre2_usb3_axi",
+ "cfg_noc_usb2_axi";
clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>,
- <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>;
+ <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>,
+ <&rpmcc RPM_SMD_IPA_CLK>,
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+ <&gcc GCC_CFG_NOC_USB2_AXI_CLK>;
};
mnoc: interconnect@1745000 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 6d7172e6f4c3..b3b911926184 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -128,23 +128,28 @@
no-map;
};
- wlan_msa_mem: memory@8c400000 {
- reg = <0 0x8c400000 0 0x100000>;
+ ipa_fw_mem: memory@8c400000 {
+ reg = <0 0x8c400000 0 0x10000>;
no-map;
};
- gpu_mem: memory@8c515000 {
- reg = <0 0x8c515000 0 0x2000>;
+ ipa_gsi_mem: memory@8c410000 {
+ reg = <0 0x8c410000 0 0x5000>;
no-map;
};
- ipa_fw_mem: memory@8c517000 {
- reg = <0 0x8c517000 0 0x5a000>;
+ gpu_mem: memory@8c415000 {
+ reg = <0 0x8c415000 0 0x2000>;
no-map;
};
- adsp_mem: memory@8c600000 {
- reg = <0 0x8c600000 0 0x1a00000>;
+ adsp_mem: memory@8c500000 {
+ reg = <0 0x8c500000 0 0x1a00000>;
+ no-map;
+ };
+
+ wlan_msa_mem: memory@8df00000 {
+ reg = <0 0x8df00000 0 0x100000>;
no-map;
};
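The carveouts are both moved and resized here. A standalone check (not part of the patch) that the new sdm845 layout is ordered and overlap-free, with wlan_msa_mem starting exactly where adsp_mem ends:

#include <assert.h>

struct carveout { unsigned long base, size; };

int main(void)
{
	/* new layout, ascending order */
	struct carveout r[] = {
		{ 0x8c400000, 0x10000 },	/* ipa_fw_mem   */
		{ 0x8c410000, 0x5000 },		/* ipa_gsi_mem  */
		{ 0x8c415000, 0x2000 },		/* gpu_mem      */
		{ 0x8c500000, 0x1a00000 },	/* adsp_mem     */
		{ 0x8df00000, 0x100000 },	/* wlan_msa_mem */
	};

	for (int i = 1; i < 5; i++)	/* regions must not overlap */
		assert(r[i - 1].base + r[i - 1].size <= r[i].base);
	/* adsp_mem ends flush against wlan_msa_mem */
	assert(r[3].base + r[3].size == r[4].base);
	return 0;
}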
diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
index 385e5029437d..2ba23aa582a1 100644
--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
@@ -16,6 +16,17 @@
#include "sdm850.dtsi"
#include "pm8998.dtsi"
+/*
+ * Override the reserved memory mappings inherited from upstream
+ * (sdm845.dtsi) so that firmware loading succeeds and the IPA
+ * device can be enabled.
+ */
+/delete-node/ &ipa_fw_mem;
+/delete-node/ &ipa_gsi_mem;
+/delete-node/ &gpu_mem;
+/delete-node/ &adsp_mem;
+/delete-node/ &wlan_msa_mem;
+
/ {
model = "Lenovo Yoga C630";
compatible = "lenovo,yoga-c630", "qcom,sdm845";
@@ -58,6 +69,29 @@
};
};
+ /* Reserved memory changes for IPA */
+ reserved-memory {
+ wlan_msa_mem: memory@8c400000 {
+ reg = <0 0x8c400000 0 0x100000>;
+ no-map;
+ };
+
+ gpu_mem: memory@8c515000 {
+ reg = <0 0x8c515000 0 0x2000>;
+ no-map;
+ };
+
+ ipa_fw_mem: memory@8c517000 {
+ reg = <0 0x8c517000 0 0x5a000>;
+ no-map;
+ };
+
+ adsp_mem: memory@8c600000 {
+ reg = <0 0x8c600000 0 0x1a00000>;
+ no-map;
+ };
+ };
+
sn65dsi86_refclk: sn65dsi86-refclk {
compatible = "fixed-clock";
#clock-cells = <0>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 156d96afbbfc..545197bc0501 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -245,7 +245,6 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_USER_HELPER=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_HISILICON_LPC=y
-CONFIG_SIMPLE_PM_BUS=y
CONFIG_FSL_MC_BUS=y
CONFIG_TEGRA_ACONNECT=m
CONFIG_GNSS=m
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 7535dc7cc5aa..bd68e1b7f29f 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -50,9 +50,6 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
#define acpi_os_ioremap acpi_os_ioremap
-void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size);
-#define acpi_os_memmap acpi_os_memmap
-
typedef u64 phys_cpuid_t;
#define PHYS_CPUID_INVALID INVALID_HWID
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 89faca0e740d..bfa58409a4d4 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -525,6 +525,11 @@ alternative_endif
#define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
#endif
+#ifdef CONFIG_KASAN_HW_TAGS
+#define EXPORT_SYMBOL_NOHWKASAN(name)
+#else
+#define EXPORT_SYMBOL_NOHWKASAN(name) EXPORT_SYMBOL_NOKASAN(name)
+#endif
/*
* Emit a 64-bit absolute little endian symbol reference in a way that
* ensures that it will be resolved at build time, even when building a
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 3f93b9e0b339..02511650cffe 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -99,11 +99,17 @@ void mte_check_tfsr_el1(void);
static inline void mte_check_tfsr_entry(void)
{
+ if (!system_supports_mte())
+ return;
+
mte_check_tfsr_el1();
}
static inline void mte_check_tfsr_exit(void)
{
+ if (!system_supports_mte())
+ return;
+
/*
* The asynchronous faults are sync'ed automatically with
* TFSR_EL1 on kernel entry but for exit an explicit dsb()
diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index 3a3264ff47b9..95f7686b728d 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -12,11 +12,13 @@ extern char *strrchr(const char *, int c);
#define __HAVE_ARCH_STRCHR
extern char *strchr(const char *, int c);
+#ifndef CONFIG_KASAN_HW_TAGS
#define __HAVE_ARCH_STRCMP
extern int strcmp(const char *, const char *);
#define __HAVE_ARCH_STRNCMP
extern int strncmp(const char *, const char *, __kernel_size_t);
+#endif
#define __HAVE_ARCH_STRLEN
extern __kernel_size_t strlen(const char *);
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 1c9c2f7a1c04..f3851724fe35 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -273,8 +273,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
return __pgprot(PROT_DEVICE_nGnRnE);
}
-static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
- acpi_size size, bool memory)
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
efi_memory_desc_t *md, *region = NULL;
pgprot_t prot;
@@ -300,11 +299,9 @@ static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
* It is fine for AML to remap regions that are not represented in the
* EFI memory map at all, as it only describes normal memory, and MMIO
* regions that require a virtual mapping to make them accessible to
- * the EFI runtime services. Determine the region default
- * attributes by checking the requested memory semantics.
+ * the EFI runtime services.
*/
- prot = memory ? __pgprot(PROT_NORMAL_NC) :
- __pgprot(PROT_DEVICE_nGnRnE);
+ prot = __pgprot(PROT_DEVICE_nGnRnE);
if (region) {
switch (region->type) {
case EFI_LOADER_CODE:
@@ -364,16 +361,6 @@ static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
return __ioremap(phys, size, prot);
}
-void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
-{
- return __acpi_os_ioremap(phys, size, false);
-}
-
-void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size)
-{
- return __acpi_os_ioremap(phys, size, true);
-}
-
/*
* Claim Synchronous External Aborts as a firmware first notification.
*
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f8a3067d10c6..6ec7036ef7e1 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1526,9 +1526,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
/*
* For reasons that aren't entirely clear, enabling KPTI on Cavium
* ThunderX leads to apparent I-cache corruption of kernel text, which
- * ends as well as you might imagine. Don't even try.
+ * ends as well as you might imagine. Don't even try. We cannot rely
+ * on the cpus_have_*cap() helpers here to detect the CPU erratum
+ * because cpucap detection order may change. However, since we know
+ * affected CPUs are always in a homogeneous configuration, it is
+ * safe to rely on this_cpu_has_cap() here.
*/
- if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+ if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
str = "ARM64_WORKAROUND_CAVIUM_27456";
__kpti_forced = -1;
}
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 5a294f20e9de..ff4962750b3d 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -513,7 +513,7 @@ size_t sve_state_size(struct task_struct const *task)
void sve_alloc(struct task_struct *task)
{
if (task->thread.sve_state) {
- memset(task->thread.sve_state, 0, sve_state_size(current));
+ memset(task->thread.sve_state, 0, sve_state_size(task));
return;
}
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 9d314a3bad3b..e5e801bc5312 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -142,12 +142,7 @@ void mte_enable_kernel_async(void)
#ifdef CONFIG_KASAN_HW_TAGS
void mte_check_tfsr_el1(void)
{
- u64 tfsr_el1;
-
- if (!system_supports_mte())
- return;
-
- tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
+ u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
/*
@@ -199,6 +194,9 @@ void mte_thread_init_user(void)
void mte_thread_switch(struct task_struct *next)
{
+ if (!system_supports_mte())
+ return;
+
mte_update_sctlr_user(next);
/*
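Taken with the mte.h hunk earlier, the system_supports_mte() test moves out of mte_check_tfsr_el1() and into its inline callers. A sketch of the net shape, with real names but abridged bodies:

/* Sketch only: the inline wrapper takes the cheap static-key test,
 * so the sysreg access below never runs on non-MTE hardware and is
 * not re-tested inside the out-of-line slow path. */
static inline void mte_check_tfsr_exit(void)
{
	if (!system_supports_mte())	/* static branch, ~free when off */
		return;
	mte_check_tfsr_el1();		/* now reads SYS_TFSR_EL1 directly */
}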
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 19100fe8f7e4..40adb8cdbf5a 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -18,7 +18,6 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
-#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
@@ -58,7 +57,7 @@
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
+unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 9fe70b12b34f..c287b9407f28 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -940,10 +940,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs);
- if (thread_flags & _TIF_NOTIFY_RESUME) {
+ if (thread_flags & _TIF_NOTIFY_RESUME)
tracehook_notify_resume(regs);
- rseq_handle_notify_resume(NULL, regs);
- }
if (thread_flags & _TIF_FOREIGN_FPSTATE)
fpsimd_restore_current_state();
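The explicit rseq call is dropped (csky and MIPS get the same treatment further down) because the generic tracehook_notify_resume() now performs it, so the arch loop was applying rseq fixups twice. A hedged sketch of the generic helper, body abridged from memory:

/* Sketch of include/linux/tracehook.h at this point in time: */
static inline void tracehook_notify_resume(struct pt_regs *regs)
{
	clear_thread_flag(TIF_NOTIFY_RESUME);
	/* ... task_work_run() and friends ... */
	rseq_handle_notify_resume(NULL, regs);	/* rseq fixups live here now */
}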
diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
index fb0f523d1492..0a048dc06a7d 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/gfp.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
@@ -24,6 +24,7 @@ struct hyp_pool {
/* Allocation */
void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void hyp_split_page(struct hyp_page *page);
void hyp_get_page(struct hyp_pool *pool, void *addr);
void hyp_put_page(struct hyp_pool *pool, void *addr);
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 5df6193fc430..8d741f71377f 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -54,7 +54,7 @@ $(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
# runtime. Because the hypervisor is part of the kernel binary, relocations
# produce a kernel VA. We enumerate relocations targeting hyp at build time
# and convert the kernel VAs at those positions to hyp VAs.
-$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel
+$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel FORCE
$(call if_changed,hyprel)
# 5) Compile hyp-reloc.S and link it into the existing partially linked object.
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index bacd493a4eac..34eeb524b686 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -35,7 +35,18 @@ const u8 pkvm_hyp_id = 1;
static void *host_s2_zalloc_pages_exact(size_t size)
{
- return hyp_alloc_pages(&host_s2_pool, get_order(size));
+ void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
+
+ hyp_split_page(hyp_virt_to_page(addr));
+
+ /*
+ * The size of the concatenated PGDs is always a power-of-two multiple
+ * of PAGE_SIZE, so there should be no need to free any of the tail
+ * pages to make the allocation exact.
+ */
+ WARN_ON(size != (PAGE_SIZE << get_order(size)));
+
+ return addr;
}
static void *host_s2_zalloc_page(void *pool)
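The WARN_ON() encodes the assumption that callers only request power-of-two sizes, for which get_order() is exact. A standalone illustration (simplified re-implementation of get_order(), not the kernel's fls()-based one):

#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* round size up to the smallest order covering it */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	/* power-of-two sizes allocate exactly: no tail pages to free */
	for (unsigned int n = 0; n <= 4; n++)
		assert((PAGE_SIZE << get_order(PAGE_SIZE << n)) ==
		       (PAGE_SIZE << n));
	/* a non-power-of-two size is what the new WARN_ON() would catch */
	assert((PAGE_SIZE << get_order(3 * PAGE_SIZE)) != 3 * PAGE_SIZE);
	return 0;
}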
diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
index 41fc25bdfb34..0bd7701ad1df 100644
--- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
+++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -152,6 +152,7 @@ static inline void hyp_page_ref_inc(struct hyp_page *p)
static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
{
+ BUG_ON(!p->refcount);
p->refcount--;
return (p->refcount == 0);
}
@@ -193,6 +194,20 @@ void hyp_get_page(struct hyp_pool *pool, void *addr)
hyp_spin_unlock(&pool->lock);
}
+void hyp_split_page(struct hyp_page *p)
+{
+ unsigned short order = p->order;
+ unsigned int i;
+
+ p->order = 0;
+ for (i = 1; i < (1 << order); i++) {
+ struct hyp_page *tail = p + i;
+
+ tail->order = 0;
+ hyp_set_page_refcounted(tail);
+ }
+}
+
void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
{
unsigned short i = order;
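A hypothetical usage sketch of the new helper: splitting a high-order block makes every constituent page independently refcounted, so callers can release PAGE_SIZE chunks one at a time.

static void example(struct hyp_pool *pool)
{
	/* one order-2 block: four contiguous pages, refcount on the head */
	void *addr = hyp_alloc_pages(pool, 2);

	if (!addr)
		return;

	/* head and three tails each become refcounted order-0 pages, so
	 * callers may hyp_put_page() every PAGE_SIZE chunk separately */
	hyp_split_page(hyp_virt_to_page(addr));
}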
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 1a94a7ca48f2..69bd1732a299 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1529,8 +1529,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
* when updating the PG_mte_tagged page flag, see
* sanitise_mte_tags for more details.
*/
- if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED)
- return -EINVAL;
+ if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
+ ret = -EINVAL;
+ break;
+ }
if (vma->vm_flags & VM_PFNMAP) {
/* IO region dirty page logging not allowed */
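The early return skipped the mmap read unlock at the end of the function. A generic sketch of the break-to-cleanup idiom the fix adopts (predicate and iterator names hypothetical):

static int walk(struct mm_struct *mm)
{
	int ret = 0;

	mmap_read_lock(mm);
	do {
		if (invalid_vma()) {		/* hypothetical check */
			ret = -EINVAL;
			break;			/* fall through to unlock */
		}
	} while (next_vma());			/* hypothetical iterator */
	mmap_read_unlock(mm);			/* runs on every path */

	return ret;
}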
diff --git a/arch/arm64/kvm/perf.c b/arch/arm64/kvm/perf.c
index f9bb3b14130e..c84fe24b2ea1 100644
--- a/arch/arm64/kvm/perf.c
+++ b/arch/arm64/kvm/perf.c
@@ -50,9 +50,6 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
int kvm_perf_init(void)
{
- if (kvm_pmu_probe_pmuver() != ID_AA64DFR0_PMUVER_IMP_DEF && !is_protected_kvm_enabled())
- static_branch_enable(&kvm_arm_pmu_available);
-
return perf_register_guest_info_callbacks(&kvm_guest_cbs);
}
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index f5065f23b413..2af3c37445e0 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -740,7 +740,14 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
kvm_pmu_create_perf_event(vcpu, select_idx);
}
-int kvm_pmu_probe_pmuver(void)
+void kvm_host_pmu_init(struct arm_pmu *pmu)
+{
+ if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
+ !kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
+ static_branch_enable(&kvm_arm_pmu_available);
+}
+
+static int kvm_pmu_probe_pmuver(void)
{
struct perf_event_attr attr = { };
struct perf_event *event;
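The throwaway perf-event probe at init time is replaced by the host PMU driver announcing each PMU as it registers. A hedged sketch of the assumed call site in the arm_pmu core (not verbatim):

/* Sketch only: each host PMU announces itself on registration. */
static int armpmu_register(struct arm_pmu *pmu)
{
	/* ... perf_pmu_register() etc. ... */
	kvm_host_pmu_init(pmu);	/* may enable kvm_arm_pmu_available */
	return 0;
}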
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index d7bee210a798..83bcad72ec97 100644
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -173,4 +173,4 @@ L(done):
ret
SYM_FUNC_END_PI(strcmp)
-EXPORT_SYMBOL_NOKASAN(strcmp)
+EXPORT_SYMBOL_NOHWKASAN(strcmp)
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
index 48d44f7fddb1..e42bcfcd37e6 100644
--- a/arch/arm64/lib/strncmp.S
+++ b/arch/arm64/lib/strncmp.S
@@ -258,4 +258,4 @@ L(ret0):
ret
SYM_FUNC_END_PI(strncmp)
-EXPORT_SYMBOL_NOKASAN(strncmp)
+EXPORT_SYMBOL_NOHWKASAN(strncmp)
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 23505fc35324..a8158c948966 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -43,7 +43,7 @@ void __init arm64_hugetlb_cma_reserve(void)
#ifdef CONFIG_ARM64_4K_PAGES
order = PUD_SHIFT - PAGE_SHIFT;
#else
- order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
+ order = CONT_PMD_SHIFT - PAGE_SHIFT;
#endif
/*
* HugeTLB CMA reservation is required for gigantic
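CONT_PMD_SHIFT already includes PMD_SHIFT, so the old expression double-counted it and produced an impossible CMA order. Worked numbers for the 64K-page case (constants hedged from the arm64 headers):

#include <assert.h>

int main(void)
{
	/* 64K pages: PMD covers 512M, CONT_PMD groups 32 PMDs = 16G */
	int PAGE_SHIFT = 16, PMD_SHIFT = 29, CONT_PMD_SHIFT = 34;

	int old_order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT; /* 47 (!) */
	int new_order = CONT_PMD_SHIFT - PAGE_SHIFT;             /* 18     */

	assert(old_order == 47 && new_order == 18);
	/* order 18 of 64K pages is exactly the 16G gigantic page size */
	assert((1ULL << (new_order + PAGE_SHIFT)) == (1ULL << CONT_PMD_SHIFT));
	return 0;
}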
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 9d4d898df76b..823d3d5a9e11 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -8,7 +8,7 @@ config CSKY
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_QUEUED_RWLOCKS
- select ARCH_WANT_FRAME_POINTERS if !CPU_CK610
+ select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace)
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select COMMON_CLK
select CLKSRC_MMIO
@@ -241,6 +241,7 @@ endchoice
menuconfig HAVE_TCM
bool "Tightly-Coupled/Sram Memory"
+ depends on !COMPILE_TEST
help
The implementation is not only used by TCM (Tightly-Coupled Memory)
but also by SRAM on the SoC bus. It follows the existing Linux TCM
diff --git a/arch/csky/include/asm/bitops.h b/arch/csky/include/asm/bitops.h
index 91818787d860..02b72a000767 100644
--- a/arch/csky/include/asm/bitops.h
+++ b/arch/csky/include/asm/bitops.h
@@ -74,7 +74,6 @@ static __always_inline unsigned long __fls(unsigned long x)
* bug fix, why only could use atomic!!!!
*/
#include <asm-generic/bitops/non-atomic.h>
-#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
index 0105ac81b432..1a5f54e0d272 100644
--- a/arch/csky/kernel/ptrace.c
+++ b/arch/csky/kernel/ptrace.c
@@ -99,7 +99,8 @@ static int gpr_set(struct task_struct *target,
if (ret)
return ret;
- regs.sr = task_pt_regs(target)->sr;
+ /* BIT(0) of regs.sr is Condition Code/Carry bit */
+ regs.sr = (regs.sr & BIT(0)) | (task_pt_regs(target)->sr & ~BIT(0));
#ifdef CONFIG_CPU_HAS_HILO
regs.dcsr = task_pt_regs(target)->dcsr;
#endif
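This hunk and the signal.c one below apply the same bit-merge idiom so that userspace can influence only bit 0 of SR (the Condition Code/Carry bit) while every privileged bit keeps its kernel-saved value. A standalone illustration:

#include <assert.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned long user_sr   = 0xffffffff;	/* attacker-controlled   */
	unsigned long kernel_sr = 0x80000040;	/* kernel's saved SR     */

	/* take bit 0 from the user value, everything else from the kernel */
	unsigned long sr = (user_sr & BIT(0)) | (kernel_sr & ~BIT(0));

	assert(sr == (kernel_sr | 1));	/* only the carry bit changed */
	return 0;
}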
diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c
index 312f046d452d..c7b763d2f526 100644
--- a/arch/csky/kernel/signal.c
+++ b/arch/csky/kernel/signal.c
@@ -52,10 +52,14 @@ static long restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc)
{
int err = 0;
+ unsigned long sr = regs->sr;
/* sc_pt_regs is structured the same as the start of pt_regs */
err |= __copy_from_user(regs, &sc->sc_pt_regs, sizeof(struct pt_regs));
+ /* BIT(0) of regs->sr is Condition Code/Carry bit */
+ regs->sr = (sr & ~1) | (regs->sr & 1);
+
/* Restore the floating-point state. */
err |= restore_fpu_state(sc);
@@ -260,8 +264,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs);
- if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ if (thread_info_flags & _TIF_NOTIFY_RESUME)
tracehook_notify_resume(regs);
- rseq_handle_notify_resume(NULL, regs);
- }
}
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 045792cde481..1e33666fa679 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -388,8 +388,6 @@ config CRASH_DUMP
help
Generate crash dump after being started by kexec.
-source "drivers/firmware/Kconfig"
-
endmenu
menu "Power management and ACPI options"
diff --git a/arch/m68k/68000/entry.S b/arch/m68k/68000/entry.S
index 259b3661b614..997b54933015 100644
--- a/arch/m68k/68000/entry.S
+++ b/arch/m68k/68000/entry.S
@@ -15,7 +15,6 @@
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/setup.h>
-#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
@@ -25,7 +24,6 @@
.globl system_call
.globl resume
.globl ret_from_exception
-.globl ret_from_signal
.globl sys_call_table
.globl bad_interrupt
.globl inthandler1
@@ -59,8 +57,6 @@ do_trace:
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
jbsr syscall_trace_leave
-
-ret_from_signal:
RESTORE_SWITCH_STACK
addql #4,%sp
jra ret_from_exception
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 774c35f47eea..0b50da08a9c5 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -29,7 +29,6 @@ config M68K
select NO_DMA if !MMU && !COLDFIRE
select OLD_SIGACTION
select OLD_SIGSUSPEND3
- select SET_FS
select UACCESS_MEMCPY if !MMU
select VIRT_TO_BUS
select ZONE_DMA
diff --git a/arch/m68k/coldfire/entry.S b/arch/m68k/coldfire/entry.S
index d43a02795a4a..9f337c70243a 100644
--- a/arch/m68k/coldfire/entry.S
+++ b/arch/m68k/coldfire/entry.S
@@ -31,7 +31,6 @@
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/setup.h>
-#include <asm/segment.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
@@ -51,7 +50,6 @@ sw_usp:
.globl system_call
.globl resume
.globl ret_from_exception
-.globl ret_from_signal
.globl sys_call_table
.globl inthandler
@@ -98,8 +96,6 @@ ENTRY(system_call)
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
jbsr syscall_trace_leave
-
-ret_from_signal:
RESTORE_SWITCH_STACK
addql #4,%sp
diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
index 79e55421cfb1..1a5d1e8eb4c8 100644
--- a/arch/m68k/emu/nfeth.c
+++ b/arch/m68k/emu/nfeth.c
@@ -200,7 +200,7 @@ static struct net_device * __init nfeth_probe(int unit)
dev->irq = nfEtherIRQ;
dev->netdev_ops = &nfeth_netdev_ops;
- memcpy(dev->dev_addr, mac, ETH_ALEN);
+ eth_hw_addr_set(dev, mac);
priv = netdev_priv(dev);
priv->ethX = unit;
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index 3750819ac5a1..f4d82c619a5c 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -9,7 +9,6 @@
#define __ASM_M68K_PROCESSOR_H
#include <linux/thread_info.h>
-#include <asm/segment.h>
#include <asm/fpu.h>
#include <asm/ptrace.h>
@@ -75,11 +74,37 @@ static inline void wrusp(unsigned long usp)
#define TASK_UNMAPPED_BASE 0
#endif
+/* Address spaces (or Function Codes in Motorola lingo) */
+#define USER_DATA 1
+#define USER_PROGRAM 2
+#define SUPER_DATA 5
+#define SUPER_PROGRAM 6
+#define CPU_SPACE 7
+
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
+/*
+ * Set the SFC/DFC registers for special MM operations. For most normal
+ * operation these remain set to USER_DATA for the uaccess routines.
+ */
+static inline void set_fc(unsigned long val)
+{
+ WARN_ON_ONCE(in_interrupt());
+
+ __asm__ __volatile__ ("movec %0,%/sfc\n\t"
+ "movec %0,%/dfc\n\t"
+ : /* no outputs */ : "r" (val) : "memory");
+}
+#else
+static inline void set_fc(unsigned long val)
+{
+}
+#endif /* CONFIG_CPU_HAS_ADDRESS_SPACES */
+
struct thread_struct {
unsigned long ksp; /* kernel stack pointer */
unsigned long usp; /* user stack pointer */
unsigned short sr; /* saved status register */
- unsigned short fs; /* saved fs (sfc, dfc) */
+ unsigned short fc; /* saved fc (sfc, dfc) */
unsigned long crp[2]; /* cpu root pointer */
unsigned long esp0; /* points to SR of stack frame */
unsigned long faddr; /* info about last fault */
@@ -92,7 +117,7 @@ struct thread_struct {
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) init_stack, \
.sr = PS_S, \
- .fs = __KERNEL_DS, \
+ .fc = USER_DATA, \
}
/*
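set_fc() replaces the get_fs()/set_fs() pairs throughout the rest of this series: callers now select the address space for MOVES/PTEST and afterwards return unconditionally to USER_DATA, the steady-state value, rather than saving and restoring whatever was there before. A sketch of the pattern (function name hypothetical, body elided):

static void flush_kernel_mapping_040(void *addr)
{
	set_fc(SUPER_DATA);	/* operate on supervisor space */
	/* ... pflush/ptest on addr ... */
	set_fc(USER_DATA);	/* always back to the default  */
}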
diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h
deleted file mode 100644
index 2b5e68a71ef7..000000000000
--- a/arch/m68k/include/asm/segment.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _M68K_SEGMENT_H
-#define _M68K_SEGMENT_H
-
-/* define constants */
-/* Address spaces (FC0-FC2) */
-#define USER_DATA (1)
-#ifndef __USER_DS
-#define __USER_DS (USER_DATA)
-#endif
-#define USER_PROGRAM (2)
-#define SUPER_DATA (5)
-#ifndef __KERNEL_DS
-#define __KERNEL_DS (SUPER_DATA)
-#endif
-#define SUPER_PROGRAM (6)
-#define CPU_SPACE (7)
-
-#ifndef __ASSEMBLY__
-
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
-/*
- * Get/set the SFC/DFC registers for MOVES instructions
- */
-#define USER_DS MAKE_MM_SEG(__USER_DS)
-#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS)
-
-static inline mm_segment_t get_fs(void)
-{
- mm_segment_t _v;
- __asm__ ("movec %/dfc,%0":"=r" (_v.seg):);
- return _v;
-}
-
-static inline void set_fs(mm_segment_t val)
-{
- __asm__ __volatile__ ("movec %0,%/sfc\n\t"
- "movec %0,%/dfc\n\t"
- : /* no outputs */ : "r" (val.seg) : "memory");
-}
-
-#else
-#define USER_DS MAKE_MM_SEG(TASK_SIZE)
-#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-#endif
-
-#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _M68K_SEGMENT_H */
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 15a757073fa5..c952658ba792 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -4,7 +4,6 @@
#include <asm/types.h>
#include <asm/page.h>
-#include <asm/segment.h>
/*
* On machines with 4k pages we default to an 8k thread size, though we
@@ -27,7 +26,6 @@
struct thread_info {
struct task_struct *task; /* main task structure */
unsigned long flags;
- mm_segment_t addr_limit; /* thread address space */
int preempt_count; /* 0 => preemptable, <0 => BUG */
__u32 cpu; /* should always be 0 on m68k */
unsigned long tp_value; /* thread pointer */
@@ -37,7 +35,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .addr_limit = KERNEL_DS, \
.preempt_count = INIT_PREEMPT_COUNT, \
}
diff --git a/arch/m68k/include/asm/tlbflush.h b/arch/m68k/include/asm/tlbflush.h
index a6318ccd308f..b882e2f4f551 100644
--- a/arch/m68k/include/asm/tlbflush.h
+++ b/arch/m68k/include/asm/tlbflush.h
@@ -13,13 +13,12 @@ static inline void flush_tlb_kernel_page(void *addr)
if (CPU_IS_COLDFIRE) {
mmu_write(MMUOR, MMUOR_CNL);
} else if (CPU_IS_040_OR_060) {
- mm_segment_t old_fs = get_fs();
- set_fs(KERNEL_DS);
+ set_fc(SUPER_DATA);
__asm__ __volatile__(".chip 68040\n\t"
"pflush (%0)\n\t"
".chip 68k"
: : "a" (addr));
- set_fs(old_fs);
+ set_fc(USER_DATA);
} else if (CPU_IS_020_OR_030)
__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}
@@ -84,12 +83,8 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
- if (vma->vm_mm == current->active_mm) {
- mm_segment_t old_fs = force_uaccess_begin();
-
+ if (vma->vm_mm == current->active_mm)
__flush_tlb_one(addr);
- force_uaccess_end(old_fs);
- }
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
diff --git a/arch/m68k/include/asm/traps.h b/arch/m68k/include/asm/traps.h
index 4aff3358fbaf..a9d5c1c870d3 100644
--- a/arch/m68k/include/asm/traps.h
+++ b/arch/m68k/include/asm/traps.h
@@ -267,6 +267,10 @@ struct frame {
} un;
};
+#ifdef CONFIG_M68040
+asmlinkage void berr_040cleanup(struct frame *fp);
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _M68K_TRAPS_H */
diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uaccess.h
index f98208ccbbcd..ba670523885c 100644
--- a/arch/m68k/include/asm/uaccess.h
+++ b/arch/m68k/include/asm/uaccess.h
@@ -9,13 +9,16 @@
*/
#include <linux/compiler.h>
#include <linux/types.h>
-#include <asm/segment.h>
#include <asm/extable.h>
/* We let the MMU do all checking */
static inline int access_ok(const void __user *addr,
unsigned long size)
{
+ /*
+ * XXX: for !CONFIG_CPU_HAS_ADDRESS_SPACES this really needs to check
+ * for TASK_SIZE!
+ */
return 1;
}
@@ -35,12 +38,9 @@ static inline int access_ok(const void __user *addr,
#define MOVES "move"
#endif
-extern int __put_user_bad(void);
-extern int __get_user_bad(void);
-
-#define __put_user_asm(res, x, ptr, bwl, reg, err) \
+#define __put_user_asm(inst, res, x, ptr, bwl, reg, err) \
asm volatile ("\n" \
- "1: "MOVES"."#bwl" %2,%1\n" \
+ "1: "inst"."#bwl" %2,%1\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .even\n" \
@@ -56,6 +56,31 @@ asm volatile ("\n" \
: "+d" (res), "=m" (*(ptr)) \
: #reg (x), "i" (err))
+#define __put_user_asm8(inst, res, x, ptr) \
+do { \
+ const void *__pu_ptr = (const void __force *)(ptr); \
+ \
+ asm volatile ("\n" \
+ "1: "inst".l %2,(%1)+\n" \
+ "2: "inst".l %R2,(%1)\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: movel %3,%0\n" \
+ " jra 3b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .long 2b,10b\n" \
+ " .long 3b,10b\n" \
+ " .previous" \
+ : "+d" (res), "+a" (__pu_ptr) \
+ : "r" (x), "i" (-EFAULT) \
+ : "memory"); \
+} while (0)
+
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
@@ -68,51 +93,29 @@ asm volatile ("\n" \
__chk_user_ptr(ptr); \
switch (sizeof (*(ptr))) { \
case 1: \
- __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
+ __put_user_asm(MOVES, __pu_err, __pu_val, ptr, b, d, -EFAULT); \
break; \
case 2: \
- __put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT); \
+ __put_user_asm(MOVES, __pu_err, __pu_val, ptr, w, r, -EFAULT); \
break; \
case 4: \
- __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
+ __put_user_asm(MOVES, __pu_err, __pu_val, ptr, l, r, -EFAULT); \
break; \
case 8: \
- { \
- const void __user *__pu_ptr = (ptr); \
- asm volatile ("\n" \
- "1: "MOVES".l %2,(%1)+\n" \
- "2: "MOVES".l %R2,(%1)\n" \
- "3:\n" \
- " .section .fixup,\"ax\"\n" \
- " .even\n" \
- "10: movel %3,%0\n" \
- " jra 3b\n" \
- " .previous\n" \
- "\n" \
- " .section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,10b\n" \
- " .long 2b,10b\n" \
- " .long 3b,10b\n" \
- " .previous" \
- : "+d" (__pu_err), "+a" (__pu_ptr) \
- : "r" (__pu_val), "i" (-EFAULT) \
- : "memory"); \
+ __put_user_asm8(MOVES, __pu_err, __pu_val, ptr); \
break; \
- } \
default: \
- __pu_err = __put_user_bad(); \
- break; \
+ BUILD_BUG(); \
} \
__pu_err; \
})
#define put_user(x, ptr) __put_user(x, ptr)
-#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
+#define __get_user_asm(inst, res, x, ptr, type, bwl, reg, err) ({ \
type __gu_val; \
asm volatile ("\n" \
- "1: "MOVES"."#bwl" %2,%1\n" \
+ "1: "inst"."#bwl" %2,%1\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .even\n" \
@@ -130,53 +133,57 @@ asm volatile ("\n" \
(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \
})
+#define __get_user_asm8(inst, res, x, ptr) \
+do { \
+ const void *__gu_ptr = (const void __force *)(ptr); \
+ union { \
+ u64 l; \
+ __typeof__(*(ptr)) t; \
+ } __gu_val; \
+ \
+ asm volatile ("\n" \
+ "1: "inst".l (%2)+,%1\n" \
+ "2: "inst".l (%2),%R1\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: move.l %3,%0\n" \
+ " sub.l %1,%1\n" \
+ " sub.l %R1,%R1\n" \
+ " jra 3b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .long 2b,10b\n" \
+ " .previous" \
+ : "+d" (res), "=&r" (__gu_val.l), \
+ "+a" (__gu_ptr) \
+ : "i" (-EFAULT) \
+ : "memory"); \
+ (x) = __gu_val.t; \
+} while (0)
+
#define __get_user(x, ptr) \
({ \
int __gu_err = 0; \
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
- __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \
+ __get_user_asm(MOVES, __gu_err, x, ptr, u8, b, d, -EFAULT); \
break; \
case 2: \
- __get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT); \
+ __get_user_asm(MOVES, __gu_err, x, ptr, u16, w, r, -EFAULT); \
break; \
case 4: \
- __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \
+ __get_user_asm(MOVES, __gu_err, x, ptr, u32, l, r, -EFAULT); \
break; \
- case 8: { \
- const void __user *__gu_ptr = (ptr); \
- union { \
- u64 l; \
- __typeof__(*(ptr)) t; \
- } __gu_val; \
- asm volatile ("\n" \
- "1: "MOVES".l (%2)+,%1\n" \
- "2: "MOVES".l (%2),%R1\n" \
- "3:\n" \
- " .section .fixup,\"ax\"\n" \
- " .even\n" \
- "10: move.l %3,%0\n" \
- " sub.l %1,%1\n" \
- " sub.l %R1,%R1\n" \
- " jra 3b\n" \
- " .previous\n" \
- "\n" \
- " .section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,10b\n" \
- " .long 2b,10b\n" \
- " .previous" \
- : "+d" (__gu_err), "=&r" (__gu_val.l), \
- "+a" (__gu_ptr) \
- : "i" (-EFAULT) \
- : "memory"); \
- (x) = __gu_val.t; \
+ case 8: \
+ __get_user_asm8(MOVES, __gu_err, x, ptr); \
break; \
- } \
default: \
- __gu_err = __get_user_bad(); \
- break; \
+ BUILD_BUG(); \
} \
__gu_err; \
})
@@ -322,16 +329,19 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
switch (n) {
case 1:
- __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
+ __put_user_asm(MOVES, res, *(u8 *)from, (u8 __user *)to,
+ b, d, 1);
break;
case 2:
- __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2);
+ __put_user_asm(MOVES, res, *(u16 *)from, (u16 __user *)to,
+ w, r, 2);
break;
case 3:
__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
break;
case 4:
- __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
+ __put_user_asm(MOVES, res, *(u32 *)from, (u32 __user *)to,
+ l, r, 4);
break;
case 5:
__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
@@ -380,8 +390,65 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
-#define user_addr_max() \
- (uaccess_kernel() ? ~0UL : TASK_SIZE)
+#define HAVE_GET_KERNEL_NOFAULT
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ type *__gk_dst = (type *)(dst); \
+ type *__gk_src = (type *)(src); \
+ int __gk_err = 0; \
+ \
+ switch (sizeof(type)) { \
+ case 1: \
+ __get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
+ u8, b, d, -EFAULT); \
+ break; \
+ case 2: \
+ __get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
+ u16, w, r, -EFAULT); \
+ break; \
+ case 4: \
+ __get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
+ u32, l, r, -EFAULT); \
+ break; \
+ case 8: \
+ __get_user_asm8("move", __gk_err, *__gk_dst, __gk_src); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ if (unlikely(__gk_err)) \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ type __pk_src = *(type *)(src); \
+ type *__pk_dst = (type *)(dst); \
+ int __pk_err = 0; \
+ \
+ switch (sizeof(type)) { \
+ case 1: \
+ __put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
+ b, d, -EFAULT); \
+ break; \
+ case 2: \
+ __put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
+ w, r, -EFAULT); \
+ break; \
+ case 4: \
+ __put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
+ l, r, -EFAULT); \
+ break; \
+ case 8: \
+ __put_user_asm8("move", __pk_err, __pk_src, __pk_dst); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ if (unlikely(__pk_err)) \
+ goto err_label; \
+} while (0)
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
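With HAVE_GET_KERNEL_NOFAULT defined, the generic copy_{from,to}_kernel_nofault() helpers route through these macros. A hypothetical caller sketch showing the goto-on-fault contract:

/* Sketch: read a kernel long without faulting; a bad address
 * branches to the label instead of oopsing. */
static int peek_kernel_long(const void *src, long *dst)
{
	__get_kernel_nofault(dst, src, long, Efault);
	return 0;
Efault:
	return -EFAULT;
}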
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index ccea355052ef..906d73230537 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -31,7 +31,7 @@ int main(void)
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
- DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
+ DEFINE(THREAD_FC, offsetof(struct thread_struct, fc));
DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 9dd76fbb7c6b..9434fca68de5 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -36,7 +36,6 @@
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
-#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
@@ -78,20 +77,38 @@ ENTRY(__sys_clone3)
ENTRY(sys_sigreturn)
SAVE_SWITCH_STACK
- movel %sp,%sp@- | switch_stack pointer
- pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
+ movel %sp,%a1 | switch_stack pointer
+ lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
+ lea %sp@(-84),%sp | leave a gap
+ movel %a1,%sp@-
+ movel %a0,%sp@-
jbsr do_sigreturn
- addql #8,%sp
- RESTORE_SWITCH_STACK
- rts
+ jra 1f | shared with rt_sigreturn()
ENTRY(sys_rt_sigreturn)
SAVE_SWITCH_STACK
- movel %sp,%sp@- | switch_stack pointer
- pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
+ movel %sp,%a1 | switch_stack pointer
+ lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
+ lea %sp@(-84),%sp | leave a gap
+ movel %a1,%sp@-
+ movel %a0,%sp@-
+ | stack contents:
+ | [original pt_regs address] [original switch_stack address]
+ | [gap] [switch_stack] [pt_regs] [exception frame]
jbsr do_rt_sigreturn
- addql #8,%sp
+
+1:
+ | stack contents now:
+ | [original pt_regs address] [original switch_stack address]
+ | [unused part of the gap] [moved switch_stack] [moved pt_regs]
+ | [replacement exception frame]
+ | return value of do_{rt_,}sigreturn() points to moved switch_stack.
+
+ movel %d0,%sp | discard the leftover junk
RESTORE_SWITCH_STACK
+ | stack contents now is just [syscall return address] [pt_regs] [frame]
+ | return pt_regs.d0
+ movel %sp@(PT_OFF_D0+4),%d0
rts
ENTRY(buserr)
@@ -182,25 +199,6 @@ do_trace_exit:
addql #4,%sp
jra .Lret_from_exception
-ENTRY(ret_from_signal)
- movel %curptr@(TASK_STACK),%a1
- tstb %a1@(TINFO_FLAGS+2)
- jge 1f
- jbsr syscall_trace
-1: RESTORE_SWITCH_STACK
- addql #4,%sp
-/* on 68040 complete pending writebacks if any */
-#ifdef CONFIG_M68040
- bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
- subql #7,%d0 | bus error frame ?
- jbne 1f
- movel %sp,%sp@-
- jbsr berr_040cleanup
- addql #4,%sp
-1:
-#endif
- jra .Lret_from_exception
-
ENTRY(system_call)
SAVE_ALL_SYS
@@ -338,7 +336,7 @@ resume:
/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
movec %sfc,%d0
- movew %d0,%a0@(TASK_THREAD+THREAD_FS)
+ movew %d0,%a0@(TASK_THREAD+THREAD_FC)
/* save usp */
/* it is better to use a movel here instead of a movew 8*) */
@@ -424,7 +422,7 @@ resume:
movel %a0,%usp
/* restore fs (sfc,%dfc) */
- movew %a1@(TASK_THREAD+THREAD_FS),%a0
+ movew %a1@(TASK_THREAD+THREAD_FC),%a0
movec %a0,%sfc
movec %a0,%dfc
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index db49f9091711..1ab692b952cd 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -92,7 +92,7 @@ void show_regs(struct pt_regs * regs)
void flush_thread(void)
{
- current->thread.fs = __USER_DS;
+ current->thread.fc = USER_DATA;
#ifdef CONFIG_FPU
if (!FPU_IS_EMU) {
unsigned long zero = 0;
@@ -155,7 +155,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
* Must save the current SFC/DFC value, NOT the value when
* the parent was last descheduled - RGH 10-08-96
*/
- p->thread.fs = get_fs().seg;
+ p->thread.fc = USER_DATA;
if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
/* kernel thread */
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 8f215e79e70e..338817d0cb3f 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -447,7 +447,7 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
fpu_version = sc->sc_fpstate[0];
- if (CPU_IS_020_OR_030 &&
+ if (CPU_IS_020_OR_030 && !regs->stkadj &&
regs->vector >= (VEC_FPBRUC * 4) &&
regs->vector <= (VEC_FPNAN * 4)) {
/* Clear pending exception in 68882 idle frame */
@@ -510,7 +510,7 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
context_size = fpstate[1];
fpu_version = fpstate[0];
- if (CPU_IS_020_OR_030 &&
+ if (CPU_IS_020_OR_030 && !regs->stkadj &&
regs->vector >= (VEC_FPBRUC * 4) &&
regs->vector <= (VEC_FPNAN * 4)) {
/* Clear pending exception in 68882 idle frame */
@@ -641,56 +641,35 @@ static inline void siginfo_build_tests(void)
static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
void __user *fp)
{
- int fsize = frame_extra_sizes(formatvec >> 12);
- if (fsize < 0) {
+ int extra = frame_extra_sizes(formatvec >> 12);
+ char buf[sizeof_field(struct frame, un)];
+
+ if (extra < 0) {
/*
* user process trying to return with weird frame format
*/
pr_debug("user process returning with weird frame format\n");
- return 1;
+ return -1;
}
- if (!fsize) {
- regs->format = formatvec >> 12;
- regs->vector = formatvec & 0xfff;
- } else {
- struct switch_stack *sw = (struct switch_stack *)regs - 1;
- /* yes, twice as much as max(sizeof(frame.un.fmt<x>)) */
- unsigned long buf[sizeof_field(struct frame, un) / 2];
-
- /* that'll make sure that expansion won't crap over data */
- if (copy_from_user(buf + fsize / 4, fp, fsize))
- return 1;
-
- /* point of no return */
- regs->format = formatvec >> 12;
- regs->vector = formatvec & 0xfff;
-#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
- __asm__ __volatile__ (
-#ifdef CONFIG_COLDFIRE
- " movel %0,%/sp\n\t"
- " bra ret_from_signal\n"
-#else
- " movel %0,%/a0\n\t"
- " subl %1,%/a0\n\t" /* make room on stack */
- " movel %/a0,%/sp\n\t" /* set stack pointer */
- /* move switch_stack and pt_regs */
- "1: movel %0@+,%/a0@+\n\t"
- " dbra %2,1b\n\t"
- " lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
- " lsrl #2,%1\n\t"
- " subql #1,%1\n\t"
- /* copy to the gap we'd made */
- "2: movel %4@+,%/a0@+\n\t"
- " dbra %1,2b\n\t"
- " bral ret_from_signal\n"
+ if (extra && copy_from_user(buf, fp, extra))
+ return -1;
+ regs->format = formatvec >> 12;
+ regs->vector = formatvec & 0xfff;
+ if (extra) {
+ void *p = (struct switch_stack *)regs - 1;
+ struct frame *new = (void *)regs - extra;
+ int size = sizeof(struct pt_regs)+sizeof(struct switch_stack);
+
+ memmove(p - extra, p, size);
+ memcpy(p - extra + size, buf, extra);
+ current->thread.esp0 = (unsigned long)&new->ptregs;
+#ifdef CONFIG_M68040
+ /* on 68040 complete pending writebacks if any */
+ if (new->ptregs.format == 7) // bus error frame
+ berr_040cleanup(new);
#endif
- : /* no outputs, it doesn't ever return */
- : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
- "n" (frame_offset), "a" (buf + fsize/4)
- : "a0");
-#undef frame_offset
}
- return 0;
+ return extra;
}
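The asm trampoline that used to rebuild the stack is replaced by plain memmove()/memcpy(): the kernel stack grows downward, so sliding switch_stack and pt_regs 'extra' bytes lower opens a gap exactly where the longer exception frame's tail belongs. A simplified standalone schematic:

#include <string.h>

/* p points at switch_stack; size covers switch_stack + pt_regs */
static void expand_frame(char *p, size_t size, const char *buf, size_t extra)
{
	memmove(p - extra, p, size);		/* slide both structs down  */
	memcpy(p - extra + size, buf, extra);	/* splice in the saved tail */
}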
static inline int
@@ -698,7 +677,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
{
int formatvec;
struct sigcontext context;
- int err = 0;
siginfo_build_tests();
@@ -707,7 +685,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
/* get previous context */
if (copy_from_user(&context, usc, sizeof(context)))
- goto badframe;
+ return -1;
/* restore passed registers */
regs->d0 = context.sc_d0;
@@ -720,15 +698,10 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
wrusp(context.sc_usp);
formatvec = context.sc_formatvec;
- err = restore_fpu_state(&context);
+ if (restore_fpu_state(&context))
+ return -1;
- if (err || mangle_kernel_stack(regs, formatvec, fp))
- goto badframe;
-
- return 0;
-
-badframe:
- return 1;
+ return mangle_kernel_stack(regs, formatvec, fp);
}
static inline int
@@ -745,7 +718,7 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
err = __get_user(temp, &uc->uc_mcontext.version);
if (temp != MCONTEXT_VERSION)
- goto badframe;
+ return -1;
/* restore passed registers */
err |= __get_user(regs->d0, &gregs[0]);
err |= __get_user(regs->d1, &gregs[1]);
@@ -774,22 +747,17 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
err |= restore_altstack(&uc->uc_stack);
if (err)
- goto badframe;
+ return -1;
- if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
- goto badframe;
-
- return 0;
-
-badframe:
- return 1;
+ return mangle_kernel_stack(regs, temp, &uc->uc_extra);
}
-asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
+asmlinkage void *do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
{
unsigned long usp = rdusp();
struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
sigset_t set;
+ int size;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
@@ -801,20 +769,22 @@ asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
set_current_blocked(&set);
- if (restore_sigcontext(regs, &frame->sc, frame + 1))
+ size = restore_sigcontext(regs, &frame->sc, frame + 1);
+ if (size < 0)
goto badframe;
- return regs->d0;
+ return (void *)sw - size;
badframe:
force_sig(SIGSEGV);
- return 0;
+ return sw;
}
-asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
+asmlinkage void *do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
{
unsigned long usp = rdusp();
struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
sigset_t set;
+ int size;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
@@ -823,27 +793,34 @@ asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
set_current_blocked(&set);
- if (rt_restore_ucontext(regs, sw, &frame->uc))
+ size = rt_restore_ucontext(regs, sw, &frame->uc);
+ if (size < 0)
goto badframe;
- return regs->d0;
+ return (void *)sw - size;
badframe:
force_sig(SIGSEGV);
- return 0;
+ return sw;
+}
+
+static inline struct pt_regs *rte_regs(struct pt_regs *regs)
+{
+ return (void *)regs + regs->stkadj;
}
static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
unsigned long mask)
{
+ struct pt_regs *tregs = rte_regs(regs);
sc->sc_mask = mask;
sc->sc_usp = rdusp();
sc->sc_d0 = regs->d0;
sc->sc_d1 = regs->d1;
sc->sc_a0 = regs->a0;
sc->sc_a1 = regs->a1;
- sc->sc_sr = regs->sr;
- sc->sc_pc = regs->pc;
- sc->sc_formatvec = regs->format << 12 | regs->vector;
+ sc->sc_sr = tregs->sr;
+ sc->sc_pc = tregs->pc;
+ sc->sc_formatvec = tregs->format << 12 | tregs->vector;
save_a5_state(sc, regs);
save_fpu_state(sc, regs);
}
@@ -851,6 +828,7 @@ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
{
struct switch_stack *sw = (struct switch_stack *)regs - 1;
+ struct pt_regs *tregs = rte_regs(regs);
greg_t __user *gregs = uc->uc_mcontext.gregs;
int err = 0;
@@ -871,9 +849,9 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
err |= __put_user(sw->a5, &gregs[13]);
err |= __put_user(sw->a6, &gregs[14]);
err |= __put_user(rdusp(), &gregs[15]);
- err |= __put_user(regs->pc, &gregs[16]);
- err |= __put_user(regs->sr, &gregs[17]);
- err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
+ err |= __put_user(tregs->pc, &gregs[16]);
+ err |= __put_user(tregs->sr, &gregs[17]);
+ err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec);
err |= rt_save_fpu_state(uc, regs);
return err;
}
@@ -890,13 +868,14 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct sigframe __user *frame;
- int fsize = frame_extra_sizes(regs->format);
+ struct pt_regs *tregs = rte_regs(regs);
+ int fsize = frame_extra_sizes(tregs->format);
struct sigcontext context;
int err = 0, sig = ksig->sig;
if (fsize < 0) {
pr_debug("setup_frame: Unknown frame format %#x\n",
- regs->format);
+ tregs->format);
return -EFAULT;
}
@@ -907,7 +886,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
err |= __put_user(sig, &frame->sig);
- err |= __put_user(regs->vector, &frame->code);
+ err |= __put_user(tregs->vector, &frame->code);
err |= __put_user(&frame->sc, &frame->psc);
if (_NSIG_WORDS > 1)
@@ -934,33 +913,27 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
push_cache ((unsigned long) &frame->retcode);
/*
- * Set up registers for signal handler. All the state we are about
- * to destroy is successfully copied to sigframe.
- */
- wrusp ((unsigned long) frame);
- regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
- adjustformat(regs);
-
- /*
* This is subtle; if we build more than one sigframe, all but the
* first one will see frame format 0 and have fsize == 0, so we won't
* screw stkadj.
*/
- if (fsize)
+ if (fsize) {
regs->stkadj = fsize;
-
- /* Prepare to skip over the extra stuff in the exception frame. */
- if (regs->stkadj) {
- struct pt_regs *tregs =
- (struct pt_regs *)((ulong)regs + regs->stkadj);
+ tregs = rte_regs(regs);
pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
- /* This must be copied with decreasing addresses to
- handle overlaps. */
tregs->vector = 0;
tregs->format = 0;
- tregs->pc = regs->pc;
tregs->sr = regs->sr;
}
+
+ /*
+ * Set up registers for signal handler. All the state we are about
+ * to destroy is successfully copied to sigframe.
+ */
+ wrusp ((unsigned long) frame);
+ tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+ adjustformat(regs);
+
return 0;
}
@@ -968,7 +941,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
- int fsize = frame_extra_sizes(regs->format);
+ struct pt_regs *tregs = rte_regs(regs);
+ int fsize = frame_extra_sizes(tregs->format);
int err = 0, sig = ksig->sig;
if (fsize < 0) {
@@ -1019,33 +993,26 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
push_cache ((unsigned long) &frame->retcode);
/*
- * Set up registers for signal handler. All the state we are about
- * to destroy is successfully copied to sigframe.
- */
- wrusp ((unsigned long) frame);
- regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
- adjustformat(regs);
-
- /*
* This is subtle; if we build more than one sigframe, all but the
* first one will see frame format 0 and have fsize == 0, so we won't
* screw stkadj.
*/
- if (fsize)
+ if (fsize) {
regs->stkadj = fsize;
-
- /* Prepare to skip over the extra stuff in the exception frame. */
- if (regs->stkadj) {
- struct pt_regs *tregs =
- (struct pt_regs *)((ulong)regs + regs->stkadj);
+ tregs = rte_regs(regs);
pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
- /* This must be copied with decreasing addresses to
- handle overlaps. */
tregs->vector = 0;
tregs->format = 0;
- tregs->pc = regs->pc;
tregs->sr = regs->sr;
}
+
+ /*
+ * Set up registers for signal handler. All the state we are about
+ * to destroy is successfully copied to sigframe.
+ */
+ wrusp ((unsigned long) frame);
+ tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+ adjustformat(regs);
return 0;
}
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 5b19fcdcd69e..9718ce94cc84 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -181,9 +181,8 @@ static inline void access_error060 (struct frame *fp)
static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
{
unsigned long mmusr;
- mm_segment_t old_fs = get_fs();
- set_fs(MAKE_MM_SEG(wbs));
+ set_fc(wbs);
if (iswrite)
asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
@@ -192,7 +191,7 @@ static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
- set_fs(old_fs);
+ set_fc(USER_DATA);
return mmusr;
}
@@ -201,10 +200,8 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
unsigned long wbd)
{
int res = 0;
- mm_segment_t old_fs = get_fs();
- /* set_fs can not be moved, otherwise put_user() may oops */
- set_fs(MAKE_MM_SEG(wbs));
+ set_fc(wbs);
switch (wbs & WBSIZ_040) {
case BA_SIZE_BYTE:
@@ -218,9 +215,7 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
break;
}
- /* set_fs can not be moved, otherwise put_user() may oops */
- set_fs(old_fs);
-
+ set_fc(USER_DATA);
pr_debug("do_040writeback1, res=%d\n", res);
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 90f4e9ca1276..4fab34791758 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -18,7 +18,6 @@
#include <linux/uaccess.h>
#include <asm/io.h>
-#include <asm/segment.h>
#include <asm/setup.h>
#include <asm/macintosh.h>
#include <asm/mac_via.h>
diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c
index b486c0889eec..dde978e66f14 100644
--- a/arch/m68k/mm/cache.c
+++ b/arch/m68k/mm/cache.c
@@ -49,24 +49,7 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
if (mmusr & MMU_R_040)
return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
} else {
- unsigned short mmusr;
- unsigned long *descaddr;
-
- asm volatile ("ptestr %3,%2@,#7,%0\n\t"
- "pmove %%psr,%1"
- : "=a&" (descaddr), "=m" (mmusr)
- : "a" (vaddr), "d" (get_fs().seg));
- if (mmusr & (MMU_I|MMU_B|MMU_L))
- return 0;
- descaddr = phys_to_virt((unsigned long)descaddr);
- switch (mmusr & MMU_NUM) {
- case 1:
- return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
- case 2:
- return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
- case 3:
- return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
- }
+ WARN_ON_ONCE(!CPU_IS_040_OR_060);
}
return 0;
}
@@ -107,11 +90,9 @@ void flush_icache_user_range(unsigned long address, unsigned long endaddr)
void flush_icache_range(unsigned long address, unsigned long endaddr)
{
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
+ set_fc(SUPER_DATA);
flush_icache_user_range(address, endaddr);
- set_fs(old_fs);
+ set_fc(USER_DATA);
}
EXPORT_SYMBOL(flush_icache_range);
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 5d749e188246..1b47bec15832 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -72,12 +72,6 @@ void __init paging_init(void)
if (!empty_zero_page)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
-
- /*
- * Set up SFC/DFC registers (user data space).
- */
- set_fs (USER_DS);
-
max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
free_area_init(max_zone_pfn);
}
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 1269d513b221..20ddf71b43d0 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -17,7 +17,6 @@
#include <linux/vmalloc.h>
#include <asm/setup.h>
-#include <asm/segment.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index fe75aecfb238..c2c03b0a1567 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -15,7 +15,6 @@
#include <linux/gfp.h>
#include <asm/setup.h>
-#include <asm/segment.h>
#include <asm/page.h>
#include <asm/traps.h>
#include <asm/machdep.h>
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 3a653f0a4188..9f3f77785aa7 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -467,7 +467,7 @@ void __init paging_init(void)
/*
* Set up SFC/DFC registers
*/
- set_fs(KERNEL_DS);
+ set_fc(USER_DATA);
#ifdef DEBUG
printk ("before free_area_init\n");
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index f7dd47232b6c..203f428a0344 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -31,7 +31,6 @@
#include <asm/intersil.h>
#include <asm/irq.h>
#include <asm/sections.h>
-#include <asm/segment.h>
#include <asm/sun3ints.h>
char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
@@ -89,7 +88,7 @@ void __init sun3_init(void)
sun3_reserved_pmeg[249] = 1;
sun3_reserved_pmeg[252] = 1;
sun3_reserved_pmeg[253] = 1;
- set_fs(KERNEL_DS);
+ set_fc(USER_DATA);
}
/* Without this, Bad Things happen when something calls arch_reset. */
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index 7aa879b7c7ff..7ec20817c0c9 100644
--- a/arch/m68k/sun3/mmu_emu.c
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -23,7 +23,6 @@
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/sun3mmu.h>
-#include <asm/segment.h>
#include <asm/oplib.h>
#include <asm/mmu_context.h>
#include <asm/dvma.h>
@@ -191,14 +190,13 @@ void __init mmu_emu_init(unsigned long bootmem_end)
for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE)
sun3_put_segmap(seg, SUN3_INVALID_PMEG);
- set_fs(MAKE_MM_SEG(3));
+ set_fc(3);
for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) {
i = sun3_get_segmap(seg);
for(j = 1; j < CONTEXTS_NUM; j++)
(*(romvec->pv_setctxt))(j, (void *)seg, i);
}
- set_fs(KERNEL_DS);
-
+ set_fc(USER_DATA);
}
/* erase the mappings for a dead context. Uses the pg_dir for hints
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index 41ae422119d3..36cc280a4505 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -11,7 +11,6 @@
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
-#include <asm/segment.h>
#include <asm/intersil.h>
#include <asm/oplib.h>
#include <asm/sun3ints.h>
diff --git a/arch/m68k/sun3x/prom.c b/arch/m68k/sun3x/prom.c
index 74d2fe57524b..64c23bfaa90c 100644
--- a/arch/m68k/sun3x/prom.c
+++ b/arch/m68k/sun3x/prom.c
@@ -14,7 +14,6 @@
#include <asm/traps.h>
#include <asm/sun3xprom.h>
#include <asm/idprom.h>
-#include <asm/segment.h>
#include <asm/sun3ints.h>
#include <asm/openprom.h>
#include <asm/machines.h>
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 771ca53af06d..6b8f591c5054 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -3316,8 +3316,6 @@ source "drivers/cpuidle/Kconfig"
endmenu
-source "drivers/firmware/Kconfig"
-
source "arch/mips/kvm/Kconfig"
source "arch/mips/vdso/Kconfig"
diff --git a/arch/mips/include/asm/mips-cps.h b/arch/mips/include/asm/mips-cps.h
index 35fb8ee6dd33..fd43d876892e 100644
--- a/arch/mips/include/asm/mips-cps.h
+++ b/arch/mips/include/asm/mips-cps.h
@@ -10,8 +10,6 @@
#include <linux/io.h>
#include <linux/types.h>
-#include <asm/mips-boards/launch.h>
-
extern unsigned long __cps_access_bad_size(void)
__compiletime_error("Bad size for CPS accessor");
@@ -167,30 +165,11 @@ static inline uint64_t mips_cps_cluster_config(unsigned int cluster)
*/
static inline unsigned int mips_cps_numcores(unsigned int cluster)
{
- unsigned int ncores;
-
if (!mips_cm_present())
return 0;
/* Add one before masking to handle 0xff indicating no cores */
- ncores = (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES;
-
- if (IS_ENABLED(CONFIG_SOC_MT7621)) {
- struct cpulaunch *launch;
-
- /*
- * Ralink MT7621S SoC is single core, but the GCR_CONFIG method
- * always reports 2 cores. Check the second core's LAUNCH_FREADY
- * flag to detect if the second core is missing. This method
- * only works before the core has been started.
- */
- launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH);
- launch += 2; /* MT7621 has 2 VPEs per core */
- if (!(launch->flags & LAUNCH_FREADY))
- ncores = 1;
- }
-
- return ncores;
+ return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES;
}
/**
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 1eaf6a1ca561..24e0efb360f6 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -142,6 +142,8 @@
#define SO_BUF_LOCK 72
+#define SO_RESERVE_MEM 73
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index f1e985109da0..c9b2a75563e1 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -906,10 +906,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs);
- if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ if (thread_info_flags & _TIF_NOTIFY_RESUME)
tracehook_notify_resume(regs);
- rseq_handle_notify_resume(NULL, regs);
- }
user_enter();
}
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 0af88622c619..cb6d22439f71 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -662,6 +662,11 @@ static void build_epilogue(struct jit_ctx *ctx)
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
func##_positive)
+static bool is_bad_offset(int b_off)
+{
+ return b_off > 0x1ffff || b_off < -0x20000;
+}
+
static int build_body(struct jit_ctx *ctx)
{
const struct bpf_prog *prog = ctx->skf;
@@ -728,7 +733,10 @@ load_common:
/* Load return register on DS for failures */
emit_reg_move(r_ret, r_zero, ctx);
/* Return with error */
- emit_b(b_imm(prog->len, ctx), ctx);
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
emit_nop(ctx);
break;
case BPF_LD | BPF_W | BPF_IND:
@@ -775,8 +783,10 @@ load_ind:
emit_jalr(MIPS_R_RA, r_s0, ctx);
emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
/* Check the error value */
- emit_bcond(MIPS_COND_NE, r_ret, 0,
- b_imm(prog->len, ctx), ctx);
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
emit_reg_move(r_ret, r_zero, ctx);
/* We are good */
/* X <- P[1:K] & 0xf */
@@ -855,8 +865,10 @@ load_ind:
/* A /= X */
ctx->flags |= SEEN_X | SEEN_A;
/* Check if r_X is zero */
- emit_bcond(MIPS_COND_EQ, r_X, r_zero,
- b_imm(prog->len, ctx), ctx);
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
emit_load_imm(r_ret, 0, ctx); /* delay slot */
emit_div(r_A, r_X, ctx);
break;
@@ -864,8 +876,10 @@ load_ind:
/* A %= X */
ctx->flags |= SEEN_X | SEEN_A;
/* Check if r_X is zero */
- emit_bcond(MIPS_COND_EQ, r_X, r_zero,
- b_imm(prog->len, ctx), ctx);
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
emit_load_imm(r_ret, 0, ctx); /* delay slot */
emit_mod(r_A, r_X, ctx);
break;
@@ -926,7 +940,10 @@ load_ind:
break;
case BPF_JMP | BPF_JA:
/* pc += K */
- emit_b(b_imm(i + k + 1, ctx), ctx);
+ b_off = b_imm(i + k + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
emit_nop(ctx);
break;
case BPF_JMP | BPF_JEQ | BPF_K:
@@ -1056,12 +1073,16 @@ jmp_cmp:
break;
case BPF_RET | BPF_A:
ctx->flags |= SEEN_A;
- if (i != prog->len - 1)
+ if (i != prog->len - 1) {
/*
* If this is not the last instruction
* then jump to the epilogue
*/
- emit_b(b_imm(prog->len, ctx), ctx);
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
+ }
emit_reg_move(r_ret, r_A, ctx); /* delay slot */
break;
case BPF_RET | BPF_K:
@@ -1075,7 +1096,10 @@ jmp_cmp:
* If this is not the last instruction
* then jump to the epilogue
*/
- emit_b(b_imm(prog->len, ctx), ctx);
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
emit_nop(ctx);
}
break;
@@ -1133,8 +1157,10 @@ jmp_cmp:
/* Load *dev pointer */
emit_load_ptr(r_s0, r_skb, off, ctx);
/* error (0) in the delay slot */
- emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
- b_imm(prog->len, ctx), ctx);
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
emit_reg_move(r_ret, r_zero, ctx);
if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
@@ -1244,7 +1270,10 @@ void bpf_jit_compile(struct bpf_prog *fp)
/* Generate the actual JIT code */
build_prologue(&ctx);
- build_body(&ctx);
+ if (build_body(&ctx)) {
+ module_memfree(ctx.target);
+ goto out;
+ }
build_epilogue(&ctx);
/* Update the icache */
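
All of the converted emit sites above funnel through the new is_bad_offset()
guard: a classic-MIPS branch immediate is a signed 16-bit word offset, so only
targets within roughly +/-128 KiB of the branch are encodable, and anything
further must fail the JIT with -E2BIG. A minimal userspace sketch of the same
guard (bounds copied from the hunk above; word alignment of real targets is
handled elsewhere by the JIT):

#include <stdbool.h>
#include <stdio.h>

/* Byte offsets representable by a MIPS branch's signed 16-bit
 * word immediate (the hardware shifts it left by 2). */
static bool is_bad_offset(int b_off)
{
        return b_off > 0x1ffff || b_off < -0x20000;
}

int main(void)
{
        printf("%d\n", is_bad_offset(0x20000));  /* 1: too far forward */
        printf("%d\n", is_bad_offset(-0x20000)); /* 0: farthest backward hop */
        printf("%d\n", is_bad_offset(8));        /* 0: short forward hop */
        return 0;
}
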
diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug
index a8bc06e96ef5..ca1beb87f987 100644
--- a/arch/nios2/Kconfig.debug
+++ b/arch/nios2/Kconfig.debug
@@ -3,9 +3,10 @@
config EARLY_PRINTK
bool "Activate early kernel debugging"
default y
+ depends on TTY
select SERIAL_CORE_CONSOLE
help
- Enable early printk on console
+ Enable early printk on console.
This is useful for kernel debugging when your machine crashes very
early before the console code is initialized.
You should normally say N here, unless you want to debug such a crash.
diff --git a/arch/nios2/include/asm/irqflags.h b/arch/nios2/include/asm/irqflags.h
index b3ec3e510706..25acf27862f9 100644
--- a/arch/nios2/include/asm/irqflags.h
+++ b/arch/nios2/include/asm/irqflags.h
@@ -9,7 +9,7 @@
static inline unsigned long arch_local_save_flags(void)
{
- return RDCTL(CTL_STATUS);
+ return RDCTL(CTL_FSTATUS);
}
/*
@@ -18,7 +18,7 @@ static inline unsigned long arch_local_save_flags(void)
*/
static inline void arch_local_irq_restore(unsigned long flags)
{
- WRCTL(CTL_STATUS, flags);
+ WRCTL(CTL_FSTATUS, flags);
}
static inline void arch_local_irq_disable(void)
diff --git a/arch/nios2/include/asm/registers.h b/arch/nios2/include/asm/registers.h
index 183c720e454d..95b67dd16f81 100644
--- a/arch/nios2/include/asm/registers.h
+++ b/arch/nios2/include/asm/registers.h
@@ -11,7 +11,7 @@
#endif
/* control register numbers */
-#define CTL_STATUS 0
+#define CTL_FSTATUS 0
#define CTL_ESTATUS 1
#define CTL_BSTATUS 2
#define CTL_IENABLE 3
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index cf8d687a2644..40bc8fb75e0b 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -149,8 +149,6 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low,
void __init setup_arch(char **cmdline_p)
{
- int dram_start;
-
console_verbose();
memory_start = memblock_start_of_DRAM();
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 4742b6f169b7..27a8b49af11f 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -384,6 +384,4 @@ config KEXEC_FILE
endmenu
-source "drivers/firmware/Kconfig"
-
source "drivers/parisc/Kconfig"
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 8baaad52d799..845ddc63c882 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -123,6 +123,8 @@
#define SO_BUF_LOCK 0x4046
+#define SO_RESERVE_MEM 0x4047
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index f03adb1999e7..367f6397bda7 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -513,12 +513,15 @@ void ioport_unmap(void __iomem *addr)
}
}
+#ifdef CONFIG_PCI
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
if (!INDIRECT_ADDR(addr)) {
iounmap(addr);
}
}
+EXPORT_SYMBOL(pci_iounmap);
+#endif
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
@@ -544,4 +547,3 @@ EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
-EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/powerpc/boot/dts/fsl/t1023rdb.dts b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
index 5ba6fbfca274..f82f85c65964 100644
--- a/arch/powerpc/boot/dts/fsl/t1023rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
@@ -154,7 +154,7 @@
fm1mac3: ethernet@e4000 {
phy-handle = <&sgmii_aqr_phy3>;
- phy-connection-type = "sgmii-2500";
+ phy-connection-type = "2500base-x";
sleep = <&rcpm 0x20000000>;
};
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index d4b145b279f6..9f38040f0641 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -136,6 +136,14 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
if (kuap_is_disabled())
return;
+ if (unlikely(kuap != KUAP_NONE)) {
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock(kuap, false);
+ }
+
+ if (likely(regs->kuap == KUAP_NONE))
+ return;
+
current->thread.kuap = regs->kuap;
kuap_unlock(regs->kuap, false);
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index a95f63788c6b..4ba834599c4d 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -23,6 +23,7 @@
#define BRANCH_ABSOLUTE 0x2
bool is_offset_in_branch_range(long offset);
+bool is_offset_in_cond_branch_range(long offset);
int create_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags);
int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 6b800d3e2681..a1d238255f07 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -265,13 +265,16 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
- if (is_implicit_soft_masked(regs)) {
- // Adjust regs->softe soft implicit soft-mask, so
- // arch_irq_disabled_regs(regs) behaves as expected.
+ if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
+ /*
+ * Adjust regs->softe to be soft-masked if it had not been
+ * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
+ * not yet set disabled), or if it was in an implicit soft
+ * masked state. This makes arch_irq_disabled_regs(regs)
+ * behave as expected.
+ */
regs->softe = IRQS_ALL_DISABLED;
}
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
/* Don't do any per-CPU operations until interrupt state is fixed */
@@ -525,10 +528,9 @@ static __always_inline long ____##func(struct pt_regs *regs)
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
-DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
-#else
-DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
+DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
#endif
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index 792eefaf230b..27574f218b37 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -39,6 +39,11 @@ static inline bool security_ftr_enabled(u64 feature)
return !!(powerpc_security_features & feature);
}
+#ifdef CONFIG_PPC_BOOK3S_64
+enum stf_barrier_type stf_barrier_type_get(void);
+#else
+static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; }
+#endif
// Features indicating support for Spectre/Meltdown mitigations
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 111249fd619d..038ce8d9061d 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -184,6 +184,15 @@ u64 dma_iommu_get_required_mask(struct device *dev)
struct iommu_table *tbl = get_iommu_table_base(dev);
u64 mask;
+ if (dev_is_pci(dev)) {
+ u64 bypass_mask = dma_direct_get_required_mask(dev);
+
+ if (dma_iommu_dma_supported(dev, bypass_mask)) {
+ dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
+ return bypass_mask;
+ }
+ }
+
if (!tbl)
return 0;
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 37859e62a8dc..eaf1f72131a1 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1243,7 +1243,7 @@ EXC_COMMON_BEGIN(machine_check_common)
li r10,MSR_RI
mtmsrd r10,1
addi r3,r1,STACK_FRAME_OVERHEAD
- bl machine_check_exception
+ bl machine_check_exception_async
b interrupt_return_srr
@@ -1303,7 +1303,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
subi r12,r12,1
sth r12,PACA_IN_MCE(r13)
- /* Invoke machine_check_exception to print MCE event and panic. */
+ /*
+ * Invoke machine_check_exception to print MCE event and panic.
+ * This is the NMI version of the handler because we are called from
+ * the early handler which is a true NMI.
+ */
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception
@@ -1665,27 +1669,30 @@ EXC_COMMON_BEGIN(program_check_common)
*/
andi. r10,r12,MSR_PR
- bne 2f /* If userspace, go normal path */
+ bne .Lnormal_stack /* If userspace, go normal path */
andis. r10,r12,(SRR1_PROGTM)@h
- bne 1f /* If TM, emergency */
+ bne .Lemergency_stack /* If TM, emergency */
cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */
- blt 2f /* normal path if not */
+ blt .Lnormal_stack /* normal path if not */
/* Use the emergency stack */
-1: andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
+.Lemergency_stack:
+ andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
/* 3 in EXCEPTION_PROLOG_COMMON */
mr r10,r1 /* Save r1 */
ld r1,PACAEMERGSP(r13) /* Use emergency stack */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
__ISTACK(program_check)=0
__GEN_COMMON_BODY program_check
- b 3f
-2:
+ b .Ldo_program_check
+
+.Lnormal_stack:
__ISTACK(program_check)=1
__GEN_COMMON_BODY program_check
-3:
+
+.Ldo_program_check:
addi r3,r1,STACK_FRAME_OVERHEAD
bl program_check_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index abb719b21cae..3d97fb833834 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -126,14 +126,16 @@ _GLOBAL(idle_return_gpr_loss)
/*
* This is the sequence required to execute idle instructions, as
* specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
- *
- * The 0(r1) slot is used to save r2 in isa206, so use that here.
+ * We have to store a GPR somewhere, ptesync, then reload it, and create
+ * a false dependency on the result of the load. It doesn't matter which
+ * GPR we store, or where we store it. We have already stored r2 to the
+ * stack at -8(r1) in isa206_idle_insn_mayloss, so use that.
*/
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \
/* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
- std r2,0(r1); \
+ std r2,-8(r1); \
ptesync; \
- ld r2,0(r1); \
+ ld r2,-8(r1); \
236: cmpd cr0,r2,r2; \
bne 236b; \
IDLE_INST; \
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index a73f3f70a657..de10a2697258 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -18,6 +18,7 @@
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
+#include <asm/tm.h>
#include <asm/unistd.h>
#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
@@ -136,6 +137,48 @@ notrace long system_call_exception(long r3, long r4, long r5,
*/
irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
+ /*
+ * If system call is called with TM active, set _TIF_RESTOREALL to
+ * prevent RFSCV being used to return to userspace, because POWER9
+ * TM implementation has problems with this instruction returning to
+ * transactional state. Final register values are not relevant because
+ * the transaction will be aborted upon return anyway. Or in the case
+ * of unsupported_scv SIGILL fault, the return state does not much
+ * matter because it's an edge case.
+ */
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+ unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
+ current_thread_info()->flags |= _TIF_RESTOREALL;
+
+ /*
+ * If the system call was made with a transaction active, doom it and
+ * return without performing the system call. Unless it was an
+ * unsupported scv vector, in which case it's treated like an illegal
+ * instruction.
+ */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
+ !trap_is_unsupported_scv(regs)) {
+ /* Enable TM in the kernel, and disable EE (for scv) */
+ hard_irq_disable();
+ mtmsr(mfmsr() | MSR_TM);
+
+ /* tabort, this dooms the transaction, nothing else */
+ asm volatile(".long 0x7c00071d | ((%0) << 16)"
+ :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
+
+ /*
+ * Userspace will never see the return value. Execution will
+ * resume after the tbegin. of the aborted transaction with the
+ * checkpointed register state. A context switch could occur
+ * or signal delivered to the process before resuming the
+ * doomed transaction context, but that should all be handled
+ * as expected.
+ */
+ return -ENOSYS;
+ }
+#endif // CONFIG_PPC_TRANSACTIONAL_MEM
+
local_irq_enable();
if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
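
A rough control-flow model of the two TM hunks above (a userspace sketch: the
MSR bit and flag values are illustrative stand-ins, and the real code enables
MSR[TM] and issues an actual tabort. before bailing out):

#include <stdio.h>

#define MSR_TS_T        (1UL << 34)     /* stand-in: transaction active */
#define _TIF_RESTOREALL (1UL << 0)      /* stand-in flag bit */

static unsigned long thread_flags;

static long syscall_entry(unsigned long msr, long nr)
{
        if (msr & MSR_TS_T) {
                /* Avoid rfscv on return: force a full register restore */
                thread_flags |= _TIF_RESTOREALL;
                /* Real code: hard_irq_disable(), set MSR[TM], tabort. */
                return -38;             /* -ENOSYS, without dispatching */
        }
        return nr;                      /* would dispatch the syscall here */
}

int main(void)
{
        printf("%ld\n", syscall_entry(MSR_TS_T, 4));    /* -38: doomed */
        printf("%ld\n", syscall_entry(0, 4));           /* 4: dispatched */
        return 0;
}
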
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
index d4212d2ff0b5..ec950b08a8dc 100644
--- a/arch/powerpc/kernel/interrupt_64.S
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -12,7 +12,6 @@
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>
-#include <asm/tm.h>
.section ".toc","aw"
SYS_CALL_TABLE:
@@ -55,12 +54,6 @@ COMPAT_SYS_CALL_TABLE:
.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
- extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
- bne tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
SCV_INTERRUPT_TO_KERNEL
mr r10,r1
ld r1,PACAKSAVE(r13)
@@ -247,12 +240,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_common_real)
.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
- extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
- bne tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
mr r10,r1
ld r1,PACAKSAVE(r13)
std r10,0(r1)
@@ -425,34 +412,6 @@ SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-tabort_syscall:
-_ASM_NOKPROBE_SYMBOL(tabort_syscall)
- /* Firstly we need to enable TM in the kernel */
- mfmsr r10
- li r9, 1
- rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r10, 0
-
- /* tabort, this dooms the transaction, nothing else */
- li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
- TABORT(R9)
-
- /*
- * Return directly to userspace. We have corrupted user register state,
- * but userspace will never see that register state. Execution will
- * resume after the tbegin of the aborted transaction with the
- * checkpointed register state.
- */
- li r9, MSR_RI
- andc r10, r10, r9
- mtmsrd r10, 1
- mtspr SPRN_SRR0, r11
- mtspr SPRN_SRR1, r12
- RFI_TO_USER
- b . /* prevent speculative execution */
-#endif
-
/*
* If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
* touched, no exit work created, then this can be used.
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 551b653228c4..c4f1d6b7d992 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -229,6 +229,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
return;
}
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(in_nmi() || in_hardirq());
+
/*
* After the stb, interrupts are unmasked and there are no interrupts
* pending replay. The restart sequence makes this atomic with
@@ -321,6 +324,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
if (mask)
return;
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(in_nmi() || in_hardirq());
+
/*
* From this point onward, we can take interrupts, preempt,
* etc... unless we got hard-disabled. We check if an event
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 47a683cd00d2..fd829f7f25a4 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -249,6 +249,7 @@ void machine_check_queue_event(void)
{
int index;
struct machine_check_event evt;
+ unsigned long msr;
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
return;
@@ -262,8 +263,20 @@ void machine_check_queue_event(void)
memcpy(&local_paca->mce_info->mce_event_queue[index],
&evt, sizeof(evt));
- /* Queue irq work to process this event later. */
- irq_work_queue(&mce_event_process_work);
+ /*
+ * Queue irq work to process this event later. Before
+ * queuing the work enable translation for non radix LPAR,
+ * as irq_work_queue may try to access memory outside RMO
+ * region.
+ */
+ if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) {
+ msr = mfmsr();
+ mtmsr(msr | MSR_IR | MSR_DR);
+ irq_work_queue(&mce_event_process_work);
+ mtmsr(msr);
+ } else {
+ irq_work_queue(&mce_event_process_work);
+ }
}
void mce_common_process_ue(struct pt_regs *regs,
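
The queueing fix follows a common save/modify/restore-MSR pattern. A runnable
model with the MSR stubbed as a global (MSR_IR/MSR_DR match the architected
bit values, but mfmsr()/mtmsr() here are stand-ins for the real instructions):

#include <stdio.h>

#define MSR_IR 0x20UL                   /* instruction relocate */
#define MSR_DR 0x10UL                   /* data relocate */

static unsigned long fake_msr;          /* stands in for the MSR register */

static unsigned long mfmsr(void) { return fake_msr; }
static void mtmsr(unsigned long v) { fake_msr = v; }

static void irq_work_queue(void) { printf("queued, MSR=%#lx\n", fake_msr); }

int main(void)
{
        unsigned long msr = mfmsr();

        mtmsr(msr | MSR_IR | MSR_DR);   /* enable translation for the call */
        irq_work_queue();
        mtmsr(msr);                     /* restore the saved MSR */
        return 0;
}
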
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 1a998490fe60..15fb5ea1b9ea 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -263,6 +263,11 @@ static int __init handle_no_stf_barrier(char *p)
early_param("no_stf_barrier", handle_no_stf_barrier);
+enum stf_barrier_type stf_barrier_type_get(void)
+{
+ return stf_enabled_flush_types;
+}
+
/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index e600764a926c..b93b87df499d 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -293,10 +293,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
do_signal(current);
}
- if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ if (thread_info_flags & _TIF_NOTIFY_RESUME)
tracehook_notify_resume(regs);
- rseq_handle_notify_resume(NULL, regs);
- }
}
static unsigned long get_tm_stackpointer(struct task_struct *tsk)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 9cc7d3dbf439..605bab448f84 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1730,8 +1730,6 @@ void __cpu_die(unsigned int cpu)
void arch_cpu_idle_dead(void)
{
- sched_preempt_enable_no_resched();
-
/*
* Disable on the down path. This will be re-enabled by
* start_secondary() via start_secondary_resume() below
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index aac8c0412ff9..11741703d26e 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -340,10 +340,16 @@ static bool exception_common(int signr, struct pt_regs *regs, int code,
return false;
}
- show_signal_msg(signr, regs, code, addr);
+ /*
+ * Must not enable interrupts even for user-mode exception, because
+ * this can be called from machine check, which may be a NMI or IRQ
+ * which don't like interrupts being enabled. Could check for
+ * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
+ * reason why _exception() should enable irqs for an exception handler,
+ * the handlers themselves do that directly.
+ */
- if (arch_irqs_disabled())
- interrupt_cond_local_irq_enable(regs);
+ show_signal_msg(signr, regs, code, addr);
current->thread.trap_nr = code;
@@ -790,24 +796,22 @@ void die_mce(const char *str, struct pt_regs *regs, long err)
* do_exit() checks for in_interrupt() and panics in that case, so
* exit the irq/nmi before calling die.
*/
- if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
- irq_exit();
- else
+ if (in_nmi())
nmi_exit();
+ else
+ irq_exit();
die(str, regs, err);
}
/*
- * BOOK3S_64 does not call this handler as a non-maskable interrupt
+ * BOOK3S_64 does not usually call this handler as a non-maskable interrupt
* (it uses its own early real-mode handler to handle the MCE proper
* and then raises irq_work to call this handler when interrupts are
- * enabled).
+ * enabled). The only time when this is not true is if the early handler
+ * is unrecoverable, then it does call this directly to try to get a
+ * message out.
*/
-#ifdef CONFIG_PPC_BOOK3S_64
-DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception)
-#else
-DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
-#endif
+static void __machine_check_exception(struct pt_regs *regs)
{
int recover = 0;
@@ -841,12 +845,19 @@ bail:
/* Must die if the interrupt is not recoverable */
if (regs_is_unrecoverable(regs))
die_mce("Unrecoverable Machine check", regs, SIGBUS);
+}
#ifdef CONFIG_PPC_BOOK3S_64
- return;
-#else
- return 0;
+DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
+{
+ __machine_check_exception(regs);
+}
#endif
+DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
+{
+ __machine_check_exception(regs);
+
+ return 0;
}
DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */
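
The refactor above reduces to one shared body with two thin entry points, one
per calling context. A compilable sketch of that shape (the names and pt_regs
stub are illustrative; the kernel wraps these in DEFINE_INTERRUPT_HANDLER_*
macros):

#include <stdio.h>

struct pt_regs { int trap; };

/* Shared body: attempt recovery, die if unrecoverable, etc. */
static void __machine_check(struct pt_regs *regs)
{
        printf("handling MCE, trap=%#x\n", regs->trap);
}

/* Async (irq-work) entry point: no return value needed. */
static void machine_check_async(struct pt_regs *regs)
{
        __machine_check(regs);
}

/* NMI entry point: NMI handlers conventionally return a status. */
static long machine_check_nmi(struct pt_regs *regs)
{
        __machine_check(regs);
        return 0;
}

int main(void)
{
        struct pt_regs r = { .trap = 0x200 };

        machine_check_async(&r);
        return (int)machine_check_nmi(&r);
}
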
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 75079397c2a5..eb776d0c5d8e 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -255,13 +255,16 @@ kvm_novcpu_exit:
* r3 contains the SRR1 wakeup value, SRR1 is trashed.
*/
_GLOBAL(idle_kvm_start_guest)
- ld r4,PACAEMERGSP(r13)
mfcr r5
mflr r0
- std r1,0(r4)
- std r5,8(r4)
- std r0,16(r4)
- subi r1,r4,STACK_FRAME_OVERHEAD
+ std r5, 8(r1) // Save CR in caller's frame
+ std r0, 16(r1) // Save LR in caller's frame
+ // Create frame on emergency stack
+ ld r4, PACAEMERGSP(r13)
+ stdu r1, -SWITCH_FRAME_SIZE(r4)
+ // Switch to new frame on emergency stack
+ mr r1, r4
+ std r3, 32(r1) // Save SRR1 wakeup value
SAVE_NVGPRS(r1)
/*
@@ -313,6 +316,10 @@ kvm_unsplit_wakeup:
kvm_secondary_got_guest:
+ // About to go to guest, clear saved SRR1
+ li r0, 0
+ std r0, 32(r1)
+
/* Set HSTATE_DSCR(r13) to something sensible */
ld r6, PACA_DSCR_DEFAULT(r13)
std r6, HSTATE_DSCR(r13)
@@ -392,13 +399,12 @@ kvm_no_guest:
mfspr r4, SPRN_LPCR
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
mtspr SPRN_LPCR, r4
- /* set up r3 for return */
- mfspr r3,SPRN_SRR1
+ // Return SRR1 wakeup value, or 0 if we went into the guest
+ ld r3, 32(r1)
REST_NVGPRS(r1)
- addi r1, r1, STACK_FRAME_OVERHEAD
- ld r0, 16(r1)
- ld r5, 8(r1)
- ld r1, 0(r1)
+ ld r1, 0(r1) // Switch back to caller stack
+ ld r0, 16(r1) // Reload LR
+ ld r5, 8(r1) // Reload CR
mtlr r0
mtcr r5
blr
@@ -2536,7 +2542,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
/* The following code handles the fake_suspend = 1 case */
mflr r0
std r0, PPC_LR_STKOFF(r1)
- stdu r1, -PPC_MIN_STKFRM(r1)
+ stdu r1, -TM_FRAME_SIZE(r1)
/* Turn on TM. */
mfmsr r8
@@ -2551,10 +2557,42 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
nop
+ /*
+ * It's possible that treclaim. may modify registers, if we have lost
+ * track of fake-suspend state in the guest due to it using rfscv.
+ * Save and restore registers in case this occurs.
+ */
+ mfspr r3, SPRN_DSCR
+ mfspr r4, SPRN_XER
+ mfspr r5, SPRN_AMR
+ /* SPRN_TAR would need to be saved here if the kernel ever used it */
+ mfcr r12
+ SAVE_NVGPRS(r1)
+ SAVE_GPR(2, r1)
+ SAVE_GPR(3, r1)
+ SAVE_GPR(4, r1)
+ SAVE_GPR(5, r1)
+ stw r12, 8(r1)
+ std r1, HSTATE_HOST_R1(r13)
+
/* We have to treclaim here because that's the only way to do S->N */
li r3, TM_CAUSE_KVM_RESCHED
TRECLAIM(R3)
+ GET_PACA(r13)
+ ld r1, HSTATE_HOST_R1(r13)
+ REST_GPR(2, r1)
+ REST_GPR(3, r1)
+ REST_GPR(4, r1)
+ REST_GPR(5, r1)
+ lwz r12, 8(r1)
+ REST_NVGPRS(r1)
+ mtspr SPRN_DSCR, r3
+ mtspr SPRN_XER, r4
+ mtspr SPRN_AMR, r5
+ mtcr r12
+ HMT_MEDIUM
+
/*
* We were in fake suspend, so we are not going to save the
* register state as the guest checkpointed state (since
@@ -2582,7 +2620,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
std r5, VCPU_TFHAR(r9)
std r6, VCPU_TFIAR(r9)
- addi r1, r1, PPC_MIN_STKFRM
+ addi r1, r1, TM_FRAME_SIZE
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
blr
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index f9a3019e37b4..c5ed98823835 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -228,6 +228,11 @@ bool is_offset_in_branch_range(long offset)
return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
}
+bool is_offset_in_cond_branch_range(long offset)
+{
+ return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
+}
+
/*
* Helper to check if a given instruction is a conditional branch
* Derived from the conditional checks in analyse_instr()
@@ -280,7 +285,7 @@ int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
offset = offset - (unsigned long)addr;
/* Check we can represent the target in the instruction format */
- if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
+ if (!is_offset_in_cond_branch_range(offset))
return 1;
/* Mask out the flags and target, so they don't step on each other. */
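
For reference, a userspace copy of the two reach checks with a value showing
why both are needed: an unconditional 'b' carries a signed 26-bit displacement,
a conditional 'bc' only 16 bits, and both must be word aligned:

#include <stdbool.h>
#include <stdio.h>

static bool in_branch_range(long off)
{
        return off >= -0x2000000 && off <= 0x1fffffc && !(off & 0x3);
}

static bool in_cond_branch_range(long off)
{
        return off >= -0x8000 && off <= 0x7fff && !(off & 0x3);
}

int main(void)
{
        long off = 0x10000;     /* 64 KiB forward */

        printf("b: %d, bc: %d\n",
               in_branch_range(off),        /* 1: reachable */
               in_cond_branch_range(off));  /* 0: needs the long form */
        return 0;
}
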
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 99fad093f43e..7e9b978b768e 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -24,16 +24,30 @@
#define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr)
/* Long jump; (unconditional 'branch') */
-#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \
- (((dest) - (ctx->idx * 4)) & 0x03fffffc))
+#define PPC_JMP(dest) \
+ do { \
+ long offset = (long)(dest) - (ctx->idx * 4); \
+ if (!is_offset_in_branch_range(offset)) { \
+ pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
+ return -ERANGE; \
+ } \
+ EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc)); \
+ } while (0)
+
/* blr; (unconditional 'branch' with link) to absolute address */
#define PPC_BL_ABS(dest) EMIT(PPC_INST_BL | \
(((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
/* "cond" here covers BO:BI fields. */
-#define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \
- (((cond) & 0x3ff) << 16) | \
- (((dest) - (ctx->idx * 4)) & \
- 0xfffc))
+#define PPC_BCC_SHORT(cond, dest) \
+ do { \
+ long offset = (long)(dest) - (ctx->idx * 4); \
+ if (!is_offset_in_cond_branch_range(offset)) { \
+ pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
+ return -ERANGE; \
+ } \
+ EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
+ } while (0)
+
/* Sign-extended 32-bit immediate load */
#define PPC_LI32(d, i) do { \
if ((int)(uintptr_t)(i) >= -32768 && \
@@ -78,11 +92,6 @@
#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
#endif
-static inline bool is_nearbranch(int offset)
-{
- return (offset < 32768) && (offset >= -32768);
-}
-
/*
* The fly in the ointment of code size changing from pass to pass is
* avoided by padding the short branch case with a NOP. If code size differs
@@ -91,7 +100,7 @@ static inline bool is_nearbranch(int offset)
* state.
*/
#define PPC_BCC(cond, dest) do { \
- if (is_nearbranch((dest) - (ctx->idx * 4))) { \
+ if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) { \
PPC_BCC_SHORT(cond, dest); \
EMIT(PPC_RAW_NOP()); \
} else { \
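
PPC_BCC works because either arm emits exactly two instructions, so code size
cannot oscillate between JIT passes. A small userspace model of the strategy
(emit() just prints; offsets are in bytes and condition inversion is elided):

#include <stdio.h>

static unsigned int idx;        /* next instruction slot, like ctx->idx */

static void emit(const char *insn) { printf("%2u: %s\n", idx++, insn); }

static int in_cond_range(long off) { return off >= -0x8000 && off <= 0x7fff; }

/* Two instructions either way: short bc + nop, or an inverted bc
 * hopping over an unconditional long b. */
static void bcc(long dest)
{
        if (in_cond_range(dest - (long)idx * 4)) {
                emit("bc   cond, dest");
                emit("nop");
        } else {
                emit("bc   !cond, +8");     /* skip the long branch */
                emit("b    dest");
        }
}

int main(void)
{
        bcc(16);                /* near target: bc + nop */
        idx = 0;
        bcc(1L << 20);          /* far target: inverted bc + b */
        return 0;
}
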
diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
index 7b713edfa7e2..b63b35e45e55 100644
--- a/arch/powerpc/net/bpf_jit64.h
+++ b/arch/powerpc/net/bpf_jit64.h
@@ -16,18 +16,18 @@
* with our redzone usage.
*
* [ prev sp ] <-------------
- * [ nv gpr save area ] 6*8 |
+ * [ nv gpr save area ] 5*8 |
* [ tail_call_cnt ] 8 |
- * [ local_tmp_var ] 8 |
+ * [ local_tmp_var ] 16 |
* fp (r31) --> [ ebpf stack space ] upto 512 |
* [ frame header ] 32/112 |
* sp (r1) ---> [ stack pointer ] --------------
*/
/* for gpr non volatile registers BPF_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE (6*8)
+#define BPF_PPC_STACK_SAVE (5*8)
/* for bpf JIT code internal usage */
-#define BPF_PPC_STACK_LOCALS 16
+#define BPF_PPC_STACK_LOCALS 24
/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
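
The new numbers keep the comment's quadword-alignment invariant. A quick check
of the frame arithmetic (STACK_FRAME_MIN_SIZE is an ABI constant; 32 for ELFv2
and 112 for ELFv1 are assumed here):

#include <assert.h>
#include <stdio.h>

#define BPF_PPC_STACK_SAVE      (5 * 8) /* 5 saved NV GPRs, per the layout above */
#define BPF_PPC_STACK_LOCALS    24      /* 16-byte local_tmp_var + 8-byte tail_call_cnt */

int main(void)
{
        const int min_sizes[] = { 32, 112 };    /* assumed ELFv2 / ELFv1 headers */

        for (int i = 0; i < 2; i++) {
                int frame = min_sizes[i] + BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE;

                assert(frame % 16 == 0);        /* must stay quadword aligned */
                printf("BPF_PPC_STACKFRAME = %d\n", frame);     /* 96, then 176 */
        }
        return 0;
}
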
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 53aefee3fe70..fcbf7a917c56 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -210,7 +210,11 @@ skip_init_ctx:
/* Now build the prologue, body code & epilogue for real. */
cgctx.idx = 0;
bpf_jit_build_prologue(code_base, &cgctx);
- bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
+ if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass)) {
+ bpf_jit_binary_free(bpf_hdr);
+ fp = org_fp;
+ goto out_addrs;
+ }
bpf_jit_build_epilogue(code_base, &cgctx);
if (bpf_jit_enable > 1)
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index beb12cbc8c29..0da31d41d413 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -200,7 +200,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
}
}
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
/*
* By now, the eBPF program has already setup parameters in r3-r6
@@ -261,7 +261,9 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
bpf_jit_emit_common_epilogue(image, ctx);
EMIT(PPC_RAW_BCTR());
+
/* out: */
+ return 0;
}
/* Assemble the body code between the prologue & epilogue */
@@ -355,7 +357,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
}
- if (imm >= 0)
+ if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
else
EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h));
@@ -623,7 +625,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
EMIT(PPC_RAW_LI(dst_reg_h, 0));
break;
case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
- EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg, src_reg));
+ EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
break;
case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
bpf_set_seen_register(ctx, tmp_reg);
@@ -1073,7 +1075,7 @@ cond_branch:
break;
case BPF_JMP32 | BPF_JSET | BPF_K:
/* andi does not sign-extend the immediate */
- if (imm >= -32768 && imm < 32768) {
+ if (imm >= 0 && imm < 32768) {
/* PPC_ANDI is _only/always_ dot-form */
EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
} else {
@@ -1090,7 +1092,9 @@ cond_branch:
*/
case BPF_JMP | BPF_TAIL_CALL:
ctx->seen |= SEEN_TAILCALL;
- bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+ ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+ if (ret < 0)
+ return ret;
break;
default:
@@ -1103,7 +1107,7 @@ cond_branch:
return -EOPNOTSUPP;
}
if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
- !insn_is_zext(&insn[i + 1]))
+ !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
EMIT(PPC_RAW_LI(dst_reg_h, 0));
}
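
The 0x80000000 special cases here (and the ADD/SUB split in the 64-bit JIT
below) share one root cause: negating INT_MIN does not flip its sign, so
folding 'dst -= imm' into 'dst += -imm' picks the wrong carry handling for
exactly that immediate. A two-line demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int32_t imm = INT32_MIN;        /* 0x80000000 */
        /* Negate via unsigned arithmetic to avoid signed overflow UB. */
        int32_t neg = (int32_t)(0u - (uint32_t)imm);

        /* Negation was a no-op: neg is still negative, so logic keyed
         * off the sign of -imm mishandles this one value. */
        printf("neg == imm: %d, neg < 0: %d\n", neg == imm, neg < 0); /* 1, 1 */
        return 0;
}
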
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index b87a63dba9c8..8b5157ccfeba 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -15,6 +15,7 @@
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
+#include <asm/security_features.h>
#include "bpf_jit64.h"
@@ -35,9 +36,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
* [ prev sp ] <-------------
* [ ... ] |
* sp (r1) ---> [ stack pointer ] --------------
- * [ nv gpr save area ] 6*8
+ * [ nv gpr save area ] 5*8
* [ tail_call_cnt ] 8
- * [ local_tmp_var ] 8
+ * [ local_tmp_var ] 16
* [ unused red zone ] 208 bytes protected
*/
static int bpf_jit_stack_local(struct codegen_context *ctx)
@@ -45,12 +46,12 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
if (bpf_has_stack_frame(ctx))
return STACK_FRAME_MIN_SIZE + ctx->stack_size;
else
- return -(BPF_PPC_STACK_SAVE + 16);
+ return -(BPF_PPC_STACK_SAVE + 24);
}
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
- return bpf_jit_stack_local(ctx) + 8;
+ return bpf_jit_stack_local(ctx) + 16;
}
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
@@ -206,7 +207,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
EMIT(PPC_RAW_BCTRL());
}
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
/*
* By now, the eBPF program has already setup parameters in r3, r4 and r5
@@ -267,13 +268,38 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
bpf_jit_emit_common_epilogue(image, ctx);
EMIT(PPC_RAW_BCTR());
+
/* out: */
+ return 0;
}
+/*
+ * We always spill into the redzone, even if the bpf program has its own stack frame.
+ * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
+ */
+void bpf_stf_barrier(void);
+
+asm (
+" .global bpf_stf_barrier ;"
+" bpf_stf_barrier: ;"
+" std 21,-64(1) ;"
+" std 22,-56(1) ;"
+" sync ;"
+" ld 21,-64(1) ;"
+" ld 22,-56(1) ;"
+" ori 31,31,0 ;"
+" .rept 14 ;"
+" b 1f ;"
+" 1: ;"
+" .endr ;"
+" blr ;"
+);
+
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
u32 *addrs, bool extra_pass)
{
+ enum stf_barrier_type stf_barrier = stf_barrier_type_get();
const struct bpf_insn *insn = fp->insnsi;
int flen = fp->len;
int i, ret;
@@ -328,18 +354,25 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
- case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
+ if (!imm) {
+ goto bpf_alu32_trunc;
+ } else if (imm >= -32768 && imm < 32768) {
+ EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
+ } else {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
- if (BPF_OP(code) == BPF_SUB)
- imm = -imm;
- if (imm) {
- if (imm >= -32768 && imm < 32768)
- EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
- else {
- PPC_LI32(b2p[TMP_REG_1], imm);
- EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
- }
+ if (!imm) {
+ goto bpf_alu32_trunc;
+ } else if (imm > -32768 && imm <= 32768) {
+ EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
+ } else {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
@@ -389,8 +422,14 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
if (imm == 0)
return -EINVAL;
- else if (imm == 1)
- goto bpf_alu32_trunc;
+ if (imm == 1) {
+ if (BPF_OP(code) == BPF_DIV) {
+ goto bpf_alu32_trunc;
+ } else {
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ break;
+ }
+ }
PPC_LI32(b2p[TMP_REG_1], imm);
switch (BPF_CLASS(code)) {
@@ -631,6 +670,29 @@ emit_clear:
* BPF_ST NOSPEC (speculation barrier)
*/
case BPF_ST | BPF_NOSPEC:
+ if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
+ !security_ftr_enabled(SEC_FTR_STF_BARRIER))
+ break;
+
+ switch (stf_barrier) {
+ case STF_BARRIER_EIEIO:
+ EMIT(PPC_RAW_EIEIO() | 0x02000000);
+ break;
+ case STF_BARRIER_SYNC_ORI:
+ EMIT(PPC_RAW_SYNC());
+ EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
+ EMIT(PPC_RAW_ORI(_R31, _R31, 0));
+ break;
+ case STF_BARRIER_FALLBACK:
+ EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
+ PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
+ EMIT(PPC_RAW_MTCTR(12));
+ EMIT(PPC_RAW_BCTRL());
+ EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
+ break;
+ case STF_BARRIER_NONE:
+ break;
+ }
break;
/*
@@ -993,7 +1055,9 @@ cond_branch:
*/
case BPF_JMP | BPF_TAIL_CALL:
ctx->seen |= SEEN_TAILCALL;
- bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+ ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+ if (ret < 0)
+ return ret;
break;
default:
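
The BPF_ST | BPF_NOSPEC lowering above selects one of three store-forwarding
barrier sequences, fixed at boot and read back via stf_barrier_type_get(). A
userspace model of the dispatch (the strings stand in for emitted instructions):

#include <stdio.h>

enum stf_barrier_type {
        STF_BARRIER_NONE,
        STF_BARRIER_FALLBACK,
        STF_BARRIER_EIEIO,
        STF_BARRIER_SYNC_ORI,
};

static void emit_nospec(enum stf_barrier_type t)
{
        switch (t) {
        case STF_BARRIER_EIEIO:
                puts("eieio (barrier variant)");
                break;
        case STF_BARRIER_SYNC_ORI:
                puts("sync; ld r,0(r13); ori 31,31,0");
                break;
        case STF_BARRIER_FALLBACK:
                puts("mflr; call bpf_stf_barrier(); mtlr");
                break;
        case STF_BARRIER_NONE:
                break;  /* nothing to emit */
        }
}

int main(void)
{
        emit_nospec(STF_BARRIER_SYNC_ORI);
        return 0;
}
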
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index bc15200852b7..09fafcf2d3a0 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -867,6 +867,10 @@ static int __init eeh_pseries_init(void)
if (is_kdump_kernel() || reset_devices) {
pr_info("Issue PHB reset ...\n");
list_for_each_entry(phb, &hose_list, list_node) {
+ // Skip if the slot is empty
+ if (list_empty(&PCI_DN(phb->dn)->child_list))
+ continue;
+
pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list);
config_addr = pseries_eeh_get_pe_config_addr(pdn);
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 1b305e411862..8627362f613e 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -507,12 +507,27 @@ static void pseries_msi_unmask(struct irq_data *d)
irq_chip_unmask_parent(d);
}
+static void pseries_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct msi_desc *entry = irq_data_get_msi_desc(data);
+
+ /*
+ * Do not update the MSI-X vector table. It's not strictly necessary
+ * because the table is initialized by the underlying hypervisor, PowerVM
+ * or QEMU/KVM. However, if the MSI-X vector entry is cleared, any further
+ * activation will fail. This can happen in some drivers (e.g. IPR) which
+ * deactivate an IRQ used for testing MSI support.
+ */
+ entry->msg = *msg;
+}
+
static struct irq_chip pseries_pci_msi_irq_chip = {
.name = "pSeries-PCI-MSI",
.irq_shutdown = pseries_msi_shutdown,
.irq_mask = pseries_msi_mask,
.irq_unmask = pseries_msi_unmask,
.irq_eoi = irq_chip_eoi_parent,
+ .irq_write_msi_msg = pseries_msi_write_msg,
};
static struct msi_domain_info pseries_msi_domain_info = {
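
A minimal sketch of the cache-only write hook with simplified stand-in types;
the point is that the descriptor remembers the message while no MMIO write
touches the hypervisor-owned MSI-X table:

#include <stdio.h>

struct msi_msg { unsigned int addr_lo, addr_hi, data; };
struct msi_desc { struct msi_msg msg; };

/* Cache the composed message; deliberately no device write. */
static void write_msg_cached(struct msi_desc *entry, const struct msi_msg *msg)
{
        entry->msg = *msg;
}

int main(void)
{
        struct msi_desc d = { { 0, 0, 0 } };
        struct msi_msg m = { 0xfee00000u, 0, 0x41 };

        write_msg_cached(&d, &m);
        printf("cached data=%#x\n", d.msg.data);        /* 0x41 */
        return 0;
}
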
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 5c1a157a83b8..244a727c6ba4 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -348,9 +348,9 @@ static int xics_host_map(struct irq_domain *domain, unsigned int virq,
if (xics_ics->check(xics_ics, hwirq))
return -EINVAL;
- /* No chip data for the XICS domain */
+ /* Let the ICS be the chip data for the XICS domain (needed for ICS native) */
irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
- NULL, handle_fasteoi_irq, NULL, NULL);
+ xics_ics, handle_fasteoi_irq, NULL, NULL);
return 0;
}
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index c732ce5a3e1a..c5d75c02ad8b 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -945,7 +945,8 @@ static int xive_get_irqchip_state(struct irq_data *data,
* interrupt to be inactive in that case.
*/
*state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
- (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
+ (xd->saved_p || (!!(pq & XIVE_ESB_VAL_P) &&
+ !irqd_irq_disabled(data)));
return 0;
default:
return -EINVAL;
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 301a54233c7e..6a6fa9e976d5 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -561,5 +561,3 @@ menu "Power management options"
source "kernel/power/Kconfig"
endmenu
-
-source "drivers/firmware/Kconfig"
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index b933b1583c9f..34fbb3ea21d5 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -82,4 +82,5 @@ static inline int syscall_get_arch(struct task_struct *task)
#endif
}
+asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
#endif /* _ASM_RISCV_SYSCALL_H */
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index 893e47195e30..208e31bc5d1c 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -16,18 +16,24 @@
#ifdef CONFIG_MMU
#include <linux/types.h>
-#include <generated/vdso-offsets.h>
+/*
+ * All systems with an MMU have a VDSO, but systems without an MMU don't
+ * support shared libraries and therefore don't have one.
+ */
+#ifdef CONFIG_MMU
+
+#define __VVAR_PAGES 1
-#ifndef CONFIG_GENERIC_TIME_VSYSCALL
-struct vdso_data {
-};
-#endif
+#ifndef __ASSEMBLY__
+#include <generated/vdso-offsets.h>
#define VDSO_SYMBOL(base, name) \
(void __user *)((unsigned long)(base) + __vdso_##name##_offset)
#endif /* CONFIG_MMU */
-asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_VDSO_H */
diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h
index 4b989ae15d59..8062996c2dfd 100644
--- a/arch/riscv/include/uapi/asm/unistd.h
+++ b/arch/riscv/include/uapi/asm/unistd.h
@@ -18,9 +18,10 @@
#ifdef __LP64__
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_SYS_CLONE3
#endif /* __LP64__ */
+#define __ARCH_WANT_SYS_CLONE3
+
#include <asm-generic/unistd.h>
/*
diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c
index a63c667c27b3..44b1420a2270 100644
--- a/arch/riscv/kernel/syscall_table.c
+++ b/arch/riscv/kernel/syscall_table.c
@@ -7,7 +7,6 @@
#include <linux/linkage.h>
#include <linux/syscalls.h>
#include <asm-generic/syscalls.h>
-#include <asm/vdso.h>
#include <asm/syscall.h>
#undef __SYSCALL
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index 25a3b8849599..b70956d80408 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -12,14 +12,24 @@
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
+#include <asm/vdso.h>
+
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
#else
-#include <asm/vdso.h>
+struct vdso_data {
+};
#endif
extern char vdso_start[], vdso_end[];
+enum vvar_pages {
+ VVAR_DATA_PAGE_OFFSET,
+ VVAR_NR_PAGES,
+};
+
+#define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT)
+
static unsigned int vdso_pages __ro_after_init;
static struct page **vdso_pagelist __ro_after_init;
@@ -38,7 +48,7 @@ static int __init vdso_init(void)
vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
vdso_pagelist =
- kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
+ kcalloc(vdso_pages + VVAR_NR_PAGES, sizeof(struct page *), GFP_KERNEL);
if (unlikely(vdso_pagelist == NULL)) {
pr_err("vdso: pagelist allocation failed\n");
return -ENOMEM;
@@ -63,38 +73,41 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
unsigned long vdso_base, vdso_len;
int ret;
- vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
+ BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
+
+ vdso_len = (vdso_pages + VVAR_NR_PAGES) << PAGE_SHIFT;
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
- mmap_write_lock(mm);
vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
ret = vdso_base;
goto end;
}
- /*
- * Put vDSO base into mm struct. We need to do this before calling
- * install_special_mapping or the perf counter mmap tracking code
- * will fail to recognise it as a vDSO (since arch_vma_name fails).
- */
- mm->context.vdso = (void *)vdso_base;
+ mm->context.vdso = NULL;
+ ret = install_special_mapping(mm, vdso_base, VVAR_SIZE,
+ (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+ if (unlikely(ret))
+ goto end;
ret =
- install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+ install_special_mapping(mm, vdso_base + VVAR_SIZE,
+ vdso_pages << PAGE_SHIFT,
(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
vdso_pagelist);
- if (unlikely(ret)) {
- mm->context.vdso = NULL;
+ if (unlikely(ret))
goto end;
- }
- vdso_base += (vdso_pages << PAGE_SHIFT);
- ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
- (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+ /*
+ * Put vDSO base into mm struct. We need to do this before calling
+ * install_special_mapping or the perf counter mmap tracking code
+ * will fail to recognise it as a vDSO (since arch_vma_name fails).
+ */
+ mm->context.vdso = (void *)vdso_base + VVAR_SIZE;
- if (unlikely(ret))
- mm->context.vdso = NULL;
end:
mmap_write_unlock(mm);
return ret;
@@ -105,7 +118,7 @@ const char *arch_vma_name(struct vm_area_struct *vma)
if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
return "[vdso]";
if (vma->vm_mm && (vma->vm_start ==
- (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+ (long)vma->vm_mm->context.vdso - VVAR_SIZE))
return "[vdso_data]";
return NULL;
}
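
After this change the VVAR (data) page sits below the vDSO code: context.vdso
points at the code, and arch_vma_name() recovers the data mapping at
vdso - VVAR_SIZE. A small model of the new layout (the base address is
hypothetical):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define VVAR_PAGES      1UL
#define VVAR_SIZE       (VVAR_PAGES * PAGE_SIZE)

int main(void)
{
        unsigned long base = 0x7f0000000000UL;  /* hypothetical mmap base */
        unsigned long vvar = base;              /* "[vdso_data]" */
        unsigned long vdso = base + VVAR_SIZE;  /* "[vdso]", mm->context.vdso */

        printf("vvar=%#lx vdso=%#lx below? %d\n",
               vvar, vdso, vdso - VVAR_SIZE == vvar);   /* ... below? 1 */
        return 0;
}
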
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
index e6f558bca71b..e9111f700af0 100644
--- a/arch/riscv/kernel/vdso/vdso.lds.S
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -3,12 +3,13 @@
* Copyright (C) 2012 Regents of the University of California
*/
#include <asm/page.h>
+#include <asm/vdso.h>
OUTPUT_ARCH(riscv)
SECTIONS
{
- PROVIDE(_vdso_data = . + PAGE_SIZE);
+ PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 094118663285..89f81067e09e 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -16,6 +16,8 @@ static void ipi_remote_fence_i(void *info)
void flush_icache_all(void)
{
+ local_flush_icache_all();
+
if (IS_ENABLED(CONFIG_RISCV_SBI))
sbi_remote_fence_i(NULL);
else
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2bd90c51efd3..b86de61b8caa 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -685,16 +685,6 @@ config STACK_GUARD
The minimum size for the stack guard should be 256 for 31 bit and
512 for 64 bit.
-config WARN_DYNAMIC_STACK
- def_bool n
- prompt "Emit compiler warnings for function with dynamic stack usage"
- help
- This option enables the compiler option -mwarn-dynamicstack. If the
- compiler supports this options generates warnings for functions
- that dynamically allocate stack space using alloca.
-
- Say N if you are unsure.
-
endmenu
menu "I/O subsystem"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index a3cf33ad009f..450b351dfa8e 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -85,13 +85,6 @@ cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
endif
endif
-ifdef CONFIG_WARN_DYNAMIC_STACK
- ifneq ($(call cc-option,-mwarn-dynamicstack),)
- KBUILD_CFLAGS += -mwarn-dynamicstack
- KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack
- endif
-endif
-
ifdef CONFIG_EXPOLINE
ifneq ($(call cc-option,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),)
CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 37b6115ed80e..6aad18ee131d 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -10,6 +10,7 @@ CONFIG_BPF_JIT=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_LSM=y
CONFIG_PREEMPT=y
+CONFIG_SCHED_CORE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@@ -503,6 +504,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
@@ -661,7 +663,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
@@ -720,6 +721,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA3=m
@@ -774,7 +777,6 @@ CONFIG_RANDOM32_SELFTEST=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_DMA_API_DEBUG=y
-CONFIG_STRING_SELFTEST=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
@@ -853,12 +855,12 @@ CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_MIN_HEAP=y
-CONFIG_TEST_SORT=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_STRING_SELFTEST=y
CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
CONFIG_TEST_LIVEPATCH=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 56a1cc85c5d7..f08b161c9446 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -8,6 +8,7 @@ CONFIG_BPF_SYSCALL=y
CONFIG_BPF_JIT=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_LSM=y
+CONFIG_SCHED_CORE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@@ -494,6 +495,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
@@ -648,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
@@ -708,6 +709,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA3=m
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index 36dbf5043fc0..aa995d91cd1d 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -55,7 +55,7 @@ int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
int num_devices, const char *buf);
extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
-extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
+int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv);
extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index e4803ec51110..6b3c366af78e 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -207,6 +207,8 @@ int zpci_enable_device(struct zpci_dev *);
int zpci_disable_device(struct zpci_dev *);
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh);
int zpci_deconfigure_device(struct zpci_dev *zdev);
+void zpci_device_reserved(struct zpci_dev *zdev);
+bool zpci_is_device_configured(struct zpci_dev *zdev);
int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
int zpci_unregister_ioat(struct zpci_dev *, u8);
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index b9f85b2dc053..6af59c59cc1b 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -894,6 +894,11 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
/**
* guest_translate_address - translate guest logical into guest absolute address
+ * @vcpu: virtual cpu
+ * @gva: Guest virtual address
+ * @ar: Access register
+ * @gpa: Guest physical address
+ * @mode: Translation access mode
*
* Parameter semantics are the same as the ones from guest_translate.
* The memory contents at the guest address are not changed.
@@ -934,6 +939,11 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
/**
* check_gva_range - test a range of guest virtual addresses for accessibility
+ * @vcpu: virtual cpu
+ * @gva: Guest virtual address
+ * @ar: Access register
+ * @length: Length of test range
+ * @mode: Translation access mode
*/
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
unsigned long length, enum gacc_mode mode)
@@ -956,6 +966,7 @@ int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
/**
* kvm_s390_check_low_addr_prot_real - check for low-address protection
+ * @vcpu: virtual cpu
* @gra: Guest real address
*
* Checks whether an address is subject to low-address protection and set
@@ -979,6 +990,7 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
* @pgt: pointer to the beginning of the page table for the given address if
* successful (return value 0), or to the first invalid DAT entry in
* case of exceptions (return value > 0)
+ * @dat_protection: referenced memory is write protected
* @fake: pgt references contiguous guest memory block, not a pgtable
*/
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 72b25b7cc6ae..2bd8f854f1b4 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -269,6 +269,7 @@ static int handle_prog(struct kvm_vcpu *vcpu)
/**
* handle_external_interrupt - used for external interruption interceptions
+ * @vcpu: virtual cpu
*
* This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
* the new PSW does not have external interrupts disabled. In the first case,
@@ -315,7 +316,8 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
}
/**
- * Handle MOVE PAGE partial execution interception.
+ * handle_mvpg_pei - Handle MOVE PAGE partial execution interception.
+ * @vcpu: virtual cpu
*
* This interception can only happen for guests with DAT disabled and
* addresses that are currently not mapped in the host. Thus we try to
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 16256e17a544..10722455fd02 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -419,13 +419,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
- set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+ set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
- clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+ clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 752a0ffab9bf..6a6dd5e1daf6 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4066,7 +4066,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
kvm_s390_patch_guest_per_regs(vcpu);
}
- clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
+ clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
vcpu->arch.sie_block->icptcode = 0;
cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index ecd741ee3276..52bc8fbaa60a 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -79,7 +79,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
- return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+ return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}
static inline int kvm_is_ucontrol(struct kvm *kvm)
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index cfcdf76d6a95..a95ca6df4e5e 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -259,14 +259,13 @@ EXPORT_SYMBOL(strcmp);
#ifdef __HAVE_ARCH_STRRCHR
char *strrchr(const char *s, int c)
{
- size_t len = __strend(s) - s;
-
- if (len)
- do {
- if (s[len] == (char) c)
- return (char *) s + len;
- } while (--len > 0);
- return NULL;
+ ssize_t len = __strend(s) - s;
+
+ do {
+ if (s[len] == (char)c)
+ return (char *)s + len;
+ } while (--len >= 0);
+ return NULL;
}
EXPORT_SYMBOL(strrchr);
#endif
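
The strrchr() rewrite above fixes two edge cases: the old loop never tested s[0] for a non-empty string (it terminated at --len > 0), and it returned NULL for the empty string even when c is '\0'. Scanning from the terminating NUL down to index 0 handles both. A user-space model of the corrected algorithm, using strlen() in place of the kernel-internal __strend():

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    /* Same algorithm as the fixed s390 strrchr(): scan from the
     * terminating NUL down to index 0, inclusive. */
    static char *my_strrchr(const char *s, int c)
    {
        ssize_t len = strlen(s);    /* index of the terminating NUL */

        do {
            if (s[len] == (char)c)
                return (char *)s + len;
        } while (--len >= 0);
        return NULL;
    }

    int main(void)
    {
        printf("%s\n", my_strrchr("abcabc", 'a'));    /* last match: "abc" */
        printf("%p\n", (void *)my_strrchr("", 'x'));  /* NULL, no crash */
        return 0;
    }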
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 88419263a89a..1a374d021e25 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -248,8 +248,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
({ \
- /* Branch instruction needs 6 bytes */ \
- int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
+ int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \
_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
@@ -761,10 +760,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb9080000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
- if (!imm)
- break;
- /* alfi %dst,imm */
- EMIT6_IMM(0xc20b0000, dst_reg, imm);
+ if (imm != 0) {
+ /* alfi %dst,imm */
+ EMIT6_IMM(0xc20b0000, dst_reg, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
@@ -786,17 +785,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb9090000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
- if (!imm)
- break;
- /* alfi %dst,-imm */
- EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+ if (imm != 0) {
+ /* alfi %dst,-imm */
+ EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
if (!imm)
break;
- /* agfi %dst,-imm */
- EMIT6_IMM(0xc2080000, dst_reg, -imm);
+ if (imm == -0x80000000) {
+ /* algfi %dst,0x80000000 */
+ EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
+ } else {
+ /* agfi %dst,-imm */
+ EMIT6_IMM(0xc2080000, dst_reg, -imm);
+ }
break;
/*
* BPF_MUL
@@ -811,10 +815,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb90c0000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
- if (imm == 1)
- break;
- /* msfi %r5,imm */
- EMIT6_IMM(0xc2010000, dst_reg, imm);
+ if (imm != 1) {
+ /* msfi %r5,imm */
+ EMIT6_IMM(0xc2010000, dst_reg, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
@@ -867,6 +871,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
if (BPF_OP(insn->code) == BPF_MOD)
/* lhgi %dst,0 */
EMIT4_IMM(0xa7090000, dst_reg, 0);
+ else
+ EMIT_ZERO(dst_reg);
break;
}
/* lhi %w0,0 */
@@ -999,10 +1005,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb9820000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
- if (!imm)
- break;
- /* xilf %dst,imm */
- EMIT6_IMM(0xc0070000, dst_reg, imm);
+ if (imm != 0) {
+ /* xilf %dst,imm */
+ EMIT6_IMM(0xc0070000, dst_reg, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
@@ -1033,10 +1039,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
break;
case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
- if (imm == 0)
- break;
- /* sll %dst,imm(%r0) */
- EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+ if (imm != 0) {
+ /* sll %dst,imm(%r0) */
+ EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
@@ -1058,10 +1064,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
break;
case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
- if (imm == 0)
- break;
- /* srl %dst,imm(%r0) */
- EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+ if (imm != 0) {
+ /* srl %dst,imm(%r0) */
+ EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
@@ -1083,10 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
break;
case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
- if (imm == 0)
- break;
- /* sra %dst,imm(%r0) */
- EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+ if (imm != 0) {
+ /* sra %dst,imm(%r0) */
+ EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
@@ -1820,7 +1826,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
if (jit.addrs == NULL) {
fp = orig_fp;
- goto out;
+ goto free_addrs;
}
/*
* Three initial passes:
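
Two things are going on in the bpf_jit_comp.c hunks above: branch offsets are now computed against jit->prg instead of a hard-coded 6-byte instruction length, and the 32-bit ALU cases no longer skip EMIT_ZERO() when the operation itself is a no-op. The latter matters because BPF_ALU (32-bit) ops are defined to clear the upper half of the 64-bit destination register even for identity operations such as adding 0. A small C model of that requirement, illustrative only:

    #include <stdint.h>
    #include <assert.h>

    /* Model of BPF_ALU | BPF_ADD | BPF_K semantics: the add happens on
     * the low 32 bits, and the upper 32 bits are cleared even when
     * imm == 0. The old JIT skipped both steps for imm == 0, leaving
     * stale upper bits in dst. */
    static uint64_t alu32_add_k(uint64_t dst, uint32_t imm)
    {
        return (uint64_t)((uint32_t)dst + imm);
    }

    int main(void)
    {
        /* Stale upper bits must vanish even for imm == 0. */
        assert(alu32_add_k(0xdeadbeef00000001ull, 0) == 1);
        return 0;
    }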
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index e7e6788d75a8..b833155ce838 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -92,7 +92,7 @@ void zpci_remove_reserved_devices(void)
spin_unlock(&zpci_list_lock);
list_for_each_entry_safe(zdev, tmp, &remove, entry)
- zpci_zdev_put(zdev);
+ zpci_device_reserved(zdev);
}
int pci_domain_nr(struct pci_bus *bus)
@@ -751,6 +751,14 @@ error:
return ERR_PTR(rc);
}
+bool zpci_is_device_configured(struct zpci_dev *zdev)
+{
+ enum zpci_state state = zdev->state;
+
+ return state != ZPCI_FN_STATE_RESERVED &&
+ state != ZPCI_FN_STATE_STANDBY;
+}
+
/**
* zpci_scan_configured_device() - Scan a freshly configured zpci_dev
* @zdev: The zpci_dev to be configured
@@ -822,6 +830,31 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
return 0;
}
+/**
+ * zpci_device_reserved() - Mark device as reserved
+ * @zdev: the zpci_dev that was reserved
+ *
+ * Handle the case where a given zPCI function was reserved by another system.
+ * After a call to this function the zpci_dev can no longer be found via
+ * get_zdev_by_fid() but may still be accessible via existing references,
+ * though it will not be functional anymore.
+ */
+void zpci_device_reserved(struct zpci_dev *zdev)
+{
+ if (zdev->has_hp_slot)
+ zpci_exit_slot(zdev);
+ /*
+ * Remove device from zpci_list as it is going away. This also
+ * makes sure we ignore subsequent zPCI events for this device.
+ */
+ spin_lock(&zpci_list_lock);
+ list_del(&zdev->entry);
+ spin_unlock(&zpci_list_lock);
+ zdev->state = ZPCI_FN_STATE_RESERVED;
+ zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+ zpci_zdev_put(zdev);
+}
+
void zpci_release_device(struct kref *kref)
{
struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
@@ -843,6 +876,12 @@ void zpci_release_device(struct kref *kref)
case ZPCI_FN_STATE_STANDBY:
if (zdev->has_hp_slot)
zpci_exit_slot(zdev);
+ spin_lock(&zpci_list_lock);
+ list_del(&zdev->entry);
+ spin_unlock(&zpci_list_lock);
+ zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+ fallthrough;
+ case ZPCI_FN_STATE_RESERVED:
if (zdev->has_resources)
zpci_cleanup_bus_resources(zdev);
zpci_bus_device_unregister(zdev);
@@ -851,10 +890,6 @@ void zpci_release_device(struct kref *kref)
default:
break;
}
-
- spin_lock(&zpci_list_lock);
- list_del(&zdev->entry);
- spin_unlock(&zpci_list_lock);
zpci_dbg(3, "rem fid:%x\n", zdev->fid);
kfree(zdev);
}
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index c856f80cb21b..5b8d647523f9 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -140,7 +140,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
/* The 0x0304 event may immediately reserve the device */
if (!clp_get_state(zdev->fid, &state) &&
state == ZPCI_FN_STATE_RESERVED) {
- zpci_zdev_put(zdev);
+ zpci_device_reserved(zdev);
}
}
break;
@@ -151,7 +151,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
case 0x0308: /* Standby -> Reserved */
if (!zdev)
break;
- zpci_zdev_put(zdev);
+ zpci_device_reserved(zdev);
break;
default:
break;
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index ae683aa623ac..c5b35ea129cf 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -159,7 +159,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
mmap_read_lock(current->mm);
ret = -EINVAL;
- vma = find_vma(current->mm, mmio_addr);
+ vma = vma_lookup(current->mm, mmio_addr);
if (!vma)
goto out_unlock_mmap;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
@@ -298,7 +298,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
mmap_read_lock(current->mm);
ret = -EINVAL;
- vma = find_vma(current->mm, mmio_addr);
+ vma = vma_lookup(current->mm, mmio_addr);
if (!vma)
goto out_unlock_mmap;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
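
The find_vma() → vma_lookup() switch above is the entire fix: find_vma() returns the first VMA that *ends* above the address, which may well begin above it too, so an address in a hole between mappings would wrongly "hit". vma_lookup() only returns a VMA that actually contains the address. A sketch of its semantics in terms of find_vma() (not the exact upstream implementation):

    #include <linux/mm.h>

    /* Sketch: vma_lookup() == find_vma() plus a containment check. */
    static struct vm_area_struct *vma_lookup_sketch(struct mm_struct *mm,
                                                    unsigned long addr)
    {
        struct vm_area_struct *vma = find_vma(mm, addr);

        /* find_vma() guarantees addr < vma->vm_end; additionally
         * require that the mapping starts at or below addr. */
        if (vma && addr < vma->vm_start)
            vma = NULL;
        return vma;
    }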
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index 58592dfa5cb6..c081e7e2d6e7 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -80,30 +80,30 @@ $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
$(call if_changed,lzo)
-$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
$(call if_changed,uimage,bzip2)
-$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
$(call if_changed,uimage,gzip)
-$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
$(call if_changed,uimage,lzma)
-$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz
+$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE
$(call if_changed,uimage,xz)
-$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
$(call if_changed,uimage,lzo)
-$(obj)/uImage.bin: $(obj)/vmlinux.bin
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
$(call if_changed,uimage,none)
OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
-$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux
+$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
OBJCOPYFLAGS_uImage.srec := -I binary -O srec
-$(obj)/uImage.srec: $(obj)/uImage
+$(obj)/uImage.srec: $(obj)/uImage FORCE
$(call if_changed,objcopy)
$(obj)/uImage: $(obj)/uImage.$(suffix-y)
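
The FORCE prerequisites added above follow the standard kbuild contract: a target built via $(call if_changed,...) must depend on FORCE so its recipe is always evaluated; if_changed then compares the saved command line and decides whether to actually re-run the command. Without FORCE, a stale uImage was not regenerated when only the command changed. The canonical shape looks like:

    # if_changed targets must depend on the phony FORCE target; the
    # macro itself skips the command when nothing relevant changed.
    # (The file should also be listed in 'targets' for cmd tracking.)
    $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
    	$(call if_changed,uimage,gzip)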
diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h
index 56bf35c2f29c..cdced80a7ffa 100644
--- a/arch/sh/include/asm/pgtable-3level.h
+++ b/arch/sh/include/asm/pgtable-3level.h
@@ -34,7 +34,7 @@ typedef struct { unsigned long long pmd; } pmd_t;
static inline pmd_t *pud_pgtable(pud_t pud)
{
- return (pmd_t *)pud_val(pud);
+ return (pmd_t *)(unsigned long)pud_val(pud);
}
/* only used by the stubbed out hugetlb gup code, should never be called */
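
The extra (unsigned long) in pud_pgtable() above is purely a diagnostics fix: pud entries are unsigned long long on this 32-bit 3-level configuration, and casting a 64-bit integer directly to a pointer triggers -Wint-to-pointer-cast. Narrowing through the native word size first is the usual idiom. A standalone illustration, assuming a 32-bit build:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long v = 0x1000; /* like pud_val(pud) */

        /* On 32-bit builds, (void *)v warns: "cast to pointer from
         * integer of different size". Narrowing explicitly first
         * documents the intent and silences the warning. */
        void *p = (void *)(unsigned long)v;

        printf("%p\n", p);
        return 0;
    }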
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index e80ee8641ac3..2672dd03faf3 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -124,6 +124,9 @@
#define SO_BUF_LOCK 0x0051
+#define SO_RESERVE_MEM 0x0052
+
+
#if !defined(__KERNEL__)
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 8e1d72a16759..7ceae24b0ca9 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -356,7 +356,9 @@ err_nomem:
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs)
{
- if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
+ size = PAGE_ALIGN(size);
+
+ if (!sparc_dma_free_resource(cpu_addr, size))
return;
dma_make_coherent(dma_addr, size);
diff --git a/arch/sparc/lib/iomap.c b/arch/sparc/lib/iomap.c
index c9da9f139694..f3a8cd491ce0 100644
--- a/arch/sparc/lib/iomap.c
+++ b/arch/sparc/lib/iomap.c
@@ -19,8 +19,10 @@ void ioport_unmap(void __iomem *addr)
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
+#ifdef CONFIG_PCI
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
/* nothing to do */
}
EXPORT_SYMBOL(pci_iounmap);
+#endif
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4e001bbbb425..d9830e7e1060 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -339,6 +339,11 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
config ARCH_HIBERNATION_POSSIBLE
def_bool y
+config ARCH_NR_GPIO
+ int
+ default 1024 if X86_64
+ default 512
+
config ARCH_SUSPEND_POSSIBLE
def_bool y
@@ -1400,7 +1405,7 @@ config HIGHMEM4G
config HIGHMEM64G
bool "64GB"
- depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
+ depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !MWINCHIP3D && !MK6
select X86_PAE
help
Select this if you have a 32-bit processor and more than 4
@@ -1520,7 +1525,6 @@ config AMD_MEM_ENCRYPT
config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
bool "Activate AMD Secure Memory Encryption (SME) by default"
- default y
depends on AMD_MEM_ENCRYPT
help
Say yes to have system memory encrypted by default if running on
@@ -2605,7 +2609,6 @@ config PCI_OLPC
config PCI_XEN
def_bool y
depends on PCI && XEN
- select SWIOTLB_XEN
config MMCONF_FAM10H
def_bool y
@@ -2828,8 +2831,6 @@ config HAVE_ATOMIC_IOMAP
def_bool y
depends on X86_32
-source "drivers/firmware/Kconfig"
-
source "arch/x86/kvm/Kconfig"
source "arch/x86/Kconfig.assembler"
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index e7355f8b51c2..94834c4b5e5e 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -4,6 +4,12 @@
tune = $(call cc-option,-mtune=$(1),$(2))
+ifdef CONFIG_CC_IS_CLANG
+align := -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0)
+else
+align := -falign-functions=0 -falign-jumps=0 -falign-loops=0
+endif
+
cflags-$(CONFIG_M486SX) += -march=i486
cflags-$(CONFIG_M486) += -march=i486
cflags-$(CONFIG_M586) += -march=i586
@@ -19,11 +25,11 @@ cflags-$(CONFIG_MK6) += -march=k6
# They make zero difference whatsoever to performance at this time.
cflags-$(CONFIG_MK7) += -march=athlon
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
-cflags-$(CONFIG_MCRUSOE) += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
-cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)
+cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)
cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
-cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)
cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
cflags-$(CONFIG_MVIAC7) += -march=i686
cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
diff --git a/arch/x86/crypto/sm4-aesni-avx-asm_64.S b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
index fa2c3f50aecb..18d2f5199194 100644
--- a/arch/x86/crypto/sm4-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
@@ -367,10 +367,11 @@ SYM_FUNC_START(sm4_aesni_avx_crypt8)
* %rdx: src (1..8 blocks)
* %rcx: num blocks (1..8)
*/
- FRAME_BEGIN
-
cmpq $5, %rcx;
jb sm4_aesni_avx_crypt4;
+
+ FRAME_BEGIN
+
vmovdqu (0 * 16)(%rdx), RA0;
vmovdqu (1 * 16)(%rdx), RA1;
vmovdqu (2 * 16)(%rdx), RA2;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 2a57dbed4894..6dfa8ddaa60f 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2465,6 +2465,7 @@ static int x86_pmu_event_init(struct perf_event *event)
if (err) {
if (event->destroy)
event->destroy(event);
+ event->destroy = NULL;
}
if (READ_ONCE(x86_pmu.attr_rdpmc) &&
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 1248fc1937f8..69698051a6ab 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -263,6 +263,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
+ INTEL_EVENT_CONSTRAINT(0xef, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
EVENT_CONSTRAINT_END
};
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index c853b28efa33..96c775abe31f 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -68,6 +68,7 @@ static bool test_intel(int idx, void *data)
case INTEL_FAM6_BROADWELL_D:
case INTEL_FAM6_BROADWELL_G:
case INTEL_FAM6_BROADWELL_X:
+ case INTEL_FAM6_SAPPHIRERAPIDS_X:
case INTEL_FAM6_ATOM_SILVERMONT:
case INTEL_FAM6_ATOM_SILVERMONT_D:
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 32a1ad356c18..db2d92fb44da 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -122,17 +122,27 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
ipi_arg->reserved = 0;
ipi_arg->vp_set.valid_bank_mask = 0;
- if (!cpumask_equal(mask, cpu_present_mask)) {
+ /*
+ * Use HV_GENERIC_SET_ALL and avoid converting cpumask to VP_SET
+ * when the IPI is sent to all currently present CPUs.
+ */
+ if (!cpumask_equal(mask, cpu_present_mask) || exclude_self) {
ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
if (exclude_self)
nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask);
else
nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
- }
- if (nr_bank < 0)
- goto ipi_mask_ex_done;
- if (!nr_bank)
+
+ /*
+ * 'nr_bank <= 0' means some CPUs in cpumask can't be
+	 * represented in VP_SET. Return an error and fall back to the
+	 * native (architectural) method of sending IPIs.
+ */
+ if (nr_bank <= 0)
+ goto ipi_mask_ex_done;
+ } else {
ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
+ }
status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
ipi_arg, NULL);
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index 14ebd2196569..43184640b579 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -25,7 +25,7 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs)
* For !SMAP hardware we patch out CLAC on entry.
*/
if (boot_cpu_has(X86_FEATURE_SMAP) ||
- (IS_ENABLED(CONFIG_64_BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
+ (IS_ENABLED(CONFIG_64BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
mask |= X86_EFLAGS_AC;
WARN_ON_ONCE(flags & mask);
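
The CONFIG_64_BIT → CONFIG_64BIT fix above is a reminder that IS_ENABLED() never complains about unknown symbols: a misspelled config option simply evaluates to 0, so the XENPV clause was silently dead code rather than a build error. A simplified, runnable version of the preprocessor trick behind IS_ENABLED() (without the kernel's module handling):

    #include <stdio.h>

    /* The kernel's IS_ENABLED() trick, simplified: symbols defined to 1
     * yield 1, unknown symbols quietly yield 0. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define IS_ENABLED(option) ___is_defined(option)

    #define CONFIG_64BIT 1

    int main(void)
    {
        printf("%d\n", IS_ENABLED(CONFIG_64BIT));  /* 1 */
        printf("%d\n", IS_ENABLED(CONFIG_64_BIT)); /* 0: typo, no error */
        return 0;
    }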
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index 87bd6025d91d..6a5f3acf2b33 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -46,7 +46,7 @@ struct kvm_page_track_notifier_node {
struct kvm_page_track_notifier_node *node);
};
-void kvm_page_track_init(struct kvm *kvm);
+int kvm_page_track_init(struct kvm *kvm);
void kvm_page_track_cleanup(struct kvm *kvm);
void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);
diff --git a/arch/x86/include/asm/kvmclock.h b/arch/x86/include/asm/kvmclock.h
index eceea9299097..6c5765192102 100644
--- a/arch/x86/include/asm/kvmclock.h
+++ b/arch/x86/include/asm/kvmclock.h
@@ -2,6 +2,20 @@
#ifndef _ASM_X86_KVM_CLOCK_H
#define _ASM_X86_KVM_CLOCK_H
+#include <linux/percpu.h>
+
extern struct clocksource kvm_clock;
+DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+ return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+ return this_cpu_read(hv_clock_per_cpu);
+}
+
#endif /* _ASM_X86_KVM_CLOCK_H */
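
Exposing this_cpu_pvti()/this_cpu_hvclock() in this header (the per-CPU variable itself is defined and exported in kvmclock.c further down) lets other guest-side code read its own vCPU's pvclock area instead of assuming hv_clock_boot[0]. A hedged sketch of a caller, assuming preemption is disabled around the read so "this CPU" stays stable:

    #include <asm/kvmclock.h>
    #include <asm/pvclock.h>

    /* Sketch: read this vCPU's kvmclock time via the new accessor. */
    static u64 read_my_kvmclock(void)
    {
        return pvclock_clocksource_read(this_cpu_pvti());
    }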
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
index 5c7bcaa79623..1d5f14aff5f6 100644
--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -2,8 +2,6 @@
#ifndef _ASM_X86_PKEYS_H
#define _ASM_X86_PKEYS_H
-#define ARCH_DEFAULT_PKEY 0
-
/*
* If more than 16 keys are ever supported, a thorough audit
* will be necessary to ensure that the types that store key
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index f3fbb84ff8a7..68c257a3de0d 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -275,7 +275,7 @@ static inline int enqcmds(void __iomem *dst, const void *src)
{
const struct { char _[64]; } *__src = src;
struct { char _[64]; } __iomem *__dst = dst;
- int zf;
+ bool zf;
/*
* ENQCMDS %(rdx), rax
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
index 3506d8c598c1..4557f7cb0fa6 100644
--- a/arch/x86/include/asm/xen/pci.h
+++ b/arch/x86/include/asm/xen/pci.h
@@ -14,16 +14,19 @@ static inline int pci_xen_hvm_init(void)
return -1;
}
#endif
-#if defined(CONFIG_XEN_DOM0)
+#ifdef CONFIG_XEN_PV_DOM0
int __init pci_xen_initial_domain(void);
-int xen_find_device_domain_owner(struct pci_dev *dev);
-int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
-int xen_unregister_device_domain_owner(struct pci_dev *dev);
#else
static inline int __init pci_xen_initial_domain(void)
{
return -1;
}
+#endif
+#ifdef CONFIG_XEN_DOM0
+int xen_find_device_domain_owner(struct pci_dev *dev);
+int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
+int xen_unregister_device_domain_owner(struct pci_dev *dev);
+#else
static inline int xen_find_device_domain_owner(struct pci_dev *dev)
{
return -1;
diff --git a/arch/x86/include/asm/xen/swiotlb-xen.h b/arch/x86/include/asm/xen/swiotlb-xen.h
index 6b56d0d45d15..66b4ddde7743 100644
--- a/arch/x86/include/asm/xen/swiotlb-xen.h
+++ b/arch/x86/include/asm/xen/swiotlb-xen.h
@@ -3,14 +3,10 @@
#define _ASM_X86_SWIOTLB_XEN_H
#ifdef CONFIG_SWIOTLB_XEN
-extern int xen_swiotlb;
extern int __init pci_xen_swiotlb_detect(void);
-extern void __init pci_xen_swiotlb_init(void);
extern int pci_xen_swiotlb_init_late(void);
#else
-#define xen_swiotlb (0)
-static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
-static inline void __init pci_xen_swiotlb_init(void) { }
+#define pci_xen_swiotlb_detect NULL
static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 0f8885949e8c..b3410f1ac217 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -326,6 +326,7 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_SMAP
cr4_set_bits(X86_CR4_SMAP);
#else
+ clear_cpu_cap(c, X86_FEATURE_SMAP);
cr4_clear_bits(X86_CR4_SMAP);
#endif
}
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 8cb7816d03b4..193204aee880 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
static void kill_me_now(struct callback_head *ch)
{
+ struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
+
+ p->mce_count = 0;
force_sig(SIGBUS);
}
@@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb)
int flags = MF_ACTION_REQUIRED;
int ret;
+ p->mce_count = 0;
pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
if (!p->mce_ripv)
@@ -1290,17 +1294,34 @@ static void kill_me_maybe(struct callback_head *cb)
}
}
-static void queue_task_work(struct mce *m, int kill_current_task)
+static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
{
- current->mce_addr = m->addr;
- current->mce_kflags = m->kflags;
- current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
- current->mce_whole_page = whole_page(m);
+ int count = ++current->mce_count;
- if (kill_current_task)
- current->mce_kill_me.func = kill_me_now;
- else
- current->mce_kill_me.func = kill_me_maybe;
+ /* First call, save all the details */
+ if (count == 1) {
+ current->mce_addr = m->addr;
+ current->mce_kflags = m->kflags;
+ current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+ current->mce_whole_page = whole_page(m);
+
+ if (kill_current_task)
+ current->mce_kill_me.func = kill_me_now;
+ else
+ current->mce_kill_me.func = kill_me_maybe;
+ }
+
+ /* Ten is likely overkill. Don't expect more than two faults before task_work() */
+ if (count > 10)
+ mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
+
+ /* Second or later call, make sure page address matches the one from first call */
+ if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
+ mce_panic("Consecutive machine checks to different user pages", m, msg);
+
+ /* Do not call task_work_add() more than once */
+ if (count > 1)
+ return;
task_work_add(current, &current->mce_kill_me, TWA_RESUME);
}
@@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
/* If this triggers there is no way to recover. Die hard. */
BUG_ON(!on_thread_stack() || !user_mode(regs));
- queue_task_work(&m, kill_current_task);
+ queue_task_work(&m, msg, kill_current_task);
} else {
/*
@@ -1456,7 +1477,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
}
if (m.kflags & MCE_IN_KERNEL_COPYIN)
- queue_task_work(&m, kill_current_task);
+ queue_task_work(&m, msg, kill_current_task);
}
out:
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
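
The reworked queue_task_work() above is an idempotence-plus-sanity pattern: per-task state is captured on the first machine check, repeat faults on the same page are tolerated but counted (capped at ten), faults on a different user page panic, and task_work_add() is only ever called once before the work runs. A compact user-space model of the counting logic, with hypothetical names and assuming 4K pages:

    #include <stdio.h>
    #include <stdlib.h>

    static int mce_count;
    static unsigned long saved_page;

    /* Model of the mce_count logic: only the first fault queues work,
     * later faults must hit the same page, and too many in a row is
     * treated as unrecoverable. */
    static void queue_fault(unsigned long addr)
    {
        int count = ++mce_count;
        unsigned long page = addr >> 12;   /* 4K page number */

        if (count == 1)
            saved_page = page;             /* first call: save details */

        if (count > 10) {                  /* "ten is likely overkill" */
            fprintf(stderr, "panic: too many consecutive faults\n");
            exit(1);
        }
        if (count > 1 && saved_page != page) {
            fprintf(stderr, "panic: faults on different pages\n");
            exit(1);
        }
        if (count > 1)
            return;                        /* work already queued */
        printf("task_work_add() once, page %#lx\n", page);
    }

    int main(void)
    {
        queue_fault(0x1000);   /* queues the task work */
        queue_fault(0x1008);   /* same page: tolerated, no re-queue */
        return 0;
    }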
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 4b8813bafffd..bb1c3f5f60c8 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -527,12 +527,14 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
rdt_domain_reconfigure_cdp(r);
if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
- kfree(d);
+ kfree(hw_dom);
return;
}
if (r->mon_capable && domain_setup_mon_state(r, d)) {
- kfree(d);
+ kfree(hw_dom->ctrl_val);
+ kfree(hw_dom->mbps_val);
+ kfree(hw_dom);
return;
}
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 38837dad46e6..391a4e2b8604 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -714,12 +714,6 @@ static struct chipset early_qrk[] __initdata = {
*/
{ PCI_VENDOR_ID_INTEL, 0x0f00,
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
- { PCI_VENDOR_ID_INTEL, 0x3e20,
- PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
- { PCI_VENDOR_ID_INTEL, 0x3ec4,
- PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
- { PCI_VENDOR_ID_INTEL, 0x8a12,
- PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
{ PCI_VENDOR_ID_BROADCOM, 0x4331,
PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
{}
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 445c57c9c539..831b25c5e705 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -379,9 +379,14 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
sizeof(fpu->state.fxsave)))
return -EFAULT;
- /* Reject invalid MXCSR values. */
- if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
- return -EINVAL;
+ if (IS_ENABLED(CONFIG_X86_64)) {
+ /* Reject invalid MXCSR values. */
+ if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
+ return -EINVAL;
+ } else {
+ /* Mask invalid bits out for historical reasons (broken hardware). */
+ fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;
+ }
/* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */
if (use_xsave())
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 42fc41dd0e1f..882213df3713 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -10,6 +10,7 @@
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/time.h>
+#include <asm/mwait.h>
#undef pr_fmt
#define pr_fmt(fmt) "hpet: " fmt
@@ -916,6 +917,83 @@ static bool __init hpet_counting(void)
return false;
}
+static bool __init mwait_pc10_supported(void)
+{
+ unsigned int eax, ebx, ecx, mwait_substates;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return false;
+
+ if (!cpu_feature_enabled(X86_FEATURE_MWAIT))
+ return false;
+
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return false;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+
+ return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) &&
+ (ecx & CPUID5_ECX_INTERRUPT_BREAK) &&
+ (mwait_substates & (0xF << 28));
+}
+
+/*
+ * Check whether the system supports PC10. If so, force disable HPET as that
+ * stops counting in PC10. This check is overbroad as it does not take any
+ * of the following into account:
+ *
+ * - ACPI tables
+ * - Enablement of intel_idle
+ * - Command line arguments which limit intel_idle C-state support
+ *
+ * That's perfectly fine. HPET is a piece of hardware designed by committee
+ * and the only reason why it is still in use on modern systems is the
+ * fact that it is impossible to reliably query TSC and CPU frequency via
+ * CPUID or firmware.
+ *
+ * If HPET is functional it is useful for calibrating TSC, but this can be
+ * done via PMTIMER as well which seems to be the last remaining timer on
+ * X86/INTEL platforms that has not been completely wrecked by feature
+ * creep.
+ *
+ * In theory HPET support should be removed altogether, but there are older
+ * systems out there which depend on it because TSC and APIC timer are
+ * dysfunctional in deeper C-states.
+ *
+ * It's only 20 years now that hardware people have been asked to provide
+ * reliable and discoverable facilities which can be used for timekeeping
+ * and per CPU timer interrupts.
+ *
+ * The probability that this problem is going to be solved in the
+ * foreseeable future is close to zero, so the kernel has to be cluttered
+ * with heuristics to keep up with the ever growing amount of hardware and
+ * firmware trainwrecks. Hopefully some day hardware people will understand
+ * that the approach of "This can be fixed in software" is not sustainable.
+ * Hope dies last...
+ */
+static bool __init hpet_is_pc10_damaged(void)
+{
+ unsigned long long pcfg;
+
+ /* Check whether PC10 substates are supported */
+ if (!mwait_pc10_supported())
+ return false;
+
+ /* Check whether PC10 is enabled in PKG C-state limit */
+ rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, pcfg);
+ if ((pcfg & 0xF) < 8)
+ return false;
+
+ if (hpet_force_user) {
+ pr_warn("HPET force enabled via command line, but dysfunctional in PC10.\n");
+ return false;
+ }
+
+ pr_info("HPET dysfunctional in PC10. Force disabled.\n");
+ boot_hpet_disable = true;
+ return true;
+}
+
/**
* hpet_enable - Try to setup the HPET timer. Returns 1 on success.
*/
@@ -929,6 +1007,9 @@ int __init hpet_enable(void)
if (!is_hpet_capable())
return 0;
+ if (hpet_is_pc10_damaged())
+ return 0;
+
hpet_set_mapping();
if (!hpet_virt_address)
return 0;
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index ad273e5861c1..73c74b961d0f 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -49,18 +49,9 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
static struct pvclock_vsyscall_time_info
hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static struct pvclock_wall_clock wall_clock __bss_decrypted;
-static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
static struct pvclock_vsyscall_time_info *hvclock_mem;
-
-static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
-{
- return &this_cpu_read(hv_clock_per_cpu)->pvti;
-}
-
-static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
-{
- return this_cpu_read(hv_clock_per_cpu);
-}
+DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);
/*
* The wallclock is the time of day when we booted. Since then, some time may
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 79f164141116..40ed44ead063 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -830,6 +830,20 @@ void __init setup_arch(char **cmdline_p)
x86_init.oem.arch_setup();
+ /*
+ * Do some memory reservations *before* memory is added to memblock, so
+ * memblock allocations won't overwrite it.
+ *
+ * After this point, everything still needed from the boot loader or
+ * firmware or kernel text should be early reserved or marked not RAM in
+ * e820. All other memory is free game.
+ *
+ * This call needs to happen before e820__memory_setup() which calls the
+ * xen_memory_setup() on Xen dom0 which relies on the fact that those
+ * early reservations have happened already.
+ */
+ early_reserve_memory();
+
iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
e820__memory_setup();
parse_setup_data();
@@ -876,18 +890,6 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
- /*
- * Do some memory reservations *before* memory is added to
- * memblock, so memblock allocations won't overwrite it.
- * Do it after early param, so we could get (unlikely) panic from
- * serial.
- *
- * After this point everything still needed from the boot loader or
- * firmware or kernel text should be early reserved or marked not
- * RAM in e820. All other memory is free game.
- */
- early_reserve_memory();
-
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Memory used by the kernel cannot be hot-removed because Linux
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index 9f90f460a28c..bf1033a62e48 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -130,6 +130,8 @@ static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
} else {
ret = ES_VMM_ERROR;
}
+ } else if (ghcb->save.sw_exit_info_1 & 0xffffffff) {
+ ret = ES_VMM_ERROR;
} else {
ret = ES_OK;
}
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index fe03bd978761..751aa85a3001 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -65,8 +65,8 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
for (i = 0; i < nent; i++) {
e = &entries[i];
- if (e->function == function && (e->index == index ||
- !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
+ if (e->function == function &&
+ (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
return e;
}
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 2837110e66ed..9a144ca8e146 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -435,7 +435,6 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
__FOP_RET(#op)
asm(".pushsection .fixup, \"ax\"\n"
- ".global kvm_fastop_exception \n"
"kvm_fastop_exception: xor %esi, %esi; ret\n"
".popsection");
@@ -4206,7 +4205,7 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
- return emulate_ud(ctxt);
+ return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 232a86a6faaf..d5124b520f76 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -939,7 +939,7 @@ static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
stimer_init(&hv_vcpu->stimer[i], i);
- hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
+ hv_vcpu->vp_index = vcpu->vcpu_idx;
return 0;
}
@@ -1444,7 +1444,6 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
switch (msr) {
case HV_X64_MSR_VP_INDEX: {
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
- int vcpu_idx = kvm_vcpu_get_idx(vcpu);
u32 new_vp_index = (u32)data;
if (!host || new_vp_index >= KVM_MAX_VCPUS)
@@ -1459,9 +1458,9 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
* VP index is changing, adjust num_mismatched_vp_indexes if
* it now matches or no longer matches vcpu_idx.
*/
- if (hv_vcpu->vp_index == vcpu_idx)
+ if (hv_vcpu->vp_index == vcpu->vcpu_idx)
atomic_inc(&hv->num_mismatched_vp_indexes);
- else if (new_vp_index == vcpu_idx)
+ else if (new_vp_index == vcpu->vcpu_idx)
atomic_dec(&hv->num_mismatched_vp_indexes);
hv_vcpu->vp_index = new_vp_index;
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 730da8537d05..ed1c4e546d04 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -83,7 +83,7 @@ static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
- return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu);
+ return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
}
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index ff005fe738a4..8c065da73f8e 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -319,8 +319,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
unsigned index;
bool mask_before, mask_after;
union kvm_ioapic_redirect_entry *e;
- unsigned long vcpu_bitmap;
int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
+ DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
switch (ioapic->ioregsel) {
case IOAPIC_REG_VERSION:
@@ -384,9 +384,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
irq.shorthand = APIC_DEST_NOSHORT;
irq.dest_id = e->fields.dest_id;
irq.msi_redir_hint = false;
- bitmap_zero(&vcpu_bitmap, 16);
+ bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
- &vcpu_bitmap);
+ vcpu_bitmap);
if (old_dest_mode != e->fields.dest_mode ||
old_dest_id != e->fields.dest_id) {
/*
@@ -399,10 +399,10 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
kvm_lapic_irq_dest_mode(
!!e->fields.dest_mode);
kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
- &vcpu_bitmap);
+ vcpu_bitmap);
}
kvm_make_scan_ioapic_request_mask(ioapic->kvm,
- &vcpu_bitmap);
+ vcpu_bitmap);
} else {
kvm_make_scan_ioapic_request(ioapic->kvm);
}
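
The ioapic_write_indirect() change above fixes a stack overflow: the old code passed the address of a single unsigned long (64 bits on x86-64) to helpers that set one bit per possible vCPU, so any KVM_MAX_VCPUS above 64 wrote past the variable. DECLARE_BITMAP() sizes the array from the bit count; it boils down to this, runnable standalone:

    #include <stdio.h>
    #include <limits.h>

    /* The kernel's DECLARE_BITMAP(): an array of unsigned long with
     * enough elements to hold 'bits' bits. */
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define BITS_TO_LONGS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
    #define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

    #define KVM_MAX_VCPUS 1024 /* illustrative; the real value is per-arch */

    int main(void)
    {
        DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);

        /* 1024 bits need 16 longs on LP64, not the single long the old
         * 'unsigned long vcpu_bitmap' provided. */
        printf("%zu longs\n", sizeof(vcpu_bitmap) / sizeof(vcpu_bitmap[0]));
        return 0;
    }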
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 76fb00921203..d6ac32f3f650 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2321,13 +2321,14 @@ EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
struct kvm_lapic *apic = vcpu->arch.apic;
+ u64 msr_val;
int i;
if (!init_event) {
- vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE |
- MSR_IA32_APICBASE_ENABLE;
+ msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
if (kvm_vcpu_is_reset_bsp(vcpu))
- vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
+ msr_val |= MSR_IA32_APICBASE_BSP;
+ kvm_lapic_set_base(vcpu, msr_val);
}
if (!apic)
@@ -2336,11 +2337,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
/* Stop the timer in case it's a reset to an active apic */
hrtimer_cancel(&apic->lapic_timer.timer);
- if (!init_event) {
- apic->base_address = APIC_DEFAULT_PHYS_BASE;
-
+ /* The xAPIC ID is set at RESET even if the APIC was already enabled. */
+ if (!init_event)
kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
- }
kvm_apic_set_version(apic->vcpu);
for (i = 0; i < KVM_APIC_LVT_NUM; i++)
@@ -2481,6 +2480,11 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
lapic_timer_advance_dynamic = false;
}
+ /*
+ * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
+ * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
+ */
+ vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
@@ -2942,5 +2946,7 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
void kvm_lapic_exit(void)
{
static_key_deferred_flush(&apic_hw_disabled);
+ WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
static_key_deferred_flush(&apic_sw_disabled);
+ WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
}
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 2d7e61122af8..1a64ba5b9437 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2027,8 +2027,8 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents)
} while (!sp->unsync_children);
}
-static void mmu_sync_children(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *parent)
+static int mmu_sync_children(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_page *parent, bool can_yield)
{
int i;
struct kvm_mmu_page *sp;
@@ -2055,12 +2055,18 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
}
if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+ if (!can_yield) {
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+ return -EINTR;
+ }
+
cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
flush = false;
}
}
kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+ return 0;
}
static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
@@ -2146,9 +2152,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
- if (sp->unsync_children)
- kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-
__clear_sp_write_flooding_count(sp);
trace_get_page:
@@ -3684,7 +3687,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
write_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
- mmu_sync_children(vcpu, sp);
+ mmu_sync_children(vcpu, sp, true);
kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
write_unlock(&vcpu->kvm->mmu_lock);
@@ -3700,7 +3703,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
if (IS_VALID_PAE_ROOT(root)) {
root &= PT64_BASE_ADDR_MASK;
sp = to_shadow_page(root);
- mmu_sync_children(vcpu, sp);
+ mmu_sync_children(vcpu, sp, true);
}
}
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 269f11f92fd0..21427e84a82e 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -164,13 +164,13 @@ void kvm_page_track_cleanup(struct kvm *kvm)
cleanup_srcu_struct(&head->track_srcu);
}
-void kvm_page_track_init(struct kvm *kvm)
+int kvm_page_track_init(struct kvm *kvm)
{
struct kvm_page_track_notifier_head *head;
head = &kvm->arch.track_notifier_head;
- init_srcu_struct(&head->track_srcu);
INIT_HLIST_HEAD(&head->track_notifier_list);
+ return init_srcu_struct(&head->track_srcu);
}
/*
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 7d03e9b7ccfa..913d52a7923e 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -707,8 +707,27 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
if (!is_shadow_present_pte(*it.sptep)) {
table_gfn = gw->table_gfn[it.level - 2];
access = gw->pt_access[it.level - 2];
- sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
- false, access);
+ sp = kvm_mmu_get_page(vcpu, table_gfn, addr,
+ it.level-1, false, access);
+ /*
+ * We must synchronize the pagetable before linking it
+	 * because the guest doesn't need to flush the TLB when
+ * the gpte is changed from non-present to present.
+ * Otherwise, the guest may use the wrong mapping.
+ *
+ * For PG_LEVEL_4K, kvm_mmu_get_page() has already
+ * synchronized it transiently via kvm_sync_page().
+ *
+ * For higher level pagetable, we synchronize it via
+ * the slower mmu_sync_children(). If it needs to
+ * break, some progress has been made; return
+ * RET_PF_RETRY and retry on the next #PF.
+ * KVM_REQ_MMU_SYNC is not necessary but it
+ * expedites the process.
+ */
+ if (sp->unsync_children &&
+ mmu_sync_children(vcpu, sp, false))
+ return RET_PF_RETRY;
}
/*
@@ -1047,14 +1066,6 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
* Using the cached information from sp->gfns is safe because:
* - The spte has a reference to the struct page, so the pfn for a given gfn
* can't change unless all sptes pointing to it are nuked first.
- *
- * Note:
- * We should flush all tlbs if spte is dropped even though guest is
- * responsible for it. Since if we don't, kvm_mmu_notifier_invalidate_page
- * and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't
- * used by guest then tlbs are not flushed, so guest is allowed to access the
- * freed pages.
- * And we increase kvm->tlbs_dirty to delay tlbs flush in this case.
*/
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
@@ -1107,13 +1118,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
return 0;
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
- /*
- * Update spte before increasing tlbs_dirty to make
- * sure no tlb flush is lost after spte is zapped; see
- * the comments in kvm_flush_remote_tlbs().
- */
- smp_wmb();
- vcpu->kvm->tlbs_dirty++;
+ set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
continue;
}
@@ -1128,12 +1133,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
if (gfn != sp->gfns[i]) {
drop_spte(vcpu->kvm, &sp->spt[i]);
- /*
- * The same as above where we are doing
- * prefetch_invalid_gpte().
- */
- smp_wmb();
- vcpu->kvm->tlbs_dirty++;
+ set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
continue;
}
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 2545d0c61985..510b833cbd39 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -545,7 +545,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
(svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
- svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
svm->vmcb->control.int_state = svm->nested.ctl.int_state;
svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
@@ -579,7 +578,7 @@ static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to
}
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
- struct vmcb *vmcb12)
+ struct vmcb *vmcb12, bool from_vmrun)
{
struct vcpu_svm *svm = to_svm(vcpu);
int ret;
@@ -609,13 +608,16 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
nested_vmcb02_prepare_save(svm, vmcb12);
ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
- nested_npt_enabled(svm), true);
+ nested_npt_enabled(svm), from_vmrun);
if (ret)
return ret;
if (!npt_enabled)
vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
+ if (!from_vmrun)
+ kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+
svm_set_gif(svm, true);
return 0;
@@ -681,7 +683,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
svm->nested.nested_run_pending = 1;
- if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
+ if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
goto out_exit_err;
if (nested_svm_vmrun_msrpm(svm))
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 75e0b21ad07c..0d21d59936e5 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -595,43 +595,55 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
return 0;
}
-static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ int *error)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_launch_update_vmsa vmsa;
+ struct vcpu_svm *svm = to_svm(vcpu);
+ int ret;
+
+ /* Perform some pre-encryption checks against the VMSA */
+ ret = sev_es_sync_vmsa(svm);
+ if (ret)
+ return ret;
+
+ /*
+ * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
+ * the VMSA memory content (i.e it will write the same memory region
+ * with the guest's key), so invalidate it first.
+ */
+ clflush_cache_range(svm->vmsa, PAGE_SIZE);
+
+ vmsa.reserved = 0;
+ vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
+ vmsa.address = __sme_pa(svm->vmsa);
+ vmsa.len = PAGE_SIZE;
+ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
+ if (ret)
+ return ret;
+
+ vcpu->arch.guest_state_protected = true;
+ return 0;
+}
+
+static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
struct kvm_vcpu *vcpu;
int i, ret;
if (!sev_es_guest(kvm))
return -ENOTTY;
- vmsa.reserved = 0;
-
kvm_for_each_vcpu(i, vcpu, kvm) {
- struct vcpu_svm *svm = to_svm(vcpu);
-
- /* Perform some pre-encryption checks against the VMSA */
- ret = sev_es_sync_vmsa(svm);
+ ret = mutex_lock_killable(&vcpu->mutex);
if (ret)
return ret;
- /*
- * The LAUNCH_UPDATE_VMSA command will perform in-place
- * encryption of the VMSA memory content (i.e it will write
- * the same memory region with the guest's key), so invalidate
- * it first.
- */
- clflush_cache_range(svm->vmsa, PAGE_SIZE);
+ ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);
- vmsa.handle = sev->handle;
- vmsa.address = __sme_pa(svm->vmsa);
- vmsa.len = PAGE_SIZE;
- ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,
- &argp->error);
+ mutex_unlock(&vcpu->mutex);
if (ret)
return ret;
-
- svm->vcpu.arch.guest_state_protected = true;
}
return 0;
@@ -1397,8 +1409,10 @@ static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* Bind ASID to this guest */
ret = sev_bind_asid(kvm, start.handle, error);
- if (ret)
+ if (ret) {
+ sev_decommission(start.handle);
goto e_free_session;
+ }
params.handle = start.handle;
if (copy_to_user((void __user *)(uintptr_t)argp->data,
@@ -1464,7 +1478,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* Pin guest memory */
guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
- PAGE_SIZE, &n, 0);
+ PAGE_SIZE, &n, 1);
if (IS_ERR(guest_page)) {
ret = PTR_ERR(guest_page);
goto e_free_trans;
@@ -1501,6 +1515,20 @@ static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
}
+static bool cmd_allowed_from_mirror(u32 cmd_id)
+{
+ /*
+	 * Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
+	 * on active mirror VMs. Also allow the debugging and status commands.
+ */
+ if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
+ cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
+ cmd_id == KVM_SEV_DBG_ENCRYPT)
+ return true;
+
+ return false;
+}
+
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
struct kvm_sev_cmd sev_cmd;
@@ -1517,8 +1545,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
mutex_lock(&kvm->lock);
- /* enc_context_owner handles all memory enc operations */
- if (is_mirroring_enc_context(kvm)) {
+ /* Only the enc_context_owner handles some memory enc operations. */
+ if (is_mirroring_enc_context(kvm) &&
+	    !cmd_allowed_from_mirror(sev_cmd.id)) {
r = -EINVAL;
goto out;
}
@@ -1715,8 +1744,7 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
{
struct file *source_kvm_file;
struct kvm *source_kvm;
- struct kvm_sev_info *mirror_sev;
- unsigned int asid;
+ struct kvm_sev_info source_sev, *mirror_sev;
int ret;
source_kvm_file = fget(source_fd);
@@ -1739,7 +1767,8 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
goto e_source_unlock;
}
- asid = to_kvm_svm(source_kvm)->sev_info.asid;
+ memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
+ sizeof(source_sev));
/*
* The mirror kvm holds an enc_context_owner ref so its asid can't
@@ -1759,8 +1788,16 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
/* Set enc_context_owner and copy its encryption context over */
mirror_sev = &to_kvm_svm(kvm)->sev_info;
mirror_sev->enc_context_owner = source_kvm;
- mirror_sev->asid = asid;
mirror_sev->active = true;
+ mirror_sev->asid = source_sev.asid;
+ mirror_sev->fd = source_sev.fd;
+ mirror_sev->es_active = source_sev.es_active;
+ mirror_sev->handle = source_sev.handle;
+ /*
+	 * Do not copy ap_jump_table, since the mirror does not share the same
+	 * KVM contexts as the original and they may have different memory views.
+ */
mutex_unlock(&kvm->lock);
return 0;
@@ -2551,7 +2588,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
return -EINVAL;
return kvm_sev_es_string_io(&svm->vcpu, size, port,
- svm->ghcb_sa, svm->ghcb_sa_len, in);
+ svm->ghcb_sa, svm->ghcb_sa_len / size, in);
}
void sev_es_init_vmcb(struct vcpu_svm *svm)
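
In the sev_es_string_io() change above, ghcb_sa_len is a byte count while kvm_sev_es_string_io() expects the number of repetitions, each 'size' bytes wide, so the length is divided by the element size. The arithmetic, spelled out:

    #include <assert.h>

    int main(void)
    {
        unsigned int ghcb_sa_len = 8;   /* bytes in the scratch area */
        int size = 2;                   /* 16-bit port I/O elements */

        /* rep count = bytes / element size, e.g. "rep outsw" runs 4x */
        assert(ghcb_sa_len / size == 4);
        return 0;
    }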
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 05e8d4d27969..989685098b3e 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1566,6 +1566,8 @@ static void svm_clear_vintr(struct vcpu_svm *svm)
svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
V_IRQ_INJECTION_BITS_MASK;
+
+ svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
}
vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
@@ -2222,6 +2224,10 @@ static int gp_interception(struct kvm_vcpu *vcpu)
if (error_code)
goto reinject;
+ /* All SVM instructions expect page aligned RAX */
+ if (svm->vmcb->save.rax & ~PAGE_MASK)
+ goto reinject;
+
/* Decode the instruction for usage later */
if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
goto reinject;
@@ -4285,43 +4291,44 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
struct kvm_host_map map_save;
int ret;
- if (is_guest_mode(vcpu)) {
- /* FED8h - SVM Guest */
- put_smstate(u64, smstate, 0x7ed8, 1);
- /* FEE0h - SVM Guest VMCB Physical Address */
- put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
+ if (!is_guest_mode(vcpu))
+ return 0;
- svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
- svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
- svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+ /* FED8h - SVM Guest */
+ put_smstate(u64, smstate, 0x7ed8, 1);
+ /* FEE0h - SVM Guest VMCB Physical Address */
+ put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
- ret = nested_svm_vmexit(svm);
- if (ret)
- return ret;
+ svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+ svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+ svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
- /*
- * KVM uses VMCB01 to store L1 host state while L2 runs but
- * VMCB01 is going to be used during SMM and thus the state will
- * be lost. Temporary save non-VMLOAD/VMSAVE state to the host save
- * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
- * format of the area is identical to guest save area offsetted
- * by 0x400 (matches the offset of 'struct vmcb_save_area'
- * within 'struct vmcb'). Note: HSAVE area may also be used by
- * L1 hypervisor to save additional host context (e.g. KVM does
- * that, see svm_prepare_guest_switch()) which must be
- * preserved.
- */
- if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
- &map_save) == -EINVAL)
- return 1;
+ ret = nested_svm_vmexit(svm);
+ if (ret)
+ return ret;
- BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
+ /*
+ * KVM uses VMCB01 to store L1 host state while L2 runs but
+ * VMCB01 is going to be used during SMM and thus the state will
+	 * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save
+ * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
+ * format of the area is identical to guest save area offsetted
+ * by 0x400 (matches the offset of 'struct vmcb_save_area'
+ * within 'struct vmcb'). Note: HSAVE area may also be used by
+ * L1 hypervisor to save additional host context (e.g. KVM does
+ * that, see svm_prepare_guest_switch()) which must be
+ * preserved.
+ */
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
+ &map_save) == -EINVAL)
+ return 1;
- svm_copy_vmrun_state(map_save.hva + 0x400,
- &svm->vmcb01.ptr->save);
+ BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
- kvm_vcpu_unmap(vcpu, &map_save, true);
- }
+ svm_copy_vmrun_state(map_save.hva + 0x400,
+ &svm->vmcb01.ptr->save);
+
+ kvm_vcpu_unmap(vcpu, &map_save, true);
return 0;
}
@@ -4329,50 +4336,54 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct kvm_host_map map, map_save;
- int ret = 0;
+ u64 saved_efer, vmcb12_gpa;
+ struct vmcb *vmcb12;
+ int ret;
- if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
- u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
- u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
- u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
- struct vmcb *vmcb12;
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
+ return 0;
- if (guest) {
- if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
- return 1;
+ /* Non-zero if SMI arrived while vCPU was in guest mode. */
+ if (!GET_SMSTATE(u64, smstate, 0x7ed8))
+ return 0;
- if (!(saved_efer & EFER_SVME))
- return 1;
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+ return 1;
- if (kvm_vcpu_map(vcpu,
- gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
- return 1;
+ saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
+ if (!(saved_efer & EFER_SVME))
+ return 1;
- if (svm_allocate_nested(svm))
- return 1;
+ vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
+ return 1;
- vmcb12 = map.hva;
+ ret = 1;
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
+ goto unmap_map;
- nested_load_control_from_vmcb12(svm, &vmcb12->control);
+ if (svm_allocate_nested(svm))
+ goto unmap_save;
- ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
- kvm_vcpu_unmap(vcpu, &map, true);
+ /*
+ * Restore L1 host state from L1 HSAVE area as VMCB01 was
+ * used during SMM (see svm_enter_smm())
+ */
- /*
- * Restore L1 host state from L1 HSAVE area as VMCB01 was
- * used during SMM (see svm_enter_smm())
- */
- if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
- &map_save) == -EINVAL)
- return 1;
+ svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
- svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
- map_save.hva + 0x400);
+ /*
+ * Enter the nested guest now
+ */
- kvm_vcpu_unmap(vcpu, &map_save, true);
- }
- }
+ vmcb12 = map.hva;
+ nested_load_control_from_vmcb12(svm, &vmcb12->control);
+ ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
+unmap_save:
+ kvm_vcpu_unmap(vcpu, &map_save, true);
+unmap_map:
+ kvm_vcpu_unmap(vcpu, &map, true);
return ret;
}
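The rewritten svm_leave_smm() above replaces nested if-blocks with early returns and a reverse-order unwind. A minimal sketch of that unwind shape, with hypothetical acquire/release helpers standing in for kvm_vcpu_map()/kvm_vcpu_unmap():

#include <stdbool.h>

static bool acquire_a(void) { return true; }	/* kvm_vcpu_map(&map) */
static bool acquire_b(void) { return true; }	/* kvm_vcpu_map(&map_save) */
static void release_a(void) { }
static void release_b(void) { }
static int do_work(void) { return 0; }		/* enter_svm_guest_mode() */

static int leave_smm_like(void)
{
	int ret;

	if (!acquire_a())
		return 1;

	ret = 1;			/* any failure below keeps ret = 1 */
	if (!acquire_b())
		goto unmap_a;

	if (do_work() < 0)
		goto unmap_b;

	ret = 0;			/* success falls through both labels */
unmap_b:
	release_b();
unmap_a:
	release_a();
	return ret;
}

int main(void)
{
	return leave_smm_like();
}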
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 524d943f3efc..5d30db599e10 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -191,7 +191,7 @@ struct vcpu_svm {
/* SEV-ES scratch area support */
void *ghcb_sa;
- u64 ghcb_sa_len;
+ u32 ghcb_sa_len;
bool ghcb_sa_sync;
bool ghcb_sa_free;
@@ -459,7 +459,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}
-int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
+int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
+ u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
index 0dab1b7b529f..ba6f99f584ac 100644
--- a/arch/x86/kvm/vmx/evmcs.c
+++ b/arch/x86/kvm/vmx/evmcs.c
@@ -353,14 +353,20 @@ void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata)
switch (msr_index) {
case MSR_IA32_VMX_EXIT_CTLS:
case MSR_IA32_VMX_TRUE_EXIT_CTLS:
- ctl_high &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+ ctl_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
break;
case MSR_IA32_VMX_ENTRY_CTLS:
case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
- ctl_high &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ ctl_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
break;
case MSR_IA32_VMX_PROCBASED_CTLS2:
- ctl_high &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
+ break;
+ case MSR_IA32_VMX_PINBASED_CTLS:
+ ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
+ break;
+ case MSR_IA32_VMX_VMFUNC:
+ ctl_low &= ~EVMCS1_UNSUPPORTED_VMFUNC;
break;
}
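These hunks widen the eVMCS filtering from single control bits to the full EVMCS1_UNSUPPORTED_* masks. A minimal sketch of the underlying idea: a VMX capability MSR packs allowed-0 settings in the low half and allowed-1 settings in the high half, so clearing a high bit hides the feature. The mask below is made up:

#include <stdint.h>
#include <stdio.h>

#define UNSUPPORTED_CTRL (1u << 15)	/* hypothetical feature bit */

int main(void)
{
	uint64_t msr = ((uint64_t)0xffffffffu << 32) | 0x16u;
	uint32_t ctl_low = (uint32_t)msr;		/* allowed-0 settings */
	uint32_t ctl_high = (uint32_t)(msr >> 32);	/* allowed-1 settings */

	ctl_high &= ~UNSUPPORTED_CTRL;	/* filter the unsupported feature */

	msr = ((uint64_t)ctl_high << 32) | ctl_low;
	printf("filtered MSR value: %#llx\n", (unsigned long long)msr);
	return 0;
}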
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index ccb03d69546c..eedcebf58004 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2583,8 +2583,13 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
* Guest state is invalid and unrestricted guest is disabled,
* which means L1 attempted VMEntry to L2 with invalid state.
* Fail the VMEntry.
+ *
+ * However, when force loading the guest state (SMM exit or
+ * loading nested state after migration), it is possible to
+ * have invalid guest state now, which will be fixed later by
+ * restoring L2 register state.
*/
- if (CC(!vmx_guest_state_valid(vcpu))) {
+ if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
*entry_failure_code = ENTRY_FAIL_DEFAULT;
return -EINVAL;
}
@@ -4351,6 +4356,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
vmcs12->vm_exit_msr_load_count))
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
+
+ to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
}
static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
@@ -4899,14 +4906,7 @@ out_vmcs02:
return -ENOMEM;
}
-/*
- * Emulate the VMXON instruction.
- * Currently, we just remember that VMX is active, and do not save or even
- * inspect the argument to VMXON (the so-called "VMXON pointer") because we
- * do not currently need to store anything in that guest-allocated memory
- * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their
- * argument is different from the VMXON pointer (which the spec says they do).
- */
+/* Emulate the VMXON instruction. */
static int handle_vmon(struct kvm_vcpu *vcpu)
{
int ret;
@@ -5903,6 +5903,12 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
case EXIT_REASON_VMFUNC:
/* VM functions are emulated through L2->L0 vmexits. */
return true;
+ case EXIT_REASON_BUS_LOCK:
+ /*
+ * At present, bus lock VM exit is never exposed to L1.
+ * Handle L2's bus locks in L0 directly.
+ */
+ return true;
default:
break;
}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0c2c0d5ae873..7fb2a3a1ca46 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1323,7 +1323,7 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
vmx_prepare_switch_to_host(to_vmx(vcpu));
}
-static bool emulation_required(struct kvm_vcpu *vcpu)
+bool vmx_emulation_required(struct kvm_vcpu *vcpu)
{
return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
}
@@ -1367,7 +1367,7 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
vmcs_writel(GUEST_RFLAGS, rflags);
if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
- vmx->emulation_required = emulation_required(vcpu);
+ vmx->emulation_required = vmx_emulation_required(vcpu);
}
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
@@ -1837,10 +1837,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
&msr_info->data))
return 1;
/*
- * Enlightened VMCS v1 doesn't have certain fields, but buggy
- * Hyper-V versions are still trying to use corresponding
- * features when they are exposed. Filter out the essential
- * minimum.
+ * Enlightened VMCS v1 doesn't have certain VMCS fields, but
+ * instead of simply ignoring the missing features, different
+ * Hyper-V versions either try to use them and fail, or do some
+ * sanity checking and refuse to boot. Filter all unsupported
+ * features out.
*/
if (!msr_info->host_initiated &&
vmx->nested.enlightened_vmcs_enabled)
@@ -3077,7 +3078,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
}
/* depends on vcpu->arch.cr0 to be set to a new value */
- vmx->emulation_required = emulation_required(vcpu);
+ vmx->emulation_required = vmx_emulation_required(vcpu);
}
static int vmx_get_max_tdp_level(void)
@@ -3330,7 +3331,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int
{
__vmx_set_segment(vcpu, var, seg);
- to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
+ to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
}
static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -5561,9 +5562,13 @@ static int handle_encls(struct kvm_vcpu *vcpu)
static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
{
- vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
- vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
- return 0;
+ /*
+ * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK
+ * VM-Exits. Unconditionally set the flag here and leave the handling to
+ * vmx_handle_exit().
+ */
+ to_vmx(vcpu)->exit_reason.bus_lock_detected = true;
+ return 1;
}
/*
@@ -6050,9 +6055,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
int ret = __vmx_handle_exit(vcpu, exit_fastpath);
/*
- * Even when current exit reason is handled by KVM internally, we
- * still need to exit to user space when bus lock detected to inform
- * that there is a bus lock in guest.
+ * Exit to user space when a bus lock is detected in order to
+ * report that there is a bus lock in the guest.
*/
if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
if (ret > 0)
@@ -6621,10 +6625,24 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx->loaded_vmcs->soft_vnmi_blocked))
vmx->loaded_vmcs->entry_time = ktime_get();
- /* Don't enter VMX if guest state is invalid, let the exit handler
- start emulation until we arrive back to a valid state */
- if (vmx->emulation_required)
+ /*
+ * Don't enter VMX if guest state is invalid; let the exit handler
+ * start emulation until we arrive back at a valid state. Synthesize a
+ * consistency check VM-Exit due to invalid guest state and bail.
+ */
+ if (unlikely(vmx->emulation_required)) {
+
+ /* We don't emulate invalid state of a nested guest */
+ vmx->fail = is_guest_mode(vcpu);
+
+ vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
+ vmx->exit_reason.failed_vmentry = 1;
+ kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
+ vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
+ kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
+ vmx->exit_intr_info = 0;
return EXIT_FASTPATH_NONE;
+ }
trace_kvm_entry(vcpu);
@@ -6833,7 +6851,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
*/
tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
if (tsx_ctrl)
- vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+ tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
}
err = alloc_loaded_vmcs(&vmx->vmcs01);
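The last hunk above fixes a stale-index bug: vmx_find_uret_msr() already returns a pointer to the matching entry, yet the old code indexed the array with a loop variable from an earlier scope. A minimal sketch of the bug class, with simplified stand-in structures:

#include <stddef.h>
#include <stdio.h>

struct uret_msr {
	unsigned int index;
	unsigned long long mask;
};

static struct uret_msr *find_msr(struct uret_msr *arr, size_t n,
				 unsigned int index)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (arr[i].index == index)
			return &arr[i];
	return NULL;
}

int main(void)
{
	struct uret_msr msrs[3] = { { 10, 0 }, { 20, 0 }, { 30, 0 } };
	struct uret_msr *tsx = find_msr(msrs, 3, 20);

	if (tsx)
		tsx->mask = ~0x2ULL;	/* correct: the entry we found */
	/* msrs[i].mask = ~0x2ULL;	   buggy: 'i' may point elsewhere */

	printf("entry 20 mask: %#llx\n", msrs[1].mask);
	return 0;
}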
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 4858c5fd95f2..592217fd7d92 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -248,12 +248,8 @@ struct vcpu_vmx {
* only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
* of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
* be loaded into hardware if those conditions aren't met.
- * nr_active_uret_msrs tracks the number of MSRs that need to be loaded
- * into hardware when running the guest. guest_uret_msrs[] is resorted
- * whenever the number of "active" uret MSRs is modified.
*/
struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
- int nr_active_uret_msrs;
bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
u64 msr_host_kernel_gs_base;
@@ -359,6 +355,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
+bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 28ef14155726..0c8b5129effd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1332,6 +1332,13 @@ static const u32 msrs_to_save_all[] = {
MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
+
+ MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
+ MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
+ MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
+ MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
+ MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
+ MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
};
static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
@@ -2969,7 +2976,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
offsetof(struct compat_vcpu_info, time));
if (vcpu->xen.vcpu_time_info_set)
kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
- if (v == kvm_get_vcpu(v->kvm, 0))
+ if (!v->vcpu_idx)
kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
return 0;
}
@@ -7658,6 +7665,13 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
/* Process a latched INIT or SMI, if any. */
kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+ /*
+ * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
+ * on SMM exit we still need to reload them from
+ * guest memory.
+ */
+ vcpu->arch.pdptrs_from_userspace = false;
}
kvm_mmu_reset_context(vcpu);
@@ -10652,6 +10666,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
int r;
vcpu->arch.last_vmentry_cpu = -1;
+ vcpu->arch.regs_avail = ~0;
+ vcpu->arch.regs_dirty = ~0;
if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -10893,6 +10909,9 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
kvm_rip_write(vcpu, 0xfff0);
+ vcpu->arch.cr3 = 0;
+ kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
+
/*
* CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions
* of Intel's SDM list CD/NW as being set on INIT, but they contradict
@@ -11139,9 +11158,15 @@ void kvm_arch_free_vm(struct kvm *kvm)
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
+ int ret;
+
if (type)
return -EINVAL;
+ ret = kvm_page_track_init(kvm);
+ if (ret)
+ return ret;
+
INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
@@ -11174,7 +11199,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_apicv_init(kvm);
kvm_hv_init_vm(kvm);
- kvm_page_track_init(kvm);
kvm_mmu_init_vm(kvm);
kvm_xen_init_vm(kvm);
@@ -11368,7 +11392,8 @@ static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
int level = i + 1;
int lpages = __kvm_mmu_slot_lpages(slot, npages, level);
- WARN_ON(slot->arch.rmap[i]);
+ if (slot->arch.rmap[i])
+ continue;
slot->arch.rmap[i] = kvcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
if (!slot->arch.rmap[i]) {
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 058f19b20465..c565def611e2 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -37,10 +37,10 @@
((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
#define __get_next(t, insn) \
- ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
+ ({ t r; memcpy(&r, insn->next_byte, sizeof(t)); insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
#define __peek_nbyte_next(t, insn, n) \
- ({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); })
+ ({ t r; memcpy(&r, (insn)->next_byte + n, sizeof(t)); leXX_to_cpu(t, r); })
#define get_next(t, insn) \
({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
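The insn.c hunk swaps pointer casts for memcpy(): dereferencing an arbitrary byte pointer through a wider type is undefined on strict-alignment targets, while a fixed-size memcpy() is portable and compiles to a single load where unaligned access is legal. A minimal sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t read_le32(const unsigned char *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));	/* no alignment assumption */
	return v;			/* a leXX_to_cpu() would go here */
}

int main(void)
{
	unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12 };

	/* buf + 1 is deliberately misaligned for a 4-byte load. */
	printf("%#x\n", read_le32(buf + 1));	/* 0x12345678 on LE */
	return 0;
}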
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b2eefdefc108..84a2c8c4af73 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -710,7 +710,8 @@ oops:
static noinline void
kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, int signal, int si_code)
+ unsigned long address, int signal, int si_code,
+ u32 pkey)
{
WARN_ON_ONCE(user_mode(regs));
@@ -735,8 +736,12 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
set_signal_archinfo(address, error_code);
- /* XXX: hwpoison faults will set the wrong code. */
- force_sig_fault(signal, si_code, (void __user *)address);
+ if (si_code == SEGV_PKUERR) {
+ force_sig_pkuerr((void __user *)address, pkey);
+ } else {
+ /* XXX: hwpoison faults will set the wrong code. */
+ force_sig_fault(signal, si_code, (void __user *)address);
+ }
}
/*
@@ -798,7 +803,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
struct task_struct *tsk = current;
if (!user_mode(regs)) {
- kernelmode_fixup_or_oops(regs, error_code, address, pkey, si_code);
+ kernelmode_fixup_or_oops(regs, error_code, address,
+ SIGSEGV, si_code, pkey);
return;
}
@@ -930,7 +936,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
{
/* Kernel mode? Handle exceptions or die: */
if (!user_mode(regs)) {
- kernelmode_fixup_or_oops(regs, error_code, address, SIGBUS, BUS_ADRERR);
+ kernelmode_fixup_or_oops(regs, error_code, address,
+ SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
return;
}
@@ -1396,7 +1403,8 @@ good_area:
*/
if (!user_mode(regs))
kernelmode_fixup_or_oops(regs, error_code, address,
- SIGBUS, BUS_ADRERR);
+ SIGBUS, BUS_ADRERR,
+ ARCH_DEFAULT_PKEY);
return;
}
@@ -1416,7 +1424,8 @@ good_area:
return;
if (fatal_signal_pending(current) && !user_mode(regs)) {
- kernelmode_fixup_or_oops(regs, error_code, address, 0, 0);
+ kernelmode_fixup_or_oops(regs, error_code, address,
+ 0, 0, ARCH_DEFAULT_PKEY);
return;
}
@@ -1424,7 +1433,8 @@ good_area:
/* Kernel mode? Handle exceptions or die: */
if (!user_mode(regs)) {
kernelmode_fixup_or_oops(regs, error_code, address,
- SIGSEGV, SEGV_MAPERR);
+ SIGSEGV, SEGV_MAPERR,
+ ARCH_DEFAULT_PKEY);
return;
}
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a6e11763763f..36098226a957 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1432,18 +1432,18 @@ int kern_addr_valid(unsigned long addr)
return 0;
p4d = p4d_offset(pgd, addr);
- if (p4d_none(*p4d))
+ if (!p4d_present(*p4d))
return 0;
pud = pud_offset(p4d, addr);
- if (pud_none(*pud))
+ if (!pud_present(*pud))
return 0;
if (pud_large(*pud))
return pfn_valid(pud_pfn(*pud));
pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd))
+ if (!pmd_present(*pmd))
return 0;
if (pmd_large(*pmd))
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 3112ca7786ed..4ba2a3ee4bce 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
int err = 0;
start = sanitize_phys(start);
- end = sanitize_phys(end);
+
+ /*
+ * The end address passed into this function is exclusive, but
+ * sanitize_phys() expects an inclusive address.
+ */
+ end = sanitize_phys(end - 1) + 1;
if (start >= end) {
WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
start, end - 1, cattr_name(req_type));
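The memtype_reserve() fix converts an exclusive end to an inclusive one before sanitizing. A minimal sketch of why that matters at the top of the address range; the mask is a hypothetical stand-in for sanitize_phys():

#include <stdint.h>
#include <stdio.h>

#define PHYS_MASK 0xffffffffull	/* hypothetical supported width */

/* Stand-in for sanitize_phys(): only meaningful for inclusive addresses. */
static uint64_t sanitize_inclusive(uint64_t addr)
{
	return addr & PHYS_MASK;
}

int main(void)
{
	uint64_t start = 0xfffff000ull;
	uint64_t end = PHYS_MASK + 1;	/* exclusive end, one past the top */

	uint64_t bogus = sanitize_inclusive(end);	  /* wraps to 0 */
	uint64_t fixed = sanitize_inclusive(end - 1) + 1; /* stays sane */

	printf("bogus %#llx, fixed %#llx\n",
	       (unsigned long long)bogus, (unsigned long long)fixed);
	/* With 'bogus', start >= end and the reservation is rejected. */
	return !(start < fixed);
}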
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 20d2d6a1f9de..576ef1a6954a 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1334,9 +1334,10 @@ st: if (is_imm8(insn->off))
if (insn->imm == (BPF_AND | BPF_FETCH) ||
insn->imm == (BPF_OR | BPF_FETCH) ||
insn->imm == (BPF_XOR | BPF_FETCH)) {
- u8 *branch_target;
bool is64 = BPF_SIZE(insn->code) == BPF_DW;
u32 real_src_reg = src_reg;
+ u32 real_dst_reg = dst_reg;
+ u8 *branch_target;
/*
* Can't be implemented with a single x86 insn.
@@ -1347,11 +1348,13 @@ st: if (is_imm8(insn->off))
emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
if (src_reg == BPF_REG_0)
real_src_reg = BPF_REG_AX;
+ if (dst_reg == BPF_REG_0)
+ real_dst_reg = BPF_REG_AX;
branch_target = prog;
/* Load old value */
emit_ldx(&prog, BPF_SIZE(insn->code),
- BPF_REG_0, dst_reg, insn->off);
+ BPF_REG_0, real_dst_reg, insn->off);
/*
* Perform the (commutative) operation locally,
* put the result in the AUX_REG.
@@ -1362,7 +1365,8 @@ st: if (is_imm8(insn->off))
add_2reg(0xC0, AUX_REG, real_src_reg));
/* Attempt to swap in new value */
err = emit_atomic(&prog, BPF_CMPXCHG,
- dst_reg, AUX_REG, insn->off,
+ real_dst_reg, AUX_REG,
+ insn->off,
BPF_SIZE(insn->code));
if (WARN_ON(err))
return err;
@@ -1376,11 +1380,10 @@ st: if (is_imm8(insn->off))
/* Restore R0 after clobbering RAX */
emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
break;
-
}
err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
- insn->off, BPF_SIZE(insn->code));
+ insn->off, BPF_SIZE(insn->code));
if (err)
return err;
break;
@@ -1737,7 +1740,7 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
}
static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
- struct bpf_prog *p, int stack_size, bool mod_ret)
+ struct bpf_prog *p, int stack_size, bool save_ret)
{
u8 *prog = *pprog;
u8 *jmp_insn;
@@ -1770,11 +1773,15 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
if (emit_call(&prog, p->bpf_func, prog))
return -EINVAL;
- /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
+ /*
+ * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return value
* of the previous call which is then passed on the stack to
* the next BPF program.
+ *
+ * A BPF_TRAMP_FENTRY trampoline may need to return the return
+ * value of a BPF_PROG_TYPE_STRUCT_OPS prog.
*/
- if (mod_ret)
+ if (save_ret)
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
/* replace 2 nops with JE insn, since jmp target is known */
@@ -1821,13 +1828,15 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
}
static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
- struct bpf_tramp_progs *tp, int stack_size)
+ struct bpf_tramp_progs *tp, int stack_size,
+ bool save_ret)
{
int i;
u8 *prog = *pprog;
for (i = 0; i < tp->nr_progs; i++) {
- if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
+ if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
+ save_ret))
return -EINVAL;
}
*pprog = prog;
@@ -1870,6 +1879,23 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
return 0;
}
+static bool is_valid_bpf_tramp_flags(unsigned int flags)
+{
+ if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
+ (flags & BPF_TRAMP_F_SKIP_FRAME))
+ return false;
+
+ /*
+ * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
+ * and it must be used alone.
+ */
+ if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
+ (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
+ return false;
+
+ return true;
+}
+
/* Example:
* __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
* its 'struct btf_func_model' will be nr_args=2
@@ -1942,17 +1968,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
u8 **branches = NULL;
u8 *prog;
+ bool save_ret;
/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
if (nr_args > 6)
return -ENOTSUPP;
- if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
- (flags & BPF_TRAMP_F_SKIP_FRAME))
+ if (!is_valid_bpf_tramp_flags(flags))
return -EINVAL;
- if (flags & BPF_TRAMP_F_CALL_ORIG)
- stack_size += 8; /* room for return value of orig_call */
+ /* room for return value of orig_call or fentry prog */
+ save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+ if (save_ret)
+ stack_size += 8;
if (flags & BPF_TRAMP_F_IP_ARG)
stack_size += 8; /* room for IP address argument */
@@ -1998,7 +2026,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
}
if (fentry->nr_progs)
- if (invoke_bpf(m, &prog, fentry, stack_size))
+ if (invoke_bpf(m, &prog, fentry, stack_size,
+ flags & BPF_TRAMP_F_RET_FENTRY_RET))
return -EINVAL;
if (fmod_ret->nr_progs) {
@@ -2045,7 +2074,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
}
if (fexit->nr_progs)
- if (invoke_bpf(m, &prog, fexit, stack_size)) {
+ if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
ret = -EINVAL;
goto cleanup;
}
@@ -2065,9 +2094,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
ret = -EINVAL;
goto cleanup;
}
- /* restore original return value back into RAX */
- emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
}
+ /* restore return value of orig_call or fentry prog back into RAX */
+ if (save_ret)
+ emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
EMIT1(0x5B); /* pop rbx */
EMIT1(0xC9); /* leave */
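The save_ret changes reserve one stack slot that either the original call or a fentry program may fill, and reload it into the return register on the way out. A minimal C sketch of that flow, with plain functions standing in for the emitted x86 stages:

#include <stdio.h>

static long fentry_stage(void) { return 42; }	/* fentry prog's value */
static long orig_call(void) { return 7; }

static long trampoline(int call_orig, int ret_fentry_ret)
{
	long saved_ret = 0;		/* the -8(%rbp) stack slot */

	if (ret_fentry_ret)
		saved_ret = fentry_stage();	/* keep fentry's value */
	else
		fentry_stage();			/* value discarded */

	if (call_orig)
		saved_ret = orig_call();	/* overwrites, as before */

	return saved_ret;		/* the final emit_ldx() into RAX */
}

int main(void)
{
	printf("struct_ops-style: %ld\n", trampoline(0, 1));	/* 42 */
	printf("fexit-style:      %ld\n", trampoline(1, 0));	/* 7 */
	return 0;
}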
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 3d41a09c2c14..5debe4ac6f81 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -113,7 +113,7 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
false /* no mapping of GSI to PIRQ */);
}
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
static int xen_register_gsi(u32 gsi, int triggering, int polarity)
{
int rc, irq;
@@ -261,7 +261,7 @@ error:
return irq;
}
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
static bool __read_mostly pci_seg_supported = true;
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
@@ -375,10 +375,10 @@ static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
}
}
-#else /* CONFIG_XEN_DOM0 */
+#else /* CONFIG_XEN_PV_DOM0 */
#define xen_initdom_setup_msi_irqs NULL
#define xen_initdom_restore_msi_irqs NULL
-#endif /* !CONFIG_XEN_DOM0 */
+#endif /* !CONFIG_XEN_PV_DOM0 */
static void xen_teardown_msi_irqs(struct pci_dev *dev)
{
@@ -555,7 +555,7 @@ int __init pci_xen_hvm_init(void)
return 0;
}
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
int __init pci_xen_initial_domain(void)
{
int irq;
@@ -583,6 +583,9 @@ int __init pci_xen_initial_domain(void)
}
return 0;
}
+#endif
+
+#ifdef CONFIG_XEN_DOM0
struct xen_device_domain_owner {
domid_t domain;
@@ -656,4 +659,4 @@ int xen_unregister_device_domain_owner(struct pci_dev *dev)
return 0;
}
EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
-#endif
+#endif /* CONFIG_XEN_DOM0 */
diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
index ee2beda590d0..1d4a00e767ec 100644
--- a/arch/x86/platform/olpc/olpc.c
+++ b/arch/x86/platform/olpc/olpc.c
@@ -274,7 +274,7 @@ static struct olpc_ec_driver ec_xo1_driver = {
static struct olpc_ec_driver ec_xo1_5_driver = {
.ec_cmd = olpc_xo1_ec_cmd,
-#ifdef CONFIG_OLPC_XO1_5_SCI
+#ifdef CONFIG_OLPC_XO15_SCI
/*
* XO-1.5 EC wakeups are available when olpc-xo15-sci driver is
* compiled in
diff --git a/arch/x86/platform/pvh/enlighten.c b/arch/x86/platform/pvh/enlighten.c
index 9ac7457f52a3..ed0442e35434 100644
--- a/arch/x86/platform/pvh/enlighten.c
+++ b/arch/x86/platform/pvh/enlighten.c
@@ -16,15 +16,15 @@
/*
* PVH variables.
*
- * pvh_bootparams and pvh_start_info need to live in the data segment since
+ * pvh_bootparams and pvh_start_info need to live in a data segment since
* they are used after startup_{32|64}, which clear .bss, are invoked.
*/
-struct boot_params pvh_bootparams __section(".data");
-struct hvm_start_info pvh_start_info __section(".data");
+struct boot_params __initdata pvh_bootparams;
+struct hvm_start_info __initdata pvh_start_info;
-unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
+const unsigned int __initconst pvh_start_info_sz = sizeof(pvh_start_info);
-static u64 pvh_get_root_pointer(void)
+static u64 __init pvh_get_root_pointer(void)
{
return pvh_start_info.rsdp_paddr;
}
@@ -107,7 +107,7 @@ void __init __weak xen_pvh_init(struct boot_params *boot_params)
BUG();
}
-static void hypervisor_specific_init(bool xen_guest)
+static void __init hypervisor_specific_init(bool xen_guest)
{
if (xen_guest)
xen_pvh_init(&pvh_bootparams);
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index afc1da68b06d..6bcd3d8ca6ac 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -43,13 +43,9 @@ config XEN_PV_SMP
def_bool y
depends on XEN_PV && SMP
-config XEN_DOM0
- bool "Xen PV Dom0 support"
- default y
- depends on XEN_PV && PCI_XEN && SWIOTLB_XEN
- depends on X86_IO_APIC && ACPI && PCI
- help
- Support running as a Xen PV Dom0 guest.
+config XEN_PV_DOM0
+ def_bool y
+ depends on XEN_PV && XEN_DOM0
config XEN_PVHVM
def_bool y
@@ -86,3 +82,12 @@ config XEN_PVH
def_bool n
help
Support for running as a Xen PVH guest.
+
+config XEN_DOM0
+ bool "Xen Dom0 support"
+ default XEN_PV
+ depends on (XEN_PV && SWIOTLB_XEN) || (XEN_PVH && X86_64)
+ depends on X86_IO_APIC && ACPI && PCI
+ select X86_X2APIC if XEN_PVH && X86_64
+ help
+ Support running as a Xen Dom0 guest.
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 40b5779fce21..4953260e281c 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -45,7 +45,7 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
-obj-$(CONFIG_XEN_DOM0) += vga.o
+obj-$(CONFIG_XEN_PV_DOM0) += vga.o
obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c79bd0af2e8c..95d970359e17 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -3,6 +3,7 @@
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
#include <linux/memblock.h>
#endif
+#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/slab.h>
@@ -10,12 +11,15 @@
#include <xen/xen.h>
#include <xen/features.h>
+#include <xen/interface/sched.h>
+#include <xen/interface/version.h>
#include <xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/cpu.h>
#include <asm/e820/api.h>
+#include <asm/setup.h>
#include "xen-ops.h"
#include "smp.h"
@@ -52,9 +56,6 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
-enum xen_domain_type xen_domain_type = XEN_NATIVE;
-EXPORT_SYMBOL_GPL(xen_domain_type);
-
unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned long machine_to_phys_nr;
@@ -69,10 +70,12 @@ __read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);
/*
- * NB: needs to live in .data because it's used by xen_prepare_pvh which runs
- * before clearing the bss.
+ * NB: These need to live in .data or the like because they're used by
+ * xen_prepare_pvh() which runs before clearing the bss.
*/
-uint32_t xen_start_flags __section(".data") = 0;
+enum xen_domain_type __ro_after_init xen_domain_type = XEN_NATIVE;
+EXPORT_SYMBOL_GPL(xen_domain_type);
+uint32_t __ro_after_init xen_start_flags;
EXPORT_SYMBOL(xen_start_flags);
/*
@@ -258,6 +261,45 @@ int xen_vcpu_setup(int cpu)
return ((per_cpu(xen_vcpu, cpu) == NULL) ? -ENODEV : 0);
}
+void __init xen_banner(void)
+{
+ unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
+ struct xen_extraversion extra;
+
+ HYPERVISOR_xen_version(XENVER_extraversion, &extra);
+
+ pr_info("Booting kernel on %s\n", pv_info.name);
+ pr_info("Xen version: %u.%u%s%s\n",
+ version >> 16, version & 0xffff, extra.extraversion,
+ xen_feature(XENFEAT_mmu_pt_update_preserve_ad)
+ ? " (preserve-AD)" : "");
+}
+
+/* Check if running on Xen version (major, minor) or later */
+bool xen_running_on_version_or_later(unsigned int major, unsigned int minor)
+{
+ unsigned int version;
+
+ if (!xen_domain())
+ return false;
+
+ version = HYPERVISOR_xen_version(XENVER_version, NULL);
+ if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
+ ((version >> 16) > major))
+ return true;
+ return false;
+}
+
+void __init xen_add_preferred_consoles(void)
+{
+ add_preferred_console("xenboot", 0, NULL);
+ if (!boot_params.screen_info.orig_video_isVGA)
+ add_preferred_console("tty", 0, NULL);
+ add_preferred_console("hvc", 0, NULL);
+ if (boot_params.screen_info.orig_video_isVGA)
+ add_preferred_console("tty", 0, NULL);
+}
+
void xen_reboot(int reason)
{
struct sched_shutdown r = { .reason = reason };
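xen_running_on_version_or_later(), moved above so PVH can share it, compares a packed version word: major in the high 16 bits, minor in the low 16. A minimal sketch of that comparison outside the hypervisor context:

#include <stdbool.h>
#include <stdio.h>

static bool at_least(unsigned int version, unsigned int major,
		     unsigned int minor)
{
	unsigned int maj = version >> 16;
	unsigned int min = version & 0xffff;

	return maj > major || (maj == major && min >= minor);
}

int main(void)
{
	unsigned int v = (4 << 16) | 15;	/* Xen 4.15 */

	printf("%d %d %d\n",
	       at_least(v, 4, 2),	/* 1 */
	       at_least(v, 4, 15),	/* 1 */
	       at_least(v, 5, 0));	/* 0 */
	return 0;
}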
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 753f63734c13..a7b7d674f500 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -28,7 +28,6 @@
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
-#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/edd.h>
@@ -109,17 +108,6 @@ struct tls_descs {
*/
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
-static void __init xen_banner(void)
-{
- unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
- struct xen_extraversion extra;
- HYPERVISOR_xen_version(XENVER_extraversion, &extra);
-
- pr_info("Booting paravirtualized kernel on %s\n", pv_info.name);
- pr_info("Xen version: %d.%d%s (preserve-AD)\n",
- version >> 16, version & 0xffff, extra.extraversion);
-}
-
static void __init xen_pv_init_platform(void)
{
populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP));
@@ -142,22 +130,6 @@ static void __init xen_pv_guest_late_init(void)
#endif
}
-/* Check if running on Xen version (major, minor) or later */
-bool
-xen_running_on_version_or_later(unsigned int major, unsigned int minor)
-{
- unsigned int version;
-
- if (!xen_domain())
- return false;
-
- version = HYPERVISOR_xen_version(XENVER_version, NULL);
- if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
- ((version >> 16) > major))
- return true;
- return false;
-}
-
static __read_mostly unsigned int cpuid_leaf5_ecx_val;
static __read_mostly unsigned int cpuid_leaf5_edx_val;
@@ -755,8 +727,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
preempt_enable();
}
-static void xen_convert_trap_info(const struct desc_ptr *desc,
- struct trap_info *traps)
+static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
+ struct trap_info *traps, bool full)
{
unsigned in, out, count;
@@ -766,17 +738,18 @@ static void xen_convert_trap_info(const struct desc_ptr *desc,
for (in = out = 0; in < count; in++) {
gate_desc *entry = (gate_desc *)(desc->address) + in;
- if (cvt_gate_to_trap(in, entry, &traps[out]))
+ if (cvt_gate_to_trap(in, entry, &traps[out]) || full)
out++;
}
- traps[out].address = 0;
+
+ return out;
}
void xen_copy_trap_info(struct trap_info *traps)
{
const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
- xen_convert_trap_info(desc, traps);
+ xen_convert_trap_info(desc, traps, true);
}
/* Load a new IDT into Xen. In principle this can be per-CPU, so we
@@ -786,6 +759,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
{
static DEFINE_SPINLOCK(lock);
static struct trap_info traps[257];
+ unsigned out;
trace_xen_cpu_load_idt(desc);
@@ -793,7 +767,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
- xen_convert_trap_info(desc, traps);
+ out = xen_convert_trap_info(desc, traps, false);
+ memset(&traps[out], 0, sizeof(traps[0]));
xen_mc_flush();
if (HYPERVISOR_set_trap_table(traps))
@@ -1214,6 +1189,11 @@ static void __init xen_dom0_set_legacy_features(void)
x86_platform.legacy.rtc = 1;
}
+static void __init xen_domu_set_legacy_features(void)
+{
+ x86_platform.legacy.rtc = 0;
+}
+
/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(void)
{
@@ -1356,9 +1336,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN;
if (!xen_initial_domain()) {
- add_preferred_console("xenboot", 0, NULL);
if (pci_xen)
x86_init.pci.arch_init = pci_xen_init;
+ x86_platform.set_legacy_features =
+ xen_domu_set_legacy_features;
} else {
const struct dom0_vga_console_info *info =
(void *)((char *)xen_start_info +
@@ -1399,11 +1380,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
#endif
}
- if (!boot_params.screen_info.orig_video_isVGA)
- add_preferred_console("tty", 0, NULL);
- add_preferred_console("hvc", 0, NULL);
- if (boot_params.screen_info.orig_video_isVGA)
- add_preferred_console("tty", 0, NULL);
+ xen_add_preferred_consoles();
#ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */
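The xen_convert_trap_info() change stops writing a sentinel entry unconditionally and instead returns the produced count, letting the one caller that needs a terminator add it under its own lock. A minimal sketch with simplified types:

#include <stdio.h>
#include <string.h>

struct trap {
	unsigned long address;
};

static unsigned int convert(const int *src, unsigned int n,
			    struct trap *dst, int full)
{
	unsigned int in, out;

	for (in = out = 0; in < n; in++) {
		if (src[in] || full)	/* cvt_gate_to_trap() || full */
			dst[out++].address = in + 1;
	}
	return out;			/* no sentinel written here */
}

int main(void)
{
	int gates[4] = { 1, 0, 1, 1 };
	struct trap traps[5];
	unsigned int out = convert(gates, 4, traps, 0);

	memset(&traps[out], 0, sizeof(traps[0]));	/* caller terminates */
	printf("%u entries, terminator at %u\n", out, out);
	return 0;
}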
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index 0d5e34b9e6f9..bcae606bbc5c 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
+#include <linux/export.h>
#include <xen/hvc-console.h>
@@ -18,10 +19,11 @@
/*
* PVH variables.
*
- * The variable xen_pvh needs to live in the data segment since it is used
+ * The variable xen_pvh needs to live in a data segment since it is used
* after startup_{32|64} is invoked, which will clear the .bss segment.
*/
-bool xen_pvh __section(".data") = 0;
+bool __ro_after_init xen_pvh;
+EXPORT_SYMBOL_GPL(xen_pvh);
void __init xen_pvh_init(struct boot_params *boot_params)
{
@@ -36,6 +38,10 @@ void __init xen_pvh_init(struct boot_params *boot_params)
pfn = __pa(hypercall_page);
wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
+ if (xen_initial_domain())
+ x86_init.oem.arch_setup = xen_add_preferred_consoles;
+ x86_init.oem.banner = xen_banner;
+
xen_efi_init(boot_params);
}
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 1df5f01529e5..3359c23573c5 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1518,14 +1518,17 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
if (pinned) {
struct page *page = pfn_to_page(pfn);
- if (static_branch_likely(&xen_struct_pages_ready))
+ pinned = false;
+ if (static_branch_likely(&xen_struct_pages_ready)) {
+ pinned = PagePinned(page);
SetPagePinned(page);
+ }
xen_mc_batch();
__set_pfn_prot(pfn, PAGE_KERNEL_RO);
- if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+ if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
xen_mc_issue(PARAVIRT_LAZY_MMU);
@@ -2395,7 +2398,7 @@ static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data)
int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
- unsigned int domid, bool no_translate, struct page **pages)
+ unsigned int domid, bool no_translate)
{
int err = 0;
struct remap_data rmd;
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 54f9aa7e8457..46df59aeaa06 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -18,7 +18,7 @@
#endif
#include <linux/export.h>
-int xen_swiotlb __read_mostly;
+static int xen_swiotlb __read_mostly;
/*
* pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
@@ -56,7 +56,7 @@ int __init pci_xen_swiotlb_detect(void)
return xen_swiotlb;
}
-void __init pci_xen_swiotlb_init(void)
+static void __init pci_xen_swiotlb_init(void)
{
if (xen_swiotlb) {
xen_swiotlb_init_early();
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 96afadf9878e..7ed56c6075b0 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -290,8 +290,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
gdt = get_cpu_gdt_rw(cpu);
- memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
-
/*
* Bring up the CPU in cpu_bringup_and_idle() with the stack
* pointing just below where pt_regs would be if it were a normal
@@ -308,8 +306,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
xen_copy_trap_info(ctxt->trap_ctxt);
- ctxt->ldt_ents = 0;
-
BUG_ON((unsigned long)gdt & ~PAGE_MASK);
gdt_mfn = arbitrary_virt_to_mfn(gdt);
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 8d7ec49a35fb..8bc8b72a205d 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -51,6 +51,7 @@ void __init xen_remap_memory(void);
phys_addr_t __init xen_find_free_area(phys_addr_t size);
char * __init xen_memory_setup(void);
void __init xen_arch_setup(void);
+void xen_banner(void);
void xen_enable_sysenter(void);
void xen_enable_syscall(void);
void xen_vcpu_restore(void);
@@ -109,7 +110,7 @@ static inline void xen_uninit_lock_cpu(int cpu)
struct dom0_vga_console_info;
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
#else
static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
@@ -118,6 +119,8 @@ static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
}
#endif
+void xen_add_preferred_consoles(void);
+
void __init xen_init_apic(void);
#ifdef CONFIG_XEN_EFI
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
index 7cbf68ca7106..6fc05cba61a2 100644
--- a/arch/xtensa/include/asm/kmem_layout.h
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -78,7 +78,7 @@
#endif
#define XCHAL_KIO_SIZE 0x10000000
-#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
+#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
#ifndef __ASSEMBLY__
extern unsigned long xtensa_kio_paddr;
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 764b54bef701..15051a8a1539 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -143,7 +143,7 @@ unsigned xtensa_get_ext_irq_no(unsigned irq)
void __init init_IRQ(void)
{
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
irqchip_init();
#else
#ifdef CONFIG_HAVE_SMP
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index ed184106e4cf..ee9082a142fe 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -63,7 +63,7 @@ extern unsigned long initrd_end;
extern int initrd_below_start_ok;
#endif
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
void *dtb_start = __dtb_start;
#endif
@@ -125,7 +125,7 @@ __tagtable(BP_TAG_INITRD, parse_tag_initrd);
#endif /* CONFIG_BLK_DEV_INITRD */
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
static int __init parse_tag_fdt(const bp_tag_t *tag)
{
@@ -135,7 +135,7 @@ static int __init parse_tag_fdt(const bp_tag_t *tag)
__tagtable(BP_TAG_FDT, parse_tag_fdt);
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
static int __init parse_tag_cmdline(const bp_tag_t* tag)
{
@@ -183,7 +183,7 @@ static int __init parse_bootparam(const bp_tag_t *tag)
}
#endif
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
#if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
@@ -232,7 +232,7 @@ void __init early_init_devtree(void *params)
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
}
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
/*
* Initialize architecture. (Early stage)
@@ -253,7 +253,7 @@ void __init init_arch(bp_tag_t *bp_start)
if (bp_start)
parse_bootparam(bp_start);
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
early_init_devtree(dtb_start);
#endif
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 7e4d97dc8bd8..38acda4f04e8 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -101,7 +101,7 @@ void init_mmu(void)
void init_kio(void)
{
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
/*
* Update the IO area mapping in case xtensa_kio_paddr has changed
*/
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index 4986226a5ab2..8b806d305948 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -467,7 +467,7 @@ static int iss_net_set_mac(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(hwaddr->sa_data))
return -EADDRNOTAVAIL;
spin_lock_bh(&lp->lock);
- memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, hwaddr->sa_data);
spin_unlock_bh(&lp->lock);
return 0;
}
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 4f7d6142d41f..538e6748e85a 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -51,8 +51,12 @@ void platform_power_off(void)
void platform_restart(void)
{
- /* Flush and reset the mmu, simulate a processor reset, and
- * jump to the reset vector. */
+ /* Try software reset first. */
+ WRITE_ONCE(*(u32 *)XTFPGA_SWRST_VADDR, 0xdead);
+
+ /* If software reset did not work, flush and reset the mmu,
+ * simulate a processor reset, and jump to the reset vector.
+ */
cpu_reset();
/* control never gets here */
}
@@ -66,7 +70,7 @@ void __init platform_calibrate_ccount(void)
#endif
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
static void __init xtfpga_clk_setup(struct device_node *np)
{
@@ -284,4 +288,4 @@ static int __init xtavnet_init(void)
*/
arch_initcall(xtavnet_init);
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
diff --git a/block/bdev.c b/block/bdev.c
index cf2780cb44a7..485a258b0ab3 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -490,7 +490,6 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
bdev = I_BDEV(inode);
mutex_init(&bdev->bd_fsfreeze_mutex);
spin_lock_init(&bdev->bd_size_lock);
- bdev->bd_disk = disk;
bdev->bd_partno = partno;
bdev->bd_inode = inode;
bdev->bd_stats = alloc_percpu(struct disk_stats);
@@ -498,6 +497,7 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
iput(inode);
return NULL;
}
+ bdev->bd_disk = disk;
return bdev;
}
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index e2f14508f2d6..85b8e1c3a762 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -666,6 +666,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
bfqg_and_blkg_put(bfqq_group(bfqq));
+ if (entity->parent &&
+ entity->parent->last_bfqq_created == bfqq)
+ entity->parent->last_bfqq_created = NULL;
+ else if (bfqd->last_bfqq_created == bfqq)
+ bfqd->last_bfqq_created = NULL;
+
entity->parent = bfqg->my_entity;
entity->sched_data = &bfqg->sched_data;
/* pin down bfqg and its associated blkg */
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index dd13c2bbc29c..480e1a134859 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2662,15 +2662,6 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
* are likely to increase the throughput.
*/
bfqq->new_bfqq = new_bfqq;
- /*
- * The above assignment schedules the following redirections:
- * each time some I/O for bfqq arrives, the process that
- * generated that I/O is disassociated from bfqq and
- * associated with new_bfqq. Here we increases new_bfqq->ref
- * in advance, adding the number of processes that are
- * expected to be associated with new_bfqq as they happen to
- * issue I/O.
- */
new_bfqq->ref += process_refs;
return new_bfqq;
}
@@ -2733,10 +2724,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
{
struct bfq_queue *in_service_bfqq, *new_bfqq;
- /* if a merge has already been setup, then proceed with that first */
- if (bfqq->new_bfqq)
- return bfqq->new_bfqq;
-
/*
* Check delayed stable merge for rotational or non-queueing
* devs. For this branch to be executed, bfqq must not be
@@ -2838,6 +2825,9 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
if (bfq_too_late_for_merging(bfqq))
return NULL;
+ if (bfqq->new_bfqq)
+ return bfqq->new_bfqq;
+
if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
return NULL;
diff --git a/block/bio.c b/block/bio.c
index 5df3dd282e40..a6fb6a0b4295 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1466,7 +1466,7 @@ again:
if (!bio_integrity_endio(bio))
return;
- if (bio->bi_bdev)
+ if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED))
rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 3c88a79a319b..38b9f7684952 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1182,10 +1182,6 @@ int blkcg_init_queue(struct request_queue *q)
if (preloaded)
radix_tree_preload_end();
- ret = blk_iolatency_init(q);
- if (ret)
- goto err_destroy_all;
-
ret = blk_ioprio_init(q);
if (ret)
goto err_destroy_all;
@@ -1194,6 +1190,12 @@ int blkcg_init_queue(struct request_queue *q)
if (ret)
goto err_destroy_all;
+ ret = blk_iolatency_init(q);
+ if (ret) {
+ blk_throtl_exit(q);
+ goto err_destroy_all;
+ }
+
return 0;
err_destroy_all:
@@ -1364,10 +1366,14 @@ enomem:
/* alloc failed, nothing's initialized yet, free everything */
spin_lock_irq(&q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ struct blkcg *blkcg = blkg->blkcg;
+
+ spin_lock(&blkcg->lock);
if (blkg->pd[pol->plid]) {
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
}
+ spin_unlock(&blkcg->lock);
}
spin_unlock_irq(&q->queue_lock);
ret = -ENOMEM;
@@ -1399,12 +1405,16 @@ void blkcg_deactivate_policy(struct request_queue *q,
__clear_bit(pol->plid, q->blkcg_pols);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ struct blkcg *blkcg = blkg->blkcg;
+
+ spin_lock(&blkcg->lock);
if (blkg->pd[pol->plid]) {
if (pol->pd_offline_fn)
pol->pd_offline_fn(blkg->pd[pol->plid]);
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
}
+ spin_unlock(&blkcg->lock);
}
spin_unlock_irq(&q->queue_lock);
diff --git a/block/blk-core.c b/block/blk-core.c
index 5454db2fa263..4d8f5fe91588 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -49,7 +49,6 @@
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
-#include "blk-rq-qos.h"
struct dentry *blk_debugfs_root;
@@ -337,23 +336,25 @@ void blk_put_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_put_queue);
-void blk_set_queue_dying(struct request_queue *q)
+void blk_queue_start_drain(struct request_queue *q)
{
- blk_queue_flag_set(QUEUE_FLAG_DYING, q);
-
/*
* When queue DYING flag is set, we need to block new req
* entering queue, so we call blk_freeze_queue_start() to
* prevent I/O from crossing blk_queue_enter().
*/
blk_freeze_queue_start(q);
-
if (queue_is_mq(q))
blk_mq_wake_waiters(q);
-
/* Make blk_queue_enter() reexamine the DYING flag. */
wake_up_all(&q->mq_freeze_wq);
}
+
+void blk_set_queue_dying(struct request_queue *q)
+{
+ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+ blk_queue_start_drain(q);
+}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
/**
@@ -385,13 +386,8 @@ void blk_cleanup_queue(struct request_queue *q)
*/
blk_freeze_queue(q);
- rq_qos_exit(q);
-
blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
- /* for synchronous bio-based driver finish in-flight integrity i/o */
- blk_flush_integrity();
-
blk_sync_queue(q);
if (queue_is_mq(q))
blk_mq_exit_queue(q);
@@ -416,6 +412,30 @@ void blk_cleanup_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_cleanup_queue);
+static bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+ rcu_read_lock();
+ if (!percpu_ref_tryget_live(&q->q_usage_counter))
+ goto fail;
+
+ /*
+ * The code that increments the pm_only counter must ensure that the
+ * counter is globally visible before the queue is unfrozen.
+ */
+ if (blk_queue_pm_only(q) &&
+ (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+ goto fail_put;
+
+ rcu_read_unlock();
+ return true;
+
+fail_put:
+ percpu_ref_put(&q->q_usage_counter);
+fail:
+ rcu_read_unlock();
+ return false;
+}
+
/**
* blk_queue_enter() - try to increase q->q_usage_counter
* @q: request queue pointer
@@ -425,40 +445,18 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
const bool pm = flags & BLK_MQ_REQ_PM;
- while (true) {
- bool success = false;
-
- rcu_read_lock();
- if (percpu_ref_tryget_live(&q->q_usage_counter)) {
- /*
- * The code that increments the pm_only counter is
- * responsible for ensuring that that counter is
- * globally visible before the queue is unfrozen.
- */
- if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
- !blk_queue_pm_only(q)) {
- success = true;
- } else {
- percpu_ref_put(&q->q_usage_counter);
- }
- }
- rcu_read_unlock();
-
- if (success)
- return 0;
-
+ while (!blk_try_enter_queue(q, pm)) {
if (flags & BLK_MQ_REQ_NOWAIT)
return -EBUSY;
/*
- * read pair of barrier in blk_freeze_queue_start(),
- * we need to order reading __PERCPU_REF_DEAD flag of
- * .q_usage_counter and reading .mq_freeze_depth or
- * queue dying flag, otherwise the following wait may
- * never return if the two reads are reordered.
+ * Read-side pair of the barrier in blk_freeze_queue_start():
+ * we need to order reading the __PERCPU_REF_DEAD flag of
+ * .q_usage_counter against reading .mq_freeze_depth or the
+ * queue dying flag, otherwise the following wait may never
+ * return if the two reads are reordered.
*/
smp_rmb();
-
wait_event(q->mq_freeze_wq,
(!q->mq_freeze_depth &&
blk_pm_resume_queue(pm, q)) ||
@@ -466,23 +464,43 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
if (blk_queue_dying(q))
return -ENODEV;
}
+
+ return 0;
}
static inline int bio_queue_enter(struct bio *bio)
{
- struct request_queue *q = bio->bi_bdev->bd_disk->queue;
- bool nowait = bio->bi_opf & REQ_NOWAIT;
- int ret;
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+ struct request_queue *q = disk->queue;
- ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
- if (unlikely(ret)) {
- if (nowait && !blk_queue_dying(q))
+ while (!blk_try_enter_queue(q, false)) {
+ if (bio->bi_opf & REQ_NOWAIT) {
+ if (test_bit(GD_DEAD, &disk->state))
+ goto dead;
bio_wouldblock_error(bio);
- else
- bio_io_error(bio);
+ return -EBUSY;
+ }
+
+ /*
+ * Read-side pair of the barrier in blk_freeze_queue_start():
+ * we need to order reading the __PERCPU_REF_DEAD flag of
+ * .q_usage_counter against reading .mq_freeze_depth or the
+ * queue dying flag, otherwise the following wait may never
+ * return if the two reads are reordered.
+ */
+ smp_rmb();
+ wait_event(q->mq_freeze_wq,
+ (!q->mq_freeze_depth &&
+ blk_pm_resume_queue(false, q)) ||
+ test_bit(GD_DEAD, &disk->state));
+ if (test_bit(GD_DEAD, &disk->state))
+ goto dead;
}
- return ret;
+ return 0;
+dead:
+ bio_io_error(bio);
+ return -ENODEV;
}
void blk_queue_exit(struct request_queue *q)
@@ -899,11 +917,18 @@ static blk_qc_t __submit_bio(struct bio *bio)
struct gendisk *disk = bio->bi_bdev->bd_disk;
blk_qc_t ret = BLK_QC_T_NONE;
- if (blk_crypto_bio_prep(&bio)) {
- if (!disk->fops->submit_bio)
- return blk_mq_submit_bio(bio);
+ if (unlikely(bio_queue_enter(bio) != 0))
+ return BLK_QC_T_NONE;
+
+ if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
+ goto queue_exit;
+ if (disk->fops->submit_bio) {
ret = disk->fops->submit_bio(bio);
+ goto queue_exit;
}
+ return blk_mq_submit_bio(bio);
+
+queue_exit:
blk_queue_exit(disk->queue);
return ret;
}
@@ -941,9 +966,6 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
struct bio_list lower, same;
- if (unlikely(bio_queue_enter(bio) != 0))
- continue;
-
/*
* Create a fresh bio_list for all subordinate requests.
*/
@@ -979,23 +1001,12 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
{
struct bio_list bio_list[2] = { };
- blk_qc_t ret = BLK_QC_T_NONE;
+ blk_qc_t ret;
current->bio_list = bio_list;
do {
- struct gendisk *disk = bio->bi_bdev->bd_disk;
-
- if (unlikely(bio_queue_enter(bio) != 0))
- continue;
-
- if (!blk_crypto_bio_prep(&bio)) {
- blk_queue_exit(disk->queue);
- ret = BLK_QC_T_NONE;
- continue;
- }
-
- ret = blk_mq_submit_bio(bio);
+ ret = __submit_bio(bio);
} while ((bio = bio_list_pop(&bio_list[0])));
current->bio_list = NULL;
@@ -1013,9 +1024,6 @@ static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
*/
blk_qc_t submit_bio_noacct(struct bio *bio)
{
- if (!submit_bio_checks(bio))
- return BLK_QC_T_NONE;
-
/*
* We only want one ->submit_bio to be active at a time, else stack
* usage with stacked devices could be a problem. Use current->bio_list
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 69a12177dfb6..16d5d5338392 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -426,8 +426,15 @@ EXPORT_SYMBOL(blk_integrity_register);
*/
void blk_integrity_unregister(struct gendisk *disk)
{
+ struct blk_integrity *bi = &disk->queue->integrity;
+
+ if (!bi->profile)
+ return;
+
+ /* ensure all bios are off the integrity workqueue */
+ blk_flush_integrity();
blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
- memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+ memset(bi, 0, sizeof(*bi));
}
EXPORT_SYMBOL(blk_integrity_unregister);
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 4b66d2776eda..3b38d15723de 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -129,6 +129,7 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(PCI_P2PDMA),
QUEUE_FLAG_NAME(ZONE_RESETALL),
QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
+ QUEUE_FLAG_NAME(HCTX_ACTIVE),
QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 86f87346232a..ff5caeb82542 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -208,7 +208,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
spin_lock_irqsave(&tags->lock, flags);
rq = tags->rqs[bitnr];
- if (!rq || !refcount_inc_not_zero(&rq->ref))
+ if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
rq = NULL;
spin_unlock_irqrestore(&tags->lock, flags);
return rq;
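
/*
 * The fixed lookup in full, reconstructed from the one-line change
 * above. tags->rqs[] is updated lazily, so an entry can still point at
 * a request whose tag has since been reassigned; comparing rq->tag with
 * the bit being iterated filters those stale pointers before a
 * reference is taken.
 */
static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
					       unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}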
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 108a352051be..bc026372de43 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -188,9 +188,11 @@ void blk_mq_freeze_queue(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
-void blk_mq_unfreeze_queue(struct request_queue *q)
+void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
mutex_lock(&q->mq_freeze_lock);
+ if (force_atomic)
+ q->q_usage_counter.data->force_atomic = true;
q->mq_freeze_depth--;
WARN_ON_ONCE(q->mq_freeze_depth < 0);
if (!q->mq_freeze_depth) {
@@ -199,6 +201,11 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
}
mutex_unlock(&q->mq_freeze_lock);
}
+
+void blk_mq_unfreeze_queue(struct request_queue *q)
+{
+ __blk_mq_unfreeze_queue(q, false);
+}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
/*
diff --git a/block/blk.h b/block/blk.h
index 7d2a0ba7ed21..6c3c00a8fe19 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -51,6 +51,8 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
void blk_free_flush_queue(struct blk_flush_queue *q);
void blk_freeze_queue(struct request_queue *q);
+void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
+void blk_queue_start_drain(struct request_queue *q);
#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
diff --git a/block/bsg.c b/block/bsg.c
index 351095193788..882f56bff14f 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -165,13 +165,20 @@ static const struct file_operations bsg_fops = {
.llseek = default_llseek,
};
+static void bsg_device_release(struct device *dev)
+{
+ struct bsg_device *bd = container_of(dev, struct bsg_device, device);
+
+ ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+ kfree(bd);
+}
+
void bsg_unregister_queue(struct bsg_device *bd)
{
if (bd->queue->kobj.sd)
sysfs_remove_link(&bd->queue->kobj, "bsg");
cdev_device_del(&bd->cdev, &bd->device);
- ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
- kfree(bd);
+ put_device(&bd->device);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
@@ -193,11 +200,13 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
if (ret < 0) {
if (ret == -ENOSPC)
dev_err(parent, "bsg: too many bsg devices\n");
- goto out_kfree;
+ kfree(bd);
+ return ERR_PTR(ret);
}
bd->device.devt = MKDEV(bsg_major, ret);
bd->device.class = bsg_class;
bd->device.parent = parent;
+ bd->device.release = bsg_device_release;
dev_set_name(&bd->device, "%s", name);
device_initialize(&bd->device);
@@ -205,7 +214,7 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
bd->cdev.owner = THIS_MODULE;
ret = cdev_device_add(&bd->cdev, &bd->device);
if (ret)
- goto out_ida_remove;
+ goto out_put_device;
if (q->kobj.sd) {
ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");
@@ -217,10 +226,8 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
out_device_del:
cdev_device_del(&bd->cdev, &bd->device);
-out_ida_remove:
- ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
-out_kfree:
- kfree(bd);
+out_put_device:
+ put_device(&bd->device);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
diff --git a/block/fops.c b/block/fops.c
index ffce6f6c68dd..1e970c247e0e 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -14,6 +14,7 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
+#include <linux/fs.h>
#include "blk.h"
static struct inode *bdev_file_inode(struct file *file)
@@ -553,7 +554,8 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
loff_t len)
{
- struct block_device *bdev = I_BDEV(bdev_file_inode(file));
+ struct inode *inode = bdev_file_inode(file);
+ struct block_device *bdev = I_BDEV(inode);
loff_t end = start + len - 1;
loff_t isize;
int error;
@@ -580,10 +582,12 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
if ((start | len) & (bdev_logical_block_size(bdev) - 1))
return -EINVAL;
+ filemap_invalidate_lock(inode->i_mapping);
+
/* Invalidate the page cache, including dirty pages. */
error = truncate_bdev_range(bdev, file->f_mode, start, end);
if (error)
- return error;
+ goto fail;
switch (mode) {
case FALLOC_FL_ZERO_RANGE:
@@ -600,17 +604,12 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
GFP_KERNEL, 0);
break;
default:
- return -EOPNOTSUPP;
+ error = -EOPNOTSUPP;
}
- if (error)
- return error;
- /*
- * Invalidate the page cache again; if someone wandered in and dirtied
- * a page, we just discard it - userspace has no way of knowing whether
- * the write happened before or after discard completing...
- */
- return truncate_bdev_range(bdev, file->f_mode, start, end);
+ fail:
+ filemap_invalidate_unlock(inode->i_mapping);
+ return error;
}
const struct file_operations def_blk_fops = {
diff --git a/block/genhd.c b/block/genhd.c
index 7b6e5e1cf956..b49858550fa6 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -26,6 +26,7 @@
#include <linux/badblocks.h>
#include "blk.h"
+#include "blk-rq-qos.h"
static struct kobject *block_depr;
@@ -559,6 +560,8 @@ EXPORT_SYMBOL(device_add_disk);
*/
void del_gendisk(struct gendisk *disk)
{
+ struct request_queue *q = disk->queue;
+
might_sleep();
if (WARN_ON_ONCE(!disk_live(disk) && !(disk->flags & GENHD_FL_HIDDEN)))
@@ -575,8 +578,27 @@ void del_gendisk(struct gendisk *disk)
fsync_bdev(disk->part0);
__invalidate_device(disk->part0, true);
+ /*
+ * Fail any new I/O.
+ */
+ set_bit(GD_DEAD, &disk->state);
set_capacity(disk, 0);
+ /*
+ * Prevent new I/O from crossing bio_queue_enter().
+ */
+ blk_queue_start_drain(q);
+ blk_mq_freeze_queue_wait(q);
+
+ rq_qos_exit(q);
+ blk_sync_queue(q);
+ blk_flush_integrity();
+ /*
+ * Allow using passthrough request again after the queue is torn down.
+ */
+ blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
+ __blk_mq_unfreeze_queue(q, true);
+
if (!(disk->flags & GENHD_FL_HIDDEN)) {
sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
@@ -1056,6 +1078,7 @@ static void disk_release(struct device *dev)
struct gendisk *disk = dev_to_disk(dev);
might_sleep();
+ WARN_ON_ONCE(disk_live(disk));
disk_release_events(disk);
kfree(disk->random);
@@ -1268,6 +1291,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
out_destroy_part_tbl:
xa_destroy(&disk->part_tbl);
+ disk->part0->bd_disk = NULL;
iput(disk->part0->bd_inode);
out_free_bdi:
bdi_put(disk->bdi);
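
/*
 * The new teardown ordering in del_gendisk(), condensed from the hunk
 * further above: mark the disk dead and drain in-flight I/O before
 * tearing down rq-qos and sync state, then unfreeze in force-atomic
 * mode so passthrough requests keep working on the torn-down queue.
 */
set_bit(GD_DEAD, &disk->state);
set_capacity(disk, 0);
blk_queue_start_drain(q);
blk_mq_freeze_queue_wait(q);

rq_qos_exit(q);
blk_sync_queue(q);
blk_flush_integrity();
blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
__blk_mq_unfreeze_queue(q, true);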
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 15a8be57203d..a0ffbabfac2c 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -151,6 +151,7 @@ struct kyber_ctx_queue {
struct kyber_queue_data {
struct request_queue *q;
+ dev_t dev;
/*
* Each scheduling domain has a limited number of in-flight requests
@@ -257,7 +258,7 @@ static int calculate_percentile(struct kyber_queue_data *kqd,
}
memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
- trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
+ trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
kyber_latency_type_names[type], percentile,
bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);
@@ -270,7 +271,7 @@ static void kyber_resize_domain(struct kyber_queue_data *kqd,
depth = clamp(depth, 1U, kyber_depth[sched_domain]);
if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
- trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
+ trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
depth);
}
}
@@ -366,6 +367,7 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
goto err;
kqd->q = q;
+ kqd->dev = disk_devt(q->disk);
kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
GFP_KERNEL | __GFP_ZERO);
@@ -774,7 +776,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
list_del_init(&rq->queuelist);
return rq;
} else {
- trace_kyber_throttled(kqd->q,
+ trace_kyber_throttled(kqd->dev,
kyber_domain_names[khd->cur_domain]);
}
} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
@@ -787,7 +789,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
list_del_init(&rq->queuelist);
return rq;
} else {
- trace_kyber_throttled(kqd->q,
+ trace_kyber_throttled(kqd->dev,
kyber_domain_names[khd->cur_domain]);
}
}
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 30d2db37cc87..0d399ddaa185 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -17,6 +17,8 @@ source "drivers/bus/Kconfig"
source "drivers/connector/Kconfig"
+source "drivers/firmware/Kconfig"
+
source "drivers/gnss/Kconfig"
source "drivers/mtd/Kconfig"
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index 0a0a982f9c28..c0e77c1c8e09 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -36,7 +36,7 @@ struct acpi_gtdt_descriptor {
static struct acpi_gtdt_descriptor acpi_gtdt_desc __initdata;
-static inline void *next_platform_timer(void *platform_timer)
+static inline __init void *next_platform_timer(void *platform_timer)
{
struct acpi_gtdt_header *gh = platform_timer;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index a3ef6cce644c..7dd80acf92c7 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3007,6 +3007,18 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
ndr_desc->target_node = NUMA_NO_NODE;
}
+ /* Fallback to address based numa information if node lookup failed */
+ if (ndr_desc->numa_node == NUMA_NO_NODE) {
+ ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address);
+ dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]",
+ NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+ }
+ if (ndr_desc->target_node == NUMA_NO_NODE) {
+ ndr_desc->target_node = phys_to_target_node(spa->address);
+ dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
+ NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end);
+ }
+
/*
* Persistence domain bits are hierarchical, if
* ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index a43f1521efe6..45c5c0e45e33 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -284,8 +284,7 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
#define should_use_kmap(pfn) page_is_ram(pfn)
#endif
-static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
- bool memory)
+static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
unsigned long pfn;
@@ -295,8 +294,7 @@ static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
return NULL;
return (void __iomem __force *)kmap(pfn_to_page(pfn));
} else
- return memory ? acpi_os_memmap(pg_off, pg_sz) :
- acpi_os_ioremap(pg_off, pg_sz);
+ return acpi_os_ioremap(pg_off, pg_sz);
}
static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
@@ -311,10 +309,9 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
}
/**
- * __acpi_os_map_iomem - Get a virtual address for a given physical address range.
+ * acpi_os_map_iomem - Get a virtual address for a given physical address range.
* @phys: Start of the physical address range to map.
* @size: Size of the physical address range to map.
- * @memory: true if remapping memory, false if IO
*
* Look up the given physical address range in the list of existing ACPI memory
* mappings. If found, get a reference to it and return a pointer to it (its
@@ -324,8 +321,8 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
* During early init (when acpi_permanent_mmap has not been set yet) this
* routine simply calls __acpi_map_table() to get the job done.
*/
-static void __iomem __ref
-*__acpi_os_map_iomem(acpi_physical_address phys, acpi_size size, bool memory)
+void __iomem __ref
+*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
struct acpi_ioremap *map;
void __iomem *virt;
@@ -356,7 +353,7 @@ static void __iomem __ref
pg_off = round_down(phys, PAGE_SIZE);
pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
- virt = acpi_map(phys, size, memory);
+ virt = acpi_map(phys, size);
if (!virt) {
mutex_unlock(&acpi_ioremap_lock);
kfree(map);
@@ -375,17 +372,11 @@ out:
mutex_unlock(&acpi_ioremap_lock);
return map->virt + (phys - map->phys);
}
-
-void __iomem *__ref
-acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
-{
- return __acpi_os_map_iomem(phys, size, false);
-}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
- return (void *)__acpi_os_map_iomem(phys, size, true);
+ return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f9383736fa0f..71419eb16e09 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -21,6 +21,7 @@
#include <linux/earlycpio.h>
#include <linux/initrd.h>
#include <linux/security.h>
+#include <linux/kmemleak.h>
#include "internal.h"
#ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -601,6 +602,8 @@ void __init acpi_table_upgrade(void)
*/
arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
+ kmemleak_ignore_phys(acpi_tables_addr);
+
/*
* early_ioremap only can remap 256k one time. If we map all
* tables one time, we will hit the limit. Need to map chunks
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index bd92b549fd5a..1c48358b43ba 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -371,7 +371,7 @@ static int lps0_device_attach(struct acpi_device *adev,
return 0;
if (acpi_s2idle_vendor_amd()) {
- /* AMD0004, AMDI0005:
+ /* AMD0004, AMD0005, AMDI0005:
* - Should use rev_id 0x0
* - function mask > 0x3: Should use AMD method, but has off by one bug
* - function mask = 0x3: Should use Microsoft method
@@ -390,6 +390,7 @@ static int lps0_device_attach(struct acpi_device *adev,
ACPI_LPS0_DSM_UUID_MICROSOFT, 0,
&lps0_dsm_guid_microsoft);
if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
+ !strcmp(hid, "AMD0005") ||
!strcmp(hid, "AMDI0005"))) {
lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index d9030cb6b1e4..9edacc8b9768 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1852,6 +1852,7 @@ static void binder_deferred_fd_close(int fd)
}
static void binder_transaction_buffer_release(struct binder_proc *proc,
+ struct binder_thread *thread,
struct binder_buffer *buffer,
binder_size_t failed_at,
bool is_failure)
@@ -2011,8 +2012,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
&proc->alloc, &fd, buffer,
offset, sizeof(fd));
WARN_ON(err);
- if (!err)
+ if (!err) {
binder_deferred_fd_close(fd);
+ /*
+ * Need to make sure the thread goes
+ * back to userspace to complete the
+ * deferred close
+ */
+ if (thread)
+ thread->looper_need_return = true;
+ }
}
} break;
default:
@@ -3038,9 +3047,8 @@ static void binder_transaction(struct binder_proc *proc,
if (reply) {
binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc);
- if (target_thread->is_dead || target_proc->is_frozen) {
- return_error = target_thread->is_dead ?
- BR_DEAD_REPLY : BR_FROZEN_REPLY;
+ if (target_thread->is_dead) {
+ return_error = BR_DEAD_REPLY;
binder_inner_proc_unlock(target_proc);
goto err_dead_proc_or_thread;
}
@@ -3105,7 +3113,7 @@ err_bad_parent:
err_copy_data_failed:
binder_free_txn_fixups(t);
trace_binder_transaction_failed_buffer_release(t->buffer);
- binder_transaction_buffer_release(target_proc, t->buffer,
+ binder_transaction_buffer_release(target_proc, NULL, t->buffer,
buffer_offset, true);
if (target_node)
binder_dec_node_tmpref(target_node);
@@ -3184,7 +3192,9 @@ err_invalid_target_handle:
* Cleanup buffer and free it.
*/
static void
-binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
+binder_free_buf(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_buffer *buffer)
{
binder_inner_proc_lock(proc);
if (buffer->transaction) {
@@ -3212,7 +3222,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
- binder_transaction_buffer_release(proc, buffer, 0, false);
+ binder_transaction_buffer_release(proc, thread, buffer, 0, false);
binder_alloc_free_buf(&proc->alloc, buffer);
}
@@ -3414,7 +3424,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid, (u64)data_ptr,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
- binder_free_buf(proc, buffer);
+ binder_free_buf(proc, thread, buffer);
break;
}
@@ -4107,7 +4117,7 @@ retry:
buffer->transaction = NULL;
binder_cleanup_transaction(t, "fd fixups failed",
BR_FAILED_REPLY);
- binder_free_buf(proc, buffer);
+ binder_free_buf(proc, thread, buffer);
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
proc->pid, thread->pid,
@@ -4648,6 +4658,22 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
return 0;
}
+static bool binder_txns_pending_ilocked(struct binder_proc *proc)
+{
+ struct rb_node *n;
+ struct binder_thread *thread;
+
+ if (proc->outstanding_txns > 0)
+ return true;
+
+ for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
+ thread = rb_entry(n, struct binder_thread, rb_node);
+ if (thread->transaction_stack)
+ return true;
+ }
+ return false;
+}
+
static int binder_ioctl_freeze(struct binder_freeze_info *info,
struct binder_proc *target_proc)
{
@@ -4679,8 +4705,13 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
(!target_proc->outstanding_txns),
msecs_to_jiffies(info->timeout_ms));
- if (!ret && target_proc->outstanding_txns)
- ret = -EAGAIN;
+ /* Check pending transactions that wait for reply */
+ if (ret >= 0) {
+ binder_inner_proc_lock(target_proc);
+ if (binder_txns_pending_ilocked(target_proc))
+ ret = -EAGAIN;
+ binder_inner_proc_unlock(target_proc);
+ }
if (ret < 0) {
binder_inner_proc_lock(target_proc);
@@ -4696,6 +4727,7 @@ static int binder_ioctl_get_freezer_info(
{
struct binder_proc *target_proc;
bool found = false;
+ __u32 txns_pending;
info->sync_recv = 0;
info->async_recv = 0;
@@ -4705,7 +4737,9 @@ static int binder_ioctl_get_freezer_info(
if (target_proc->pid == info->pid) {
found = true;
binder_inner_proc_lock(target_proc);
- info->sync_recv |= target_proc->sync_recv;
+ txns_pending = binder_txns_pending_ilocked(target_proc);
+ info->sync_recv |= target_proc->sync_recv |
+ (txns_pending << 1);
info->async_recv |= target_proc->async_recv;
binder_inner_proc_unlock(target_proc);
}
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 810c0b84d3f8..402c4d4362a8 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -378,6 +378,8 @@ struct binder_ref {
* binder transactions
* (protected by @inner_lock)
* @sync_recv: process received sync transactions since last frozen
+ * bit 0: received sync transaction after being frozen
+ * bit 1: new pending sync transaction during freezing
* (protected by @inner_lock)
* @async_recv: process received async transactions since last frozen
* (protected by @inner_lock)
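
/*
 * Illustrative decoding of the two sync_recv bits documented above (a
 * userspace-side sketch; it assumes the BINDER_GET_FROZEN_INFO ioctl
 * has filled a struct binder_frozen_status_info named info):
 */
int received_sync_txn = info.sync_recv & 0x1;	/* bit 0 */
int had_pending_txns  = info.sync_recv & 0x2;	/* bit 1, set as (txns_pending << 1) */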
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index b2f552088291..0910441321f7 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -440,10 +440,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
hpriv->phy_regulator = devm_regulator_get(dev, "phy");
if (IS_ERR(hpriv->phy_regulator)) {
rc = PTR_ERR(hpriv->phy_regulator);
- if (rc == -EPROBE_DEFER)
- goto err_out;
- rc = 0;
- hpriv->phy_regulator = NULL;
+ goto err_out;
}
if (flags & AHCI_PLATFORM_GET_RESETS) {
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index c3e6592712c4..0a8bf09a5c19 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -352,7 +352,8 @@ static unsigned int pdc_data_xfer_vlb(struct ata_queued_cmd *qc,
iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
if (unlikely(slop)) {
- __le32 pad;
+ __le32 pad = 0;
+
if (rw == READ) {
pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
memcpy(buf + buflen - slop, &pad, slop);
@@ -742,7 +743,8 @@ static unsigned int vlb32_data_xfer(struct ata_queued_cmd *qc,
ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
if (unlikely(slop)) {
- __le32 pad;
+ __le32 pad = 0;
+
if (rw == WRITE) {
memcpy(&pad, buf + buflen - slop, slop);
iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index e65dd803a453..249da496581a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -95,6 +95,8 @@ int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
list_add(&link->s_hook, &sup->consumers);
list_add(&link->c_hook, &con->suppliers);
+ pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
+ con, sup);
out:
mutex_unlock(&fwnode_link_lock);
@@ -102,6 +104,21 @@ out:
}
/**
+ * __fwnode_link_del - Delete a link between two fwnode_handles.
+ * @link: the fwnode_link to be deleted
+ *
+ * The fwnode_link_lock needs to be held when this function is called.
+ */
+static void __fwnode_link_del(struct fwnode_link *link)
+{
+ pr_debug("%pfwP Dropping the fwnode link to %pfwP\n",
+ link->consumer, link->supplier);
+ list_del(&link->s_hook);
+ list_del(&link->c_hook);
+ kfree(link);
+}
+
+/**
* fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
* @fwnode: fwnode whose supplier links need to be deleted
*
@@ -112,11 +129,8 @@ static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
struct fwnode_link *link, *tmp;
mutex_lock(&fwnode_link_lock);
- list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
- list_del(&link->s_hook);
- list_del(&link->c_hook);
- kfree(link);
- }
+ list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
+ __fwnode_link_del(link);
mutex_unlock(&fwnode_link_lock);
}
@@ -131,11 +145,8 @@ static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
struct fwnode_link *link, *tmp;
mutex_lock(&fwnode_link_lock);
- list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
- list_del(&link->s_hook);
- list_del(&link->c_hook);
- kfree(link);
- }
+ list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
+ __fwnode_link_del(link);
mutex_unlock(&fwnode_link_lock);
}
@@ -676,7 +687,8 @@ struct device_link *device_link_add(struct device *consumer,
{
struct device_link *link;
- if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
+ if (!consumer || !supplier || consumer == supplier ||
+ flags & ~DL_ADD_VALID_FLAGS ||
(flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
(flags & DL_FLAG_SYNC_STATE_ONLY &&
(flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
@@ -975,6 +987,7 @@ int device_links_check_suppliers(struct device *dev)
{
struct device_link *link;
int ret = 0;
+ struct fwnode_handle *sup_fw;
/*
* Device waiting for supplier to become available is not allowed to
@@ -983,10 +996,11 @@ int device_links_check_suppliers(struct device *dev)
mutex_lock(&fwnode_link_lock);
if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
!fw_devlink_is_permissive()) {
- dev_dbg(dev, "probe deferral - wait for supplier %pfwP\n",
- list_first_entry(&dev->fwnode->suppliers,
- struct fwnode_link,
- c_hook)->supplier);
+ sup_fw = list_first_entry(&dev->fwnode->suppliers,
+ struct fwnode_link,
+ c_hook)->supplier;
+ dev_err_probe(dev, -EPROBE_DEFER, "wait for supplier %pfwP\n",
+ sup_fw);
mutex_unlock(&fwnode_link_lock);
return -EPROBE_DEFER;
}
@@ -1001,8 +1015,9 @@ int device_links_check_suppliers(struct device *dev)
if (link->status != DL_STATE_AVAILABLE &&
!(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
device_links_missing_supplier(dev);
- dev_dbg(dev, "probe deferral - supplier %s not ready\n",
- dev_name(link->supplier));
+ dev_err_probe(dev, -EPROBE_DEFER,
+ "supplier %s not ready\n",
+ dev_name(link->supplier));
ret = -EPROBE_DEFER;
break;
}
@@ -1722,6 +1737,25 @@ static int fw_devlink_create_devlink(struct device *con,
struct device *sup_dev;
int ret = 0;
+ /*
+ * In some cases, a device P might also be a supplier to its child node
+ * C. However, this would defer the probe of C until the probe of P
+ * completes successfully. This is perfectly fine in the device driver
+ * model. device_add() doesn't guarantee probe completion of the device
+ * by the time it returns.
+ *
+ * However, there are a few drivers that assume C will finish probing
+ * as soon as it's added and before P finishes probing. So, we provide
+ * a flag to let fw_devlink know not to delay the probe of C until the
+ * probe of P completes successfully.
+ *
+ * When such a flag is set, we can't create device links where P is the
+ * supplier of C as that would delay the probe of C.
+ */
+ if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
+ fwnode_is_ancestor_of(sup_handle, con->fwnode))
+ return -EINVAL;
+
sup_dev = get_dev_from_fwnode(sup_handle);
if (sup_dev) {
/*
@@ -1772,14 +1806,21 @@ static int fw_devlink_create_devlink(struct device *con,
* be broken by applying logic. Check for these types of cycles and
* break them so that devices in the cycle probe properly.
*
- * If the supplier's parent is dependent on the consumer, then
- * the consumer-supplier dependency is a false dependency. So,
- * treat it as an invalid link.
+ * If the supplier's parent is dependent on the consumer, then the
+ * consumer and supplier have a cyclic dependency. Since fw_devlink
+ * can't tell which of the inferred dependencies are incorrect, don't
+ * enforce probe ordering between any of the devices in this cyclic
+ * dependency. Do this by relaxing all the fw_devlink device links in
+ * this cycle and by treating the fwnode link between the consumer and
+ * the supplier as an invalid dependency.
*/
sup_dev = fwnode_get_next_parent_dev(sup_handle);
if (sup_dev && device_is_dependent(con, sup_dev)) {
- dev_dbg(con, "Not linking to %pfwP - False link\n",
- sup_handle);
+ dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
+ sup_handle, dev_name(sup_dev));
+ device_links_write_lock();
+ fw_devlink_relax_cycle(con, sup_dev);
+ device_links_write_unlock();
ret = -EINVAL;
} else {
/*
@@ -1858,9 +1899,7 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
if (!own_link || ret == -EAGAIN)
continue;
- list_del(&link->s_hook);
- list_del(&link->c_hook);
- kfree(link);
+ __fwnode_link_del(link);
}
}
@@ -1912,9 +1951,7 @@ static void __fw_devlink_link_to_suppliers(struct device *dev,
if (!own_link || ret == -EAGAIN)
continue;
- list_del(&link->s_hook);
- list_del(&link->c_hook);
- kfree(link);
+ __fwnode_link_del(link);
/* If no device link was created, nothing more to do. */
if (ret)
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index a97f33d0c59f..94665037f4a3 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/rtc.h>
#include <linux/suspend.h>
+#include <linux/init.h>
#include <linux/mc146818rtc.h>
@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
const char *file = *(const char **)(tracedata + 2);
unsigned int user_hash_value, file_hash_value;
+ if (!x86_platform.legacy.rtc)
+ return;
+
user_hash_value = user % USERHASH;
file_hash_value = hash_string(lineno, file, FILEHASH);
set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
static int __init early_resume_init(void)
{
+ if (!x86_platform.legacy.rtc)
+ return 0;
+
hash_value_early_read = read_magic_time();
register_pm_notifier(&pm_trace_nb);
return 0;
@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
unsigned int val = hash_value_early_read;
unsigned int user, file, dev;
+ if (!x86_platform.legacy.rtc)
+ return 0;
+
user = val % USERHASH;
val = val / USERHASH;
file = val % FILEHASH;
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 453918eb7390..f1f35b48ab8b 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -15,7 +15,6 @@
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/property.h>
-#include <linux/etherdevice.h>
#include <linux/phy.h>
struct fwnode_handle *dev_fwnode(struct device *dev)
@@ -935,68 +934,6 @@ int device_get_phy_mode(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_get_phy_mode);
-static void *fwnode_get_mac_addr(struct fwnode_handle *fwnode,
- const char *name, char *addr,
- int alen)
-{
- int ret = fwnode_property_read_u8_array(fwnode, name, addr, alen);
-
- if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
- return addr;
- return NULL;
-}
-
-/**
- * fwnode_get_mac_address - Get the MAC from the firmware node
- * @fwnode: Pointer to the firmware node
- * @addr: Address of buffer to store the MAC in
- * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
- *
- * Search the firmware node for the best MAC address to use. 'mac-address' is
- * checked first, because that is supposed to contain to "most recent" MAC
- * address. If that isn't set, then 'local-mac-address' is checked next,
- * because that is the default address. If that isn't set, then the obsolete
- * 'address' is checked, just in case we're using an old device tree.
- *
- * Note that the 'address' property is supposed to contain a virtual address of
- * the register set, but some DTS files have redefined that property to be the
- * MAC address.
- *
- * All-zero MAC addresses are rejected, because those could be properties that
- * exist in the firmware tables, but were not updated by the firmware. For
- * example, the DTS could define 'mac-address' and 'local-mac-address', with
- * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'.
- * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
- * exists but is all zeros.
-*/
-void *fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr, int alen)
-{
- char *res;
-
- res = fwnode_get_mac_addr(fwnode, "mac-address", addr, alen);
- if (res)
- return res;
-
- res = fwnode_get_mac_addr(fwnode, "local-mac-address", addr, alen);
- if (res)
- return res;
-
- return fwnode_get_mac_addr(fwnode, "address", addr, alen);
-}
-EXPORT_SYMBOL(fwnode_get_mac_address);
-
-/**
- * device_get_mac_address - Get the MAC for a given device
- * @dev: Pointer to the device
- * @addr: Address of buffer to store the MAC in
- * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
- */
-void *device_get_mac_address(struct device *dev, char *addr, int alen)
-{
- return fwnode_get_mac_address(dev_fwnode(dev), addr, alen);
-}
-EXPORT_SYMBOL(device_get_mac_address);
-
/**
* fwnode_irq_get - Get IRQ directly from a fwnode
* @fwnode: Pointer to the firmware node
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 7bd0f3cfb7eb..c46f6a8e14d2 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -1116,6 +1116,9 @@ int device_create_managed_software_node(struct device *dev,
to_swnode(fwnode)->managed = true;
set_secondary_fwnode(dev, fwnode);
+ if (device_is_registered(dev))
+ software_node_notify(dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(device_create_managed_software_node);
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
index 64b2f3d744d5..7f76fee6f989 100644
--- a/drivers/base/test/Makefile
+++ b/drivers/base/test/Makefile
@@ -2,4 +2,4 @@
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
obj-$(CONFIG_DRIVER_PE_KUNIT_TEST) += property-entry-test.o
-CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all
+CFLAGS_property-entry-test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index c6d6ba0d00b1..8e7ca3e4c8c4 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -20,7 +20,7 @@ MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");
/* contains the number the next bus should get. */
-static unsigned int bcma_bus_next_num = 0;
+static unsigned int bcma_bus_next_num;
/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 58ec167aa018..530b31240203 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -373,10 +373,22 @@ static int brd_alloc(int i)
struct gendisk *disk;
char buf[DISK_NAME_LEN];
+ mutex_lock(&brd_devices_mutex);
+ list_for_each_entry(brd, &brd_devices, brd_list) {
+ if (brd->brd_number == i) {
+ mutex_unlock(&brd_devices_mutex);
+ return -EEXIST;
+ }
+ }
brd = kzalloc(sizeof(*brd), GFP_KERNEL);
- if (!brd)
+ if (!brd) {
+ mutex_unlock(&brd_devices_mutex);
return -ENOMEM;
+ }
brd->brd_number = i;
+ list_add_tail(&brd->brd_list, &brd_devices);
+ mutex_unlock(&brd_devices_mutex);
+
spin_lock_init(&brd->brd_lock);
INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
@@ -411,37 +423,30 @@ static int brd_alloc(int i)
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
add_disk(disk);
- list_add_tail(&brd->brd_list, &brd_devices);
return 0;
out_free_dev:
+ mutex_lock(&brd_devices_mutex);
+ list_del(&brd->brd_list);
+ mutex_unlock(&brd_devices_mutex);
kfree(brd);
return -ENOMEM;
}
static void brd_probe(dev_t dev)
{
- int i = MINOR(dev) / max_part;
- struct brd_device *brd;
-
- mutex_lock(&brd_devices_mutex);
- list_for_each_entry(brd, &brd_devices, brd_list) {
- if (brd->brd_number == i)
- goto out_unlock;
- }
-
- brd_alloc(i);
-out_unlock:
- mutex_unlock(&brd_devices_mutex);
+ brd_alloc(MINOR(dev) / max_part);
}
static void brd_del_one(struct brd_device *brd)
{
- list_del(&brd->brd_list);
del_gendisk(brd->brd_disk);
blk_cleanup_disk(brd->brd_disk);
brd_free_pages(brd);
+ mutex_lock(&brd_devices_mutex);
+ list_del(&brd->brd_list);
+ mutex_unlock(&brd_devices_mutex);
kfree(brd);
}
@@ -491,25 +496,21 @@ static int __init brd_init(void)
brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
- mutex_lock(&brd_devices_mutex);
for (i = 0; i < rd_nr; i++) {
err = brd_alloc(i);
if (err)
goto out_free;
}
- mutex_unlock(&brd_devices_mutex);
-
pr_info("brd: module loaded\n");
return 0;
out_free:
+ unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
debugfs_remove_recursive(brd_debugfs_dir);
list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
brd_del_one(brd);
- mutex_unlock(&brd_devices_mutex);
- unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
pr_info("brd: module NOT loaded !!!\n");
return err;
@@ -519,13 +520,12 @@ static void __exit brd_exit(void)
{
struct brd_device *brd, *next;
+ unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
debugfs_remove_recursive(brd_debugfs_dir);
list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
brd_del_one(brd);
- unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
-
pr_info("brd: module unloaded\n");
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 5170a630778d..1183f7872b71 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -97,13 +97,18 @@ struct nbd_config {
atomic_t recv_threads;
wait_queue_head_t recv_wq;
- loff_t blksize;
+ unsigned int blksize_bits;
loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbg_dir;
#endif
};
+static inline unsigned int nbd_blksize(struct nbd_config *config)
+{
+ return 1u << config->blksize_bits;
+}
+
struct nbd_device {
struct blk_mq_tag_set tag_set;
@@ -146,7 +151,7 @@ static struct dentry *nbd_dbg_dir;
#define NBD_MAGIC 0x68797548
-#define NBD_DEF_BLKSIZE 1024
+#define NBD_DEF_BLKSIZE_BITS 10
static unsigned int nbds_max = 16;
static int max_part = 16;
@@ -317,12 +322,12 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
loff_t blksize)
{
if (!blksize)
- blksize = NBD_DEF_BLKSIZE;
+ blksize = 1u << NBD_DEF_BLKSIZE_BITS;
if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
return -EINVAL;
nbd->config->bytesize = bytesize;
- nbd->config->blksize = blksize;
+ nbd->config->blksize_bits = __ffs(blksize);
if (!nbd->task_recv)
return 0;
@@ -1337,7 +1342,7 @@ static int nbd_start_device(struct nbd_device *nbd)
args->index = i;
queue_work(nbd->recv_workq, &args->work);
}
- return nbd_set_size(nbd, config->bytesize, config->blksize);
+ return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
}
static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
@@ -1406,11 +1411,11 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_SET_BLKSIZE:
return nbd_set_size(nbd, config->bytesize, arg);
case NBD_SET_SIZE:
- return nbd_set_size(nbd, arg, config->blksize);
+ return nbd_set_size(nbd, arg, nbd_blksize(config));
case NBD_SET_SIZE_BLOCKS:
- if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize))
+ if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
return -EINVAL;
- return nbd_set_size(nbd, bytesize, config->blksize);
+ return nbd_set_size(nbd, bytesize, nbd_blksize(config));
case NBD_SET_TIMEOUT:
nbd_set_cmd_timeout(nbd, arg);
return 0;
@@ -1476,7 +1481,7 @@ static struct nbd_config *nbd_alloc_config(void)
atomic_set(&config->recv_threads, 0);
init_waitqueue_head(&config->recv_wq);
init_waitqueue_head(&config->conn_wait);
- config->blksize = NBD_DEF_BLKSIZE;
+ config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
atomic_set(&config->live_connections, 0);
try_module_get(THIS_MODULE);
return config;
@@ -1604,7 +1609,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
- debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
+ debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
return 0;
@@ -1826,7 +1831,7 @@ nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
{
struct nbd_config *config = nbd->config;
- u64 bsize = config->blksize;
+ u64 bsize = nbd_blksize(config);
u64 bytes = config->bytesize;
if (info->attrs[NBD_ATTR_SIZE_BYTES])
@@ -1835,7 +1840,7 @@ static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
- if (bytes != config->bytesize || bsize != config->blksize)
+ if (bytes != config->bytesize || bsize != nbd_blksize(config))
return nbd_set_size(nbd, bytes, bsize);
return 0;
}
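
/*
 * The bits-based size math introduced above, condensed: blksize has
 * already been validated as a power of two in [512, PAGE_SIZE] by this
 * point, so storing __ffs(blksize) is lossless and a shift recovers it.
 */
config->blksize_bits = __ffs(blksize);	/* e.g. 1024 -> 10 */
/* nbd_blksize(config) == 1u << config->blksize_bits == 1024 again */

/* and NBD_SET_SIZE_BLOCKS now checks the shift, not a multiply: */
if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
	return -EINVAL;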
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
index 4b93fd83bf79..44e45af00e83 100644
--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -71,8 +71,10 @@ static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt,
int opt_mask = 0;
int token;
int ret = -EINVAL;
- int i, dest_port, nr_poll_queues;
+ int nr_poll_queues = 0;
+ int dest_port = 0;
int p_cnt = 0;
+ int i;
options = kstrdup(buf, GFP_KERNEL);
if (!options)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 9b3bd083b411..303caf2d17d0 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -689,28 +689,6 @@ static const struct blk_mq_ops virtio_mq_ops = {
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
-static int virtblk_validate(struct virtio_device *vdev)
-{
- u32 blk_size;
-
- if (!vdev->config->get) {
- dev_err(&vdev->dev, "%s failure: config access disabled\n",
- __func__);
- return -EINVAL;
- }
-
- if (!virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE))
- return 0;
-
- blk_size = virtio_cread32(vdev,
- offsetof(struct virtio_blk_config, blk_size));
-
- if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)
- __virtio_clear_bit(vdev, VIRTIO_BLK_F_BLK_SIZE);
-
- return 0;
-}
-
static int virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
@@ -722,6 +700,12 @@ static int virtblk_probe(struct virtio_device *vdev)
u8 physical_block_exp, alignment_offset;
unsigned int queue_depth;
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+
err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
GFP_KERNEL);
if (err < 0)
@@ -836,14 +820,6 @@ static int virtblk_probe(struct virtio_device *vdev)
else
blk_size = queue_logical_block_size(q);
- if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE) {
- dev_err(&vdev->dev,
- "block size is changed unexpectedly, now is %u\n",
- blk_size);
- err = -EINVAL;
- goto out_cleanup_disk;
- }
-
/* Use topology information if available */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, physical_block_exp,
@@ -1009,7 +985,6 @@ static struct virtio_driver virtio_blk = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
- .validate = virtblk_validate,
.probe = virtblk_probe,
.remove = virtblk_remove,
.config_changed = virtblk_config_changed,
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index f1705b46fc88..9359bff47296 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -1037,8 +1037,9 @@ static bool btintel_firmware_version(struct hci_dev *hdev,
params = (void *)(fw_ptr + sizeof(*cmd));
- bt_dev_info(hdev, "Boot Address: 0x%x",
- le32_to_cpu(params->boot_addr));
+ *boot_addr = le32_to_cpu(params->boot_addr);
+
+ bt_dev_info(hdev, "Boot Address: 0x%x", *boot_addr);
bt_dev_info(hdev, "Firmware Version: %u-%u.%u",
params->fw_build_num, params->fw_build_ww,
@@ -1071,9 +1072,6 @@ int btintel_download_firmware(struct hci_dev *hdev,
/* Skip version checking */
break;
default:
- /* Skip reading firmware file version in bootloader mode */
- if (ver->fw_variant == 0x06)
- break;
/* Skip download if firmware has the same version */
if (btintel_firmware_version(hdev, ver->fw_build_num,
@@ -1114,19 +1112,16 @@ static int btintel_download_fw_tlv(struct hci_dev *hdev,
int err;
u32 css_header_ver;
- /* Skip reading firmware file version in bootloader mode */
- if (ver->img_type != 0x01) {
- /* Skip download if firmware has the same version */
- if (btintel_firmware_version(hdev, ver->min_fw_build_nn,
- ver->min_fw_build_cw,
- ver->min_fw_build_yy,
- fw, boot_param)) {
- bt_dev_info(hdev, "Firmware already loaded");
- /* Return -EALREADY to indicate that firmware has
- * already been loaded.
- */
- return -EALREADY;
- }
+ /* Skip download if firmware has the same version */
+ if (btintel_firmware_version(hdev, ver->min_fw_build_nn,
+ ver->min_fw_build_cw,
+ ver->min_fw_build_yy,
+ fw, boot_param)) {
+ bt_dev_info(hdev, "Firmware already loaded");
+ /* Return -EALREADY to indicate that firmware has
+ * already been loaded.
+ */
+ return -EALREADY;
}
/* The firmware variant determines if the device is in bootloader
@@ -1285,12 +1280,16 @@ static int btintel_read_debug_features(struct hci_dev *hdev,
static int btintel_set_debug_features(struct hci_dev *hdev,
const struct intel_debug_features *features)
{
- u8 mask[11] = { 0x0a, 0x92, 0x02, 0x07, 0x00, 0x00, 0x00, 0x00,
+ u8 mask[11] = { 0x0a, 0x92, 0x02, 0x7f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00 };
+ u8 period[5] = { 0x04, 0x91, 0x02, 0x05, 0x00 };
+ u8 trace_enable = 0x02;
struct sk_buff *skb;
- if (!features)
+ if (!features) {
+ bt_dev_warn(hdev, "Debug features not read");
return -EINVAL;
+ }
if (!(features->page1[0] & 0x3f)) {
bt_dev_info(hdev, "Telemetry exception format not supported");
@@ -1303,11 +1302,95 @@ static int btintel_set_debug_features(struct hci_dev *hdev,
PTR_ERR(skb));
return PTR_ERR(skb);
}
+ kfree_skb(skb);
+
+ skb = __hci_cmd_sync(hdev, 0xfc8b, 5, period, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Setting periodicity for link statistics traces failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Enable tracing of link statistics events failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ bt_dev_info(hdev, "set debug features: trace_enable 0x%02x mask 0x%02x",
+ trace_enable, mask[3]);
+
+ return 0;
+}
+
+static int btintel_reset_debug_features(struct hci_dev *hdev,
+ const struct intel_debug_features *features)
+{
+ u8 mask[11] = { 0x0a, 0x92, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00 };
+ u8 trace_enable = 0x00;
+ struct sk_buff *skb;
+
+ if (!features) {
+ bt_dev_warn(hdev, "Debug features not read");
+ return -EINVAL;
+ }
+
+ if (!(features->page1[0] & 0x3f)) {
+ bt_dev_info(hdev, "Telemetry exception format not supported");
+ return 0;
+ }
+
+ /* Should stop the trace before writing ddc event mask. */
+ skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Stop tracing of link statistics events failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+ skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
kfree_skb(skb);
+
+ bt_dev_info(hdev, "reset debug features: trace_enable 0x%02x mask 0x%02x",
+ trace_enable, mask[3]);
+
return 0;
}
+int btintel_set_quality_report(struct hci_dev *hdev, bool enable)
+{
+ struct intel_debug_features features;
+ int err;
+
+ bt_dev_dbg(hdev, "enable %d", enable);
+
+ /* Read the Intel supported features and if new exception formats
+ * supported, need to load the additional DDC config to enable.
+ */
+ err = btintel_read_debug_features(hdev, &features);
+ if (err)
+ return err;
+
+ /* Set or reset the debug features. */
+ if (enable)
+ err = btintel_set_debug_features(hdev, &features);
+ else
+ err = btintel_reset_debug_features(hdev, &features);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(btintel_set_quality_report);
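
/*
 * A sketch of how the new export is expected to be driven: the setup
 * path later in this patch assigns it as the hci_dev quality-report
 * hook, so core code toggles telemetry through that hook rather than
 * calling btintel directly. The helper name here is hypothetical.
 */
static int toggle_quality_report(struct hci_dev *hdev, bool enable)
{
	if (!hdev->set_quality_report)	/* assigned in btintel_setup_combined() */
		return -EOPNOTSUPP;
	return hdev->set_quality_report(hdev, enable);
}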
+
static const struct firmware *btintel_legacy_rom_get_fw(struct hci_dev *hdev,
struct intel_version *ver)
{
@@ -1893,7 +1976,6 @@ static int btintel_bootloader_setup(struct hci_dev *hdev,
u32 boot_param;
char ddcname[64];
int err;
- struct intel_debug_features features;
BT_DBG("%s", hdev->name);
@@ -1934,14 +2016,7 @@ static int btintel_bootloader_setup(struct hci_dev *hdev,
btintel_load_ddc_config(hdev, ddcname);
}
- /* Read the Intel supported features and if new exception formats
- * supported, need to load the additional DDC config to enable.
- */
- err = btintel_read_debug_features(hdev, &features);
- if (!err) {
- /* Set DDC mask for available debug features */
- btintel_set_debug_features(hdev, &features);
- }
+ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
/* Read the Intel version information after loading the FW */
err = btintel_read_version(hdev, &new_ver);
@@ -2083,13 +2158,102 @@ done:
return err;
}
+static int btintel_get_codec_config_data(struct hci_dev *hdev,
+ __u8 link, struct bt_codec *codec,
+ __u8 *ven_len, __u8 **ven_data)
+{
+ int err = 0;
+
+ if (!ven_data || !ven_len)
+ return -EINVAL;
+
+ *ven_len = 0;
+ *ven_data = NULL;
+
+ if (link != ESCO_LINK) {
+ bt_dev_err(hdev, "Invalid link type(%u)", link);
+ return -EINVAL;
+ }
+
+ *ven_data = kmalloc(sizeof(__u8), GFP_KERNEL);
+ if (!*ven_data) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ /* supports only CVSD and mSBC offload codecs */
+ switch (codec->id) {
+ case 0x02:
+ **ven_data = 0x00;
+ break;
+ case 0x05:
+ **ven_data = 0x01;
+ break;
+ default:
+ err = -EINVAL;
+ bt_dev_err(hdev, "Invalid codec id(%u)", codec->id);
+ goto error;
+ }
+ /* codec and its capabilities are pre-defined to ids
+ * preset id = 0x00 represents CVSD codec with sampling rate 8K
+ * preset id = 0x01 represents mSBC codec with sampling rate 16K
+ */
+ *ven_len = sizeof(__u8);
+ return err;
+
+error:
+ kfree(*ven_data);
+ *ven_data = NULL;
+ return err;
+}
+
+static int btintel_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id)
+{
+ /* Intel uses 1 as data path id for all the usecases */
+ *data_path_id = 1;
+ return 0;
+}
+
+static int btintel_configure_offload(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ int err = 0;
+ struct intel_offload_use_cases *use_cases;
+
+ skb = __hci_cmd_sync(hdev, 0xfc86, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Reading offload use cases failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len < sizeof(*use_cases)) {
+ err = -EIO;
+ goto error;
+ }
+
+ use_cases = (void *)skb->data;
+
+ if (use_cases->status) {
+ err = -bt_to_errno(skb->data[0]);
+ goto error;
+ }
+
+ if (use_cases->preset[0] & 0x03) {
+ hdev->get_data_path_id = btintel_get_data_path_id;
+ hdev->get_codec_config_data = btintel_get_codec_config_data;
+ }
+error:
+ kfree_skb(skb);
+ return err;
+}
+
static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver)
{
u32 boot_param;
char ddcname[64];
int err;
- struct intel_debug_features features;
struct intel_version_tlv new_ver;
bt_dev_dbg(hdev, "");
@@ -2125,14 +2289,10 @@ static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
*/
btintel_load_ddc_config(hdev, ddcname);
- /* Read the Intel supported features and if new exception formats
- * supported, need to load the additional DDC config to enable.
- */
- err = btintel_read_debug_features(hdev, &features);
- if (!err) {
- /* Set DDC mask for available debug features */
- btintel_set_debug_features(hdev, &features);
- }
+ /* Read supported use cases and set callbacks to fetch datapath id */
+ btintel_configure_offload(hdev);
+
+ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
/* Read the Intel version information after loading the FW */
err = btintel_read_version_tlv(hdev, &new_ver);
@@ -2232,6 +2392,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+ /* Set up the quality report callback for Intel devices */
+ hdev->set_quality_report = btintel_set_quality_report;
+
/* For Legacy device, check the HW platform value and size */
if (skb->len == sizeof(ver) && skb->data[1] == 0x37) {
bt_dev_dbg(hdev, "Read the legacy Intel version information");
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index aa64072bbe68..e500c0d7a729 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -132,6 +132,11 @@ struct intel_debug_features {
__u8 page1[16];
} __packed;
+struct intel_offload_use_cases {
+ __u8 status;
+ __u8 preset[8];
+} __packed;
+
#define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8))
#define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16))
#define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff)
@@ -204,6 +209,7 @@ int btintel_configure_setup(struct hci_dev *hdev);
void btintel_bootup(struct hci_dev *hdev, const void *ptr, unsigned int len);
void btintel_secure_send_result(struct hci_dev *hdev,
const void *ptr, unsigned int len);
+int btintel_set_quality_report(struct hci_dev *hdev, bool enable);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -294,4 +300,9 @@ static inline void btintel_secure_send_result(struct hci_dev *hdev,
const void *ptr, unsigned int len)
{
}
+
+static inline int btintel_set_quality_report(struct hci_dev *hdev, bool enable)
+{
+ return -ENODEV;
+}
#endif
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 8b9d78ce6bb2..5ccbe4d459d0 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -587,12 +587,12 @@ static int btmrvl_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return 0;
}
-static bool btmrvl_prevent_wake(struct hci_dev *hdev)
+static bool btmrvl_wakeup(struct hci_dev *hdev)
{
struct btmrvl_private *priv = hci_get_drvdata(hdev);
struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
- return !device_may_wakeup(&card->func->dev);
+ return device_may_wakeup(&card->func->dev);
}
/*
@@ -696,7 +696,7 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
hdev->send = btmrvl_send_frame;
hdev->setup = btmrvl_setup;
hdev->set_bdaddr = btmrvl_set_bdaddr;
- hdev->prevent_wake = btmrvl_prevent_wake;
+ hdev->wakeup = btmrvl_wakeup;
SET_HCIDEV_DEV(hdev, &card->func->dev);
hdev->dev_type = priv->btmrvl_dev.dev_type;
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index e9d91d7c0db4..9ba22b13b4fa 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -158,8 +158,10 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
int err;
hlen = sizeof(*hdr) + wmt_params->dlen;
- if (hlen > 255)
- return -EINVAL;
+ if (hlen > 255) {
+ err = -EINVAL;
+ goto err_free_skb;
+ }
hdr = (struct mtk_wmt_hdr *)&wc;
hdr->dir = 1;
@@ -173,7 +175,7 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
if (err < 0) {
clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
- return err;
+ goto err_free_skb;
}
/* The vendor specific WMT commands are all answered by a vendor
@@ -190,13 +192,14 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
if (err == -EINTR) {
bt_dev_err(hdev, "Execution of wmt command interrupted");
clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
- return err;
+ goto err_free_skb;
}
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto err_free_skb;
}
/* Parse and handle the return WMT event */
diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c
index 8646b6dd11e9..634cf8f5ed2d 100644
--- a/drivers/bluetooth/btrsi.c
+++ b/drivers/bluetooth/btrsi.c
@@ -19,7 +19,6 @@
#include <net/bluetooth/hci_core.h>
#include <asm/unaligned.h>
#include <net/rsi_91x.h>
-#include <net/genetlink.h>
#define RSI_DMA_ALIGN 8
#define RSI_FRAME_DESC_SIZE 16
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 1f8afa0244d8..c2bdd1e6060e 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -59,6 +59,7 @@ struct id_table {
__u8 hci_bus;
bool config_needed;
bool has_rom_version;
+ bool has_msft_ext;
char *fw_name;
char *cfg_name;
};
@@ -121,6 +122,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8821A, 0xc, 0x8, HCI_USB),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8821c_fw.bin",
.cfg_name = "rtl_bt/rtl8821c_config" },
@@ -135,6 +137,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_UART),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8761b_fw.bin",
.cfg_name = "rtl_bt/rtl8761b_config" },
@@ -149,6 +152,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_UART),
.config_needed = true,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8822cs_fw.bin",
.cfg_name = "rtl_bt/rtl8822cs_config" },
@@ -156,6 +160,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_USB),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8822cu_fw.bin",
.cfg_name = "rtl_bt/rtl8822cu_config" },
@@ -163,6 +168,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8822B, 0xb, 0x7, HCI_USB),
.config_needed = true,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8822b_fw.bin",
.cfg_name = "rtl_bt/rtl8822b_config" },
@@ -170,6 +176,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8852A, 0xa, 0xb, HCI_USB),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8852au_fw.bin",
.cfg_name = "rtl_bt/rtl8852au_config" },
};
@@ -594,8 +601,10 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
hci_rev = le16_to_cpu(resp->hci_rev);
lmp_subver = le16_to_cpu(resp->lmp_subver);
- if (resp->hci_ver == 0x8 && le16_to_cpu(resp->hci_rev) == 0x826c &&
- resp->lmp_ver == 0x8 && le16_to_cpu(resp->lmp_subver) == 0xa99e)
+ btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
+ hdev->bus);
+
+ if (!btrtl_dev->ic_info)
btrtl_dev->drop_fw = true;
if (btrtl_dev->drop_fw) {
@@ -634,13 +643,13 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
hci_ver = resp->hci_ver;
hci_rev = le16_to_cpu(resp->hci_rev);
lmp_subver = le16_to_cpu(resp->lmp_subver);
+
+ btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
+ hdev->bus);
}
out_free:
kfree_skb(skb);
- btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
- hdev->bus);
-
if (!btrtl_dev->ic_info) {
rtl_dev_info(hdev, "unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
lmp_subver, hci_rev, hci_ver);
@@ -684,12 +693,8 @@ out_free:
/* The following chips support the Microsoft vendor extension,
* therefore set the corresponding VsMsftOpCode.
*/
- switch (lmp_subver) {
- case RTL_ROM_LMP_8822B:
- case RTL_ROM_LMP_8852A:
+ if (btrtl_dev->ic_info->has_msft_ext)
hci_set_msft_opcode(hdev, 0xFCF0);
- break;
- }
return btrtl_dev;
@@ -746,6 +751,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
case CHIP_ID_8852A:
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_aosp_capable(hdev);
break;
default:
rtl_dev_dbg(hdev, "Central-peripheral role not enabled.");
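The btrtl changes replace a switch over lmp_subver with a per-chip has_msft_ext flag in the id table, so a capability is declared once, next to the firmware names, instead of being re-derived in code. A standalone sketch of the table-driven lookup, with illustrative values only:

#include <stdbool.h>
#include <stddef.h>

struct id_entry {
	unsigned short lmp_subver;	/* lookup key */
	bool has_msft_ext;		/* capability lives with the entry */
	const char *fw_name;
};

/* Illustrative values, not the real table. */
static const struct id_entry id_table[] = {
	{ 0x8822, true,  "chipA_fw.bin" },
	{ 0x8761, false, "chipB_fw.bin" },
};

static const struct id_entry *match_ic(unsigned short lmp_subver)
{
	for (size_t i = 0; i < sizeof(id_table) / sizeof(id_table[0]); i++)
		if (id_table[i].lmp_subver == lmp_subver)
			return &id_table[i];
	return NULL;	/* unknown IC: caller drops to a safe fallback */
}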
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 60d2fce59a71..75c83768c257 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -384,6 +384,12 @@ static const struct usb_device_id blacklist_table[] = {
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x4852), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04c5, 0x165c), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
@@ -410,6 +416,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3563), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3564), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -433,6 +442,10 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
+ /* Additional Realtek 8761B Bluetooth devices */
+ { USB_DEVICE(0x2357, 0x0604), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Additional Realtek 8761BU Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -451,10 +464,6 @@ static const struct usb_device_id blacklist_table[] = {
/* Additional Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
- /* Bluetooth component of Realtek 8852AE device */
- { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK |
- BTUSB_WIDEBAND_SPEECH },
-
{ USB_DEVICE(0x04c5, 0x161f), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0b05, 0x18ef), .driver_info = BTUSB_REALTEK |
@@ -652,11 +661,33 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
static void btusb_qca_cmd_timeout(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
+ struct gpio_desc *reset_gpio = data->reset_gpio;
int err;
if (++data->cmd_timeout_cnt < 5)
return;
+ if (reset_gpio) {
+ bt_dev_err(hdev, "Reset qca device via bt_en gpio");
+
+ /* Toggle the hard reset line. The qca bt device is going to
+ * yank itself off the USB and then replug. The cleanup is handled
+ * correctly on the way out (standard USB disconnect), and the new
+ * device is detected cleanly and bound to the driver again like
+ * it should be.
+ */
+ if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) {
+ bt_dev_err(hdev, "last reset failed? Not resetting again");
+ return;
+ }
+
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ msleep(200);
+ gpiod_set_value_cansleep(reset_gpio, 1);
+
+ return;
+ }
+
bt_dev_err(hdev, "Multiple cmd timeouts seen. Resetting usb device.");
/* This is not an unbalanced PM reference since the device will reset */
err = usb_autopm_get_interface(data->intf);
@@ -2200,6 +2231,23 @@ struct btmtk_section_map {
};
} __packed;
+static int btusb_set_bdaddr_mtk(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ long ret;
+
+ skb = __hci_cmd_sync(hdev, 0xfc1a, sizeof(bdaddr), bdaddr, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ bt_dev_err(hdev, "changing Mediatek device address failed (%ld)",
+ ret);
+ return ret;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
static void btusb_mtk_wmt_recv(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
@@ -2804,6 +2852,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
case 0x7668:
fwname = FIRMWARE_MT7668;
break;
+ case 0x7922:
case 0x7961:
snprintf(fw_bin_name, sizeof(fw_bin_name),
"mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
@@ -3591,11 +3640,11 @@ static void btusb_check_needs_reset_resume(struct usb_interface *intf)
interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
-static bool btusb_prevent_wake(struct hci_dev *hdev)
+static bool btusb_wakeup(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
- return !device_may_wakeup(&data->udev->dev);
+ return device_may_wakeup(&data->udev->dev);
}
static int btusb_shutdown_qca(struct hci_dev *hdev)
@@ -3752,7 +3801,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->flush = btusb_flush;
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
- hdev->prevent_wake = btusb_prevent_wake;
+ hdev->wakeup = btusb_wakeup;
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
@@ -3819,6 +3868,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->shutdown = btusb_mtk_shutdown;
hdev->manufacturer = 70;
hdev->cmd_timeout = btusb_mtk_cmd_timeout;
+ hdev->set_bdaddr = btusb_set_bdaddr_mtk;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
data->recv_acl = btusb_recv_acl_mtk;
}
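btusb_qca_cmd_timeout() arms the GPIO reset only after repeated timeouts and guards it with test_and_set_bit(), so a reset already in flight is never retriggered. A C11 sketch of the same one-shot guard (hypothetical names; the real code uses the kernel's atomic bitops):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag reset_active = ATOMIC_FLAG_INIT;
static int timeout_cnt;

static bool maybe_hard_reset(void)
{
	if (++timeout_cnt < 5)		/* tolerate a few stray timeouts */
		return false;
	if (atomic_flag_test_and_set(&reset_active))
		return false;		/* a reset is already in progress */
	/* toggle the reset line here: drive low, wait ~200 ms, drive high */
	return true;
}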
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 0c0dedece59c..34286ffe0568 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -587,9 +587,11 @@ static int h5_recv(struct hci_uart *hu, const void *data, int count)
count -= processed;
}
- pm_runtime_get(&hu->serdev->dev);
- pm_runtime_mark_last_busy(&hu->serdev->dev);
- pm_runtime_put_autosuspend(&hu->serdev->dev);
+ if (hu->serdev) {
+ pm_runtime_get(&hu->serdev->dev);
+ pm_runtime_mark_last_busy(&hu->serdev->dev);
+ pm_runtime_put_autosuspend(&hu->serdev->dev);
+ }
return 0;
}
@@ -814,7 +816,6 @@ static int h5_serdev_probe(struct serdev_device *serdev)
struct device *dev = &serdev->dev;
struct h5 *h5;
const struct h5_device_data *data;
- int err;
h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
if (!h5)
@@ -846,6 +847,8 @@ static int h5_serdev_probe(struct serdev_device *serdev)
h5->vnd = data->vnd;
}
+ if (data->driver_info & H5_INFO_WAKEUP_DISABLE)
+ set_bit(H5_WAKEUP_DISABLE, &h5->flags);
h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(h5->enable_gpio))
@@ -856,14 +859,7 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (IS_ERR(h5->device_wake_gpio))
return PTR_ERR(h5->device_wake_gpio);
- err = hci_uart_register_device(&h5->serdev_hu, &h5p);
- if (err)
- return err;
-
- if (data->driver_info & H5_INFO_WAKEUP_DISABLE)
- set_bit(H5_WAKEUP_DISABLE, &h5->flags);
-
- return 0;
+ return hci_uart_register_device(&h5->serdev_hu, &h5p);
}
static void h5_serdev_remove(struct serdev_device *serdev)
@@ -962,11 +958,13 @@ static void h5_btrtl_open(struct h5 *h5)
serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
serdev_device_set_baudrate(h5->hu->serdev, 115200);
- pm_runtime_set_active(&h5->hu->serdev->dev);
- pm_runtime_use_autosuspend(&h5->hu->serdev->dev);
- pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev,
- SUSPEND_TIMEOUT_MS);
- pm_runtime_enable(&h5->hu->serdev->dev);
+ if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) {
+ pm_runtime_set_active(&h5->hu->serdev->dev);
+ pm_runtime_use_autosuspend(&h5->hu->serdev->dev);
+ pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev,
+ SUSPEND_TIMEOUT_MS);
+ pm_runtime_enable(&h5->hu->serdev->dev);
+ }
/* The controller needs up to 500ms to wakeup */
gpiod_set_value_cansleep(h5->enable_gpio, 1);
@@ -976,7 +974,8 @@ static void h5_btrtl_open(struct h5 *h5)
static void h5_btrtl_close(struct h5 *h5)
{
- pm_runtime_disable(&h5->hu->serdev->dev);
+ if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags))
+ pm_runtime_disable(&h5->hu->serdev->dev);
gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
gpiod_set_value_cansleep(h5->enable_gpio, 0);
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 5ed2cfa7da1d..5e32e4d5367a 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -479,6 +479,9 @@ static int hci_uart_tty_open(struct tty_struct *tty)
BT_DBG("tty %p", tty);
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
/* Error if the tty has no write op instead of leaving an exploitable
* hole
*/
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 53deea2eb7b4..dd768a8ed7cb 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1577,7 +1577,7 @@ static void qca_cmd_timeout(struct hci_dev *hdev)
mutex_unlock(&qca->hci_memdump_lock);
}
-static bool qca_prevent_wake(struct hci_dev *hdev)
+static bool qca_wakeup(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
bool wakeup;
@@ -1730,6 +1730,7 @@ retry:
if (qca_is_wcn399x(soc_type) ||
qca_is_wcn6750(soc_type)) {
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ hci_set_aosp_capable(hdev);
ret = qca_read_soc_version(hdev, &ver, soc_type);
if (ret)
@@ -1764,7 +1765,7 @@ retry:
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
hu->hdev->cmd_timeout = qca_cmd_timeout;
- hu->hdev->prevent_wake = qca_prevent_wake;
+ hu->hdev->wakeup = qca_wakeup;
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
set_bit(QCA_ROM_FW, &qca->flags);
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 8ab26dec5f6e..b45db0db347c 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -21,6 +21,7 @@
#include <linux/skbuff.h>
#include <linux/miscdevice.h>
+#include <linux/debugfs.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -37,6 +38,9 @@ struct vhci_data {
struct mutex open_mutex;
struct delayed_work open_timeout;
+
+ bool suspended;
+ bool wakeup;
};
static int vhci_open_dev(struct hci_dev *hdev)
@@ -73,6 +77,115 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+static int vhci_get_data_path_id(struct hci_dev *hdev, u8 *data_path_id)
+{
+ *data_path_id = 0;
+ return 0;
+}
+
+static int vhci_get_codec_config_data(struct hci_dev *hdev, __u8 type,
+ struct bt_codec *codec, __u8 *vnd_len,
+ __u8 **vnd_data)
+{
+ if (type != ESCO_LINK)
+ return -EINVAL;
+
+ *vnd_len = 0;
+ *vnd_data = NULL;
+ return 0;
+}
+
+static bool vhci_wakeup(struct hci_dev *hdev)
+{
+ struct vhci_data *data = hci_get_drvdata(hdev);
+
+ return data->wakeup;
+}
+
+static ssize_t force_suspend_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ char buf[3];
+
+ buf[0] = data->suspended ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_suspend_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ bool enable;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &enable);
+ if (err)
+ return err;
+
+ if (data->suspended == enable)
+ return -EALREADY;
+
+ if (enable)
+ err = hci_suspend_dev(data->hdev);
+ else
+ err = hci_resume_dev(data->hdev);
+
+ if (err)
+ return err;
+
+ data->suspended = enable;
+
+ return count;
+}
+
+static const struct file_operations force_suspend_fops = {
+ .open = simple_open,
+ .read = force_suspend_read,
+ .write = force_suspend_write,
+ .llseek = default_llseek,
+};
+
+static ssize_t force_wakeup_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ char buf[3];
+
+ buf[0] = data->wakeup ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_wakeup_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ bool enable;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &enable);
+ if (err)
+ return err;
+
+ if (data->wakeup == enable)
+ return -EALREADY;
+
+ data->wakeup = enable;
+
+ return count;
+}
+
+static const struct file_operations force_wakeup_fops = {
+ .open = simple_open,
+ .read = force_wakeup_read,
+ .write = force_wakeup_write,
+ .llseek = default_llseek,
+};
+
static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
struct hci_dev *hdev;
@@ -112,6 +225,9 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
hdev->close = vhci_close_dev;
hdev->flush = vhci_flush;
hdev->send = vhci_send_frame;
+ hdev->get_data_path_id = vhci_get_data_path_id;
+ hdev->get_codec_config_data = vhci_get_codec_config_data;
+ hdev->wakeup = vhci_wakeup;
/* bit 6 is for external configuration */
if (opcode & 0x40)
@@ -129,6 +245,12 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
return -EBUSY;
}
+ debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
+ &force_suspend_fops);
+
+ debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
+ &force_wakeup_fops);
+
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
skb_put_u8(skb, 0xff);
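The force_suspend/force_wakeup files speak a tiny protocol: reads return "Y\n" or "N\n", writes accept the usual boolean spellings, and writing the current state fails with -EALREADY. A userspace sketch of that contract, with hypothetical helper names:

#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <strings.h>

static int parse_bool(const char *s, bool *out)
{
	if (!strcmp(s, "1") || !strcasecmp(s, "y") || !strcasecmp(s, "yes"))
		*out = true;
	else if (!strcmp(s, "0") || !strcasecmp(s, "n") || !strcasecmp(s, "no"))
		*out = false;
	else
		return -EINVAL;
	return 0;
}

static int toggle(bool *state, bool enable)
{
	if (*state == enable)
		return -EALREADY;	/* refuse a no-op write */
	*state = enable;
	return 0;
}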
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index a5b96f3aad67..a4cf3d692dc3 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -152,18 +152,6 @@ config QCOM_EBI2
Interface 2, which can be used to connect things like NAND Flash,
SRAM, ethernet adapters, FPGAs and LCD displays.
-config SIMPLE_PM_BUS
- tristate "Simple Power-Managed Bus Driver"
- depends on OF && PM
- help
- Driver for transparent busses that don't need a real driver, but
- where the bus controller is part of a PM domain, or under the control
- of a functional clock, and thus relies on runtime PM for managing
- this PM domain and/or clock.
- An example of such a bus controller is the Renesas Bus State
- Controller (BSC, sometimes called "LBSC within Bus Bridge", or
- "External Bus Interface") as found on several Renesas ARM SoCs.
-
config SUN50I_DE2_BUS
bool "Allwinner A64 DE2 Bus Driver"
default ARM64
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 1c29c5e8ffb8..52c2f35a26a9 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
obj-$(CONFIG_SUN50I_DE2_BUS) += sun50i-de2.o
obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
-obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
+obj-$(CONFIG_OF) += simple-pm-bus.o
obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o
obj-$(CONFIG_TI_PWMSS) += ti-pwmss.o
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index 01a3d0cd08ed..6b8d6257ed8a 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -13,11 +13,36 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-
static int simple_pm_bus_probe(struct platform_device *pdev)
{
- const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
- struct device_node *np = pdev->dev.of_node;
+ const struct device *dev = &pdev->dev;
+ const struct of_dev_auxdata *lookup = dev_get_platdata(dev);
+ struct device_node *np = dev->of_node;
+ const struct of_device_id *match;
+
+ /*
+ * Allow user to use driver_override to bind this driver to a
+ * transparent bus device which has a different compatible string
+ * that's not listed in simple_pm_bus_of_match. We don't want to do any
+ * of the simple-pm-bus tasks for these devices, so return early.
+ */
+ if (pdev->driver_override)
+ return 0;
+
+ match = of_match_device(dev->driver->of_match_table, dev);
+ /*
+ * These are transparent bus devices (not simple-pm-bus matches) that
+ * have their child nodes populated automatically, so we don't need to
+ * do anything more. We only match the device if this driver is the
+ * most specific match, because we don't want to incorrectly bind to a
+ * device that has a more specific driver.
+ */
+ if (match && match->data) {
+ if (of_property_match_string(np, "compatible", match->compatible) == 0)
+ return 0;
+ else
+ return -ENODEV;
+ }
dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -31,14 +56,25 @@ static int simple_pm_bus_probe(struct platform_device *pdev)
static int simple_pm_bus_remove(struct platform_device *pdev)
{
+ const void *data = of_device_get_match_data(&pdev->dev);
+
+ if (pdev->driver_override || data)
+ return 0;
+
dev_dbg(&pdev->dev, "%s\n", __func__);
pm_runtime_disable(&pdev->dev);
return 0;
}
+#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */
+
static const struct of_device_id simple_pm_bus_of_match[] = {
{ .compatible = "simple-pm-bus", },
+ { .compatible = "simple-bus", .data = ONLY_BUS },
+ { .compatible = "simple-mfd", .data = ONLY_BUS },
+ { .compatible = "isa", .data = ONLY_BUS },
+ { .compatible = "arm,amba-bus", .data = ONLY_BUS },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
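simple-pm-bus now binds to every generic bus compatible but uses the match .data as a sentinel: NULL means a real simple-pm-bus that needs runtime PM, while ONLY_BUS marks a transparent bus the driver should claim and then leave alone. A condensed sketch of that dispatch, with hypothetical helpers:

#include <errno.h>
#include <stddef.h>
#include <string.h>

#define ONLY_BUS ((const void *)1)

struct match {
	const char *compatible;
	const void *data;
};

static const struct match table[] = {
	{ "simple-pm-bus", NULL },	/* needs runtime PM handling */
	{ "simple-bus", ONLY_BUS },	/* transparent: bind, do nothing */
	{ NULL, NULL }
};

static int probe(const char *compatible)
{
	for (const struct match *m = table; m->compatible; m++) {
		if (strcmp(m->compatible, compatible))
			continue;
		if (m->data == ONLY_BUS)
			return 0;	/* claimed, but no PM work to do */
		/* enable runtime PM here for the real simple-pm-bus case */
		return 0;
	}
	return -ENODEV;
}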
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index a51c2a8feed9..6a8b7fb5be58 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1464,6 +1464,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
/* Quirks that need to be set based on detected module */
SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
SYSC_MODULE_QUIRK_AESS),
+ /* Errata i893 handling for dra7 dcan1 and 2 */
+ SYSC_QUIRK("dcan", 0x4ae3c000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
+ SYSC_QUIRK_CLKDM_NOAUTO),
SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
@@ -2954,6 +2957,7 @@ static int sysc_init_soc(struct sysc *ddata)
break;
case SOC_AM3:
sysc_add_disabled(0x48310000); /* rng */
+ break;
default:
break;
}
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 0a5596797b93..9ef007b3cf9b 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -564,6 +564,7 @@ config SM_GCC_6125
config SM_GCC_6350
tristate "SM6350 Global Clock Controller"
+ select QCOM_GDSC
help
Support for the global clock controller on SM6350 devices.
Say Y if you want to use peripheral devices such as UART,
diff --git a/drivers/clk/qcom/gcc-sm6115.c b/drivers/clk/qcom/gcc-sm6115.c
index bc09736ece76..68fe9f6f0d2f 100644
--- a/drivers/clk/qcom/gcc-sm6115.c
+++ b/drivers/clk/qcom/gcc-sm6115.c
@@ -3242,7 +3242,7 @@ static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = {
};
static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = {
- .gdscr = 0x7d060,
+ .gdscr = 0x7d07c,
.pd = {
.name = "hlos1_vote_turing_mmu_tbu0",
},
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index 4c94b94c4125..1490446985e2 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -186,6 +186,8 @@ static struct rzg2l_reset r9a07g044_resets[] = {
static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
MOD_CLK_BASE + R9A07G044_GIC600_GICCLK,
+ MOD_CLK_BASE + R9A07G044_IA55_CLK,
+ MOD_CLK_BASE + R9A07G044_DMAC_ACLK,
};
const struct rzg2l_cpg_info r9a07g044_cpg_info = {
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 3b3b2c3347f3..761922ea5db7 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -391,7 +391,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
value = readl(priv->base + CLK_MON_R(clock->off));
- return !(value & bitmask);
+ return value & bitmask;
}
static const struct clk_ops rzg2l_mod_clock_ops = {
diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c
index 242e94c0cf8a..bf8cd928c228 100644
--- a/drivers/clk/socfpga/clk-agilex.c
+++ b/drivers/clk/socfpga/clk-agilex.c
@@ -165,13 +165,6 @@ static const struct clk_parent_data mpu_mux[] = {
.name = "boot_clk", },
};
-static const struct clk_parent_data s2f_usr0_mux[] = {
- { .fw_name = "f2s-free-clk",
- .name = "f2s-free-clk", },
- { .fw_name = "boot_clk",
- .name = "boot_clk", },
-};
-
static const struct clk_parent_data emac_mux[] = {
{ .fw_name = "emaca_free_clk",
.name = "emaca_free_clk", },
@@ -312,8 +305,6 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
4, 0x44, 28, 1, 0, 0, 0},
{ AGILEX_CS_TIMER_CLK, "cs_timer_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
5, 0, 0, 0, 0x30, 1, 0},
- { AGILEX_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_usr0_mux, ARRAY_SIZE(s2f_usr0_mux), 0, 0x24,
- 6, 0, 0, 0, 0, 0, 0},
{ AGILEX_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
0, 0, 0, 0, 0x94, 26, 0},
{ AGILEX_EMAC1_CLK, "emac1_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index df77b6bf5c64..763cea8418f8 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -3090,6 +3090,7 @@ static int compat_insnlist(struct file *file, unsigned long arg)
mutex_lock(&dev->mutex);
rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file);
mutex_unlock(&dev->mutex);
+ kfree(insns);
return rc;
}
diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c
index 66b05a326910..a6f365b9cc1a 100644
--- a/drivers/cpufreq/cpufreq_governor_attr_set.c
+++ b/drivers/cpufreq/cpufreq_governor_attr_set.c
@@ -74,8 +74,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l
if (count)
return count;
- kobject_put(&attr_set->kobj);
mutex_destroy(&attr_set->update_lock);
+ kobject_put(&attr_set->kobj);
return 0;
}
EXPORT_SYMBOL_GPL(gov_attr_set_put);
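The cpufreq fix is purely about ordering: kobject_put() can drop the last reference and free the structure that embeds the mutex, so mutex_destroy() must run first. The same rule in a userspace sketch:

#include <pthread.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;
	int refs;		/* assume callers serialize access */
};

static void obj_put(struct obj *o)
{
	if (--o->refs)
		return;
	/* Tear down members BEFORE the release that frees the object;
	 * free() here stands in for kobject_put() dropping the last ref.
	 */
	pthread_mutex_destroy(&o->lock);
	free(o);
}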
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 1097f826ad70..8c176b7dae41 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -3205,11 +3205,15 @@ static int __init intel_pstate_init(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return -ENODEV;
- if (no_load)
- return -ENODEV;
-
id = x86_match_cpu(hwp_support_ids);
if (id) {
+ bool hwp_forced = intel_pstate_hwp_is_enabled();
+
+ if (hwp_forced)
+ pr_info("HWP enabled by BIOS\n");
+ else if (no_load)
+ return -ENODEV;
+
copy_cpu_funcs(&core_funcs);
/*
* Avoid enabling HWP for processors without EPP support,
@@ -3219,8 +3223,7 @@ static int __init intel_pstate_init(void)
* If HWP is enabled already, though, there is no choice but to
* deal with it.
*/
- if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
- intel_pstate_hwp_is_enabled()) {
+ if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
hwp_active++;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
@@ -3235,7 +3238,11 @@ static int __init intel_pstate_init(void)
goto hwp_cpu_matched;
}
+ pr_info("HWP not enabled\n");
} else {
+ if (no_load)
+ return -ENODEV;
+
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id) {
pr_info("CPU model not supported\n");
@@ -3314,10 +3321,9 @@ static int __init intel_pstate_setup(char *str)
else if (!strcmp(str, "passive"))
default_driver = &intel_cpufreq;
- if (!strcmp(str, "no_hwp")) {
- pr_info("HWP disabled\n");
+ if (!strcmp(str, "no_hwp"))
no_hwp = 1;
- }
+
if (!strcmp(str, "force"))
force_load = 1;
if (!strcmp(str, "hwp_only"))
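The intel_pstate change makes "HWP already enabled by the BIOS" override intel_pstate=disable, because once firmware turns HWP on the driver has to manage it. A condensed sketch of the resulting load decision (the booleans distill the patch; the real init also checks CPU model support and EPP):

#include <stdbool.h>

/* Returns true when the driver should register. */
static bool should_load(bool cpu_has_hwp_ids, bool hwp_forced_by_bios,
			bool no_load)
{
	if (cpu_has_hwp_ids && hwp_forced_by_bios)
		return true;	/* no choice but to deal with it */
	return !no_load;	/* otherwise honor intel_pstate=disable */
}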
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index bb88198c874e..aa4e1a500691 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -778,7 +778,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
in_place ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
if (ret)
- goto e_ctx;
+ goto e_aad;
if (in_place) {
dst = src;
@@ -863,7 +863,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.aes.size = 0;
ret = cmd_q->ccp->vdata->perform->aes(&op);
if (ret)
- goto e_dst;
+ goto e_final_wa;
if (aes->action == CCP_AES_ACTION_ENCRYPT) {
/* Put the ciphered tag after the ciphertext. */
@@ -873,17 +873,19 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
DMA_BIDIRECTIONAL);
if (ret)
- goto e_tag;
+ goto e_final_wa;
ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
- if (ret)
- goto e_tag;
+ if (ret) {
+ ccp_dm_free(&tag);
+ goto e_final_wa;
+ }
ret = crypto_memneq(tag.address, final_wa.address,
authsize) ? -EBADMSG : 0;
ccp_dm_free(&tag);
}
-e_tag:
+e_final_wa:
ccp_dm_free(&final_wa);
e_dst:
diff --git a/drivers/edac/armada_xp_edac.c b/drivers/edac/armada_xp_edac.c
index e3e757513d1b..b1f46a974b9e 100644
--- a/drivers/edac/armada_xp_edac.c
+++ b/drivers/edac/armada_xp_edac.c
@@ -178,7 +178,7 @@ static void axp_mc_check(struct mem_ctl_info *mci)
"details unavailable (multiple errors)");
if (cnt_dbe)
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
- cnt_sbe, /* error count */
+ cnt_dbe, /* error count */
0, 0, 0, /* pfn, offset, syndrome */
-1, -1, -1, /* top, mid, low layer */
mci->ctl_name,
diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c
index fc1153ab1ebb..b8a7d9594afd 100644
--- a/drivers/edac/dmc520_edac.c
+++ b/drivers/edac/dmc520_edac.c
@@ -464,7 +464,7 @@ static void dmc520_init_csrow(struct mem_ctl_info *mci)
dimm->grain = pvt->mem_width_in_bytes;
dimm->dtype = dt;
dimm->mtype = mt;
- dimm->edac_mode = EDAC_FLAG_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
dimm->nr_pages = pages_per_rank / csi->nr_channels;
}
}
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 7e7146b22c16..7d08627e738b 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -782,7 +782,7 @@ static void init_csrows(struct mem_ctl_info *mci)
for (j = 0; j < csi->nr_channels; j++) {
dimm = csi->channels[j]->dimm;
- dimm->edac_mode = EDAC_FLAG_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
dimm->mtype = p_data->get_mtype(priv->baseaddr);
dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
dimm->grain = SYNPS_EDAC_ERR_GRAIN;
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 220a58cf0a44..cda7d7162cbb 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -203,10 +203,7 @@ config INTEL_STRATIX10_RSU
Say Y here if you want Intel RSU support.
config QCOM_SCM
- tristate "Qcom SCM driver"
- depends on ARM || ARM64
- depends on HAVE_ARM_SMCCC
- select RESET_CONTROLLER
+ tristate
config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
bool "Qualcomm download mode enabled by default"
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index 00fe595a5bc8..641a91819088 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -49,6 +49,13 @@ static int ffa_device_probe(struct device *dev)
return ffa_drv->probe(ffa_dev);
}
+static void ffa_device_remove(struct device *dev)
+{
+ struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
+
+ ffa_drv->remove(to_ffa_dev(dev));
+}
+
static int ffa_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct ffa_device *ffa_dev = to_ffa_dev(dev);
@@ -86,6 +93,7 @@ struct bus_type ffa_bus_type = {
.name = "arm_ffa",
.match = ffa_device_match,
.probe = ffa_device_probe,
+ .remove = ffa_device_remove,
.uevent = ffa_device_uevent,
.dev_groups = ffa_device_attributes_groups,
};
@@ -127,7 +135,7 @@ static void ffa_release_device(struct device *dev)
static int __ffa_devices_unregister(struct device *dev, void *data)
{
- ffa_release_device(dev);
+ device_unregister(dev);
return 0;
}
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index 7f4d2435503b..3d7081e84853 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -68,7 +68,7 @@ config ARM_SCMI_TRANSPORT_SMC
config ARM_SCMI_TRANSPORT_VIRTIO
bool "SCMI transport based on VirtIO"
- depends on VIRTIO
+ depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL
select ARM_SCMI_HAVE_TRANSPORT
select ARM_SCMI_HAVE_MSG
help
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
index 224577f86928..11e8efb71375 100644
--- a/drivers/firmware/arm_scmi/virtio.c
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -110,18 +110,16 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch,
if (vioch->is_rx) {
scmi_vio_feed_vq_rx(vioch, msg);
} else {
- unsigned long flags;
-
- spin_lock_irqsave(&vioch->lock, flags);
+ /* Here IRQs are assumed to be already disabled by the caller */
+ spin_lock(&vioch->lock);
list_add(&msg->list, &vioch->free_list);
- spin_unlock_irqrestore(&vioch->lock, flags);
+ spin_unlock(&vioch->lock);
}
}
static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{
unsigned long ready_flags;
- unsigned long flags;
unsigned int length;
struct scmi_vio_channel *vioch;
struct scmi_vio_msg *msg;
@@ -140,7 +138,8 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
goto unlock_ready_out;
}
- spin_lock_irqsave(&vioch->lock, flags);
+ /* IRQs are already disabled here, no need to irqsave */
+ spin_lock(&vioch->lock);
if (cb_enabled) {
virtqueue_disable_cb(vqueue);
cb_enabled = false;
@@ -151,7 +150,7 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
goto unlock_out;
cb_enabled = true;
}
- spin_unlock_irqrestore(&vioch->lock, flags);
+ spin_unlock(&vioch->lock);
if (msg) {
msg->rx_len = length;
@@ -161,11 +160,18 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
scmi_finalize_message(vioch, msg);
}
+ /*
+ * Release ready_lock and re-enable IRQs between loop iterations
+ * to allow virtio_chan_free() to possibly kick in and set the
+ * flag vioch->ready to false even in between processing of
+ * messages, so as to force outstanding messages to be ignored
+ * when system is shutting down.
+ */
spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
}
unlock_out:
- spin_unlock_irqrestore(&vioch->lock, flags);
+ spin_unlock(&vioch->lock);
unlock_ready_out:
spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
}
@@ -384,8 +390,11 @@ static int scmi_vio_probe(struct virtio_device *vdev)
struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];
/* Only one SCMI VirtiO device allowed */
- if (scmi_vdev)
- return -EINVAL;
+ if (scmi_vdev) {
+ dev_err(dev,
+ "One SCMI Virtio device was already initialized: only one allowed.\n");
+ return -EBUSY;
+ }
have_vq_rx = scmi_vio_have_vq_rx(vdev);
vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;
@@ -428,16 +437,25 @@ static int scmi_vio_probe(struct virtio_device *vdev)
}
vdev->priv = channels;
- scmi_vdev = vdev;
+ /* Ensure initialized scmi_vdev is visible */
+ smp_store_mb(scmi_vdev, vdev);
return 0;
}
static void scmi_vio_remove(struct virtio_device *vdev)
{
+ /*
+ * Once we get here, virtio_chan_free() will have already been called by
+ * the SCMI core for any existing channel and, as a consequence, all the
+ * virtio channels will have been already marked NOT ready, causing any
+ * outstanding message on any vqueue to be ignored by complete_cb: now
+ * we can just stop processing buffers and destroy the vqueues.
+ */
vdev->config->reset(vdev);
vdev->config->del_vqs(vdev);
- scmi_vdev = NULL;
+ /* Ensure scmi_vdev is visible as NULL */
+ smp_store_mb(scmi_vdev, NULL);
}
static int scmi_vio_validate(struct virtio_device *vdev)
@@ -476,7 +494,7 @@ static int __init virtio_scmi_init(void)
return register_virtio_driver(&virtio_scmi_driver);
}
-static void __exit virtio_scmi_exit(void)
+static void virtio_scmi_exit(void)
{
unregister_virtio_driver(&virtio_scmi_driver);
}
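scmi_vio_probe() publishes the device pointer with smp_store_mb(), so by the time another CPU observes scmi_vdev non-NULL, all the channel setup writes before it are visible too. A rough C11 analogue of that publish/read pairing (names are illustrative):

#include <stdatomic.h>

static _Atomic(void *) scmi_vdev_ptr;	/* NULL until a device exists */

static void publish(void *vdev)
{
	/* Full barrier on the store, as smp_store_mb() provides. */
	atomic_store_explicit(&scmi_vdev_ptr, vdev, memory_order_seq_cst);
}

static void *lookup(void)
{
	return atomic_load_explicit(&scmi_vdev_ptr, memory_order_acquire);
}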
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 73bdbd207e7a..6ec8edec6329 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -25,8 +25,6 @@
#include <acpi/ghes.h>
#include <ras/ras_event.h>
-static char rcd_decode_str[CPER_REC_LEN];
-
/*
* CPER record ID need to be unique even after reboot, because record
* ID is used as index for ERST storage, while CPER records from
@@ -312,6 +310,7 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
struct cper_mem_err_compact *cmem)
{
const char *ret = trace_seq_buffer_ptr(p);
+ char rcd_decode_str[CPER_REC_LEN];
if (cper_mem_err_location(cmem, rcd_decode_str))
trace_seq_printf(p, "%s", rcd_decode_str);
@@ -326,6 +325,7 @@ static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
int len)
{
struct cper_mem_err_compact cmem;
+ char rcd_decode_str[CPER_REC_LEN];
/* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
if (len == sizeof(struct cper_sec_mem_err_old) &&
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 365c3a43a198..fe567be0f118 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -271,7 +271,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
return status;
}
- efi_info("Exiting boot services and installing virtual address map...\n");
+ efi_info("Exiting boot services...\n");
map.map = &memory_map;
status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, ULONG_MAX);
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 1410beaef5c3..f3e54f6616f0 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -414,7 +414,7 @@ static void virt_efi_reset_system(int reset_type,
unsigned long data_size,
efi_char16_t *data)
{
- if (down_interruptible(&efi_runtime_lock)) {
+ if (down_trylock(&efi_runtime_lock)) {
pr_warn("failed to invoke the reset_system() runtime service:\n"
"could not get exclusive access to the firmware\n");
return;
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index c99b78ee008a..f86666cf2c6a 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -1019,16 +1019,18 @@ create_feature_instance(struct build_feature_devs_info *binfo,
{
unsigned int irq_base, nr_irqs;
struct dfl_feature_info *finfo;
+ u8 revision = 0;
int ret;
- u8 revision;
u64 v;
- v = readq(binfo->ioaddr + ofst);
- revision = FIELD_GET(DFH_REVISION, v);
+ if (fid != FEATURE_ID_AFU) {
+ v = readq(binfo->ioaddr + ofst);
+ revision = FIELD_GET(DFH_REVISION, v);
- /* read feature size and id if inputs are invalid */
- size = size ? size : feature_size(v);
- fid = fid ? fid : feature_id(v);
+ /* read feature size and id if inputs are invalid */
+ size = size ? size : feature_size(v);
+ fid = fid ? fid : feature_id(v);
+ }
if (binfo->len - ofst < size)
return -EINVAL;
diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c
index 69dec5af23c3..029d3cdb918d 100644
--- a/drivers/fpga/ice40-spi.c
+++ b/drivers/fpga/ice40-spi.c
@@ -192,12 +192,19 @@ static const struct of_device_id ice40_fpga_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ice40_fpga_of_match);
+static const struct spi_device_id ice40_fpga_spi_ids[] = {
+ { .name = "ice40-fpga-mgr", },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, ice40_fpga_spi_ids);
+
static struct spi_driver ice40_fpga_driver = {
.probe = ice40_fpga_probe,
.driver = {
.name = "ice40spi",
.of_match_table = of_match_ptr(ice40_fpga_of_match),
},
+ .id_table = ice40_fpga_spi_ids,
};
module_spi_driver(ice40_fpga_driver);
diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
index 1afb41aa20d7..ea2ec3c6815c 100644
--- a/drivers/fpga/machxo2-spi.c
+++ b/drivers/fpga/machxo2-spi.c
@@ -225,8 +225,10 @@ static int machxo2_write_init(struct fpga_manager *mgr,
goto fail;
get_status(spi, &status);
- if (test_bit(FAIL, &status))
+ if (test_bit(FAIL, &status)) {
+ ret = -EINVAL;
goto fail;
+ }
dump_status_reg(&status);
spi_message_init(&msg);
@@ -313,6 +315,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
dump_status_reg(&status);
if (!test_bit(DONE, &status)) {
machxo2_cleanup(mgr);
+ ret = -EINVAL;
goto fail;
}
@@ -335,6 +338,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
break;
if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) {
machxo2_cleanup(mgr);
+ ret = -EINVAL;
goto fail;
}
} while (1);
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 05637d585152..4a55cdf089d6 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -174,6 +174,13 @@ static int gen_74x164_remove(struct spi_device *spi)
return 0;
}
+static const struct spi_device_id gen_74x164_spi_ids[] = {
+ { .name = "74hc595" },
+ { .name = "74lvc594" },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, gen_74x164_spi_ids);
+
static const struct of_device_id gen_74x164_dt_ids[] = {
{ .compatible = "fairchild,74hc595" },
{ .compatible = "nxp,74lvc594" },
@@ -188,6 +195,7 @@ static struct spi_driver gen_74x164_driver = {
},
.probe = gen_74x164_probe,
.remove = gen_74x164_remove,
+ .id_table = gen_74x164_spi_ids,
};
module_spi_driver(gen_74x164_driver);
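Both SPI patches add the same missing piece: with only an of_match_table the driver binds fine, but the SPI core emits MODALIAS=spi:<name> uevents, so module autoloading also needs a spi_device_id table. A minimal sketch with a hypothetical "foo" device:

#include <linux/module.h>
#include <linux/spi/spi.h>

static const struct spi_device_id foo_spi_ids[] = {
	{ .name = "foo" },		/* must match the uevent alias */
	{}
};
MODULE_DEVICE_TABLE(spi, foo_spi_ids);

static const struct of_device_id foo_of_ids[] = {
	{ .compatible = "vendor,foo" },
	{}
};
MODULE_DEVICE_TABLE(of, foo_of_ids);

static struct spi_driver foo_driver = {
	.driver = {
		.name = "foo",
		.of_match_table = foo_of_ids,
	},
	.id_table = foo_spi_ids,	/* the piece these patches add */
	/* .probe/.remove omitted in this sketch */
};
module_spi_driver(foo_driver);

MODULE_LICENSE("GPL");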
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 10f303d15225..3d6ef37a7702 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -395,7 +395,7 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
reg = ioread32(bank_reg(data, bank, reg_irq_status));
for_each_set_bit(p, &reg, 32)
- generic_handle_domain_irq(gc->irq.domain, i * 32 + p);
+ generic_handle_domain_irq(gc->irq.domain, i * 32 + p * 2);
}
chained_irq_exit(ic, desc);
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 0a9d746a0fe0..d26bff29157b 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -476,10 +476,19 @@ static struct platform_device *gpio_mockup_pdevs[GPIO_MOCKUP_MAX_GC];
static void gpio_mockup_unregister_pdevs(void)
{
+ struct platform_device *pdev;
+ struct fwnode_handle *fwnode;
int i;
- for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++)
- platform_device_unregister(gpio_mockup_pdevs[i]);
+ for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++) {
+ pdev = gpio_mockup_pdevs[i];
+ if (!pdev)
+ continue;
+
+ fwnode = dev_fwnode(&pdev->dev);
+ platform_device_unregister(pdev);
+ fwnode_remove_software_node(fwnode);
+ }
}
static __init char **gpio_mockup_make_line_names(const char *label,
@@ -508,6 +517,7 @@ static int __init gpio_mockup_register_chip(int idx)
struct property_entry properties[GPIO_MOCKUP_MAX_PROP];
struct platform_device_info pdevinfo;
struct platform_device *pdev;
+ struct fwnode_handle *fwnode;
char **line_names = NULL;
char chip_label[32];
int prop = 0, base;
@@ -536,13 +546,18 @@ static int __init gpio_mockup_register_chip(int idx)
"gpio-line-names", line_names, ngpio);
}
+ fwnode = fwnode_create_software_node(properties, NULL);
+ if (IS_ERR(fwnode))
+ return PTR_ERR(fwnode);
+
pdevinfo.name = "gpio-mockup";
pdevinfo.id = idx;
- pdevinfo.properties = properties;
+ pdevinfo.fwnode = fwnode;
pdev = platform_device_register_full(&pdevinfo);
kfree_strarray(line_names, ngpio);
if (IS_ERR(pdev)) {
+ fwnode_remove_software_node(fwnode);
pr_err("error registering device");
return PTR_ERR(pdev);
}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index f5cfc0698799..d2fe76f3f34f 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -468,15 +468,8 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
mutex_lock(&chip->i2c_lock);
ret = regmap_read(chip->regmap, inreg, &reg_val);
mutex_unlock(&chip->i2c_lock);
- if (ret < 0) {
- /*
- * NOTE:
- * diagnostic already emitted; that's all we should
- * do unless gpio_*_value_cansleep() calls become different
- * from their nonsleeping siblings (and report faults).
- */
- return 0;
- }
+ if (ret < 0)
+ return ret;
return !!(reg_val & bit);
}
@@ -566,21 +559,21 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
mutex_lock(&chip->i2c_lock);
- /* Disable pull-up/pull-down */
- ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, 0);
- if (ret)
- goto exit;
-
/* Configure pull-up/pull-down */
if (config == PIN_CONFIG_BIAS_PULL_UP)
ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, bit);
else if (config == PIN_CONFIG_BIAS_PULL_DOWN)
ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, 0);
+ else
+ ret = 0;
if (ret)
goto exit;
- /* Enable pull-up/pull-down */
- ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, bit);
+ /* Disable/Enable pull-up/pull-down */
+ if (config == PIN_CONFIG_BIAS_DISABLE)
+ ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, 0);
+ else
+ ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, bit);
exit:
mutex_unlock(&chip->i2c_lock);
@@ -594,7 +587,9 @@ static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
switch (pinconf_to_config_param(config)) {
case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_DISABLE:
return pca953x_gpio_set_pull_up_down(chip, offset, config);
default:
return -ENOTSUPP;
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index 036b2d959503..ce63cbd14d69 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -141,7 +141,7 @@ static int rockchip_gpio_get_direction(struct gpio_chip *chip,
u32 data;
data = rockchip_gpio_readl_bit(bank, offset, bank->gpio_regs->port_ddr);
- if (data & BIT(offset))
+ if (data)
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
@@ -195,7 +195,7 @@ static int rockchip_gpio_set_debounce(struct gpio_chip *gc,
unsigned int cur_div_reg;
u64 div;
- if (!IS_ERR(bank->db_clk)) {
+ if (bank->gpio_type == GPIO_TYPE_V2 && !IS_ERR(bank->db_clk)) {
div_debounce_support = true;
freq = clk_get_rate(bank->db_clk);
max_debounce = (GENMASK(23, 0) + 1) * 2 * 1000000 / freq;
@@ -689,6 +689,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
struct device_node *pctlnp = of_get_parent(np);
struct pinctrl_dev *pctldev = NULL;
struct rockchip_pin_bank *bank = NULL;
+ struct rockchip_pin_output_deferred *cfg;
static int gpio;
int id, ret;
@@ -716,12 +717,33 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /*
+ * Prevent clashes with a deferred output setting
+ * being added right at this moment.
+ */
+ mutex_lock(&bank->deferred_lock);
+
ret = rockchip_gpiolib_register(bank);
if (ret) {
clk_disable_unprepare(bank->clk);
+ mutex_unlock(&bank->deferred_lock);
return ret;
}
+ while (!list_empty(&bank->deferred_output)) {
+ cfg = list_first_entry(&bank->deferred_output,
+ struct rockchip_pin_output_deferred, head);
+ list_del(&cfg->head);
+
+ ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
+ if (ret)
+ dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin, cfg->arg);
+
+ kfree(cfg);
+ }
+
+ mutex_unlock(&bank->deferred_lock);
+
platform_set_drvdata(pdev, bank);
dev_info(dev, "probed %pOF\n", np);
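The rockchip probe takes deferred_lock, registers the gpiochip, then drains deferred_output under the same lock, so pinctrl cannot queue a new deferred output in the window where it would be neither applied nor picked up. The drain idiom, sketched with hypothetical list helpers:

#include <pthread.h>
#include <stdlib.h>

struct deferred {
	struct deferred *next;
	unsigned int pin, arg;
};

static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;
static struct deferred *deferred_head;

static void drain_deferred(void (*apply)(unsigned int pin, unsigned int arg))
{
	pthread_mutex_lock(&deferred_lock);
	/* Producers take the same lock, so nothing can slip in between
	 * registering the chip and consuming the backlog.
	 */
	while (deferred_head) {
		struct deferred *cfg = deferred_head;

		deferred_head = cfg->next;
		apply(cfg->pin, cfg->arg);
		free(cfg);
	}
	pthread_mutex_unlock(&deferred_lock);
}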
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index f99f3c10bed0..39dca147d587 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -184,7 +184,7 @@ static void uniphier_gpio_irq_mask(struct irq_data *data)
uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, 0);
- return irq_chip_mask_parent(data);
+ irq_chip_mask_parent(data);
}
static void uniphier_gpio_irq_unmask(struct irq_data *data)
@@ -194,7 +194,7 @@ static void uniphier_gpio_irq_unmask(struct irq_data *data)
uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, mask);
- return irq_chip_unmask_parent(data);
+ irq_chip_unmask_parent(data);
}
static int uniphier_gpio_irq_set_type(struct irq_data *data, unsigned int type)
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 411525ac4cc4..47712b6903b5 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -313,9 +313,11 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
if (ret)
- gpiochip_free_own_desc(desc);
+ dev_warn(chip->parent,
+ "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
+ pin, ret);
- return ret ? ERR_PTR(ret) : desc;
+ return desc;
}
static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index dc3c6b3a00e5..269437b01328 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -758,7 +758,7 @@ enum amd_hw_ip_block_type {
MAX_HWIP
};
-#define HWIP_MAX_INSTANCE 8
+#define HWIP_MAX_INSTANCE 10
struct amd_powerplay {
void *pp_handle;
@@ -1087,6 +1087,7 @@ struct amdgpu_device {
bool no_hw_access;
struct pci_saved_state *pci_state;
+ pci_channel_state_t pci_channel_state;
struct amdgpu_reset_control *reset_cntl;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 3003ee1c9487..1d41c2c00623 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -192,6 +192,16 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
kgd2kfd_suspend(adev->kfd.dev, run_pm);
}
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->kfd.dev)
+ r = kgd2kfd_resume_iommu(adev->kfd.dev);
+
+ return r;
+}
+
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
int r = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index ec028cf963f5..3bc52b2c604f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -137,6 +137,7 @@ int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
@@ -327,6 +328,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
@@ -365,6 +367,11 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}
+static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 2d6b2d77b738..054c1a224def 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -563,6 +563,7 @@ kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
sg_free_table(ttm->sg);
+ kfree(ttm->sg);
ttm->sg = NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 277128846dd1..463b9c0283f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1544,20 +1544,18 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
struct dentry *ent;
int r, i;
-
-
ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
&fops_ib_preempt);
- if (!ent) {
+ if (IS_ERR(ent)) {
DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
- return -EIO;
+ return PTR_ERR(ent);
}
ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
&fops_sclk_set);
- if (!ent) {
+ if (IS_ERR(ent)) {
DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
- return -EIO;
+ return PTR_ERR(ent);
}
/* Register debugfs entries for amdgpu_ttm */
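Both amdgpu debugfs fixes correct the failure test: debugfs_create_file() reports errors as ERR_PTR()-encoded pointers, never NULL, so "!ent" can never trigger. IS_ERR() works by reserving the top page of the address space for errno values, roughly:

#include <stdbool.h>

#define MAX_ERRNO 4095

/* Error pointers live in the last page of the address space, so a
 * simple range check distinguishes them from valid kernel pointers.
 */
static inline bool is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static inline long ptr_err(const void *ptr)
{
	return (long)ptr;	/* the negative errno itself */
}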
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 41c6b3aacd37..af9bdf16eefd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2432,6 +2432,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (!adev->gmc.xgmi.pending_reset)
amdgpu_amdkfd_device_init(adev);
+ r = amdgpu_amdkfd_resume_iommu(adev);
+ if (r)
+ goto init_failed;
+
amdgpu_fru_get_product_info(adev);
init_failed:
@@ -3148,6 +3152,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
int r;
+ r = amdgpu_amdkfd_resume_iommu(adev);
+ if (r)
+ return r;
+
r = amdgpu_device_ip_resume_phase1(adev);
if (r)
return r;
@@ -4601,6 +4609,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
dev_warn(tmp_adev->dev, "asic atom init failed!");
} else {
dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+ r = amdgpu_amdkfd_resume_iommu(tmp_adev);
+ if (r)
+ goto out;
+
r = amdgpu_device_ip_resume_phase1(tmp_adev);
if (r)
goto out;
@@ -5387,6 +5399,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
return PCI_ERS_RESULT_DISCONNECT;
}
+ adev->pci_channel_state = state;
+
switch (state) {
case pci_channel_io_normal:
return PCI_ERS_RESULT_CAN_RECOVER;
@@ -5529,6 +5543,10 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
DRM_INFO("PCI error: resume callback!!\n");
+ /* Only continue execution for the case of pci_channel_io_frozen */
+ if (adev->pci_channel_state != pci_channel_io_frozen)
+ return;
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 7a7316731911..dc50c05f23fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -837,6 +837,28 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
return 0;
}
+/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
+static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
+{
+ u64 micro_tile_mode;
+
+ /* Zero swizzle mode means linear */
+ if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
+ return 0;
+
+ micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
+ switch (micro_tile_mode) {
+ case 0: /* DISPLAY */
+ case 3: /* RENDER */
+ return 0;
+ default:
+ drm_dbg_kms(afb->base.dev,
+ "Micro tile mode %llu not supported for scanout\n",
+ micro_tile_mode);
+ return -EINVAL;
+ }
+}
+
static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
unsigned int *width, unsigned int *height)
{
@@ -1103,6 +1125,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
+ struct amdgpu_device *adev = drm_to_adev(dev);
int ret, i;
/*
@@ -1122,6 +1145,14 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
if (ret)
return ret;
+ if (!dev->mode_config.allow_fb_modifiers) {
+ drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
+ "GFX9+ requires FB check based on format modifier\n");
+ ret = check_tiling_flags_gfx6(rfb);
+ if (ret)
+ return ret;
+ }
+
if (dev->mode_config.allow_fb_modifiers &&
!(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
ret = convert_tiling_flags_to_modifier(rfb);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index e7f06bd0f0cd..1916ec84dd71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -31,6 +31,8 @@
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
+#define GFX_OFF_NO_DELAY 0
+
/*
* GPU GFX IP block helpers function.
*/
@@ -558,6 +560,8 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
+ unsigned long delay = GFX_OFF_DELAY_ENABLE;
+
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return;
@@ -573,8 +577,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
adev->gfx.gfx_off_req_count--;
- if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
- schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
+ if (adev->gfx.gfx_off_req_count == 0 &&
+ !adev->gfx.gfx_off_state) {
+ /* If going to s2idle, no need to wait */
+ if (adev->in_s0ix)
+ delay = GFX_OFF_NO_DELAY;
+ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ delay);
+ }
} else {
if (adev->gfx.gfx_off_req_count == 0) {
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index c7797eac83c3..9ff600a38559 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -598,7 +598,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
break;
default:
adev->gmc.tmz_enabled = false;
- dev_warn(adev->dev,
+ dev_info(adev->dev,
"Trusted Memory Zone (TMZ) feature not supported\n");
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index dc44c946a244..98732518543e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -757,7 +757,7 @@ Out:
return res;
}
-inline uint32_t amdgpu_ras_eeprom_max_record_count(void)
+uint32_t amdgpu_ras_eeprom_max_record_count(void)
{
return RAS_MAX_RECORD_COUNT;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index f95fc61b3021..6bb00578bfbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -120,7 +120,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records, const u32 num);
-inline uint32_t amdgpu_ras_eeprom_max_record_count(void);
+uint32_t amdgpu_ras_eeprom_max_record_count(void);
void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 7b634a1517f9..0554576d3695 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -428,8 +428,8 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
ent = debugfs_create_file(name,
S_IFREG | S_IRUGO, root,
ring, &amdgpu_debugfs_ring_fops);
- if (!ent)
- return -ENOMEM;
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
i_size_write(ent->d_inode, ring->ring_size + 12);
ring->ent = ent;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 38dade421d46..94126dc39688 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -515,6 +515,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
goto out;
}
+ if (bo->type == ttm_bo_type_device &&
+ new_mem->mem_type == TTM_PL_VRAM &&
+ old_mem->mem_type != TTM_PL_VRAM) {
+ /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
+ * accesses the BO after it's moved.
+ */
+ abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ }
+
if (adev->mman.buffer_funcs_enabled) {
if (((old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) ||
@@ -545,15 +554,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
}
- if (bo->type == ttm_bo_type_device &&
- new_mem->mem_type == TTM_PL_VRAM &&
- old_mem->mem_type != TTM_PL_VRAM) {
- /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
- * accesses the BO after it's moved.
- */
- abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- }
-
out:
/* update statistics */
atomic64_add(bo->base.size, &adev->num_bytes_moved);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 603c259b073b..025184a556ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3599,7 +3599,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
/* set static priority for a queue/ring */
gfx_v9_0_mqd_set_priority(ring, mqd);
- mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+ mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
/* map_queues packet doesn't need to activate the queue,
* so only kiq needs to set this field.
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 41c3a0d70b7c..e47104a1f559 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -1098,6 +1098,8 @@ static int gmc_v10_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ gmc_v10_0_gart_disable(adev);
+
if (amdgpu_sriov_vf(adev)) {
/* full access mode, so don't touch any GMC register */
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
@@ -1106,7 +1108,6 @@ static int gmc_v10_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
- gmc_v10_0_gart_disable(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index d90c16a6b2b8..5551359d5dfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1794,6 +1794,8 @@ static int gmc_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ gmc_v9_0_gart_disable(adev);
+
if (amdgpu_sriov_vf(adev)) {
/* full access mode, so don't touch any GMC register */
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
@@ -1802,7 +1804,6 @@ static int gmc_v9_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
- gmc_v9_0_gart_disable(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 779f5c911e11..e32efcfb0c8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -868,6 +868,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
msleep(1000);
}
+ /* TODO: check whether we can submit a doorbell request to raise
+ * a doorbell fence to exit gfxoff.
+ */
+ if (adev->in_s0ix)
+ amdgpu_gfx_off_ctrl(adev, false);
+
sdma_v5_2_soft_reset(adev);
/* unhalt the MEs */
sdma_v5_2_enable(adev, true);
@@ -876,6 +882,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
/* start the gfx rings and rlc compute queues */
r = sdma_v5_2_gfx_resume(adev);
+ if (adev->in_s0ix)
+ amdgpu_gfx_off_ctrl(adev, true);
if (r)
return r;
r = sdma_v5_2_rlc_resume(adev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 16a57b70cc1a..4a416231b24c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -468,6 +468,7 @@ static const struct kfd_device_info navi10_device_info = {
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 145,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -487,6 +488,7 @@ static const struct kfd_device_info navi12_device_info = {
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 145,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -506,6 +508,7 @@ static const struct kfd_device_info navi14_device_info = {
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 145,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -525,6 +528,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = {
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 4,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -544,6 +548,7 @@ static const struct kfd_device_info navy_flounder_device_info = {
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -562,7 +567,8 @@ static const struct kfd_device_info vangogh_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 1,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
@@ -582,6 +588,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = {
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -601,6 +608,7 @@ static const struct kfd_device_info beige_goby_device_info = {
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 1,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -619,7 +627,8 @@ static const struct kfd_device_info yellow_carp_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
+ .no_atomic_fw_version = 92,
.num_sdma_engines = 1,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
@@ -708,20 +717,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
if (!kfd)
return NULL;
- /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
- * 32 and 64-bit requests are possible and must be
- * supported.
- */
- kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
- if (device_info->needs_pci_atomics &&
- !kfd->pci_atomic_requested) {
- dev_info(kfd_device,
- "skipped device %x:%x, PCI rejects atomics\n",
- pdev->vendor, pdev->device);
- kfree(kfd);
- return NULL;
- }
-
kfd->kgd = kgd;
kfd->device_info = device_info;
kfd->pdev = pdev;
@@ -821,6 +816,23 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
- kfd->vm_info.first_vmid_kfd + 1;
+ /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+ * 32 and 64-bit requests are possible and must be
+ * supported.
+ */
+ kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd);
+ if (!kfd->pci_atomic_requested &&
+ kfd->device_info->needs_pci_atomics &&
+ (!kfd->device_info->no_atomic_fw_version ||
+ kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) {
+ dev_info(kfd_device,
+ "skipped device %x:%x, PCI rejects atomics %d<%d\n",
+ kfd->pdev->vendor, kfd->pdev->device,
+ kfd->mec_fw_version,
+ kfd->device_info->no_atomic_fw_version);
+ return false;
+ }
+
/* Verify module parameters regarding mapped process number */
if ((hws_max_conc_proc < 0)
|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
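The relocated check above now also consults the MEC firmware version: a device that requires PCIe atomics is only skipped when the platform cannot provide them and the firmware is older than the first version (no_atomic_fw_version) that can run without them. A standalone sketch of the predicate:

    #include <stdbool.h>
    #include <stdint.h>

    /* True if the device must be skipped: PCIe atomics are required but not
     * available, and no firmware fallback exists (or the firmware is too old). */
    static bool kfd_skip_device(bool pci_atomics_ok, bool needs_pci_atomics,
                                uint32_t mec_fw_version,
                                uint32_t no_atomic_fw_version)
    {
        return !pci_atomics_ok && needs_pci_atomics &&
               (no_atomic_fw_version == 0 ||
                mec_fw_version < no_atomic_fw_version);
    }
    /* e.g. Navi10 (no_atomic_fw_version = 145): MEC firmware 144 without
     * PCIe atomics is skipped, MEC 145 or newer is allowed to probe. */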
@@ -959,7 +971,6 @@ out:
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
if (kfd->init_complete) {
- svm_migrate_fini((struct amdgpu_device *)kfd->kgd);
device_queue_manager_uninit(kfd->dqm);
kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
@@ -1057,31 +1068,29 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
return ret;
}
-static int kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
int err = 0;
err = kfd_iommu_resume(kfd);
- if (err) {
+ if (err)
dev_err(kfd_device,
"Failed to resume IOMMU for device %x:%x\n",
kfd->pdev->vendor, kfd->pdev->device);
- return err;
- }
+ return err;
+}
+
+static int kfd_resume(struct kfd_dev *kfd)
+{
+ int err = 0;
err = kfd->dqm->ops.start(kfd->dqm);
- if (err) {
+ if (err)
dev_err(kfd_device,
"Error starting queue manager for device %x:%x\n",
kfd->pdev->vendor, kfd->pdev->device);
- goto dqm_start_error;
- }
return err;
-
-dqm_start_error:
- kfd_iommu_suspend(kfd);
- return err;
}
static inline void kfd_queue_work(struct workqueue_struct *wq,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index dab290a4d19d..4a16e3c257b9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -891,9 +891,16 @@ int svm_migrate_init(struct amdgpu_device *adev)
pgmap->ops = &svm_migrate_pgmap_ops;
pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
+
+ /* The device manager releases device-specific resources, memory region
+ * and pgmap when the driver disconnects from the device.
+ */
r = devm_memremap_pages(adev->dev, pgmap);
if (IS_ERR(r)) {
pr_err("failed to register HMM device memory\n");
+
+ /* Disable SVM support capability */
+ pgmap->type = 0;
devm_release_mem_region(adev->dev, res->start,
res->end - res->start + 1);
return PTR_ERR(r);
@@ -908,12 +915,3 @@ int svm_migrate_init(struct amdgpu_device *adev)
return 0;
}
-
-void svm_migrate_fini(struct amdgpu_device *adev)
-{
- struct dev_pagemap *pgmap = &adev->kfd.dev->pgmap;
-
- devm_memunmap_pages(adev->dev, pgmap);
- devm_release_mem_region(adev->dev, pgmap->range.start,
- pgmap->range.end - pgmap->range.start + 1);
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index 0de76b5d4973..2f5b3394c9ed 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -47,7 +47,6 @@ unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
int svm_migrate_init(struct amdgpu_device *adev);
-void svm_migrate_fini(struct amdgpu_device *adev);
#else
@@ -55,10 +54,6 @@ static inline int svm_migrate_init(struct amdgpu_device *adev)
{
return 0;
}
-static inline void svm_migrate_fini(struct amdgpu_device *adev)
-{
- /* empty */
-}
#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index ab83b0de6b22..6d8f9bb2d905 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -207,6 +207,7 @@ struct kfd_device_info {
bool supports_cwsr;
bool needs_iommu_device;
bool needs_pci_atomics;
+ uint32_t no_atomic_fw_version;
unsigned int num_sdma_engines;
unsigned int num_xgmi_sdma_engines;
unsigned int num_sdma_queues_per_engine;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 9fc8021bb0ab..9d0f65a90002 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -118,6 +118,13 @@ static void svm_range_remove_notifier(struct svm_range *prange)
mmu_interval_notifier_remove(&prange->notifier);
}
+static bool
+svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr && !dma_mapping_error(dev, dma_addr) &&
+ !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
+}
+
static int
svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
unsigned long offset, unsigned long npages,
@@ -139,8 +146,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
addr += offset;
for (i = 0; i < npages; i++) {
- if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]),
- "leaking dma mapping\n"))
+ if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
page = hmm_pfn_to_page(hmm_pfns[i]);
@@ -209,7 +215,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
return;
for (i = offset; i < offset + npages; i++) {
- if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
+ if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
continue;
pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
@@ -1165,7 +1171,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned long last_start;
int last_domain;
int r = 0;
- int64_t i;
+ int64_t i, j;
last_start = prange->start + offset;
@@ -1178,7 +1184,11 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
for (i = offset; i < offset + npages; i++) {
last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
- if ((prange->start + i) < prange->last &&
+
+ /* Collect all pages in the same address range and memory domain
+ * that can be mapped with a single call to update mapping.
+ */
+ if (i < offset + npages - 1 &&
last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
continue;
@@ -1201,6 +1211,10 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
NULL, dma_addr,
&vm->last_update,
&table_freed);
+
+ for (j = last_start - prange->start; j <= i; j++)
+ dma_addr[j] |= last_domain;
+
if (r) {
pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
goto out;
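Two ideas in the kfd_svm.c changes above are worth spelling out: the VRAM-domain tag rides in a low bit of each DMA address (pages are page-aligned, so the bit is otherwise unused), and svm_range_map_to_gpu batches consecutive pages that share a domain into a single mapping call. A userspace sketch, with the bit position assumed for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t;
    #define SVM_RANGE_VRAM_DOMAIN 0x1ULL /* assumed low-bit tag */

    /* Mirrors svm_is_valid_dma_mapping_addr(), minus dma_mapping_error(). */
    static bool valid_sysmem_mapping(dma_addr_t addr)
    {
        return addr && !(addr & SVM_RANGE_VRAM_DOMAIN);
    }

    /* Map runs of pages that share a domain, one call per run. */
    static void map_in_runs(const dma_addr_t *addr, unsigned long npages)
    {
        unsigned long i, run_start = 0;

        for (i = 0; i < npages; i++) {
            uint64_t domain = addr[i] & SVM_RANGE_VRAM_DOMAIN;

            if (i + 1 < npages &&
                domain == (addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
                continue; /* extend the current run */
            printf("map pages [%lu..%lu], domain %llu\n",
                   run_start, i, (unsigned long long)domain);
            run_start = i + 1;
        }
    }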
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 7dffc04a557e..127667e549c1 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -25,6 +25,8 @@ config DRM_AMD_DC_HDCP
config DRM_AMD_DC_SI
bool "AMD DC support for Southern Islands ASICs"
+ depends on DRM_AMDGPU_SI
+ depends on DRM_AMD_DC
default n
help
Choose this option to enable new AMD DC support for SI asics
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 9b1fc54555ee..1ea31dcc7a8b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -998,6 +998,8 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
uint32_t agp_base, agp_bot, agp_top;
PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
+ memset(pa_config, 0, sizeof(*pa_config));
+
logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
@@ -1113,6 +1115,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.asic_id.pci_revision_id = adev->pdev->revision;
init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+ init_data.asic_id.chip_id = adev->pdev->device;
init_data.asic_id.vram_width = adev->gmc.vram_width;
/* TODO: initialize init_data.asic_id.vram_type here!!!! */
@@ -1717,6 +1720,7 @@ static int dm_late_init(void *handle)
linear_lut[i] = 0xFFFF * i / 15;
params.set = 0;
+ params.backlight_ramping_override = false;
params.backlight_ramping_start = 0xCCCC;
params.backlight_ramping_reduction = 0xCCCCCCCC;
params.backlight_lut_array_size = 16;
@@ -6024,21 +6028,23 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
return 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work)
- return -ENOMEM;
+ if (dm->vblank_control_workqueue) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
- INIT_WORK(&work->work, vblank_control_worker);
- work->dm = dm;
- work->acrtc = acrtc;
- work->enable = enable;
+ INIT_WORK(&work->work, vblank_control_worker);
+ work->dm = dm;
+ work->acrtc = acrtc;
+ work->enable = enable;
- if (acrtc_state->stream) {
- dc_stream_retain(acrtc_state->stream);
- work->stream = acrtc_state->stream;
- }
+ if (acrtc_state->stream) {
+ dc_stream_retain(acrtc_state->stream);
+ work->stream = acrtc_state->stream;
+ }
- queue_work(dm->vblank_control_workqueue, &work->work);
+ queue_work(dm->vblank_control_workqueue, &work->work);
+ }
#endif
return 0;
@@ -6792,14 +6798,15 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
- struct dc_state *dc_state)
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars)
{
struct dc_stream_state *stream = NULL;
struct drm_connector *connector;
struct drm_connector_state *new_con_state;
struct amdgpu_dm_connector *aconnector;
struct dm_connector_state *dm_conn_state;
- int i, j, clock, bpp;
+ int i, j, clock;
int vcpi, pbn_div, pbn = 0;
for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -6838,9 +6845,15 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
}
pbn_div = dm_mst_get_pbn_divider(stream->link);
- bpp = stream->timing.dsc_cfg.bits_per_pixel;
clock = stream->timing.pix_clk_100hz / 10;
- pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
+ /* pbn is calculated by compute_mst_dsc_configs_for_state */
+ for (j = 0; j < dc_state->stream_count; j++) {
+ if (vars[j].aconnector == aconnector) {
+ pbn = vars[j].pbn;
+ break;
+ }
+ }
+
vcpi = drm_dp_mst_atomic_enable_dsc(state,
aconnector->port,
pbn, pbn_div,
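With this change the VCPI step no longer recomputes PBN from bits_per_pixel; it reuses the value the DSC fairness solver stored for each connector. A sketch of that lookup with simplified types:

    struct fairness_var { void *aconnector; int pbn; };

    /* Return the solver's PBN for a connector, or 0 if it was not placed. */
    static int pbn_for_connector(const struct fairness_var *vars, int count,
                                 const void *aconnector)
    {
        int j;

        for (j = 0; j < count; j++)
            if (vars[j].aconnector == aconnector)
                return vars[j].pbn;
        return 0;
    }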
@@ -7519,6 +7532,32 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
}
}
+static void amdgpu_set_panel_orientation(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+ const struct drm_display_mode *native_mode;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+ connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+ return;
+
+ encoder = amdgpu_dm_connector_to_encoder(connector);
+ if (!encoder)
+ return;
+
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ native_mode = &amdgpu_encoder->native_mode;
+ if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
+ return;
+
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ native_mode->hdisplay,
+ native_mode->vdisplay);
+}
+
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
struct edid *edid)
{
@@ -7547,6 +7586,8 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
* restored here.
*/
amdgpu_dm_update_freesync_caps(connector, edid);
+
+ amdgpu_set_panel_orientation(connector);
} else {
amdgpu_dm_connector->num_modes = 0;
}
@@ -8058,8 +8099,26 @@ static bool is_content_protection_different(struct drm_connector_state *state,
state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
- * hot-plug, headless s3, dpms
+ /* Stream removed and re-enabled
+ *
+ * Can sometimes overlap with the HPD case,
+ * thus set update_hdcp to false to avoid
+ * setting HDCP multiple times.
+ *
+ * Handles: DESIRED -> DESIRED (Special case)
+ */
+ if (!(old_state->crtc && old_state->crtc->enabled) &&
+ state->crtc && state->crtc->enabled &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ dm_con_state->update_hdcp = false;
+ return true;
+ }
+
+ /* Hot-plug, headless s3, dpms
+ *
+ * Only start HDCP if the display is connected/enabled.
+ * update_hdcp flag will be set to false until the next
+ * HPD comes in.
*
* Handles: DESIRED -> DESIRED (Special case)
*/
@@ -8648,7 +8707,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* If PSR or idle optimizations are enabled then flush out
* any pending work before hardware programming.
*/
- flush_workqueue(dm->vblank_control_workqueue);
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
#endif
bundle->stream_update.stream = acrtc_state->stream;
@@ -8983,7 +9043,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
/* if there mode set or reset, disable eDP PSR */
if (mode_set_reset_required) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
- flush_workqueue(dm->vblank_control_workqueue);
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
#endif
amdgpu_dm_psr_disable_all(dm);
}
@@ -10243,6 +10304,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
int ret, i;
bool lock_and_validation_needed = false;
struct dm_crtc_state *dm_old_crtc_state;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dsc_mst_fairness_vars vars[MAX_PIPES];
+#endif
trace_amdgpu_dm_atomic_check_begin(state);
@@ -10473,10 +10537,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
+ if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
goto fail;
- ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
+ ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
if (ret)
goto fail;
#endif
@@ -10492,7 +10556,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
status = dc_validate_global_state(dc, dm_state->context, false);
if (status != DC_OK) {
- DC_LOG_WARNING("DC global validation failure: %s (%d)",
+ drm_dbg_atomic(dev,
+ "DC global validation failure: %s (%d)",
dc_status_to_str(status), status);
ret = -EINVAL;
goto fail;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 1bcba6943fd7..7af0d58c231b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -518,12 +518,7 @@ struct dsc_mst_fairness_params {
uint32_t num_slices_h;
uint32_t num_slices_v;
uint32_t bpp_overwrite;
-};
-
-struct dsc_mst_fairness_vars {
- int pbn;
- bool dsc_enabled;
- int bpp_x16;
+ struct amdgpu_dm_connector *aconnector;
};
static int kbps_to_peak_pbn(int kbps)
@@ -750,12 +745,12 @@ static void try_disable_dsc(struct drm_atomic_state *state,
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
struct dc_state *dc_state,
- struct dc_link *dc_link)
+ struct dc_link *dc_link,
+ struct dsc_mst_fairness_vars *vars)
{
int i;
struct dc_stream_state *stream;
struct dsc_mst_fairness_params params[MAX_PIPES];
- struct dsc_mst_fairness_vars vars[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
int count = 0;
bool debugfs_overwrite = false;
@@ -776,6 +771,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[count].timing = &stream->timing;
params[count].sink = stream->sink;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ params[count].aconnector = aconnector;
params[count].port = aconnector->port;
params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
@@ -798,6 +794,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
}
/* Try no compression */
for (i = 0; i < count; i++) {
+ vars[i].aconnector = params[i].aconnector;
vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i].dsc_enabled = false;
vars[i].bpp_x16 = 0;
@@ -851,7 +848,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
}
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
- struct dc_state *dc_state)
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars)
{
int i, j;
struct dc_stream_state *stream;
@@ -882,7 +880,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
return false;
mutex_lock(&aconnector->mst_mgr.lock);
- if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
+ if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) {
mutex_unlock(&aconnector->mst_mgr.lock);
return false;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index b38bd68121ce..900d3f7a8498 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -39,8 +39,17 @@ void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
#if defined(CONFIG_DRM_AMD_DC_DCN)
+
+struct dsc_mst_fairness_vars {
+ int pbn;
+ bool dsc_enabled;
+ int bpp_x16;
+ struct amdgpu_dm_connector *aconnector;
+};
+
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
- struct dc_state *dc_state);
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
index c9f47d167472..b1bf80da3a55 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
@@ -62,7 +62,7 @@ inline void dc_assert_fp_enabled(void)
depth = *pcpu;
put_cpu_ptr(&fpu_recursion_depth);
- ASSERT(depth > 1);
+ ASSERT(depth >= 1);
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 8bd7f42a8053..1e44b13c1c7d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2586,13 +2586,21 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
int dc_link_get_backlight_level(const struct dc_link *link)
{
-
struct abm *abm = get_abm_from_stream_res(link);
+ struct panel_cntl *panel_cntl = link->panel_cntl;
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ bool fw_set_brightness = true;
- if (abm == NULL || abm->funcs->get_current_backlight == NULL)
- return DC_ERROR_UNEXPECTED;
+ if (dmcu)
+ fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
- return (int) abm->funcs->get_current_backlight(abm);
+ if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
+ return panel_cntl->funcs->get_current_backlight(panel_cntl);
+ else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
+ return (int) abm->funcs->get_current_backlight(abm);
+ else
+ return DC_ERROR_UNEXPECTED;
}
int dc_link_get_target_backlight_pwm(const struct dc_link *link)
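dc_link_get_backlight_level() now prefers the panel controller's PWM readback when the DMCU firmware has not taken over brightness control, and only then falls back to ABM. The selection order, modeled as a standalone function (the enum names are illustrative):

    #include <stdbool.h>

    enum backlight_source { BL_PANEL_CNTL, BL_ABM, BL_ERROR };

    static enum backlight_source pick_backlight_source(bool dmcu_present,
                                                       bool dmcu_initialized,
                                                       bool panel_can_read,
                                                       bool abm_can_read)
    {
        /* Firmware owns brightness once the DMCU is initialized; with no
         * DMCU at all, the code also assumes firmware-set brightness. */
        bool fw_set_brightness = dmcu_present ? dmcu_initialized : true;

        if (!fw_set_brightness && panel_can_read)
            return BL_PANEL_CNTL;
        if (abm_can_read)
            return BL_ABM;
        return BL_ERROR; /* DC_ERROR_UNEXPECTED in the driver */
    }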
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 330edd666b7d..6d655e158267 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1,4 +1,26 @@
-/* Copyright 2015 Advanced Micro Devices, Inc. */
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
#include "dm_services.h"
#include "dc.h"
#include "dc_link_dp.h"
@@ -1284,12 +1306,6 @@ static void override_training_settings(
{
uint32_t lane;
- /* Override link settings */
- if (link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
- lt_settings->link_settings.link_rate = link->preferred_link_setting.link_rate;
- if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN)
- lt_settings->link_settings.lane_count = link->preferred_link_setting.lane_count;
-
/* Override link spread */
if (!link->dp_ss_off && overrides->downspread != NULL)
lt_settings->link_settings.link_spread = *overrides->downspread ?
@@ -1804,14 +1820,13 @@ bool perform_link_training_with_retries(
if (panel_mode == DP_PANEL_MODE_EDP) {
struct cp_psp *cp_psp = &stream->ctx->cp_psp;
- if (cp_psp && cp_psp->funcs.enable_assr) {
- if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
- /* since eDP implies ASSR on, change panel
- * mode to disable ASSR
- */
- panel_mode = DP_PANEL_MODE_DEFAULT;
- }
- }
+ if (cp_psp && cp_psp->funcs.enable_assr)
+ /* ASSR is bound to fail with unsigned PSP
+ * verstage used during the development phase.
+ * Report and continue with eDP panel mode to
+ * perform eDP link training with the right settings
+ */
+ cp_psp->funcs.enable_assr(cp_psp->handle, link);
}
#endif
@@ -1840,9 +1855,13 @@ bool perform_link_training_with_retries(
dp_disable_link_phy(link, signal);
/* Abort link training if failure due to sink being unplugged. */
- if (status == LINK_TRAINING_ABORT)
- break;
- else if (do_fallback) {
+ if (status == LINK_TRAINING_ABORT) {
+ enum dc_connection_type type = dc_connection_none;
+
+ dc_link_detect_sink(link, &type);
+ if (type == dc_connection_none)
+ break;
+ } else if (do_fallback) {
decide_fallback_link_setting(*link_setting, &current_setting, status);
/* Fail link training if reduced link bandwidth no longer meets
* stream requirements.
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index e14f99b4b0c3..3c3347341103 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -42,7 +42,7 @@
#define DC_LOGGER \
engine->ctx->logger
-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0)
#define IS_DC_I2CAUX_LOGGING_ENABLED() (false)
#define LOG_FLAG_Error_I2cAux LOG_ERROR
#define LOG_FLAG_I2cAux_DceAux LOG_I2C_AUX
@@ -76,7 +76,7 @@ enum {
#define DEFAULT_AUX_ENGINE_MULT 0
#define DEFAULT_AUX_ENGINE_LENGTH 69
-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0)
static void release_engine(
struct dce_aux *engine)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
index e92339235863..e8570060d007 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -49,7 +49,6 @@
static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
{
uint64_t current_backlight;
- uint32_t round_result;
uint32_t bl_period, bl_int_count;
uint32_t bl_pwm, fractional_duty_cycle_en;
uint32_t bl_period_mask, bl_pwm_mask;
@@ -84,15 +83,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c
current_backlight = div_u64(current_backlight, bl_period);
current_backlight = (current_backlight + 1) >> 1;
- current_backlight = (uint64_t)(current_backlight) * bl_period;
-
- round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
-
- round_result = (round_result >> (bl_int_count-1)) & 1;
-
- current_backlight >>= bl_int_count;
- current_backlight += round_result;
-
return (uint32_t)(current_backlight);
}
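The deleted lines re-multiplied the already-divided value by the PWM period and rounded a second time, corrupting the result. What remains computes a plain duty-cycle ratio scaled to 16 bits. Assuming the earlier, unchanged part of the function pre-scales bl_pwm by 2^(bl_int_count + 1), one extra bit for rounding, the arithmetic reduces to:

    #include <stdint.h>

    /* 16-bit backlight ~= round((bl_pwm / bl_period) * 2^bl_int_count),
     * sketched under the pre-scaling assumption stated above. */
    static uint32_t backlight16(uint64_t bl_pwm, uint64_t bl_period,
                                unsigned int bl_int_count)
    {
        uint64_t v = bl_pwm << (bl_int_count + 1); /* one extra bit */

        v /= bl_period;                  /* duty cycle, extra bit kept */
        return (uint32_t)((v + 1) >> 1); /* round to nearest */
    }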
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index d8b22618b79e..c337588231ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -118,6 +118,7 @@ struct dcn10_link_enc_registers {
uint32_t RDPCSTX_PHY_CNTL4;
uint32_t RDPCSTX_PHY_CNTL5;
uint32_t RDPCSTX_PHY_CNTL6;
+ uint32_t RDPCSPIPE_PHY_CNTL6;
uint32_t RDPCSTX_PHY_CNTL7;
uint32_t RDPCSTX_PHY_CNTL8;
uint32_t RDPCSTX_PHY_CNTL9;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index 90127c1f9e35..b0892443fbd5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -37,6 +37,7 @@
#include "link_enc_cfg.h"
#include "dc_dmub_srv.h"
+#include "dal_asic_id.h"
#define CTX \
enc10->base.ctx
@@ -62,6 +63,10 @@
#define AUX_REG_WRITE(reg_name, val) \
dm_write_reg(CTX, AUX_REG(reg_name), val)
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
void dcn31_link_encoder_set_dio_phy_mux(
struct link_encoder *enc,
enum encoder_type_select sel,
@@ -215,8 +220,8 @@ static const struct link_encoder_funcs dcn31_link_enc_funcs = {
.fec_is_active = enc2_fec_is_active,
.get_dig_frontend = dcn10_get_dig_frontend,
.get_dig_mode = dcn10_get_dig_mode,
- .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
- .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
+ .is_in_alt_mode = dcn31_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn31_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
};
@@ -404,3 +409,60 @@ void dcn31_link_encoder_disable_output(
}
}
+bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t dp_alt_mode_disable;
+ bool is_usb_c_alt_mode = false;
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+ // [Note] no need to check hw_internal_rev once phy mux selection is ready
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+ } else {
+ /*
+ * B0 phys use a new set of registers to check whether alt mode is disabled.
+ * If the value is 1, alt mode is disabled; otherwise it is enabled.
+ */
+ if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A)
+ || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B)
+ || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+ } else {
+ // [Note] need to change TRANSMITTER_UNIPHY_C/D to F/G once phy mux selection is ready
+ REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+ }
+ }
+
+ is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
+ }
+
+ return is_usb_c_alt_mode;
+}
+
+void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t is_in_usb_c_dp4_mode = 0;
+
+ dcn10_link_encoder_get_max_link_cap(enc, link_settings);
+
+ /* in usb c dp2 mode, max lane count is 2 */
+ if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
+ if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+ // [Note] no need to check hw_internal_rev once phy mux selection is ready
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+ } else {
+ if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A)
+ || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B)
+ || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+ } else {
+ REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+ }
+ }
+ if (!is_in_usb_c_dp4_mode)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+ }
+}
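When the PHY reports USB-C DP alt mode without the four-lane (DP4) pin assignment, only two lanes are routed to DisplayPort, so the advertised capability gets clamped. Stripped of the register plumbing, the clamp is just:

    /* In USB-C DP2 (two-lane) alt mode, cap the advertised lane count. */
    static int clamp_lane_count(int reported_lanes, int is_dp4_mode)
    {
        return is_dp4_mode ? reported_lanes
                           : (reported_lanes < 2 ? reported_lanes : 2);
    }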
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
index 32d146312838..3454f1e7c1f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
@@ -69,6 +69,7 @@
SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \
SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \
SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \
+ SRI(RDPCSPIPE_PHY_CNTL6, RDPCSPIPE, id), \
SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \
SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \
SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \
@@ -115,7 +116,9 @@
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
- LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+ LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+ LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+ LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\
@@ -243,4 +246,13 @@ void dcn31_link_encoder_disable_output(
struct link_encoder *enc,
enum signal_type signal);
+/*
+ * Check whether USB-C DP Alt mode is disabled
+ */
+bool dcn31_link_encoder_is_in_alt_mode(
+ struct link_encoder *enc);
+
+void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
#endif /* __DC_LINK_ENCODER__DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index a7702d3c75cd..0006bbac466c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -928,7 +928,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
- .max_downscale_src_width = 7680,/*upto 8K*/
+ .max_downscale_src_width = 3840, /* up to 4K */
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = false,
@@ -1284,6 +1284,12 @@ static struct stream_encoder *dcn31_stream_encoder_create(
if (!enc1 || !vpg || !afmt)
return NULL;
+ if (ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+ ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ if ((eng_id == ENGINE_ID_DIGC) || (eng_id == ENGINE_ID_DIGD))
+ eng_id = eng_id + 3; // For B0 only. C->F, D->G.
+ }
+
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
&stream_enc_regs[eng_id],
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 381c17caace1..5adc471bef57 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -227,7 +227,7 @@ enum {
#define FAMILY_YELLOW_CARP 146
#define YELLOW_CARP_A0 0x01
-#define YELLOW_CARP_B0 0x02 // TODO: DCN31 - update with correct B0 ID
+#define YELLOW_CARP_B0 0x1A
#define YELLOW_CARP_UNKNOWN 0xFF
#ifndef ASICREV_IS_YELLOW_CARP
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h
index 92caf8441d1e..01a56556cde1 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h
@@ -11932,5 +11932,32 @@
#define ixDPCSSYS_CR4_RAWLANEX_DIG_PCS_XF_RX_OVRD_OUT_2 0xe0c7
#define ixDPCSSYS_CR4_RAWLANEX_DIG_PCS_XF_TX_OVRD_IN_2 0xe0c8
+//RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+
+//RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+
+//[Note] Hack. RDPCSPIPE only has 2 instances.
+#define regRDPCSPIPE0_RDPCSPIPE_PHY_CNTL6 0x2d73
+#define regRDPCSPIPE0_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2
+#define regRDPCSPIPE1_RDPCSPIPE_PHY_CNTL6 0x2e4b
+#define regRDPCSPIPE1_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2
+#define regRDPCSPIPE2_RDPCSPIPE_PHY_CNTL6 0x2d73
+#define regRDPCSPIPE2_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2
+#define regRDPCSPIPE3_RDPCSPIPE_PHY_CNTL6 0x2e4b
+#define regRDPCSPIPE3_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2
+#define regRDPCSPIPE4_RDPCSPIPE_PHY_CNTL6 0x2d73
+#define regRDPCSPIPE4_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2
#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h
index 8a08ecc34c69..4884a4e1f261 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h
@@ -33,63 +33,47 @@
#define TABLE_PMSTATUSLOG 3 // Called by Tools for Agm logging
#define TABLE_DPMCLOCKS 4 // Called by Driver; defined here, but not used, for backward compatibility
#define TABLE_MOMENTARY_PM 5 // Called by Tools; defined here, but not used, for backward compatibility
-#define TABLE_COUNT 6
+#define TABLE_SMU_METRICS 6 // Called by Driver
+#define TABLE_COUNT 7
-#define NUM_DSPCLK_LEVELS 8
-#define NUM_SOCCLK_DPM_LEVELS 8
-#define NUM_DCEFCLK_DPM_LEVELS 4
-#define NUM_FCLK_DPM_LEVELS 4
-#define NUM_MEMCLK_DPM_LEVELS 4
+typedef struct SmuMetricsTable_t {
+ //CPU status
+ uint16_t CoreFrequency[6]; //[MHz]
+ uint32_t CorePower[6]; //[mW]
+ uint16_t CoreTemperature[6]; //[centi-Celsius]
+ uint16_t L3Frequency[2]; //[MHz]
+ uint16_t L3Temperature[2]; //[centi-Celsius]
+ uint16_t C0Residency[6]; //Percentage
-#define NUMBER_OF_PSTATES 8
-#define NUMBER_OF_CORES 8
+ // GFX status
+ uint16_t GfxclkFrequency; //[MHz]
+ uint16_t GfxTemperature; //[centi-Celsius]
-typedef enum {
- S3_TYPE_ENTRY,
- S5_TYPE_ENTRY,
-} Sleep_Type_e;
+ // SOC IP info
+ uint16_t SocclkFrequency; //[MHz]
+ uint16_t VclkFrequency; //[MHz]
+ uint16_t DclkFrequency; //[MHz]
+ uint16_t MemclkFrequency; //[MHz]
-typedef enum {
- GFX_OFF = 0,
- GFX_ON = 1,
-} GFX_Mode_e;
+ // power, VF info for CPU/GFX telemetry rails, and then socket power total
+ uint32_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_GFX
+ uint32_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_GFX
+ uint32_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_GFX
+ uint32_t CurrentSocketPower; //[mW]
-typedef enum {
- CPU_P0 = 0,
- CPU_P1,
- CPU_P2,
- CPU_P3,
- CPU_P4,
- CPU_P5,
- CPU_P6,
- CPU_P7
-} CPU_PState_e;
+ uint16_t SocTemperature; //[centi-Celsius]
+ uint16_t EdgeTemperature;
+ uint16_t ThrottlerStatus;
+ uint16_t Spare;
-typedef enum {
- CPU_CORE0 = 0,
- CPU_CORE1,
- CPU_CORE2,
- CPU_CORE3,
- CPU_CORE4,
- CPU_CORE5,
- CPU_CORE6,
- CPU_CORE7
-} CORE_ID_e;
+} SmuMetricsTable_t;
-typedef enum {
- DF_DPM0 = 0,
- DF_DPM1,
- DF_DPM2,
- DF_DPM3,
- DF_PState_Count
-} DF_PState_e;
-
-typedef enum {
- GFX_DPM0 = 0,
- GFX_DPM1,
- GFX_DPM2,
- GFX_DPM3,
- GFX_PState_Count
-} GFX_PState_e;
+typedef struct SmuMetrics_t {
+ SmuMetricsTable_t Current;
+ SmuMetricsTable_t Average;
+ uint32_t SampleStartTime;
+ uint32_t SampleStopTime;
+ uint32_t Accnt;
+} SmuMetrics_t;
#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 6f1b1b50d527..18b862a90fbe 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -226,7 +226,10 @@
__SMU_DUMMY_MAP(SetUclkDpmMode), \
__SMU_DUMMY_MAP(LightSBR), \
__SMU_DUMMY_MAP(GfxDriverResetRecovery), \
- __SMU_DUMMY_MAP(BoardPowerCalibration),
+ __SMU_DUMMY_MAP(BoardPowerCalibration), \
+ __SMU_DUMMY_MAP(RequestGfxclk), \
+ __SMU_DUMMY_MAP(ForceGfxVid), \
+ __SMU_DUMMY_MAP(UnforceGfxVid),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h
index 6e6088760b18..909a86aa60f3 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h
@@ -65,6 +65,13 @@
#define PPSMC_MSG_SetDriverTableVMID 0x34
#define PPSMC_MSG_SetSoftMinCclk 0x35
#define PPSMC_MSG_SetSoftMaxCclk 0x36
-#define PPSMC_Message_Count 0x37
+#define PPSMC_MSG_GetGfxFrequency 0x37
+#define PPSMC_MSG_GetGfxVid 0x38
+#define PPSMC_MSG_ForceGfxFreq 0x39
+#define PPSMC_MSG_UnForceGfxFreq 0x3A
+#define PPSMC_MSG_ForceGfxVid 0x3B
+#define PPSMC_MSG_UnforceGfxVid 0x3C
+#define PPSMC_MSG_GetEnabledSmuFeatures 0x3D
+#define PPSMC_Message_Count 0x3E
#endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
index bdbbeb959c68..81f82aa05ec2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
@@ -6867,6 +6867,8 @@ static int si_dpm_enable(struct amdgpu_device *adev)
si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
si_thermal_start_thermal_controller(adev);
+ ni_update_current_ps(adev, boot_ps);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 3ab1ce4d3419..04863a797115 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1404,7 +1404,7 @@ static int smu_disable_dpms(struct smu_context *smu)
*/
if (smu->uploading_custom_pp_table &&
(adev->asic_type >= CHIP_NAVI10) &&
- (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
+ (adev->asic_type <= CHIP_BEIGE_GOBY))
return smu_disable_all_features_with_exception(smu,
true,
SMU_FEATURE_COUNT);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index e343cc218990..082f01893f3d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -771,8 +771,12 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
struct smu_11_0_dpm_context *dpm_context = NULL;
uint32_t gen_speed, lane_width;
- if (amdgpu_ras_intr_triggered())
- return sysfs_emit(buf, "unavailable\n");
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ if (amdgpu_ras_intr_triggered()) {
+ size += sysfs_emit_at(buf, size, "unavailable\n");
+ return size;
+ }
dpm_context = smu_dpm->dpm_context;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index b05f9541accc..3d4c65bc29dc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -44,6 +44,27 @@
#undef pr_info
#undef pr_debug
+/* unit: MHz */
+#define CYAN_SKILLFISH_SCLK_MIN 1000
+#define CYAN_SKILLFISH_SCLK_MAX 2000
+#define CYAN_SKILLFISH_SCLK_DEFAULT 1800
+
+/* unit: mV */
+#define CYAN_SKILLFISH_VDDC_MIN 700
+#define CYAN_SKILLFISH_VDDC_MAX 1129
+#define CYAN_SKILLFISH_VDDC_MAGIC 5118 // 0x13fe
+
+static struct gfx_user_settings {
+ uint32_t sclk;
+ uint32_t vddc;
+} cyan_skillfish_user_settings;
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE ( \
+ FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_SOC_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_GFX_DPM_BIT))
+
static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0),
@@ -52,14 +73,473 @@ static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverTableDramAddrLow, 0),
MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0),
MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
+ MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0),
+ MSG_MAP(RequestGfxclk, PPSMC_MSG_RequestGfxclk, 0),
+ MSG_MAP(ForceGfxVid, PPSMC_MSG_ForceGfxVid, 0),
+ MSG_MAP(UnforceGfxVid, PPSMC_MSG_UnforceGfxVid, 0),
+};
+
+static struct cmn2asic_mapping cyan_skillfish_table_map[SMU_TABLE_COUNT] = {
+ TAB_MAP_VALID(SMU_METRICS),
};
+static int cyan_skillfish_tables_init(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+ sizeof(SmuMetrics_t),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
+
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ if (!smu_table->metrics_table)
+ goto err0_out;
+
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+ smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+ if (!smu_table->gpu_metrics_table)
+ goto err1_out;
+
+ smu_table->metrics_time = 0;
+
+ return 0;
+
+err1_out:
+ smu_table->gpu_metrics_table_size = 0;
+ kfree(smu_table->metrics_table);
+err0_out:
+ return -ENOMEM;
+}
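cyan_skillfish_tables_init() uses the kernel's usual staged-unwind pattern: each allocation gets an error label that frees everything allocated before it, so a mid-sequence failure leaves nothing behind. The shape in isolation, as a userspace sketch:

    #include <stdlib.h>

    /* Staged error unwinding: later failures free earlier allocations. */
    static int init_two_buffers(void **a, void **b, size_t na, size_t nb)
    {
        *a = calloc(1, na);
        if (!*a)
            goto err0;
        *b = calloc(1, nb);
        if (!*b)
            goto err1;
        return 0;

    err1:
        free(*a);
        *a = NULL;
    err0:
        return -1; /* the kernel code returns -ENOMEM here */
    }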
+
+static int cyan_skillfish_init_smc_tables(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = cyan_skillfish_tables_init(smu);
+ if (ret)
+ return ret;
+
+ return smu_v11_0_init_smc_tables(smu);
+}
+
+static int cyan_skillfish_finit_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ kfree(smu_table->metrics_table);
+ smu_table->metrics_table = NULL;
+
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+ smu_table->gpu_metrics_table_size = 0;
+
+ smu_table->metrics_time = 0;
+
+ return 0;
+}
+
+static int
+cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+ int ret = 0;
+
+ mutex_lock(&smu->metrics_lock);
+
+ ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
+ if (ret) {
+ mutex_unlock(&smu->metrics_lock);
+ return ret;
+ }
+
+ switch (member) {
+ case METRICS_CURR_GFXCLK:
+ *value = metrics->Current.GfxclkFrequency;
+ break;
+ case METRICS_CURR_SOCCLK:
+ *value = metrics->Current.SocclkFrequency;
+ break;
+ case METRICS_CURR_VCLK:
+ *value = metrics->Current.VclkFrequency;
+ break;
+ case METRICS_CURR_DCLK:
+ *value = metrics->Current.DclkFrequency;
+ break;
+ case METRICS_CURR_UCLK:
+ *value = metrics->Current.MemclkFrequency;
+ break;
+ case METRICS_AVERAGE_SOCKETPOWER:
+ *value = (metrics->Current.CurrentSocketPower << 8) /
+ 1000;
+ break;
+ case METRICS_TEMPERATURE_EDGE:
+ *value = metrics->Current.GfxTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = metrics->Current.SocTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_VOLTAGE_VDDSOC:
+ *value = metrics->Current.Voltage[0];
+ break;
+ case METRICS_VOLTAGE_VDDGFX:
+ *value = metrics->Current.Voltage[1];
+ break;
+ case METRICS_THROTTLER_STATUS:
+ *value = metrics->Current.ThrottlerStatus;
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ mutex_unlock(&smu->metrics_lock);
+
+ return ret;
+}
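The METRICS_AVERAGE_SOCKETPOWER case above converts the firmware's milliwatt reading into the 8.8 fixed-point watts the sensor interface expects: shift left by 8 first, then divide by 1000, so the fractional part survives the integer division. For example:

    #include <stdint.h>

    /* mW -> watts in 8.8 fixed point (integer part in the high byte). */
    static uint32_t mw_to_q8_8(uint32_t mw)
    {
        return (mw << 8) / 1000;
    }
    /* 15000 mW -> (15000 * 256) / 1000 = 3840 = 15.0 in 8.8 format */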
+
+static int cyan_skillfish_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data,
+ uint32_t *size)
+{
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ mutex_lock(&smu->sensor_lock);
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_CURR_GFXCLK,
+ (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_CURR_UCLK,
+ (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_POWER:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_SOCKETPOWER,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_HOTSPOT,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_EDGE_TEMP:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_EDGE,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDNB:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_VOLTAGE_VDDSOC,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_VOLTAGE_VDDGFX,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&smu->sensor_lock);
+
+ return ret;
+}
+
+static int cyan_skillfish_get_current_clk_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ MetricsMember_t member_type;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ member_type = METRICS_CURR_GFXCLK;
+ break;
+ case SMU_FCLK:
+ case SMU_MCLK:
+ member_type = METRICS_CURR_UCLK;
+ break;
+ case SMU_SOCCLK:
+ member_type = METRICS_CURR_SOCCLK;
+ break;
+ case SMU_VCLK:
+ member_type = METRICS_CURR_VCLK;
+ break;
+ case SMU_DCLK:
+ member_type = METRICS_CURR_DCLK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cyan_skillfish_get_smu_metrics_data(smu, member_type, value);
+}
+
+static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ char *buf)
+{
+ int ret = 0, size = 0;
+ uint32_t cur_value = 0;
+
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ switch (clk_type) {
+ case SMU_OD_SCLK:
+ ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, &cur_value);
+ if (ret)
+ return ret;
+ size += sysfs_emit_at(buf, size,"%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
+ break;
+ case SMU_OD_VDDC_CURVE:
+ ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDGFX, &cur_value);
+ if (ret)
+ return ret;
+ size += sysfs_emit_at(buf, size,"%s:\n", "OD_VDDC");
+ size += sysfs_emit_at(buf, size, "0: %umV *\n", cur_value);
+ break;
+ case SMU_OD_RANGE:
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+ size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n",
+ CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ case SMU_FCLK:
+ case SMU_MCLK:
+ case SMU_SOCCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value);
+ if (ret)
+ return ret;
+ size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
+ break;
+ default:
+ dev_warn(smu->adev->dev, "Unsupported clock type\n");
+ return ret;
+ }
+
+ return size;
+}
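cyan_skillfish_print_clk_levels() builds its output with the append-at-offset pattern: each sysfs_emit_at() call writes at buf + size, returns the number of bytes it added, and that is accumulated into size and finally returned. A userspace analogue with snprintf (assuming cap is large enough for the lines emitted):

    #include <stdio.h>

    /* Append-at-offset: each call extends the buffer and grows 'size'. */
    static int emit_levels(char *buf, size_t cap, unsigned int cur_mhz)
    {
        int size = 0;

        size += snprintf(buf + size, cap - size, "%s:\n", "OD_SCLK");
        size += snprintf(buf + size, cap - size, "0: %uMhz *\n", cur_mhz);
        return size; /* total bytes written, as a sysfs show() expects */
    }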
+
+static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+ uint32_t feature_mask[2];
+ uint64_t feature_enabled;
+
+ /* we need to re-init after suspend so return false */
+ if (adev->in_suspend)
+ return false;
+
+ ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+
+ if (ret)
+ return false;
+
+ feature_enabled = (uint64_t)feature_mask[0] |
+ ((uint64_t)feature_mask[1] << 32);
+
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v2_2 *gpu_metrics =
+ (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ int i, ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
+
+ gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
+
+ gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
+ gpu_metrics->average_soc_power = metrics.Current.Power[0];
+ gpu_metrics->average_gfx_power = metrics.Current.Power[1];
+
+ gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
+
+ gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
+ gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
+ gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
+ gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
+
+ for (i = 0; i < 6; i++) {
+ gpu_metrics->temperature_core[i] = metrics.Current.CoreTemperature[i];
+ gpu_metrics->average_core_power[i] = metrics.Average.CorePower[i];
+ gpu_metrics->current_coreclk[i] = metrics.Current.CoreFrequency[i];
+ }
+
+ for (i = 0; i < 2; i++) {
+ gpu_metrics->temperature_l3[i] = metrics.Current.L3Temperature[i];
+ gpu_metrics->current_l3clk[i] = metrics.Current.L3Frequency[i];
+ }
+
+ gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v2_2);
+}
+
+static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ int ret = 0;
+ uint32_t vid;
+
+ switch (type) {
+ case PP_OD_EDIT_VDDC_CURVE:
+ if (size != 3 || input[0] != 0) {
+ dev_err(smu->adev->dev, "Invalid parameter!\n");
+ return -EINVAL;
+ }
+
+ if (input[1] <= CYAN_SKILLFISH_SCLK_MIN ||
+ input[1] > CYAN_SKILLFISH_SCLK_MAX) {
+ dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMHz\n",
+ CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+ return -EINVAL;
+ }
+
+ if (input[2] <= CYAN_SKILLFISH_VDDC_MIN ||
+ input[2] > CYAN_SKILLFISH_VDDC_MAX) {
+ dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
+ CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+ return -EINVAL;
+ }
+
+ cyan_skillfish_user_settings.sclk = input[1];
+ cyan_skillfish_user_settings.vddc = input[2];
+
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Invalid parameter!\n");
+ return -EINVAL;
+ }
+
+ cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT;
+ cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC;
+
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Invalid parameter!\n");
+ return -EINVAL;
+ }
+
+ if (cyan_skillfish_user_settings.sclk < CYAN_SKILLFISH_SCLK_MIN ||
+ cyan_skillfish_user_settings.sclk > CYAN_SKILLFISH_SCLK_MAX) {
+ dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMHz\n",
+ CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+ return -EINVAL;
+ }
+
+ if ((cyan_skillfish_user_settings.vddc != CYAN_SKILLFISH_VDDC_MAGIC) &&
+ (cyan_skillfish_user_settings.vddc < CYAN_SKILLFISH_VDDC_MIN ||
+ cyan_skillfish_user_settings.vddc > CYAN_SKILLFISH_VDDC_MAX)) {
+ dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
+ CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestGfxclk,
+ cyan_skillfish_user_settings.sclk, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set sclk failed!\n");
+ return ret;
+ }
+
+ if (cyan_skillfish_user_settings.vddc == CYAN_SKILLFISH_VDDC_MAGIC) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_UnforceGfxVid, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Unforce vddc failed!\n");
+ return ret;
+ }
+ } else {
+ /*
+ * PMFW expects an SVI2 VID code; convert the voltage to a VID:
+ * vid = (uint32_t)((1.55 - voltage) * 160.0 + 0.00001)
+ */
+ vid = (1550 - cyan_skillfish_user_settings.vddc) * 160 / 1000;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ForceGfxVid, vid, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Force vddc failed!\n");
+ return ret;
+ }
+ }
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
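The commit path above encodes the requested voltage as an SVI2 VID using the integer form of the formula in the comment. A small standalone sketch of both directions, assuming the same 1.55 V base and 6.25 mV-per-step encoding; the inverse helper is not part of the patch, just derived from the formula:

#include <stdint.h>
#include <stdio.h>

/* Forward conversion used above, in integer millivolts. */
static uint32_t svi2_mv_to_vid(uint32_t mv)
{
	/* The driver range-checks mv before converting; assume mv <= 1550. */
	return (1550 - mv) * 160 / 1000;
}

/* Inverse mapping, derived from vid = (1.55 - V) * 160. */
static uint32_t svi2_vid_to_mv(uint32_t vid)
{
	return 1550 - vid * 1000 / 160;
}

int main(void)
{
	/* 1100 mV -> (1550-1100)*160/1000 = 72; and back to 1100 mV. */
	printf("%u %u\n", svi2_mv_to_vid(1100), svi2_vid_to_mv(72));
	return 0;
}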
+
static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
.check_fw_version = smu_v11_0_check_fw_version,
.init_power = smu_v11_0_init_power,
.fini_power = smu_v11_0_fini_power,
+ .init_smc_tables = cyan_skillfish_init_smc_tables,
+ .fini_smc_tables = cyan_skillfish_finit_smc_tables,
+ .read_sensor = cyan_skillfish_read_sensor,
+ .print_clk_levels = cyan_skillfish_print_clk_levels,
+ .is_dpm_running = cyan_skillfish_is_dpm_running,
+ .get_gpu_metrics = cyan_skillfish_get_gpu_metrics,
+ .od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table,
.register_irq_handler = smu_v11_0_register_irq_handler,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
@@ -72,5 +552,6 @@ void cyan_skillfish_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &cyan_skillfish_ppt_funcs;
smu->message_map = cyan_skillfish_message_map;
+ smu->table_map = cyan_skillfish_table_map;
smu->is_apu = true;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index a5fc5d7cb6c7..b1ad451af06b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1279,6 +1279,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
uint32_t min_value, max_value;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
@@ -1392,7 +1394,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_RANGE:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
@@ -2272,7 +2274,27 @@ static int navi10_baco_enter(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (adev->in_runpm)
+ /*
+ * This handles the case below:
+ * amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded
+ *
+ * For NAVI10 and later ASICs, we rely on PMFW to handle the runpm. To
+ * make that possible, PMFW needs to acknowledge the dstate transition
+ * for both the gfx (function 0) and audio (function 1) functions of
+ * the ASIC.
+ *
+ * The PCI device's initial runpm status is RUNPM_SUSPENDED, and so is
+ * that of the device representing the audio function of the ASIC. That
+ * means a runpm suspend can be kicked on the ASIC even before the sound
+ * driver (snd_hda_intel) is loaded. However, without the dstate
+ * transition notification from the audio function, PMFW cannot handle
+ * BACO entry/exit correctly, and the driver will hang on runpm resume.
+ *
+ * To address this, we fall back to the legacy message way (the driver
+ * masters the timing for BACO entry/exit) when the sound driver is
+ * missing.
+ */
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
else
return smu_v11_0_baco_enter(smu);
@@ -2282,7 +2304,7 @@ static int navi10_baco_exit(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (adev->in_runpm) {
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */
msleep(10);
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 5e292c3f5050..ca57221e3962 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1058,6 +1058,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
uint32_t min_value, max_value;
uint32_t smu_version;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
@@ -1180,7 +1182,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
if (!smu->od_enabled || !od_table || !od_settings)
break;
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN,
@@ -2187,7 +2189,7 @@ static int sienna_cichlid_baco_enter(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (adev->in_runpm)
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
else
return smu_v11_0_baco_enter(smu);
@@ -2197,7 +2199,7 @@ static int sienna_cichlid_baco_exit(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (adev->in_runpm) {
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */
msleep(10);
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 3a3421452e57..f6ef0ce6e9e2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -589,10 +589,12 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
if (ret)
return ret;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_OD_SCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -601,7 +603,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
break;
case SMU_OD_CCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -610,7 +612,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
break;
case SMU_OD_RANGE:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
@@ -688,10 +690,12 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
if (ret)
return ret;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_OD_SCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -700,7 +704,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
break;
case SMU_OD_CCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -709,7 +713,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
break;
case SMU_OD_RANGE:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 5aa175e12a78..145f13b8c977 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -497,6 +497,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
if (ret)
return ret;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_OD_RANGE:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index ab652028e003..5019903db492 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -733,15 +733,19 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
uint32_t freq_values[3] = {0};
uint32_t min_clk, max_clk;
- if (amdgpu_ras_intr_triggered())
- return sysfs_emit(buf, "unavailable\n");
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ if (amdgpu_ras_intr_triggered()) {
+ size += sysfs_emit_at(buf, size, "unavailable\n");
+ return size;
+ }
dpm_context = smu_dpm->dpm_context;
switch (type) {
case SMU_OD_SCLK:
- size = sysfs_emit(buf, "%s:\n", "GFXCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
fallthrough;
case SMU_SCLK:
ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
@@ -795,7 +799,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
break;
case SMU_OD_MCLK:
- size = sysfs_emit(buf, "%s:\n", "MCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
fallthrough;
case SMU_MCLK:
ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 627ba2eec7fd..a403657151ba 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -1052,16 +1052,18 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
switch (clk_type) {
case SMU_OD_SCLK:
- size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
break;
case SMU_OD_RANGE:
- size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 66711ab24c15..843d2cbfc71d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -1053,3 +1053,24 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
return ret;
}
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
+{
+ struct pci_dev *p = NULL;
+ bool snd_driver_loaded;
+
+ /*
+ * If the ASIC comes with no audio function, we always assume
+ * it is "enabled".
+ */
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+ adev->pdev->bus->number, 1);
+ if (!p)
+ return true;
+
+ snd_driver_loaded = pci_is_enabled(p);
+
+ pci_dev_put(p);
+
+ return snd_driver_loaded;
+}
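A note on the lookup above: the third argument of pci_get_domain_bus_and_slot() is a devfn, and the literal 1 encodes device 0, function 1, i.e. the audio function of a GPU sitting at device 0 of its bus. A tiny standalone sketch of the devfn packing; the macros mirror the kernel's linux/pci.h definitions:

#include <stdio.h>

/* devfn packs 5 bits of device number and 3 bits of function number. */
#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)       (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)       ((devfn) & 0x07)

int main(void)
{
	/* The raw devfn 1 passed above decodes as device 0, function 1. */
	unsigned int devfn = PCI_DEVFN(0, 1);

	printf("devfn=%u device=%u function=%u\n",
	       devfn, PCI_SLOT(devfn), PCI_FUNC(devfn));
	return 0;
}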
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 16993daa2ae0..beea03810bca 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -110,5 +110,20 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
int smu_cmn_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state);
+/*
+ * Helper function to make sysfs_emit_at() happy. Align buf down to
+ * the start of its page and record the offset within that page.
+ */
+static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset)
+{
+ if (!*buf || !offset)
+ return;
+
+ *offset = offset_in_page(*buf);
+ *buf -= *offset;
+}
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
+
#endif
#endif
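sysfs_emit_at() insists on a page-aligned buffer, so callers that receive a pointer somewhere inside the page align it back down and carry the difference as the starting offset, as the helper above does. A userspace sketch of the same pointer arithmetic, with offset_in_page() modeled as addr & (PAGE_SIZE - 1):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Mirror of the kernel's offset_in_page(): byte offset within a page. */
static unsigned long offset_in_page(const void *p)
{
	return (uintptr_t)p & (PAGE_SIZE - 1);
}

static void get_sysfs_buf(char **buf, int *offset)
{
	if (!*buf || !offset)
		return;

	*offset = offset_in_page(*buf);
	*buf -= *offset;	/* align down to the page start */
}

int main(void)
{
	static _Alignas(4096) char page[PAGE_SIZE];
	char *buf = page + 100;	/* pretend the caller handed us page+100 */
	int size = 0;

	get_sysfs_buf(&buf, &size);
	printf("offset=%d aligned=%d\n", size, buf == page);	/* 100 1 */
	return 0;
}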
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 6bfaefa01818..1e30eaeb0e1b 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1300,18 +1300,6 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
return flags;
}
-static enum drm_connector_status ast_connector_detect(struct drm_connector
- *connector, bool force)
-{
- int r;
-
- r = ast_get_modes(connector);
- if (r <= 0)
- return connector_status_disconnected;
-
- return connector_status_connected;
-}
-
static void ast_connector_destroy(struct drm_connector *connector)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
@@ -1327,7 +1315,6 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
static const struct drm_connector_funcs ast_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
- .detect = ast_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = ast_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -1355,8 +1342,7 @@ static int ast_connector_init(struct drm_device *dev)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
drm_connector_attach_encoder(connector, encoder);
@@ -1425,8 +1411,6 @@ int ast_mode_config_init(struct ast_private *ast)
drm_mode_config_reset(dev);
- drm_kms_helper_poll_init(dev);
-
return 0;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 6325877c5fd6..ea9a79bc9583 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1834,11 +1834,20 @@ static void connector_bad_edid(struct drm_connector *connector,
u8 *edid, int num_blocks)
{
int i;
- u8 num_of_ext = edid[0x7e];
+ u8 last_block;
+
+ /*
+ * 0x7e in the EDID is the number of extension blocks. The EDID
+ * is 1 (base block) + num_ext_blocks big. That means we can treat
+ * the value at 0x7e in the EDID as the _index_ of the last block in the
+ * combined chunk of memory.
+ */
+ last_block = edid[0x7e];
/* Calculate real checksum for the last edid extension block data */
- connector->real_edid_checksum =
- drm_edid_block_checksum(edid + num_of_ext * EDID_LENGTH);
+ if (last_block < num_blocks)
+ connector->real_edid_checksum =
+ drm_edid_block_checksum(edid + last_block * EDID_LENGTH);
if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
return;
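For context on the fix above: an EDID is a sequence of 128-byte blocks, each of which should sum to 0 modulo 256, and byte 0x7e of the base block advertises how many extension blocks follow. The added bound keeps the checksum read inside the blocks actually fetched when that advertised count exceeds num_blocks. A standalone sketch of the indexing, with a helper computing what the block's checksum byte should be (a plausible reading of drm_edid_block_checksum(), not a copy of it):

#include <stdint.h>
#include <stdio.h>

#define EDID_LENGTH 128

/* The value the final byte must hold so all 128 bytes sum to 0 (mod 256). */
static uint8_t edid_expected_checksum(const uint8_t *block)
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < EDID_LENGTH - 1; i++)
		sum += block[i];

	return (uint8_t)(0x100 - sum);
}

static int last_block_checksum(const uint8_t *edid, int num_blocks,
			       uint8_t *out)
{
	uint8_t last_block = edid[0x7e];	/* index of the last block */

	if (last_block >= num_blocks)		/* same bound as the fix */
		return -1;

	*out = edid_expected_checksum(edid + last_block * EDID_LENGTH);
	return 0;
}

int main(void)
{
	static uint8_t edid[2 * EDID_LENGTH];	/* base block + 1 extension */
	uint8_t csum;

	edid[0x7e] = 1;				/* one extension block */
	if (!last_block_checksum(edid, 2, &csum))
		printf("expected checksum byte: 0x%02x\n", csum);
	return 0;
}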
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 3ab078321045..8e7a124d6c5a 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1506,6 +1506,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
+ struct drm_mode_config *config = &dev->mode_config;
int ret = 0;
int crtc_count = 0;
struct drm_connector_list_iter conn_iter;
@@ -1663,6 +1664,11 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* Handle our overallocation */
sizes.surface_height *= drm_fbdev_overalloc;
sizes.surface_height /= 100;
+ if (sizes.surface_height > config->max_height) {
+ drm_dbg_kms(dev, "Fbdev over-allocation too large; clamping height to %d\n",
+ config->max_height);
+ sizes.surface_height = config->max_height;
+ }
/* push down into drivers */
ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
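The clamp added above caps the percentage-based over-allocation at the hardware's maximum framebuffer height. A trivial standalone model of the arithmetic, with all values made up:

#include <stdio.h>

/* Percentage over-allocation, as with the drm_fbdev_overalloc parameter. */
static unsigned int overalloc_height(unsigned int height,
				     unsigned int overalloc_pct,
				     unsigned int max_height)
{
	unsigned int h = height * overalloc_pct / 100;

	return h > max_height ? max_height : h;	/* clamp as in the fix */
}

int main(void)
{
	/* 1080 lines, 200% overalloc, hardware cap of 2048 -> 2048. */
	printf("%u\n", overalloc_height(1080, 200, 2048));
	return 0;
}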
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 76d38561c910..cf741c5c82d2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
if (switch_mmu_context) {
struct etnaviv_iommu_context *old_context = gpu->mmu_context;
- etnaviv_iommu_context_get(mmu_context);
- gpu->mmu_context = mmu_context;
+ gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
etnaviv_iommu_context_put(old_context);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 8f1b5af47dd6..f0b2540e60e4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -294,8 +294,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
list_del(&mapping->obj_node);
}
- etnaviv_iommu_context_get(mmu_context);
- mapping->context = mmu_context;
+ mapping->context = etnaviv_iommu_context_get(mmu_context);
mapping->use = 1;
ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 4dd7d9d541c0..486259e154af 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
goto err_submit_objects;
submit->ctx = file->driver_priv;
- etnaviv_iommu_context_get(submit->ctx->mmu);
- submit->mmu_context = submit->ctx->mmu;
+ submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
submit->exec_state = args->exec_state;
submit->flags = args->flags;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index c297fffe06eb..cc5b07f86346 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -569,6 +569,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
/* We rely on the GPU running, so program the clock */
etnaviv_gpu_update_clock(gpu);
+ gpu->fe_running = false;
+ gpu->exec_state = -1;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = NULL;
+
return 0;
}
@@ -637,19 +643,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
}
+
+ gpu->fe_running = true;
}
-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
- &gpu->mmu_context->cmdbuf_mapping);
u16 prefetch;
+ u32 address;
/* setup the MMU */
- etnaviv_iommu_restore(gpu, gpu->mmu_context);
+ etnaviv_iommu_restore(gpu, context);
/* Start command processor */
prefetch = etnaviv_buffer_init(gpu);
+ address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+ &gpu->mmu_context->cmdbuf_mapping);
etnaviv_gpu_start_fe(gpu, address, prefetch);
}
@@ -832,7 +842,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
/* Now program the hardware */
mutex_lock(&gpu->lock);
etnaviv_gpu_hw_init(gpu);
- gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
@@ -1057,8 +1066,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
spin_unlock(&gpu->event_spinlock);
etnaviv_gpu_hw_init(gpu);
- gpu->exec_state = -1;
- gpu->mmu_context = NULL;
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
@@ -1370,14 +1377,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
goto out_unlock;
}
- if (!gpu->mmu_context) {
- etnaviv_iommu_context_get(submit->mmu_context);
- gpu->mmu_context = submit->mmu_context;
- etnaviv_gpu_start_fe_idleloop(gpu);
- } else {
- etnaviv_iommu_context_get(gpu->mmu_context);
- submit->prev_mmu_context = gpu->mmu_context;
- }
+ if (!gpu->fe_running)
+ etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
+
+ if (submit->prev_mmu_context)
+ etnaviv_iommu_context_put(submit->prev_mmu_context);
+ submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
if (submit->nr_pmrs) {
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
@@ -1579,7 +1584,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
- if (gpu->initialized && gpu->mmu_context) {
+ if (gpu->initialized && gpu->fe_running) {
/* Replace the last WAIT with END */
mutex_lock(&gpu->lock);
etnaviv_buffer_end(gpu);
@@ -1592,8 +1597,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
*/
etnaviv_gpu_wait_idle(gpu, 100);
- etnaviv_iommu_context_put(gpu->mmu_context);
- gpu->mmu_context = NULL;
+ gpu->fe_running = false;
}
gpu->exec_state = -1;
@@ -1741,6 +1745,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
etnaviv_gpu_hw_suspend(gpu);
#endif
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+
if (gpu->initialized) {
etnaviv_cmdbuf_free(&gpu->buffer);
etnaviv_iommu_global_fini(gpu);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 8ea48697d132..1c75c8ed5bce 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -101,6 +101,7 @@ struct etnaviv_gpu {
struct workqueue_struct *wq;
struct drm_gpu_scheduler sched;
bool initialized;
+ bool fe_running;
/* 'ring'-buffer: */
struct etnaviv_cmdbuf buffer;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 1a7c89a67bea..afe5dd6a9925 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
u32 pgtable;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
/* set base addresses */
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index f8bf488e9d71..d664ae29ae20 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
return;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
prefetch = etnaviv_buffer_config_mmuv2(gpu,
(u32)v2_context->mtlb_dma,
(u32)context->global->bad_page_dma);
@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
return;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
lower_32_bits(context->global->v2.pta_dma));
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index dab1b58006d8..9fb1a2aadbcb 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -199,6 +199,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
*/
list_for_each_entry_safe(m, n, &list, scan_node) {
etnaviv_iommu_remove_mapping(context, m);
+ etnaviv_iommu_context_put(m->context);
m->context = NULL;
list_del_init(&m->mmu_node);
list_del_init(&m->scan_node);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index d1d6902fd13b..e4a0b7d09c2e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
struct etnaviv_cmdbuf_suballoc *suballoc);
-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+static inline struct etnaviv_iommu_context *
+etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
{
kref_get(&ctx->refcount);
+ return ctx;
}
void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
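The API change above converts the reference-taking helper from returning void to returning the context pointer, so callers take the reference and assign in a single expression instead of two steps that can drift apart. A minimal model of the idiom, with a plain counter standing in for the kref:

#include <stdio.h>

struct ctx {
	int refcount;
};

/* Take a reference and hand the pointer back, kref_get()-style. */
static struct ctx *ctx_get(struct ctx *c)
{
	c->refcount++;
	return c;
}

static void ctx_put(struct ctx *c)
{
	c->refcount--;	/* real code would free the object at zero */
}

int main(void)
{
	struct ctx context = { .refcount = 1 };
	struct ctx *user;

	/* One expression: the pointer cannot be assigned without the ref. */
	user = ctx_get(&context);
	printf("%d\n", user->refcount);	/* 2 */
	ctx_put(user);
	return 0;
}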
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 9870c4e6af36..b5001db7a95c 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -793,7 +793,6 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct decon_context *ctx;
- struct resource *res;
int ret;
int i;
@@ -818,8 +817,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
ctx->clks[i] = clk;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->addr = devm_ioremap_resource(dev, res);
+ ctx->addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->addr))
return PTR_ERR(ctx->addr);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index e39fac889edc..8d137857818c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1738,7 +1738,6 @@ static const struct component_ops exynos_dsi_component_ops = {
static int exynos_dsi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct exynos_dsi *dsi;
int ret, i;
@@ -1789,8 +1788,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
}
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->reg_base = devm_ioremap_resource(dev, res);
+ dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->reg_base))
return PTR_ERR(dsi->reg_base);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index a3c718148c45..ecfd82d0afb7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -85,7 +85,6 @@ struct fimc_scaler {
/*
* A structure of fimc context.
*
- * @regs_res: register resources.
* @regs: memory mapped io registers.
* @lock: locking of operations.
* @clocks: fimc clocks.
@@ -103,7 +102,6 @@ struct fimc_context {
struct exynos_drm_ipp_formats *formats;
unsigned int num_formats;
- struct resource *regs_res;
void __iomem *regs;
spinlock_t lock;
struct clk *clocks[FIMC_CLKS_MAX];
@@ -1327,8 +1325,7 @@ static int fimc_probe(struct platform_device *pdev)
ctx->num_formats = num_formats;
/* resource memory */
- ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+ ctx->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 700ca4fa6665..c735e53939d8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1202,9 +1202,7 @@ static int fimd_probe(struct platform_device *pdev)
return PTR_ERR(ctx->lcd_clk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- ctx->regs = devm_ioremap_resource(dev, res);
+ ctx->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index b00230626c6a..471fd6c8135f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1449,7 +1449,6 @@ static const struct component_ops g2d_component_ops = {
static int g2d_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct g2d_data *g2d;
int ret;
@@ -1491,9 +1490,7 @@ static int g2d_probe(struct platform_device *pdev)
clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- g2d->regs = devm_ioremap_resource(dev, res);
+ g2d->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(g2d->regs)) {
ret = PTR_ERR(g2d->regs);
goto err_put_clk;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 90d7bf906885..166a80262896 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -86,7 +86,6 @@ struct gsc_scaler {
/*
* A structure of gsc context.
*
- * @regs_res: register resources.
* @regs: memory mapped io registers.
* @gsc_clk: gsc gate clock.
* @sc: scaler infomations.
@@ -103,7 +102,6 @@ struct gsc_context {
struct exynos_drm_ipp_formats *formats;
unsigned int num_formats;
- struct resource *regs_res;
void __iomem *regs;
const char **clk_names;
struct clk *clocks[GSC_MAX_CLOCKS];
@@ -1272,9 +1270,7 @@ static int gsc_probe(struct platform_device *pdev)
}
}
- /* resource memory */
- ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+ ctx->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index ee61be4cf152..dec7df35baa9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -278,7 +278,6 @@ static const struct component_ops rotator_component_ops = {
static int rotator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *regs_res;
struct rot_context *rot;
const struct rot_variant *variant;
int irq;
@@ -292,8 +291,7 @@ static int rotator_probe(struct platform_device *pdev)
rot->formats = variant->formats;
rot->num_formats = variant->num_formats;
rot->dev = dev;
- regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rot->regs = devm_ioremap_resource(dev, regs_res);
+ rot->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rot->regs))
return PTR_ERR(rot->regs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index f9ae5b038d59..3a7851b7dc66 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -485,7 +485,6 @@ static const struct component_ops scaler_component_ops = {
static int scaler_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *regs_res;
struct scaler_context *scaler;
int irq;
int ret, i;
@@ -498,8 +497,7 @@ static int scaler_probe(struct platform_device *pdev)
(struct scaler_data *)of_device_get_match_data(dev);
scaler->dev = dev;
- regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- scaler->regs = devm_ioremap_resource(dev, regs_res);
+ scaler->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(scaler->regs))
return PTR_ERR(scaler->regs);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index c769dec576de..7655142a4651 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1957,7 +1957,6 @@ static int hdmi_probe(struct platform_device *pdev)
struct hdmi_audio_infoframe *audio_infoframe;
struct device *dev = &pdev->dev;
struct hdmi_context *hdata;
- struct resource *res;
int ret;
hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
@@ -1979,8 +1978,7 @@ static int hdmi_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdata->regs = devm_ioremap_resource(dev, res);
+ hdata->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdata->regs)) {
ret = PTR_ERR(hdata->regs);
return ret;
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm.h b/drivers/gpu/drm/hyperv/hyperv_drm.h
index 886add4f9cd0..d2d8582b36df 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm.h
+++ b/drivers/gpu/drm/hyperv/hyperv_drm.h
@@ -46,6 +46,7 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv);
int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp);
int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
u32 w, u32 h, u32 pitch);
+int hyperv_hide_hw_ptr(struct hv_device *hdev);
int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect);
int hyperv_connect_vsp(struct hv_device *hdev);
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
index 6dd4717d3e1e..8c97a20dfe23 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -101,6 +101,7 @@ static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe,
struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ hyperv_hide_hw_ptr(hv->hdev);
hyperv_update_situation(hv->hdev, 1, hv->screen_depth,
crtc_state->mode.hdisplay,
crtc_state->mode.vdisplay,
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
index 6d4bdccfbd1a..c0155c6271bf 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
@@ -299,6 +299,55 @@ int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
return 0;
}
+/*
+ * Hyper-V supports a hardware cursor feature. It's not used by the Linux VM,
+ * but the Hyper-V host still draws a point as an extra mouse pointer,
+ * which is unwanted, especially when Xorg is running.
+ *
+ * The hyperv_fb driver uses synthvid_send_ptr() to hide the unwanted
+ * pointer, by setting msg.ptr_pos.is_visible = 1 and setting the
+ * msg.ptr_shape.data. Note: setting msg.ptr_pos.is_visible to 0 doesn't
+ * work in tests.
+ *
+ * Copy synthvid_send_ptr() to hyperv_drm and rename it to
+ * hyperv_hide_hw_ptr(). Note: hyperv_hide_hw_ptr() is also called in the
+ * handler of the SYNTHVID_FEATURE_CHANGE event, otherwise the host still
+ * draws an extra unwanted mouse pointer after the VM Connection window is
+ * closed and reopened.
+ */
+int hyperv_hide_hw_ptr(struct hv_device *hdev)
+{
+ struct synthvid_msg msg;
+
+ memset(&msg, 0, sizeof(struct synthvid_msg));
+ msg.vid_hdr.type = SYNTHVID_POINTER_POSITION;
+ msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+ sizeof(struct synthvid_pointer_position);
+ msg.ptr_pos.is_visible = 1;
+ msg.ptr_pos.video_output = 0;
+ msg.ptr_pos.image_x = 0;
+ msg.ptr_pos.image_y = 0;
+ hyperv_sendpacket(hdev, &msg);
+
+ memset(&msg, 0, sizeof(struct synthvid_msg));
+ msg.vid_hdr.type = SYNTHVID_POINTER_SHAPE;
+ msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+ sizeof(struct synthvid_pointer_shape);
+ msg.ptr_shape.part_idx = SYNTHVID_CURSOR_COMPLETE;
+ msg.ptr_shape.is_argb = 1;
+ msg.ptr_shape.width = 1;
+ msg.ptr_shape.height = 1;
+ msg.ptr_shape.hot_x = 0;
+ msg.ptr_shape.hot_y = 0;
+ msg.ptr_shape.data[0] = 0;
+ msg.ptr_shape.data[1] = 1;
+ msg.ptr_shape.data[2] = 1;
+ msg.ptr_shape.data[3] = 1;
+ hyperv_sendpacket(hdev, &msg);
+
+ return 0;
+}
+
int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect)
{
struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
@@ -392,8 +441,11 @@ static void hyperv_receive_sub(struct hv_device *hdev)
return;
}
- if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE)
+ if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) {
hv->dirt_needed = msg->feature_chg.is_dirt_needed;
+ if (hv->dirt_needed)
+ hyperv_hide_hw_ptr(hv->hdev);
+ }
}
static void hyperv_receive(void *ctx)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 642a5b5a1b81..335ba9f43d8f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -19,7 +19,6 @@ subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
# clang warnings
subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
-subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 43ec7fcd3f5d..a3eae3f3eadc 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1577,8 +1577,14 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *intel_crtc;
+ enum pipe pipe;
+
+ if (!crtc_state)
+ return;
+
+ intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ pipe = intel_crtc->pipe;
/* wa verify 1409054076:icl,jsl,ehl */
if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B &&
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 7cfe91fc05f2..68abeaf2d7d4 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -186,13 +186,16 @@ void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
acpi_handle dhandle;
+ union acpi_object *obj;
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return;
- acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID,
- INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL);
+ obj = acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID,
+ INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL);
+ if (obj)
+ ACPI_FREE(obj);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 532237588511..4e0f96bf6158 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -1308,8 +1308,9 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
else
aud_freq = aud_freq_init;
- /* use BIOS provided value for TGL unless it is a known bad value */
- if (IS_TIGERLAKE(dev_priv) && aud_freq_init != AUD_FREQ_TGL_BROKEN)
+ /* use BIOS provided value for TGL and RKL unless it is a known bad value */
+ if ((IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv)) &&
+ aud_freq_init != AUD_FREQ_TGL_BROKEN)
aud_freq = aud_freq_init;
drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index e86e6ed2d3bf..fd71346aac7b 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -451,13 +451,23 @@ parse_lfp_backlight(struct drm_i915_private *i915,
}
i915->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
- if (bdb->version >= 191 &&
- get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
- const struct lfp_backlight_control_method *method;
+ if (bdb->version >= 191) {
+ size_t exp_size;
- method = &backlight_data->backlight_control[panel_type];
- i915->vbt.backlight.type = method->type;
- i915->vbt.backlight.controller = method->controller;
+ if (bdb->version >= 236)
+ exp_size = sizeof(struct bdb_lfp_backlight_data);
+ else if (bdb->version >= 234)
+ exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
+ else
+ exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
+
+ if (get_blocksize(backlight_data) >= exp_size) {
+ const struct lfp_backlight_control_method *method;
+
+ method = &backlight_data->backlight_control[panel_type];
+ i915->vbt.backlight.type = method->type;
+ i915->vbt.backlight.controller = method->controller;
+ }
}
i915->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index e91e0e0191fb..4b94256d7319 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -222,31 +222,42 @@ static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
struct intel_sa_info {
u16 displayrtids;
- u8 deburst, deprogbwlimit;
+ u8 deburst, deprogbwlimit, derating;
};
static const struct intel_sa_info icl_sa_info = {
.deburst = 8,
.deprogbwlimit = 25, /* GB/s */
.displayrtids = 128,
+ .derating = 10,
};
static const struct intel_sa_info tgl_sa_info = {
.deburst = 16,
.deprogbwlimit = 34, /* GB/s */
.displayrtids = 256,
+ .derating = 10,
};
static const struct intel_sa_info rkl_sa_info = {
.deburst = 16,
.deprogbwlimit = 20, /* GB/s */
.displayrtids = 128,
+ .derating = 10,
};
static const struct intel_sa_info adls_sa_info = {
.deburst = 16,
.deprogbwlimit = 38, /* GB/s */
.displayrtids = 256,
+ .derating = 10,
+};
+
+static const struct intel_sa_info adlp_sa_info = {
+ .deburst = 16,
+ .deprogbwlimit = 38, /* GB/s */
+ .displayrtids = 256,
+ .derating = 20,
};
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
@@ -302,7 +313,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);
bi->deratedbw[j] = min(maxdebw,
- bw * 9 / 10); /* 90% */
+ bw * (100 - sa->derating) / 100);
drm_dbg_kms(&dev_priv->drm,
"BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
@@ -400,7 +411,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (IS_DG2(dev_priv))
dg2_get_bw_info(dev_priv);
- else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
+ else if (IS_ALDERLAKE_P(dev_priv))
+ icl_get_bw_info(dev_priv, &adlp_sa_info);
+ else if (IS_ALDERLAKE_S(dev_priv))
icl_get_bw_info(dev_priv, &adls_sa_info);
else if (IS_ROCKETLAKE(dev_priv))
icl_get_bw_info(dev_priv, &rkl_sa_info);
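The derating change above replaces the hard-coded 90% factor with a per-platform value, so ADL-P can derate by 20% while the earlier platforms keep 10%. A standalone model of the computation, numbers made up:

#include <stdio.h>

/* min(maxdebw, bw * (100 - derating) / 100), as in icl_get_bw_info(). */
static unsigned int derated_bw(unsigned int bw, unsigned int maxdebw,
			       unsigned int derating)
{
	unsigned int d = bw * (100 - derating) / 100;

	return d < maxdebw ? d : maxdebw;
}

int main(void)
{
	/* Same raw bandwidth: 10% derating keeps 900, 20% keeps 800. */
	printf("%u %u\n", derated_bw(1000, 2000, 10), derated_bw(1000, 2000, 20));
	return 0;
}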
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 9903a78df896..bd184325d0c7 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -3807,7 +3807,13 @@ void hsw_ddi_get_config(struct intel_encoder *encoder,
static void intel_ddi_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- if (intel_crtc_has_dp_encoder(crtc_state))
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_phy_is_tc(i915, phy))
+ intel_tc_port_sanitize(enc_to_dig_port(encoder));
+
+ if (crtc_state && intel_crtc_has_dp_encoder(crtc_state))
intel_dp_sync_state(encoder, crtc_state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 134a6acbd8fb..17f44ffea586 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -13082,18 +13082,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
readout_plane_state(dev_priv);
for_each_intel_encoder(dev, encoder) {
+ struct intel_crtc_state *crtc_state = NULL;
+
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
- struct intel_crtc_state *crtc_state;
-
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
intel_encoder_get_config(encoder, crtc_state);
- if (encoder->sync_state)
- encoder->sync_state(encoder, crtc_state);
/* read out to slave crtc as well for bigjoiner */
if (crtc_state->bigjoiner) {
@@ -13108,6 +13106,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
encoder->base.crtc = NULL;
}
+ if (encoder->sync_state)
+ encoder->sync_state(encoder, crtc_state);
+
drm_dbg_kms(&dev_priv->drm,
"[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id, encoder->base.name,
@@ -13390,17 +13391,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_readout_hw_state(dev);
/* HW state is read out, now we need to sanitize this mess. */
-
- /* Sanitize the TypeC port mode upfront, encoders depend on this */
- for_each_intel_encoder(dev, encoder) {
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
- /* We need to sanitize only the MST primary port. */
- if (encoder->type != INTEL_OUTPUT_DP_MST &&
- intel_phy_is_tc(dev_priv, phy))
- intel_tc_port_sanitize(enc_to_dig_port(encoder));
- }
-
get_encoder_power_domains(dev_priv);
if (HAS_PCH_IBX(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 3c3c6cb5c0df..b3c8e1c450ef 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -805,11 +805,14 @@ void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
*/
void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
{
+ int id;
+
if (!HAS_DMC(dev_priv))
return;
intel_dmc_ucode_suspend(dev_priv);
drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
- kfree(dev_priv->dmc.dmc_info[DMC_FW_MAIN].payload);
+ for (id = 0; id < DMC_FW_MAX; id++)
+ kfree(dev_priv->dmc.dmc_info[id].payload);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 04175f359fd6..abe3d61b6243 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -2445,11 +2445,14 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
*/
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
- sizeof(intel_dp->edp_dpcd))
+ sizeof(intel_dp->edp_dpcd)) {
drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
(int)sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
+ intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
+ }
+
/*
* This has to be called after intel_dp->edp_dpcd is filled, PSR checks
* for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 053a3c2f7267..508a514c5e37 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -848,7 +848,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
}
if (ret)
- intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
+ ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
if (intel_dp->set_idle_link_train)
intel_dp->set_idle_link_train(intel_dp, crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 330077c2e588..a2108a8f544d 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -814,6 +814,11 @@ struct lfp_brightness_level {
u16 reserved;
} __packed;
+#define EXP_BDB_LFP_BL_DATA_SIZE_REV_191 \
+ offsetof(struct bdb_lfp_backlight_data, brightness_level)
+#define EXP_BDB_LFP_BL_DATA_SIZE_REV_234 \
+ offsetof(struct bdb_lfp_backlight_data, brightness_precision_bits)
+
struct bdb_lfp_backlight_data {
u8 entry_size;
struct lfp_backlight_data_entry data[16];
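The two EXP_BDB_LFP_BL_DATA_SIZE_REV_* macros above lean on the fact that newer VBT revisions only append members, so offsetof() of the first member added in the next revision equals the expected size of the previous one. A standalone sketch of the pattern with an invented struct (not the real VBT layout):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Members added in later revisions sit at the end, so
 * offsetof(first-new-member) is the size of every older revision.
 */
struct example_block {
	uint8_t  entry_size;
	uint16_t level[16];			/* rev A */
	uint8_t  precision_bits[16];		/* added in rev B */
	uint16_t brightness[16];		/* added in rev C */
};

#define EXP_SIZE_REV_A offsetof(struct example_block, precision_bits)
#define EXP_SIZE_REV_B offsetof(struct example_block, brightness)

int main(void)
{
	printf("revA=%zu revB=%zu full=%zu\n",
	       EXP_SIZE_REV_A, EXP_SIZE_REV_B, sizeof(struct example_block));
	return 0;
}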
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index cff72679ad7c..166bb46408a9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -937,6 +937,10 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
unsigned int n;
e = alloc_engines(num_engines);
+ if (!e)
+ return ERR_PTR(-ENOMEM);
+ e->num_engines = num_engines;
+
for (n = 0; n < num_engines; n++) {
struct intel_context *ce;
int ret;
@@ -970,7 +974,6 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
goto free_engines;
}
}
- e->num_engines = num_engines;
return e;
@@ -986,6 +989,9 @@ void i915_gem_context_release(struct kref *ref)
trace_i915_context_free(ctx);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ if (ctx->syncobj)
+ drm_syncobj_put(ctx->syncobj);
+
mutex_destroy(&ctx->engines_mutex);
mutex_destroy(&ctx->lut_mutex);
@@ -1205,9 +1211,6 @@ static void context_close(struct i915_gem_context *ctx)
if (vm)
i915_vm_close(vm);
- if (ctx->syncobj)
- drm_syncobj_put(ctx->syncobj);
-
ctx->file_priv = ERR_PTR(-EBADF);
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index e382b7f2353b..5ab136ffdeb2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -118,7 +118,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
intel_wakeref_t wakeref = 0;
unsigned long count = 0;
unsigned long scanned = 0;
- int err;
+ int err = 0;
/* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
@@ -242,12 +242,15 @@ skip:
list_splice_tail(&still_in_list, phase->list);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
if (err)
- return err;
+ break;
}
if (shrink & I915_SHRINK_BOUND)
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ if (err)
+ return err;
+
if (nr_scanned)
*nr_scanned += scanned;
return count;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 35eedc14f522..6ea13159bffc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -356,11 +356,8 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- if (likely(obj)) {
- /* This releases all gem object bindings to the backend. */
+ if (likely(obj))
i915_ttm_free_cached_io_st(obj);
- __i915_gem_free_object(obj);
- }
}
static struct intel_memory_region *
@@ -875,8 +872,12 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ /* This releases all gem object bindings to the backend. */
+ __i915_gem_free_object(obj);
+
i915_gem_object_release_memory_region(obj);
mutex_destroy(&obj->ttm.get_io_page.lock);
+
if (obj->ttm.created)
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index ffae7df5e4d7..4a6bb64c3a35 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -59,13 +59,13 @@ static int igt_dmabuf_import_self(void *arg)
err = PTR_ERR(import);
goto out_dmabuf;
}
+ import_obj = to_intel_bo(import);
if (import != &obj->base) {
pr_err("i915_gem_prime_import created a new object!\n");
err = -EINVAL;
goto out_import;
}
- import_obj = to_intel_bo(import);
i915_gem_object_lock(import_obj, NULL);
err = __i915_gem_object_get_pages(import_obj);
@@ -128,6 +128,8 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg)
pr_err("i915_gem_prime_import failed with the wrong err=%ld\n",
PTR_ERR(import));
err = PTR_ERR(import);
+ } else {
+ err = 0;
}
dma_buf_put(dmabuf);
@@ -176,6 +178,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
err = PTR_ERR(import);
goto out_dmabuf;
}
+ import_obj = to_intel_bo(import);
if (import == &obj->base) {
pr_err("i915_gem_prime_import reused gem object!\n");
@@ -183,8 +186,6 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
goto out_import;
}
- import_obj = to_intel_bo(import);
-
i915_gem_object_lock(import_obj, NULL);
err = __i915_gem_object_get_pages(import_obj);
if (err) {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index b20f5621f62b..a2c34e5a1c54 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -581,6 +581,20 @@ static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
return I915_MMAP_TYPE_GTT;
}
+static struct drm_i915_gem_object *
+create_sys_or_internal(struct drm_i915_private *i915,
+ unsigned long size)
+{
+ if (HAS_LMEM(i915)) {
+ struct intel_memory_region *sys_region =
+ i915->mm.regions[INTEL_REGION_SMEM];
+
+ return __i915_gem_object_create_user(i915, size, &sys_region, 1);
+ }
+
+ return i915_gem_object_create_internal(i915, size);
+}
+
static bool assert_mmap_offset(struct drm_i915_private *i915,
unsigned long size,
int expected)
@@ -589,7 +603,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
u64 offset;
int ret;
- obj = i915_gem_object_create_internal(i915, size);
+ obj = create_sys_or_internal(i915, size);
if (IS_ERR(obj))
return expected && expected == PTR_ERR(obj);
@@ -633,6 +647,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
struct drm_mm_node *hole, *next;
int loop, err = 0;
u64 offset;
+ int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
/* Disable background reaper */
disable_retire_worker(i915);
@@ -683,14 +698,14 @@ static int igt_mmap_offset_exhaustion(void *arg)
}
/* Too large */
- if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
+ if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
err = -EINVAL;
goto out;
}
/* Fill the hole, further allocation attempts should then fail */
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = create_sys_or_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
pr_err("Unable to create object for reclaimed hole\n");
@@ -703,7 +718,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto err_obj;
}
- if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
+ if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
err = -EINVAL;
goto err_obj;
@@ -839,10 +854,9 @@ static int wc_check(struct drm_i915_gem_object *obj)
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
bool no_map;
- if (HAS_LMEM(i915))
+ if (obj->ops->mmap_offset)
return type == I915_MMAP_TYPE_FIXED;
else if (type == I915_MMAP_TYPE_FIXED)
return false;
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 745e84c72c90..17ca4dc4d0cb 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -362,8 +362,9 @@ static int __intel_context_active(struct i915_active *active)
return 0;
}
-static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
- enum i915_sw_fence_notify state)
+static int __i915_sw_fence_call
+sw_fence_dummy_notify(struct i915_sw_fence *sf,
+ enum i915_sw_fence_notify state)
{
return NOTIFY_DONE;
}
@@ -420,6 +421,7 @@ void intel_context_fini(struct intel_context *ce)
mutex_destroy(&ce->pin_mutex);
i915_active_fini(&ce->active);
+ i915_sw_fence_fini(&ce->guc_blocked);
}
void i915_context_module_exit(void)
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index d812b27835f8..0a03fbed9f9b 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -882,8 +882,6 @@ void intel_rps_park(struct intel_rps *rps)
if (!intel_rps_is_enabled(rps))
return;
- GEM_BUG_ON(atomic_read(&rps->num_waiters));
-
if (!intel_rps_clear_active(rps))
return;
@@ -1973,8 +1971,14 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
u32 intel_rps_read_punit_req(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
+ intel_wakeref_t wakeref;
+ u32 freq = 0;
- return intel_uncore_read(uncore, GEN6_RPNSWREQ);
+ with_intel_runtime_pm_if_in_use(rpm, wakeref)
+ freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+
+ return freq;
}
static u32 intel_rps_get_req(u32 pureq)
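
The rework above reads GEN6_RPNSWREQ only while a runtime-PM reference is already held and reports 0 otherwise, instead of waking the device for the read. A hedged stand-in for that shape (the real with_intel_runtime_pm_if_in_use() takes a wakeref; the guard below is a plain flag):

#include <stdbool.h>
#include <stdio.h>

struct dev { bool in_use; unsigned int reg; };

/* run the read only when the device already holds a power reference;
 * never trigger a wakeup from this path */
static unsigned int read_req_if_in_use(struct dev *d)
{
	unsigned int freq = 0;    /* default when the device is asleep */

	if (d->in_use)
		freq = d->reg;
	return freq;
}

int main(void)
{
	struct dev asleep = { .in_use = false, .reg = 0x1234 };
	struct dev awake  = { .in_use = true,  .reg = 0x1234 };

	printf("asleep: 0x%x awake: 0x%x\n",
	       read_req_if_in_use(&asleep), read_req_if_in_use(&awake));
	return 0;
}
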
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
index 99e1fad5ca20..c9086a600bce 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
@@ -102,11 +102,11 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
* | +-------+--------------------------------------------------------------+
* | | 7:0 | NUM_DWORDS = length (in dwords) of the embedded HXG message |
* +---+-------+--------------------------------------------------------------+
- * | 1 | 31:0 | +--------------------------------------------------------+ |
- * +---+-------+ | | |
- * |...| | | Embedded `HXG Message`_ | |
- * +---+-------+ | | |
- * | n | 31:0 | +--------------------------------------------------------+ |
+ * | 1 | 31:0 | |
+ * +---+-------+ |
+ * |...| | [Embedded `HXG Message`_] |
+ * +---+-------+ |
+ * | n | 31:0 | |
* +---+-------+--------------------------------------------------------------+
*/
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
index bbf1ddb77434..9baa3cb07d13 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
@@ -38,11 +38,11 @@
* +---+-------+--------------------------------------------------------------+
* | | Bits | Description |
* +===+=======+==============================================================+
- * | 0 | 31:0 | +--------------------------------------------------------+ |
- * +---+-------+ | | |
- * |...| | | Embedded `HXG Message`_ | |
- * +---+-------+ | | |
- * | n | 31:0 | +--------------------------------------------------------+ |
+ * | 0 | 31:0 | |
+ * +---+-------+ |
+ * |...| | [Embedded `HXG Message`_] |
+ * +---+-------+ |
+ * | n | 31:0 | |
* +---+-------+--------------------------------------------------------------+
*/
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index b104fb7607eb..86c318516e14 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -172,11 +172,6 @@ void intel_uc_driver_remove(struct intel_uc *uc)
__uc_free_load_err_log(uc);
}
-static inline bool guc_communication_enabled(struct intel_guc *guc)
-{
- return intel_guc_ct_enabled(&guc->ct);
-}
-
/*
* Events triggered while CT buffers are disabled are logged in the SCRATCH_15
* register using the same bits used in the CT message payload. Since our
@@ -210,7 +205,7 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
static void guc_handle_mmio_msg(struct intel_guc *guc)
{
/* we need communication to be enabled to reply to GuC */
- GEM_BUG_ON(!guc_communication_enabled(guc));
+ GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));
spin_lock_irq(&guc->irq_lock);
if (guc->mmio_msg) {
@@ -226,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc)
struct drm_i915_private *i915 = gt->i915;
int ret;
- GEM_BUG_ON(guc_communication_enabled(guc));
+ GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));
ret = i915_inject_probe_error(i915, -ENXIO);
if (ret)
@@ -662,7 +657,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
return 0;
/* Make sure we enable communication if and only if it's disabled */
- GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));
+ GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));
if (enable_communication)
guc_enable_communication(guc);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b56a8e37a3cd..1bb1be5c48c8 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -576,7 +576,7 @@ retry:
/* No one is going to touch shadow bb from now on. */
i915_gem_object_flush_map(bb->obj);
- i915_gem_object_unlock(bb->obj);
+ i915_gem_ww_ctx_fini(&ww);
}
}
return 0;
@@ -630,7 +630,7 @@ retry:
return ret;
}
- i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
+ i915_gem_ww_ctx_fini(&ww);
/* FIXME: we are not tracking our pinned VMA leaving it
* up to the core to fix up the stray pin_count upon
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 664970f2bc62..4037030f0984 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8193,6 +8193,11 @@ enum {
#define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3)
#define HSW_FBCQ_DIS (1 << 22)
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
+#define SKL_PLANE1_STRETCH_MAX_MASK REG_GENMASK(1, 0)
+#define SKL_PLANE1_STRETCH_MAX_X8 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 0)
+#define SKL_PLANE1_STRETCH_MAX_X4 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 1)
+#define SKL_PLANE1_STRETCH_MAX_X2 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 2)
+#define SKL_PLANE1_STRETCH_MAX_X1 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3)
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
#define _CHICKEN_TRANS_A 0x420c0
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index ce446716d092..79da5eca60af 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -829,8 +829,6 @@ static void __i915_request_ctor(void *arg)
i915_sw_fence_init(&rq->submit, submit_notify);
i915_sw_fence_init(&rq->semaphore, semaphore_notify);
- dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
-
rq->capture_list = NULL;
init_llist_head(&rq->execute_cb);
@@ -905,17 +903,12 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->ring = ce->ring;
rq->execution_mask = ce->engine->mask;
- kref_init(&rq->fence.refcount);
- rq->fence.flags = 0;
- rq->fence.error = 0;
- INIT_LIST_HEAD(&rq->fence.cb_list);
-
ret = intel_timeline_get_seqno(tl, rq, &seqno);
if (ret)
goto err_free;
- rq->fence.context = tl->fence_context;
- rq->fence.seqno = seqno;
+ dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
+ tl->fence_context, seqno);
RCU_INIT_POINTER(rq->timeline, tl);
rq->hwsp_seqno = tl->hwsp_seqno;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 65bc3709f54c..a725792d5248 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -76,6 +76,8 @@ struct intel_wm_config {
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ enum pipe pipe;
+
if (HAS_LLC(dev_priv)) {
/*
* WaCompressedResourceDisplayNewHashMode:skl,kbl
@@ -89,6 +91,16 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
SKL_DE_COMPRESSED_HASH_MODE);
}
+ for_each_pipe(dev_priv, pipe) {
+ /*
+ * "Plane N strech max must be programmed to 11b (x1)
+ * when Async flips are enabled on that plane."
+ */
+ if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active())
+ intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
+ SKL_PLANE1_STRETCH_MAX_MASK, SKL_PLANE1_STRETCH_MAX_X1);
+ }
+
/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
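
The new workaround programs a two-bit field with a read-modify-write so the other CHICKEN_PIPESL_1 bits survive. A self-contained sketch of the arithmetic the REG_GENMASK()/REG_FIELD_PREP() macros and intel_uncore_rmw() perform (simplified 32-bit stand-ins, not the kernel macros):

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for REG_GENMASK()/REG_FIELD_PREP() */
#define GENMASK_U32(h, l)  (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP_U32(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define PLANE1_STRETCH_MAX_MASK GENMASK_U32(1, 0)
#define PLANE1_STRETCH_MAX_X1   FIELD_PREP_U32(PLANE1_STRETCH_MAX_MASK, 3)

/* read-modify-write: clear only the field, then set the new value */
static uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | val;
}

int main(void)
{
	uint32_t chicken = 0xdeadbe00;   /* pretend register contents */

	chicken = rmw(chicken, PLANE1_STRETCH_MAX_MASK, PLANE1_STRETCH_MAX_X1);
	printf("reg = 0x%08x (low field now 11b)\n", (unsigned)chicken);
	return 0;
}
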
diff --git a/drivers/gpu/drm/kmb/kmb_crtc.c b/drivers/gpu/drm/kmb/kmb_crtc.c
index 44327bc629ca..06613ffeaaf8 100644
--- a/drivers/gpu/drm/kmb/kmb_crtc.c
+++ b/drivers/gpu/drm/kmb/kmb_crtc.c
@@ -66,7 +66,8 @@ static const struct drm_crtc_funcs kmb_crtc_funcs = {
.disable_vblank = kmb_crtc_disable_vblank,
};
-static void kmb_crtc_set_mode(struct drm_crtc *crtc)
+static void kmb_crtc_set_mode(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
{
struct drm_device *dev = crtc->dev;
struct drm_display_mode *m = &crtc->state->adjusted_mode;
@@ -75,7 +76,7 @@ static void kmb_crtc_set_mode(struct drm_crtc *crtc)
unsigned int val = 0;
/* Initialize mipi */
- kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz);
+ kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz, old_state);
drm_info(dev,
"vfp= %d vbp= %d vsync_len=%d hfp=%d hbp=%d hsync_len=%d\n",
m->crtc_vsync_start - m->crtc_vdisplay,
@@ -138,7 +139,7 @@ static void kmb_crtc_atomic_enable(struct drm_crtc *crtc,
struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc);
clk_prepare_enable(kmb->kmb_clk.clk_lcd);
- kmb_crtc_set_mode(crtc);
+ kmb_crtc_set_mode(crtc, state);
drm_crtc_vblank_on(crtc);
}
@@ -185,11 +186,45 @@ static void kmb_crtc_atomic_flush(struct drm_crtc *crtc,
spin_unlock_irq(&crtc->dev->event_lock);
}
+static enum drm_mode_status
+ kmb_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ int refresh;
+ struct drm_device *dev = crtc->dev;
+ int vfp = mode->vsync_start - mode->vdisplay;
+
+ if (mode->vdisplay < KMB_CRTC_MAX_HEIGHT) {
+ drm_dbg(dev, "height = %d less than %d",
+ mode->vdisplay, KMB_CRTC_MAX_HEIGHT);
+ return MODE_BAD_VVALUE;
+ }
+ if (mode->hdisplay < KMB_CRTC_MAX_WIDTH) {
+ drm_dbg(dev, "width = %d less than %d",
+ mode->hdisplay, KMB_CRTC_MAX_WIDTH);
+ return MODE_BAD_HVALUE;
+ }
+ refresh = drm_mode_vrefresh(mode);
+ if (refresh < KMB_MIN_VREFRESH || refresh > KMB_MAX_VREFRESH) {
+ drm_dbg(dev, "refresh = %d less than %d or greater than %d",
+ refresh, KMB_MIN_VREFRESH, KMB_MAX_VREFRESH);
+ return MODE_BAD;
+ }
+
+ if (vfp < KMB_CRTC_MIN_VFP) {
+ drm_dbg(dev, "vfp = %d less than %d", vfp, KMB_CRTC_MIN_VFP);
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
static const struct drm_crtc_helper_funcs kmb_crtc_helper_funcs = {
.atomic_begin = kmb_crtc_atomic_begin,
.atomic_enable = kmb_crtc_atomic_enable,
.atomic_disable = kmb_crtc_atomic_disable,
.atomic_flush = kmb_crtc_atomic_flush,
+ .mode_valid = kmb_crtc_mode_valid,
};
int kmb_setup_crtc(struct drm_device *drm)
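
Since the hardware scans out a single fixed 1920x1080 timing, the new mode_valid hook treats the "max" limits as minimums as well and rejects anything smaller, as well as refresh rates outside 59-60 Hz or a vertical front porch below 4 lines. A compact userspace model of the same filter (constants inlined for illustration):

#include <stdio.h>

enum mode_status { MODE_OK, MODE_BAD_HVALUE, MODE_BAD_VVALUE, MODE_BAD };

struct mode { int hdisplay, vdisplay, vsync_start, vrefresh; };

/* KMB only supports one timing, so the 1920x1080 limits are both the
 * maximum and the minimum: smaller modes are rejected too */
static enum mode_status mode_valid(const struct mode *m)
{
	int vfp = m->vsync_start - m->vdisplay;

	if (m->vdisplay < 1080)
		return MODE_BAD_VVALUE;
	if (m->hdisplay < 1920)
		return MODE_BAD_HVALUE;
	if (m->vrefresh < 59 || m->vrefresh > 60)
		return MODE_BAD;
	if (vfp < 4)
		return MODE_BAD;
	return MODE_OK;
}

int main(void)
{
	struct mode m1080p = { 1920, 1080, 1084, 60 };
	struct mode m720p  = { 1280,  720,  725, 60 };

	printf("1080p: %d, 720p: %d\n", mode_valid(&m1080p), mode_valid(&m720p));
	return 0;
}
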
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index 1c2f4799f421..961ac6fb5fcf 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -172,10 +172,10 @@ static int kmb_setup_mode_config(struct drm_device *drm)
ret = drmm_mode_config_init(drm);
if (ret)
return ret;
- drm->mode_config.min_width = KMB_MIN_WIDTH;
- drm->mode_config.min_height = KMB_MIN_HEIGHT;
- drm->mode_config.max_width = KMB_MAX_WIDTH;
- drm->mode_config.max_height = KMB_MAX_HEIGHT;
+ drm->mode_config.min_width = KMB_FB_MIN_WIDTH;
+ drm->mode_config.min_height = KMB_FB_MIN_HEIGHT;
+ drm->mode_config.max_width = KMB_FB_MAX_WIDTH;
+ drm->mode_config.max_height = KMB_FB_MAX_HEIGHT;
drm->mode_config.funcs = &kmb_mode_config_funcs;
ret = kmb_setup_crtc(drm);
@@ -380,7 +380,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
if (val & LAYER3_DMA_FIFO_UNDERFLOW)
drm_dbg(&kmb->drm,
"LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val);
- if (val & LAYER3_DMA_FIFO_UNDERFLOW)
+ if (val & LAYER3_DMA_FIFO_OVERFLOW)
drm_dbg(&kmb->drm,
"LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val);
}
diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h
index ebbaa5f422d5..bf085e95b28f 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.h
+++ b/drivers/gpu/drm/kmb/kmb_drv.h
@@ -20,6 +20,18 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
+/* Platform definitions */
+#define KMB_CRTC_MIN_VFP 4
+#define KMB_CRTC_MAX_WIDTH 1920 /* max width in pixels */
+#define KMB_CRTC_MAX_HEIGHT 1080 /* max height in pixels */
+#define KMB_CRTC_MIN_WIDTH 1920
+#define KMB_CRTC_MIN_HEIGHT 1080
+#define KMB_FB_MAX_WIDTH 1920
+#define KMB_FB_MAX_HEIGHT 1080
+#define KMB_FB_MIN_WIDTH 1
+#define KMB_FB_MIN_HEIGHT 1
+#define KMB_MIN_VREFRESH 59 /* vertical refresh in Hz */
+#define KMB_MAX_VREFRESH 60 /* vertical refresh in Hz */
#define KMB_LCD_DEFAULT_CLK 200000000
#define KMB_SYS_CLK_MHZ 500
@@ -45,6 +57,7 @@ struct kmb_drm_private {
spinlock_t irq_lock;
int irq_lcd;
int sys_clk_mhz;
+ struct disp_cfg init_disp_cfg[KMB_MAX_PLANES];
struct layer_status plane_status[KMB_MAX_PLANES];
int kmb_under_flow;
int kmb_flush_done;
diff --git a/drivers/gpu/drm/kmb/kmb_dsi.c b/drivers/gpu/drm/kmb/kmb_dsi.c
index 1793cd31b117..f6071882054c 100644
--- a/drivers/gpu/drm/kmb/kmb_dsi.c
+++ b/drivers/gpu/drm/kmb/kmb_dsi.c
@@ -482,6 +482,10 @@ static u32 mipi_tx_fg_section_cfg(struct kmb_dsi *kmb_dsi,
return 0;
}
+#define CLK_DIFF_LOW 50
+#define CLK_DIFF_HI 60
+#define SYSCLK_500 500
+
static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
struct mipi_tx_frame_timing_cfg *fg_cfg)
{
@@ -492,7 +496,12 @@ static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
/* System clock minus 50 (500 MHz) or 60 (700 MHz) to account for the
* difference in MIPI clock speed in RTL tests
*/
- sysclk = kmb_dsi->sys_clk_mhz - 50;
+ if (kmb_dsi->sys_clk_mhz == SYSCLK_500) {
+ sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_LOW;
+ } else {
+ /* 700 MHz clk */
+ sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_HI;
+ }
/* PPL-Pixel Packing Layer, LLP-Low Level Protocol
* Frame generator timing parameters are clocked on the system clock,
@@ -1322,7 +1331,8 @@ static u32 mipi_tx_init_dphy(struct kmb_dsi *kmb_dsi,
return 0;
}
-static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
+static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi,
+ struct drm_atomic_state *old_state)
{
struct regmap *msscam;
@@ -1331,7 +1341,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
dev_dbg(kmb_dsi->dev, "failed to get msscam syscon");
return;
}
-
+ drm_atomic_bridge_chain_enable(adv_bridge, old_state);
/* DISABLE MIPI->CIF CONNECTION */
regmap_write(msscam, MSS_MIPI_CIF_CFG, 0);
@@ -1342,7 +1352,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
}
int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
- int sys_clk_mhz)
+ int sys_clk_mhz, struct drm_atomic_state *old_state)
{
u64 data_rate;
@@ -1384,18 +1394,13 @@ int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
mipi_tx_init_cfg.lane_rate_mbps = data_rate;
}
- kmb_write_mipi(kmb_dsi, DPHY_ENABLE, 0);
- kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL0, 0);
- kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL1, 0);
- kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL2, 0);
-
/* Initialize mipi controller */
mipi_tx_init_cntrl(kmb_dsi, &mipi_tx_init_cfg);
/* Dphy initialization */
mipi_tx_init_dphy(kmb_dsi, &mipi_tx_init_cfg);
- connect_lcd_to_mipi(kmb_dsi);
+ connect_lcd_to_mipi(kmb_dsi, old_state);
dev_info(kmb_dsi->dev, "mipi hw initialized");
return 0;
diff --git a/drivers/gpu/drm/kmb/kmb_dsi.h b/drivers/gpu/drm/kmb/kmb_dsi.h
index 66b7c500d9bc..09dc88743d77 100644
--- a/drivers/gpu/drm/kmb/kmb_dsi.h
+++ b/drivers/gpu/drm/kmb/kmb_dsi.h
@@ -380,7 +380,7 @@ int kmb_dsi_host_bridge_init(struct device *dev);
struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev);
void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi);
int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
- int sys_clk_mhz);
+ int sys_clk_mhz, struct drm_atomic_state *old_state);
int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi);
int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi);
int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi);
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index ecee6782612d..00404ba4126d 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -67,8 +67,21 @@ static const u32 kmb_formats_v[] = {
static unsigned int check_pixel_format(struct drm_plane *plane, u32 format)
{
+ struct kmb_drm_private *kmb;
+ struct kmb_plane *kmb_plane = to_kmb_plane(plane);
int i;
+ int plane_id = kmb_plane->id;
+ struct disp_cfg init_disp_cfg;
+ kmb = to_kmb(plane->dev);
+ init_disp_cfg = kmb->init_disp_cfg[plane_id];
+ /* Due to HW limitations, changing pixel format after initial
+ * plane configuration is not supported.
+ */
+ if (init_disp_cfg.format && init_disp_cfg.format != format) {
+ drm_dbg(&kmb->drm, "Cannot change format after initial plane configuration");
+ return -EINVAL;
+ }
for (i = 0; i < plane->format_count; i++) {
if (plane->format_types[i] == format)
return 0;
@@ -81,11 +94,17 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
+ struct kmb_drm_private *kmb;
+ struct kmb_plane *kmb_plane = to_kmb_plane(plane);
+ int plane_id = kmb_plane->id;
+ struct disp_cfg init_disp_cfg;
struct drm_framebuffer *fb;
int ret;
struct drm_crtc_state *crtc_state;
bool can_position;
+ kmb = to_kmb(plane->dev);
+ init_disp_cfg = kmb->init_disp_cfg[plane_id];
fb = new_plane_state->fb;
if (!fb || !new_plane_state->crtc)
return 0;
@@ -94,10 +113,21 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
if (ret)
return ret;
- if (new_plane_state->crtc_w > KMB_MAX_WIDTH || new_plane_state->crtc_h > KMB_MAX_HEIGHT)
+ if (new_plane_state->crtc_w > KMB_FB_MAX_WIDTH ||
+ new_plane_state->crtc_h > KMB_FB_MAX_HEIGHT ||
+ new_plane_state->crtc_w < KMB_FB_MIN_WIDTH ||
+ new_plane_state->crtc_h < KMB_FB_MIN_HEIGHT)
return -EINVAL;
- if (new_plane_state->crtc_w < KMB_MIN_WIDTH || new_plane_state->crtc_h < KMB_MIN_HEIGHT)
+
+ /* Due to HW limitations, changing plane height or width after
+ * initial plane configuration is not supported.
+ */
+ if ((init_disp_cfg.width && init_disp_cfg.height) &&
+ (init_disp_cfg.width != fb->width ||
+ init_disp_cfg.height != fb->height)) {
+ drm_dbg(&kmb->drm, "Cannot change plane height or width after initial configuration");
return -EINVAL;
+ }
can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
crtc_state =
drm_atomic_get_existing_crtc_state(state,
@@ -277,6 +307,44 @@ static void config_csc(struct kmb_drm_private *kmb, int plane_id)
kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF3(plane_id), csc_coef_lcd[11]);
}
+static void kmb_plane_set_alpha(struct kmb_drm_private *kmb,
+ const struct drm_plane_state *state,
+ unsigned char plane_id,
+ unsigned int *val)
+{
+ u16 plane_alpha = state->alpha;
+ u16 pixel_blend_mode = state->pixel_blend_mode;
+ int has_alpha = state->fb->format->has_alpha;
+
+ if (plane_alpha != DRM_BLEND_ALPHA_OPAQUE)
+ *val |= LCD_LAYER_ALPHA_STATIC;
+
+ if (has_alpha) {
+ switch (pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ break;
+ case DRM_MODE_BLEND_PREMULTI:
+ *val |= LCD_LAYER_ALPHA_EMBED | LCD_LAYER_ALPHA_PREMULT;
+ break;
+ case DRM_MODE_BLEND_COVERAGE:
+ *val |= LCD_LAYER_ALPHA_EMBED;
+ break;
+ default:
+ DRM_DEBUG("Missing pixel blend mode case (%s == %ld)\n",
+ __stringify(pixel_blend_mode),
+ (long)pixel_blend_mode);
+ break;
+ }
+ }
+
+ if (plane_alpha == DRM_BLEND_ALPHA_OPAQUE && !has_alpha) {
+ *val &= LCD_LAYER_ALPHA_DISABLED;
+ return;
+ }
+
+ kmb_write_lcd(kmb, LCD_LAYERn_ALPHA(plane_id), plane_alpha);
+}
+
static void kmb_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -296,6 +364,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
unsigned char plane_id;
int num_planes;
static dma_addr_t addr[MAX_SUB_PLANES];
+ struct disp_cfg *init_disp_cfg;
if (!plane || !new_plane_state || !old_plane_state)
return;
@@ -303,11 +372,12 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
fb = new_plane_state->fb;
if (!fb)
return;
+
num_planes = fb->format->num_planes;
kmb_plane = to_kmb_plane(plane);
- plane_id = kmb_plane->id;
kmb = to_kmb(plane->dev);
+ plane_id = kmb_plane->id;
spin_lock_irq(&kmb->irq_lock);
if (kmb->kmb_under_flow || kmb->kmb_flush_done) {
@@ -317,7 +387,8 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
}
spin_unlock_irq(&kmb->irq_lock);
- src_w = (new_plane_state->src_w >> 16);
+ init_disp_cfg = &kmb->init_disp_cfg[plane_id];
+ src_w = new_plane_state->src_w >> 16;
src_h = new_plane_state->src_h >> 16;
crtc_x = new_plane_state->crtc_x;
crtc_y = new_plane_state->crtc_y;
@@ -400,20 +471,32 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
config_csc(kmb, plane_id);
}
+ kmb_plane_set_alpha(kmb, plane->state, plane_id, &val);
+
kmb_write_lcd(kmb, LCD_LAYERn_CFG(plane_id), val);
+ /* Configure LCD_CONTROL */
+ ctrl = kmb_read_lcd(kmb, LCD_CONTROL);
+
+ /* Set layer blending config */
+ ctrl &= ~LCD_CTRL_ALPHA_ALL;
+ ctrl |= LCD_CTRL_ALPHA_BOTTOM_VL1 |
+ LCD_CTRL_ALPHA_BLEND_VL2;
+
+ ctrl &= ~LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE;
+
switch (plane_id) {
case LAYER_0:
- ctrl = LCD_CTRL_VL1_ENABLE;
+ ctrl |= LCD_CTRL_VL1_ENABLE;
break;
case LAYER_1:
- ctrl = LCD_CTRL_VL2_ENABLE;
+ ctrl |= LCD_CTRL_VL2_ENABLE;
break;
case LAYER_2:
- ctrl = LCD_CTRL_GL1_ENABLE;
+ ctrl |= LCD_CTRL_GL1_ENABLE;
break;
case LAYER_3:
- ctrl = LCD_CTRL_GL2_ENABLE;
+ ctrl |= LCD_CTRL_GL2_ENABLE;
break;
}
@@ -425,7 +508,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
*/
ctrl |= LCD_CTRL_VHSYNC_IDLE_LVL;
- kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl);
+ kmb_write_lcd(kmb, LCD_CONTROL, ctrl);
/* Enable pipeline AXI read transactions for the DMA
* after setting graphics layers. This must be done
@@ -448,6 +531,16 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
/* Enable DMA */
kmb_write_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id), dma_cfg);
+
+ /* Save initial display config */
+ if (!init_disp_cfg->width ||
+ !init_disp_cfg->height ||
+ !init_disp_cfg->format) {
+ init_disp_cfg->width = width;
+ init_disp_cfg->height = height;
+ init_disp_cfg->format = fb->format->format;
+ }
+
drm_dbg(&kmb->drm, "dma_cfg=0x%x LCD_DMA_CFG=0x%x\n", dma_cfg,
kmb_read_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id)));
@@ -490,6 +583,9 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
enum drm_plane_type plane_type;
const u32 *plane_formats;
int num_plane_formats;
+ unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE);
for (i = 0; i < KMB_MAX_PLANES; i++) {
plane = drmm_kzalloc(drm, sizeof(*plane), GFP_KERNEL);
@@ -521,8 +617,16 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
drm_dbg(drm, "%s : %d i=%d type=%d",
__func__, __LINE__,
i, plane_type);
+ drm_plane_create_alpha_property(&plane->base_plane);
+
+ drm_plane_create_blend_mode_property(&plane->base_plane,
+ blend_caps);
+
+ drm_plane_create_zpos_immutable_property(&plane->base_plane, i);
+
drm_plane_helper_add(&plane->base_plane,
&kmb_plane_helper_funcs);
+
if (plane_type == DRM_PLANE_TYPE_PRIMARY) {
primary = plane;
kmb->plane = plane;
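
The plane now advertises alpha, blend-mode and zpos properties, and kmb_plane_set_alpha() folds the chosen mode into the layer configuration word. A simplified model of that decision table (bit positions are illustrative, and the fully-opaque disable case is approximated by clearing the word):

#include <stdint.h>
#include <stdio.h>

enum blend { BLEND_PIXEL_NONE, BLEND_PREMULTI, BLEND_COVERAGE };

#define ALPHA_STATIC  (1u << 4)
#define ALPHA_EMBED   (1u << 5)
#define ALPHA_PREMULT (1u << 6)

/* static alpha when the plane alpha is not opaque, embedded alpha when
 * the format carries one, plus the premultiplied flag for PREMULTI */
static uint32_t layer_alpha_bits(int plane_alpha_opaque, int has_alpha,
				 enum blend mode)
{
	uint32_t val = 0;

	if (!plane_alpha_opaque)
		val |= ALPHA_STATIC;
	if (has_alpha) {
		if (mode == BLEND_PREMULTI)
			val |= ALPHA_EMBED | ALPHA_PREMULT;
		else if (mode == BLEND_COVERAGE)
			val |= ALPHA_EMBED;
	}
	if (plane_alpha_opaque && !has_alpha)
		val = 0;	/* blending fully disabled */
	return val;
}

int main(void)
{
	printf("XRGB opaque: 0x%x\n",
	       (unsigned)layer_alpha_bits(1, 0, BLEND_PIXEL_NONE));
	printf("ARGB premult: 0x%x\n",
	       (unsigned)layer_alpha_bits(1, 1, BLEND_PREMULTI));
	return 0;
}
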
diff --git a/drivers/gpu/drm/kmb/kmb_plane.h b/drivers/gpu/drm/kmb/kmb_plane.h
index 486490f7a3ec..b51144044fe8 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.h
+++ b/drivers/gpu/drm/kmb/kmb_plane.h
@@ -35,6 +35,9 @@
#define POSSIBLE_CRTCS 1
#define to_kmb_plane(x) container_of(x, struct kmb_plane, base_plane)
+#define POSSIBLE_CRTCS 1
+#define KMB_MAX_PLANES 2
+
enum layer_id {
LAYER_0,
LAYER_1,
@@ -43,8 +46,6 @@ enum layer_id {
/* KMB_MAX_PLANES */
};
-#define KMB_MAX_PLANES 1
-
enum sub_plane_id {
Y_PLANE,
U_PLANE,
@@ -62,6 +63,12 @@ struct layer_status {
u32 ctrl;
};
+struct disp_cfg {
+ unsigned int width;
+ unsigned int height;
+ unsigned int format;
+};
+
struct kmb_plane *kmb_plane_init(struct drm_device *drm);
void kmb_plane_destroy(struct drm_plane *plane);
#endif /* __KMB_PLANE_H__ */
diff --git a/drivers/gpu/drm/kmb/kmb_regs.h b/drivers/gpu/drm/kmb/kmb_regs.h
index 48150569f702..9756101b0d32 100644
--- a/drivers/gpu/drm/kmb/kmb_regs.h
+++ b/drivers/gpu/drm/kmb/kmb_regs.h
@@ -43,8 +43,10 @@
#define LCD_CTRL_OUTPUT_ENABLED BIT(19)
#define LCD_CTRL_BPORCH_ENABLE BIT(21)
#define LCD_CTRL_FPORCH_ENABLE BIT(22)
+#define LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE BIT(23)
#define LCD_CTRL_PIPELINE_DMA BIT(28)
#define LCD_CTRL_VHSYNC_IDLE_LVL BIT(31)
+#define LCD_CTRL_ALPHA_ALL (0xff << 6)
/* interrupts */
#define LCD_INT_STATUS (0x4 * 0x001)
@@ -115,6 +117,7 @@
#define LCD_LAYER_ALPHA_EMBED BIT(5)
#define LCD_LAYER_ALPHA_COMBI (LCD_LAYER_ALPHA_STATIC | \
LCD_LAYER_ALPHA_EMBED)
+#define LCD_LAYER_ALPHA_DISABLED ~(LCD_LAYER_ALPHA_COMBI)
/* RGB multiplied with alpha */
#define LCD_LAYER_ALPHA_PREMULT BIT(6)
#define LCD_LAYER_INVERT_COL BIT(7)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 5f81489fc60c..a4e80e499674 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -4,8 +4,6 @@
*/
#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/mailbox_controller.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
@@ -52,11 +50,8 @@ struct mtk_drm_crtc {
bool pending_async_planes;
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- struct mbox_client cmdq_cl;
- struct mbox_chan *cmdq_chan;
- struct cmdq_pkt cmdq_handle;
+ struct cmdq_client *cmdq_client;
u32 cmdq_event;
- u32 cmdq_vblank_cnt;
#endif
struct device *mmsys_dev;
@@ -227,79 +222,9 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
-static int mtk_drm_cmdq_pkt_create(struct mbox_chan *chan, struct cmdq_pkt *pkt,
- size_t size)
+static void ddp_cmdq_cb(struct cmdq_cb_data data)
{
- struct device *dev;
- dma_addr_t dma_addr;
-
- pkt->va_base = kzalloc(size, GFP_KERNEL);
- if (!pkt->va_base) {
- kfree(pkt);
- return -ENOMEM;
- }
- pkt->buf_size = size;
-
- dev = chan->mbox->dev;
- dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
- kfree(pkt->va_base);
- kfree(pkt);
- return -ENOMEM;
- }
-
- pkt->pa_base = dma_addr;
-
- return 0;
-}
-
-static void mtk_drm_cmdq_pkt_destroy(struct mbox_chan *chan, struct cmdq_pkt *pkt)
-{
- dma_unmap_single(chan->mbox->dev, pkt->pa_base, pkt->buf_size,
- DMA_TO_DEVICE);
- kfree(pkt->va_base);
- kfree(pkt);
-}
-
-static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
-{
- struct mtk_drm_crtc *mtk_crtc = container_of(cl, struct mtk_drm_crtc, cmdq_cl);
- struct cmdq_cb_data *data = mssg;
- struct mtk_crtc_state *state;
- unsigned int i;
-
- state = to_mtk_crtc_state(mtk_crtc->base.state);
-
- state->pending_config = false;
-
- if (mtk_crtc->pending_planes) {
- for (i = 0; i < mtk_crtc->layer_nr; i++) {
- struct drm_plane *plane = &mtk_crtc->planes[i];
- struct mtk_plane_state *plane_state;
-
- plane_state = to_mtk_plane_state(plane->state);
-
- plane_state->pending.config = false;
- }
- mtk_crtc->pending_planes = false;
- }
-
- if (mtk_crtc->pending_async_planes) {
- for (i = 0; i < mtk_crtc->layer_nr; i++) {
- struct drm_plane *plane = &mtk_crtc->planes[i];
- struct mtk_plane_state *plane_state;
-
- plane_state = to_mtk_plane_state(plane->state);
-
- plane_state->pending.async_config = false;
- }
- mtk_crtc->pending_async_planes = false;
- }
-
- mtk_crtc->cmdq_vblank_cnt = 0;
- mtk_drm_cmdq_pkt_destroy(mtk_crtc->cmdq_chan, data->pkt);
+ cmdq_pkt_destroy(data.data);
}
#endif
@@ -453,8 +378,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
state->pending_vrefresh, 0,
cmdq_handle);
- if (!cmdq_handle)
- state->pending_config = false;
+ state->pending_config = false;
}
if (mtk_crtc->pending_planes) {
@@ -474,12 +398,9 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
mtk_ddp_comp_layer_config(comp, local_layer,
plane_state,
cmdq_handle);
- if (!cmdq_handle)
- plane_state->pending.config = false;
+ plane_state->pending.config = false;
}
-
- if (!cmdq_handle)
- mtk_crtc->pending_planes = false;
+ mtk_crtc->pending_planes = false;
}
if (mtk_crtc->pending_async_planes) {
@@ -499,12 +420,9 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
mtk_ddp_comp_layer_config(comp, local_layer,
plane_state,
cmdq_handle);
- if (!cmdq_handle)
- plane_state->pending.async_config = false;
+ plane_state->pending.async_config = false;
}
-
- if (!cmdq_handle)
- mtk_crtc->pending_async_planes = false;
+ mtk_crtc->pending_async_planes = false;
}
}
@@ -512,7 +430,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
+ struct cmdq_pkt *cmdq_handle;
#endif
struct drm_crtc *crtc = &mtk_crtc->base;
struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -550,24 +468,14 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
mtk_mutex_release(mtk_crtc->mutex);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- if (mtk_crtc->cmdq_chan) {
- mbox_flush(mtk_crtc->cmdq_chan, 2000);
- cmdq_handle->cmd_buf_size = 0;
+ if (mtk_crtc->cmdq_client) {
+ mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
+ cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
mtk_crtc_ddp_config(crtc, cmdq_handle);
cmdq_pkt_finalize(cmdq_handle);
- dma_sync_single_for_device(mtk_crtc->cmdq_chan->mbox->dev,
- cmdq_handle->pa_base,
- cmdq_handle->cmd_buf_size,
- DMA_TO_DEVICE);
- /*
- * CMDQ command should execute in next vblank,
- * If it fail to execute in next 2 vblank, timeout happen.
- */
- mtk_crtc->cmdq_vblank_cnt = 2;
- mbox_send_message(mtk_crtc->cmdq_chan, cmdq_handle);
- mbox_client_txdone(mtk_crtc->cmdq_chan, 0);
+ cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
}
#endif
mtk_crtc->config_updating = false;
@@ -581,15 +489,12 @@ static void mtk_crtc_ddp_irq(void *data)
struct mtk_drm_private *priv = crtc->dev->dev_private;
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- if (!priv->data->shadow_register && !mtk_crtc->cmdq_chan)
- mtk_crtc_ddp_config(crtc, NULL);
- else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
- DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
- drm_crtc_index(&mtk_crtc->base));
+ if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
#else
if (!priv->data->shadow_register)
- mtk_crtc_ddp_config(crtc, NULL);
#endif
+ mtk_crtc_ddp_config(crtc, NULL);
+
mtk_drm_finish_page_flip(mtk_crtc);
}
@@ -924,20 +829,16 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mutex_init(&mtk_crtc->hw_lock);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- mtk_crtc->cmdq_cl.dev = mtk_crtc->mmsys_dev;
- mtk_crtc->cmdq_cl.tx_block = false;
- mtk_crtc->cmdq_cl.knows_txdone = true;
- mtk_crtc->cmdq_cl.rx_callback = ddp_cmdq_cb;
- mtk_crtc->cmdq_chan =
- mbox_request_channel(&mtk_crtc->cmdq_cl,
- drm_crtc_index(&mtk_crtc->base));
- if (IS_ERR(mtk_crtc->cmdq_chan)) {
+ mtk_crtc->cmdq_client =
+ cmdq_mbox_create(mtk_crtc->mmsys_dev,
+ drm_crtc_index(&mtk_crtc->base));
+ if (IS_ERR(mtk_crtc->cmdq_client)) {
dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
drm_crtc_index(&mtk_crtc->base));
- mtk_crtc->cmdq_chan = NULL;
+ mtk_crtc->cmdq_client = NULL;
}
- if (mtk_crtc->cmdq_chan) {
+ if (mtk_crtc->cmdq_client) {
ret = of_property_read_u32_index(priv->mutex_node,
"mediatek,gce-events",
drm_crtc_index(&mtk_crtc->base),
@@ -945,18 +846,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
if (ret) {
dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
drm_crtc_index(&mtk_crtc->base));
- mbox_free_channel(mtk_crtc->cmdq_chan);
- mtk_crtc->cmdq_chan = NULL;
- } else {
- ret = mtk_drm_cmdq_pkt_create(mtk_crtc->cmdq_chan,
- &mtk_crtc->cmdq_handle,
- PAGE_SIZE);
- if (ret) {
- dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
- drm_crtc_index(&mtk_crtc->base));
- mbox_free_channel(mtk_crtc->cmdq_chan);
- mtk_crtc->cmdq_chan = NULL;
- }
+ cmdq_mbox_destroy(mtk_crtc->cmdq_client);
+ mtk_crtc->cmdq_client = NULL;
}
}
#endif
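
The conversion replaces the open-coded mailbox packet management with the cmdq helpers, which own buffer allocation, DMA syncing and completion: a packet is created per flush and freed by the completion callback. A runnable userspace model of that ownership handoff (the real flow is asynchronous; here the "GCE" consumes the packet inline):

#include <stdio.h>
#include <stdlib.h>

struct pkt { size_t size; void *buf; };

static struct pkt *pkt_create(size_t size)
{
	struct pkt *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	p->buf = calloc(1, size);
	p->size = size;
	return p;
}

static void pkt_destroy(struct pkt *p)
{
	free(p->buf);
	free(p);
}

typedef void (*flush_cb)(void *data);

/* model of cmdq_pkt_flush_async(): the consumer invokes the callback
 * once the packet has executed, and the callback frees it */
static void pkt_flush_async(struct pkt *p, flush_cb cb, void *data)
{
	(void)p;	/* pretend the hardware consumed it immediately */
	cb(data);
}

static void done_cb(void *data)
{
	pkt_destroy(data);	/* ownership ends in the completion path */
}

int main(void)
{
	struct pkt *p = pkt_create(4096);

	if (!p)
		return 1;
	pkt_flush_async(p, done_cb, p);
	puts("packet submitted and reclaimed in the callback");
	return 0;
}
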
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index e9c6af78b1d7..3ddf739a6f9b 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -17,7 +17,7 @@ config DRM_MSM
select DRM_SCHED
select SHMEM
select TMPFS
- select QCOM_SCM if ARCH_QCOM
+ select QCOM_SCM
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
@@ -55,7 +55,7 @@ config DRM_MSM_GPU_SUDO
config DRM_MSM_HDMI_HDCP
bool "Enable HDMI HDCP support in MSM DRM driver"
- depends on DRM_MSM && QCOM_SCM
+ depends on DRM_MSM
default y
help
Choose this option to enable HDCP state machine
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 4534633fe7cd..8fb847c174ff 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -571,13 +571,14 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
- ret = IS_ERR(icc_path);
- if (ret)
+ if (IS_ERR(icc_path)) {
+ ret = PTR_ERR(icc_path);
goto fail;
+ }
ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
- ret = IS_ERR(ocmem_icc_path);
- if (ret) {
+ if (IS_ERR(ocmem_icc_path)) {
+ ret = PTR_ERR(ocmem_icc_path);
/* allow -ENODATA, ocmem icc is optional */
if (ret != -ENODATA)
goto fail;
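
The bug fixed here (and again in a4xx below) is storing IS_ERR()'s boolean result as the error code, so every failure propagated as 1 instead of the encoded errno, and -ENODATA could never be recognized by the "optional ocmem" check. A userspace model of the ERR_PTR encoding showing both versions:

#include <stdio.h>

/* userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *path = ERR_PTR(-61);	/* -ENODATA */
	long ret;

	ret = IS_ERR(path);		/* BUG: ret is 1, not -61 */
	printf("bool-as-errno: %ld\n", ret);

	if (IS_ERR(path)) {		/* fixed: recover the real code */
		ret = PTR_ERR(path);
		printf("real errno: %ld\n", ret);
	}
	return 0;
}
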
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 82bebb40234d..a96ee79cc5e0 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -699,13 +699,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
- ret = IS_ERR(icc_path);
- if (ret)
+ if (IS_ERR(icc_path)) {
+ ret = PTR_ERR(icc_path);
goto fail;
+ }
ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
- ret = IS_ERR(ocmem_icc_path);
- if (ret) {
+ if (IS_ERR(ocmem_icc_path)) {
+ ret = PTR_ERR(ocmem_icc_path);
/* allow -ENODATA, ocmem icc is optional */
if (ret != -ENODATA)
goto fail;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index a7c58018959f..8b73f70766a4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -296,6 +296,8 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
u32 val;
int request, ack;
+ WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return -EINVAL;
@@ -337,6 +339,8 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
int bit;
+ WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return;
@@ -1482,6 +1486,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (!pdev)
return -ENODEV;
+ mutex_init(&gmu->lock);
+
gmu->dev = &pdev->dev;
of_dma_configure(gmu->dev, node, true);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 3c74f64e3126..84bd516f01e8 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -44,6 +44,9 @@ struct a6xx_gmu_bo {
struct a6xx_gmu {
struct device *dev;
+ /* For serializing communication with the GMU: */
+ struct mutex lock;
+
struct msm_gem_address_space *aspace;
void * __iomem mmio;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 40c9fef457a4..267a880811d6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -106,7 +106,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
u32 asid;
u64 memptr = rbmemptr(ring, ttbr0);
- if (ctx == a6xx_gpu->cur_ctx)
+ if (ctx->seqno == a6xx_gpu->cur_ctx_seqno)
return;
if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -139,7 +139,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, 0x31);
- a6xx_gpu->cur_ctx = ctx;
+ a6xx_gpu->cur_ctx_seqno = ctx->seqno;
}
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
@@ -881,7 +881,7 @@ static int a6xx_zap_shader_init(struct msm_gpu *gpu)
A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
-static int a6xx_hw_init(struct msm_gpu *gpu)
+static int hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -1081,7 +1081,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
- a6xx_gpu->cur_ctx = NULL;
+ a6xx_gpu->cur_ctx_seqno = 0;
/* Enable the SQE_to start the CP engine */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
@@ -1135,6 +1135,19 @@ out:
return ret;
}
+static int a6xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+ ret = hw_init(gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
+ return ret;
+}
+
static void a6xx_dump(struct msm_gpu *gpu)
{
DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
@@ -1509,7 +1522,9 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
trace_msm_gpu_resume(0);
+ mutex_lock(&a6xx_gpu->gmu.lock);
ret = a6xx_gmu_resume(a6xx_gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
if (ret)
return ret;
@@ -1532,7 +1547,9 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
msm_devfreq_suspend(gpu);
+ mutex_lock(&a6xx_gpu->gmu.lock);
ret = a6xx_gmu_stop(a6xx_gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
if (ret)
return ret;
@@ -1547,18 +1564,19 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- static DEFINE_MUTEX(perfcounter_oob);
- mutex_lock(&perfcounter_oob);
+ mutex_lock(&a6xx_gpu->gmu.lock);
/* Force the GPU power on so we can read this register */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
*value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
- REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
+ REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
- mutex_unlock(&perfcounter_oob);
+
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
return 0;
}
@@ -1622,6 +1640,16 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
return (unsigned long)busy_time;
}
+void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+ a6xx_gmu_set_freq(gpu, opp);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+}
+
static struct msm_gem_address_space *
a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
{
@@ -1766,7 +1794,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_busy = a6xx_gpu_busy,
.gpu_get_freq = a6xx_gmu_get_freq,
- .gpu_set_freq = a6xx_gmu_set_freq,
+ .gpu_set_freq = a6xx_gpu_set_freq,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
@@ -1810,6 +1838,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
adreno_gpu->base.hw_apriv = true;
+ /*
+ * For now only clamp to idle freq for devices where this is known not
+ * to cause power supply issues:
+ */
+ if (info && (info->revn == 618))
+ gpu->clamp_to_idle = true;
+
a6xx_llc_slices_init(pdev, a6xx_gpu);
ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
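
All GMU traffic is now serialized by gmu->lock: leaf helpers assert the lock is held, and entry points such as a6xx_hw_init() become thin locked wrappers around an unlocked body. A pthread sketch of that structure (the held-check is a plain flag standing in for mutex_is_locked()):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t gmu_lock = PTHREAD_MUTEX_INITIALIZER;
static int gmu_locked;		/* bookkeeping for the assertion only */

/* leaf helper: insists the caller already holds the GMU lock, the
 * userspace analogue of WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)) */
static void set_oob(void)
{
	assert(gmu_locked);
	puts("OOB request sent under the GMU lock");
}

/* unlocked body plus a thin locked wrapper, as in hw_init()/a6xx_hw_init() */
static int hw_init_body(void)
{
	set_oob();
	return 0;
}

static int hw_init(void)
{
	int ret;

	pthread_mutex_lock(&gmu_lock);
	gmu_locked = 1;
	ret = hw_init_body();
	gmu_locked = 0;
	pthread_mutex_unlock(&gmu_lock);
	return ret;
}

int main(void)
{
	return hw_init();
}
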
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 0bc2d062f54a..8e5527c881b1 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -19,7 +19,16 @@ struct a6xx_gpu {
uint64_t sqe_iova;
struct msm_ringbuffer *cur_ring;
- struct msm_file_private *cur_ctx;
+
+ /**
+ * cur_ctx_seqno:
+ *
+ * The ctx->seqno value of the context with current pgtables
+ * installed. Tracked by seqno rather than pointer value to
+ * avoid dangling pointers, and cases where a ctx can be freed
+ * and a new one created with the same address.
+ */
+ int cur_ctx_seqno;
struct a6xx_gmu gmu;
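
Comparing ctx->seqno instead of the context pointer avoids the ABA hazard the comment describes: a freed context's address can be recycled for a new one, but a monotonically increasing sequence number is never reused. A small demonstration (whether the allocator actually hands back the same address is up to libc, so the first message may or may not print):

#include <stdio.h>
#include <stdlib.h>

struct ctx { int seqno; };

static int next_seqno;	/* the driver uses an atomic counter for this */

static struct ctx *ctx_create(void)
{
	struct ctx *c = malloc(sizeof(*c));

	if (c)
		c->seqno = ++next_seqno;
	return c;
}

int main(void)
{
	struct ctx *a = ctx_create(), *b;
	void *stale;
	int stale_seqno;

	if (!a)
		return 1;
	stale = a;			/* "current context" cached by address */
	stale_seqno = a->seqno;
	free(a);

	b = ctx_create();		/* malloc may reuse the freed address */
	if (!b)
		return 1;
	if ((void *)b == stale)
		puts("pointer compare: new ctx mistaken for the old one");
	if (b->seqno != stale_seqno)
		puts("seqno compare: correctly detects a different ctx");
	free(b);
	return 0;
}
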
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index b131fd376192..700d65e39feb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -794,7 +794,7 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
-1),
PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
-1),
};
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index f482e0911d03..bb7d066618e6 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -1125,6 +1125,20 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
}
+static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = mdp5_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = mdp5_crtc_reset,
+ .atomic_duplicate_state = mdp5_crtc_duplicate_state,
+ .atomic_destroy_state = mdp5_crtc_destroy_state,
+ .atomic_print_state = mdp5_crtc_atomic_print_state,
+ .get_vblank_counter = mdp5_crtc_get_vblank_counter,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+};
+
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
@@ -1313,6 +1327,8 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
+ cursor_plane ?
+ &mdp5_crtc_no_lm_cursor_funcs :
&mdp5_crtc_funcs, NULL);
drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index fbe4c2cd52a3..a0392e4d8134 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -1309,14 +1309,14 @@ static int dp_pm_resume(struct device *dev)
* cannot declare the display connected unless the
* HDMI cable is plugged in and the sink_count of
* the dongle becomes 1
+ * also, only signal audio when disconnected
*/
- if (dp->link->sink_count)
+ if (dp->link->sink_count) {
dp->dp_display.is_connected = true;
- else
+ } else {
dp->dp_display.is_connected = false;
-
- dp_display_handle_plugged_change(g_dp_display,
- dp->dp_display.is_connected);
+ dp_display_handle_plugged_change(g_dp_display, false);
+ }
DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n",
dp->link->sink_count, dp->dp_display.is_connected,
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 614dc7f26f2c..75ae3008b68f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -215,8 +215,10 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
- if (!msm_dsi_manager_validate_current_config(msm_dsi->id))
+ if (!msm_dsi_manager_validate_current_config(msm_dsi->id)) {
+ ret = -EINVAL;
goto fail;
+ }
msm_dsi->encoder = encoder;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index e269df285136..c86b5090fae6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -451,7 +451,7 @@ static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
return 0;
err:
- for (; i > 0; i--)
+ while (--i >= 0)
clk_disable_unprepare(msm_host->bus_clks[i]);
return ret;
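
The old unwind loop `for (; i > 0; i--)` walked from the failing index down to 1, so it "disabled" the clock that never got enabled and skipped bus_clks[0]; `while (--i >= 0)` visits exactly the clocks enabled so far, in reverse order. A runnable comparison with the failure at index 2:

#include <stdio.h>

#define NUM_CLKS 4

int main(void)
{
	int enabled[NUM_CLKS] = { 1, 1, 0, 0 };	/* enable failed at i == 2 */
	int i = 2;

	/* broken unwind: touches index 2 (never enabled), skips index 0 */
	for (int j = i; j > 0; j--)
		printf("old loop disables clk %d\n", j);

	/* fixed unwind: exactly the clocks that were enabled, 1 then 0 */
	while (--i >= 0) {
		enabled[i] = 0;
		printf("new loop disables clk %d\n", i);
	}
	return 0;
}
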
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index d13552b2213b..5b4e991f220d 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -110,14 +110,13 @@ static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
u32 nb_tries, u32 timeout_us)
{
- bool pll_locked = false;
+ bool pll_locked = false, pll_ready = false;
void __iomem *base = pll_14nm->phy->pll_base;
u32 tries, val;
tries = nb_tries;
while (tries--) {
- val = dsi_phy_read(base +
- REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+ val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
pll_locked = !!(val & BIT(5));
if (pll_locked)
@@ -126,23 +125,24 @@ static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
udelay(timeout_us);
}
- if (!pll_locked) {
- tries = nb_tries;
- while (tries--) {
- val = dsi_phy_read(base +
- REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
- pll_locked = !!(val & BIT(0));
+ if (!pll_locked)
+ goto out;
- if (pll_locked)
- break;
+ tries = nb_tries;
+ while (tries--) {
+ val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+ pll_ready = !!(val & BIT(0));
- udelay(timeout_us);
- }
+ if (pll_ready)
+ break;
+
+ udelay(timeout_us);
}
- DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+out:
+ DBG("DSI PLL is %slocked, %sready", pll_locked ? "" : "*not* ", pll_ready ? "" : "*not* ");
- return pll_locked;
+ return pll_locked && pll_ready;
}
static void dsi_pll_14nm_config_init(struct dsi_pll_config *pconf)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index aaa37456f4ee..71ed4aa0dc67 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -428,7 +428,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id);
+ snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id + 1);
bytediv_init.name = clk_name;
bytediv_init.ops = &clk_bytediv_ops;
@@ -442,7 +442,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
return ret;
provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;
- snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id);
+ snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id + 1);
/* DIV3 */
hw = devm_clk_hw_register_divider(dev, clk_name,
parent_name, 0, pll_28nm->phy->pll_base +
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 4fb397ee7c84..fe1366b4c49f 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -1116,7 +1116,7 @@ void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on)
int msm_edp_ctrl_init(struct msm_edp *edp)
{
struct edp_ctrl *ctrl = NULL;
- struct device *dev = &edp->pdev->dev;
+ struct device *dev;
int ret;
if (!edp) {
@@ -1124,6 +1124,7 @@ int msm_edp_ctrl_init(struct msm_edp *edp)
return -EINVAL;
}
+ dev = &edp->pdev->dev;
ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
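
The initializer `dev = &edp->pdev->dev` dereferenced edp before the !edp guard could run; the fix declares the pointer first and assigns it only after validation. The same shape in miniature:

#include <stdio.h>

struct pdev { int id; };
struct edp { struct pdev *pdev; };

static int ctrl_init(struct edp *edp)
{
	struct pdev *dev;	/* do not initialize from 'edp' yet */

	if (!edp) {
		fprintf(stderr, "invalid input\n");
		return -22;
	}
	dev = edp->pdev;	/* safe: 'edp' is known non-NULL here */
	printf("init for pdev %d\n", dev->id);
	return 0;
}

int main(void)
{
	struct pdev p = { .id = 7 };
	struct edp e = { .pdev = &p };

	ctrl_init(NULL);	/* would have crashed with the old code */
	return ctrl_init(&e);
}
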
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 2e6fc185e54d..d4e09703a87d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -630,10 +630,11 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
- ret = msm_disp_snapshot_init(ddev);
- if (ret)
- DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
-
+ if (kms) {
+ ret = msm_disp_snapshot_init(ddev);
+ if (ret)
+ DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
+ }
drm_mode_config_reset(ddev);
#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -682,6 +683,7 @@ static void load_gpu(struct drm_device *dev)
static int context_init(struct drm_device *dev, struct drm_file *file)
{
+ static atomic_t ident = ATOMIC_INIT(0);
struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx;
@@ -689,12 +691,17 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
if (!ctx)
return -ENOMEM;
+ INIT_LIST_HEAD(&ctx->submitqueues);
+ rwlock_init(&ctx->queuelock);
+
kref_init(&ctx->ref);
msm_submitqueue_init(dev, ctx);
ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
file->driver_priv = ctx;
+ ctx->seqno = atomic_inc_return(&ident);
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 8b005d1ac899..c552f0c3890c 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -53,14 +53,6 @@ struct msm_disp_state;
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
-struct msm_file_private {
- rwlock_t queuelock;
- struct list_head submitqueues;
- int queueid;
- struct msm_gem_address_space *aspace;
- struct kref ref;
-};
-
enum msm_mdp_plane_property {
PLANE_PROP_ZPOS,
PLANE_PROP_ALPHA,
@@ -488,41 +480,6 @@ void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);
void msm_rmw(void __iomem *addr, u32 mask, u32 or);
-struct msm_gpu_submitqueue;
-int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
-struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
- u32 id);
-int msm_submitqueue_create(struct drm_device *drm,
- struct msm_file_private *ctx,
- u32 prio, u32 flags, u32 *id);
-int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
- struct drm_msm_submitqueue_query *args);
-int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
-void msm_submitqueue_close(struct msm_file_private *ctx);
-
-void msm_submitqueue_destroy(struct kref *kref);
-
-static inline void __msm_file_private_destroy(struct kref *kref)
-{
- struct msm_file_private *ctx = container_of(kref,
- struct msm_file_private, ref);
-
- msm_gem_address_space_put(ctx->aspace);
- kfree(ctx);
-}
-
-static inline void msm_file_private_put(struct msm_file_private *ctx)
-{
- kref_put(&ctx->ref, __msm_file_private_destroy);
-}
-
-static inline struct msm_file_private *msm_file_private_get(
- struct msm_file_private *ctx)
-{
- kref_get(&ctx->ref);
- return ctx;
-}
-
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
@@ -547,7 +504,7 @@ static inline int align_pitch(int width, int bpp)
static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
{
ktime_t now = ktime_get();
- unsigned long remaining_jiffies;
+ s64 remaining_jiffies;
if (ktime_compare(*timeout, now) < 0) {
remaining_jiffies = 0;
@@ -556,7 +513,7 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
}
- return remaining_jiffies;
+ return clamp(remaining_jiffies, 0LL, (s64)INT_MAX);
}
#endif /* __MSM_DRV_H__ */
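
Switching remaining_jiffies to s64 and clamping to [0, INT_MAX] keeps a huge or negative timeout from wrapping when narrowed back to unsigned long, particularly on 32-bit builds. A userspace sketch of the hazard and the clamp (HZ and the ktime math are simplified):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static unsigned long timeout_to_jiffies(int64_t remaining_ns, long hz)
{
	int64_t remaining_jiffies;

	if (remaining_ns < 0)
		remaining_jiffies = 0;	/* already expired */
	else
		remaining_jiffies = remaining_ns / (1000000000LL / hz);

	/* clamp before narrowing: a huge timeout must not wrap when the
	 * result is stored in a 32-bit jiffies count */
	if (remaining_jiffies > INT_MAX)
		remaining_jiffies = INT_MAX;
	return (unsigned long)remaining_jiffies;
}

int main(void)
{
	printf("expired: %lu\n", timeout_to_jiffies(-5, 100));
	printf("huge:    %lu\n", timeout_to_jiffies(INT64_MAX / 2, 100));
	return 0;
}
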
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index fdc5367aecaa..151d19e4453c 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -46,7 +46,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
if (!submit)
return ERR_PTR(-ENOMEM);
- ret = drm_sched_job_init(&submit->base, &queue->entity, queue);
+ ret = drm_sched_job_init(&submit->base, queue->entity, queue);
if (ret) {
kfree(submit);
return ERR_PTR(ret);
@@ -171,7 +171,8 @@ out:
static int submit_lookup_cmds(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
- unsigned i, sz;
+ unsigned i;
+ size_t sz;
int ret = 0;
for (i = 0; i < args->nr_cmds; i++) {
@@ -907,7 +908,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
/* The scheduler owns a ref now: */
msm_gem_submit_get(submit);
- drm_sched_entity_push_job(&submit->base, &queue->entity);
+ drm_sched_entity_push_job(&submit->base, queue->entity);
args->fence = submit->fence_id;
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 0e4b45bff2e6..ee25d556c8a1 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -203,6 +203,10 @@ struct msm_gpu {
uint32_t suspend_count;
struct msm_gpu_state *crashstate;
+
+ /* Enable clamping to idle freq when inactive: */
+ bool clamp_to_idle;
+
/* True if the hardware supports expanded apriv (a650 and newer) */
bool hw_apriv;
@@ -258,6 +262,39 @@ struct msm_gpu_perfcntr {
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
/**
+ * struct msm_file_private - per-drm_file context
+ *
+ * @queuelock: synchronizes access to submitqueues list
+ * @submitqueues: list of &msm_gpu_submitqueue created by userspace
+ * @queueid: counter incremented each time a submitqueue is created,
+ * used to assign &msm_gpu_submitqueue.id
+ * @aspace: the per-process GPU address-space
+ * @ref: reference count
+ * @seqno: unique per process seqno
+ */
+struct msm_file_private {
+ rwlock_t queuelock;
+ struct list_head submitqueues;
+ int queueid;
+ struct msm_gem_address_space *aspace;
+ struct kref ref;
+ int seqno;
+
+ /**
+ * entities:
+ *
+ * Table of per-priority-level sched entities used by submitqueues
+ * associated with this &drm_file. Because some userspace apps
+ * make assumptions about rendering from multiple gl contexts
+ * (of the same priority) within the process happening in FIFO
+ * order without requiring any fencing beyond MakeCurrent(), we
+ * create at most one &drm_sched_entity per-process per-priority-
+ * level.
+ */
+ struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
+};
+
+/**
* msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
*
* @gpu: the gpu instance
@@ -304,6 +341,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
}
/**
+ * struct msm_gpu_submitqueue - Userspace created context.
+ *
* A submitqueue is associated with a gl context or vk queue (or equiv)
* in userspace.
*
@@ -321,7 +360,7 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
* seqno, protected by submitqueue lock
* @lock: submitqueue lock
* @ref: reference count
- * @entity: the submit job-queue
+ * @entity: the submit job-queue
*/
struct msm_gpu_submitqueue {
int id;
@@ -333,7 +372,7 @@ struct msm_gpu_submitqueue {
struct idr fence_idr;
struct mutex lock;
struct kref ref;
- struct drm_sched_entity entity;
+ struct drm_sched_entity *entity;
};
struct msm_gpu_state_bo {
@@ -421,6 +460,33 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
+int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+ u32 id);
+int msm_submitqueue_create(struct drm_device *drm,
+ struct msm_file_private *ctx,
+ u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+ struct drm_msm_submitqueue_query *args);
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
+void msm_submitqueue_close(struct msm_file_private *ctx);
+
+void msm_submitqueue_destroy(struct kref *kref);
+
+void __msm_file_private_destroy(struct kref *kref);
+
+static inline void msm_file_private_put(struct msm_file_private *ctx)
+{
+ kref_put(&ctx->ref, __msm_file_private_destroy);
+}
+
+static inline struct msm_file_private *msm_file_private_get(
+ struct msm_file_private *ctx)
+{
+ kref_get(&ctx->ref);
+ return ctx;
+}
+
void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
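The entities[] table above is flat: rings form the major axis and priority levels the minor one. A minimal sketch of the index math, mirroring the computation the submitqueue code uses (illustrative only; entity_index() is a hypothetical helper):

static inline unsigned entity_index(unsigned ring_nr,
				    enum drm_sched_priority sched_prio)
{
	/* Same layout as msm_file_private::entities[] */
	return (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;
}

For example, with NR_SCHED_PRIORITIES == 3, ring 2 at priority 1 lands in slot 7.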
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 0a1ee20296a2..20006d060b5b 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -151,6 +151,9 @@ void msm_devfreq_active(struct msm_gpu *gpu)
unsigned int idle_time;
unsigned long target_freq = df->idle_freq;
+ if (!df->devfreq)
+ return;
+
/*
* Hold devfreq lock to synchronize with get_dev_status()/
* target() callbacks
@@ -186,6 +189,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
struct msm_gpu_devfreq *df = &gpu->devfreq;
unsigned long idle_freq, target_freq = 0;
+ if (!df->devfreq)
+ return;
+
/*
* Hold devfreq lock to synchronize with get_dev_status()/
* target() callbacks
@@ -194,7 +200,8 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
idle_freq = get_freq(gpu);
- msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+ if (gpu->clamp_to_idle)
+ msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
df->idle_time = ktime_get();
df->idle_freq = idle_freq;
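A condensed restatement of the idle path after the two hunks above (illustrative; example_idle() is not a real function): both entry points now bail out early on GPUs that never initialized devfreq, and the frequency is only clamped when the GPU opts in:

static void example_idle(struct msm_gpu *gpu)
{
	unsigned long target_freq = 0;

	if (!gpu->devfreq.devfreq)
		return;			/* devfreq was never set up */

	if (gpu->clamp_to_idle)		/* per-GPU opt-in */
		msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);

	/* idle_time/idle_freq bookkeeping continues either way */
}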
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 32a55d81b58b..b8621c6e0554 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -7,6 +7,24 @@
#include "msm_gpu.h"
+void __msm_file_private_destroy(struct kref *kref)
+{
+ struct msm_file_private *ctx = container_of(kref,
+ struct msm_file_private, ref);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
+ if (!ctx->entities[i])
+ continue;
+
+ drm_sched_entity_destroy(ctx->entities[i]);
+ kfree(ctx->entities[i]);
+ }
+
+ msm_gem_address_space_put(ctx->aspace);
+ kfree(ctx);
+}
+
void msm_submitqueue_destroy(struct kref *kref)
{
struct msm_gpu_submitqueue *queue = container_of(kref,
@@ -14,8 +32,6 @@ void msm_submitqueue_destroy(struct kref *kref)
idr_destroy(&queue->fence_idr);
- drm_sched_entity_destroy(&queue->entity);
-
msm_file_private_put(queue->ctx);
kfree(queue);
@@ -61,13 +77,47 @@ void msm_submitqueue_close(struct msm_file_private *ctx)
}
}
+static struct drm_sched_entity *
+get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
+ unsigned ring_nr, enum drm_sched_priority sched_prio)
+{
+ static DEFINE_MUTEX(entity_lock);
+ unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;
+
+ /* We should have already validated that the requested priority is
+ * valid by the time we get here.
+ */
+ if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&entity_lock);
+
+ if (!ctx->entities[idx]) {
+ struct drm_sched_entity *entity;
+ struct drm_gpu_scheduler *sched = &ring->sched;
+ int ret;
+
+ entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
+ if (!entity) {
+ mutex_unlock(&entity_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
+ if (ret) {
+ mutex_unlock(&entity_lock);
+ kfree(entity);
+ return ERR_PTR(ret);
+ }
+
+ ctx->entities[idx] = entity;
+ }
+
+ mutex_unlock(&entity_lock);
+
+ return ctx->entities[idx];
+}
+
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
u32 prio, u32 flags, u32 *id)
{
struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu_submitqueue *queue;
- struct msm_ringbuffer *ring;
- struct drm_gpu_scheduler *sched;
enum drm_sched_priority sched_prio;
unsigned ring_nr;
int ret;
@@ -91,12 +141,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
queue->flags = flags;
queue->ring_nr = ring_nr;
- ring = priv->gpu->rb[ring_nr];
- sched = &ring->sched;
-
- ret = drm_sched_entity_init(&queue->entity,
- sched_prio, &sched, 1, NULL);
- if (ret) {
+ queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
+ ring_nr, sched_prio);
+ if (IS_ERR(queue->entity)) {
+ ret = PTR_ERR(queue->entity);
kfree(queue);
return ret;
}
@@ -140,10 +188,6 @@ int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
*/
default_prio = DIV_ROUND_UP(max_priority, 2);
- INIT_LIST_HEAD(&ctx->submitqueues);
-
- rwlock_init(&ctx->queuelock);
-
return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}
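How the lazy allocation above plays out for callers (a sketch against the structures in msm_gpu.h; example_shared_entity() is hypothetical): two submitqueues created at the same ring and priority receive the same scheduler entity, which is what provides the FIFO ordering across GL contexts promised by the msm_file_private kerneldoc:

static void example_shared_entity(struct msm_file_private *ctx,
				  struct msm_ringbuffer *ring,
				  unsigned ring_nr,
				  enum drm_sched_priority prio)
{
	/* The first call allocates and initializes the entity... */
	struct drm_sched_entity *a = get_sched_entity(ctx, ring, ring_nr, prio);
	/* ...a second call at the same (ring, prio) returns it again. */
	struct drm_sched_entity *b = get_sched_entity(ctx, ring, ring_nr, prio);

	WARN_ON(!IS_ERR(a) && a != b);
}

Teardown moves accordingly: drm_sched_entity_destroy() now runs once per table slot in __msm_file_private_destroy() rather than once per queue in msm_submitqueue_destroy().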
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index ec0432fe1bdf..86d78634a979 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -173,7 +173,11 @@ static void mxsfb_irq_disable(struct drm_device *drm)
struct mxsfb_drm_private *mxsfb = drm->dev_private;
mxsfb_enable_axi_clk(mxsfb);
- mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc);
+
+ /* Disable and clear VBLANK IRQ */
+ writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+
mxsfb_disable_axi_clk(mxsfb);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c
index b8c31b697797..66f32d965c72 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c
@@ -704,6 +704,7 @@ static const struct file_operations nv50_crc_flip_threshold_fops = {
.open = nv50_crc_debugfs_flip_threshold_open,
.read = seq_read,
.write = nv50_crc_debugfs_flip_threshold_set,
+ .release = single_release,
};
int nv50_head_crc_late_register(struct nv50_head *head)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index d66f97280282..72099d1e4816 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -52,6 +52,7 @@ nv50_head_flush_clr(struct nv50_head *head,
void
nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
{
+ if (asyh->set.curs ) head->func->curs_set(head, asyh);
if (asyh->set.olut ) {
asyh->olut.offset = nv50_lut_load(&head->olut,
asyh->olut.buffer,
@@ -67,7 +68,6 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
if (asyh->set.view ) head->func->view (head, asyh);
if (asyh->set.mode ) head->func->mode (head, asyh);
if (asyh->set.core ) head->func->core_set(head, asyh);
- if (asyh->set.curs ) head->func->curs_set(head, asyh);
if (asyh->set.base ) head->func->base (head, asyh);
if (asyh->set.ovly ) head->func->ovly (head, asyh);
if (asyh->set.dither ) head->func->dither (head, asyh);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index c68cc957248e..a582c0cb0cb0 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -71,6 +71,7 @@
#define PASCAL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c06f
#define VOLTA_CHANNEL_GPFIFO_A /* clc36f.h */ 0x0000c36f
#define TURING_CHANNEL_GPFIFO_A /* clc36f.h */ 0x0000c46f
+#define AMPERE_CHANNEL_GPFIFO_B /* clc36f.h */ 0x0000c76f
#define NV50_DISP /* cl5070.h */ 0x00005070
#define G82_DISP /* cl5070.h */ 0x00008270
@@ -200,6 +201,7 @@
#define PASCAL_DMA_COPY_B 0x0000c1b5
#define VOLTA_DMA_COPY_A 0x0000c3b5
#define TURING_DMA_COPY_A 0x0000c5b5
+#define AMPERE_DMA_COPY_B 0x0000c7b5
#define FERMI_DECOMPRESS 0x000090b8
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 54fab7cc36c1..64ee82c7c1be 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -77,4 +77,5 @@ int gp100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gp10b_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
int gv100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
int tu102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int ga102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6d07e653f82d..c58bcdba2c7a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -844,6 +844,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_resource *, struct ttm_resource *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
+ { "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 80099ef75702..ea7769135b0d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -250,7 +250,8 @@ static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
u64 runlist, bool priv, struct nouveau_channel **pchan)
{
- static const u16 oclasses[] = { TURING_CHANNEL_GPFIFO_A,
+ static const u16 oclasses[] = { AMPERE_CHANNEL_GPFIFO_B,
+ TURING_CHANNEL_GPFIFO_A,
VOLTA_CHANNEL_GPFIFO_A,
PASCAL_CHANNEL_GPFIFO_A,
MAXWELL_CHANNEL_GPFIFO_A,
@@ -386,7 +387,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
nvif_object_map(&chan->user, NULL, 0);
- if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
+ if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO &&
+ chan->user.oclass < AMPERE_CHANNEL_GPFIFO_B) {
ret = nvif_notify_ctor(&chan->user, "abi16ChanKilled",
nouveau_channel_killed,
true, NV906F_V0_NTFY_KILLED,
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index c2bc05eb2e54..1cbe01048b93 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -207,6 +207,7 @@ static const struct file_operations nouveau_pstate_fops = {
.open = nouveau_debugfs_pstate_open,
.read = seq_read,
.write = nouveau_debugfs_pstate_set,
+ .release = single_release,
};
static struct drm_info_list nouveau_debugfs_list[] = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 1f828c9f691c..6109cd9e3399 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -345,6 +345,9 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
u32 arg0, arg1;
int ret;
+ if (device->info.family >= NV_DEVICE_INFO_V0_AMPERE)
+ return;
+
/* Allocate channel that has access to the graphics engine. */
if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
arg0 = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
@@ -469,6 +472,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
case PASCAL_CHANNEL_GPFIFO_A:
case VOLTA_CHANNEL_GPFIFO_A:
case TURING_CHANNEL_GPFIFO_A:
+ case AMPERE_CHANNEL_GPFIFO_B:
ret = nvc0_fence_create(drm);
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5b27845075a1..8c2ecc282723 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -247,10 +247,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
}
ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
- if (ret) {
- nouveau_bo_ref(NULL, &nvbo);
+ if (ret)
return ret;
- }
/* we restrict allowed domains on nv50+ to only the types
* that were requested at creation time. not possibly on
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 7c9c928c3196..c3526a8622e3 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -204,7 +204,7 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv84_fence_context_new;
priv->base.context_del = nv84_fence_context_del;
- priv->base.uevent = true;
+ priv->base.uevent = drm->client.device.info.family < NV_DEVICE_INFO_V0_AMPERE;
mutex_init(&priv->mutex);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 93ddf63d1114..ca75c5f6ecaf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2602,6 +2602,7 @@ nv172_chipset = {
.top = { 0x00000001, ga100_top_new },
.disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
};
static const struct nvkm_device_chip
@@ -2622,6 +2623,7 @@ nv174_chipset = {
.top = { 0x00000001, ga100_top_new },
.disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
};
static const struct nvkm_device_chip
@@ -2642,6 +2644,7 @@ nv177_chipset = {
.top = { 0x00000001, ga100_top_new },
.disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
};
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
index b0ece71aefde..ce774579c89d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
@@ -57,7 +57,7 @@ nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
args->v0.count = 0;
args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
- args->v0.pwrsrc = -ENOSYS;
+ args->v0.pwrsrc = -ENODEV;
args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 3209eb7af65f..5e831d347a95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/fifo/gp100.o
nvkm-y += nvkm/engine/fifo/gp10b.o
nvkm-y += nvkm/engine/fifo/gv100.o
nvkm-y += nvkm/engine/fifo/tu102.o
+nvkm-y += nvkm/engine/fifo/ga102.o
nvkm-y += nvkm/engine/fifo/chan.o
nvkm-y += nvkm/engine/fifo/channv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
index 353b77d9b3dc..3492c561f2cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -82,7 +82,7 @@ g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
if (offset < 0)
return 0;
- engn = fifo->base.func->engine_id(&fifo->base, engine);
+ engn = fifo->base.func->engine_id(&fifo->base, engine) - 1;
save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
done = nvkm_msec(device, 2000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
new file mode 100644
index 000000000000..c630dbd2911a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define ga102_fifo(p) container_of((p), struct ga102_fifo, base.engine)
+#define ga102_chan(p) container_of((p), struct ga102_chan, object)
+#include <engine/fifo.h>
+#include "user.h"
+
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+#include <subdev/top.h>
+
+#include <nvif/cl0080.h>
+#include <nvif/clc36f.h>
+#include <nvif/class.h>
+
+struct ga102_fifo {
+ struct nvkm_fifo base;
+};
+
+struct ga102_chan {
+ struct nvkm_object object;
+
+ struct {
+ u32 runl;
+ u32 chan;
+ } ctrl;
+
+ struct nvkm_memory *mthd;
+ struct nvkm_memory *inst;
+ struct nvkm_memory *user;
+ struct nvkm_memory *runl;
+
+ struct nvkm_vmm *vmm;
+};
+
+static int
+ga102_chan_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *oclass)
+{
+ if (index == 0) {
+ oclass->ctor = nvkm_object_new;
+ oclass->base = (struct nvkm_sclass) { -1, -1, AMPERE_DMA_COPY_B };
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int
+ga102_chan_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+ struct ga102_chan *chan = ga102_chan(object);
+ struct nvkm_device *device = chan->object.engine->subdev.device;
+ u64 bar2 = nvkm_memory_bar2(chan->user);
+
+ if (bar2 == ~0ULL)
+ return -EFAULT;
+
+ *type = NVKM_OBJECT_MAP_IO;
+ *addr = device->func->resource_addr(device, 3) + bar2;
+ *size = 0x1000;
+ return 0;
+}
+
+static int
+ga102_chan_fini(struct nvkm_object *object, bool suspend)
+{
+ struct ga102_chan *chan = ga102_chan(object);
+ struct nvkm_device *device = chan->object.engine->subdev.device;
+
+ nvkm_wr32(device, chan->ctrl.chan, 0x00000003);
+
+ nvkm_wr32(device, chan->ctrl.runl + 0x098, 0x01000000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, chan->ctrl.runl + 0x098) & 0x00100000))
+ break;
+ );
+
+ nvkm_wr32(device, chan->ctrl.runl + 0x088, 0);
+
+ nvkm_wr32(device, chan->ctrl.chan, 0xffffffff);
+ return 0;
+}
+
+static int
+ga102_chan_init(struct nvkm_object *object)
+{
+ struct ga102_chan *chan = ga102_chan(object);
+ struct nvkm_device *device = chan->object.engine->subdev.device;
+
+ nvkm_mask(device, chan->ctrl.runl + 0x300, 0x80000000, 0x80000000);
+
+ nvkm_wr32(device, chan->ctrl.runl + 0x080, lower_32_bits(nvkm_memory_addr(chan->runl)));
+ nvkm_wr32(device, chan->ctrl.runl + 0x084, upper_32_bits(nvkm_memory_addr(chan->runl)));
+ nvkm_wr32(device, chan->ctrl.runl + 0x088, 2);
+
+ nvkm_wr32(device, chan->ctrl.chan, 0x00000002);
+ nvkm_wr32(device, chan->ctrl.runl + 0x0090, 0);
+ return 0;
+}
+
+static void *
+ga102_chan_dtor(struct nvkm_object *object)
+{
+ struct ga102_chan *chan = ga102_chan(object);
+
+ if (chan->vmm) {
+ nvkm_vmm_part(chan->vmm, chan->inst);
+ nvkm_vmm_unref(&chan->vmm);
+ }
+
+ nvkm_memory_unref(&chan->runl);
+ nvkm_memory_unref(&chan->user);
+ nvkm_memory_unref(&chan->inst);
+ nvkm_memory_unref(&chan->mthd);
+ return chan;
+}
+
+static const struct nvkm_object_func
+ga102_chan = {
+ .dtor = ga102_chan_dtor,
+ .init = ga102_chan_init,
+ .fini = ga102_chan_fini,
+ .map = ga102_chan_map,
+ .sclass = ga102_chan_sclass,
+};
+
+static int
+ga102_chan_new(struct nvkm_device *device,
+ const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
+{
+ struct volta_channel_gpfifo_a_v0 *args = argv;
+ struct nvkm_top_device *tdev;
+ struct nvkm_vmm *vmm;
+ struct ga102_chan *chan;
+ int ret;
+
+ if (argc != sizeof(*args))
+ return -ENOSYS;
+
+ vmm = nvkm_uvmm_search(oclass->client, args->vmm);
+ if (IS_ERR(vmm))
+ return PTR_ERR(vmm);
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&ga102_chan, oclass, &chan->object);
+ *pobject = &chan->object;
+
+ list_for_each_entry(tdev, &device->top->device, head) {
+ if (tdev->type == NVKM_ENGINE_CE) {
+ chan->ctrl.runl = tdev->runlist;
+ break;
+ }
+ }
+
+ if (!chan->ctrl.runl)
+ return -ENODEV;
+
+ chan->ctrl.chan = nvkm_rd32(device, chan->ctrl.runl + 0x004) & 0xfffffff0;
+
+ args->chid = 0;
+ args->inst = 0;
+ args->token = nvkm_rd32(device, chan->ctrl.runl + 0x008) & 0xffff0000;
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->mthd);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->inst);
+ if (ret)
+ return ret;
+
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, 0x010, 0x0000face);
+ nvkm_wo32(chan->inst, 0x030, 0x7ffff902);
+ nvkm_wo32(chan->inst, 0x048, lower_32_bits(args->ioffset));
+ nvkm_wo32(chan->inst, 0x04c, upper_32_bits(args->ioffset) |
+ (order_base_2(args->ilength / 8) << 16));
+ nvkm_wo32(chan->inst, 0x084, 0x20400000);
+ nvkm_wo32(chan->inst, 0x094, 0x30000001);
+ nvkm_wo32(chan->inst, 0x0ac, 0x00020000);
+ nvkm_wo32(chan->inst, 0x0e4, 0x00000000);
+ nvkm_wo32(chan->inst, 0x0e8, 0);
+ nvkm_wo32(chan->inst, 0x0f4, 0x00001000);
+ nvkm_wo32(chan->inst, 0x0f8, 0x10003080);
+ nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000);
+ nvkm_wo32(chan->inst, 0x220, lower_32_bits(nvkm_memory_bar2(chan->mthd)));
+ nvkm_wo32(chan->inst, 0x224, upper_32_bits(nvkm_memory_bar2(chan->mthd)));
+ nvkm_done(chan->inst);
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->user);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->runl);
+ if (ret)
+ return ret;
+
+ nvkm_kmap(chan->runl);
+ nvkm_wo32(chan->runl, 0x00, 0x80030001);
+ nvkm_wo32(chan->runl, 0x04, 1);
+ nvkm_wo32(chan->runl, 0x08, 0);
+ nvkm_wo32(chan->runl, 0x0c, 0x00000000);
+ nvkm_wo32(chan->runl, 0x10, lower_32_bits(nvkm_memory_addr(chan->user)));
+ nvkm_wo32(chan->runl, 0x14, upper_32_bits(nvkm_memory_addr(chan->user)));
+ nvkm_wo32(chan->runl, 0x18, lower_32_bits(nvkm_memory_addr(chan->inst)));
+ nvkm_wo32(chan->runl, 0x1c, upper_32_bits(nvkm_memory_addr(chan->inst)));
+ nvkm_done(chan->runl);
+
+ ret = nvkm_vmm_join(vmm, chan->inst);
+ if (ret)
+ return ret;
+
+ chan->vmm = nvkm_vmm_ref(vmm);
+ return 0;
+}
+
+static const struct nvkm_device_oclass
+ga102_chan_oclass = {
+ .ctor = ga102_chan_new,
+};
+
+static int
+ga102_user_new(struct nvkm_device *device,
+ const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
+{
+ return tu102_fifo_user_new(oclass, argv, argc, pobject);
+}
+
+static const struct nvkm_device_oclass
+ga102_user_oclass = {
+ .ctor = ga102_user_new,
+};
+
+static int
+ga102_fifo_sclass(struct nvkm_oclass *oclass, int index, const struct nvkm_device_oclass **class)
+{
+ if (index == 0) {
+ oclass->base = (struct nvkm_sclass) { -1, -1, VOLTA_USERMODE_A };
+ *class = &ga102_user_oclass;
+ return 0;
+ } else
+ if (index == 1) {
+ oclass->base = (struct nvkm_sclass) { 0, 0, AMPERE_CHANNEL_GPFIFO_B };
+ *class = &ga102_chan_oclass;
+ return 0;
+ }
+
+ return 2;
+}
+
+static int
+ga102_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
+{
+ switch (mthd) {
+ case NV_DEVICE_HOST_CHANNELS: *data = 1; return 0;
+ default:
+ break;
+ }
+
+ return -ENOSYS;
+}
+
+static void *
+ga102_fifo_dtor(struct nvkm_engine *engine)
+{
+ return ga102_fifo(engine);
+}
+
+static const struct nvkm_engine_func
+ga102_fifo = {
+ .dtor = ga102_fifo_dtor,
+ .info = ga102_fifo_info,
+ .base.sclass = ga102_fifo_sclass,
+};
+
+int
+ga102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ struct ga102_fifo *fifo;
+
+ if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_engine_ctor(&ga102_fifo, device, type, inst, true, &fifo->base.engine);
+ *pfifo = &fifo->base;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
index 31933f3e5a07..c982d834c8d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
@@ -54,7 +54,7 @@ ga100_top_oneinit(struct nvkm_top *top)
info->reset = (data & 0x0000001f);
break;
case 2:
- info->runlist = (data & 0x0000fc00) >> 10;
+ info->runlist = (data & 0x00fffc00);
info->engine = (data & 0x00000003);
break;
default:
@@ -85,9 +85,10 @@ ga100_top_oneinit(struct nvkm_top *top)
}
nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
- "runlist %2d engine %2d reset %2d\n", type, inst,
+ "runlist %6x engine %2d reset %2d\n", type, inst,
info->type == NVKM_SUBDEV_NR ? "????????" : nvkm_subdev_type[info->type],
- info->addr, info->fault, info->runlist, info->engine, info->reset);
+ info->addr, info->fault, info->runlist < 0 ? 0 : info->runlist,
+ info->engine, info->reset);
info = NULL;
}
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index beb581b96ecd..418638e6e3b0 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -295,6 +295,7 @@ config DRM_PANEL_OLIMEX_LCD_OLINUXINO
depends on OF
depends on I2C
depends on BACKLIGHT_CLASS_DEVICE
+ select CRC32
help
The panel is used with different sizes LCDs, from 480x272 to
1280x800, and 24 bit per pixel.
diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
index 2d8794d495d0..3d8a9ab47cae 100644
--- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
+++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
@@ -146,8 +146,8 @@ static const struct reg_sequence y030xx067a_init_sequence[] = {
{ 0x09, REG09_SUB_BRIGHT_R(0x20) },
{ 0x0a, REG0A_SUB_BRIGHT_B(0x20) },
{ 0x0b, REG0B_HD_FREERUN | REG0B_VD_FREERUN },
- { 0x0c, REG0C_CONTRAST_R(0x10) },
- { 0x0d, REG0D_CONTRAST_G(0x10) },
+ { 0x0c, REG0C_CONTRAST_R(0x00) },
+ { 0x0d, REG0D_CONTRAST_G(0x00) },
{ 0x0e, REG0E_CONTRAST_B(0x10) },
{ 0x0f, 0 },
{ 0x10, REG10_BRIGHT(0x7f) },
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 0145129d7c66..534dd7414d42 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -590,14 +590,14 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
.clock = 69700,
.hdisplay = 800,
- .hsync_start = 800 + 6,
- .hsync_end = 800 + 6 + 15,
- .htotal = 800 + 6 + 15 + 16,
+ .hsync_start = 800 + 52,
+ .hsync_end = 800 + 52 + 8,
+ .htotal = 800 + 52 + 8 + 48,
.vdisplay = 1280,
- .vsync_start = 1280 + 8,
- .vsync_end = 1280 + 8 + 48,
- .vtotal = 1280 + 8 + 48 + 52,
+ .vsync_start = 1280 + 16,
+ .vsync_end = 1280 + 16 + 6,
+ .vtotal = 1280 + 16 + 6 + 15,
.width_mm = 135,
.height_mm = 217,
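Sanity check on the new timings (editor's arithmetic from the values above; the helper is purely illustrative):

static unsigned k101_im2byl02_vrefresh(void)
{
	unsigned htotal = 800 + 52 + 8 + 48;	/* = 908 */
	unsigned vtotal = 1280 + 16 + 6 + 15;	/* = 1317 */

	return 69700000 / (htotal * vtotal);	/* ~58 Hz */
}

The old values (htotal 837, vtotal 1388) also gave roughly 60 Hz, but with the horizontal and vertical blanking intervals apparently transposed between axes.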
diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c
index 0ecccf25a3c7..d2a0f5394fef 100644
--- a/drivers/gpu/drm/r128/ati_pcigart.c
+++ b/drivers/gpu/drm/r128/ati_pcigart.c
@@ -214,7 +214,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
}
ret = 0;
-#if defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_X86
wbinvd();
#else
mb();
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 0473583dcdac..482fb0ae6cb5 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -119,7 +119,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
#endif
if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
- rdev->agp = radeon_agp_head_init(rdev->ddev);
+ rdev->agp = radeon_agp_head_init(dev);
if (rdev->agp) {
rdev->agp->agp_mtrr = arch_phys_wc_add(
rdev->agp->agp_info.aper_base,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 0daa8bba50f5..4bf4e25d7f01 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -86,12 +86,20 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
}
/*
- * Create and initialize the encoder. On Gen3 skip the LVDS1 output if
+ * Create and initialize the encoder. On Gen3, skip the LVDS1 output if
* the LVDS1 encoder is used as a companion for LVDS0 in dual-link
- * mode.
+ * mode, or any LVDS output if it isn't connected. The latter may happen
+ * on D3 or E3 as the LVDS encoders are needed to provide the pixel
+ * clock to the DU, even when the LVDS outputs are not used.
*/
- if (rcdu->info->gen >= 3 && output == RCAR_DU_OUTPUT_LVDS1) {
- if (rcar_lvds_dual_link(bridge))
+ if (rcdu->info->gen >= 3) {
+ if (output == RCAR_DU_OUTPUT_LVDS1 &&
+ rcar_lvds_dual_link(bridge))
+ return -ENOLINK;
+
+ if ((output == RCAR_DU_OUTPUT_LVDS0 ||
+ output == RCAR_DU_OUTPUT_LVDS1) &&
+ !rcar_lvds_is_connected(bridge))
return -ENOLINK;
}
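The same gen3 check, unrolled for readability (a sketch; example_skip_output() is a hypothetical helper):

static bool example_skip_output(struct rcar_du_device *rcdu,
				enum rcar_du_output output,
				struct drm_bridge *bridge)
{
	if (rcdu->info->gen < 3)
		return false;

	/* LVDS1 is consumed as the dual-link companion of LVDS0. */
	if (output == RCAR_DU_OUTPUT_LVDS1 && rcar_lvds_dual_link(bridge))
		return true;

	/* On D3/E3 the LVDS PLL still feeds the DU, but an output
	 * without a next bridge has nothing to drive. */
	if ((output == RCAR_DU_OUTPUT_LVDS0 ||
	     output == RCAR_DU_OUTPUT_LVDS1) &&
	    !rcar_lvds_is_connected(bridge))
		return true;

	return false;
}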
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index d061b8de748f..b672c5bd72ee 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -576,6 +576,9 @@ static int rcar_lvds_attach(struct drm_bridge *bridge,
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+ if (!lvds->next_bridge)
+ return 0;
+
return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge,
flags);
}
@@ -598,6 +601,14 @@ bool rcar_lvds_dual_link(struct drm_bridge *bridge)
}
EXPORT_SYMBOL_GPL(rcar_lvds_dual_link);
+bool rcar_lvds_is_connected(struct drm_bridge *bridge)
+{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+
+ return lvds->next_bridge != NULL;
+}
+EXPORT_SYMBOL_GPL(rcar_lvds_is_connected);
+
/* -----------------------------------------------------------------------------
* Probe & Remove
*/
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.h b/drivers/gpu/drm/rcar-du/rcar_lvds.h
index 222ec0e60785..eb7c6ef03b00 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.h
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.h
@@ -16,6 +16,7 @@ struct drm_bridge;
int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq);
void rcar_lvds_clk_disable(struct drm_bridge *bridge);
bool rcar_lvds_dual_link(struct drm_bridge *bridge);
+bool rcar_lvds_is_connected(struct drm_bridge *bridge);
#else
static inline int rcar_lvds_clk_enable(struct drm_bridge *bridge,
unsigned long freq)
@@ -27,6 +28,10 @@ static inline bool rcar_lvds_dual_link(struct drm_bridge *bridge)
{
return false;
}
+static inline bool rcar_lvds_is_connected(struct drm_bridge *bridge)
+{
+ return false;
+}
#endif /* CONFIG_DRM_RCAR_LVDS */
#endif /* __RCAR_LVDS_H__ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index ba9e14da41b4..a25b98b7f5bd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1174,26 +1174,24 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
*
* Action plan:
*
- * 1. When DRM gives us a mode, we should add 999 Hz to it. That way
- * if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to
- * make 60000 kHz then the clock framework will actually give us
- * the right clock.
+ * 1. Try to set the exact rate first, and confirm the clock framework
+ * can provide it.
*
- * NOTE: if the PLL (maybe through a divider) could actually make
- * a clock rate 999 Hz higher instead of the one we want then this
- * could be a problem. Unfortunately there's not much we can do
- * since it's baked into DRM to use kHz. It shouldn't matter in
- * practice since Rockchip PLLs are controlled by tables and
- * even if there is a divider in the middle I wouldn't expect PLL
- * rates in the table that are just a few kHz different.
+ * 2. If the clock framework cannot provide the exact rate, we should
+ * add 999 Hz to the requested rate. That way if the clock we need
+ * is 60000001 Hz (~60 MHz) and DRM tells us to make 60000 kHz then
+ * the clock framework will actually give us the right clock.
*
- * 2. Get the clock framework to round the rate for us to tell us
+ * 3. Get the clock framework to round the rate for us to tell us
* what it will actually make.
*
- * 3. Store the rounded up rate so that we don't need to worry about
+ * 4. Store the rounded up rate so that we don't need to worry about
* this in the actual clk_set_rate().
*/
- rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999);
+ rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000);
+ if (rate / 1000 != adjusted_mode->clock)
+ rate = clk_round_rate(vop->dclk,
+ adjusted_mode->clock * 1000 + 999);
adjusted_mode->clock = DIV_ROUND_UP(rate, 1000);
return true;
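The fallback logic from the hunk above, condensed into one place (sketch; pick_dclk_rate() is a hypothetical name):

static long pick_dclk_rate(struct clk *dclk, int mode_clock_khz)
{
	long rate = clk_round_rate(dclk, mode_clock_khz * 1000);

	/* Exact rate unavailable: retry with +999 Hz so a clock such
	 * as 60000001 Hz still rounds to the intended 60000 kHz mode. */
	if (rate / 1000 != mode_clock_khz)
		rate = clk_round_rate(dclk, mode_clock_khz * 1000 + 999);

	return rate;
}

Trying the exact rate first avoids the old failure mode where a PLL able to produce a rate 999 Hz above the request could win over an exact match.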
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index f75fb157f2ff..016b877051da 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -216,11 +216,13 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
goto err_disable_clk_tmds;
}
+ ret = sun8i_hdmi_phy_init(hdmi->phy);
+ if (ret)
+ goto err_disable_clk_tmds;
+
drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
- sun8i_hdmi_phy_init(hdmi->phy);
-
plat_data->mode_valid = hdmi->quirks->mode_valid;
plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe;
sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data);
@@ -262,6 +264,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
dw_hdmi_unbind(hdmi->hdmi);
+ sun8i_hdmi_phy_deinit(hdmi->phy);
clk_disable_unprepare(hdmi->clk_tmds);
reset_control_assert(hdmi->rst_ctrl);
gpiod_set_value(hdmi->ddc_en, 0);
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index 74f6ed0e2570..bffe1b9cd3dc 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -169,6 +169,7 @@ struct sun8i_hdmi_phy {
struct clk *clk_phy;
struct clk *clk_pll0;
struct clk *clk_pll1;
+ struct device *dev;
unsigned int rcal;
struct regmap *regs;
struct reset_control *rst_phy;
@@ -205,7 +206,8 @@ encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
-void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
+int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
+void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy);
void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
struct dw_hdmi_plat_data *plat_data);
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index c9239708d398..b64d93da651d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -506,9 +506,60 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
phy->rcal = (val & SUN8I_HDMI_PHY_ANA_STS_RCAL_MASK) >> 2;
}
-void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
+int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
{
+ int ret;
+
+ ret = reset_control_deassert(phy->rst_phy);
+ if (ret) {
+ dev_err(phy->dev, "Cannot deassert phy reset control: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(phy->clk_bus);
+ if (ret) {
+ dev_err(phy->dev, "Cannot enable bus clock: %d\n", ret);
+ goto err_assert_rst_phy;
+ }
+
+ ret = clk_prepare_enable(phy->clk_mod);
+ if (ret) {
+ dev_err(phy->dev, "Cannot enable mod clock: %d\n", ret);
+ goto err_disable_clk_bus;
+ }
+
+ if (phy->variant->has_phy_clk) {
+ ret = sun8i_phy_clk_create(phy, phy->dev,
+ phy->variant->has_second_pll);
+ if (ret) {
+ dev_err(phy->dev, "Couldn't create the PHY clock\n");
+ goto err_disable_clk_mod;
+ }
+
+ clk_prepare_enable(phy->clk_phy);
+ }
+
phy->variant->phy_init(phy);
+
+ return 0;
+
+err_disable_clk_mod:
+ clk_disable_unprepare(phy->clk_mod);
+err_disable_clk_bus:
+ clk_disable_unprepare(phy->clk_bus);
+err_assert_rst_phy:
+ reset_control_assert(phy->rst_phy);
+
+ return ret;
+}
+
+void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy)
+{
+ clk_disable_unprepare(phy->clk_mod);
+ clk_disable_unprepare(phy->clk_bus);
+ clk_disable_unprepare(phy->clk_phy);
+
+ reset_control_assert(phy->rst_phy);
}
void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
@@ -638,6 +689,7 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
return -ENOMEM;
phy->variant = (struct sun8i_hdmi_phy_variant *)match->data;
+ phy->dev = dev;
ret = of_address_to_resource(node, 0, &res);
if (ret) {
@@ -696,47 +748,10 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
goto err_put_clk_pll1;
}
- ret = reset_control_deassert(phy->rst_phy);
- if (ret) {
- dev_err(dev, "Cannot deassert phy reset control: %d\n", ret);
- goto err_put_rst_phy;
- }
-
- ret = clk_prepare_enable(phy->clk_bus);
- if (ret) {
- dev_err(dev, "Cannot enable bus clock: %d\n", ret);
- goto err_deassert_rst_phy;
- }
-
- ret = clk_prepare_enable(phy->clk_mod);
- if (ret) {
- dev_err(dev, "Cannot enable mod clock: %d\n", ret);
- goto err_disable_clk_bus;
- }
-
- if (phy->variant->has_phy_clk) {
- ret = sun8i_phy_clk_create(phy, dev,
- phy->variant->has_second_pll);
- if (ret) {
- dev_err(dev, "Couldn't create the PHY clock\n");
- goto err_disable_clk_mod;
- }
-
- clk_prepare_enable(phy->clk_phy);
- }
-
platform_set_drvdata(pdev, phy);
return 0;
-err_disable_clk_mod:
- clk_disable_unprepare(phy->clk_mod);
-err_disable_clk_bus:
- clk_disable_unprepare(phy->clk_bus);
-err_deassert_rst_phy:
- reset_control_assert(phy->rst_phy);
-err_put_rst_phy:
- reset_control_put(phy->rst_phy);
err_put_clk_pll1:
clk_put(phy->clk_pll1);
err_put_clk_pll0:
@@ -753,12 +768,6 @@ static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
{
struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
- clk_disable_unprepare(phy->clk_mod);
- clk_disable_unprepare(phy->clk_bus);
- clk_disable_unprepare(phy->clk_phy);
-
- reset_control_assert(phy->rst_phy);
-
reset_control_put(phy->rst_phy);
clk_put(phy->clk_pll0);
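Net effect of the moves above (a sketch with hypothetical wrapper names): the PHY's reset line and clocks are now claimed when the DRM pipeline binds and released when it unbinds, instead of living from probe to remove, so an unbind/rebind cycle no longer leaves the PHY half-initialized:

static int example_bind(struct sun8i_dw_hdmi *hdmi)
{
	/* Deassert reset, enable bus/mod/phy clocks, then run the
	 * variant's phy_init -- all formerly done at probe time. */
	return sun8i_hdmi_phy_init(hdmi->phy);
}

static void example_unbind(struct sun8i_dw_hdmi *hdmi)
{
	/* Symmetric teardown, formerly spread over remove(). */
	sun8i_hdmi_phy_deinit(hdmi->phy);
}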
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 16c7aabb94d3..a29d64f87563 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1845,7 +1845,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc,
bool prepare_bandwidth_transition)
{
const struct tegra_plane_state *old_tegra_state, *new_tegra_state;
- const struct tegra_dc_state *old_dc_state, *new_dc_state;
u32 i, new_avg_bw, old_avg_bw, new_peak_bw, old_peak_bw;
const struct drm_plane_state *old_plane_state;
const struct drm_crtc_state *old_crtc_state;
@@ -1858,8 +1857,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc,
return;
old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
- old_dc_state = to_const_dc_state(old_crtc_state);
- new_dc_state = to_const_dc_state(crtc->state);
if (!crtc->state->active) {
if (!old_crtc_state->active)
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index f0cb691852a1..40378308d527 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -35,12 +35,6 @@ static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state)
return NULL;
}
-static inline const struct tegra_dc_state *
-to_const_dc_state(const struct drm_crtc_state *state)
-{
- return to_dc_state((struct drm_crtc_state *)state);
-}
-
struct tegra_dc_stats {
unsigned long frames;
unsigned long vblank;
diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c
index dc16a24f4dbe..690a339c52ec 100644
--- a/drivers/gpu/drm/tegra/uapi.c
+++ b/drivers/gpu/drm/tegra/uapi.c
@@ -222,7 +222,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
mapping->iova = sg_dma_address(mapping->sgt->sgl);
}
- mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->size;
+ mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;
err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
GFP_KERNEL);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 4a1115043114..ed8a4b7f8b6e 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -167,8 +167,6 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
bool connected = false;
- WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
-
if (vc4_hdmi->hpd_gpio &&
gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
connected = true;
@@ -189,12 +187,10 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
}
}
- pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_connected;
}
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
- pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_disconnected;
}
@@ -436,7 +432,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *cstate = connector->state;
- struct drm_crtc *crtc = cstate->crtc;
+ struct drm_crtc *crtc = encoder->crtc;
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
union hdmi_infoframe frame;
int ret;
@@ -541,11 +537,8 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_connector *connector = &vc4_hdmi->connector;
- struct drm_connector_state *cstate = connector->state;
- struct drm_crtc *crtc = cstate->crtc;
- struct drm_display_mode *mode = &crtc->state->adjusted_mode;
if (!vc4_hdmi_supports_scrambling(encoder, mode))
return;
@@ -566,18 +559,17 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_connector *connector = &vc4_hdmi->connector;
- struct drm_connector_state *cstate = connector->state;
+ struct drm_crtc *crtc = encoder->crtc;
/*
- * At boot, connector->state will be NULL. Since we don't know the
+ * At boot, encoder->crtc will be NULL. Since we don't know the
* state of the scrambler and in order to avoid any
* inconsistency, let's disable it all the time.
*/
- if (cstate && !vc4_hdmi_supports_scrambling(encoder, &cstate->crtc->mode))
+ if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode))
return;
- if (cstate && !vc4_hdmi_mode_needs_scrambling(&cstate->crtc->mode))
+ if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode))
return;
if (delayed_work_pending(&vc4_hdmi->scrambling_work))
@@ -635,6 +627,7 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
vc4_hdmi->variant->phy_disable(vc4_hdmi);
clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
@@ -898,9 +891,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
vc4_hdmi_encoder_get_connector_state(encoder, state);
struct vc4_hdmi_connector_state *vc4_conn_state =
conn_state_to_vc4_hdmi_conn_state(conn_state);
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(state, conn_state->crtc);
- struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
unsigned long bvb_rate, pixel_rate, hsm_rate;
int ret;
@@ -947,6 +938,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
return;
}
+ ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+ if (ret) {
+ DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
+ clk_disable_unprepare(vc4_hdmi->pixel_clock);
+ return;
+ }
+
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
if (pixel_rate > 297000000)
@@ -959,6 +957,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
if (ret) {
DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
@@ -966,6 +965,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
if (ret) {
DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
@@ -985,11 +985,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
- struct drm_connector_state *conn_state =
- vc4_hdmi_encoder_get_connector_state(encoder, state);
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(state, conn_state->crtc);
- struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
@@ -1012,11 +1008,7 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
- struct drm_connector_state *conn_state =
- vc4_hdmi_encoder_get_connector_state(encoder, state);
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(state, conn_state->crtc);
- struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -1204,8 +1196,8 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate)
{
- struct drm_connector *connector = &vc4_hdmi->connector;
- struct drm_crtc *crtc = connector->state->crtc;
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
+ struct drm_crtc *crtc = encoder->crtc;
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
u32 n, cts;
u64 tmp;
@@ -1238,13 +1230,13 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai)
static int vc4_hdmi_audio_startup(struct device *dev, void *data)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
- struct drm_connector *connector = &vc4_hdmi->connector;
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
/*
* If the HDMI encoder hasn't probed, or the encoder is
* currently in DVI mode, treat the codec dai as missing.
*/
- if (!connector->state || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
+ if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
VC4_HDMI_RAM_PACKET_ENABLE))
return -ENODEV;
@@ -1403,14 +1395,6 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
return 0;
}
-static const struct snd_soc_dapm_widget vc4_hdmi_audio_widgets[] = {
- SND_SOC_DAPM_OUTPUT("TX"),
-};
-
-static const struct snd_soc_dapm_route vc4_hdmi_audio_routes[] = {
- { "TX", NULL, "Playback" },
-};
-
static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = {
.name = "vc4-hdmi-cpu-dai-component",
};
@@ -2114,29 +2098,6 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return 0;
}
-#ifdef CONFIG_PM
-static int vc4_hdmi_runtime_suspend(struct device *dev)
-{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
-
- return 0;
-}
-
-static int vc4_hdmi_runtime_resume(struct device *dev)
-{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
- if (ret)
- return ret;
-
- return 0;
-}
-#endif
-
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2391,18 +2352,11 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
{}
};
-static const struct dev_pm_ops vc4_hdmi_pm_ops = {
- SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
- vc4_hdmi_runtime_resume,
- NULL)
-};
-
struct platform_driver vc4_hdmi_driver = {
.probe = vc4_hdmi_dev_probe,
.remove = vc4_hdmi_dev_remove,
.driver = {
.name = "vc4_hdmi",
.of_match_table = vc4_hdmi_dt_match,
- .pm = &vc4_hdmi_pm_ops,
},
};
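With the runtime-PM ops removed, the HSM clock's lifetime becomes explicit (sketch; example_pre_crtc_configure() is illustrative): it is enabled in pre_crtc_configure alongside the pixel clock, and every later failure path as well as post_crtc_powerdown must unwind it:

static int example_pre_crtc_configure(struct vc4_hdmi *vc4_hdmi)
{
	int ret;

	ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
	if (ret)
		return ret;

	ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
	if (ret) {
		/* each later failure must undo the HSM clock too */
		clk_disable_unprepare(vc4_hdmi->hsm_clock);
		return ret;
	}

	return 0;
}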
diff --git a/drivers/gpu/host1x/fence.c b/drivers/gpu/host1x/fence.c
index 6941add95d0f..ecab72882192 100644
--- a/drivers/gpu/host1x/fence.c
+++ b/drivers/gpu/host1x/fence.c
@@ -15,7 +15,7 @@
#include "intr.h"
#include "syncpt.h"
-DEFINE_SPINLOCK(lock);
+static DEFINE_SPINLOCK(lock);
struct host1x_syncpt_fence {
struct dma_fence base;
@@ -152,8 +152,10 @@ struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
return ERR_PTR(-ENOMEM);
fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL);
- if (!fence->waiter)
+ if (!fence->waiter) {
+ kfree(fence);
return ERR_PTR(-ENOMEM);
+ }
fence->sp = sp;
fence->threshold = threshold;
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 79b138fd4261..05c007b213f2 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -255,13 +255,13 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
if (!privdata->cl_data)
return -ENOMEM;
- rc = devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
+ mp2_select_ops(privdata);
+
+ rc = amd_sfh_hid_client_init(privdata);
if (rc)
return rc;
- mp2_select_ops(privdata);
-
- return amd_sfh_hid_client_init(privdata);
+ return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
}
static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 833fcf07ff35..6ccfa0cb997a 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -336,12 +336,19 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field,
/*
* MacBook JIS keyboard has wrong logical maximum
+ * Magic Keyboard JIS has wrong logical maximum
*/
static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct apple_sc *asc = hid_get_drvdata(hdev);
+ if(*rsize >=71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) {
+ hid_info(hdev,
+ "fixing up Magic Keyboard JIS report descriptor\n");
+ rdesc[64] = rdesc[70] = 0xe7;
+ }
+
if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&
rdesc[53] == 0x65 && rdesc[59] == 0x65) {
hid_info(hdev,
diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c
index 0790fbd3fc9a..467d789f9bc2 100644
--- a/drivers/hid/hid-betopff.c
+++ b/drivers/hid/hid-betopff.c
@@ -56,15 +56,22 @@ static int betopff_init(struct hid_device *hid)
{
struct betopff_device *betopff;
struct hid_report *report;
- struct hid_input *hidinput =
- list_first_entry(&hid->inputs, struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int field_count = 0;
int error;
int i, j;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+
+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
diff --git a/drivers/hid/hid-u2fzero.c b/drivers/hid/hid-u2fzero.c
index 95e0807878c7..d70cd3d7f583 100644
--- a/drivers/hid/hid-u2fzero.c
+++ b/drivers/hid/hid-u2fzero.c
@@ -198,7 +198,9 @@ static int u2fzero_rng_read(struct hwrng *rng, void *data,
}
ret = u2fzero_recv(dev, &req, &resp);
- if (ret < 0)
+
+ /* ignore errors or packets without data */
+ if (ret < offsetof(struct u2f_hid_msg, init.data))
return 0;
/* only take the minimum amount of data it is safe to take */
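The new comparison folds two cases into one check: a negative error and a reply shorter than the fixed header both mean no usable entropy. A sketch of the predicate (hypothetical name; note the explicit cast, since ret is an int while offsetof() yields a size_t, and an unsigned comparison would let negative error codes slip through):

static bool u2f_resp_has_data(int ret)
{
	return ret >= (int)offsetof(struct u2f_hid_msg, init.data);
}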
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index fd51769d0994..33a6908995b1 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -4746,6 +4746,12 @@ static const struct wacom_features wacom_features_0x393 =
{ "Wacom Intuos Pro S", 31920, 19950, 8191, 63,
INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7,
.touch_max = 10 };
+static const struct wacom_features wacom_features_0x3c6 =
+ { "Wacom Intuos BT S", 15200, 9500, 4095, 63,
+ INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
+static const struct wacom_features wacom_features_0x3c8 =
+ { "Wacom Intuos BT M", 21600, 13500, 4095, 63,
+ INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
@@ -4919,6 +4925,8 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x37A) },
{ USB_DEVICE_WACOM(0x37B) },
{ BT_DEVICE_WACOM(0x393) },
+ { BT_DEVICE_WACOM(0x3c6) },
+ { BT_DEVICE_WACOM(0x3c8) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 96d0eccca3aa..21f11a5b965b 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -1055,14 +1055,16 @@ static const struct net_device_ops ssip_pn_ops = {
static void ssip_pn_setup(struct net_device *dev)
{
+ static const u8 addr = PN_MEDIA_SOS;
+
dev->features = 0;
dev->netdev_ops = &ssip_pn_ops;
dev->type = ARPHRD_PHONET;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = SSIP_DEFAULT_MTU;
dev->hard_header_len = 1;
- dev->dev_addr[0] = PN_MEDIA_SOS;
dev->addr_len = 1;
+ dev_addr_set(dev, &addr);
dev->tx_queue_len = SSIP_TXQUEUE_LEN;
dev->needs_free_netdev = true;
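The direct store into dev->dev_addr is replaced with dev_addr_set() as part of the effort to make netdev hardware addresses const-managed by the core; the same pattern applies to any driver that fills a one-byte address (sketch):

static void example_set_phonet_addr(struct net_device *dev)
{
	static const u8 addr = PN_MEDIA_SOS;

	dev->addr_len = 1;
	dev_addr_set(dev, &addr);	/* was: dev->dev_addr[0] = addr */
}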
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 38bc35ac8135..3618a924e78e 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -362,12 +362,6 @@ static const struct hwmon_channel_info *k10temp_info[] = {
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
- HWMON_CHANNEL_INFO(in,
- HWMON_I_INPUT | HWMON_I_LABEL,
- HWMON_I_INPUT | HWMON_I_LABEL),
- HWMON_CHANNEL_INFO(curr,
- HWMON_C_INPUT | HWMON_C_LABEL,
- HWMON_C_INPUT | HWMON_C_LABEL),
NULL
};
diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c
index bb3f7749a0b0..5423466de697 100644
--- a/drivers/hwmon/ltc2947-core.c
+++ b/drivers/hwmon/ltc2947-core.c
@@ -989,8 +989,12 @@ static int ltc2947_setup(struct ltc2947_data *st)
return ret;
/* check external clock presence */
- extclk = devm_clk_get(st->dev, NULL);
- if (!IS_ERR(extclk)) {
+ extclk = devm_clk_get_optional(st->dev, NULL);
+ if (IS_ERR(extclk))
+ return dev_err_probe(st->dev, PTR_ERR(extclk),
+ "Failed to get external clock\n");
+
+ if (extclk) {
unsigned long rate_hz;
u8 pre = 0, div, tbctl;
u64 aux;
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index 116681fde33d..89fe7b9fe26b 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -315,8 +315,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
{
struct mlxreg_fan *fan = cdev->devdata;
unsigned long cur_state;
+ int i, config = 0;
u32 regval;
- int i;
int err;
/*
@@ -329,6 +329,12 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
* overwritten.
*/
if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {
+ /*
+ * This is a configuration change, which is only supported through
+ * sysfs. For a configuration change, a non-zero value is returned
+ * to avoid a thermal statistics update.
+ */
+ config = 1;
state -= MLXREG_FAN_MAX_STATE;
for (i = 0; i < state; i++)
fan->cooling_levels[i] = state;
@@ -343,7 +349,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);
if (state < cur_state)
- return 0;
+ return config;
state = cur_state;
}
@@ -359,7 +365,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
dev_err(fan->dev, "Failed to write PWM duty\n");
return err;
}
- return 0;
+ return config;
}
static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
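
The mlxreg-fan change encodes "this was a sysfs configuration write, not a real cooling-state change" in the return value: per the comment above, the thermal core only updates its cooling statistics when set_cur_state() returns 0. A sketch of the idea (constants and names are illustrative, not the driver's):

    #include <linux/thermal.h>

    #define EXAMPLE_MAX_STATE    10    /* illustrative, not the driver's value */

    static int example_set_cur_state(struct thermal_cooling_device *cdev,
                                     unsigned long state)
    {
        int config = 0;

        if (state > EXAMPLE_MAX_STATE) {
            /* out-of-band value: configuration request via sysfs */
            config = 1;
            state -= EXAMPLE_MAX_STATE;
            /* ...update the cooling-level table... */
        }
        /* ...program the PWM duty cycle for the requested state... */
        return config;    /* 0 = real state change, 1 = config only */
    }
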
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 0d68a78be980..ae664613289c 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -340,18 +340,11 @@ static ssize_t occ_show_temp_10(struct device *dev,
if (val == OCC_TEMP_SENSOR_FAULT)
return -EREMOTEIO;
- /*
- * VRM doesn't return temperature, only alarm bit. This
- * attribute maps to tempX_alarm instead of tempX_input for
- * VRM
- */
- if (temp->fru_type != OCC_FRU_TYPE_VRM) {
- /* sensor not ready */
- if (val == 0)
- return -EAGAIN;
+ /* sensor not ready */
+ if (val == 0)
+ return -EAGAIN;
- val *= 1000;
- }
+ val *= 1000;
break;
case 2:
val = temp->fru_type;
@@ -886,7 +879,7 @@ static int occ_setup_sensor_attrs(struct occ *occ)
0, i);
attr++;
- if (sensors->temp.version > 1 &&
+ if (sensors->temp.version == 2 &&
temp->fru_type == OCC_FRU_TYPE_VRM) {
snprintf(attr->name, sizeof(attr->name),
"temp%d_alarm", s);
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index df712ce4b164..53f7d1418bc9 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -171,8 +171,14 @@ static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
cmd = CFFPS_SN_CMD;
break;
case CFFPS_DEBUGFS_MAX_POWER_OUT:
- rc = i2c_smbus_read_word_swapped(psu->client,
- CFFPS_MAX_POWER_OUT_CMD);
+ if (psu->version == cffps1) {
+ rc = i2c_smbus_read_word_swapped(psu->client,
+ CFFPS_MAX_POWER_OUT_CMD);
+ } else {
+ rc = i2c_smbus_read_word_data(psu->client,
+ CFFPS_MAX_POWER_OUT_CMD);
+ }
+
if (rc < 0)
return rc;
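
The ibm-cffps hunk selects the word-read helper by device generation: SMBus transfers words little-endian on the wire, so i2c_smbus_read_word_data() is correct for conforming devices, while i2c_smbus_read_word_swapped() compensates for devices that send big-endian values. A hedged sketch of the selection:

    #include <linux/i2c.h>

    /* Sketch: pick the helper matching the device's byte order. */
    static int example_read_word(struct i2c_client *client, bool swapped, u8 cmd)
    {
        return swapped ? i2c_smbus_read_word_swapped(client, cmd)
                       : i2c_smbus_read_word_data(client, cmd);
    }
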
diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
index eb94bd5f4e2a..51986adfbf47 100644
--- a/drivers/hwmon/pmbus/mp2975.c
+++ b/drivers/hwmon/pmbus/mp2975.c
@@ -54,7 +54,7 @@
#define MP2975_RAIL2_FUNC (PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | \
PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | \
- PMBUS_PHASE_VIRTUAL)
+ PMBUS_HAVE_POUT | PMBUS_PHASE_VIRTUAL)
struct mp2975_data {
struct pmbus_driver_info info;
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index ede66ea6a730..b963a369c5ab 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -100,71 +100,81 @@ struct tmp421_data {
s16 temp[4];
};
-static int temp_from_s16(s16 reg)
+static int temp_from_raw(u16 reg, bool extended)
{
/* Mask out status bits */
int temp = reg & ~0xf;
- return (temp * 1000 + 128) / 256;
-}
-
-static int temp_from_u16(u16 reg)
-{
- /* Mask out status bits */
- int temp = reg & ~0xf;
-
- /* Add offset for extended temperature range. */
- temp -= 64 * 256;
+ if (extended)
+ temp = temp - 64 * 256;
+ else
+ temp = (s16)temp;
- return (temp * 1000 + 128) / 256;
+ return DIV_ROUND_CLOSEST(temp * 1000, 256);
}
-static struct tmp421_data *tmp421_update_device(struct device *dev)
+static int tmp421_update_device(struct tmp421_data *data)
{
- struct tmp421_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
+ int ret = 0;
int i;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + (HZ / 2)) ||
!data->valid) {
- data->config = i2c_smbus_read_byte_data(client,
- TMP421_CONFIG_REG_1);
+ ret = i2c_smbus_read_byte_data(client, TMP421_CONFIG_REG_1);
+ if (ret < 0)
+ goto exit;
+ data->config = ret;
for (i = 0; i < data->channels; i++) {
- data->temp[i] = i2c_smbus_read_byte_data(client,
- TMP421_TEMP_MSB[i]) << 8;
- data->temp[i] |= i2c_smbus_read_byte_data(client,
- TMP421_TEMP_LSB[i]);
+ ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_MSB[i]);
+ if (ret < 0)
+ goto exit;
+ data->temp[i] = ret << 8;
+
+ ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_LSB[i]);
+ if (ret < 0)
+ goto exit;
+ data->temp[i] |= ret;
}
data->last_updated = jiffies;
data->valid = 1;
}
+exit:
mutex_unlock(&data->update_lock);
- return data;
+ if (ret < 0) {
+ data->valid = 0;
+ return ret;
+ }
+
+ return 0;
}
static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
- struct tmp421_data *tmp421 = tmp421_update_device(dev);
+ struct tmp421_data *tmp421 = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = tmp421_update_device(tmp421);
+ if (ret)
+ return ret;
switch (attr) {
case hwmon_temp_input:
- if (tmp421->config & TMP421_CONFIG_RANGE)
- *val = temp_from_u16(tmp421->temp[channel]);
- else
- *val = temp_from_s16(tmp421->temp[channel]);
+ *val = temp_from_raw(tmp421->temp[channel],
+ tmp421->config & TMP421_CONFIG_RANGE);
return 0;
case hwmon_temp_fault:
/*
- * The OPEN bit signals a fault. This is bit 0 of the temperature
- * register (low byte).
+ * Any of the OPEN or /PVLD bits indicates a hardware malfunction,
+ * and the conversion result may be incorrect.
*/
- *val = tmp421->temp[channel] & 0x01;
+ *val = !!(tmp421->temp[channel] & 0x03);
return 0;
default:
return -EOPNOTSUPP;
@@ -177,9 +187,6 @@ static umode_t tmp421_is_visible(const void *data, enum hwmon_sensor_types type,
{
switch (attr) {
case hwmon_temp_fault:
- if (channel == 0)
- return 0;
- return 0444;
case hwmon_temp_input:
return 0444;
default:
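
The tmp421 rewrite folds the two converters into one: the register holds the temperature in 1/256 degC steps with status bits in the low nibble; the standard range is signed two's complement, while the extended range is unsigned with a -64 degC offset. DIV_ROUND_CLOSEST() also rounds negative values correctly, which the old (x * 1000 + 128) / 256 did not. A sketch of the math (for example, raw 0x1900 in standard range yields 25000 millidegrees):

    #include <linux/kernel.h>    /* DIV_ROUND_CLOSEST */

    static int example_temp_from_raw(u16 reg, bool extended)
    {
        int temp = reg & ~0xf;    /* strip status bits */

        if (extended)
            temp -= 64 * 256;     /* extended range: 0 maps to -64 degC */
        else
            temp = (s16)temp;     /* standard range: two's complement */

        return DIV_ROUND_CLOSEST(temp * 1000, 256);    /* millidegrees */
    }
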
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 37b25a1474c4..3c1be2c11fdf 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -273,9 +273,6 @@ struct w83791d_data {
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
- /* array of 2 pointers to subclients */
- struct i2c_client *lm75[2];
-
/* volts */
u8 in[NUMBER_OF_VIN]; /* Register value */
u8 in_max[NUMBER_OF_VIN]; /* Register value */
@@ -1257,7 +1254,6 @@ static const struct attribute_group w83791d_group_fanpwm45 = {
static int w83791d_detect_subclients(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
- struct w83791d_data *data = i2c_get_clientdata(client);
int address = client->addr;
int i, id;
u8 val;
@@ -1280,22 +1276,19 @@ static int w83791d_detect_subclients(struct i2c_client *client)
}
val = w83791d_read(client, W83791D_REG_I2C_SUBADDR);
- if (!(val & 0x08))
- data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
- 0x48 + (val & 0x7));
- if (!(val & 0x80)) {
- if (!IS_ERR(data->lm75[0]) &&
- ((val & 0x7) == ((val >> 4) & 0x7))) {
- dev_err(&client->dev,
- "duplicate addresses 0x%x, "
- "use force_subclient\n",
- data->lm75[0]->addr);
- return -ENODEV;
- }
- data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
- 0x48 + ((val >> 4) & 0x7));
+
+ if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) {
+ dev_err(&client->dev,
+ "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7));
+ return -ENODEV;
}
+ if (!(val & 0x08))
+ devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (val & 0x7));
+
+ if (!(val & 0x80))
+ devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((val >> 4) & 0x7));
+
return 0;
}
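
The w83791d rework (repeated below for w83792d and w83793) checks both subclient addresses for a conflict up front and then drops the stored lm75[] pointers entirely: devm_i2c_new_dummy_device() ties the dummies' lifetime to the parent device, so nothing needs to hold them. A sketch of the reworked flow, assuming the same register layout (bits 3/7 disable subclients, each nibble holds an address offset from 0x48):

    #include <linux/i2c.h>

    static int example_detect_subclients(struct i2c_client *client, u8 val)
    {
        /* both enabled on the same address: bail out early */
        if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7))
            return -ENODEV;

        if (!(val & 0x08))    /* subclient 0 enabled */
            devm_i2c_new_dummy_device(&client->dev, client->adapter,
                                      0x48 + (val & 0x7));
        if (!(val & 0x80))    /* subclient 1 enabled */
            devm_i2c_new_dummy_device(&client->dev, client->adapter,
                                      0x48 + ((val >> 4) & 0x7));
        return 0;
    }
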
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index abd5c3a722b9..1f175f381350 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -264,9 +264,6 @@ struct w83792d_data {
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
- /* array of 2 pointers to subclients */
- struct i2c_client *lm75[2];
-
u8 in[9]; /* Register value */
u8 in_max[9]; /* Register value */
u8 in_min[9]; /* Register value */
@@ -927,7 +924,6 @@ w83792d_detect_subclients(struct i2c_client *new_client)
int address = new_client->addr;
u8 val;
struct i2c_adapter *adapter = new_client->adapter;
- struct w83792d_data *data = i2c_get_clientdata(new_client);
id = i2c_adapter_id(adapter);
if (force_subclients[0] == id && force_subclients[1] == address) {
@@ -946,21 +942,19 @@ w83792d_detect_subclients(struct i2c_client *new_client)
}
val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR);
- if (!(val & 0x08))
- data->lm75[0] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
- 0x48 + (val & 0x7));
- if (!(val & 0x80)) {
- if (!IS_ERR(data->lm75[0]) &&
- ((val & 0x7) == ((val >> 4) & 0x7))) {
- dev_err(&new_client->dev,
- "duplicate addresses 0x%x, use force_subclient\n",
- data->lm75[0]->addr);
- return -ENODEV;
- }
- data->lm75[1] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
- 0x48 + ((val >> 4) & 0x7));
+
+ if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) {
+ dev_err(&new_client->dev,
+ "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7));
+ return -ENODEV;
}
+ if (!(val & 0x08))
+ devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + (val & 0x7));
+
+ if (!(val & 0x80))
+ devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + ((val >> 4) & 0x7));
+
return 0;
}
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index e7d0484eabe4..1d2854de1cfc 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -202,7 +202,6 @@ static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
}
struct w83793_data {
- struct i2c_client *lm75[2];
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
@@ -1566,7 +1565,6 @@ w83793_detect_subclients(struct i2c_client *client)
int address = client->addr;
u8 tmp;
struct i2c_adapter *adapter = client->adapter;
- struct w83793_data *data = i2c_get_clientdata(client);
id = i2c_adapter_id(adapter);
if (force_subclients[0] == id && force_subclients[1] == address) {
@@ -1586,21 +1584,19 @@ w83793_detect_subclients(struct i2c_client *client)
}
tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR);
- if (!(tmp & 0x08))
- data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
- 0x48 + (tmp & 0x7));
- if (!(tmp & 0x80)) {
- if (!IS_ERR(data->lm75[0])
- && ((tmp & 0x7) == ((tmp >> 4) & 0x7))) {
- dev_err(&client->dev,
- "duplicate addresses 0x%x, "
- "use force_subclients\n", data->lm75[0]->addr);
- return -ENODEV;
- }
- data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
- 0x48 + ((tmp >> 4) & 0x7));
+
+ if (!(tmp & 0x88) && (tmp & 0x7) == ((tmp >> 4) & 0x7)) {
+ dev_err(&client->dev,
+ "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (tmp & 0x7));
+ return -ENODEV;
}
+ if (!(tmp & 0x08))
+ devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (tmp & 0x7));
+
+ if (!(tmp & 0x80))
+ devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((tmp >> 4) & 0x7));
+
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c
index fc0760f55c53..43054568430f 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg.c
+++ b/drivers/hwtracing/coresight/coresight-syscfg.c
@@ -5,6 +5,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include "coresight-config.h"
#include "coresight-etm-perf.h"
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index 4e0b7c2882ce..015e11c4663f 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -49,7 +49,7 @@
#define MLXCPLD_LPCI2C_NACK_IND 2
#define MLXCPLD_I2C_FREQ_1000KHZ_SET 0x04
-#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0f
+#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0c
#define MLXCPLD_I2C_FREQ_100KHZ_SET 0x42
enum mlxcpld_i2c_frequency {
@@ -495,7 +495,7 @@ mlxcpld_i2c_set_frequency(struct mlxcpld_i2c_priv *priv,
return err;
/* Set frequency only if it is not 100KHz, which is default. */
- switch ((data->reg & data->mask) >> data->bit) {
+ switch ((regval & data->mask) >> data->bit) {
case MLXCPLD_I2C_FREQ_1000KHZ:
freq = MLXCPLD_I2C_FREQ_1000KHZ_SET;
break;
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 477480d1de6b..7d4b3eb7077a 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -41,6 +41,8 @@
#define I2C_HANDSHAKE_RST 0x0020
#define I2C_FIFO_ADDR_CLR 0x0001
#define I2C_DELAY_LEN 0x0002
+#define I2C_ST_START_CON 0x8001
+#define I2C_FS_START_CON 0x1800
#define I2C_TIME_CLR_VALUE 0x0000
#define I2C_TIME_DEFAULT_VALUE 0x0003
#define I2C_WRRD_TRANAC_VALUE 0x0002
@@ -480,6 +482,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
u16 intr_stat_reg;
+ u16 ext_conf_val;
mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START);
intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
@@ -518,8 +521,13 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
if (i2c->dev_comp->ltiming_adjust)
mtk_i2c_writew(i2c, i2c->ltiming_reg, OFFSET_LTIMING);
+ if (i2c->speed_hz <= I2C_MAX_STANDARD_MODE_FREQ)
+ ext_conf_val = I2C_ST_START_CON;
+ else
+ ext_conf_val = I2C_FS_START_CON;
+
if (i2c->dev_comp->timing_adjust) {
- mtk_i2c_writew(i2c, i2c->ac_timing.ext, OFFSET_EXT_CONF);
+ ext_conf_val = i2c->ac_timing.ext;
mtk_i2c_writew(i2c, i2c->ac_timing.inter_clk_div,
OFFSET_CLOCK_DIV);
mtk_i2c_writew(i2c, I2C_SCL_MIS_COMP_VALUE,
@@ -544,6 +552,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
OFFSET_HS_STA_STO_AC_TIMING);
}
}
+ mtk_i2c_writew(i2c, ext_conf_val, OFFSET_EXT_CONF);
/* If use i2c pin from PMIC mt6397 side, need set PATH_DIR first */
if (i2c->have_pmic)
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index aaeeacc12121..546cc935e035 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -454,6 +454,7 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
break;
i2c_acpi_register_device(adapter, adev, &info);
+ put_device(&adapter->dev);
break;
case ACPI_RECONFIG_DEVICE_REMOVE:
if (!acpi_device_enumerated(adev))
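
The one-line i2c-core-acpi fix is a reference-count balance: the lookup that produced the adapter took a reference on its struct device, and the notifier must drop it once the new device is registered. The general rule, as a sketch:

    #include <linux/device.h>

    /* Sketch: any lookup that hands back a device with a reference held
     * must be balanced with put_device() when the caller is done. */
    static void example_use_found_device(struct device *dev)
    {
        get_device(dev);    /* what the lookup did internally */
        /* ...use dev... */
        put_device(dev);    /* balance before returning */
    }
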
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index 0019f1ea7df2..f41db9e0249a 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -738,7 +738,7 @@ static irqreturn_t fxls8962af_interrupt(int irq, void *p)
if (reg & FXLS8962AF_INT_STATUS_SRC_BUF) {
ret = fxls8962af_fifo_flush(indio_dev);
- if (ret)
+ if (ret < 0)
return IRQ_NONE;
return IRQ_HANDLED;
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index ee8ed9481025..2121a812b0c3 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -293,6 +293,7 @@ static const struct ad_sigma_delta_info ad7192_sigma_delta_info = {
.has_registers = true,
.addr_shift = 3,
.read_mask = BIT(6),
+ .irq_flags = IRQF_TRIGGER_FALLING,
};
static const struct ad_sd_calib_data ad7192_calib_arr[8] = {
diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c
index 42bb952f4738..b6e8c8abf6f4 100644
--- a/drivers/iio/adc/ad7780.c
+++ b/drivers/iio/adc/ad7780.c
@@ -203,7 +203,7 @@ static const struct ad_sigma_delta_info ad7780_sigma_delta_info = {
.set_mode = ad7780_set_mode,
.postprocess_sample = ad7780_postprocess_sample,
.has_registers = false,
- .irq_flags = IRQF_TRIGGER_LOW,
+ .irq_flags = IRQF_TRIGGER_FALLING,
};
#define _AD7780_CHANNEL(_bits, _wordsize, _mask_all) \
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index ef3e2d3ecb0c..0e7ab3fb072a 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -206,7 +206,7 @@ static const struct ad_sigma_delta_info ad7793_sigma_delta_info = {
.has_registers = true,
.addr_shift = 3,
.read_mask = BIT(6),
- .irq_flags = IRQF_TRIGGER_LOW,
+ .irq_flags = IRQF_TRIGGER_FALLING,
};
static const struct ad_sd_calib_data ad7793_calib_arr[6] = {
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index 19efaa41bc34..34ec0c28b2df 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -183,6 +183,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
data = iio_priv(indio_dev);
data->dev = &pdev->dev;
+ platform_set_drvdata(pdev, indio_dev);
data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 655ab02d03d8..b753658bb41e 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -103,7 +103,7 @@ MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
.sign = 'u', \
.realbits = depth, \
.storagebits = 16, \
- .shift = 2, \
+ .shift = (depth == 10) ? 2 : 0, \
.endianness = IIO_BE, \
}, \
}
@@ -142,7 +142,6 @@ MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
MAX1027_V_CHAN(11, depth)
#define MAX1X31_CHANNELS(depth) \
- MAX1X27_CHANNELS(depth), \
MAX1X29_CHANNELS(depth), \
MAX1027_V_CHAN(12, depth), \
MAX1027_V_CHAN(13, depth), \
diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
index 79c1dd68b909..d4fccd52ef08 100644
--- a/drivers/iio/adc/mt6577_auxadc.c
+++ b/drivers/iio/adc/mt6577_auxadc.c
@@ -82,6 +82,10 @@ static const struct iio_chan_spec mt6577_auxadc_iio_channels[] = {
MT6577_AUXADC_CHANNEL(15),
};
+/* For Voltage calculation */
+#define VOLTAGE_FULL_RANGE 1500 /* VA voltage */
+#define AUXADC_PRECISE 4096 /* 12 bits */
+
static int mt_auxadc_get_cali_data(int rawdata, bool enable_cali)
{
return rawdata;
@@ -191,6 +195,10 @@ static int mt6577_auxadc_read_raw(struct iio_dev *indio_dev,
}
if (adc_dev->dev_comp->sample_data_cali)
*val = mt_auxadc_get_cali_data(*val, true);
+
+ /* Convert adc raw data to voltage: 0 - 1500 mV */
+ *val = *val * VOLTAGE_FULL_RANGE / AUXADC_PRECISE;
+
return IIO_VAL_INT;
default:
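
The mt6577_auxadc hunk scales the raw 12-bit sample against a 1500 mV full-scale reference, so for example raw 2048 becomes 2048 * 1500 / 4096 = 750 mV. The conversion as a stand-alone helper:

    #define VOLTAGE_FULL_RANGE    1500    /* mV */
    #define AUXADC_PRECISE        4096    /* 12 bits */

    static inline int example_raw_to_mv(int raw)
    {
        return raw * VOLTAGE_FULL_RANGE / AUXADC_PRECISE;
    }
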
diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c
index 9996d5eef289..32fbf57c362f 100644
--- a/drivers/iio/adc/rzg2l_adc.c
+++ b/drivers/iio/adc/rzg2l_adc.c
@@ -401,7 +401,7 @@ static int rzg2l_adc_hw_init(struct rzg2l_adc *adc)
exit_hw_init:
clk_disable_unprepare(adc->pclk);
- return 0;
+ return ret;
}
static void rzg2l_adc_pm_runtime_disable(void *data)
@@ -570,8 +570,10 @@ static int __maybe_unused rzg2l_adc_pm_runtime_resume(struct device *dev)
return ret;
ret = clk_prepare_enable(adc->adclk);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(adc->pclk);
return ret;
+ }
rzg2l_adc_pwr(adc, true);
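
Both rzg2l_adc fixes are about error paths that must undo earlier steps: hw_init now propagates ret instead of returning 0, and a failed second clock enable now disables the first. The enable/unwind pairing as a sketch:

    #include <linux/clk.h>

    static int example_enable_two(struct clk *pclk, struct clk *adclk)
    {
        int ret = clk_prepare_enable(pclk);

        if (ret)
            return ret;

        ret = clk_prepare_enable(adclk);
        if (ret) {
            clk_disable_unprepare(pclk);    /* undo step 1 */
            return ret;
        }
        return 0;
    }
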
diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
index 3143f35a6509..83c1ae07b3e9 100644
--- a/drivers/iio/adc/ti-adc128s052.c
+++ b/drivers/iio/adc/ti-adc128s052.c
@@ -171,7 +171,13 @@ static int adc128_probe(struct spi_device *spi)
mutex_init(&adc->lock);
ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_disable_regulator;
+ return 0;
+
+err_disable_regulator:
+ regulator_disable(adc->reg);
return ret;
}
diff --git a/drivers/iio/common/ssp_sensors/ssp_spi.c b/drivers/iio/common/ssp_sensors/ssp_spi.c
index 4864c38b8d1c..769bd9280524 100644
--- a/drivers/iio/common/ssp_sensors/ssp_spi.c
+++ b/drivers/iio/common/ssp_sensors/ssp_spi.c
@@ -137,7 +137,7 @@ static int ssp_print_mcu_debug(char *data_frame, int *data_index,
if (length > received_len - *data_index || length <= 0) {
ssp_dbg("[SSP]: MSG From MCU-invalid debug length(%d/%d)\n",
length, received_len);
- return length ? length : -EPROTO;
+ return -EPROTO;
}
ssp_dbg("[SSP]: MSG From MCU - %s\n", &data_frame[*data_index]);
@@ -273,6 +273,8 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
for (idx = 0; idx < len;) {
switch (dataframe[idx++]) {
case SSP_MSG2AP_INST_BYPASS_DATA:
+ if (idx >= len)
+ return -EPROTO;
sd = dataframe[idx++];
if (sd < 0 || sd >= SSP_SENSOR_MAX) {
dev_err(SSP_DEV,
@@ -282,10 +284,13 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
if (indio_devs[sd]) {
spd = iio_priv(indio_devs[sd]);
- if (spd->process_data)
+ if (spd->process_data) {
+ if (idx >= len)
+ return -EPROTO;
spd->process_data(indio_devs[sd],
&dataframe[idx],
data->timestamp);
+ }
} else {
dev_err(SSP_DEV, "no client for frame\n");
}
@@ -293,6 +298,8 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
idx += ssp_offset_map[sd];
break;
case SSP_MSG2AP_INST_DEBUG_DATA:
+ if (idx >= len)
+ return -EPROTO;
sd = ssp_print_mcu_debug(dataframe, &idx, len);
if (sd) {
dev_err(SSP_DEV,
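
The ssp_spi hunks harden a parser of externally supplied frames: every idx++ that consumes a byte must be preceded by a bounds check, or a truncated frame walks past the buffer. The shape of the pattern, with a hypothetical consumer:

    #include <linux/errno.h>
    #include <linux/types.h>

    static void handle_opcode(u8 op, u8 arg) { /* hypothetical consumer */ }

    static int example_parse(const u8 *frame, int len)
    {
        int idx = 0;

        while (idx < len) {
            u8 opcode = frame[idx++];

            if (idx >= len)    /* the payload byte must exist */
                return -EPROTO;
            handle_opcode(opcode, frame[idx++]);
        }
        return 0;
    }
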
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index 2a5ba1b08a1d..546a4cf6c5ef 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -350,6 +350,7 @@ static int dac5571_probe(struct i2c_client *client,
data->dac5571_pwrdwn = dac5571_pwrdwn_quad;
break;
default:
+ ret = -EINVAL;
goto err;
}
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index eb48102f9424..287fff39a927 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -353,10 +353,11 @@ static int adis16475_set_freq(struct adis16475 *st, const u32 freq)
if (dec > st->info->max_dec)
dec = st->info->max_dec;
- ret = adis_write_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, dec);
+ ret = __adis_write_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, dec);
if (ret)
goto error;
+ adis_dev_unlock(&st->adis);
/*
* If decimation is used, then gyro and accel data will have meaningful
* bits on the LSB registers. This info is used on the trigger handler.
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index a869a6e52a16..ed129321a14d 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -144,6 +144,7 @@ struct adis16480_chip_info {
unsigned int max_dec_rate;
const unsigned int *filter_freqs;
bool has_pps_clk_mode;
+ bool has_sleep_cnt;
const struct adis_data adis_data;
};
@@ -939,6 +940,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
.max_dec_rate = 2048,
+ .has_sleep_cnt = true,
.filter_freqs = adis16480_def_filter_freqs,
.adis_data = ADIS16480_DATA(16375, &adis16485_timeouts, 0),
},
@@ -952,6 +954,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
.max_dec_rate = 2048,
+ .has_sleep_cnt = true,
.filter_freqs = adis16480_def_filter_freqs,
.adis_data = ADIS16480_DATA(16480, &adis16480_timeouts, 0),
},
@@ -965,6 +968,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
.max_dec_rate = 2048,
+ .has_sleep_cnt = true,
.filter_freqs = adis16480_def_filter_freqs,
.adis_data = ADIS16480_DATA(16485, &adis16485_timeouts, 0),
},
@@ -978,6 +982,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
.max_dec_rate = 2048,
+ .has_sleep_cnt = true,
.filter_freqs = adis16480_def_filter_freqs,
.adis_data = ADIS16480_DATA(16488, &adis16485_timeouts, 0),
},
@@ -1425,9 +1430,12 @@ static int adis16480_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = devm_add_action_or_reset(&spi->dev, adis16480_stop, indio_dev);
- if (ret)
- return ret;
+ if (st->chip_info->has_sleep_cnt) {
+ ret = devm_add_action_or_reset(&spi->dev, adis16480_stop,
+ indio_dev);
+ if (ret)
+ return ret;
+ }
ret = adis16480_config_irq_pin(spi->dev.of_node, st);
if (ret)
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index 52963da401a7..1880bd5bb258 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -276,6 +276,8 @@ static int opt3001_get_lux(struct opt3001 *opt, int *val, int *val2)
ret = wait_event_timeout(opt->result_ready_queue,
opt->result_ready,
msecs_to_jiffies(OPT3001_RESULT_READY_LONG));
+ if (ret == 0)
+ return -ETIMEDOUT;
} else {
/* Sleep for result ready time */
timeout = (opt->int_time == OPT3001_INT_TIME_SHORT) ?
@@ -312,9 +314,7 @@ err:
/* Disallow IRQ to access the device while lock is active */
opt->ok_to_ignore_lock = false;
- if (ret == 0)
- return -ETIMEDOUT;
- else if (ret < 0)
+ if (ret < 0)
return ret;
if (opt->use_irq) {
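
The opt3001 fix moves the timeout translation next to wait_event_timeout(), whose return value of 0 means "timed out": ret is reused for later calls where 0 means success, so checking it at the end conflated the two. A sketch of the corrected shape:

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/wait.h>

    static int example_wait(wait_queue_head_t *wq, bool *done, unsigned int ms)
    {
        long ret = wait_event_timeout(*wq, *done, msecs_to_jiffies(ms));

        if (ret == 0)    /* 0 here can only mean timeout */
            return -ETIMEDOUT;
        return 0;
    }
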
diff --git a/drivers/iio/test/Makefile b/drivers/iio/test/Makefile
index f1099b495301..467519a2027e 100644
--- a/drivers/iio/test/Makefile
+++ b/drivers/iio/test/Makefile
@@ -5,3 +5,4 @@
# Keep in alphabetical order
obj-$(CONFIG_IIO_TEST_FORMAT) += iio-test-format.o
+CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index c40791baced5..704ce595542c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1746,15 +1746,16 @@ static void cma_cancel_route(struct rdma_id_private *id_priv)
}
}
-static void cma_cancel_listens(struct rdma_id_private *id_priv)
+static void _cma_cancel_listens(struct rdma_id_private *id_priv)
{
struct rdma_id_private *dev_id_priv;
+ lockdep_assert_held(&lock);
+
/*
* Remove from listen_any_list to prevent added devices from spawning
* additional listen requests.
*/
- mutex_lock(&lock);
list_del(&id_priv->list);
while (!list_empty(&id_priv->listen_list)) {
@@ -1768,6 +1769,12 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
rdma_destroy_id(&dev_id_priv->id);
mutex_lock(&lock);
}
+}
+
+static void cma_cancel_listens(struct rdma_id_private *id_priv)
+{
+ mutex_lock(&lock);
+ _cma_cancel_listens(id_priv);
mutex_unlock(&lock);
}
@@ -1776,6 +1783,14 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
{
switch (state) {
case RDMA_CM_ADDR_QUERY:
+ /*
+ * The rdma_addr_cancel() can be skipped based on state:
+ * only RDMA_CM_ADDR_QUERY has work that could still execute.
+ * Notice that the addr_handler work could still be exiting
+ * outside this state; however, due to the interaction with the
+ * handler_mutex, the work is guaranteed not to touch id_priv
+ * during exit.
+ */
rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
break;
case RDMA_CM_ROUTE_QUERY:
@@ -1810,6 +1825,8 @@ static void cma_release_port(struct rdma_id_private *id_priv)
static void destroy_mc(struct rdma_id_private *id_priv,
struct cma_multicast *mc)
{
+ bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
+
if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
ib_sa_free_multicast(mc->sa_mc);
@@ -1826,7 +1843,10 @@ static void destroy_mc(struct rdma_id_private *id_priv,
cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
&mgid);
- cma_igmp_send(ndev, &mgid, false);
+
+ if (!send_only)
+ cma_igmp_send(ndev, &mgid, false);
+
dev_put(ndev);
}
@@ -2574,7 +2594,7 @@ static int cma_listen_on_all(struct rdma_id_private *id_priv)
return 0;
err_listen:
- list_del(&id_priv->list);
+ _cma_cancel_listens(id_priv);
mutex_unlock(&lock);
if (to_destroy)
rdma_destroy_id(&to_destroy->id);
@@ -3413,6 +3433,21 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
if (dst_addr->sa_family == AF_IB) {
ret = cma_resolve_ib_addr(id_priv);
} else {
+ /*
+ * The FSM can return to RDMA_CM_ADDR_BOUND after
+ * rdma_resolve_ip() is called, e.g. through the error
+ * path in addr_handler(). If this happens the existing
+ * request must be canceled before issuing a new one.
+ * Since canceling a request is a bit slow and this
+ * oddball path is rare, keep track of whether a request
+ * has been issued. The flag is effectively permanent,
+ * since this is the only place a cancel is needed,
+ * immediately before rdma_resolve_ip().
+ */
+ if (id_priv->used_resolve_ip)
+ rdma_addr_cancel(&id->route.addr.dev_addr);
+ else
+ id_priv->used_resolve_ip = 1;
ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
&id->route.addr.dev_addr,
timeout_ms, addr_handler,
@@ -3771,9 +3806,13 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
int ret;
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
+ struct sockaddr_in any_in = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = htonl(INADDR_ANY),
+ };
+
/* For a well behaved ULP state will be RDMA_CM_IDLE */
- id->route.addr.src_addr.ss_family = AF_INET;
- ret = rdma_bind_addr(id, cma_src_addr(id_priv));
+ ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
if (ret)
return ret;
if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
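
The cma.c refactor is the locked/unlocked helper split: the error path in cma_listen_on_all() already holds `lock`, so the teardown logic moves into a _-prefixed variant that asserts the lock instead of taking it, and the public wrapper keeps the old behavior. The skeleton of the pattern:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);

    /* caller must hold example_lock; lockdep checks it at runtime */
    static void _example_teardown(void)
    {
        lockdep_assert_held(&example_lock);
        /* ...walk and free entries guarded by example_lock... */
    }

    static void example_teardown(void)
    {
        mutex_lock(&example_lock);
        _example_teardown();
        mutex_unlock(&example_lock);
    }
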
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index 5c463da99845..f92f101ea981 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -91,6 +91,7 @@ struct rdma_id_private {
u8 afonly;
u8 timeout;
u8 min_rnr_timer;
+ u8 used_resolve_ip;
enum ib_gid_type gid_type;
/*
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index e74ddbe46589..15b0cb0f363f 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -876,14 +876,14 @@ void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
struct hfi1_ipoib_txq *txq = &priv->txqs[q];
u64 completed = atomic64_read(&txq->complete_txreqs);
- dd_dev_info(priv->dd, "timeout txq %llx q %u stopped %u stops %d no_desc %d ring_full %d\n",
- (unsigned long long)txq, q,
+ dd_dev_info(priv->dd, "timeout txq %p q %u stopped %u stops %d no_desc %d ring_full %d\n",
+ txq, q,
__netif_subqueue_stopped(dev, txq->q_idx),
atomic_read(&txq->stops),
atomic_read(&txq->no_desc),
atomic_read(&txq->ring_full));
- dd_dev_info(priv->dd, "sde %llx engine %u\n",
- (unsigned long long)txq->sde,
+ dd_dev_info(priv->dd, "sde %p engine %u\n",
+ txq->sde,
txq->sde ? txq->sde->this_idx : 0);
dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 1e9c3c5bee68..d763f097599f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -326,19 +326,30 @@ static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
INIT_LIST_HEAD(&hr_cq->rq_list);
}
-static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
- struct hns_roce_ib_create_cq *ucmd)
+static int set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
+ struct hns_roce_ib_create_cq *ucmd)
{
struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
- if (udata) {
- if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size))
- hr_cq->cqe_size = ucmd->cqe_size;
- else
- hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
- } else {
+ if (!udata) {
hr_cq->cqe_size = hr_dev->caps.cqe_sz;
+ return 0;
+ }
+
+ if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {
+ if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
+ ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid cqe size %u.\n", ucmd->cqe_size);
+ return -EINVAL;
+ }
+
+ hr_cq->cqe_size = ucmd->cqe_size;
+ } else {
+ hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
}
+
+ return 0;
}
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
@@ -366,7 +377,9 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);
- set_cqe_size(hr_cq, udata, &ucmd);
+ ret = set_cqe_size(hr_cq, udata, &ucmd);
+ if (ret)
+ return ret;
ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
if (ret) {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 5b9953105752..d5f3faa1627a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -3299,7 +3299,7 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
hr_cq->ib_cq.cqe);
owner_bit = hr_reg_read(dest, CQE_OWNER);
- memcpy(dest, cqe, sizeof(*cqe));
+ memcpy(dest, cqe, hr_cq->cqe_size);
hr_reg_write(dest, CQE_OWNER, owner_bit);
}
}
@@ -4397,7 +4397,12 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_qp->path_mtu = ib_mtu;
mtu = ib_mtu_enum_to_int(ib_mtu);
- if (WARN_ON(mtu < 0))
+ if (WARN_ON(mtu <= 0))
+ return -EINVAL;
+#define MAX_LP_MSG_LEN 65536
+ /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
+ lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
+ if (WARN_ON(lp_pktn_ini >= 0xF))
return -EINVAL;
if (attr_mask & IB_QP_PATH_MTU) {
@@ -4405,10 +4410,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_reg_clear(qpc_mask, QPC_MTU);
}
-#define MAX_LP_MSG_LEN 65536
- /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
- lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
-
hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
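
The hns_roce hunk moves the LP_PKTN_INI computation ahead of its first use so the WARN_ON can validate it against the 4-bit field; the value is the largest power-of-two packet count that keeps MTU * 2^LP_PKTN_INI within 64 KB. For example, mtu = 4096 gives ilog2(65536 / 4096) = 4. As a helper:

    #include <linux/errno.h>
    #include <linux/log2.h>

    #define MAX_LP_MSG_LEN 65536    /* cap on MTU * (2 ^ LP_PKTN_INI) */

    static int example_lp_pktn_ini(int mtu)
    {
        int v;

        if (mtu <= 0)
            return -EINVAL;
        v = ilog2(MAX_LP_MSG_LEN / mtu);
        if (v >= 0xF)    /* must fit the 4-bit field */
            return -EINVAL;
        return v;
    }
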
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 6b62299abfbb..6dea0a49d171 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -3496,7 +3496,7 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
last_ae == IRDMA_AE_BAD_CLOSE ||
- last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->reset)) {
+ last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
issue_close = 1;
iwqp->cm_id = NULL;
qp->term_flags = 0;
@@ -4250,7 +4250,7 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
teardown_entry);
attr.qp_state = IB_QPS_ERR;
irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
- if (iwdev->reset)
+ if (iwdev->rf->reset)
irdma_cm_disconn(cm_node->iwqp);
irdma_rem_ref_cm_node(cm_node);
}
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 00de5ee9a260..7de525a5ccf8 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -176,6 +176,14 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
qp->flush_code = FLUSH_GENERAL_ERR;
break;
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ qp->flush_code = FLUSH_RETRY_EXC_ERR;
+ break;
+ case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
+ case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
+ case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+ qp->flush_code = FLUSH_MW_BIND_ERR;
+ break;
default:
qp->flush_code = FLUSH_FATAL_ERR;
break;
@@ -1489,7 +1497,7 @@ void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
if (irdma_initialize_ieq(iwdev)) {
- iwdev->reset = true;
+ iwdev->rf->reset = true;
rf->gen_ops.request_reset(rf);
}
}
@@ -1632,13 +1640,13 @@ void irdma_rt_deinit_hw(struct irdma_device *iwdev)
case IEQ_CREATED:
if (!iwdev->roce_mode)
irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
- iwdev->reset);
+ iwdev->rf->reset);
fallthrough;
case ILQ_CREATED:
if (!iwdev->roce_mode)
irdma_puda_dele_rsrc(&iwdev->vsi,
IRDMA_PUDA_RSRC_TYPE_ILQ,
- iwdev->reset);
+ iwdev->rf->reset);
break;
default:
ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
index bddf88194d09..d219f64b2c3d 100644
--- a/drivers/infiniband/hw/irdma/i40iw_if.c
+++ b/drivers/infiniband/hw/irdma/i40iw_if.c
@@ -55,7 +55,7 @@ static void i40iw_close(struct i40e_info *cdev_info, struct i40e_client *client,
iwdev = to_iwdev(ibdev);
if (reset)
- iwdev->reset = true;
+ iwdev->rf->reset = true;
iwdev->iw_status = 0;
irdma_port_ibevent(iwdev);
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index 743d9e143a99..b678fe712447 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -346,7 +346,6 @@ struct irdma_device {
bool roce_mode:1;
bool roce_dcqcn_en:1;
bool dcb:1;
- bool reset:1;
bool iw_ooo:1;
enum init_completion_state init_state;
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index ff705f323233..3dcbb1fbf2c6 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -102,6 +102,8 @@ enum irdma_flush_opcode {
FLUSH_REM_OP_ERR,
FLUSH_LOC_LEN_ERR,
FLUSH_FATAL_ERR,
+ FLUSH_RETRY_EXC_ERR,
+ FLUSH_MW_BIND_ERR,
};
enum irdma_cmpl_status {
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index e94470991fe0..ac91ea5296db 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -2507,7 +2507,7 @@ void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
struct ib_qp_attr attr;
- if (qp->iwdev->reset)
+ if (qp->iwdev->rf->reset)
return;
attr.qp_state = IB_QPS_ERR;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 4fc323402073..7110ebf834f9 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -535,8 +535,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
irdma_qp_rem_ref(&iwqp->ibqp);
wait_for_completion(&iwqp->free_qp);
irdma_free_lsmm_rsrc(iwqp);
- if (!iwdev->reset)
- irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
+ irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
if (!iwqp->user_mode) {
if (iwqp->iwscq) {
@@ -2035,7 +2034,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
/* Kmode allocations */
int rsize;
- if (entries > rf->max_cqe) {
+ if (entries < 1 || entries > rf->max_cqe) {
err_code = -EINVAL;
goto cq_free_rsrc;
}
@@ -3353,6 +3352,10 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
return IB_WC_LOC_LEN_ERR;
case FLUSH_GENERAL_ERR:
return IB_WC_WR_FLUSH_ERR;
+ case FLUSH_RETRY_EXC_ERR:
+ return IB_WC_RETRY_EXC_ERR;
+ case FLUSH_MW_BIND_ERR:
+ return IB_WC_MW_BIND_ERR;
case FLUSH_FATAL_ERR:
default:
return IB_WC_FATAL_ERR;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f367f4a4abff..f3fa2fe6a88a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2275,7 +2275,7 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
u64 release_mac = MLX4_IB_INVALID_MAC;
struct mlx4_ib_qp *qp;
- new_smac = mlx4_mac_to_u64(dev->dev_addr);
+ new_smac = ether_addr_to_u64(dev->dev_addr);
atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
/* no need for update QP1 and mac registration in non-SRIOV */
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 8662f462e2a5..aea4182f33a4 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1853,7 +1853,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
u16 vlan_id, u8 *smac)
{
return _mlx4_set_path(dev, &qp->ah_attr,
- mlx4_mac_to_u64(smac),
+ ether_addr_to_u64(smac),
vlan_id,
path, &mqp->pri, port);
}
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index d0d98e584ebc..81147d774dd2 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1559,6 +1559,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_IB_NUM_PF_EQE,
};
param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 755930be01b8..dc203f3d0f25 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -272,7 +272,7 @@ static int qedr_register_device(struct qedr_dev *dev)
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 452e2355d24e..0a3b28142c05 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -403,7 +403,7 @@ static ssize_t diagc_attr_store(struct ib_device *ibdev, u32 port_num,
}
#define QIB_DIAGC_ATTR(N) \
- static_assert(&((struct qib_ibport *)0)->rvp.n_##N != (u64 *)NULL); \
+ static_assert(__same_type(((struct qib_ibport *)0)->rvp.n_##N, u64)); \
static struct qib_diagc_attr qib_diagc_attr_##N = { \
.attr = __ATTR(N, 0664, diagc_attr_show, diagc_attr_store), \
.counter = \
diff --git a/drivers/infiniband/hw/usnic/usnic_ib.h b/drivers/infiniband/hw/usnic/usnic_ib.h
index 84dd682d2334..b350081aeb5a 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib.h
@@ -90,7 +90,7 @@ struct usnic_ib_dev {
struct usnic_ib_vf {
struct usnic_ib_dev *pf;
- spinlock_t lock;
+ struct mutex lock;
struct usnic_vnic *vnic;
unsigned int qp_grp_ref_cnt;
struct usnic_ib_pd *pd;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 228e9a36dad0..d346dd48e731 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -572,7 +572,7 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev,
}
vf->pf = pf;
- spin_lock_init(&vf->lock);
+ mutex_init(&vf->lock);
mutex_lock(&pf->usdev_lock);
list_add_tail(&vf->link, &pf->vf_dev_list);
/*
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 06a4e9d4545d..756a83bcff58 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -196,7 +196,7 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
for (i = 0; dev_list[i]; i++) {
dev = dev_list[i];
vf = dev_get_drvdata(dev);
- spin_lock(&vf->lock);
+ mutex_lock(&vf->lock);
vnic = vf->vnic;
if (!usnic_vnic_check_room(vnic, res_spec)) {
usnic_dbg("Found used vnic %s from %s\n",
@@ -208,10 +208,10 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
vf, pd, res_spec,
trans_spec);
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
goto qp_grp_check;
}
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
}
usnic_uiom_free_dev_list(dev_list);
@@ -220,7 +220,7 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
/* Try to find resources on an unused vf */
list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
- spin_lock(&vf->lock);
+ mutex_lock(&vf->lock);
vnic = vf->vnic;
if (vf->qp_grp_ref_cnt == 0 &&
usnic_vnic_check_room(vnic, res_spec) == 0) {
@@ -228,10 +228,10 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
vf, pd, res_spec,
trans_spec);
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
goto qp_grp_check;
}
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
}
usnic_info("No free qp grp found on %s\n",
@@ -253,9 +253,9 @@ static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
WARN_ON(qp_grp->state != IB_QPS_RESET);
- spin_lock(&vf->lock);
+ mutex_lock(&vf->lock);
usnic_ib_qp_grp_destroy(qp_grp);
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
}
static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
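
The usnic conversion from spinlock to mutex exists because the work done under the lock, qp group creation and teardown, can sleep (it allocates memory and programs the device), and sleeping is forbidden while holding a spinlock. A minimal illustration of why the mutex is required:

    #include <linux/mutex.h>
    #include <linux/slab.h>

    static DEFINE_MUTEX(example_lock);

    static void *example_alloc_under_lock(size_t size)
    {
        void *p;

        mutex_lock(&example_lock);
        p = kzalloc(size, GFP_KERNEL);    /* may sleep: needs a mutex */
        mutex_unlock(&example_lock);
        return p;
    }
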
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 29de8412e416..4c914f75a902 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -334,6 +334,7 @@ static const struct xpad_device {
{ 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
@@ -451,6 +452,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
+ XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */
{ }
};
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 2f5e3ab5ed63..65286762b02a 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -3,6 +3,7 @@
// Driver for the IMX SNVS ON/OFF Power Key
// Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -99,6 +100,11 @@ static irqreturn_t imx_snvs_pwrkey_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void imx_snvs_pwrkey_disable_clk(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static void imx_snvs_pwrkey_act(void *pdata)
{
struct pwrkey_drv_data *pd = pdata;
@@ -111,6 +117,7 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
struct pwrkey_drv_data *pdata;
struct input_dev *input;
struct device_node *np;
+ struct clk *clk;
int error;
u32 vid;
@@ -134,6 +141,28 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "KEY_POWER without setting in dts\n");
}
+ clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Failed to get snvs clock (%pe)\n", clk);
+ return PTR_ERR(clk);
+ }
+
+ error = clk_prepare_enable(clk);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to enable snvs clock (%pe)\n",
+ ERR_PTR(error));
+ return error;
+ }
+
+ error = devm_add_action_or_reset(&pdev->dev,
+ imx_snvs_pwrkey_disable_clk, clk);
+ if (error) {
+ dev_err(&pdev->dev,
+ "Failed to register clock cleanup handler (%pe)\n",
+ ERR_PTR(error));
+ return error;
+ }
+
pdata->wakeup = of_property_read_bool(np, "wakeup-source");
pdata->irq = platform_get_irq(pdev, 0);
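
The snvs_pwrkey additions use devm_add_action_or_reset() so the clock is disabled automatically on any later probe failure and on remove; if registering the action itself fails, the action runs immediately, so the enable is never leaked. The enable half as a sketch:

    #include <linux/clk.h>
    #include <linux/device.h>

    static void example_disable_clk(void *data)
    {
        clk_disable_unprepare(data);
    }

    static int example_enable_clk(struct device *dev, struct clk *clk)
    {
        int ret = clk_prepare_enable(clk);

        if (ret)
            return ret;
        /* runs example_disable_clk() on probe failure or on remove */
        return devm_add_action_or_reset(dev, example_disable_clk, clk);
    }
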
diff --git a/drivers/input/touchscreen.c b/drivers/input/touchscreen.c
index dd18cb917c4d..4620e20d0190 100644
--- a/drivers/input/touchscreen.c
+++ b/drivers/input/touchscreen.c
@@ -80,27 +80,27 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-x",
input_abs_get_min(input, axis_x),
- &minimum) |
- touchscreen_get_prop_u32(dev, "touchscreen-size-x",
- input_abs_get_max(input,
- axis_x) + 1,
- &maximum) |
- touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
- input_abs_get_fuzz(input, axis_x),
- &fuzz);
+ &minimum);
+ data_present |= touchscreen_get_prop_u32(dev, "touchscreen-size-x",
+ input_abs_get_max(input,
+ axis_x) + 1,
+ &maximum);
+ data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
+ input_abs_get_fuzz(input, axis_x),
+ &fuzz);
if (data_present)
touchscreen_set_params(input, axis_x, minimum, maximum - 1, fuzz);
data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-y",
input_abs_get_min(input, axis_y),
- &minimum) |
- touchscreen_get_prop_u32(dev, "touchscreen-size-y",
- input_abs_get_max(input,
- axis_y) + 1,
- &maximum) |
- touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
- input_abs_get_fuzz(input, axis_y),
- &fuzz);
+ &minimum);
+ data_present |= touchscreen_get_prop_u32(dev, "touchscreen-size-y",
+ input_abs_get_max(input,
+ axis_y) + 1,
+ &maximum);
+ data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
+ input_abs_get_fuzz(input, axis_y),
+ &fuzz);
if (data_present)
touchscreen_set_params(input, axis_y, minimum, maximum - 1, fuzz);
@@ -108,11 +108,11 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
data_present = touchscreen_get_prop_u32(dev,
"touchscreen-max-pressure",
input_abs_get_max(input, axis),
- &maximum) |
- touchscreen_get_prop_u32(dev,
- "touchscreen-fuzz-pressure",
- input_abs_get_fuzz(input, axis),
- &fuzz);
+ &maximum);
+ data_present |= touchscreen_get_prop_u32(dev,
+ "touchscreen-fuzz-pressure",
+ input_abs_get_fuzz(input, axis),
+ &fuzz);
if (data_present)
touchscreen_set_params(input, axis, 0, maximum, fuzz);
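
The touchscreen change keeps bitwise-OR semantics, every property read still executes even after one succeeds, but as sequential |= statements, which pins down the otherwise unspecified evaluation order of a single | expression and avoids compiler warnings about bitwise OR on booleans. The shape of the fix, with a hypothetical reader:

    #include <linux/types.h>

    static bool read_prop(int which, u32 *out)
    {
        *out = which;    /* hypothetical property reader */
        return which != 0;
    }

    static bool example_read_all(u32 *a, u32 *b, u32 *c)
    {
        bool present;

        present  = read_prop(1, a);    /* each call must run, so no || */
        present |= read_prop(2, b);
        present |= read_prop(3, c);
        return present;
    }
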
diff --git a/drivers/input/touchscreen/resistive-adc-touch.c b/drivers/input/touchscreen/resistive-adc-touch.c
index 744544a723b7..6f754a8d30b1 100644
--- a/drivers/input/touchscreen/resistive-adc-touch.c
+++ b/drivers/input/touchscreen/resistive-adc-touch.c
@@ -71,19 +71,22 @@ static int grts_cb(const void *data, void *private)
unsigned int z2 = touch_info[st->ch_map[GRTS_CH_Z2]];
unsigned int Rt;
- Rt = z2;
- Rt -= z1;
- Rt *= st->x_plate_ohms;
- Rt = DIV_ROUND_CLOSEST(Rt, 16);
- Rt *= x;
- Rt /= z1;
- Rt = DIV_ROUND_CLOSEST(Rt, 256);
- /*
- * On increased pressure the resistance (Rt) is decreasing
- * so, convert values to make it looks as real pressure.
- */
- if (Rt < GRTS_DEFAULT_PRESSURE_MAX)
- press = GRTS_DEFAULT_PRESSURE_MAX - Rt;
+ if (likely(x && z1)) {
+ Rt = z2;
+ Rt -= z1;
+ Rt *= st->x_plate_ohms;
+ Rt = DIV_ROUND_CLOSEST(Rt, 16);
+ Rt *= x;
+ Rt /= z1;
+ Rt = DIV_ROUND_CLOSEST(Rt, 256);
+ /*
+ * The resistance (Rt) decreases as the pressure
+ * increases, so invert the values to make them
+ * look like real pressure.
+ */
+ if (Rt < GRTS_DEFAULT_PRESSURE_MAX)
+ press = GRTS_DEFAULT_PRESSURE_MAX - Rt;
+ }
}
if ((!x && !y) || (st->pressure && (press < st->pressure_min))) {
diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c
index 632dbdd21915..fb23a5b780a4 100644
--- a/drivers/interconnect/qcom/sdm660.c
+++ b/drivers/interconnect/qcom/sdm660.c
@@ -44,9 +44,9 @@
#define NOC_PERM_MODE_BYPASS (1 << NOC_QOS_MODE_BYPASS)
#define NOC_QOS_PRIORITYn_ADDR(n) (0x8 + (n * 0x1000))
-#define NOC_QOS_PRIORITY_MASK 0xf
+#define NOC_QOS_PRIORITY_P1_MASK 0xc
+#define NOC_QOS_PRIORITY_P0_MASK 0x3
#define NOC_QOS_PRIORITY_P1_SHIFT 0x2
-#define NOC_QOS_PRIORITY_P0_SHIFT 0x3
#define NOC_QOS_MODEn_ADDR(n) (0xc + (n * 0x1000))
#define NOC_QOS_MODEn_MASK 0x3
@@ -173,6 +173,16 @@ static const struct clk_bulk_data bus_mm_clocks[] = {
{ .id = "iface" },
};
+static const struct clk_bulk_data bus_a2noc_clocks[] = {
+ { .id = "bus" },
+ { .id = "bus_a" },
+ { .id = "ipa" },
+ { .id = "ufs_axi" },
+ { .id = "aggre2_ufs_axi" },
+ { .id = "aggre2_usb3_axi" },
+ { .id = "cfg_noc_usb2_axi" },
+};
+
/**
* struct qcom_icc_provider - Qualcomm specific interconnect provider
* @provider: generic interconnect provider
@@ -307,7 +317,7 @@ DEFINE_QNODE(slv_bimc_cfg, SDM660_SLAVE_BIMC_CFG, 4, -1, 56, true, -1, 0, -1, 0)
DEFINE_QNODE(slv_prng, SDM660_SLAVE_PRNG, 4, -1, 44, true, -1, 0, -1, 0);
DEFINE_QNODE(slv_spdm, SDM660_SLAVE_SPDM, 4, -1, 60, true, -1, 0, -1, 0);
DEFINE_QNODE(slv_qdss_cfg, SDM660_SLAVE_QDSS_CFG, 4, -1, 63, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_BLSP_1, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_CNOC_MNOC_CFG, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG);
DEFINE_QNODE(slv_snoc_cfg, SDM660_SLAVE_SNOC_CFG, 4, -1, 70, true, -1, 0, -1, 0);
DEFINE_QNODE(slv_qm_cfg, SDM660_SLAVE_QM_CFG, 4, -1, 212, true, -1, 0, -1, 0);
DEFINE_QNODE(slv_clk_ctl, SDM660_SLAVE_CLK_CTL, 4, -1, 47, true, -1, 0, -1, 0);
@@ -624,13 +634,12 @@ static int qcom_icc_noc_set_qos_priority(struct regmap *rmap,
/* Must be updated one at a time, P1 first, P0 last */
val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
rc = regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
- NOC_QOS_PRIORITY_MASK, val);
+ NOC_QOS_PRIORITY_P1_MASK, val);
if (rc)
return rc;
- val = qos->prio_level << NOC_QOS_PRIORITY_P0_SHIFT;
return regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
- NOC_QOS_PRIORITY_MASK, val);
+ NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
}
static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
@@ -810,6 +819,10 @@ static int qnoc_probe(struct platform_device *pdev)
qp->bus_clks = devm_kmemdup(dev, bus_mm_clocks,
sizeof(bus_mm_clocks), GFP_KERNEL);
qp->num_clks = ARRAY_SIZE(bus_mm_clocks);
+ } else if (of_device_is_compatible(dev->of_node, "qcom,sdm660-a2noc")) {
+ qp->bus_clks = devm_kmemdup(dev, bus_a2noc_clocks,
+ sizeof(bus_a2noc_clocks), GFP_KERNEL);
+ qp->num_clks = ARRAY_SIZE(bus_a2noc_clocks);
} else {
if (of_device_is_compatible(dev->of_node, "qcom,sdm660-bimc"))
qp->is_bimc_node = true;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 124c41adeca1..3eb68fa1b8cc 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -308,7 +308,6 @@ config APPLE_DART
config ARM_SMMU
tristate "ARM Ltd. System MMU (SMMU) Support"
depends on ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)
- depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU if ARM
@@ -356,6 +355,14 @@ config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
'arm-smmu.disable_bypass' will continue to override this
config.
+config ARM_SMMU_QCOM
+ def_tristate y
+ depends on ARM_SMMU && ARCH_QCOM
+ select QCOM_SCM
+ help
+ When running on a Qualcomm platform that has the custom variant
+ of the ARM SMMU, this needs to be built into the SMMU driver.
+
config ARM_SMMU_V3
tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
depends on ARM64
@@ -438,7 +445,7 @@ config QCOM_IOMMU
# Note: iommu drivers cannot (yet?) be built as modules
bool "Qualcomm IOMMU Support"
depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
- depends on QCOM_SCM=y
+ select QCOM_SCM
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 559db9259e65..fdfa39ec2a4d 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -183,7 +183,6 @@ struct apple_dart_master_cfg {
static struct platform_driver apple_dart_driver;
static const struct iommu_ops apple_dart_iommu_ops;
-static const struct iommu_flush_ops apple_dart_tlb_ops;
static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
{
@@ -338,22 +337,6 @@ static void apple_dart_iotlb_sync_map(struct iommu_domain *domain,
apple_dart_domain_flush_tlb(to_dart_domain(domain));
}
-static void apple_dart_tlb_flush_all(void *cookie)
-{
- apple_dart_domain_flush_tlb(cookie);
-}
-
-static void apple_dart_tlb_flush_walk(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- apple_dart_domain_flush_tlb(cookie);
-}
-
-static const struct iommu_flush_ops apple_dart_tlb_ops = {
- .tlb_flush_all = apple_dart_tlb_flush_all,
- .tlb_flush_walk = apple_dart_tlb_flush_walk,
-};
-
static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -435,7 +418,6 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
.ias = 32,
.oas = 36,
.coherent_walk = 1,
- .tlb = &apple_dart_tlb_ops,
.iommu_dev = dart->dev,
};
@@ -661,16 +643,34 @@ static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
return -EINVAL;
}
+static DEFINE_MUTEX(apple_dart_groups_lock);
+
+static void apple_dart_release_group(void *iommu_data)
+{
+ int i, sid;
+ struct apple_dart_stream_map *stream_map;
+ struct apple_dart_master_cfg *group_master_cfg = iommu_data;
+
+ mutex_lock(&apple_dart_groups_lock);
+
+ for_each_stream_map(i, group_master_cfg, stream_map)
+ for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+ stream_map->dart->sid2group[sid] = NULL;
+
+ kfree(iommu_data);
+ mutex_unlock(&apple_dart_groups_lock);
+}
+
static struct iommu_group *apple_dart_device_group(struct device *dev)
{
- static DEFINE_MUTEX(lock);
int i, sid;
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
struct apple_dart_stream_map *stream_map;
+ struct apple_dart_master_cfg *group_master_cfg;
struct iommu_group *group = NULL;
struct iommu_group *res = ERR_PTR(-EINVAL);
- mutex_lock(&lock);
+ mutex_lock(&apple_dart_groups_lock);
for_each_stream_map(i, cfg, stream_map) {
for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) {
@@ -698,6 +698,20 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
#endif
group = generic_device_group(dev);
+ res = ERR_PTR(-ENOMEM);
+ if (!group)
+ goto out;
+
+ group_master_cfg = kzalloc(sizeof(*group_master_cfg), GFP_KERNEL);
+ if (!group_master_cfg) {
+ iommu_group_put(group);
+ goto out;
+ }
+
+ memcpy(group_master_cfg, cfg, sizeof(*group_master_cfg));
+ iommu_group_set_iommudata(group, group_master_cfg,
+ apple_dart_release_group);
+
for_each_stream_map(i, cfg, stream_map)
for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
stream_map->dart->sid2group[sid] = group;
@@ -705,7 +719,7 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
res = group;
out:
- mutex_unlock(&lock);
+ mutex_unlock(&apple_dart_groups_lock);
return res;
}
diff --git a/drivers/iommu/arm/arm-smmu/Makefile b/drivers/iommu/arm/arm-smmu/Makefile
index e240a7bcf310..b0cc01aa20c9 100644
--- a/drivers/iommu/arm/arm-smmu/Makefile
+++ b/drivers/iommu/arm/arm-smmu/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
-arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o arm-smmu-qcom.o
+arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o
+arm_smmu-$(CONFIG_ARM_SMMU_QCOM) += arm-smmu-qcom.o
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index 9f465e146799..2c25cce38060 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -215,7 +215,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
of_device_is_compatible(np, "nvidia,tegra186-smmu"))
return nvidia_smmu_impl_init(smmu);
- smmu = qcom_smmu_impl_init(smmu);
+ if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM))
+ smmu = qcom_smmu_impl_init(smmu);
if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
smmu->impl = &mrvl_mmu500_impl;
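
The Makefile change only builds arm-smmu-qcom.o when CONFIG_ARM_SMMU_QCOM is set, and the call site still links because the IS_ENABLED() check lets the optimizer discard the call (and thus the undefined reference) in disabled configurations while keeping it type-checked. A hedged sketch of the idiom, with CONFIG_EXAMPLE_IMPL and example_impl_init() as stand-ins:

/* Declared unconditionally; the object is only built when the
 * Kconfig option is set. */
struct arm_smmu_device *example_impl_init(struct arm_smmu_device *smmu);

static struct arm_smmu_device *example_setup(struct arm_smmu_device *smmu)
{
	/* Dead-code-eliminated when CONFIG_EXAMPLE_IMPL=n, so the
	 * undefined symbol is never referenced at link time. */
	if (IS_ENABLED(CONFIG_EXAMPLE_IMPL))
		smmu = example_impl_init(smmu);

	return smmu;
}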
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 0ec5514c9980..b7708b93f3fa 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1942,18 +1942,18 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
reason = dmar_get_fault_reason(fault_reason, &fault_type);
if (fault_type == INTR_REMAP)
- pr_err("[INTR-REMAP] Request device [0x%02x:0x%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
+ pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
source_id >> 8, PCI_SLOT(source_id & 0xFF),
PCI_FUNC(source_id & 0xFF), addr >> 48,
fault_reason, reason);
else if (pasid == INVALID_IOASID)
- pr_err("[%s NO_PASID] Request device [0x%02x:0x%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
+ pr_err("[%s NO_PASID] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
type ? "DMA Read" : "DMA Write",
source_id >> 8, PCI_SLOT(source_id & 0xFF),
PCI_FUNC(source_id & 0xFF), addr,
fault_reason, reason);
else
- pr_err("[%s PASID 0x%x] Request device [0x%02x:0x%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
+ pr_err("[%s PASID 0x%x] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
type ? "DMA Read" : "DMA Write", pasid,
source_id >> 8, PCI_SLOT(source_id & 0xFF),
PCI_FUNC(source_id & 0xFF), addr,
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index c14e65a5d38f..c709861198e5 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -33,6 +33,7 @@ struct ipoctal_channel {
unsigned int pointer_read;
unsigned int pointer_write;
struct tty_port tty_port;
+ bool tty_registered;
union scc2698_channel __iomem *regs;
union scc2698_block __iomem *block_regs;
unsigned int board_id;
@@ -81,22 +82,34 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty)
return 0;
}
-static int ipoctal_open(struct tty_struct *tty, struct file *file)
+static int ipoctal_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct ipoctal_channel *channel = dev_get_drvdata(tty->dev);
struct ipoctal *ipoctal = chan_to_ipoctal(channel, tty->index);
- int err;
-
- tty->driver_data = channel;
+ int res;
if (!ipack_get_carrier(ipoctal->dev))
return -EBUSY;
- err = tty_port_open(&channel->tty_port, tty, file);
- if (err)
- ipack_put_carrier(ipoctal->dev);
+ res = tty_standard_install(driver, tty);
+ if (res)
+ goto err_put_carrier;
+
+ tty->driver_data = channel;
+
+ return 0;
+
+err_put_carrier:
+ ipack_put_carrier(ipoctal->dev);
+
+ return res;
+}
+
+static int ipoctal_open(struct tty_struct *tty, struct file *file)
+{
+ struct ipoctal_channel *channel = tty->driver_data;
- return err;
+ return tty_port_open(&channel->tty_port, tty, file);
}
static void ipoctal_reset_stats(struct ipoctal_stats *stats)
@@ -264,7 +277,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
int res;
int i;
struct tty_driver *tty;
- char name[20];
struct ipoctal_channel *channel;
struct ipack_region *region;
void __iomem *addr;
@@ -355,8 +367,11 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
/* Fill struct tty_driver with ipoctal data */
tty->owner = THIS_MODULE;
tty->driver_name = KBUILD_MODNAME;
- sprintf(name, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
- tty->name = name;
+ tty->name = kasprintf(GFP_KERNEL, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
+ if (!tty->name) {
+ res = -ENOMEM;
+ goto err_put_driver;
+ }
tty->major = 0;
tty->minor_start = 0;
@@ -371,8 +386,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
res = tty_register_driver(tty);
if (res) {
dev_err(&ipoctal->dev->dev, "Can't register tty driver.\n");
- tty_driver_kref_put(tty);
- return res;
+ goto err_free_name;
}
/* Save struct tty_driver for use it when uninstalling the device */
@@ -383,7 +397,9 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
channel = &ipoctal->channel[i];
tty_port_init(&channel->tty_port);
- tty_port_alloc_xmit_buf(&channel->tty_port);
+ res = tty_port_alloc_xmit_buf(&channel->tty_port);
+ if (res)
+ continue;
channel->tty_port.ops = &ipoctal_tty_port_ops;
ipoctal_reset_stats(&channel->stats);
@@ -391,13 +407,15 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
spin_lock_init(&channel->lock);
channel->pointer_read = 0;
channel->pointer_write = 0;
- tty_dev = tty_port_register_device(&channel->tty_port, tty, i, NULL);
+ tty_dev = tty_port_register_device_attr(&channel->tty_port, tty,
+ i, NULL, channel, NULL);
if (IS_ERR(tty_dev)) {
dev_err(&ipoctal->dev->dev, "Failed to register tty device.\n");
+ tty_port_free_xmit_buf(&channel->tty_port);
tty_port_destroy(&channel->tty_port);
continue;
}
- dev_set_drvdata(tty_dev, channel);
+ channel->tty_registered = true;
}
/*
@@ -409,6 +427,13 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
ipoctal_irq_handler, ipoctal);
return 0;
+
+err_free_name:
+ kfree(tty->name);
+err_put_driver:
+ tty_driver_kref_put(tty);
+
+ return res;
}
static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel,
@@ -648,6 +673,7 @@ static void ipoctal_cleanup(struct tty_struct *tty)
static const struct tty_operations ipoctal_fops = {
.ioctl = NULL,
+ .install = ipoctal_install,
.open = ipoctal_open,
.close = ipoctal_close,
.write = ipoctal_write_tty,
@@ -690,12 +716,17 @@ static void __ipoctal_remove(struct ipoctal *ipoctal)
for (i = 0; i < NR_CHANNELS; i++) {
struct ipoctal_channel *channel = &ipoctal->channel[i];
+
+ if (!channel->tty_registered)
+ continue;
+
tty_unregister_device(ipoctal->tty_drv, i);
tty_port_free_xmit_buf(&channel->tty_port);
tty_port_destroy(&channel->tty_port);
}
tty_unregister_driver(ipoctal->tty_drv);
+ kfree(ipoctal->tty_drv->name);
tty_driver_kref_put(ipoctal->tty_drv);
kfree(ipoctal);
}
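
The refactor above moves carrier acquisition into a tty .install hook built on tty_standard_install() and unwinds failures in reverse order of acquisition. A minimal sketch of that shape, assuming hypothetical example_get()/example_put()/example_channel() helpers:

static int example_install(struct tty_driver *driver, struct tty_struct *tty)
{
	int res;

	if (!example_get())			/* acquire first */
		return -EBUSY;

	res = tty_standard_install(driver, tty);
	if (res)
		goto err_put;			/* undo only what succeeded */

	tty->driver_data = example_channel(tty->index);
	return 0;

err_put:
	example_put();
	return res;
}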
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 4d5924e9f766..aca7b595c4c7 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -409,6 +409,7 @@ config MESON_IRQ_GPIO
config GOLDFISH_PIC
bool "Goldfish programmable interrupt controller"
depends on MIPS && (GOLDFISH || COMPILE_TEST)
+ select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
help
Say yes here to enable Goldfish interrupt controller driver used
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 7557ab551295..53e0fb0562c1 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -359,16 +359,16 @@ static void armada_370_xp_ipi_send_mask(struct irq_data *d,
ARMADA_370_XP_SW_TRIG_INT_OFFS);
}
-static void armada_370_xp_ipi_eoi(struct irq_data *d)
+static void armada_370_xp_ipi_ack(struct irq_data *d)
{
writel(~BIT(d->hwirq), per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
}
static struct irq_chip ipi_irqchip = {
.name = "IPI",
+ .irq_ack = armada_370_xp_ipi_ack,
.irq_mask = armada_370_xp_ipi_mask,
.irq_unmask = armada_370_xp_ipi_unmask,
- .irq_eoi = armada_370_xp_ipi_eoi,
.ipi_send_mask = armada_370_xp_ipi_send_mask,
};
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 7f40dca8cda5..eb0882d15366 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -4501,7 +4501,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
if (err) {
if (i > 0)
- its_vpe_irq_domain_free(domain, virq, i - 1);
+ its_vpe_irq_domain_free(domain, virq, i);
its_lpi_free(bitmap, base, nr_ids);
its_free_prop_table(vprop_page);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index d329ec3d64d8..5f22c9d65e57 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -107,6 +107,8 @@ static DEFINE_RAW_SPINLOCK(cpu_map_lock);
#endif
+static DEFINE_STATIC_KEY_FALSE(needs_rmw_access);
+
/*
* The GIC mapping of CPU interfaces does not necessarily match
* the logical CPU numbering. Let's use a mapping as returned
@@ -774,6 +776,25 @@ static int gic_pm_init(struct gic_chip_data *gic)
#endif
#ifdef CONFIG_SMP
+static void rmw_writeb(u8 bval, void __iomem *addr)
+{
+ static DEFINE_RAW_SPINLOCK(rmw_lock);
+ unsigned long offset = (unsigned long)addr & 3UL;
+ unsigned long shift = offset * 8;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&rmw_lock, flags);
+
+ addr -= offset;
+ val = readl_relaxed(addr);
+ val &= ~GENMASK(shift + 7, shift);
+ val |= bval << shift;
+ writel_relaxed(val, addr);
+
+ raw_spin_unlock_irqrestore(&rmw_lock, flags);
+}
+
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
@@ -788,7 +809,10 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
return -EINVAL;
- writeb_relaxed(gic_cpu_map[cpu], reg);
+ if (static_branch_unlikely(&needs_rmw_access))
+ rmw_writeb(gic_cpu_map[cpu], reg);
+ else
+ writeb_relaxed(gic_cpu_map[cpu], reg);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK_DONE;
@@ -1375,6 +1399,30 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
return true;
}
+static bool gic_enable_rmw_access(void *data)
+{
+ /*
+	 * The EMEV2 class of machines has a broken interconnect, and
+	 * locks up on accesses that are narrower than 32 bits. So far, only
+	 * the affinity setting requires the workaround.
+ */
+ if (of_machine_is_compatible("renesas,emev2")) {
+ static_branch_enable(&needs_rmw_access);
+ return true;
+ }
+
+ return false;
+}
+
+static const struct gic_quirk gic_quirks[] = {
+ {
+ .desc = "broken byte access",
+ .compatible = "arm,pl390",
+ .init = gic_enable_rmw_access,
+ },
+ { },
+};
+
static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
{
if (!gic || !node)
@@ -1391,6 +1439,8 @@ static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
gic->percpu_offset = 0;
+ gic_enable_of_quirks(node, gic_quirks, gic);
+
return 0;
error:
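
The quirk path relies on a static key: the extra branch costs nothing on machines without the erratum and is patched in once at boot when the compatible matches. A compact sketch of the same mechanism, with all example_* names as placeholders:

static DEFINE_STATIC_KEY_FALSE(example_quirk);

static void example_detect_quirk(void)
{
	if (of_machine_is_compatible("vendor,example-board"))
		static_branch_enable(&example_quirk);	/* patched at runtime */
}

static void example_write_byte(u8 val, void __iomem *reg)
{
	if (static_branch_unlikely(&example_quirk))
		example_rmw_writeb(val, reg);	/* hypothetical slow path */
	else
		writeb_relaxed(val, reg);
}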
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index f565317a3da3..12df2162108e 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -25,7 +25,7 @@
/* The maximum IRQ pin number of the mbigen chip (starting from 0) */
#define MAXIMUM_IRQ_PIN_NUM 1407
-/**
+/*
* In mbigen vector register
* bit[21:12]: event id value
* bit[11:0]: device id
@@ -39,14 +39,14 @@
/* offset of vector register in mbigen node */
#define REG_MBIGEN_VEC_OFFSET 0x200
-/**
+/*
* offset of clear register in mbigen node
* This register is used to clear the status
* of interrupt
*/
#define REG_MBIGEN_CLEAR_OFFSET 0xa000
-/**
+/*
* offset of interrupt type register
* This register is used to configure interrupt
* trigger type
diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c
index b0d46ac42b89..72c06e883d1c 100644
--- a/drivers/irqchip/irq-renesas-rza1.c
+++ b/drivers/irqchip/irq-renesas-rza1.c
@@ -223,12 +223,12 @@ static int rza1_irqc_probe(struct platform_device *pdev)
goto out_put_node;
}
- priv->chip.name = "rza1-irqc",
- priv->chip.irq_mask = irq_chip_mask_parent,
- priv->chip.irq_unmask = irq_chip_unmask_parent,
- priv->chip.irq_eoi = rza1_irqc_eoi,
- priv->chip.irq_retrigger = irq_chip_retrigger_hierarchy,
- priv->chip.irq_set_type = rza1_irqc_set_type,
+ priv->chip.name = "rza1-irqc";
+ priv->chip.irq_mask = irq_chip_mask_parent;
+ priv->chip.irq_unmask = irq_chip_unmask_parent;
+ priv->chip.irq_eoi = rza1_irqc_eoi;
+ priv->chip.irq_retrigger = irq_chip_retrigger_hierarchy;
+ priv->chip.irq_set_type = rza1_irqc_set_type;
priv->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
priv->irq_domain = irq_domain_add_hierarchy(parent, 0, IRQC_NUM_IRQ,
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index cb0afe897162..7313454e403a 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -480,6 +480,11 @@ int detach_capi_ctr(struct capi_ctr *ctr)
ctr_down(ctr, CAPI_CTR_DETACHED);
+ if (ctr->cnr < 1 || ctr->cnr - 1 >= CAPI_MAXCONTR) {
+ err = -EINVAL;
+ goto unlock_out;
+ }
+
if (capi_controller[ctr->cnr - 1] != ctr) {
err = -EINVAL;
goto unlock_out;
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index e501cb03f211..bd087cca1c1d 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1994,14 +1994,14 @@ setup_hw(struct hfc_pci *hc)
pci_set_master(hc->pdev);
if (!hc->irq) {
printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
- return 1;
+ return -EINVAL;
}
hc->hw.pci_io =
(char __iomem *)(unsigned long)hc->pdev->resource[1].start;
if (!hc->hw.pci_io) {
printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
- return 1;
+ return -ENOMEM;
}
/* Allocate memory for FIFOS */
/* the memory needs to be on a 32k boundary within the first 4G */
@@ -2012,7 +2012,7 @@ setup_hw(struct hfc_pci *hc)
if (!buffer) {
printk(KERN_WARNING
"HFC-PCI: Error allocating memory for FIFO!\n");
- return 1;
+ return -ENOMEM;
}
hc->hw.fifos = buffer;
pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
@@ -2022,7 +2022,7 @@ setup_hw(struct hfc_pci *hc)
"HFC-PCI: Error in ioremap for PCI!\n");
dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
hc->hw.dmahandle);
- return 1;
+ return -ENOMEM;
}
printk(KERN_INFO
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 2a1ddd47a096..a52f275f8263 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -949,8 +949,8 @@ nj_release(struct tiger_hw *card)
nj_disable_hwirq(card);
mode_tiger(&card->bc[0], ISDN_P_NONE);
mode_tiger(&card->bc[1], ISDN_P_NONE);
- card->isac.release(&card->isac);
spin_unlock_irqrestore(&card->lock, flags);
+ card->isac.release(&card->isac);
release_region(card->base, card->base_s);
card->base_s = 0;
}
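
The reorder above drops the spinlock before invoking the ISAC release callback, presumably because the callback may sleep or take other locks; only the hardware teardown that needs the lock stays inside the critical section. A sketch of the shape, with example_* names and members standing in:

struct example_card {
	spinlock_t lock;
	void (*release_cb)(struct example_card *card);	/* may sleep */
};

static void example_release(struct example_card *card)
{
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	example_disable_hw(card);		/* hypothetical: needs the lock */
	spin_unlock_irqrestore(&card->lock, flags);

	card->release_cb(card);			/* safe: no spinlock held here */
}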
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index edf4ee6eff25..cf128b3471d7 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -275,8 +275,8 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
if (bus_nr < 0) {
- rc = bus_nr;
- goto err_free;
+ kfree(bus);
+ return ERR_PTR(bus_nr);
}
bus->bus_nr = bus_nr;
@@ -291,12 +291,12 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
dev_set_name(&bus->dev, "mcb:%d", bus_nr);
rc = device_add(&bus->dev);
if (rc)
- goto err_free;
+ goto err_put;
return bus;
-err_free:
- put_device(carrier);
- kfree(bus);
+
+err_put:
+ put_device(&bus->dev);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(mcb_alloc_bus, MCB);
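
Once device_initialize() has run, a failed device must be released with put_device() so its ->release() callback frees it; a bare kfree() would bypass the refcount and leak the name set by dev_set_name(). Before device_initialize(), plain kfree() is correct, which is why the ida failure path above frees directly. A hedged sketch of the corrected unwind, with example_* names as placeholders:

struct example_bus {
	struct device dev;
};

static void example_bus_release(struct device *dev)
{
	kfree(container_of(dev, struct example_bus, dev));
}

static struct example_bus *example_alloc_bus(void)
{
	struct example_bus *bus;
	int rc;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);	/* not initialized: kfree is fine */

	bus->dev.release = example_bus_release;
	device_initialize(&bus->dev);
	dev_set_name(&bus->dev, "example:0");

	rc = device_add(&bus->dev);
	if (rc) {
		put_device(&bus->dev);		/* runs ->release(), frees bus */
		return ERR_PTR(rc);
	}
	return bus;
}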
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 84dbe08ad205..edd22e4d65df 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -161,7 +161,7 @@ static const char *clone_device_name(struct clone *clone)
static void __set_clone_mode(struct clone *clone, enum clone_metadata_mode new_mode)
{
- const char *descs[] = {
+ static const char * const descs[] = {
"read-write",
"read-only",
"fail"
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 5b95eea517d1..a896dea9750e 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -490,6 +490,14 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
struct mapped_device *md = tio->md;
struct dm_target *ti = md->immutable_target;
+ /*
+	 * blk-mq's unquiesce may come from outside events, such as an
+	 * elevator switch or an nr_requests update, and a request may
+	 * come in during suspend, so simply ask blk-mq to requeue it.
+ */
+ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
+ return BLK_STS_RESOURCE;
+
if (unlikely(!ti)) {
int srcu_idx;
struct dm_table *map = dm_get_live_table(md, &srcu_idx);
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 22a5ac82446a..88288c8d6bc8 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -475,6 +475,7 @@ static int verity_verify_io(struct dm_verity_io *io)
struct bvec_iter start;
unsigned b;
struct crypto_wait wait;
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
for (b = 0; b < io->n_blocks; b++) {
int r;
@@ -529,9 +530,17 @@ static int verity_verify_io(struct dm_verity_io *io)
else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
cur_block, NULL, &start) == 0)
continue;
- else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
- cur_block))
- return -EIO;
+ else {
+ if (bio->bi_status) {
+ /*
+				 * Error correction failed; just return an error
+ */
+ return -EIO;
+ }
+ if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
+ cur_block))
+ return -EIO;
+ }
}
return 0;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a011d09cb0fa..76d9da49fda7 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -496,18 +496,17 @@ static void start_io_acct(struct dm_io *io)
false, 0, &io->stats_aux);
}
-static void end_io_acct(struct dm_io *io)
+static void end_io_acct(struct mapped_device *md, struct bio *bio,
+ unsigned long start_time, struct dm_stats_aux *stats_aux)
{
- struct mapped_device *md = io->md;
- struct bio *bio = io->orig_bio;
- unsigned long duration = jiffies - io->start_time;
+ unsigned long duration = jiffies - start_time;
- bio_end_io_acct(bio, io->start_time);
+ bio_end_io_acct(bio, start_time);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
- true, duration, &io->stats_aux);
+ true, duration, stats_aux);
/* nudge anyone waiting on suspend queue */
if (unlikely(wq_has_sleeper(&md->wait)))
@@ -790,6 +789,8 @@ void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
blk_status_t io_error;
struct bio *bio;
struct mapped_device *md = io->md;
+ unsigned long start_time = 0;
+ struct dm_stats_aux stats_aux;
/* Push-back supersedes any I/O errors */
if (unlikely(error)) {
@@ -821,8 +822,10 @@ void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
}
io_error = io->status;
- end_io_acct(io);
+ start_time = io->start_time;
+ stats_aux = io->stats_aux;
free_io(md, io);
+ end_io_acct(md, bio, start_time, &stats_aux);
if (io_error == BLK_STS_DM_REQUEUE)
return;
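
The dm.c change closes a use-after-free: accounting used to read io->start_time and io->stats_aux after free_io() could let another context tear the object down. The fix snapshots those fields, frees the object, and only then accounts. The pattern in isolation, with placeholder types and example_* helpers:

struct example_stats {
	u64 ios;
};

struct example_io {
	unsigned long start_time;
	struct example_stats stats;
};

static void example_complete(struct example_io *io)
{
	unsigned long start = io->start_time;	/* snapshot before free */
	struct example_stats snap = io->stats;

	example_free_io(io);			/* io must not be touched below */
	example_account(start, &snap);		/* operate on the copies only */
}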
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ae8fe54ea358..6c0c3d0d905a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5700,10 +5700,6 @@ static int md_alloc(dev_t dev, char *name)
disk->flags |= GENHD_FL_EXT_DEVT;
disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
- /* As soon as we call add_disk(), another thread could get
- * through to md_open, so make sure it doesn't get too far
- */
- mutex_lock(&mddev->open_mutex);
add_disk(disk);
error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
@@ -5718,7 +5714,6 @@ static int md_alloc(dev_t dev, char *name)
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_bitmap_group))
pr_debug("pointless warning\n");
- mutex_unlock(&mddev->open_mutex);
abort:
mutex_unlock(&disks_mutex);
if (!error && mddev->kobj.sd) {
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 157c924686e4..80321e03809a 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -565,7 +565,7 @@ config VIDEO_QCOM_VENUS
depends on VIDEO_DEV && VIDEO_V4L2 && QCOM_SMEM
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
select QCOM_MDT_LOADER if ARCH_QCOM
- select QCOM_SCM if ARCH_QCOM
+ select QCOM_SCM
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index d402e456f27d..7d0ab19c38bb 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1140,8 +1140,8 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
continue;
length = 0;
switch (c) {
- /* SOF0: baseline JPEG */
- case SOF0:
+ /* JPEG_MARKER_SOF0: baseline JPEG */
+ case JPEG_MARKER_SOF0:
if (get_word_be(&jpeg_buffer, &word))
break;
length = (long)word - 2;
@@ -1172,7 +1172,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
notfound = 0;
break;
- case DQT:
+ case JPEG_MARKER_DQT:
if (get_word_be(&jpeg_buffer, &word))
break;
length = (long)word - 2;
@@ -1185,7 +1185,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
skip(&jpeg_buffer, length);
break;
- case DHT:
+ case JPEG_MARKER_DHT:
if (get_word_be(&jpeg_buffer, &word))
break;
length = (long)word - 2;
@@ -1198,15 +1198,15 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
skip(&jpeg_buffer, length);
break;
- case SOS:
+ case JPEG_MARKER_SOS:
sos = jpeg_buffer.curr - 2; /* 0xffda */
break;
/* skip payload-less markers */
- case RST ... RST + 7:
- case SOI:
- case EOI:
- case TEM:
+ case JPEG_MARKER_RST ... JPEG_MARKER_RST + 7:
+ case JPEG_MARKER_SOI:
+ case JPEG_MARKER_EOI:
+ case JPEG_MARKER_TEM:
break;
/* skip uninteresting payload markers */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index a77d93c098ce..8473a019bb5f 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -37,15 +37,15 @@
#define EXYNOS3250_IRQ_TIMEOUT 0x10000000
/* a selection of JPEG markers */
-#define TEM 0x01
-#define SOF0 0xc0
-#define DHT 0xc4
-#define RST 0xd0
-#define SOI 0xd8
-#define EOI 0xd9
-#define SOS 0xda
-#define DQT 0xdb
-#define DHP 0xde
+#define JPEG_MARKER_TEM 0x01
+#define JPEG_MARKER_SOF0 0xc0
+#define JPEG_MARKER_DHT 0xc4
+#define JPEG_MARKER_RST 0xd0
+#define JPEG_MARKER_SOI 0xd8
+#define JPEG_MARKER_EOI 0xd9
+#define JPEG_MARKER_SOS 0xda
+#define JPEG_MARKER_DQT 0xdb
+#define JPEG_MARKER_DHP 0xde
/* Flags that indicate a format can be used for capture/output */
#define SJPEG_FMT_FLAG_ENC_CAPTURE (1 << 0)
@@ -187,11 +187,11 @@ struct s5p_jpeg_marker {
* @fmt: driver-specific format of this queue
* @w: image width
* @h: image height
- * @sos: SOS marker's position relative to the buffer beginning
- * @dht: DHT markers' positions relative to the buffer beginning
- * @dqt: DQT markers' positions relative to the buffer beginning
- * @sof: SOF0 marker's position relative to the buffer beginning
- * @sof_len: SOF0 marker's payload length (without length field itself)
+ * @sos: JPEG_MARKER_SOS's position relative to the buffer beginning
+ * @dht: JPEG_MARKER_DHT' positions relative to the buffer beginning
+ * @dqt: JPEG_MARKER_DQT' positions relative to the buffer beginning
+ * @sof: JPEG_MARKER_SOF0's position relative to the buffer beginning
+ * @sof_len: JPEG_MARKER_SOF0's payload length (without length field itself)
* @size: image buffer size in bytes
*/
struct s5p_jpeg_q_data {
diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
index 3e729a17b35f..48d52baec1a1 100644
--- a/drivers/media/rc/ir_toy.c
+++ b/drivers/media/rc/ir_toy.c
@@ -24,6 +24,7 @@ static const u8 COMMAND_VERSION[] = { 'v' };
// End transmit and repeat reset command so we exit sump mode
static const u8 COMMAND_RESET[] = { 0xff, 0xff, 0, 0, 0, 0, 0 };
static const u8 COMMAND_SMODE_ENTER[] = { 's' };
+static const u8 COMMAND_SMODE_EXIT[] = { 0 };
static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 };
#define REPLY_XMITCOUNT 't'
@@ -309,12 +310,30 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
buf[i] = cpu_to_be16(v);
}
- buf[count] = cpu_to_be16(0xffff);
+ buf[count] = 0xffff;
irtoy->tx_buf = buf;
irtoy->tx_len = size;
irtoy->emitted = 0;
+	// There is an issue where, if the unit is receiving IR while the
+	// first TXSTART command is sent, the device might end up hanging
+	// with its LED on. It does not respond to any command when this
+ // happens. To work around this, re-enter sample mode.
+ err = irtoy_command(irtoy, COMMAND_SMODE_EXIT,
+ sizeof(COMMAND_SMODE_EXIT), STATE_RESET);
+ if (err) {
+ dev_err(irtoy->dev, "exit sample mode: %d\n", err);
+ return err;
+ }
+
+ err = irtoy_command(irtoy, COMMAND_SMODE_ENTER,
+ sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);
+ if (err) {
+ dev_err(irtoy->dev, "enter sample mode: %d\n", err);
+ return err;
+ }
+
err = irtoy_command(irtoy, COMMAND_TXSTART, sizeof(COMMAND_TXSTART),
STATE_TX);
kfree(buf);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 85ba901bc11b..0f5a49fc7c9e 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -224,6 +224,7 @@ config HI6421V600_IRQ
tristate "HiSilicon Hi6421v600 IRQ and powerkey"
depends on OF
depends on SPMI
+ depends on HAS_IOMEM
select MFD_CORE
select REGMAP_SPMI
help
diff --git a/drivers/misc/bcm-vk/bcm_vk_tty.c b/drivers/misc/bcm-vk/bcm_vk_tty.c
index 1b6076a89ca6..6669625ba4c8 100644
--- a/drivers/misc/bcm-vk/bcm_vk_tty.c
+++ b/drivers/misc/bcm-vk/bcm_vk_tty.c
@@ -267,13 +267,13 @@ int bcm_vk_tty_init(struct bcm_vk *vk, char *name)
struct device *tty_dev;
tty_port_init(&vk->tty[i].port);
- tty_dev = tty_port_register_device(&vk->tty[i].port, tty_drv,
- i, dev);
+ tty_dev = tty_port_register_device_attr(&vk->tty[i].port,
+ tty_drv, i, dev, vk,
+ NULL);
if (IS_ERR(tty_dev)) {
err = PTR_ERR(tty_dev);
goto unwind;
}
- dev_set_drvdata(tty_dev, vk);
vk->tty[i].is_opened = false;
}
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
index e5a4ed3701eb..a798fad5f03c 100644
--- a/drivers/misc/cb710/sgbuf2.c
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -47,7 +47,7 @@ static inline bool needs_unaligned_copy(const void *ptr)
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
return false;
#else
- return ((ptr - NULL) & 3) != 0;
+ return ((uintptr_t)ptr & 3) != 0;
#endif
}
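
The cb710 fix replaces (ptr - NULL), which is undefined pointer arithmetic, with a well-defined integer cast. The portable alignment test looks like this:

static inline bool example_is_word_aligned(const void *ptr)
{
	/* casting through uintptr_t is well defined; ptr - NULL is not */
	return ((uintptr_t)ptr & 3) == 0;
}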
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 4d09b672ac3c..632325474233 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -366,6 +366,13 @@ static const struct of_device_id at25_of_match[] = {
};
MODULE_DEVICE_TABLE(of, at25_of_match);
+static const struct spi_device_id at25_spi_ids[] = {
+ { .name = "at25",},
+ { .name = "fm25",},
+ { }
+};
+MODULE_DEVICE_TABLE(spi, at25_spi_ids);
+
static int at25_probe(struct spi_device *spi)
{
struct at25_data *at25 = NULL;
@@ -491,6 +498,7 @@ static struct spi_driver at25_driver = {
.dev_groups = sernum_groups,
},
.probe = at25_probe,
+ .id_table = at25_spi_ids,
};
module_spi_driver(at25_driver);
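
Besides the OF match table, an spi_device_id table exported via MODULE_DEVICE_TABLE is what lets the SPI core match plain board-file device names and generate the modalias used for module autoloading. A generic sketch of the wiring; example_of_match and example_probe are assumed to be defined nearby:

static const struct spi_device_id example_spi_ids[] = {
	{ .name = "example" },
	{ }
};
MODULE_DEVICE_TABLE(spi, example_spi_ids);

static struct spi_driver example_driver = {
	.driver = {
		.name		= "example",
		.of_match_table	= example_of_match,
	},
	.probe		= example_probe,
	.id_table	= example_spi_ids,
};
module_spi_driver(example_driver);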
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 29d8971ec558..1f15399e5cb4 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -406,6 +406,23 @@ static const struct of_device_id eeprom_93xx46_of_table[] = {
};
MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
+static const struct spi_device_id eeprom_93xx46_spi_ids[] = {
+ { .name = "eeprom-93xx46",
+ .driver_data = (kernel_ulong_t)&at93c46_data, },
+ { .name = "at93c46",
+ .driver_data = (kernel_ulong_t)&at93c46_data, },
+ { .name = "at93c46d",
+ .driver_data = (kernel_ulong_t)&atmel_at93c46d_data, },
+ { .name = "at93c56",
+ .driver_data = (kernel_ulong_t)&at93c56_data, },
+ { .name = "at93c66",
+ .driver_data = (kernel_ulong_t)&at93c66_data, },
+ { .name = "93lc46b",
+ .driver_data = (kernel_ulong_t)&microchip_93lc46b_data, },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, eeprom_93xx46_spi_ids);
+
static int eeprom_93xx46_probe_dt(struct spi_device *spi)
{
const struct of_device_id *of_id =
@@ -555,6 +572,7 @@ static struct spi_driver eeprom_93xx46_driver = {
},
.probe = eeprom_93xx46_probe,
.remove = eeprom_93xx46_remove,
+ .id_table = eeprom_93xx46_spi_ids,
};
module_spi_driver(eeprom_93xx46_driver);
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index beda610e6b30..ad6ced454655 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -814,10 +814,12 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
rpra[i].pv = (u64) ctx->args[i].ptr;
pages[i].addr = ctx->maps[i]->phys;
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm, ctx->args[i].ptr);
if (vma)
pages[i].addr += ctx->args[i].ptr -
vma->vm_start;
+ mmap_read_unlock(current->mm);
pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
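
The fastrpc hunk wraps find_vma() in mmap_read_lock()/mmap_read_unlock(): the VMA tree may change underneath an unlocked walker, and the returned vma is only valid while the lock is held. A minimal sketch of the safe lookup, copying what is needed before unlocking:

static unsigned long example_vma_offset(unsigned long uaddr)
{
	struct vm_area_struct *vma;
	unsigned long off = 0;

	mmap_read_lock(current->mm);
	vma = find_vma(current->mm, uaddr);	/* valid only under the lock */
	if (vma && uaddr >= vma->vm_start)
		off = uaddr - vma->vm_start;
	mmap_read_unlock(current->mm);

	return off;
}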
diff --git a/drivers/misc/gehc-achc.c b/drivers/misc/gehc-achc.c
index 02f33bc60c56..4c9c5394da6f 100644
--- a/drivers/misc/gehc-achc.c
+++ b/drivers/misc/gehc-achc.c
@@ -539,6 +539,7 @@ static int gehc_achc_probe(struct spi_device *spi)
static const struct spi_device_id gehc_achc_id[] = {
{ "ge,achc", 0 },
+ { "achc", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, gehc_achc_id);
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index 2e1befbd1ad9..693981891870 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -1090,7 +1090,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
/* check for 64-bit DMA address supported (DAC) */
/* check for 32-bit DMA address supported (SAC) */
- if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) ||
+ if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) &&
dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
dev_err(&pci_dev->dev,
"err: neither DMA32 nor DMA64 supported\n");
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index 7b0516cf808b..6dafff375f1c 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -405,7 +405,7 @@ static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
{
bool next_entry_found = false;
- struct hl_cs *next;
+ struct hl_cs *next, *first_cs;
if (!cs_needs_timeout(cs))
return;
@@ -415,9 +415,16 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
/* We need to handle tdr only once for the complete staged submission.
 * Hence, we choose the CS that reaches this function first, which is
 * the CS marked as 'staged_last'.
+	 * In case a single staged CS was submitted which has both first and
+	 * last indications, "cs_find_first" below will return NULL, since we
+	 * removed the CS node from the list before getting here;
+	 * in that case just continue with the CS to cancel its TDR work.
*/
- if (cs->staged_cs && cs->staged_last)
- cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
+ if (cs->staged_cs && cs->staged_last) {
+ first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
+ if (first_cs)
+ cs = first_cs;
+ }
spin_unlock(&hdev->cs_mirror_lock);
@@ -1288,6 +1295,12 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
if (rc)
goto free_cs_object;
+ /* If this is a staged submission we must return the staged sequence
+ * rather than the internal CS sequence
+ */
+ if (cs->staged_cs)
+ *cs_seq = cs->staged_sequence;
+
/* Validate ALL the CS chunks before submitting the CS */
for (i = 0 ; i < num_chunks ; i++) {
struct hl_cs_chunk *chunk = &cs_chunk_array[i];
@@ -1988,6 +2001,15 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
goto free_cs_chunk_array;
}
+ if (!hdev->nic_ports_mask) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
+ dev_err(hdev->dev,
+ "Collective operations not supported when NIC ports are disabled");
+ rc = -EINVAL;
+ goto free_cs_chunk_array;
+ }
+
collective_engine_id = chunk->collective_engine_id;
}
@@ -2026,9 +2048,10 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
spin_unlock(&ctx->sig_mgr.lock);
if (!handle_found) {
- dev_err(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
+ /* treat as signal CS already finished */
+ dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
signal_seq);
- rc = -EINVAL;
+ rc = 0;
goto free_cs_chunk_array;
}
@@ -2613,7 +2636,8 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
* completed after the poll function.
*/
if (!mcs_data.completion_bitmap) {
- dev_err(hdev->dev, "Multi-CS got completion on wait but no CS completed\n");
+ dev_warn_ratelimited(hdev->dev,
+ "Multi-CS got completion on wait but no CS completed\n");
rc = -EFAULT;
}
}
@@ -2625,11 +2649,18 @@ put_ctx:
free_seq_arr:
kfree(cs_seq_arr);
- /* update output args */
- memset(args, 0, sizeof(*args));
if (rc)
return rc;
+ if (mcs_data.wait_status == -ERESTARTSYS) {
+ dev_err_ratelimited(hdev->dev,
+ "user process got signal while waiting for Multi-CS\n");
+ return -EINTR;
+ }
+
+ /* update output args */
+ memset(args, 0, sizeof(*args));
+
if (mcs_data.completion_bitmap) {
args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
args->out.cs_completion_map = mcs_data.completion_bitmap;
@@ -2643,8 +2674,6 @@ free_seq_arr:
/* update if some CS was gone */
if (mcs_data.timestamp)
args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
- } else if (mcs_data.wait_status == -ERESTARTSYS) {
- args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
} else {
args->out.status = HL_WAIT_CS_STATUS_BUSY;
}
@@ -2664,16 +2693,17 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
&status, &timestamp);
+ if (rc == -ERESTARTSYS) {
+ dev_err_ratelimited(hdev->dev,
+ "user process got signal while waiting for CS handle %llu\n",
+ seq);
+ return -EINTR;
+ }
+
memset(args, 0, sizeof(*args));
if (rc) {
- if (rc == -ERESTARTSYS) {
- dev_err_ratelimited(hdev->dev,
- "user process got signal while waiting for CS handle %llu\n",
- seq);
- args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
- rc = -EINTR;
- } else if (rc == -ETIMEDOUT) {
+ if (rc == -ETIMEDOUT) {
dev_err_ratelimited(hdev->dev,
"CS %llu has timed-out while user process is waiting for it\n",
seq);
@@ -2740,10 +2770,20 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
else
interrupt = &hdev->user_interrupt[interrupt_offset];
+ /* Add pending user interrupt to relevant list for the interrupt
+ * handler to monitor
+ */
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
+	/* We check the completion value here since the interrupt could have
+	 * been received before we added the node to the wait list
+ */
if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
- goto free_fence;
+ goto remove_pending_user_interrupt;
}
if (completion_value >= target_value)
@@ -2752,14 +2792,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
*status = CS_WAIT_STATUS_BUSY;
if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
- goto free_fence;
-
- /* Add pending user interrupt to relevant list for the interrupt
- * handler to monitor
- */
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
- list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ goto remove_pending_user_interrupt;
wait_again:
/* Wait for interrupt handler to signal completion */
@@ -2770,6 +2803,15 @@ wait_again:
* If comparison fails, keep waiting until timeout expires
*/
if (completion_rc > 0) {
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ /* reinit_completion must be called before we check for user
+ * completion value, otherwise, if interrupt is received after
+ * the comparison and before the next wait_for_completion,
+ * we will reach timeout and fail
+ */
+ reinit_completion(&pend->fence.completion);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
@@ -2780,18 +2822,13 @@ wait_again:
if (completion_value >= target_value) {
*status = CS_WAIT_STATUS_COMPLETED;
} else {
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
- reinit_completion(&pend->fence.completion);
timeout = completion_rc;
-
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
goto wait_again;
}
} else if (completion_rc == -ERESTARTSYS) {
dev_err_ratelimited(hdev->dev,
"user process got signal while waiting for interrupt ID %d\n",
interrupt->interrupt_id);
- *status = HL_WAIT_CS_STATUS_INTERRUPTED;
rc = -EINTR;
} else {
*status = CS_WAIT_STATUS_BUSY;
@@ -2802,7 +2839,6 @@ remove_pending_user_interrupt:
list_del(&pend->wait_list_node);
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
-free_fence:
kfree(pend);
hl_ctx_put(ctx);
@@ -2847,8 +2883,6 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
args->in.interrupt_timeout_us, args->in.addr,
args->in.target, interrupt_offset, &status);
- memset(args, 0, sizeof(*args));
-
if (rc) {
if (rc != -EINTR)
dev_err_ratelimited(hdev->dev,
@@ -2857,6 +2891,8 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
return rc;
}
+ memset(args, 0, sizeof(*args));
+
switch (status) {
case CS_WAIT_STATUS_COMPLETED:
args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
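
The interrupt-wait hunks above fix two ordering races: the waiter is now published on the list before the user value is sampled, and the completion is reinitialized before re-checking, so a wakeup landing between the check and the next wait cannot be lost. A condensed sketch of that ordering; struct example_waiter and the example_add/remove_waiter helpers are placeholders:

struct example_waiter {
	struct completion done;
};

static int example_wait_on_value(struct example_waiter *w,
				 u32 __user *uptr, u32 target)
{
	u32 val;
	int err = 0;

	example_add_waiter(w);		/* 1: visible to the IRQ handler */

	do {
		/* 2: rearm before sampling, so a wakeup that fires
		 * between the check and the next wait is not lost */
		reinit_completion(&w->done);

		if (get_user(val, uptr)) {
			err = -EFAULT;
			break;
		}
		if (val >= target)
			break;		/* 3: condition already met */

		err = wait_for_completion_interruptible(&w->done);
	} while (!err);

	example_remove_waiter(w);
	return err;
}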
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 76b7de8f1406..0743319b10c7 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -437,6 +437,7 @@ void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
struct hl_cs_compl *cs_cmpl)
{
struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
+ u32 offset = 0;
cs_cmpl->hw_sob = handle->hw_sob;
@@ -446,9 +447,13 @@ void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
 * set offset 1 for example, he means to wait only for the first
 * signal, which will be pre_sob_val, and if he sets offset 2
 * then the value required is (pre_sob_val + 1), and so on...
+	 * if the user sets the wait offset to 0, then treat it as a legacy
+	 * wait CS and wait for the next signal.
*/
- cs_cmpl->sob_val = handle->pre_sob_val +
- (job->encaps_sig_wait_offset - 1);
+ if (job->encaps_sig_wait_offset)
+ offset = job->encaps_sig_wait_offset - 1;
+
+ cs_cmpl->sob_val = handle->pre_sob_val + offset;
}
static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 383865be3c2c..14da87b38e83 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -395,7 +395,7 @@ static struct hl_hw_obj_name_entry gaudi_so_id_to_str[] = {
static struct hl_hw_obj_name_entry gaudi_monitor_id_to_str[] = {
{ .id = 200, .name = "MON_OBJ_DMA_DOWN_FEEDBACK_RESET" },
- { .id = 201, .name = "MON_OBJ_DMA_UP_FEADBACK_RESET" },
+ { .id = 201, .name = "MON_OBJ_DMA_UP_FEEDBACK_RESET" },
{ .id = 203, .name = "MON_OBJ_DRAM_TO_SRAM_QUEUE_FENCE" },
{ .id = 204, .name = "MON_OBJ_TPC_0_CLK_GATE" },
{ .id = 205, .name = "MON_OBJ_TPC_1_CLK_GATE" },
@@ -5802,6 +5802,7 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct packet_msg_prot *cq_pkt;
+ u64 msi_addr;
u32 tmp;
cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);
@@ -5823,10 +5824,12 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
cq_pkt->ctl = cpu_to_le32(tmp);
cq_pkt->value = cpu_to_le32(1);
- if (!gaudi->multi_msi_mode)
- msi_vec = 0;
+ if (gaudi->multi_msi_mode)
+ msi_addr = mmPCIE_MSI_INTR_0 + msi_vec * 4;
+ else
+ msi_addr = mmPCIE_CORE_MSI_REQ;
- cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_MSI_INTR_0 + msi_vec * 4);
+ cq_pkt->addr = cpu_to_le64(CFG_BASE + msi_addr);
}
static void gaudi_update_eq_ci(struct hl_device *hdev, u32 val)
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index cb265c00cf73..25ac87cebd45 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -8,16 +8,21 @@
#include "gaudiP.h"
#include "../include/gaudi/asic_reg/gaudi_regs.h"
-#define GAUDI_NUMBER_OF_RR_REGS 24
-#define GAUDI_NUMBER_OF_LBW_RANGES 12
+#define GAUDI_NUMBER_OF_LBW_RR_REGS 28
+#define GAUDI_NUMBER_OF_HBW_RR_REGS 24
+#define GAUDI_NUMBER_OF_LBW_RANGES 10
-static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_HIT_WPROT,
mmDMA_IF_W_S_DMA0_HIT_WPROT,
mmDMA_IF_W_S_DMA1_HIT_WPROT,
+ mmDMA_IF_E_S_SOB_HIT_WPROT,
mmDMA_IF_E_S_DMA0_HIT_WPROT,
mmDMA_IF_E_S_DMA1_HIT_WPROT,
+ mmDMA_IF_W_N_SOB_HIT_WPROT,
mmDMA_IF_W_N_DMA0_HIT_WPROT,
mmDMA_IF_W_N_DMA1_HIT_WPROT,
+ mmDMA_IF_E_N_SOB_HIT_WPROT,
mmDMA_IF_E_N_DMA0_HIT_WPROT,
mmDMA_IF_E_N_DMA1_HIT_WPROT,
mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AW,
@@ -38,13 +43,17 @@ static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AW,
};
-static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_HIT_RPROT,
mmDMA_IF_W_S_DMA0_HIT_RPROT,
mmDMA_IF_W_S_DMA1_HIT_RPROT,
+ mmDMA_IF_E_S_SOB_HIT_RPROT,
mmDMA_IF_E_S_DMA0_HIT_RPROT,
mmDMA_IF_E_S_DMA1_HIT_RPROT,
+ mmDMA_IF_W_N_SOB_HIT_RPROT,
mmDMA_IF_W_N_DMA0_HIT_RPROT,
mmDMA_IF_W_N_DMA1_HIT_RPROT,
+ mmDMA_IF_E_N_SOB_HIT_RPROT,
mmDMA_IF_E_N_DMA0_HIT_RPROT,
mmDMA_IF_E_N_DMA1_HIT_RPROT,
mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AR,
@@ -65,13 +74,17 @@ static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AR,
};
-static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_MIN_WPROT_0,
mmDMA_IF_W_S_DMA0_MIN_WPROT_0,
mmDMA_IF_W_S_DMA1_MIN_WPROT_0,
+ mmDMA_IF_E_S_SOB_MIN_WPROT_0,
mmDMA_IF_E_S_DMA0_MIN_WPROT_0,
mmDMA_IF_E_S_DMA1_MIN_WPROT_0,
+ mmDMA_IF_W_N_SOB_MIN_WPROT_0,
mmDMA_IF_W_N_DMA0_MIN_WPROT_0,
mmDMA_IF_W_N_DMA1_MIN_WPROT_0,
+ mmDMA_IF_E_N_SOB_MIN_WPROT_0,
mmDMA_IF_E_N_DMA0_MIN_WPROT_0,
mmDMA_IF_E_N_DMA1_MIN_WPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0,
@@ -92,13 +105,17 @@ static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0,
};
-static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_MAX_WPROT_0,
mmDMA_IF_W_S_DMA0_MAX_WPROT_0,
mmDMA_IF_W_S_DMA1_MAX_WPROT_0,
+ mmDMA_IF_E_S_SOB_MAX_WPROT_0,
mmDMA_IF_E_S_DMA0_MAX_WPROT_0,
mmDMA_IF_E_S_DMA1_MAX_WPROT_0,
+ mmDMA_IF_W_N_SOB_MAX_WPROT_0,
mmDMA_IF_W_N_DMA0_MAX_WPROT_0,
mmDMA_IF_W_N_DMA1_MAX_WPROT_0,
+ mmDMA_IF_E_N_SOB_MAX_WPROT_0,
mmDMA_IF_E_N_DMA0_MAX_WPROT_0,
mmDMA_IF_E_N_DMA1_MAX_WPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0,
@@ -119,13 +136,17 @@ static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0,
};
-static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_MIN_RPROT_0,
mmDMA_IF_W_S_DMA0_MIN_RPROT_0,
mmDMA_IF_W_S_DMA1_MIN_RPROT_0,
+ mmDMA_IF_E_S_SOB_MIN_RPROT_0,
mmDMA_IF_E_S_DMA0_MIN_RPROT_0,
mmDMA_IF_E_S_DMA1_MIN_RPROT_0,
+ mmDMA_IF_W_N_SOB_MIN_RPROT_0,
mmDMA_IF_W_N_DMA0_MIN_RPROT_0,
mmDMA_IF_W_N_DMA1_MIN_RPROT_0,
+ mmDMA_IF_E_N_SOB_MIN_RPROT_0,
mmDMA_IF_E_N_DMA0_MIN_RPROT_0,
mmDMA_IF_E_N_DMA1_MIN_RPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0,
@@ -146,13 +167,17 @@ static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0,
};
-static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_MAX_RPROT_0,
mmDMA_IF_W_S_DMA0_MAX_RPROT_0,
mmDMA_IF_W_S_DMA1_MAX_RPROT_0,
+ mmDMA_IF_E_S_SOB_MAX_RPROT_0,
mmDMA_IF_E_S_DMA0_MAX_RPROT_0,
mmDMA_IF_E_S_DMA1_MAX_RPROT_0,
+ mmDMA_IF_W_N_SOB_MAX_RPROT_0,
mmDMA_IF_W_N_DMA0_MAX_RPROT_0,
mmDMA_IF_W_N_DMA1_MAX_RPROT_0,
+ mmDMA_IF_E_N_SOB_MAX_RPROT_0,
mmDMA_IF_E_N_DMA0_MAX_RPROT_0,
mmDMA_IF_E_N_DMA1_MAX_RPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0,
@@ -173,7 +198,7 @@ static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0,
};
-static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AW,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AW,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AW,
@@ -200,7 +225,7 @@ static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AW
};
-static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AR,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AR,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AR,
@@ -227,7 +252,7 @@ static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AR
};
-static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
@@ -254,7 +279,7 @@ static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0
};
-static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
@@ -281,7 +306,7 @@ static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0
};
-static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
@@ -308,7 +333,7 @@ static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0
};
-static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
@@ -335,7 +360,7 @@ static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0
};
-static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
@@ -362,7 +387,7 @@ static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0
};
-static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
@@ -389,7 +414,7 @@ static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0
};
-static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
@@ -416,7 +441,7 @@ static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0
};
-static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
@@ -12849,50 +12874,44 @@ static void gaudi_init_range_registers_lbw(struct hl_device *hdev)
u32 lbw_rng_end[GAUDI_NUMBER_OF_LBW_RANGES];
int i, j;
- lbw_rng_start[0] = (0xFBFE0000 & 0x3FFFFFF) - 1;
- lbw_rng_end[0] = (0xFBFFF000 & 0x3FFFFFF) + 1;
+ lbw_rng_start[0] = (0xFC0E8000 & 0x3FFFFFF) - 1; /* 0x000E7FFF */
+ lbw_rng_end[0] = (0xFC11FFFF & 0x3FFFFFF) + 1; /* 0x00120000 */
- lbw_rng_start[1] = (0xFC0E8000 & 0x3FFFFFF) - 1;
- lbw_rng_end[1] = (0xFC120000 & 0x3FFFFFF) + 1;
+ lbw_rng_start[1] = (0xFC1E8000 & 0x3FFFFFF) - 1; /* 0x001E7FFF */
+ lbw_rng_end[1] = (0xFC48FFFF & 0x3FFFFFF) + 1; /* 0x00490000 */
- lbw_rng_start[2] = (0xFC1E8000 & 0x3FFFFFF) - 1;
- lbw_rng_end[2] = (0xFC48FFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[2] = (0xFC600000 & 0x3FFFFFF) - 1; /* 0x005FFFFF */
+ lbw_rng_end[2] = (0xFCC48FFF & 0x3FFFFFF) + 1; /* 0x00C49000 */
- lbw_rng_start[3] = (0xFC600000 & 0x3FFFFFF) - 1;
- lbw_rng_end[3] = (0xFCC48FFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[3] = (0xFCC4A000 & 0x3FFFFFF) - 1; /* 0x00C49FFF */
+ lbw_rng_end[3] = (0xFCCDFFFF & 0x3FFFFFF) + 1; /* 0x00CE0000 */
- lbw_rng_start[4] = (0xFCC4A000 & 0x3FFFFFF) - 1;
- lbw_rng_end[4] = (0xFCCDFFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[4] = (0xFCCE4000 & 0x3FFFFFF) - 1; /* 0x00CE3FFF */
+ lbw_rng_end[4] = (0xFCD1FFFF & 0x3FFFFFF) + 1; /* 0x00D20000 */
- lbw_rng_start[5] = (0xFCCE4000 & 0x3FFFFFF) - 1;
- lbw_rng_end[5] = (0xFCD1FFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[5] = (0xFCD24000 & 0x3FFFFFF) - 1; /* 0x00D23FFF */
+ lbw_rng_end[5] = (0xFCD5FFFF & 0x3FFFFFF) + 1; /* 0x00D60000 */
- lbw_rng_start[6] = (0xFCD24000 & 0x3FFFFFF) - 1;
- lbw_rng_end[6] = (0xFCD5FFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[6] = (0xFCD64000 & 0x3FFFFFF) - 1; /* 0x00D63FFF */
+ lbw_rng_end[6] = (0xFCD9FFFF & 0x3FFFFFF) + 1; /* 0x00DA0000 */
- lbw_rng_start[7] = (0xFCD64000 & 0x3FFFFFF) - 1;
- lbw_rng_end[7] = (0xFCD9FFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[7] = (0xFCDA4000 & 0x3FFFFFF) - 1; /* 0x00DA3FFF */
+ lbw_rng_end[7] = (0xFCDDFFFF & 0x3FFFFFF) + 1; /* 0x00DE0000 */
- lbw_rng_start[8] = (0xFCDA4000 & 0x3FFFFFF) - 1;
- lbw_rng_end[8] = (0xFCDDFFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[8] = (0xFCDE4000 & 0x3FFFFFF) - 1; /* 0x00DE3FFF */
+ lbw_rng_end[8] = (0xFCE05FFF & 0x3FFFFFF) + 1; /* 0x00E06000 */
- lbw_rng_start[9] = (0xFCDE4000 & 0x3FFFFFF) - 1;
- lbw_rng_end[9] = (0xFCE05FFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[9] = (0xFCFC9000 & 0x3FFFFFF) - 1; /* 0x00FC8FFF */
+ lbw_rng_end[9] = (0xFFFFFFFE & 0x3FFFFFF) + 1; /* 0x03FFFFFF */
- lbw_rng_start[10] = (0xFEC43000 & 0x3FFFFFF) - 1;
- lbw_rng_end[10] = (0xFEC43FFF & 0x3FFFFFF) + 1;
-
- lbw_rng_start[11] = (0xFE484000 & 0x3FFFFFF) - 1;
- lbw_rng_end[11] = (0xFE484FFF & 0x3FFFFFF) + 1;
-
- for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+ for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++) {
WREG32(gaudi_rr_lbw_hit_aw_regs[i],
(1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
WREG32(gaudi_rr_lbw_hit_ar_regs[i],
(1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
}
- for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++)
+ for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++)
for (j = 0 ; j < GAUDI_NUMBER_OF_LBW_RANGES ; j++) {
WREG32(gaudi_rr_lbw_min_aw_regs[i] + (j << 2),
lbw_rng_start[j]);
@@ -12939,12 +12958,12 @@ static void gaudi_init_range_registers_hbw(struct hl_device *hdev)
* 6th range is the host
*/
- for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+ for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) {
WREG32(gaudi_rr_hbw_hit_aw_regs[i], 0x1F);
WREG32(gaudi_rr_hbw_hit_ar_regs[i], 0x1D);
}
- for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+ for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) {
WREG32(gaudi_rr_hbw_base_low_aw_regs[i], dram_addr_lo);
WREG32(gaudi_rr_hbw_base_low_ar_regs[i], dram_addr_lo);
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
index ffdfbd9b3220..1a6576666794 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
@@ -308,6 +308,8 @@
#define mmPCIE_AUX_FLR_CTRL 0xC07394
#define mmPCIE_AUX_DBI 0xC07490
+#define mmPCIE_CORE_MSI_REQ 0xC04100
+
#define mmPSOC_PCI_PLL_NR 0xC72100
#define mmSRAM_W_PLL_NR 0x4C8100
#define mmPSOC_HBM_PLL_NR 0xC74100
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 99b5c1ecc444..be41843df75b 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1298,7 +1298,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_STARTING) {
- if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+ dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: start: on shutdown, ignoring\n");
return 0;
}
@@ -1381,7 +1382,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_DR_SETUP) {
- if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+ dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: dma setup response: on shutdown, ignoring\n");
return 0;
}
@@ -1448,7 +1450,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) {
- if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+ dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: properties response: on shutdown, ignoring\n");
return 0;
}
@@ -1490,7 +1493,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_ENUM_CLIENTS) {
- if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+ dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: enumeration response: on shutdown, ignoring\n");
return 0;
}
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index cb34925e10f1..67bb6a25fd0a 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -92,6 +92,7 @@
#define MEI_DEV_ID_CDF 0x18D3 /* Cedar Fork */
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
+#define MEI_DEV_ID_ICP_N 0x38E0 /* Ice Lake Point N */
#define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index c3393b383e59..3a45aaf002ac 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -96,6 +96,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_N, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 71313961cc54..95b3511b0560 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -547,7 +547,7 @@ config MMC_SDHCI_MSM
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
select MMC_CQHCI
- select QCOM_SCM if MMC_CRYPTO && ARCH_QCOM
+ select QCOM_SCM if MMC_CRYPTO
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in Qualcomm SOCs. The controller supports
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 6578cc64ae9e..380f9aa56eb2 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1802,10 +1802,15 @@ static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
spin_lock_irqsave(&host->irq_lock, flags);
- if (!host->data_status)
+ /*
+	 * Only inject an error if we haven't already got an error or a
+	 * data-over interrupt.
+ */
+ if (!host->data_status) {
host->data_status = SDMMC_INT_DCRC;
- set_bit(EVENT_DATA_ERROR, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ set_bit(EVENT_DATA_ERROR, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+ }
spin_unlock_irqrestore(&host->irq_lock, flags);
@@ -2721,12 +2726,16 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & DW_MCI_DATA_ERROR_FLAGS) {
+ spin_lock(&host->irq_lock);
+
/* if there is an error report DATA_ERROR */
mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
host->data_status = pending;
smp_wmb(); /* drain writebuffer */
set_bit(EVENT_DATA_ERROR, &host->pending_events);
tasklet_schedule(&host->tasklet);
+
+ spin_unlock(&host->irq_lock);
}
if (pending & SDMMC_INT_DATA_OVER) {
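Two lock-acquisition styles appear above because the writers run in different contexts: the fault-injection hrtimer does not assume interrupts are disabled, so it uses spin_lock_irqsave(), while dw_mci_interrupt() runs as a hard-IRQ handler, where a plain spin_lock() suffices. A sketch of the rule both hunks enforce, using the field names from the diff:

/* timer path: save and restore the interrupt state */
spin_lock_irqsave(&host->irq_lock, flags);
if (!host->data_status)		/* never clobber a real event */
	host->data_status = SDMMC_INT_DCRC;
spin_unlock_irqrestore(&host->irq_lock, flags);

/* hard-IRQ path: interrupts are already off on this CPU */
spin_lock(&host->irq_lock);
host->data_status = pending;
spin_unlock(&host->irq_lock);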
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 3f28eb4d17fe..8f36536cb1b6 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -746,7 +746,7 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
writel(start, host->regs + SD_EMMC_START);
}
-/* local sg copy to buffer version with _to/fromio usage for dram_access_quirk */
+/* local sg copy for dram_access_quirk */
static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
size_t buflen, bool to_buffer)
{
@@ -764,21 +764,27 @@ static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data
sg_miter_start(&miter, sgl, nents, sg_flags);
while ((offset < buflen) && sg_miter_next(&miter)) {
- unsigned int len;
+ unsigned int buf_offset = 0;
+ unsigned int len, left;
+ u32 *buf = miter.addr;
len = min(miter.length, buflen - offset);
+ left = len;
- /* When dram_access_quirk, the bounce buffer is a iomem mapping */
- if (host->dram_access_quirk) {
- if (to_buffer)
- memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
- else
- memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
+ if (to_buffer) {
+ do {
+ writel(*buf++, host->bounce_iomem_buf + offset + buf_offset);
+
+ buf_offset += 4;
+ left -= 4;
+ } while (left);
} else {
- if (to_buffer)
- memcpy(host->bounce_buf + offset, miter.addr, len);
- else
- memcpy(miter.addr, host->bounce_buf + offset, len);
+ do {
+ *buf++ = readl(host->bounce_iomem_buf + offset + buf_offset);
+
+ buf_offset += 4;
+ left -= 4;
+ } while (left);
}
offset += len;
@@ -830,7 +836,11 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
if (data->flags & MMC_DATA_WRITE) {
cmd_cfg |= CMD_CFG_DATA_WR;
WARN_ON(xfer_bytes > host->bounce_buf_size);
- meson_mmc_copy_buffer(host, data, xfer_bytes, true);
+ if (host->dram_access_quirk)
+ meson_mmc_copy_buffer(host, data, xfer_bytes, true);
+ else
+ sg_copy_to_buffer(data->sg, data->sg_len,
+ host->bounce_buf, xfer_bytes);
dma_wmb();
}
@@ -849,12 +859,43 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
}
+static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ int i;
+
+ /* Reject the request if any element offset or size is not 32-bit aligned */
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
+ !IS_ALIGNED(sg->length, sizeof(u32))) {
+ dev_err(mmc_dev(mmc), "unaligned sg offset %u len %u\n",
+ sg->offset, sg->length);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct meson_host *host = mmc_priv(mmc);
bool needs_pre_post_req = mrq->data &&
!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);
+ /*
+ * The memory at the end of the controller, used as a bounce buffer for
+ * the dram_access_quirk, only accepts 32-bit read/write accesses, so
+ * check the alignment and length of the data before starting the request.
+ */
+ if (host->dram_access_quirk && mrq->data) {
+ mrq->cmd->error = meson_mmc_validate_dram_access(mmc, mrq->data);
+ if (mrq->cmd->error) {
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ }
+
if (needs_pre_post_req) {
meson_mmc_get_transfer_mode(mmc, mrq);
if (!meson_mmc_desc_chain_mode(mrq->data))
@@ -999,7 +1040,11 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
if (meson_mmc_bounce_buf_read(data)) {
xfer_bytes = data->blksz * data->blocks;
WARN_ON(xfer_bytes > host->bounce_buf_size);
- meson_mmc_copy_buffer(host, data, xfer_bytes, false);
+ if (host->dram_access_quirk)
+ meson_mmc_copy_buffer(host, data, xfer_bytes, false);
+ else
+ sg_copy_from_buffer(data->sg, data->sg_len,
+ host->bounce_buf, xfer_bytes);
}
next_cmd = meson_mmc_get_next_command(cmd);
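memcpy_toio()/memcpy_fromio() give no guarantee about the access width they use, which is why the quirk path above is rewritten around readl()/writel(). A standalone sketch of the word-wise copy, assuming the length is a multiple of four as meson_mmc_validate_dram_access() now enforces:

#include <linux/io.h>

/* Copy len bytes (len % 4 == 0) into a 32-bit-only iomem region. */
static void copy_words_toio(void __iomem *dst, const u32 *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i += 4)
		writel(*src++, dst + i);
}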
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 6fc4cf3c9dce..a4407f391f66 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -561,6 +561,8 @@ static void renesas_sdhi_reset(struct tmio_mmc_host *host)
/* Unknown why, but without polling the reset status it will hang */
read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
false, priv->rstc);
+ /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
priv->needs_adjust_hs400 = false;
renesas_sdhi_set_clock(host, host->clk_cache);
} else if (priv->scc_ctl) {
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 5564d7b23e7c..d1a1c548c515 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
@@ -61,7 +62,6 @@ static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
{
u16 clk;
- unsigned long timeout;
host->mmc->actual_clock = 0;
@@ -86,16 +86,11 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/* Wait max 20 ms */
- timeout = 20;
- while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
- & SDHCI_CLOCK_INT_STABLE)) {
- if (timeout == 0) {
- pr_err("%s: Internal clock never stabilised.\n",
- mmc_hostname(host->mmc));
- return;
- }
- timeout--;
- mdelay(1);
+ if (read_poll_timeout(sdhci_readw, clk, (clk & SDHCI_CLOCK_INT_STABLE),
+ 1000, 20000, false, host, SDHCI_CLOCK_CONTROL)) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ return;
}
clk |= SDHCI_CLOCK_CARD_EN;
@@ -114,6 +109,7 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ unsigned int tmp;
sdhci_reset(host, mask);
@@ -126,6 +122,10 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
sdhci_writel(host, calcr | SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
SDMMC_CALCR);
+
+ if (read_poll_timeout(sdhci_readl, tmp, !(tmp & SDMMC_CALCR_EN),
+ 10, 20000, false, host, SDMMC_CALCR))
+ dev_err(mmc_dev(host->mmc), "Failed to calibrate\n");
}
}
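read_poll_timeout() from <linux/iopoll.h> replaces both open-coded loops. Its arguments are (op, val, cond, sleep_us, timeout_us, sleep_before_read, args...): it stores each op(args...) result in val and returns 0 once cond holds, or -ETIMEDOUT after timeout_us. Usage sketch matching the clock-stabilisation hunk above:

#include <linux/iopoll.h>

u16 clk;

/* Poll SDHCI_CLOCK_CONTROL every 1 ms for at most 20 ms. */
if (read_poll_timeout(sdhci_readw, clk, clk & SDHCI_CLOCK_INT_STABLE,
		      1000, 20000, false, host, SDHCI_CLOCK_CONTROL))
	pr_err("%s: Internal clock never stabilised.\n",
	       mmc_hostname(host->mmc));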
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index ef0badea4f41..04e6f7b26706 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -1676,13 +1676,17 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int data_size1, data_size2, oob_size1, oob_size2;
int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
+ int raw_cw = cw;
nand_read_page_op(chip, page, 0, NULL, 0);
host->use_ecc = false;
+ if (nandc->props->qpic_v2)
+ raw_cw = ecc->steps - 1;
+
clear_bam_transaction(nandc);
set_address(host, host->cw_size * cw, page);
- update_rw_regs(host, 1, true, cw);
+ update_rw_regs(host, 1, true, raw_cw);
config_nand_page_read(chip);
data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
@@ -1711,7 +1715,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
nandc_set_read_loc(chip, cw, 3, read_loc, oob_size2, 1);
}
- config_nand_cw_read(chip, false, cw);
+ config_nand_cw_read(chip, false, raw_cw);
read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
reg_off += data_size1;
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index f0695d68c47e..97f254bdbb16 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -945,8 +945,8 @@ static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
dev->broadcast[0] = 0xFF;
/* Set hardware address. */
- dev->dev_addr[0] = aa->s_node;
dev->addr_len = 1;
+ dev_addr_set(dev, &aa->s_node);
return 0;
case SIOCGIFADDR:
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 1f8925e75b3f..388d7b3bd4c2 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -846,9 +846,8 @@ static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
set_30 (dev,ltflags);
dev->broadcast[0] = 0xFF;
- dev->dev_addr[0] = aa->s_node;
-
dev->addr_len=1;
+ dev_addr_set(dev, &aa->s_node);
return 0;
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 12d085405bd0..8c3ccc7c83cd 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -207,7 +207,8 @@ static int __init arcrimi_found(struct net_device *dev)
}
/* get and check the station ID from offset 1 in shmem */
- dev->dev_addr[0] = arcnet_readb(lp->mem_start, COM9026_REG_R_STATION);
+ arcnet_set_addr(dev, arcnet_readb(lp->mem_start,
+ COM9026_REG_R_STATION));
arc_printk(D_NORMAL, dev, "ARCnet RIM I: station %02Xh found at IRQ %d, ShMem %lXh (%ld*%d bytes)\n",
dev->dev_addr[0],
@@ -324,7 +325,7 @@ static int __init arc_rimi_init(void)
return -ENOMEM;
if (node && node != 0xff)
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
dev->mem_start = io;
dev->irq = irq;
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index 5d4a4c7efbbf..19e996a829c9 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -364,6 +364,11 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
struct net_device *dev);
void arcnet_timeout(struct net_device *dev, unsigned int txqueue);
+static inline void arcnet_set_addr(struct net_device *dev, u8 addr)
+{
+ dev_addr_set(dev, &addr);
+}
+
/* I/O equivalents */
#ifdef CONFIG_SA1100_CT6001
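dev_addr_set() copies dev->addr_len bytes into the device's hardware address and is the sanctioned write path now that direct stores to dev->dev_addr are being phased out. For ARCnet's one-byte station IDs the wrapper above boils down to (station_id is a local variable for illustration):

u8 station_id = 0x2a;		/* example node number */

dev->addr_len = 1;		/* length must be set before the copy */
dev_addr_set(dev, &station_id);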
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index be618e4b9ed5..293a621e654c 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -151,7 +151,7 @@ static int __init com20020_init(void)
return -ENOMEM;
if (node && node != 0xff)
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
dev->netdev_ops = &com20020_netdev_ops;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 3c8f665c1558..6382e1937cca 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -194,7 +194,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
dev->base_addr = ioaddr;
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
dev->sysfs_groups[0] = &com20020_state_group;
dev->irq = pdev->irq;
lp->card_name = "PCI COM20020";
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 78043a9c5981..06e1651b594b 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -157,7 +157,7 @@ static int com20020_set_hwaddr(struct net_device *dev, void *addr)
struct arcnet_local *lp = netdev_priv(dev);
struct sockaddr *hwaddr = addr;
- memcpy(dev->dev_addr, hwaddr->sa_data, 1);
+ dev_addr_set(dev, hwaddr->sa_data);
com20020_set_subaddress(lp, ioaddr, SUB_NODE);
arcnet_outb(dev->dev_addr[0], ioaddr, COM20020_REG_W_XREG);
@@ -220,7 +220,7 @@ int com20020_found(struct net_device *dev, int shared)
/* FIXME: do this some other way! */
if (!dev->dev_addr[0])
- dev->dev_addr[0] = arcnet_inb(ioaddr, 8);
+ arcnet_set_addr(dev, arcnet_inb(ioaddr, 8));
com20020_set_subaddress(lp, ioaddr, SUB_SETUP1);
arcnet_outb(lp->setup, ioaddr, COM20020_REG_W_XREG);
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index b88a109b3b15..24150c933fcb 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -133,7 +133,7 @@ static int com20020_probe(struct pcmcia_device *p_dev)
lp->hw.owner = THIS_MODULE;
/* fill in our module parameters as defaults */
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
p_dev->resource[0]->end = 16;
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 3856b447d38e..37b47749fc8b 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -252,7 +252,7 @@ static int __init com90io_found(struct net_device *dev)
/* get and check the station ID from offset 1 in shmem */
- dev->dev_addr[0] = get_buffer_byte(dev, 1);
+ arcnet_set_addr(dev, get_buffer_byte(dev, 1));
err = register_netdev(dev);
if (err) {
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index d8dfb9ea0de8..f49dae194284 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -531,7 +531,8 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem,
}
/* get and check the station ID from offset 1 in shmem */
- dev->dev_addr[0] = arcnet_readb(lp->mem_start, COM9026_REG_R_STATION);
+ arcnet_set_addr(dev, arcnet_readb(lp->mem_start,
+ COM9026_REG_R_STATION));
dev->base_addr = ioaddr;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 77dc79a7f574..0c52612cb8e9 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4414,7 +4414,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
}
/* success */
- memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
+ dev_addr_set(bond_dev, ss->__data);
return 0;
unwind:
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index b9e9842fed94..c48b77167fab 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -811,8 +811,8 @@ int bond_create_sysfs(struct bond_net *bn)
*/
if (ret == -EEXIST) {
/* Is someone being kinky and naming a device bonding_master? */
- if (__dev_get_by_name(bn->net,
- class_attr_bonding_masters.attr.name))
+ if (netdev_name_in_use(bn->net,
+ class_attr_bonding_masters.attr.name))
pr_err("network device named %s already exists in sysfs\n",
class_attr_bonding_masters.attr.name);
ret = 0;
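netdev_name_in_use() goes through the name-node lookup, so unlike __dev_get_by_name() it should also catch names held as alternate interface names, not only registered devices. A sketch of the check, with the literal name from the hunk:

if (netdev_name_in_use(bn->net, "bonding_masters"))
	pr_err("network device named bonding_masters already exists in sysfs\n");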
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 308d4f2fff00..eee47bad0592 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -32,8 +32,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
{
struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+ void __iomem *src = priv->mram_base + offset;
- ioread32_rep(priv->mram_base + offset, val, val_count);
+ while (val_count--) {
+ *(unsigned int *)val = ioread32(src);
+ val += 4;
+ src += 4;
+ }
return 0;
}
@@ -51,8 +56,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
const void *val, size_t val_count)
{
struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+ void __iomem *dst = priv->mram_base + offset;
- iowrite32_rep(priv->base + offset, val, val_count);
+ while (val_count--) {
+ iowrite32(*(unsigned int *)val, dst);
+ val += 4;
+ dst += 4;
+ }
return 0;
}
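ioread32_rep() and iowrite32_rep() perform every access against one fixed address, which is the right semantics for a hardware FIFO register but not for the M_CAN message RAM, where consecutive words live at consecutive offsets (note the old write path also targeted priv->base instead of priv->mram_base). The distinction in a sketch, with fifo_reg, mem_base, buf, n and i as placeholders:

ioread32_rep(fifo_reg, buf, n);		/* n reads, one fixed address */

for (i = 0; i < n; i++)			/* n reads, advancing address */
	buf[i] = ioread32(mem_base + 4 * i);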
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 00e4533c8bdd..8999ec9455ec 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -846,10 +846,12 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
struct rcar_can_priv *priv = netdev_priv(ndev);
u16 ctlr;
- if (netif_running(ndev)) {
- netif_stop_queue(ndev);
- netif_device_detach(ndev);
- }
+ if (!netif_running(ndev))
+ return 0;
+
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+
ctlr = readw(&priv->regs->ctlr);
ctlr |= RCAR_CAN_CTLR_CANM_HALT;
writew(ctlr, &priv->regs->ctlr);
@@ -868,6 +870,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
u16 ctlr;
int err;
+ if (!netif_running(ndev))
+ return 0;
+
err = clk_enable(priv->clk);
if (err) {
netdev_err(ndev, "clk_enable() failed, error %d\n", err);
@@ -881,10 +886,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
writew(ctlr, &priv->regs->ctlr);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
- if (netif_running(ndev)) {
- netif_device_attach(ndev);
- netif_start_queue(ndev);
- }
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+
return 0;
}
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 6db90dc4bc9d..84f34020aafb 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -752,16 +752,15 @@ static void peak_pci_remove(struct pci_dev *pdev)
struct net_device *prev_dev = chan->prev_dev;
dev_info(&pdev->dev, "removing device %s\n", dev->name);
+ /* do that only for the first channel */
+ if (!prev_dev && chan->pciec_card)
+ peak_pciec_remove(chan->pciec_card);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
dev = prev_dev;
- if (!dev) {
- /* do that only for first channel */
- if (chan->pciec_card)
- peak_pciec_remove(chan->pciec_card);
+ if (!dev)
break;
- }
priv = netdev_priv(dev);
chan = priv->priv;
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index b11eabad575b..09029a3bad1a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -551,11 +551,10 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
} else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
new_state = CAN_STATE_ERROR_WARNING;
} else {
- /* no error bit (so, no error skb, back to active state) */
- dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ /* back to (or still in) ERROR_ACTIVE state */
+ new_state = CAN_STATE_ERROR_ACTIVE;
pdev->bec.txerr = 0;
pdev->bec.rxerr = 0;
- return 0;
}
/* state hasn't changed */
@@ -568,8 +567,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
/* allocate an skb to store the error frame */
skb = alloc_can_err_skb(netdev, &cf);
- if (skb)
- can_change_state(netdev, cf, tx_state, rx_state);
+ can_change_state(netdev, cf, tx_state, rx_state);
/* things must be done even in case of OOM */
if (new_state == CAN_STATE_BUS_OFF)
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index a5f1aa911fe2..7b1457a6e327 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -70,6 +70,7 @@ config NET_DSA_QCA8K
config NET_DSA_REALTEK_SMI
tristate "Realtek SMI Ethernet switch family support"
select NET_DSA_TAG_RTL4_A
+ select NET_DSA_TAG_RTL8_4
select FIXED_PHY
select IRQ_DOMAIN
select REALTEK_PHY
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index f3598c040994..8da1569a34e6 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
-realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o
+realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o rtl8365mb.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index a533a90e3904..a7aeb3c132c9 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -351,9 +351,25 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
static void b53_mdio_remove(struct mdio_device *mdiodev)
{
struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
- struct dsa_switch *ds = dev->ds;
- dsa_unregister_switch(ds);
+ if (!dev)
+ return;
+
+ b53_switch_remove(dev);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void b53_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (!dev)
+ return;
+
+ b53_switch_shutdown(dev);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id b53_of_match[] = {
@@ -373,6 +389,7 @@ MODULE_DEVICE_TABLE(of, b53_of_match);
static struct mdio_driver b53_mdio_driver = {
.probe = b53_mdio_probe,
.remove = b53_mdio_remove,
+ .shutdown = b53_mdio_shutdown,
.mdiodrv.driver = {
.name = "bcm53xx",
.of_match_table = b53_of_match,
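Every DSA driver touched below gains the same ->shutdown implementation: call dsa_switch_shutdown() so the switch stops forwarding before reboot or kexec, then clear drvdata so a ->remove that runs afterwards sees NULL and bails out early (and vice versa). The pattern in the abstract, with hypothetical foo_* names:

static void foo_shutdown(struct mdio_device *mdiodev)
{
	struct foo_priv *priv = dev_get_drvdata(&mdiodev->dev);

	if (!priv)		/* ->remove already tore it down */
		return;

	dsa_switch_shutdown(priv->ds);

	dev_set_drvdata(&mdiodev->dev, NULL);
}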
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index 82680e083cc2..ae4c79d39bc0 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -316,9 +316,21 @@ static int b53_mmap_remove(struct platform_device *pdev)
if (dev)
b53_switch_remove(dev);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
+static void b53_mmap_shutdown(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (dev)
+ b53_switch_shutdown(dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static const struct of_device_id b53_mmap_of_table[] = {
{ .compatible = "brcm,bcm3384-switch" },
{ .compatible = "brcm,bcm6328-switch" },
@@ -331,6 +343,7 @@ MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
static struct platform_driver b53_mmap_driver = {
.probe = b53_mmap_probe,
.remove = b53_mmap_remove,
+ .shutdown = b53_mmap_shutdown,
.driver = {
.name = "b53-switch",
.of_match_table = b53_mmap_of_table,
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 5be8fe000037..544101e74bca 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -227,6 +227,11 @@ static inline void b53_switch_remove(struct b53_device *dev)
dsa_unregister_switch(dev->ds);
}
+static inline void b53_switch_shutdown(struct b53_device *dev)
+{
+ dsa_switch_shutdown(dev->ds);
+}
+
#define b53_build_op(type_op_size, val_type) \
static inline int b53_##type_op_size(struct b53_device *dev, u8 page, \
u8 reg, val_type val) \
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index ecb9f7f6b335..01e37b75471e 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -321,9 +321,21 @@ static int b53_spi_remove(struct spi_device *spi)
if (dev)
b53_switch_remove(dev);
+ spi_set_drvdata(spi, NULL);
+
return 0;
}
+static void b53_spi_shutdown(struct spi_device *spi)
+{
+ struct b53_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ b53_switch_shutdown(dev);
+
+ spi_set_drvdata(spi, NULL);
+}
+
static const struct of_device_id b53_spi_of_match[] = {
{ .compatible = "brcm,bcm5325" },
{ .compatible = "brcm,bcm5365" },
@@ -344,6 +356,7 @@ static struct spi_driver b53_spi_driver = {
},
.probe = b53_spi_probe,
.remove = b53_spi_remove,
+ .shutdown = b53_spi_shutdown,
};
module_spi_driver(b53_spi_driver);
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 3f4249de70c5..4591bb1c05d2 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -629,17 +629,34 @@ static int b53_srab_probe(struct platform_device *pdev)
static int b53_srab_remove(struct platform_device *pdev)
{
struct b53_device *dev = platform_get_drvdata(pdev);
- struct b53_srab_priv *priv = dev->priv;
- b53_srab_intr_set(priv, false);
+ if (!dev)
+ return 0;
+
+ b53_srab_intr_set(dev->priv, false);
b53_switch_remove(dev);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
+static void b53_srab_shutdown(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (!dev)
+ return;
+
+ b53_switch_shutdown(dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static struct platform_driver b53_srab_driver = {
.probe = b53_srab_probe,
.remove = b53_srab_remove,
+ .shutdown = b53_srab_shutdown,
.driver = {
.name = "b53-srab-switch",
.of_match_table = b53_srab_of_match,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index aa713936d77c..a86ddc4bb897 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -68,7 +68,7 @@ static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int port, count = 0;
- for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {
+ for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_cpu_port(ds, port))
continue;
if (priv->port_sts[port].enabled)
@@ -1514,6 +1514,9 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+ if (!priv)
+ return 0;
+
priv->wol_ports_mask = 0;
/* Disable interrupts */
bcm_sf2_intr_disable(priv);
@@ -1525,6 +1528,8 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
if (priv->type == BCM7278_DEVICE_ID)
reset_control_assert(priv->rcdev);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
@@ -1532,6 +1537,9 @@ static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+ if (!priv)
+ return;
+
/* For a kernel about to be kexec'd we want to keep the GPHY on for a
* successful MDIO bus scan to occur. If we did turn off the GPHY
* before (e.g. port_disable), this will also power it back on.
@@ -1540,6 +1548,10 @@ static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
*/
if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+
+ dsa_switch_shutdown(priv->dev->ds);
+
+ platform_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index bfdf3324aac3..e638e3eea911 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -340,10 +340,29 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
- struct dsa_loop_priv *ps = ds->priv;
+ struct dsa_loop_priv *ps;
+
+ if (!ds)
+ return;
+
+ ps = ds->priv;
dsa_unregister_switch(ds);
dev_put(ps->netdev);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void dsa_loop_drv_shutdown(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ if (!ds)
+ return;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static struct mdio_driver dsa_loop_drv = {
@@ -352,6 +371,7 @@ static struct mdio_driver dsa_loop_drv = {
},
.probe = dsa_loop_drv_probe,
.remove = dsa_loop_drv_remove,
+ .shutdown = dsa_loop_drv_shutdown,
};
#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 542cfc4ccb08..354655f9ed00 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -1916,6 +1916,9 @@ static int hellcreek_remove(struct platform_device *pdev)
{
struct hellcreek *hellcreek = platform_get_drvdata(pdev);
+ if (!hellcreek)
+ return 0;
+
hellcreek_hwtstamp_free(hellcreek);
hellcreek_ptp_free(hellcreek);
dsa_unregister_switch(hellcreek->ds);
@@ -1924,6 +1927,18 @@ static int hellcreek_remove(struct platform_device *pdev)
return 0;
}
+static void hellcreek_shutdown(struct platform_device *pdev)
+{
+ struct hellcreek *hellcreek = platform_get_drvdata(pdev);
+
+ if (!hellcreek)
+ return;
+
+ dsa_switch_shutdown(hellcreek->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static const struct hellcreek_platform_data de1soc_r1_pdata = {
.name = "r4c30",
.num_ports = 4,
@@ -1946,6 +1961,7 @@ MODULE_DEVICE_TABLE(of, hellcreek_of_match);
static struct platform_driver hellcreek_driver = {
.probe = hellcreek_probe,
.remove = hellcreek_remove,
+ .shutdown = hellcreek_shutdown,
.driver = {
.name = "hellcreek",
.of_match_table = hellcreek_of_match,
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index d7ce281570b5..89f920289ae2 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1379,6 +1379,12 @@ int lan9303_remove(struct lan9303 *chip)
}
EXPORT_SYMBOL(lan9303_remove);
+void lan9303_shutdown(struct lan9303 *chip)
+{
+ dsa_switch_shutdown(chip->ds);
+}
+EXPORT_SYMBOL(lan9303_shutdown);
+
MODULE_AUTHOR("Juergen Borleis <[email protected]>");
MODULE_DESCRIPTION("Core driver for SMSC/Microchip LAN9303 three port ethernet switch");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lan9303.h b/drivers/net/dsa/lan9303.h
index 11f590b64701..c7f73efa50f0 100644
--- a/drivers/net/dsa/lan9303.h
+++ b/drivers/net/dsa/lan9303.h
@@ -10,3 +10,4 @@ extern const struct lan9303_phy_ops lan9303_indirect_phy_ops;
int lan9303_probe(struct lan9303 *chip, struct device_node *np);
int lan9303_remove(struct lan9303 *chip);
+void lan9303_shutdown(struct lan9303 *chip);
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
index 9bffaef65a04..8ca4713310fa 100644
--- a/drivers/net/dsa/lan9303_i2c.c
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -67,13 +67,28 @@ static int lan9303_i2c_probe(struct i2c_client *client,
static int lan9303_i2c_remove(struct i2c_client *client)
{
- struct lan9303_i2c *sw_dev;
+ struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
- sw_dev = i2c_get_clientdata(client);
if (!sw_dev)
- return -ENODEV;
+ return 0;
+
+ lan9303_remove(&sw_dev->chip);
+
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+static void lan9303_i2c_shutdown(struct i2c_client *client)
+{
+ struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
+
+ if (!sw_dev)
+ return;
+
+ lan9303_shutdown(&sw_dev->chip);
- return lan9303_remove(&sw_dev->chip);
+ i2c_set_clientdata(client, NULL);
}
/*-------------------------------------------------------------------------*/
@@ -97,6 +112,7 @@ static struct i2c_driver lan9303_i2c_driver = {
},
.probe = lan9303_i2c_probe,
.remove = lan9303_i2c_remove,
+ .shutdown = lan9303_i2c_shutdown,
.id_table = lan9303_i2c_id,
};
module_i2c_driver(lan9303_i2c_driver);
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
index 9cbe80460b53..bbb7032409ba 100644
--- a/drivers/net/dsa/lan9303_mdio.c
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -138,6 +138,20 @@ static void lan9303_mdio_remove(struct mdio_device *mdiodev)
return;
lan9303_remove(&sw_dev->chip);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct lan9303_mdio *sw_dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (!sw_dev)
+ return;
+
+ lan9303_shutdown(&sw_dev->chip);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
/*-------------------------------------------------------------------------*/
@@ -155,6 +169,7 @@ static struct mdio_driver lan9303_mdio_driver = {
},
.probe = lan9303_mdio_probe,
.remove = lan9303_mdio_remove,
+ .shutdown = lan9303_mdio_shutdown,
};
mdio_module_driver(lan9303_mdio_driver);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 267324889dd6..dbd4486a173f 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -230,7 +230,7 @@
#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
-#define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */
+#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
#define GSWIP_TABLE_ACTIVE_VLAN 0x01
#define GSWIP_TABLE_VLAN_MAPPING 0x02
@@ -2184,6 +2184,9 @@ static int gswip_remove(struct platform_device *pdev)
struct gswip_priv *priv = platform_get_drvdata(pdev);
int i;
+ if (!priv)
+ return 0;
+
/* disable the switch */
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
@@ -2197,9 +2200,23 @@ static int gswip_remove(struct platform_device *pdev)
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
+static void gswip_shutdown(struct platform_device *pdev)
+{
+ struct gswip_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static const struct gswip_hw_info gswip_xrx200 = {
.max_ports = 7,
.cpu_port = 6,
@@ -2223,6 +2240,7 @@ MODULE_DEVICE_TABLE(of, gswip_of_match);
static struct platform_driver gswip_driver = {
.probe = gswip_probe,
.remove = gswip_remove,
+ .shutdown = gswip_shutdown,
.driver = {
.name = "gswip",
.of_match_table = gswip_of_match,
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
index ea7550d1b634..866767b70d65 100644
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ b/drivers/net/dsa/microchip/ksz8795_spi.c
@@ -94,6 +94,8 @@ static int ksz8795_spi_remove(struct spi_device *spi)
if (dev)
ksz_switch_remove(dev);
+ spi_set_drvdata(spi, NULL);
+
return 0;
}
@@ -101,8 +103,15 @@ static void ksz8795_spi_shutdown(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
- if (dev && dev->dev_ops->shutdown)
+ if (!dev)
+ return;
+
+ if (dev->dev_ops->shutdown)
dev->dev_ops->shutdown(dev);
+
+ dsa_switch_shutdown(dev->ds);
+
+ spi_set_drvdata(spi, NULL);
}
static const struct of_device_id ksz8795_dt_ids[] = {
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index 11293485138c..5883fa7edda2 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -191,6 +191,18 @@ static void ksz8863_smi_remove(struct mdio_device *mdiodev)
if (dev)
ksz_switch_remove(dev);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
+{
+ struct ksz_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (dev)
+ dsa_switch_shutdown(dev->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id ksz8863_dt_ids[] = {
@@ -203,6 +215,7 @@ MODULE_DEVICE_TABLE(of, ksz8863_dt_ids);
static struct mdio_driver ksz8863_driver = {
.probe = ksz8863_smi_probe,
.remove = ksz8863_smi_remove,
+ .shutdown = ksz8863_smi_shutdown,
.mdiodrv.driver = {
.name = "ksz8863-switch",
.of_match_table = ksz8863_dt_ids,
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 4e053a25d077..f3afb8b8c4cc 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -56,7 +56,10 @@ static int ksz9477_i2c_remove(struct i2c_client *i2c)
{
struct ksz_device *dev = i2c_get_clientdata(i2c);
- ksz_switch_remove(dev);
+ if (dev)
+ ksz_switch_remove(dev);
+
+ i2c_set_clientdata(i2c, NULL);
return 0;
}
@@ -65,8 +68,15 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
{
struct ksz_device *dev = i2c_get_clientdata(i2c);
- if (dev && dev->dev_ops->shutdown)
+ if (!dev)
+ return;
+
+ if (dev->dev_ops->shutdown)
dev->dev_ops->shutdown(dev);
+
+ dsa_switch_shutdown(dev->ds);
+
+ i2c_set_clientdata(i2c, NULL);
}
static const struct i2c_device_id ksz9477_i2c_id[] = {
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index 15bc11b3cda4..e3cb0e6c9f6f 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -72,6 +72,8 @@ static int ksz9477_spi_remove(struct spi_device *spi)
if (dev)
ksz_switch_remove(dev);
+ spi_set_drvdata(spi, NULL);
+
return 0;
}
@@ -79,8 +81,10 @@ static void ksz9477_spi_shutdown(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
- if (dev && dev->dev_ops->shutdown)
- dev->dev_ops->shutdown(dev);
+ if (dev)
+ dsa_switch_shutdown(dev->ds);
+
+ spi_set_drvdata(spi, NULL);
}
static const struct of_device_id ksz9477_dt_ids[] = {
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 1542bfb8b5e5..7c2968a639eb 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -449,8 +449,10 @@ EXPORT_SYMBOL(ksz_switch_register);
void ksz_switch_remove(struct ksz_device *dev)
{
/* timer started */
- if (dev->mib_read_interval)
+ if (dev->mib_read_interval) {
+ dev->mib_read_interval = 0;
cancel_delayed_work_sync(&dev->mib_read);
+ }
dev->dev_ops->exit(dev);
dsa_unregister_switch(dev->ds);
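The reordering matters because the MIB work can be re-armed: clearing mib_read_interval before the synchronous cancel stops any path that consults it (the handler and, assuming the rest of the driver follows the same convention, the link-state callbacks) from queueing the work again. The general stop sequence for self-rearming delayed work:

dev->mib_read_interval = 0;			/* 1. make the re-arm condition false */
cancel_delayed_work_sync(&dev->mib_read);	/* 2. then cancel and wait */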
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index d0cba2d1cd68..9890672a206d 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1035,9 +1035,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
- if (!dsa_is_user_port(ds, port))
- return 0;
-
mutex_lock(&priv->reg_mutex);
/* Allow the user port to get connected to the cpu port and also
@@ -1060,9 +1057,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- if (!dsa_is_user_port(ds, port))
- return;
-
mutex_lock(&priv->reg_mutex);
/* Clear up all port matrix which could be restored in the next
@@ -3211,7 +3205,7 @@ mt7530_probe(struct mdio_device *mdiodev)
return -ENOMEM;
priv->ds->dev = &mdiodev->dev;
- priv->ds->num_ports = DSA_MAX_PORTS;
+ priv->ds->num_ports = MT7530_NUM_PORTS;
/* Use the mediatek,mcm property to distinguish the hardware type, which
* causes slight differences in the power-on sequence.
@@ -3286,6 +3280,9 @@ mt7530_remove(struct mdio_device *mdiodev)
struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
int ret = 0;
+ if (!priv)
+ return;
+
ret = regulator_disable(priv->core_pwr);
if (ret < 0)
dev_err(priv->dev,
@@ -3301,11 +3298,26 @@ mt7530_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(priv->ds);
mutex_destroy(&priv->reg_mutex);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void mt7530_shutdown(struct mdio_device *mdiodev)
+{
+ struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static struct mdio_driver mt7530_mdio_driver = {
.probe = mt7530_probe,
.remove = mt7530_remove,
+ .shutdown = mt7530_shutdown,
.mdiodrv.driver = {
.name = "mt7530",
.of_match_table = mt7530_of_match,
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 24b8219fd607..a4c6eb9a52d0 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -290,7 +290,24 @@ static void mv88e6060_remove(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+ if (!ds)
+ return;
+
dsa_unregister_switch(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void mv88e6060_shutdown(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ if (!ds)
+ return;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id mv88e6060_of_match[] = {
@@ -303,6 +320,7 @@ static const struct of_device_id mv88e6060_of_match[] = {
static struct mdio_driver mv88e6060_driver = {
.probe = mv88e6060_probe,
.remove = mv88e6060_remove,
+ .shutdown = mv88e6060_shutdown,
.mdiodrv.driver = {
.name = "mv88e6060",
.of_match_table = mv88e6060_of_match,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index c45ca2473743..8dadcae93c9b 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -12,6 +12,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
+#include <linux/dsa/mv88e6xxx.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
@@ -749,7 +750,11 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
ops = chip->info->ops;
mv88e6xxx_reg_lock(chip);
- if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
+ /* Internal PHYs propagate their configuration directly to the MAC.
+ * External PHYs depend on whether the PPU is enabled for this port.
+ */
+ if (((!mv88e6xxx_phy_is_internal(ds, port) &&
+ !mv88e6xxx_port_ppu_updates(chip, port)) ||
mode == MLO_AN_FIXED) && ops->port_sync_link)
err = ops->port_sync_link(chip, port, mode, false);
mv88e6xxx_reg_unlock(chip);
@@ -772,7 +777,12 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
ops = chip->info->ops;
mv88e6xxx_reg_lock(chip);
- if (!mv88e6xxx_port_ppu_updates(chip, port) || mode == MLO_AN_FIXED) {
+ /* Internal PHYs propagate their configuration directly to the MAC.
+ * External PHYs depend on whether the PPU is enabled for this port.
+ */
+ if ((!mv88e6xxx_phy_is_internal(ds, port) &&
+ !mv88e6xxx_port_ppu_updates(chip, port)) ||
+ mode == MLO_AN_FIXED) {
/* FIXME: for an automedia port, should we force the link
* down here - what if the link comes up due to "other" media
* while we're bringing the port up, how is the exclusivity
@@ -1677,6 +1687,30 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
return 0;
}
+static int mv88e6xxx_port_commit_pvid(struct mv88e6xxx_chip *chip, int port)
+{
+ struct dsa_port *dp = dsa_to_port(chip->ds, port);
+ struct mv88e6xxx_port *p = &chip->ports[port];
+ u16 pvid = MV88E6XXX_VID_STANDALONE;
+ bool drop_untagged = false;
+ int err;
+
+ if (dp->bridge_dev) {
+ if (br_vlan_enabled(dp->bridge_dev)) {
+ pvid = p->bridge_pvid.vid;
+ drop_untagged = !p->bridge_pvid.valid;
+ } else {
+ pvid = MV88E6XXX_VID_BRIDGED;
+ }
+ }
+
+ err = mv88e6xxx_port_set_pvid(chip, port, pvid);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_drop_untagged(chip, port, drop_untagged);
+}
+
static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
@@ -1690,7 +1724,16 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
+
err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+ if (err)
+ goto unlock;
+
+unlock:
mv88e6xxx_reg_unlock(chip);
return err;
@@ -1725,11 +1768,15 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
u16 fid;
int err;
- /* Null VLAN ID corresponds to the port private database */
+ /* Ports have two private address databases: one for when the port is
+ * standalone and one for when the port is under a bridge and the
+ * 802.1Q mode is disabled. When the port is standalone, DSA wants its
+ * address database to remain 100% empty, so we never load an ATU entry
+ * into a standalone port's database. Therefore, translate the null
+ * VLAN ID into the port's database used for VLAN-unaware bridging.
+ */
if (vid == 0) {
- err = mv88e6xxx_port_get_fid(chip, port, &fid);
- if (err)
- return err;
+ fid = MV88E6XXX_FID_BRIDGED;
} else {
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
@@ -2123,6 +2170,7 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct mv88e6xxx_port *p = &chip->ports[port];
bool warn;
u8 member;
int err;
@@ -2156,13 +2204,21 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
}
if (pvid) {
- err = mv88e6xxx_port_set_pvid(chip, port, vlan->vid);
- if (err) {
- dev_err(ds->dev, "p%d: failed to set PVID %d\n",
- port, vlan->vid);
+ p->bridge_pvid.vid = vlan->vid;
+ p->bridge_pvid.valid = true;
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+ if (err)
+ goto out;
+ } else if (vlan->vid && p->bridge_pvid.vid == vlan->vid) {
+ /* The old pvid was reinstalled as a non-pvid VLAN */
+ p->bridge_pvid.valid = false;
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+ if (err)
goto out;
- }
}
+
out:
mv88e6xxx_reg_unlock(chip);
@@ -2212,6 +2268,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_port *p = &chip->ports[port];
int err = 0;
u16 pvid;
@@ -2229,7 +2286,9 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
goto unlock;
if (vlan->vid == pvid) {
- err = mv88e6xxx_port_set_pvid(chip, port, 0);
+ p->bridge_pvid.valid = false;
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
goto unlock;
}
@@ -2393,7 +2452,16 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
int err;
mv88e6xxx_reg_lock(chip);
+
err = mv88e6xxx_bridge_map(chip, br);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+ if (err)
+ goto unlock;
+
+unlock:
mv88e6xxx_reg_unlock(chip);
return err;
@@ -2403,11 +2471,20 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
struct net_device *br)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
mv88e6xxx_reg_lock(chip);
+
if (mv88e6xxx_bridge_map(chip, br) ||
mv88e6xxx_port_vlan_map(chip, port))
dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+ if (err)
+ dev_err(ds->dev,
+ "port %d failed to restore standalone pvid: %pe\n",
+ port, ERR_PTR(err));
+
mv88e6xxx_reg_unlock(chip);
}
@@ -2834,8 +2911,8 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- /* Port Control 2: don't force a good FCS, set the maximum frame size to
- * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
+ /* Port Control 2: don't force a good FCS, set the MTU to
+ * 10222 bytes, disable 802.1q tags checking, don't discard tagged or
* untagged frames on this port, do a destination address lookup on all
* received packets as usual, disable ARP mirroring and don't send a
* copy of all transmitted/received frames on this port to the CPU.
@@ -2853,8 +2930,22 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
+ /* Associate MV88E6XXX_VID_BRIDGED with MV88E6XXX_FID_BRIDGED in the
+ * ATU by virtue of the fact that mv88e6xxx_atu_new() will pick it as
+ * the first free FID after MV88E6XXX_FID_STANDALONE. This will be used
+ * as the private PVID on ports under a VLAN-unaware bridge.
+ * Shared (DSA and CPU) ports must also be members of it, to translate
+ * the VID from the DSA tag into MV88E6XXX_FID_BRIDGED, instead of
+ * relying on their port default FID.
+ */
+ err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_BRIDGED,
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED,
+ false);
+ if (err)
+ return err;
+
if (chip->info->ops->port_set_jumbo_size) {
- err = chip->info->ops->port_set_jumbo_size(chip, port, 10240);
+ err = chip->info->ops->port_set_jumbo_size(chip, port, 10218);
if (err)
return err;
}
@@ -2925,7 +3016,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
*/
- err = mv88e6xxx_port_set_fid(chip, port, 0);
+ err = mv88e6xxx_port_set_fid(chip, port, MV88E6XXX_FID_STANDALONE);
if (err)
return err;
@@ -2944,10 +3035,10 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
struct mv88e6xxx_chip *chip = ds->priv;
if (chip->info->ops->port_set_jumbo_size)
- return 10240;
+ return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
else if (chip->info->ops->set_max_frame_size)
- return 1632;
- return 1522;
+ return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+ return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
}
static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
@@ -2955,6 +3046,9 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
struct mv88e6xxx_chip *chip = ds->priv;
int ret = 0;
+ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ new_mtu += EDSA_HLEN;
+
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->port_set_jumbo_size)
ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
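Worked numbers for the frame-size/MTU conversion above, using the header lengths from if_vlan.h and if_ether.h plus the EDSA_HLEN definition added in the chip.h hunk below: VLAN_ETH_HLEN (18) + EDSA_HLEN (8) + ETH_FCS_LEN (4) = 30 bytes of overhead, so the advertised maxima become 10240 - 30 = 10210, 1632 - 30 = 1602 and 1522 - 30 = 1492. The setters add the headers back on: mv88e6165_port_set_jumbo_size() adds VLAN_ETH_HLEN + ETH_FCS_LEN, so the 10218 passed by mv88e6xxx_setup_port() programs the original 10240-byte limit, and mv88e6185_g1_set_max_frame_size() adds ETH_HLEN + ETH_FCS_LEN.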
@@ -3071,7 +3165,7 @@ static void mv88e6xxx_teardown(struct dsa_switch *ds)
{
mv88e6xxx_teardown_devlink_params(ds);
dsa_devlink_resources_unregister(ds);
- mv88e6xxx_teardown_devlink_regions(ds);
+ mv88e6xxx_teardown_devlink_regions_global(ds);
}
static int mv88e6xxx_setup(struct dsa_switch *ds)
@@ -3112,6 +3206,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
}
}
+ err = mv88e6xxx_vtu_setup(chip);
+ if (err)
+ goto unlock;
+
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
if (dsa_is_unused_port(ds, i))
@@ -3141,10 +3239,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
if (err)
goto unlock;
- err = mv88e6xxx_vtu_setup(chip);
- if (err)
- goto unlock;
-
err = mv88e6xxx_pvt_setup(chip);
if (err)
goto unlock;
@@ -3215,7 +3309,7 @@ unlock:
if (err)
goto out_resources;
- err = mv88e6xxx_setup_devlink_regions(ds);
+ err = mv88e6xxx_setup_devlink_regions_global(ds);
if (err)
goto out_params;
@@ -3229,6 +3323,16 @@ out_resources:
return err;
}
+static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
+{
+ return mv88e6xxx_setup_devlink_regions_port(ds, port);
+}
+
+static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
+{
+ mv88e6xxx_teardown_devlink_regions_port(ds, port);
+}
+
/* prod_id for switch families which do not have a PHY model number */
static const u16 family_prod_id_table[] = {
[MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
@@ -3715,7 +3819,6 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
@@ -3740,6 +3843,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.avb_ops = &mv88e6165_avb_ops,
.ptp_ops = &mv88e6165_ptp_ops,
.phylink_validate = mv88e6185_phylink_validate,
+ .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
static const struct mv88e6xxx_ops mv88e6165_ops = {
@@ -6116,6 +6220,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.change_tag_protocol = mv88e6xxx_change_tag_protocol,
.setup = mv88e6xxx_setup,
.teardown = mv88e6xxx_teardown,
+ .port_setup = mv88e6xxx_port_setup,
+ .port_teardown = mv88e6xxx_port_teardown,
.phylink_validate = mv88e6xxx_validate,
.phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
.phylink_mac_config = mv88e6xxx_mac_config,
@@ -6389,7 +6495,12 @@ out:
static void mv88e6xxx_remove(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_chip *chip;
+
+ if (!ds)
+ return;
+
+ chip = ds->priv;
if (chip->info->ptp_support) {
mv88e6xxx_hwtstamp_free(chip);
@@ -6410,6 +6521,20 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ if (!ds)
+ return;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id mv88e6xxx_of_match[] = {
@@ -6433,6 +6558,7 @@ MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match);
static struct mdio_driver mv88e6xxx_driver = {
.probe = mv88e6xxx_probe,
.remove = mv88e6xxx_remove,
+ .shutdown = mv88e6xxx_shutdown,
.mdiodrv.driver = {
.name = "mv88e6085",
.of_match_table = mv88e6xxx_of_match,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 675b1f3e43b7..8271b8aa7b71 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -18,8 +18,12 @@
#include <linux/timecounter.h>
#include <net/dsa.h>
+#define EDSA_HLEN 8
#define MV88E6XXX_N_FID 4096
+#define MV88E6XXX_FID_STANDALONE 0
+#define MV88E6XXX_FID_BRIDGED 1
+
/* PVT limits for 4-bit port and 5-bit switch */
#define MV88E6XXX_MAX_PVT_SWITCHES 32
#define MV88E6XXX_MAX_PVT_PORTS 16
@@ -245,9 +249,15 @@ struct mv88e6xxx_policy {
u16 vid;
};
+struct mv88e6xxx_vlan {
+ u16 vid;
+ bool valid;
+};
+
struct mv88e6xxx_port {
struct mv88e6xxx_chip *chip;
int port;
+ struct mv88e6xxx_vlan bridge_pvid;
u64 serdes_stats[2];
u64 atu_member_violation;
u64 atu_miss_violation;
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index 0c0f5ea6680c..381068395c63 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -647,26 +647,25 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
},
};
-static void
-mv88e6xxx_teardown_devlink_regions_global(struct mv88e6xxx_chip *chip)
+void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds)
{
+ struct mv88e6xxx_chip *chip = ds->priv;
int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
dsa_devlink_region_destroy(chip->regions[i]);
}
-static void
-mv88e6xxx_teardown_devlink_regions_port(struct mv88e6xxx_chip *chip,
- int port)
+void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port)
{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
dsa_devlink_region_destroy(chip->ports[port].region);
}
-static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
- struct mv88e6xxx_chip *chip,
- int port)
+int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port)
{
+ struct mv88e6xxx_chip *chip = ds->priv;
struct devlink_region *region;
region = dsa_devlink_port_region_create(ds,
@@ -681,40 +680,10 @@ static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
return 0;
}
-static void
-mv88e6xxx_teardown_devlink_regions_ports(struct mv88e6xxx_chip *chip)
-{
- int port;
-
- for (port = 0; port < mv88e6xxx_num_ports(chip); port++)
- mv88e6xxx_teardown_devlink_regions_port(chip, port);
-}
-
-static int mv88e6xxx_setup_devlink_regions_ports(struct dsa_switch *ds,
- struct mv88e6xxx_chip *chip)
-{
- int port;
- int err;
-
- for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
- err = mv88e6xxx_setup_devlink_regions_port(ds, chip, port);
- if (err)
- goto out;
- }
-
- return 0;
-
-out:
- while (port-- > 0)
- mv88e6xxx_teardown_devlink_regions_port(chip, port);
-
- return err;
-}
-
-static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
- struct mv88e6xxx_chip *chip)
+int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
{
bool (*cond)(struct mv88e6xxx_chip *chip);
+ struct mv88e6xxx_chip *chip = ds->priv;
struct devlink_region_ops *ops;
struct devlink_region *region;
u64 size;
@@ -753,30 +722,6 @@ out:
return PTR_ERR(region);
}
-int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- err = mv88e6xxx_setup_devlink_regions_global(ds, chip);
- if (err)
- return err;
-
- err = mv88e6xxx_setup_devlink_regions_ports(ds, chip);
- if (err)
- mv88e6xxx_teardown_devlink_regions_global(chip);
-
- return err;
-}
-
-void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
-
- mv88e6xxx_teardown_devlink_regions_ports(chip);
- mv88e6xxx_teardown_devlink_regions_global(chip);
-}
-
int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.h b/drivers/net/dsa/mv88e6xxx/devlink.h
index 3d72db3dcf95..65ce6a6858b9 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.h
+++ b/drivers/net/dsa/mv88e6xxx/devlink.h
@@ -12,8 +12,10 @@ int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
-int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds);
-void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds);
+int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds);
+void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds);
+int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port);
+void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port);
int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
struct devlink_info_req *req,
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 815b0f681d69..5848112036b0 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -232,6 +232,8 @@ int mv88e6185_g1_set_max_frame_size(struct mv88e6xxx_chip *chip, int mtu)
u16 val;
int err;
+ mtu += ETH_HLEN + ETH_FCS_LEN;
+
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index f77e2ee64a60..d9817b20ea64 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -1257,6 +1257,27 @@ int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
return 0;
}
+int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
+ bool drop_untagged)
+{
+ u16 old, new;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &old);
+ if (err)
+ return err;
+
+ if (drop_untagged)
+ new = old | MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED;
+ else
+ new = old & ~MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED;
+
+ if (new == old)
+ return 0;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, new);
+}
+
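A hypothetical caller, to illustrate the intent of the new helper (the function and parameter names below are illustrative, not the driver's): untagged ingress traffic is discarded exactly while VLAN filtering is active on the port without a usable PVID.

static int example_commit_pvid(struct mv88e6xxx_chip *chip, int port,
			       bool vlan_filtering, bool have_pvid)
{
	/* Drop untagged frames only when they cannot be classified */
	return mv88e6xxx_port_drop_untagged(chip, port,
					    vlan_filtering && !have_pvid);
}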
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
{
u16 reg;
@@ -1277,6 +1298,8 @@ int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
u16 reg;
int err;
+ size += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index b10e5aebacf6..03382b66f800 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -423,6 +423,8 @@ int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
+ bool drop_untagged);
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 3656e67af789..83808e7dbdda 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2019-2021 NXP Semiconductors
+/* Copyright 2019-2021 NXP
*
* This is an umbrella module for all network switches that are
* register-compatible with Ocelot and that perform I/O to their host CPU
@@ -266,12 +266,12 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
*/
static int felix_setup_mmio_filtering(struct felix *felix)
{
- unsigned long user_ports = 0, cpu_ports = 0;
+ unsigned long user_ports = dsa_user_ports(felix->ds);
struct ocelot_vcap_filter *redirect_rule;
struct ocelot_vcap_filter *tagging_rule;
struct ocelot *ocelot = &felix->ocelot;
struct dsa_switch *ds = felix->ds;
- int port, ret;
+ int cpu = -1, port, ret;
tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
if (!tagging_rule)
@@ -284,12 +284,15 @@ static int felix_setup_mmio_filtering(struct felix *felix)
}
for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_user_port(ds, port))
- user_ports |= BIT(port);
- if (dsa_is_cpu_port(ds, port))
- cpu_ports |= BIT(port);
+ if (dsa_is_cpu_port(ds, port)) {
+ cpu = port;
+ break;
+ }
}
+ if (cpu < 0)
+ return -EINVAL;
+
tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
*(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
*(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
@@ -325,7 +328,7 @@ static int felix_setup_mmio_filtering(struct felix *felix)
* the CPU port module
*/
redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
- redirect_rule->action.port_mask = cpu_ports;
+ redirect_rule->action.port_mask = BIT(cpu);
} else {
/* Trap PTP packets only to the CPU port module (which is
* redirected to the NPI port)
@@ -955,8 +958,10 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
switch_node = dev->of_node;
ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node)
+ ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
if (!ports_node) {
- dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
+ dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");
return -ENODEV;
}
@@ -1074,6 +1079,101 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return 0;
}
+static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *skb)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
+ struct sk_buff *skb_match = NULL, *skb_tmp;
+ unsigned long flags;
+
+ if (!clone)
+ return;
+
+ spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);
+
+ skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
+ if (skb != clone)
+ continue;
+ __skb_unlink(skb, &ocelot_port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+
+ spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);
+
+ WARN_ONCE(!skb_match,
+ "Could not find skb clone in TX timestamping list\n");
+}
+
+#define work_to_xmit_work(w) \
+ container_of((w), struct felix_deferred_xmit_work, work)
+
+static void felix_port_deferred_xmit(struct kthread_work *work)
+{
+ struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+ struct dsa_switch *ds = xmit_work->dp->ds;
+ struct sk_buff *skb = xmit_work->skb;
+ u32 rew_op = ocelot_ptp_rew_op(skb);
+ struct ocelot *ocelot = ds->priv;
+ int port = xmit_work->dp->index;
+ int retries = 10;
+
+ do {
+ if (ocelot_can_inject(ocelot, 0))
+ break;
+
+ cpu_relax();
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(ocelot->dev, "port %d failed to inject skb\n",
+ port);
+ ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
+ kfree_skb(skb);
+ return;
+ }
+
+ ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+
+ consume_skb(skb);
+ kfree(xmit_work);
+}
+
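The worker above must not block forever on the injection registers, so it bounds a busy-wait around ocelot_can_inject(). A stripped-down sketch of that pattern (the helper name is illustrative):

static bool example_wait_can_inject(struct ocelot *ocelot, int grp)
{
	int retries = 10;

	do {
		if (ocelot_can_inject(ocelot, grp))
			return true;	/* injection group has room */
		cpu_relax();		/* brief pause before polling again */
	} while (--retries);

	return false;			/* give up rather than spin forever */
}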
+static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct felix_port *felix_port;
+
+ if (!dsa_port_is_user(dp))
+ return 0;
+
+ felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL);
+ if (!felix_port)
+ return -ENOMEM;
+
+ felix_port->xmit_worker = felix->xmit_worker;
+ felix_port->xmit_work_fn = felix_port_deferred_xmit;
+
+ dp->priv = felix_port;
+
+ return 0;
+}
+
+static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct felix_port *felix_port = dp->priv;
+
+ if (!felix_port)
+ return;
+
+ dp->priv = NULL;
+ kfree(felix_port);
+}
+
/* Hardware initialization done here so that we can allocate structures with
* devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
* us to allocate structures twice (leak memory) and map PCI memory twice
@@ -1102,6 +1202,12 @@ static int felix_setup(struct dsa_switch *ds)
}
}
+ felix->xmit_worker = kthread_create_worker(0, "felix_xmit");
+ if (IS_ERR(felix->xmit_worker)) {
+ err = PTR_ERR(felix->xmit_worker);
+ goto out_deinit_timestamp;
+ }
+
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
@@ -1112,6 +1218,14 @@ static int felix_setup(struct dsa_switch *ds)
* bits of vlan tag.
*/
felix_port_qos_map_init(ocelot, port);
+
+ err = felix_port_setup_tagger_data(ds, port);
+ if (err) {
+ dev_err(ds->dev,
+ "port %d failed to set up tagger data: %pe\n",
+ port, ERR_PTR(err));
+ goto out_deinit_ports;
+ }
}
err = ocelot_devlink_sb_register(ocelot);
@@ -1126,6 +1240,7 @@ static int felix_setup(struct dsa_switch *ds)
* there's no real point in checking for errors.
*/
felix_set_tag_protocol(ds, port, felix->tag_proto);
+ break;
}
ds->mtu_enforcement_ingress = true;
@@ -1138,9 +1253,13 @@ out_deinit_ports:
if (dsa_is_unused_port(ds, port))
continue;
+ felix_port_teardown_tagger_data(ds, port);
ocelot_deinit_port(ocelot, port);
}
+ kthread_destroy_worker(felix->xmit_worker);
+
+out_deinit_timestamp:
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
@@ -1162,19 +1281,23 @@ static void felix_teardown(struct dsa_switch *ds)
continue;
felix_del_tag_protocol(ds, port, felix->tag_proto);
+ break;
}
- ocelot_devlink_sb_unregister(ocelot);
- ocelot_deinit_timestamp(ocelot);
- ocelot_deinit(ocelot);
-
for (port = 0; port < ocelot->num_phys_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
+ felix_port_teardown_tagger_data(ds, port);
ocelot_deinit_port(ocelot, port);
}
+ kthread_destroy_worker(felix->xmit_worker);
+
+ ocelot_devlink_sb_unregister(ocelot);
+ ocelot_deinit_timestamp(ocelot);
+ ocelot_deinit(ocelot);
+
if (felix->info->mdio_bus_free)
felix->info->mdio_bus_free(ocelot);
}
@@ -1291,8 +1414,12 @@ static void felix_txtstamp(struct dsa_switch *ds, int port,
if (!ocelot->ptp)
return;
- if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone))
+ if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
+ dev_err_ratelimited(ds->dev,
+ "port %d delivering skb without TX timestamp\n",
+ port);
return;
+ }
if (clone)
OCELOT_SKB_CB(skb)->clone = clone;
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 5854bab43327..be3e42e135c0 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright 2019 NXP Semiconductors
+/* Copyright 2019 NXP
*/
#ifndef _MSCC_FELIX_H
#define _MSCC_FELIX_H
@@ -62,6 +62,7 @@ struct felix {
resource_size_t switch_base;
resource_size_t imdio_base;
enum dsa_tag_protocol tag_proto;
+ struct kthread_worker *xmit_worker;
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index f966a253d1c7..11b42fd812e4 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Copyright 2017 Microsemi Corporation
- * Copyright 2018-2019 NXP Semiconductors
+ * Copyright 2018-2019 NXP
*/
#include <linux/fsl/enetc_mdio.h>
#include <soc/mscc/ocelot_qsys.h>
@@ -1472,9 +1472,10 @@ err_pci_enable:
static void felix_pci_remove(struct pci_dev *pdev)
{
- struct felix *felix;
+ struct felix *felix = pci_get_drvdata(pdev);
- felix = pci_get_drvdata(pdev);
+ if (!felix)
+ return;
dsa_unregister_switch(felix->ds);
@@ -1482,6 +1483,20 @@ static void felix_pci_remove(struct pci_dev *pdev)
kfree(felix);
pci_disable_device(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void felix_pci_shutdown(struct pci_dev *pdev)
+{
+ struct felix *felix = pci_get_drvdata(pdev);
+
+ if (!felix)
+ return;
+
+ dsa_switch_shutdown(felix->ds);
+
+ pci_set_drvdata(pdev, NULL);
}
static struct pci_device_id felix_ids[] = {
@@ -1498,6 +1513,7 @@ static struct pci_driver felix_vsc9959_pci_driver = {
.id_table = felix_ids,
.probe = felix_pci_probe,
.remove = felix_pci_remove,
+ .shutdown = felix_pci_shutdown,
};
module_pci_driver(felix_vsc9959_pci_driver);
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index deae923c8b7a..de1d34a1f1e4 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -1245,18 +1245,33 @@ err_alloc_felix:
static int seville_remove(struct platform_device *pdev)
{
- struct felix *felix;
+ struct felix *felix = platform_get_drvdata(pdev);
- felix = platform_get_drvdata(pdev);
+ if (!felix)
+ return 0;
dsa_unregister_switch(felix->ds);
kfree(felix->ds);
kfree(felix);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
+static void seville_shutdown(struct platform_device *pdev)
+{
+ struct felix *felix = platform_get_drvdata(pdev);
+
+ if (!felix)
+ return;
+
+ dsa_switch_shutdown(felix->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static const struct of_device_id seville_of_match[] = {
{ .compatible = "mscc,vsc9953-switch" },
{ },
@@ -1266,6 +1281,7 @@ MODULE_DEVICE_TABLE(of, seville_of_match);
static struct platform_driver seville_vsc9953_driver = {
.probe = seville_probe,
.remove = seville_remove,
+ .shutdown = seville_shutdown,
.driver = {
.name = "mscc_seville",
.of_match_table = of_match_ptr(seville_of_match),
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 563d8a279030..a6bfb6abc51a 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -1083,6 +1083,9 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
unsigned int i;
+ if (!priv)
+ return;
+
for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
struct ar9331_sw_port *port = &priv->port[i];
@@ -1094,6 +1097,20 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(&priv->ds);
reset_control_assert(priv->sw_reset);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void ar9331_sw_shutdown(struct mdio_device *mdiodev)
+{
+ struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(&priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id ar9331_sw_of_match[] = {
@@ -1104,6 +1121,7 @@ static const struct of_device_id ar9331_sw_of_match[] = {
static struct mdio_driver ar9331_sw_mdio_driver = {
.probe = ar9331_sw_probe,
.remove = ar9331_sw_remove,
+ .shutdown = ar9331_sw_shutdown,
.mdiodrv.driver = {
.name = AR9331_SW_NAME,
.of_match_table = ar9331_sw_of_match,
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index bda5a9bf4f52..ea7f12778922 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -889,62 +889,183 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
}
static int
-qca8k_setup_of_rgmii_delay(struct qca8k_priv *priv)
+qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
{
- struct device_node *port_dn;
- phy_interface_t mode;
- struct dsa_port *dp;
- u32 val;
+ u32 mask = 0;
+ int ret = 0;
- /* CPU port is already checked */
- dp = dsa_to_port(priv->ds, 0);
+ /* SoC-specific settings for ipq8064. If more devices require
+ * this, consider adding a dedicated binding.
+ */
+ if (of_machine_is_compatible("qcom,ipq8064"))
+ mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
+
+ /* SoC specific settings for ipq8065 */
+ if (of_machine_is_compatible("qcom,ipq8065"))
+ mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
+
+ if (mask) {
+ ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
+ QCA8K_MAC_PWR_RGMII0_1_8V |
+ QCA8K_MAC_PWR_RGMII1_1_8V,
+ mask);
+ }
+
+ return ret;
+}
- port_dn = dp->dn;
+static int qca8k_find_cpu_port(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = ds->priv;
- /* Check if port 0 is set to the correct type */
- of_get_phy_mode(port_dn, &mode);
- if (mode != PHY_INTERFACE_MODE_RGMII_ID &&
- mode != PHY_INTERFACE_MODE_RGMII_RXID &&
- mode != PHY_INTERFACE_MODE_RGMII_TXID) {
+ /* Find the connected CPU port. Valid ports are 0 or 6 */
+ if (dsa_is_cpu_port(ds, 0))
return 0;
+
+ dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
+
+ if (dsa_is_cpu_port(ds, 6))
+ return 6;
+
+ return -EINVAL;
+}
+
+static int
+qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
+{
+ struct device_node *node = priv->dev->of_node;
+ const struct qca8k_match_data *data;
+ u32 val = 0;
+ int ret;
+
+ /* The QCA8327 requires the correct package mode to be set.
+ * Its bigger brother, the QCA8328, has the 176-pin layout.
+ * This should be applied by default, but set it just to make sure.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ data = of_device_get_match_data(priv->dev);
+
+ /* Set the correct package of 148 pin for QCA8327 */
+ if (data->reduced_package)
+ val |= QCA8327_PWS_PACKAGE148_EN;
+
+ ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
+ val);
+ if (ret)
+ return ret;
}
- switch (mode) {
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- if (of_property_read_u32(port_dn, "rx-internal-delay-ps", &val))
- val = 2;
- else
- /* Switch regs accept value in ns, convert ps to ns */
- val = val / 1000;
+ if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
+ val |= QCA8K_PWS_POWER_ON_SEL;
- if (val > QCA8K_MAX_DELAY) {
- dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
- val = 3;
+ if (of_property_read_bool(node, "qca,led-open-drain")) {
+ if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
+ dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
+ return -EINVAL;
}
- priv->rgmii_rx_delay = val;
- /* Stop here if we need to check only for rx delay */
- if (mode != PHY_INTERFACE_MODE_RGMII_ID)
- break;
+ val |= QCA8K_PWS_LED_OPEN_EN_CSR;
+ }
- fallthrough;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- if (of_property_read_u32(port_dn, "tx-internal-delay-ps", &val))
- val = 1;
- else
- /* Switch regs accept value in ns, convert ps to ns */
- val = val / 1000;
+ return qca8k_rmw(priv, QCA8K_REG_PWS,
+ QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
+ val);
+}
- if (val > QCA8K_MAX_DELAY) {
- dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
- val = 3;
- }
+static int
+qca8k_parse_port_config(struct qca8k_priv *priv)
+{
+ int port, cpu_port_index = -1, ret;
+ struct device_node *port_dn;
+ phy_interface_t mode;
+ struct dsa_port *dp;
+ u32 delay;
- priv->rgmii_tx_delay = val;
- break;
- default:
- return 0;
+ /* We have 2 CPU ports. Check them */
+ for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) {
+ /* Skip all ports except the possible CPU ports 0 and 6 */
+ if (port != 0 && port != 6)
+ continue;
+
+ dp = dsa_to_port(priv->ds, port);
+ port_dn = dp->dn;
+ cpu_port_index++;
+
+ if (!of_device_is_available(port_dn))
+ continue;
+
+ ret = of_get_phy_mode(port_dn, &mode);
+ if (ret)
+ continue;
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_SGMII:
+ delay = 0;
+
+ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
+ /* Switch regs accept value in ns, convert ps to ns */
+ delay = delay / 1000;
+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID)
+ delay = 1;
+
+ if (delay > QCA8K_MAX_DELAY) {
+ dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+
+ priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
+
+ delay = 0;
+
+ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
+ /* Switch regs accept value in ns, convert ps to ns */
+ delay = delay / 1000;
+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ delay = 2;
+
+ if (delay > QCA8K_MAX_DELAY) {
+ dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+
+ priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
+
+ /* Skip sgmii parsing for rgmii* mode */
+ if (mode == PHY_INTERFACE_MODE_RGMII ||
+ mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ break;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
+ priv->ports_config.sgmii_tx_clk_falling_edge = true;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
+ priv->ports_config.sgmii_rx_clk_falling_edge = true;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
+ priv->ports_config.sgmii_enable_pll = true;
+
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
+ priv->ports_config.sgmii_enable_pll = false;
+ }
+
+ if (priv->switch_revision < 2)
+ dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
+ }
+
+ break;
+ default:
+ continue;
+ }
}
return 0;
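In compact form, the convert-and-clamp rule applied to both delay directions above (minus the warning; the helper name is illustrative): a value given in picoseconds via the standard *-internal-delay-ps properties becomes whole nanoseconds, capped at QCA8K_MAX_DELAY.

static u32 example_delay_to_ns(u32 delay_ps)
{
	/* The switch registers accept whole nanoseconds only */
	u32 ns = delay_ps / 1000;

	return min_t(u32, ns, QCA8K_MAX_DELAY);	/* cap at 3 ns */
}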
@@ -954,15 +1075,20 @@ static int
qca8k_setup(struct dsa_switch *ds)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int ret, i;
+ int cpu_port, ret, i;
u32 mask;
- /* Make sure that port 0 is the cpu port */
- if (!dsa_is_cpu_port(ds, 0)) {
- dev_err(priv->dev, "port 0 is not the CPU port");
- return -EINVAL;
+ cpu_port = qca8k_find_cpu_port(ds);
+ if (cpu_port < 0) {
+ dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
+ return cpu_port;
}
+ /* Parse the CPU port config, to be used later in phylink mac_config */
+ ret = qca8k_parse_port_config(priv);
+ if (ret)
+ return ret;
+
mutex_init(&priv->reg_mutex);
/* Start by setting up the register mapping */
@@ -975,7 +1101,11 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
return ret;
- ret = qca8k_setup_of_rgmii_delay(priv);
+ ret = qca8k_setup_of_pws_reg(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_mac_pwr_sel(priv);
if (ret)
return ret;
@@ -992,41 +1122,49 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
dev_warn(priv->dev, "mib init failed");
- /* Enable QCA header mode on the cpu port */
- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT),
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
- if (ret) {
- dev_err(priv->dev, "failed enabling QCA header mode");
- return ret;
- }
-
- /* Disable forwarding by default on all ports */
+ /* Initial setup of all ports */
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER, 0);
if (ret)
return ret;
- }
- /* Disable MAC by default on all ports */
- for (i = 1; i < QCA8K_NUM_PORTS; i++)
- qca8k_port_set_status(priv, i, 0);
+ /* Enable QCA header mode on all cpu ports */
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
+ if (ret) {
+ dev_err(priv->dev, "failed enabling QCA header mode");
+ return ret;
+ }
+ }
- /* Forward all unknown frames to CPU port for Linux processing */
+ /* Disable MAC by default on all user ports */
+ if (dsa_is_user_port(ds, i))
+ qca8k_port_set_status(priv, i, 0);
+ }
+
+ /* Forward all unknown frames to the CPU port for Linux processing.
+ * Note that in a multi-CPU config, only one port should be set
+ * as the destination for IGMP, unknown, multicast and broadcast
+ * packets.
+ */
ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
if (ret)
return ret;
- /* Setup connection between CPU port & user ports */
+ /* Set up the connection between the CPU port and the user ports,
+ * and apply the port-specific switch configuration
+ */
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
/* CPU port gets connected to all user ports of the switch */
if (dsa_is_cpu_port(ds, i)) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
if (ret)
return ret;
@@ -1038,7 +1176,7 @@ qca8k_setup(struct dsa_switch *ds)
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER,
- BIT(QCA8K_CPU_PORT));
+ BIT(cpu_port));
if (ret)
return ret;
@@ -1063,16 +1201,14 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
return ret;
}
- }
- /* The port 5 of the qca8337 have some problem in flood condition. The
- * original legacy driver had some specific buffer and priority settings
- * for the different port suggested by the QCA switch team. Add this
- * missing settings to improve switch stability under load condition.
- * This problem is limited to qca8337 and other qca8k switch are not affected.
- */
- if (priv->switch_id == QCA8K_ID_QCA8337) {
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* Port 5 of the qca8337 has some problems in flood conditions. The
+ * original legacy driver had specific buffer and priority settings
+ * for the different ports, suggested by the QCA switch team. Add these
+ * missing settings to improve switch stability under load conditions.
+ * This problem is limited to the qca8337; other qca8k switches are
+ * not affected.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8337) {
switch (i) {
/* The 2 CPU port and port 5 requires some different
* priority than any other ports.
@@ -1108,6 +1244,12 @@ qca8k_setup(struct dsa_switch *ds)
QCA8K_PORT_HOL_CTRL1_WRED_EN,
mask);
}
+
+ /* Set initial MTU for every port.
+ * We have only have a general MTU setting. So track
+ * every port and set the max across all port.
+ */
+ priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
}
/* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
@@ -1121,8 +1263,6 @@ qca8k_setup(struct dsa_switch *ds)
}
/* Setup our port MTUs to match power on defaults */
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
if (ret)
dev_warn(priv->dev, "failed setting MTU settings");
@@ -1137,12 +1277,53 @@ qca8k_setup(struct dsa_switch *ds)
}
static void
+qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
+ u32 reg)
+{
+ u32 delay, val = 0;
+ int ret;
+
+ /* The delay can be declared in 3 different ways:
+ * the phy mode set to rgmii plus the standard internal-delay
+ * binding, or the rgmii-id, or the rgmii-txid/rgmii-rxid phy
+ * modes. The parse logic sets a non-zero delay only when one of
+ * these 3 ways is used; in all other cases the delay is not
+ * enabled. With ID or TX/RXID the delay is enabled and set to
+ * the default and recommended value.
+ */
+ if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
+ delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
+
+ val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
+ }
+
+ if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
+ delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
+
+ val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
+ }
+
+ /* Set RGMII delay based on the selected values */
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
+ val);
+ if (ret)
+ dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
+ cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
+}
+
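As a worked example of the helper above, a CPU port parsed with a 1 ns tx and 2 ns rx delay would be programmed with (field layout per the qca8k.h hunk further down):

	u32 val = QCA8K_PORT_PAD_RGMII_TX_DELAY(1) |	/* bits 23:22 = 0b01 */
		  QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |	/* bit 25 */
		  QCA8K_PORT_PAD_RGMII_RX_DELAY(2) |	/* bits 21:20 = 0b10 */
		  QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;	/* bit 24 */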
+static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{
struct qca8k_priv *priv = ds->priv;
+ int cpu_port_index, ret;
u32 reg, val;
- int ret;
switch (port) {
case 0: /* 1st CPU port */
@@ -1154,6 +1335,7 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
return;
reg = QCA8K_REG_PORT0_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT0;
break;
case 1:
case 2:
@@ -1172,6 +1354,7 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
return;
reg = QCA8K_REG_PORT6_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT6;
break;
default:
dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
@@ -1186,23 +1369,18 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
switch (state->interface) {
case PHY_INTERFACE_MODE_RGMII:
- /* RGMII mode means no delay so don't enable the delay */
- qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
- break;
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
- /* RGMII_ID needs internal delay. This is enabled through
- * PORT5_PAD_CTRL for all ports, rather than individual port
- * registers
+ qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
+
+ /* Configure rgmii delay */
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
+ /* The QCA8337 requires the rgmii rx delay to be set for all
+ * ports. This is enabled through PORT5_PAD_CTRL for all ports,
+ * rather than through individual port registers.
+ */
- qca8k_write(priv, reg,
- QCA8K_PORT_PAD_RGMII_EN |
- QCA8K_PORT_PAD_RGMII_TX_DELAY(priv->rgmii_tx_delay) |
- QCA8K_PORT_PAD_RGMII_RX_DELAY(priv->rgmii_rx_delay) |
- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
- /* QCA8337 requires to set rgmii rx delay */
if (priv->switch_id == QCA8K_ID_QCA8337)
qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
@@ -1227,8 +1405,11 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
if (ret)
return;
- val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
- QCA8K_SGMII_EN_TX | QCA8K_SGMII_EN_SD;
+ val |= QCA8K_SGMII_EN_SD;
+
+ if (priv->ports_config.sgmii_enable_pll)
+ val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
+ QCA8K_SGMII_EN_TX;
if (dsa_is_cpu_port(ds, port)) {
/* CPU port, we're talking to the CPU MAC, be a PHY */
@@ -1243,6 +1424,35 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
}
qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+
+ /* For qca8327/qca8328/qca8334/qca8338 the sgmii port is unique,
+ * and the falling-edge setting is written in the PORT0 PAD reg
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8327 ||
+ priv->switch_id == QCA8K_ID_QCA8337)
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+
+ val = 0;
+
+ /* SGMII Clock phase configuration */
+ if (priv->ports_config.sgmii_rx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
+
+ if (priv->ports_config.sgmii_tx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
+
+ if (val)
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
+ QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
+ val);
+
+ /* The original code reports port instability, as SGMII also
+ * requires the delay to be set. Apply the advised values here,
+ * or take them from DT.
+ */
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
break;
default:
dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
@@ -1522,10 +1732,15 @@ static int
qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int port_mask = BIT(QCA8K_CPU_PORT);
+ int port_mask, cpu_port;
int i, ret;
- for (i = 1; i < QCA8K_NUM_PORTS; i++) {
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ port_mask = BIT(cpu_port);
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
if (dsa_to_port(ds, i)->bridge_dev != br)
continue;
/* Add this port to the portvlan mask of the other ports
@@ -1551,9 +1766,13 @@ static void
qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int i;
+ int cpu_port, i;
- for (i = 1; i < QCA8K_NUM_PORTS; i++) {
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
if (dsa_to_port(ds, i)->bridge_dev != br)
continue;
/* Remove this port from the portvlan mask of the other ports
@@ -1568,7 +1787,7 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
* this port
*/
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, BIT(QCA8K_CPU_PORT));
+ QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
}
static int
@@ -1880,10 +2099,27 @@ qca8k_sw_remove(struct mdio_device *mdiodev)
struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
int i;
+ if (!priv)
+ return;
+
for (i = 0; i < QCA8K_NUM_PORTS; i++)
qca8k_port_set_status(priv, i, 0);
dsa_unregister_switch(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
#ifdef CONFIG_PM_SLEEP
@@ -1922,7 +2158,12 @@ static int qca8k_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
qca8k_suspend, qca8k_resume);
-static const struct qca8k_match_data qca832x = {
+static const struct qca8k_match_data qca8327 = {
+ .id = QCA8K_ID_QCA8327,
+ .reduced_package = true,
+};
+
+static const struct qca8k_match_data qca8328 = {
.id = QCA8K_ID_QCA8327,
};
@@ -1931,7 +2172,8 @@ static const struct qca8k_match_data qca833x = {
};
static const struct of_device_id qca8k_of_match[] = {
- { .compatible = "qca,qca8327", .data = &qca832x },
+ { .compatible = "qca,qca8327", .data = &qca8327 },
+ { .compatible = "qca,qca8328", .data = &qca8328 },
{ .compatible = "qca,qca8334", .data = &qca833x },
{ .compatible = "qca,qca8337", .data = &qca833x },
{ /* sentinel */ },
@@ -1940,6 +2182,7 @@ static const struct of_device_id qca8k_of_match[] = {
static struct mdio_driver qca8kmdio_driver = {
.probe = qca8k_sw_probe,
.remove = qca8k_sw_remove,
+ .shutdown = qca8k_sw_shutdown,
.mdiodrv.driver = {
.name = "qca8k",
.of_match_table = qca8k_of_match,
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index ed3b05ad6745..e10571a398c9 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -13,6 +13,7 @@
#include <linux/gpio.h>
#define QCA8K_NUM_PORTS 7
+#define QCA8K_NUM_CPU_PORTS 2
#define QCA8K_MAX_MTU 9000
#define PHY_ID_QCA8327 0x004dd034
@@ -24,8 +25,6 @@
#define QCA8K_NUM_FDB_RECORDS 2048
-#define QCA8K_CPU_PORT 0
-
#define QCA8K_PORT_VID_DEF 1
/* Global control registers */
@@ -35,16 +34,26 @@
#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8)
#define QCA8K_REG_PORT0_PAD_CTRL 0x004
+#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
+#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18)
#define QCA8K_REG_PORT5_PAD_CTRL 0x008
#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22)
#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20)
#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20)
#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
#define QCA8K_MAX_DELAY 3
#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
#define QCA8K_REG_PWS 0x010
+#define QCA8K_PWS_POWER_ON_SEL BIT(31)
+/* This reg is only valid for QCA832x and toggles the package
+ * type from 176 pin (the default) to the 148 pin used on QCA8327
+ */
+#define QCA8327_PWS_PACKAGE148_EN BIT(30)
+#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24)
#define QCA8K_PWS_SERDES_AEN_DIS BIT(7)
#define QCA8K_REG_MODULE_EN 0x030
#define QCA8K_MODULE_EN_MIB BIT(0)
@@ -100,6 +109,11 @@
#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22)
#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22)
+/* MAC_PWR_SEL registers */
+#define QCA8K_REG_MAC_PWR_SEL 0x0e4
+#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18)
+#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19)
+
/* EEE control registers */
#define QCA8K_REG_EEE_CTRL 0x100
#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
@@ -248,14 +262,27 @@ struct ar8xxx_port_status {
struct qca8k_match_data {
u8 id;
+ bool reduced_package;
+};
+
+enum {
+ QCA8K_CPU_PORT0,
+ QCA8K_CPU_PORT6,
+};
+
+struct qca8k_ports_config {
+ bool sgmii_rx_clk_falling_edge;
+ bool sgmii_tx_clk_falling_edge;
+ bool sgmii_enable_pll;
+ u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
+ u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
};
struct qca8k_priv {
u8 switch_id;
u8 switch_revision;
- u8 rgmii_tx_delay;
- u8 rgmii_rx_delay;
bool legacy_phy_port_mapping;
+ struct qca8k_ports_config ports_config;
struct regmap *regmap;
struct mii_bus *bus;
struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
index 8e49d4f85d48..c66ebd0ee217 100644
--- a/drivers/net/dsa/realtek-smi-core.c
+++ b/drivers/net/dsa/realtek-smi-core.c
@@ -368,7 +368,7 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
smi->slave_mii_bus->parent = smi->dev;
smi->ds->slave_mii_bus = smi->slave_mii_bus;
- ret = of_mdiobus_register(smi->slave_mii_bus, mdio_np);
+ ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np);
if (ret) {
dev_err(smi->dev, "unable to register MDIO bus %s\n",
smi->slave_mii_bus->id);
@@ -464,16 +464,33 @@ static int realtek_smi_probe(struct platform_device *pdev)
static int realtek_smi_remove(struct platform_device *pdev)
{
- struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
+ struct realtek_smi *smi = platform_get_drvdata(pdev);
+
+ if (!smi)
+ return 0;
dsa_unregister_switch(smi->ds);
if (smi->slave_mii_bus)
of_node_put(smi->slave_mii_bus->dev.of_node);
gpiod_set_value(smi->reset, 1);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
+static void realtek_smi_shutdown(struct platform_device *pdev)
+{
+ struct realtek_smi *smi = platform_get_drvdata(pdev);
+
+ if (!smi)
+ return;
+
+ dsa_switch_shutdown(smi->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static const struct of_device_id realtek_smi_of_match[] = {
{
.compatible = "realtek,rtl8366rb",
@@ -484,6 +501,10 @@ static const struct of_device_id realtek_smi_of_match[] = {
.compatible = "realtek,rtl8366s",
.data = NULL,
},
+ {
+ .compatible = "realtek,rtl8365mb",
+ .data = &rtl8365mb_variant,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
@@ -495,6 +516,7 @@ static struct platform_driver realtek_smi_driver = {
},
.probe = realtek_smi_probe,
.remove = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
};
module_platform_driver(realtek_smi_driver);
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
index fcf465f7f922..5bfa53e2480a 100644
--- a/drivers/net/dsa/realtek-smi-core.h
+++ b/drivers/net/dsa/realtek-smi-core.h
@@ -129,9 +129,6 @@ int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
int rtl8366_reset_vlan(struct realtek_smi *smi);
-int rtl8366_init_vlan(struct realtek_smi *smi);
-int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack);
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack);
@@ -143,5 +140,6 @@ int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
extern const struct realtek_smi_variant rtl8366rb_variant;
+extern const struct realtek_smi_variant rtl8365mb_variant;
#endif /* _REALTEK_SMI_H */
diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/rtl8365mb.c
new file mode 100644
index 000000000000..baaae97283c5
--- /dev/null
+++ b/drivers/net/dsa/rtl8365mb.c
@@ -0,0 +1,1982 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Realtek SMI subdriver for the Realtek RTL8365MB-VC ethernet switch.
+ *
+ * Copyright (C) 2021 Alvin Šipraga <[email protected]>
+ * Copyright (C) 2021 Michael Rasmussen <[email protected]>
+ *
+ * The RTL8365MB-VC is a 4+1 port 10/100/1000M switch controller. It includes 4
+ * integrated PHYs for the user facing ports, and an extension interface which
+ * can be connected to the CPU - or another PHY - via either MII, RMII, or
+ * RGMII. The switch is configured via the Realtek Simple Management Interface
+ * (SMI), which uses the MDIO/MDC lines.
+ *
+ * Below is a simplified block diagram of the chip and its relevant interfaces.
+ *
+ * .-----------------------------------.
+ * | |
+ * UTP <---------------> Giga PHY <-> PCS <-> P0 GMAC |
+ * UTP <---------------> Giga PHY <-> PCS <-> P1 GMAC |
+ * UTP <---------------> Giga PHY <-> PCS <-> P2 GMAC |
+ * UTP <---------------> Giga PHY <-> PCS <-> P3 GMAC |
+ * | |
+ * CPU/PHY <-MII/RMII/RGMII---> Extension <---> Extension |
+ * | interface 1 GMAC 1 |
+ * | |
+ * SMI driver/ <-MDC/SCL---> Management ~~~~~~~~~~~~~~ |
+ * EEPROM <-MDIO/SDA--> interface ~REALTEK ~~~~~ |
+ * | ~RTL8365MB ~~~ |
+ * | ~GXXXC TAIWAN~ |
+ * GPIO <--------------> Reset ~~~~~~~~~~~~~~ |
+ * | |
+ * Interrupt <----------> Link UP/DOWN events |
+ * controller | |
+ * '-----------------------------------'
+ *
+ * The driver uses DSA to integrate the 4 user and 1 extension ports into the
+ * kernel. Netdevices are created for the user ports, as are PHY devices for
+ * their integrated PHYs. The device tree firmware should also specify the link
+ * partner of the extension port - either via a fixed-link or other phy-handle.
+ * See the device tree bindings for more detailed information. Note that the
+ * driver has only been tested with a fixed-link, but in principle it should not
+ * matter.
+ *
+ * NOTE: Currently, only the RGMII interface is implemented in this driver.
+ *
+ * The interrupt line is asserted on link UP/DOWN events. The driver creates a
+ * custom irqchip to handle this interrupt and demultiplex the events by reading
+ * the status registers via SMI. Interrupts are then propagated to the relevant
+ * PHY device.
+ *
+ * The EEPROM contains initial register values which the chip will read over I2C
+ * upon hardware reset. It is also possible to omit the EEPROM. In both cases,
+ * the driver will manually reprogram some registers using jam tables to reach
+ * an initial state defined by the vendor driver.
+ *
+ * This Linux driver is written based on an OS-agnostic vendor driver from
+ * Realtek. The reference GPL-licensed sources can be found in the OpenWrt
+ * source tree under the name rtl8367c. The vendor driver claims to support a
+ * number of similar switch controllers from Realtek, but the only hardware we
+ * have is the RTL8365MB-VC. Moreover, there does not seem to be any chip under
+ * the name RTL8367C. Although one wishes that the 'C' stood for some kind of
+ * common hardware revision, there exist examples of chips with the suffix -VC
+ * which are explicitly not supported by the rtl8367c driver and which instead
+ * require the rtl8367d vendor driver. With all this uncertainty, the driver has
+ * been modestly named rtl8365mb. Future implementors may wish to rename things
+ * accordingly.
+ *
+ * In the same family of chips, some carry up to 8 user ports and up to 2
+ * extension ports. Where possible this driver tries to make things generic, but
+ * more work must be done to support these configurations. According to
+ * documentation from Realtek, the family should include the following chips:
+ *
+ * - RTL8363NB
+ * - RTL8363NB-VB
+ * - RTL8363SC
+ * - RTL8363SC-VB
+ * - RTL8364NB
+ * - RTL8364NB-VB
+ * - RTL8365MB-VC
+ * - RTL8366SC
+ * - RTL8367RB-VB
+ * - RTL8367SB
+ * - RTL8367S
+ * - RTL8370MB
+ * - RTL8310SR
+ *
+ * Some of the register logic for these additional chips has been skipped over
+ * while implementing this driver. It is therefore not possible to assume that
+ * things will work out-of-the-box for other chips, and a careful review of the
+ * vendor driver may be needed to expand support. The RTL8365MB-VC seems to be
+ * one of the simpler chips.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/mutex.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/if_bridge.h>
+
+#include "realtek-smi-core.h"
+
+/* Chip-specific data and limits */
+#define RTL8365MB_CHIP_ID_8365MB_VC 0x6367
+#define RTL8365MB_CPU_PORT_NUM_8365MB_VC 6
+#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112
+
+/* Family-specific data and limits */
+#define RTL8365MB_NUM_PHYREGS 32
+#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
+#define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
+
+/* Chip identification registers */
+#define RTL8365MB_CHIP_ID_REG 0x1300
+
+#define RTL8365MB_CHIP_VER_REG 0x1301
+
+#define RTL8365MB_MAGIC_REG 0x13C2
+#define RTL8365MB_MAGIC_VALUE 0x0249
+
+/* Chip reset register */
+#define RTL8365MB_CHIP_RESET_REG 0x1322
+#define RTL8365MB_CHIP_RESET_SW_MASK 0x0002
+#define RTL8365MB_CHIP_RESET_HW_MASK 0x0001
+
+/* Interrupt polarity register */
+#define RTL8365MB_INTR_POLARITY_REG 0x1100
+#define RTL8365MB_INTR_POLARITY_MASK 0x0001
+#define RTL8365MB_INTR_POLARITY_HIGH 0
+#define RTL8365MB_INTR_POLARITY_LOW 1
+
+/* Interrupt control/status register - enable/check specific interrupt types */
+#define RTL8365MB_INTR_CTRL_REG 0x1101
+#define RTL8365MB_INTR_STATUS_REG 0x1102
+#define RTL8365MB_INTR_SLIENT_START_2_MASK 0x1000
+#define RTL8365MB_INTR_SLIENT_START_MASK 0x0800
+#define RTL8365MB_INTR_ACL_ACTION_MASK 0x0200
+#define RTL8365MB_INTR_CABLE_DIAG_FIN_MASK 0x0100
+#define RTL8365MB_INTR_INTERRUPT_8051_MASK 0x0080
+#define RTL8365MB_INTR_LOOP_DETECTION_MASK 0x0040
+#define RTL8365MB_INTR_GREEN_TIMER_MASK 0x0020
+#define RTL8365MB_INTR_SPECIAL_CONGEST_MASK 0x0010
+#define RTL8365MB_INTR_SPEED_CHANGE_MASK 0x0008
+#define RTL8365MB_INTR_LEARN_OVER_MASK 0x0004
+#define RTL8365MB_INTR_METER_EXCEEDED_MASK 0x0002
+#define RTL8365MB_INTR_LINK_CHANGE_MASK 0x0001
+#define RTL8365MB_INTR_ALL_MASK \
+ (RTL8365MB_INTR_SLIENT_START_2_MASK | \
+ RTL8365MB_INTR_SLIENT_START_MASK | \
+ RTL8365MB_INTR_ACL_ACTION_MASK | \
+ RTL8365MB_INTR_CABLE_DIAG_FIN_MASK | \
+ RTL8365MB_INTR_INTERRUPT_8051_MASK | \
+ RTL8365MB_INTR_LOOP_DETECTION_MASK | \
+ RTL8365MB_INTR_GREEN_TIMER_MASK | \
+ RTL8365MB_INTR_SPECIAL_CONGEST_MASK | \
+ RTL8365MB_INTR_SPEED_CHANGE_MASK | \
+ RTL8365MB_INTR_LEARN_OVER_MASK | \
+ RTL8365MB_INTR_METER_EXCEEDED_MASK | \
+ RTL8365MB_INTR_LINK_CHANGE_MASK)
+
+/* Per-port interrupt type status registers */
+#define RTL8365MB_PORT_LINKDOWN_IND_REG 0x1106
+#define RTL8365MB_PORT_LINKDOWN_IND_MASK 0x07FF
+
+#define RTL8365MB_PORT_LINKUP_IND_REG 0x1107
+#define RTL8365MB_PORT_LINKUP_IND_MASK 0x07FF
+
+/* PHY indirect access registers */
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_REG 0x1F00
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK 0x0002
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ 0
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE 1
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK 0x0001
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE 1
+#define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0)
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(6, 5)
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8)
+#define RTL8365MB_PHY_BASE 0x2000
+#define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03
+#define RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG 0x1F04
+
+/* PHY OCP address prefix register */
+#define RTL8365MB_GPHY_OCP_MSB_0_REG 0x1D15
+#define RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK 0x0FC0
+#define RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK 0xFC00
+
+/* The PHY OCP addresses of PHY registers 0~31 start here */
+#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE 0xA400
+
+/* EXT port interface mode values - used in DIGITAL_INTERFACE_SELECT */
+#define RTL8365MB_EXT_PORT_MODE_DISABLE 0
+#define RTL8365MB_EXT_PORT_MODE_RGMII 1
+#define RTL8365MB_EXT_PORT_MODE_MII_MAC 2
+#define RTL8365MB_EXT_PORT_MODE_MII_PHY 3
+#define RTL8365MB_EXT_PORT_MODE_TMII_MAC 4
+#define RTL8365MB_EXT_PORT_MODE_TMII_PHY 5
+#define RTL8365MB_EXT_PORT_MODE_GMII 6
+#define RTL8365MB_EXT_PORT_MODE_RMII_MAC 7
+#define RTL8365MB_EXT_PORT_MODE_RMII_PHY 8
+#define RTL8365MB_EXT_PORT_MODE_SGMII 9
+#define RTL8365MB_EXT_PORT_MODE_HSGMII 10
+#define RTL8365MB_EXT_PORT_MODE_1000X_100FX 11
+#define RTL8365MB_EXT_PORT_MODE_1000X 12
+#define RTL8365MB_EXT_PORT_MODE_100FX 13
+
+/* EXT port interface mode configuration registers 0~1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extport) \
+ (RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 + \
+ ((_extport) >> 1) * (0x13C3 - 0x1305))
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extport) \
+ (0xF << (((_extport) % 2)))
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extport) \
+ (((_extport) % 2) * 4)
+
+/* EXT port RGMII TX/RX delay configuration registers 1~2 */
+#define RTL8365MB_EXT_RGMXF_REG1 0x1307
+#define RTL8365MB_EXT_RGMXF_REG2 0x13C5
+#define RTL8365MB_EXT_RGMXF_REG(_extport) \
+ (RTL8365MB_EXT_RGMXF_REG1 + \
+ (((_extport) >> 1) * (0x13C5 - 0x1307)))
+#define RTL8365MB_EXT_RGMXF_RXDELAY_MASK 0x0007
+#define RTL8365MB_EXT_RGMXF_TXDELAY_MASK 0x0008
+
+/* External port speed values - used in DIGITAL_INTERFACE_FORCE */
+#define RTL8365MB_PORT_SPEED_10M 0
+#define RTL8365MB_PORT_SPEED_100M 1
+#define RTL8365MB_PORT_SPEED_1000M 2
+
+/* EXT port force configuration registers 0~2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extport) \
+ (RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 + \
+ ((_extport) & 0x1) + \
+ ((((_extport) >> 1) & 0x1) * (0x13C4 - 0x1310)))
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK 0x1000
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_NWAY_MASK 0x0080
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK 0x0040
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK 0x0020
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK 0x0010
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK 0x0004
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK 0x0003
+
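For reference, the register-selection arithmetic above maps the extension ports as follows (derived from the macro, not stated elsewhere in the sources):

/* RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(extport):
 *   extport 0 -> 0x1310, extport 1 -> 0x1311,
 *   extport 2 -> 0x13C4, extport 3 -> 0x13C5
 */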
+/* CPU port mask register - controls which ports are treated as CPU ports */
+#define RTL8365MB_CPU_PORT_MASK_REG 0x1219
+#define RTL8365MB_CPU_PORT_MASK_MASK 0x07FF
+
+/* CPU control register */
+#define RTL8365MB_CPU_CTRL_REG 0x121A
+#define RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK 0x0400
+#define RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK 0x0200
+#define RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK 0x0080
+#define RTL8365MB_CPU_CTRL_TAG_POSITION_MASK 0x0040
+#define RTL8365MB_CPU_CTRL_TRAP_PORT_MASK 0x0038
+#define RTL8365MB_CPU_CTRL_INSERTMODE_MASK 0x0006
+#define RTL8365MB_CPU_CTRL_EN_MASK 0x0001
+
+/* Maximum packet length register */
+#define RTL8365MB_CFG0_MAX_LEN_REG 0x088C
+#define RTL8365MB_CFG0_MAX_LEN_MASK 0x3FFF
+
+/* Port learning limit registers */
+#define RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE 0x0A20
+#define RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(_physport) \
+ (RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE + (_physport))
+
+/* Port isolation (forwarding mask) registers */
+#define RTL8365MB_PORT_ISOLATION_REG_BASE 0x08A2
+#define RTL8365MB_PORT_ISOLATION_REG(_physport) \
+ (RTL8365MB_PORT_ISOLATION_REG_BASE + (_physport))
+#define RTL8365MB_PORT_ISOLATION_MASK 0x07FF
+
+/* MSTP port state registers - indexed by tree instance */
+#define RTL8365MB_MSTI_CTRL_BASE 0x0A00
+#define RTL8365MB_MSTI_CTRL_REG(_msti, _physport) \
+ (RTL8365MB_MSTI_CTRL_BASE + ((_msti) << 1) + ((_physport) >> 3))
+#define RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(_physport) ((_physport) << 1)
+#define RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(_physport) \
+ (0x3 << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET((_physport)))
+
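A worked example of the indexing above, derived from the macros: for MSTI 0 and physical port 5,

/*   reg    = 0x0A00 + (0 << 1) + (5 >> 3) = 0x0A00
 *   offset = 5 << 1 = 10;  mask = 0x3 << 10
 * i.e. two state bits per port, eight ports per 16-bit register and
 * two registers per tree instance.
 */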
+/* MIB counter value registers */
+#define RTL8365MB_MIB_COUNTER_BASE 0x1000
+#define RTL8365MB_MIB_COUNTER_REG(_x) (RTL8365MB_MIB_COUNTER_BASE + (_x))
+
+/* MIB counter address register */
+#define RTL8365MB_MIB_ADDRESS_REG 0x1004
+#define RTL8365MB_MIB_ADDRESS_PORT_OFFSET 0x007C
+#define RTL8365MB_MIB_ADDRESS(_p, _x) \
+ (((RTL8365MB_MIB_ADDRESS_PORT_OFFSET) * (_p) + (_x)) >> 2)
+
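A worked example of the address computation, derived from the macro: ifInOctets (table offset 0) of physical port 1 gives

/*   RTL8365MB_MIB_ADDRESS(1, 0) = (0x007C * 1 + 0) >> 2 = 0x1F
 * The final >> 2 presumably reflects the four-word granularity of
 * the address register.
 */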
+#define RTL8365MB_MIB_CTRL0_REG 0x1005
+#define RTL8365MB_MIB_CTRL0_RESET_MASK 0x0002
+#define RTL8365MB_MIB_CTRL0_BUSY_MASK 0x0001
+
+/* The DSA callback .get_stats64 runs in atomic context, so we are not allowed
+ * to block. On the other hand, accessing MIB counters absolutely requires us to
+ * block. The solution is thus to schedule work which polls the MIB counters
+ * asynchronously and updates some private data, which the callback can then
+ * fetch atomically. Three seconds should be a good enough polling interval.
+ */
+#define RTL8365MB_STATS_INTERVAL_JIFFIES (3 * HZ)
+
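To make the comment above concrete, here is a sketch of the atomic reader this scheme enables, using the rtl8365mb_port fields declared at the end of this file (the function name is illustrative): the delayed work fills in p->stats under p->stats_lock, and the .get_stats64 path only copies that snapshot, so it never blocks.

static void example_get_stats64(struct rtl8365mb_port *p,
				struct rtnl_link_stats64 *s)
{
	unsigned long flags;

	spin_lock_irqsave(&p->stats_lock, flags);
	*s = p->stats;	/* copy the snapshot kept fresh by the poll work */
	spin_unlock_irqrestore(&p->stats_lock, flags);
}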
+enum rtl8365mb_mib_counter_index {
+ RTL8365MB_MIB_ifInOctets,
+ RTL8365MB_MIB_dot3StatsFCSErrors,
+ RTL8365MB_MIB_dot3StatsSymbolErrors,
+ RTL8365MB_MIB_dot3InPauseFrames,
+ RTL8365MB_MIB_dot3ControlInUnknownOpcodes,
+ RTL8365MB_MIB_etherStatsFragments,
+ RTL8365MB_MIB_etherStatsJabbers,
+ RTL8365MB_MIB_ifInUcastPkts,
+ RTL8365MB_MIB_etherStatsDropEvents,
+ RTL8365MB_MIB_ifInMulticastPkts,
+ RTL8365MB_MIB_ifInBroadcastPkts,
+ RTL8365MB_MIB_inMldChecksumError,
+ RTL8365MB_MIB_inIgmpChecksumError,
+ RTL8365MB_MIB_inMldSpecificQuery,
+ RTL8365MB_MIB_inMldGeneralQuery,
+ RTL8365MB_MIB_inIgmpSpecificQuery,
+ RTL8365MB_MIB_inIgmpGeneralQuery,
+ RTL8365MB_MIB_inMldLeaves,
+ RTL8365MB_MIB_inIgmpLeaves,
+ RTL8365MB_MIB_etherStatsOctets,
+ RTL8365MB_MIB_etherStatsUnderSizePkts,
+ RTL8365MB_MIB_etherOversizeStats,
+ RTL8365MB_MIB_etherStatsPkts64Octets,
+ RTL8365MB_MIB_etherStatsPkts65to127Octets,
+ RTL8365MB_MIB_etherStatsPkts128to255Octets,
+ RTL8365MB_MIB_etherStatsPkts256to511Octets,
+ RTL8365MB_MIB_etherStatsPkts512to1023Octets,
+ RTL8365MB_MIB_etherStatsPkts1024to1518Octets,
+ RTL8365MB_MIB_ifOutOctets,
+ RTL8365MB_MIB_dot3StatsSingleCollisionFrames,
+ RTL8365MB_MIB_dot3StatsMultipleCollisionFrames,
+ RTL8365MB_MIB_dot3StatsDeferredTransmissions,
+ RTL8365MB_MIB_dot3StatsLateCollisions,
+ RTL8365MB_MIB_etherStatsCollisions,
+ RTL8365MB_MIB_dot3StatsExcessiveCollisions,
+ RTL8365MB_MIB_dot3OutPauseFrames,
+ RTL8365MB_MIB_ifOutDiscards,
+ RTL8365MB_MIB_dot1dTpPortInDiscards,
+ RTL8365MB_MIB_ifOutUcastPkts,
+ RTL8365MB_MIB_ifOutMulticastPkts,
+ RTL8365MB_MIB_ifOutBroadcastPkts,
+ RTL8365MB_MIB_outOampduPkts,
+ RTL8365MB_MIB_inOampduPkts,
+ RTL8365MB_MIB_inIgmpJoinsSuccess,
+ RTL8365MB_MIB_inIgmpJoinsFail,
+ RTL8365MB_MIB_inMldJoinsSuccess,
+ RTL8365MB_MIB_inMldJoinsFail,
+ RTL8365MB_MIB_inReportSuppressionDrop,
+ RTL8365MB_MIB_inLeaveSuppressionDrop,
+ RTL8365MB_MIB_outIgmpReports,
+ RTL8365MB_MIB_outIgmpLeaves,
+ RTL8365MB_MIB_outIgmpGeneralQuery,
+ RTL8365MB_MIB_outIgmpSpecificQuery,
+ RTL8365MB_MIB_outMldReports,
+ RTL8365MB_MIB_outMldLeaves,
+ RTL8365MB_MIB_outMldGeneralQuery,
+ RTL8365MB_MIB_outMldSpecificQuery,
+ RTL8365MB_MIB_inKnownMulticastPkts,
+ RTL8365MB_MIB_END,
+};
+
+struct rtl8365mb_mib_counter {
+ u32 offset;
+ u32 length;
+ const char *name;
+};
+
+#define RTL8365MB_MAKE_MIB_COUNTER(_offset, _length, _name) \
+ [RTL8365MB_MIB_ ## _name] = { _offset, _length, #_name }
+
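For reference, a line such as RTL8365MB_MAKE_MIB_COUNTER(0, 4, ifInOctets) expands to the designated initializer

	[RTL8365MB_MIB_ifInOctets] = { 0, 4, "ifInOctets" }

which keeps the table below index-aligned with the enum above regardless of the order of its entries.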
+static struct rtl8365mb_mib_counter rtl8365mb_mib_counters[] = {
+ RTL8365MB_MAKE_MIB_COUNTER(0, 4, ifInOctets),
+ RTL8365MB_MAKE_MIB_COUNTER(4, 2, dot3StatsFCSErrors),
+ RTL8365MB_MAKE_MIB_COUNTER(6, 2, dot3StatsSymbolErrors),
+ RTL8365MB_MAKE_MIB_COUNTER(8, 2, dot3InPauseFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(10, 2, dot3ControlInUnknownOpcodes),
+ RTL8365MB_MAKE_MIB_COUNTER(12, 2, etherStatsFragments),
+ RTL8365MB_MAKE_MIB_COUNTER(14, 2, etherStatsJabbers),
+ RTL8365MB_MAKE_MIB_COUNTER(16, 2, ifInUcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(18, 2, etherStatsDropEvents),
+ RTL8365MB_MAKE_MIB_COUNTER(20, 2, ifInMulticastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(22, 2, ifInBroadcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(24, 2, inMldChecksumError),
+ RTL8365MB_MAKE_MIB_COUNTER(26, 2, inIgmpChecksumError),
+ RTL8365MB_MAKE_MIB_COUNTER(28, 2, inMldSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(30, 2, inMldGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(32, 2, inIgmpSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(34, 2, inIgmpGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(36, 2, inMldLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(38, 2, inIgmpLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(40, 4, etherStatsOctets),
+ RTL8365MB_MAKE_MIB_COUNTER(44, 2, etherStatsUnderSizePkts),
+ RTL8365MB_MAKE_MIB_COUNTER(46, 2, etherOversizeStats),
+ RTL8365MB_MAKE_MIB_COUNTER(48, 2, etherStatsPkts64Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(50, 2, etherStatsPkts65to127Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(52, 2, etherStatsPkts128to255Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(54, 2, etherStatsPkts256to511Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(56, 2, etherStatsPkts512to1023Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(58, 2, etherStatsPkts1024to1518Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(60, 4, ifOutOctets),
+ RTL8365MB_MAKE_MIB_COUNTER(64, 2, dot3StatsSingleCollisionFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(66, 2, dot3StatsMultipleCollisionFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(68, 2, dot3StatsDeferredTransmissions),
+ RTL8365MB_MAKE_MIB_COUNTER(70, 2, dot3StatsLateCollisions),
+ RTL8365MB_MAKE_MIB_COUNTER(72, 2, etherStatsCollisions),
+ RTL8365MB_MAKE_MIB_COUNTER(74, 2, dot3StatsExcessiveCollisions),
+ RTL8365MB_MAKE_MIB_COUNTER(76, 2, dot3OutPauseFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(78, 2, ifOutDiscards),
+ RTL8365MB_MAKE_MIB_COUNTER(80, 2, dot1dTpPortInDiscards),
+ RTL8365MB_MAKE_MIB_COUNTER(82, 2, ifOutUcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(84, 2, ifOutMulticastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(86, 2, ifOutBroadcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(88, 2, outOampduPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(90, 2, inOampduPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(92, 4, inIgmpJoinsSuccess),
+ RTL8365MB_MAKE_MIB_COUNTER(96, 2, inIgmpJoinsFail),
+ RTL8365MB_MAKE_MIB_COUNTER(98, 2, inMldJoinsSuccess),
+ RTL8365MB_MAKE_MIB_COUNTER(100, 2, inMldJoinsFail),
+ RTL8365MB_MAKE_MIB_COUNTER(102, 2, inReportSuppressionDrop),
+ RTL8365MB_MAKE_MIB_COUNTER(104, 2, inLeaveSuppressionDrop),
+ RTL8365MB_MAKE_MIB_COUNTER(106, 2, outIgmpReports),
+ RTL8365MB_MAKE_MIB_COUNTER(108, 2, outIgmpLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(110, 2, outIgmpGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(112, 2, outIgmpSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(114, 2, outMldReports),
+ RTL8365MB_MAKE_MIB_COUNTER(116, 2, outMldLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(118, 2, outMldGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(120, 2, outMldSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(122, 2, inKnownMulticastPkts),
+};
+
+static_assert(ARRAY_SIZE(rtl8365mb_mib_counters) == RTL8365MB_MIB_END);
+
+struct rtl8365mb_jam_tbl_entry {
+ u16 reg;
+ u16 val;
+};
+
+/* Lifted from the vendor driver sources */
+static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_8365mb_vc[] = {
+ { 0x13EB, 0x15BB }, { 0x1303, 0x06D6 }, { 0x1304, 0x0700 },
+ { 0x13E2, 0x003F }, { 0x13F9, 0x0090 }, { 0x121E, 0x03CA },
+ { 0x1233, 0x0352 }, { 0x1237, 0x00A0 }, { 0x123A, 0x0030 },
+ { 0x1239, 0x0084 }, { 0x0301, 0x1000 }, { 0x1349, 0x001F },
+ { 0x18E0, 0x4004 }, { 0x122B, 0x241C }, { 0x1305, 0xC000 },
+ { 0x13F0, 0x0000 },
+};
+
+static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_common[] = {
+ { 0x1200, 0x7FCB }, { 0x0884, 0x0003 }, { 0x06EB, 0x0001 },
+ { 0x03FA, 0x0007 }, { 0x08C8, 0x00C0 }, { 0x0A30, 0x020E },
+ { 0x0800, 0x0000 }, { 0x0802, 0x0000 }, { 0x09DA, 0x0013 },
+ { 0x1D32, 0x0002 },
+};
+
+enum rtl8365mb_stp_state {
+ RTL8365MB_STP_STATE_DISABLED = 0,
+ RTL8365MB_STP_STATE_BLOCKING = 1,
+ RTL8365MB_STP_STATE_LEARNING = 2,
+ RTL8365MB_STP_STATE_FORWARDING = 3,
+};
+
+enum rtl8365mb_cpu_insert {
+ RTL8365MB_CPU_INSERT_TO_ALL = 0,
+ RTL8365MB_CPU_INSERT_TO_TRAPPING = 1,
+ RTL8365MB_CPU_INSERT_TO_NONE = 2,
+};
+
+enum rtl8365mb_cpu_position {
+ RTL8365MB_CPU_POS_AFTER_SA = 0,
+ RTL8365MB_CPU_POS_BEFORE_CRC = 1,
+};
+
+enum rtl8365mb_cpu_format {
+ RTL8365MB_CPU_FORMAT_8BYTES = 0,
+ RTL8365MB_CPU_FORMAT_4BYTES = 1,
+};
+
+enum rtl8365mb_cpu_rxlen {
+ RTL8365MB_CPU_RXLEN_72BYTES = 0,
+ RTL8365MB_CPU_RXLEN_64BYTES = 1,
+};
+
+/**
+ * struct rtl8365mb_cpu - CPU port configuration
+ * @enable: enable/disable hardware insertion of CPU tag in switch->CPU frames
+ * @mask: port mask of ports that should parse CPU tags
+ * @trap_port: forward trapped frames to this port
+ * @insert: CPU tag insertion mode in switch->CPU frames
+ * @position: position of CPU tag in frame
+ * @rx_length: minimum CPU RX length
+ * @format: CPU tag format
+ *
+ * Represents the CPU tagging and CPU port configuration of the switch. These
+ * settings are configurable at runtime.
+ */
+struct rtl8365mb_cpu {
+ bool enable;
+ u32 mask;
+ u32 trap_port;
+ enum rtl8365mb_cpu_insert insert;
+ enum rtl8365mb_cpu_position position;
+ enum rtl8365mb_cpu_rxlen rx_length;
+ enum rtl8365mb_cpu_format format;
+};
+
+/**
+ * struct rtl8365mb_port - private per-port data
+ * @smi: pointer to parent realtek_smi data
+ * @index: DSA port index, same as dsa_port::index
+ * @stats: link statistics populated by rtl8365mb_stats_poll, ready for atomic
+ * access via rtl8365mb_get_stats64
+ * @stats_lock: protect the stats structure during read/update
+ * @mib_work: delayed work for polling MIB counters
+ */
+struct rtl8365mb_port {
+ struct realtek_smi *smi;
+ unsigned int index;
+ struct rtnl_link_stats64 stats;
+ spinlock_t stats_lock;
+ struct delayed_work mib_work;
+};
+
+/**
+ * struct rtl8365mb - private chip-specific driver data
+ * @smi: pointer to parent realtek_smi data
+ * @irq: registered IRQ or zero
+ * @chip_id: chip identifier
+ * @chip_ver: chip silicon revision
+ * @port_mask: mask of all ports
+ * @learn_limit_max: maximum number of L2 addresses the chip can learn
+ * @cpu: CPU tagging and CPU port configuration for this chip
+ * @mib_lock: prevent concurrent reads of MIB counters
+ * @ports: per-port data
+ * @jam_table: chip-specific initialization jam table
+ * @jam_size: size of the chip's jam table
+ *
+ * Private data for this driver.
+ */
+struct rtl8365mb {
+ struct realtek_smi *smi;
+ int irq;
+ u32 chip_id;
+ u32 chip_ver;
+ u32 port_mask;
+ u32 learn_limit_max;
+ struct rtl8365mb_cpu cpu;
+ struct mutex mib_lock;
+ struct rtl8365mb_port ports[RTL8365MB_MAX_NUM_PORTS];
+ const struct rtl8365mb_jam_tbl_entry *jam_table;
+ size_t jam_size;
+};
+
+static int rtl8365mb_phy_poll_busy(struct realtek_smi *smi)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(smi->map,
+ RTL8365MB_INDIRECT_ACCESS_STATUS_REG,
+ val, !val, 10, 100);
+}
+
+static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
+ u32 ocp_addr)
+{
+ u32 val;
+ int ret;
+
+ /* Set OCP prefix */
+ val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr);
+ ret = regmap_update_bits(
+ smi->map, RTL8365MB_GPHY_OCP_MSB_0_REG,
+ RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK,
+ FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val));
+ if (ret)
+ return ret;
+
+ /* Set PHY register address */
+ val = RTL8365MB_PHY_BASE;
+ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK, phy);
+ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK,
+ ocp_addr >> 1);
+ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK,
+ ocp_addr >> 6);
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG,
+ val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_phy_ocp_read(struct realtek_smi *smi, int phy,
+ u32 ocp_addr, u16 *data)
+{
+ u32 val;
+ int ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ if (ret)
+ return ret;
+
+ /* Execute read operation */
+ val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
+ FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ);
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ /* Get PHY register data */
+ ret = regmap_read(smi->map, RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG,
+ &val);
+ if (ret)
+ return ret;
+
+ *data = val & 0xFFFF;
+
+ return 0;
+}
+
+static int rtl8365mb_phy_ocp_write(struct realtek_smi *smi, int phy,
+ u32 ocp_addr, u16 data)
+{
+ u32 val;
+ int ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ if (ret)
+ return ret;
+
+ /* Set PHY register data */
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG,
+ data);
+ if (ret)
+ return ret;
+
+ /* Execute write operation */
+ val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
+ FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE);
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+{
+ u32 ocp_addr;
+ u16 val;
+ int ret;
+
+ if (regnum > RTL8365MB_PHYREGMAX)
+ return -EINVAL;
+
+ ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
+
+ ret = rtl8365mb_phy_ocp_read(smi, phy, ocp_addr, &val);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to read PHY%d reg %02x @ %04x, ret %d\n", phy,
+ regnum, ocp_addr, ret);
+ return ret;
+ }
+
+ dev_dbg(smi->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
+ phy, regnum, ocp_addr, val);
+
+ return val;
+}
+
+static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+ u16 val)
+{
+ u32 ocp_addr;
+ int ret;
+
+ if (regnum > RTL8365MB_PHYREGMAX)
+ return -EINVAL;
+
+ ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
+
+ ret = rtl8365mb_phy_ocp_write(smi, phy, ocp_addr, val);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to write PHY%d reg %02x @ %04x, ret %d\n", phy,
+ regnum, ocp_addr, ret);
+ return ret;
+ }
+
+ dev_dbg(smi->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
+ phy, regnum, ocp_addr, val);
+
+ return 0;
+}
+
+static enum dsa_tag_protocol
+rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_RTL8_4;
+}
+
+static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
+ phy_interface_t interface)
+{
+ struct device_node *dn;
+ struct dsa_port *dp;
+ int tx_delay = 0;
+ int rx_delay = 0;
+ int ext_port;
+ u32 val;
+ int ret;
+
+ if (port == smi->cpu_port) {
+ ext_port = 1;
+ } else {
+ dev_err(smi->dev, "only one EXT port is currently supported\n");
+ return -EINVAL;
+ }
+
+ dp = dsa_to_port(smi->ds, port);
+ dn = dp->dn;
+
+ /* Set the RGMII TX/RX delay
+ *
+ * The Realtek vendor driver indicates the following possible
+ * configuration settings:
+ *
+ * TX delay:
+ * 0 = no delay, 1 = 2 ns delay
+ * RX delay:
+ * 0 = no delay, 7 = maximum delay
+ * No units are specified, but there are a total of 8 steps.
+ *
+ * The vendor driver also states that this must be configured *before*
+ * forcing the external interface into a particular mode, which is done
+ * in the rtl8365mb_phylink_mac_link_{up,down} functions.
+ *
+ * Only configure an RGMII TX (resp. RX) delay if the
+ * tx-internal-delay-ps (resp. rx-internal-delay-ps) OF property is
+ * specified. We ignore the detail of the RGMII interface mode
+ * (RGMII_{RXID, TXID, etc.}), as this is considered to be a PHY-only
+ * property.
+ *
+ * For the RX delay, we assume that a register value of 4 corresponds to
+ * 2 ns. But this is just an educated guess, so ignore all other values
+ * to avoid too much confusion.
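+ *
+ * Worked example (illustrative): a 2000 ps property is 2 ns, giving
+ * tx_delay = 2 / 2 = 1 (the single 2 ns TX step) and
+ * rx_delay = 2 * 2 = 4 (the RX step we assume means 2 ns).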
+ */
+ if (!of_property_read_u32(dn, "tx-internal-delay-ps", &val)) {
+ val = val / 1000; /* convert to ns */
+
+ if (val == 0 || val == 2)
+ tx_delay = val / 2;
+ else
+ dev_warn(smi->dev,
+ "EXT port TX delay must be 0 or 2 ns\n");
+ }
+
+ if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) {
+ val = val / 1000; /* convert to ns */
+
+ if (val == 0 || val == 2)
+ rx_delay = val * 2;
+ else
+ dev_warn(smi->dev,
+ "EXT port RX delay must be 0 to 2 ns\n");
+ }
+
+ ret = regmap_update_bits(
+ smi->map, RTL8365MB_EXT_RGMXF_REG(ext_port),
+ RTL8365MB_EXT_RGMXF_TXDELAY_MASK |
+ RTL8365MB_EXT_RGMXF_RXDELAY_MASK,
+ FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) |
+ FIELD_PREP(RTL8365MB_EXT_RGMXF_RXDELAY_MASK, rx_delay));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(
+ smi->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_port),
+ RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_port),
+ RTL8365MB_EXT_PORT_MODE_RGMII
+ << RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(
+ ext_port));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
+ bool link, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ u32 r_tx_pause;
+ u32 r_rx_pause;
+ u32 r_duplex;
+ u32 r_speed;
+ u32 r_link;
+ int ext_port;
+ int val;
+ int ret;
+
+ if (port == smi->cpu_port) {
+ ext_port = 1;
+ } else {
+ dev_err(smi->dev, "only one EXT port is currently supported\n");
+ return -EINVAL;
+ }
+
+ if (link) {
+ /* Force the link up with the desired configuration */
+ r_link = 1;
+ r_rx_pause = rx_pause ? 1 : 0;
+ r_tx_pause = tx_pause ? 1 : 0;
+
+ if (speed == SPEED_1000) {
+ r_speed = RTL8365MB_PORT_SPEED_1000M;
+ } else if (speed == SPEED_100) {
+ r_speed = RTL8365MB_PORT_SPEED_100M;
+ } else if (speed == SPEED_10) {
+ r_speed = RTL8365MB_PORT_SPEED_10M;
+ } else {
+ dev_err(smi->dev, "unsupported port speed %s\n",
+ phy_speed_to_str(speed));
+ return -EINVAL;
+ }
+
+ if (duplex == DUPLEX_FULL) {
+ r_duplex = 1;
+ } else if (duplex == DUPLEX_HALF) {
+ r_duplex = 0;
+ } else {
+ dev_err(smi->dev, "unsupported duplex %s\n",
+ phy_duplex_to_str(duplex));
+ return -EINVAL;
+ }
+ } else {
+ /* Force the link down and reset any programmed configuration */
+ r_link = 0;
+ r_tx_pause = 0;
+ r_rx_pause = 0;
+ r_speed = 0;
+ r_duplex = 0;
+ }
+
+ val = FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK, 1) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK,
+ r_tx_pause) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK,
+ r_rx_pause) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK, r_link) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK,
+ r_duplex) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed);
+ ret = regmap_write(smi->map,
+ RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_port),
+ val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ if (dsa_is_user_port(ds, port) &&
+ (interface == PHY_INTERFACE_MODE_NA ||
+ interface == PHY_INTERFACE_MODE_INTERNAL))
+ /* Internal PHY */
+ return true;
+ else if (dsa_is_cpu_port(ds, port) &&
+ phy_interface_mode_is_rgmii(interface))
+ /* Extension MAC */
+ return true;
+
+ return false;
+}
+
+static void rtl8365mb_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct realtek_smi *smi = ds->priv;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0 };
+
+ /* include/linux/phylink.h says:
+ * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
+ * expects the MAC driver to return all supported link modes.
+ */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ !rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
+ dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
+ phy_modes(state->interface), port);
+ linkmode_zero(supported);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+}
+
+static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
+ dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
+ phy_modes(state->interface), port);
+ return;
+ }
+
+ if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
+ dev_err(smi->dev,
+ "port %d supports only conventional PHY or fixed-link\n",
+ port);
+ return;
+ }
+
+ if (phy_interface_mode_is_rgmii(state->interface)) {
+ ret = rtl8365mb_ext_config_rgmii(smi, port, state->interface);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to configure RGMII mode on port %d: %d\n",
+ port, ret);
+ return;
+ }
+
+ /* TODO: Implement MII and RMII modes, which the RTL8365MB-VC also
+ * supports
+ */
+}
+
+static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_port *p;
+ struct rtl8365mb *mb;
+ int ret;
+
+ mb = smi->chip_data;
+ p = &mb->ports[port];
+ cancel_delayed_work_sync(&p->mib_work);
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ ret = rtl8365mb_ext_config_forcemode(smi, port, false, 0, 0,
+ false, false);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to reset forced mode on port %d: %d\n",
+ port, ret);
+
+ return;
+ }
+}
+
+static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_port *p;
+ struct rtl8365mb *mb;
+ int ret;
+
+ mb = smi->chip_data;
+ p = &mb->ports[port];
+ schedule_delayed_work(&p->mib_work, 0);
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ ret = rtl8365mb_ext_config_forcemode(smi, port, true, speed,
+ duplex, tx_pause,
+ rx_pause);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to force mode on port %d: %d\n", port,
+ ret);
+
+ return;
+ }
+}
+
+static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct realtek_smi *smi = ds->priv;
+ enum rtl8365mb_stp_state val;
+ int msti = 0;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ val = RTL8365MB_STP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val = RTL8365MB_STP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ val = RTL8365MB_STP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ val = RTL8365MB_STP_STATE_FORWARDING;
+ break;
+ default:
+ dev_err(smi->dev, "invalid STP state: %u\n", state);
+ return;
+ }
+
+ regmap_update_bits(smi->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
+ RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(port),
+ val << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(port));
+}
+
+static int rtl8365mb_port_set_learning(struct realtek_smi *smi, int port,
+ bool enable)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+
+ /* Enable/disable learning by limiting the number of L2 addresses the
+ * port can learn. Realtek documentation states that a limit of zero
+ * disables learning. When enabling learning, set it to the chip's
+ * maximum.
+ */
+ return regmap_write(smi->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
+ enable ? mb->learn_limit_max : 0);
+}
+
+static int rtl8365mb_port_set_isolation(struct realtek_smi *smi, int port,
+ u32 mask)
+{
+ return regmap_write(smi->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
+}
+
+static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
+ u32 offset, u32 length, u64 *mibvalue)
+{
+ u64 tmpvalue = 0;
+ u32 val;
+ int ret;
+ int i;
+
+ /* The MIB address is an SRAM address. We request a particular address
+ * and then poll the control register before reading the value from the
+ * counter registers.
+ */
+ ret = regmap_write(smi->map, RTL8365MB_MIB_ADDRESS_REG,
+ RTL8365MB_MIB_ADDRESS(port, offset));
+ if (ret)
+ return ret;
+
+ /* Poll for completion */
+ ret = regmap_read_poll_timeout(smi->map, RTL8365MB_MIB_CTRL0_REG, val,
+ !(val & RTL8365MB_MIB_CTRL0_BUSY_MASK),
+ 10, 100);
+ if (ret)
+ return ret;
+
+ /* Presumably this indicates a MIB counter read failure */
+ if (val & RTL8365MB_MIB_CTRL0_RESET_MASK)
+ return -EIO;
+
+ /* There are four MIB counter registers each holding a 16 bit word of a
+ * MIB counter. Depending on the offset, we should read from the upper
+ * two or lower two registers. In case the MIB counter is 4 words, we
+ * read from all four registers.
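+ *
+ * For example (illustrative): a 2-word counter at offset 6 maps to
+ * counter registers 3 and 2, while a 4-word counter is always read
+ * from register 3 down to register 0.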
+ */
+ if (length == 4)
+ offset = 3;
+ else
+ offset = (offset + 1) % 4;
+
+ /* Read the MIB counter 16 bits at a time */
+ for (i = 0; i < length; i++) {
+ ret = regmap_read(smi->map,
+ RTL8365MB_MIB_COUNTER_REG(offset - i), &val);
+ if (ret)
+ return ret;
+
+ tmpvalue = (tmpvalue << 16) | (val & 0xFFFF);
+ }
+
+ /* Only commit the result if no error occurred */
+ *mibvalue = tmpvalue;
+
+ return 0;
+}
+
+static void rtl8365mb_get_ethtool_stats(struct dsa_switch *ds, int port,
+ u64 *data)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb *mb;
+ int ret;
+ int i;
+
+ mb = smi->chip_data;
+
+ mutex_lock(&mb->mib_lock);
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
+
+ ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ mib->length, &data[i]);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to read port %d counters: %d\n", port,
+ ret);
+ break;
+ }
+ }
+ mutex_unlock(&mb->mib_lock);
+}
+
+static void rtl8365mb_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
+
+ strncpy(data + i * ETH_GSTRING_LEN, mib->name, ETH_GSTRING_LEN);
+ }
+}
+
+static int rtl8365mb_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return RTL8365MB_MIB_END;
+}
+
+static void rtl8365mb_get_phy_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_mib_counter *mib;
+ struct rtl8365mb *mb;
+
+ mb = smi->chip_data;
+ mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3StatsSymbolErrors];
+
+ mutex_lock(&mb->mib_lock);
+ rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ &phy_stats->SymbolErrorDuringCarrier);
+ mutex_unlock(&mb->mib_lock);
+}
+
+static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ u64 cnt[RTL8365MB_MIB_END] = {
+ [RTL8365MB_MIB_ifOutOctets] = 1,
+ [RTL8365MB_MIB_ifOutUcastPkts] = 1,
+ [RTL8365MB_MIB_ifOutMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
+ [RTL8365MB_MIB_dot3OutPauseFrames] = 1,
+ [RTL8365MB_MIB_ifOutDiscards] = 1,
+ [RTL8365MB_MIB_ifInOctets] = 1,
+ [RTL8365MB_MIB_ifInUcastPkts] = 1,
+ [RTL8365MB_MIB_ifInMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifInBroadcastPkts] = 1,
+ [RTL8365MB_MIB_dot3InPauseFrames] = 1,
+ [RTL8365MB_MIB_dot3StatsSingleCollisionFrames] = 1,
+ [RTL8365MB_MIB_dot3StatsMultipleCollisionFrames] = 1,
+ [RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
+ [RTL8365MB_MIB_dot3StatsDeferredTransmissions] = 1,
+ [RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
+ [RTL8365MB_MIB_dot3StatsExcessiveCollisions] = 1,
+ };
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb *mb;
+ int ret;
+ int i;
+
+ mb = smi->chip_data;
+
+ mutex_lock(&mb->mib_lock);
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
+
+ /* Only fetch required MIB counters (marked = 1 above) */
+ if (!cnt[i])
+ continue;
+
+ ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ mib->length, &cnt[i]);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&mb->mib_lock);
+
+ /* The RTL8365MB-VC exposes MIB objects, which we have to translate into
+ * IEEE 802.3 Managed Objects. This is not always completely faithful,
+ * but we try our best. See RFC 3635 for a detailed treatment of the
+ * subject.
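+ *
+ * For instance, ifOutOctets counts whole frames on the wire, so we
+ * subtract 18 octets (14-byte Ethernet header plus 4-byte FCS) per
+ * frame to approximate OctetsTransmittedOK, which covers data and
+ * padding only.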
+ */
+
+ mac_stats->FramesTransmittedOK = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
+ cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifOutBroadcastPkts] +
+ cnt[RTL8365MB_MIB_dot3OutPauseFrames] -
+ cnt[RTL8365MB_MIB_ifOutDiscards];
+ mac_stats->SingleCollisionFrames =
+ cnt[RTL8365MB_MIB_dot3StatsSingleCollisionFrames];
+ mac_stats->MultipleCollisionFrames =
+ cnt[RTL8365MB_MIB_dot3StatsMultipleCollisionFrames];
+ mac_stats->FramesReceivedOK = cnt[RTL8365MB_MIB_ifInUcastPkts] +
+ cnt[RTL8365MB_MIB_ifInMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts] +
+ cnt[RTL8365MB_MIB_dot3InPauseFrames];
+ mac_stats->FrameCheckSequenceErrors =
+ cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
+ mac_stats->OctetsTransmittedOK = cnt[RTL8365MB_MIB_ifOutOctets] -
+ 18 * mac_stats->FramesTransmittedOK;
+ mac_stats->FramesWithDeferredXmissions =
+ cnt[RTL8365MB_MIB_dot3StatsDeferredTransmissions];
+ mac_stats->LateCollisions = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
+ mac_stats->FramesAbortedDueToXSColls =
+ cnt[RTL8365MB_MIB_dot3StatsExcessiveCollisions];
+ mac_stats->OctetsReceivedOK = cnt[RTL8365MB_MIB_ifInOctets] -
+ 18 * mac_stats->FramesReceivedOK;
+ mac_stats->MulticastFramesXmittedOK =
+ cnt[RTL8365MB_MIB_ifOutMulticastPkts];
+ mac_stats->BroadcastFramesXmittedOK =
+ cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
+ mac_stats->MulticastFramesReceivedOK =
+ cnt[RTL8365MB_MIB_ifInMulticastPkts];
+ mac_stats->BroadcastFramesReceivedOK =
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts];
+}
+
+static void rtl8365mb_get_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_mib_counter *mib;
+ struct rtl8365mb *mb;
+
+ mb = smi->chip_data;
+ mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3ControlInUnknownOpcodes];
+
+ mutex_lock(&mb->mib_lock);
+ rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ &ctrl_stats->UnsupportedOpcodesReceived);
+ mutex_unlock(&mb->mib_lock);
+}
+
+static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
+{
+ u64 cnt[RTL8365MB_MIB_END] = {
+ [RTL8365MB_MIB_ifOutOctets] = 1,
+ [RTL8365MB_MIB_ifOutUcastPkts] = 1,
+ [RTL8365MB_MIB_ifOutMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
+ [RTL8365MB_MIB_ifOutDiscards] = 1,
+ [RTL8365MB_MIB_ifInOctets] = 1,
+ [RTL8365MB_MIB_ifInUcastPkts] = 1,
+ [RTL8365MB_MIB_ifInMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifInBroadcastPkts] = 1,
+ [RTL8365MB_MIB_etherStatsDropEvents] = 1,
+ [RTL8365MB_MIB_etherStatsCollisions] = 1,
+ [RTL8365MB_MIB_etherStatsFragments] = 1,
+ [RTL8365MB_MIB_etherStatsJabbers] = 1,
+ [RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
+ [RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
+ };
+ struct rtl8365mb *mb = smi->chip_data;
+ struct rtnl_link_stats64 *stats;
+ int ret;
+ int i;
+
+ stats = &mb->ports[port].stats;
+
+ mutex_lock(&mb->mib_lock);
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *c = &rtl8365mb_mib_counters[i];
+
+ /* Only fetch required MIB counters (marked = 1 above) */
+ if (!cnt[i])
+ continue;
+
+ ret = rtl8365mb_mib_counter_read(smi, port, c->offset,
+ c->length, &cnt[i]);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&mb->mib_lock);
+
+ /* Don't update statistics if there was an error reading the counters */
+ if (ret)
+ return;
+
+ spin_lock(&mb->ports[port].stats_lock);
+
+ stats->rx_packets = cnt[RTL8365MB_MIB_ifInUcastPkts] +
+ cnt[RTL8365MB_MIB_ifInMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts] -
+ cnt[RTL8365MB_MIB_ifOutDiscards];
+
+ stats->tx_packets = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
+ cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
+
+ /* if{In,Out}Octets includes FCS - remove it */
+ stats->rx_bytes = cnt[RTL8365MB_MIB_ifInOctets] - 4 * stats->rx_packets;
+ stats->tx_bytes =
+ cnt[RTL8365MB_MIB_ifOutOctets] - 4 * stats->tx_packets;
+
+ stats->rx_dropped = cnt[RTL8365MB_MIB_etherStatsDropEvents];
+ stats->tx_dropped = cnt[RTL8365MB_MIB_ifOutDiscards];
+
+ stats->multicast = cnt[RTL8365MB_MIB_ifInMulticastPkts];
+ stats->collisions = cnt[RTL8365MB_MIB_etherStatsCollisions];
+
+ stats->rx_length_errors = cnt[RTL8365MB_MIB_etherStatsFragments] +
+ cnt[RTL8365MB_MIB_etherStatsJabbers];
+ stats->rx_crc_errors = cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors;
+
+ stats->tx_aborted_errors = cnt[RTL8365MB_MIB_ifOutDiscards];
+ stats->tx_window_errors = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
+ stats->tx_errors = stats->tx_aborted_errors + stats->tx_window_errors;
+
+ spin_unlock(&mb->ports[port].stats_lock);
+}
+
+static void rtl8365mb_stats_poll(struct work_struct *work)
+{
+ struct rtl8365mb_port *p = container_of(to_delayed_work(work),
+ struct rtl8365mb_port,
+ mib_work);
+ struct realtek_smi *smi = p->smi;
+
+ rtl8365mb_stats_update(smi, p->index);
+
+ schedule_delayed_work(&p->mib_work, RTL8365MB_STATS_INTERVAL_JIFFIES);
+}
+
+static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_port *p;
+ struct rtl8365mb *mb;
+
+ mb = smi->chip_data;
+ p = &mb->ports[port];
+
+ spin_lock(&p->stats_lock);
+ memcpy(s, &p->stats, sizeof(*s));
+ spin_unlock(&p->stats_lock);
+}
+
+static void rtl8365mb_stats_setup(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int i;
+
+ /* Per-chip global mutex to protect MIB counter access, since doing
+ * so requires accessing a series of registers in a particular order.
+ */
+ mutex_init(&mb->mib_lock);
+
+ for (i = 0; i < smi->num_ports; i++) {
+ struct rtl8365mb_port *p = &mb->ports[i];
+
+ if (dsa_is_unused_port(smi->ds, i))
+ continue;
+
+ /* Per-port spinlock to protect the stats64 data */
+ spin_lock_init(&p->stats_lock);
+
+ /* This work polls the MIB counters and keeps the stats64 data
+ * up-to-date.
+ */
+ INIT_DELAYED_WORK(&p->mib_work, rtl8365mb_stats_poll);
+ }
+}
+
+static void rtl8365mb_stats_teardown(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int i;
+
+ for (i = 0; i < smi->num_ports; i++) {
+ struct rtl8365mb_port *p = &mb->ports[i];
+
+ if (dsa_is_unused_port(smi->ds, i))
+ continue;
+
+ cancel_delayed_work_sync(&p->mib_work);
+ }
+}
+
+static int rtl8365mb_get_and_clear_status_reg(struct realtek_smi *smi, u32 reg,
+ u32 *val)
+{
+ int ret;
+
+ ret = regmap_read(smi->map, reg, val);
+ if (ret)
+ return ret;
+
+ return regmap_write(smi->map, reg, *val);
+}
+
+static irqreturn_t rtl8365mb_irq(int irq, void *data)
+{
+ struct realtek_smi *smi = data;
+ unsigned long line_changes = 0;
+ struct rtl8365mb *mb;
+ u32 stat;
+ int line;
+ int ret;
+
+ mb = smi->chip_data;
+
+ ret = rtl8365mb_get_and_clear_status_reg(smi, RTL8365MB_INTR_STATUS_REG,
+ &stat);
+ if (ret)
+ goto out_error;
+
+ if (stat & RTL8365MB_INTR_LINK_CHANGE_MASK) {
+ u32 linkdown_ind;
+ u32 linkup_ind;
+ u32 val;
+
+ ret = rtl8365mb_get_and_clear_status_reg(
+ smi, RTL8365MB_PORT_LINKUP_IND_REG, &val);
+ if (ret)
+ goto out_error;
+
+ linkup_ind = FIELD_GET(RTL8365MB_PORT_LINKUP_IND_MASK, val);
+
+ ret = rtl8365mb_get_and_clear_status_reg(
+ smi, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
+ if (ret)
+ goto out_error;
+
+ linkdown_ind = FIELD_GET(RTL8365MB_PORT_LINKDOWN_IND_MASK, val);
+
+ line_changes = (linkup_ind | linkdown_ind) & mb->port_mask;
+ }
+
+ if (!line_changes)
+ goto out_none;
+
+ for_each_set_bit(line, &line_changes, smi->num_ports) {
+ int child_irq = irq_find_mapping(smi->irqdomain, line);
+
+ handle_nested_irq(child_irq);
+ }
+
+ return IRQ_HANDLED;
+
+out_error:
+ dev_err(smi->dev, "failed to read interrupt status: %d\n", ret);
+
+out_none:
+ return IRQ_NONE;
+}
+
+static struct irq_chip rtl8365mb_irq_chip = {
+ .name = "rtl8365mb",
+ /* The hardware doesn't support masking IRQs on a per-port basis */
+};
+
+static int rtl8365mb_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler(irq, &rtl8365mb_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static void rtl8365mb_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_nested_thread(irq, 0);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops rtl8365mb_irqdomain_ops = {
+ .map = rtl8365mb_irq_map,
+ .unmap = rtl8365mb_irq_unmap,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int rtl8365mb_set_irq_enable(struct realtek_smi *smi, bool enable)
+{
+ return regmap_update_bits(smi->map, RTL8365MB_INTR_CTRL_REG,
+ RTL8365MB_INTR_LINK_CHANGE_MASK,
+ FIELD_PREP(RTL8365MB_INTR_LINK_CHANGE_MASK,
+ enable ? 1 : 0));
+}
+
+static int rtl8365mb_irq_enable(struct realtek_smi *smi)
+{
+ return rtl8365mb_set_irq_enable(smi, true);
+}
+
+static int rtl8365mb_irq_disable(struct realtek_smi *smi)
+{
+ return rtl8365mb_set_irq_enable(smi, false);
+}
+
+static int rtl8365mb_irq_setup(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ struct device_node *intc;
+ u32 irq_trig;
+ int virq;
+ int irq;
+ u32 val;
+ int ret;
+ int i;
+
+ intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ if (!intc) {
+ dev_err(smi->dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ /* rtl8365mb IRQs cascade off this one */
+ irq = of_irq_get(intc, 0);
+ if (irq <= 0) {
+ if (irq != -EPROBE_DEFER)
+ dev_err(smi->dev, "failed to get parent irq: %d\n",
+ irq);
+ ret = irq ? irq : -EINVAL;
+ goto out_put_node;
+ }
+
+ smi->irqdomain = irq_domain_add_linear(intc, smi->num_ports,
+ &rtl8365mb_irqdomain_ops, smi);
+ if (!smi->irqdomain) {
+ dev_err(smi->dev, "failed to add irq domain\n");
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
+
+ for (i = 0; i < smi->num_ports; i++) {
+ virq = irq_create_mapping(smi->irqdomain, i);
+ if (!virq) {
+ dev_err(smi->dev,
+ "failed to create irq domain mapping\n");
+ ret = -EINVAL;
+ goto out_remove_irqdomain;
+ }
+
+ irq_set_parent(virq, irq);
+ }
+
+ /* Configure chip interrupt signal polarity */
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ switch (irq_trig) {
+ case IRQF_TRIGGER_RISING:
+ case IRQF_TRIGGER_HIGH:
+ val = RTL8365MB_INTR_POLARITY_HIGH;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ case IRQF_TRIGGER_LOW:
+ val = RTL8365MB_INTR_POLARITY_LOW;
+ break;
+ default:
+ dev_err(smi->dev, "unsupported irq trigger type %u\n",
+ irq_trig);
+ ret = -EINVAL;
+ goto out_remove_irqdomain;
+ }
+
+ ret = regmap_update_bits(smi->map, RTL8365MB_INTR_POLARITY_REG,
+ RTL8365MB_INTR_POLARITY_MASK,
+ FIELD_PREP(RTL8365MB_INTR_POLARITY_MASK, val));
+ if (ret)
+ goto out_remove_irqdomain;
+
+ /* Disable the interrupt in case the chip has it enabled on reset */
+ ret = rtl8365mb_irq_disable(smi);
+ if (ret)
+ goto out_remove_irqdomain;
+
+ /* Clear the interrupt status register */
+ ret = regmap_write(smi->map, RTL8365MB_INTR_STATUS_REG,
+ RTL8365MB_INTR_ALL_MASK);
+ if (ret)
+ goto out_remove_irqdomain;
+
+ ret = request_threaded_irq(irq, NULL, rtl8365mb_irq, IRQF_ONESHOT,
+ "rtl8365mb", smi);
+ if (ret) {
+ dev_err(smi->dev, "failed to request irq: %d\n", ret);
+ goto out_remove_irqdomain;
+ }
+
+ /* Store the irq so that we know to free it during teardown */
+ mb->irq = irq;
+
+ ret = rtl8365mb_irq_enable(smi);
+ if (ret)
+ goto out_free_irq;
+
+ of_node_put(intc);
+
+ return 0;
+
+out_free_irq:
+ free_irq(mb->irq, smi);
+ mb->irq = 0;
+
+out_remove_irqdomain:
+ for (i = 0; i < smi->num_ports; i++) {
+ virq = irq_find_mapping(smi->irqdomain, i);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(smi->irqdomain);
+ smi->irqdomain = NULL;
+
+out_put_node:
+ of_node_put(intc);
+
+ return ret;
+}
+
+static void rtl8365mb_irq_teardown(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int virq;
+ int i;
+
+ if (mb->irq) {
+ free_irq(mb->irq, smi);
+ mb->irq = 0;
+ }
+
+ if (smi->irqdomain) {
+ for (i = 0; i < smi->num_ports; i++) {
+ virq = irq_find_mapping(smi->irqdomain, i);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(smi->irqdomain);
+ smi->irqdomain = NULL;
+ }
+}
+
+static int rtl8365mb_cpu_config(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb_cpu *cpu = &mb->cpu;
+ u32 val;
+ int ret;
+
+ ret = regmap_update_bits(smi->map, RTL8365MB_CPU_PORT_MASK_REG,
+ RTL8365MB_CPU_PORT_MASK_MASK,
+ FIELD_PREP(RTL8365MB_CPU_PORT_MASK_MASK,
+ cpu->mask));
+ if (ret)
+ return ret;
+
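+ /* The trap port index is split across two fields (judging by the
+ * shift below): its low three bits go into the TRAP_PORT field and
+ * bit 3 into TRAP_PORT_EXT.
+ */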
+ val = FIELD_PREP(RTL8365MB_CPU_CTRL_EN_MASK, cpu->enable ? 1 : 0) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_INSERTMODE_MASK, cpu->insert) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK,
+ cpu->trap_port >> 3);
+ ret = regmap_write(smi->map, RTL8365MB_CPU_CTRL_REG, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_switch_init(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int ret;
+ int i;
+
+ /* Do any chip-specific init jam before getting to the common stuff */
+ if (mb->jam_table) {
+ for (i = 0; i < mb->jam_size; i++) {
+ ret = regmap_write(smi->map, mb->jam_table[i].reg,
+ mb->jam_table[i].val);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Common init jam */
+ for (i = 0; i < ARRAY_SIZE(rtl8365mb_init_jam_common); i++) {
+ ret = regmap_write(smi->map, rtl8365mb_init_jam_common[i].reg,
+ rtl8365mb_init_jam_common[i].val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtl8365mb_reset_chip(struct realtek_smi *smi)
+{
+ u32 val;
+
+ realtek_smi_write_reg_noack(smi, RTL8365MB_CHIP_RESET_REG,
+ FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK,
+ 1));
+
+ /* Realtek documentation says the chip needs 1 second to reset. Sleep
+ * for 100 ms before accessing any registers to prevent ACK timeouts,
+ * then poll for up to 1 second for the reset bit to clear.
+ */
+ msleep(100);
+ return regmap_read_poll_timeout(smi->map, RTL8365MB_CHIP_RESET_REG, val,
+ !(val & RTL8365MB_CHIP_RESET_HW_MASK),
+ 20000, 1e6);
+}
+
+static int rtl8365mb_setup(struct dsa_switch *ds)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb *mb;
+ int ret;
+ int i;
+
+ mb = smi->chip_data;
+
+ ret = rtl8365mb_reset_chip(smi);
+ if (ret) {
+ dev_err(smi->dev, "failed to reset chip: %d\n", ret);
+ goto out_error;
+ }
+
+ /* Configure switch to vendor-defined initial state */
+ ret = rtl8365mb_switch_init(smi);
+ if (ret) {
+ dev_err(smi->dev, "failed to initialize switch: %d\n", ret);
+ goto out_error;
+ }
+
+ /* Set up cascading IRQs */
+ ret = rtl8365mb_irq_setup(smi);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ else if (ret)
+ dev_info(smi->dev, "no interrupt support\n");
+
+ /* Configure CPU tagging */
+ ret = rtl8365mb_cpu_config(smi);
+ if (ret)
+ goto out_teardown_irq;
+
+ /* Configure ports */
+ for (i = 0; i < smi->num_ports; i++) {
+ struct rtl8365mb_port *p = &mb->ports[i];
+
+ if (dsa_is_unused_port(smi->ds, i))
+ continue;
+
+ /* Set up per-port private data */
+ p->smi = smi;
+ p->index = i;
+
+ /* Forward only to the CPU */
+ ret = rtl8365mb_port_set_isolation(smi, i, BIT(smi->cpu_port));
+ if (ret)
+ goto out_teardown_irq;
+
+ /* Disable learning */
+ ret = rtl8365mb_port_set_learning(smi, i, false);
+ if (ret)
+ goto out_teardown_irq;
+
+ /* Set the initial STP state of all ports to DISABLED, otherwise
+ * ports will still forward frames to the CPU despite being
+ * administratively down by default.
+ */
+ rtl8365mb_port_stp_state_set(smi->ds, i, BR_STATE_DISABLED);
+ }
+
+ /* Set maximum packet length to 1536 bytes */
+ ret = regmap_update_bits(smi->map, RTL8365MB_CFG0_MAX_LEN_REG,
+ RTL8365MB_CFG0_MAX_LEN_MASK,
+ FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK, 1536));
+ if (ret)
+ goto out_teardown_irq;
+
+ ret = realtek_smi_setup_mdio(smi);
+ if (ret) {
+ dev_err(smi->dev, "could not set up MDIO bus\n");
+ goto out_teardown_irq;
+ }
+
+ /* Start statistics counter polling */
+ rtl8365mb_stats_setup(smi);
+
+ return 0;
+
+out_teardown_irq:
+ rtl8365mb_irq_teardown(smi);
+
+out_error:
+ return ret;
+}
+
+static void rtl8365mb_teardown(struct dsa_switch *ds)
+{
+ struct realtek_smi *smi = ds->priv;
+
+ rtl8365mb_stats_teardown(smi);
+ rtl8365mb_irq_teardown(smi);
+}
+
+static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
+{
+ int ret;
+
+ /* For some reason we have to write a magic value to an arbitrary
+ * register whenever accessing the chip ID/version registers.
+ */
+ ret = regmap_write(map, RTL8365MB_MAGIC_REG, RTL8365MB_MAGIC_VALUE);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, RTL8365MB_CHIP_ID_REG, id);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, RTL8365MB_CHIP_VER_REG, ver);
+ if (ret)
+ return ret;
+
+ /* Reset magic register */
+ ret = regmap_write(map, RTL8365MB_MAGIC_REG, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_detect(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ u32 chip_id;
+ u32 chip_ver;
+ int ret;
+
+ ret = rtl8365mb_get_chip_id_and_ver(smi->map, &chip_id, &chip_ver);
+ if (ret) {
+ dev_err(smi->dev, "failed to read chip id and version: %d\n",
+ ret);
+ return ret;
+ }
+
+ switch (chip_id) {
+ case RTL8365MB_CHIP_ID_8365MB_VC:
+ dev_info(smi->dev,
+ "found an RTL8365MB-VC switch (ver=0x%04x)\n",
+ chip_ver);
+
+ smi->cpu_port = RTL8365MB_CPU_PORT_NUM_8365MB_VC;
+ smi->num_ports = smi->cpu_port + 1;
+
+ mb->smi = smi;
+ mb->chip_id = chip_id;
+ mb->chip_ver = chip_ver;
+ mb->port_mask = BIT(smi->num_ports) - 1;
+ mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC;
+ mb->jam_table = rtl8365mb_init_jam_8365mb_vc;
+ mb->jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc);
+
+ mb->cpu.enable = 1;
+ mb->cpu.mask = BIT(smi->cpu_port);
+ mb->cpu.trap_port = smi->cpu_port;
+ mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
+ mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
+ mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
+ mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
+
+ break;
+ default:
+ dev_err(smi->dev,
+ "found an unknown Realtek switch (id=0x%04x, ver=0x%04x)\n",
+ chip_id, chip_ver);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct dsa_switch_ops rtl8365mb_switch_ops = {
+ .get_tag_protocol = rtl8365mb_get_tag_protocol,
+ .setup = rtl8365mb_setup,
+ .teardown = rtl8365mb_teardown,
+ .phylink_validate = rtl8365mb_phylink_validate,
+ .phylink_mac_config = rtl8365mb_phylink_mac_config,
+ .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
+ .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
+ .port_stp_state_set = rtl8365mb_port_stp_state_set,
+ .get_strings = rtl8365mb_get_strings,
+ .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
+ .get_sset_count = rtl8365mb_get_sset_count,
+ .get_eth_phy_stats = rtl8365mb_get_phy_stats,
+ .get_eth_mac_stats = rtl8365mb_get_mac_stats,
+ .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
+ .get_stats64 = rtl8365mb_get_stats64,
+};
+
+static const struct realtek_smi_ops rtl8365mb_smi_ops = {
+ .detect = rtl8365mb_detect,
+ .phy_read = rtl8365mb_phy_read,
+ .phy_write = rtl8365mb_phy_write,
+};
+
+const struct realtek_smi_variant rtl8365mb_variant = {
+ .ds_ops = &rtl8365mb_switch_ops,
+ .ops = &rtl8365mb_smi_ops,
+ .clk_delay = 10,
+ .cmd_read = 0xb9,
+ .cmd_write = 0xb8,
+ .chip_data_sz = sizeof(struct rtl8365mb),
+};
+EXPORT_SYMBOL_GPL(rtl8365mb_variant);
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index 75897a369096..bdb8d8d34880 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -292,89 +292,6 @@ int rtl8366_reset_vlan(struct realtek_smi *smi)
}
EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
-int rtl8366_init_vlan(struct realtek_smi *smi)
-{
- int port;
- int ret;
-
- ret = rtl8366_reset_vlan(smi);
- if (ret)
- return ret;
-
- /* Loop over the available ports, for each port, associate
- * it with the VLAN (port+1)
- */
- for (port = 0; port < smi->num_ports; port++) {
- u32 mask;
-
- if (port == smi->cpu_port)
- /* For the CPU port, make all ports members of this
- * VLAN.
- */
- mask = GENMASK((int)smi->num_ports - 1, 0);
- else
- /* For all other ports, enable itself plus the
- * CPU port.
- */
- mask = BIT(port) | BIT(smi->cpu_port);
-
- /* For each port, set the port as member of VLAN (port+1)
- * and untagged, except for the CPU port: the CPU port (5) is
- * member of VLAN 6 and so are ALL the other ports as well.
- * Use filter 0 (no filter).
- */
- dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n",
- (port + 1), port, mask);
- ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0);
- if (ret)
- return ret;
-
- dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n",
- (port + 1), port, (port + 1));
- ret = rtl8366_set_pvid(smi, port, (port + 1));
- if (ret)
- return ret;
- }
-
- return rtl8366_enable_vlan(smi, true);
-}
-EXPORT_SYMBOL_GPL(rtl8366_init_vlan);
-
-int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack)
-{
- struct realtek_smi *smi = ds->priv;
- struct rtl8366_vlan_4k vlan4k;
- int ret;
-
- /* Use VLAN nr port + 1 since VLAN0 is not valid */
- if (!smi->ops->is_vlan_valid(smi, port + 1))
- return -EINVAL;
-
- dev_info(smi->dev, "%s filtering on port %d\n",
- vlan_filtering ? "enable" : "disable",
- port);
-
- /* TODO:
- * The hardware support filter ID (FID) 0..7, I have no clue how to
- * support this in the driver when the callback only says on/off.
- */
- ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k);
- if (ret)
- return ret;
-
- /* Just set the filter to FID 1 for now then */
- ret = rtl8366_set_vlan(smi, port + 1,
- vlan4k.member,
- vlan4k.untag,
- 1);
- if (ret)
- return ret;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering);
-
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
@@ -401,12 +318,9 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
return ret;
}
- dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
- vlan->vid, port, untagged ? "untagged" : "tagged",
- pvid ? " PVID" : "no PVID");
-
- if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
- dev_err(smi->dev, "port is DSA or CPU port\n");
+ dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n",
+ vlan->vid, port, untagged ? "untagged" : "tagged",
+ pvid ? "PVID" : "no PVID");
member |= BIT(port);
@@ -439,7 +353,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
struct realtek_smi *smi = ds->priv;
int ret, i;
- dev_info(smi->dev, "del VLAN %04x on port %d\n", vlan->vid, port);
+ dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port);
for (i = 0; i < smi->num_vlan_mc; i++) {
struct rtl8366_vlan_mc vlanmc;
@@ -457,7 +371,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
* anymore then clear the whole member
* config so it can be reused.
*/
- if (!vlanmc.member && vlanmc.untag) {
+ if (!vlanmc.member) {
vlanmc.vid = 0;
vlanmc.priority = 0;
vlanmc.fid = 0;
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index a89093bc6c6a..03deacd83e61 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -14,6 +14,7 @@
#include <linux/bitops.h>
#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
@@ -42,9 +43,12 @@
/* Port Enable Control register */
#define RTL8366RB_PECR 0x0001
-/* Switch Security Control registers */
-#define RTL8366RB_SSCR0 0x0002
-#define RTL8366RB_SSCR1 0x0003
+/* Switch per-port learning disablement register */
+#define RTL8366RB_PORT_LEARNDIS_CTRL 0x0002
+
+/* Security control register, actually used for address aging */
+#define RTL8366RB_SECURITY_CTRL 0x0003
+
#define RTL8366RB_SSCR2 0x0004
#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0)
@@ -106,6 +110,18 @@
#define RTL8366RB_POWER_SAVING_REG 0x0021
+/* Spanning tree status (STP) control, two bits per port per FID */
+#define RTL8366RB_STP_STATE_BASE 0x0050 /* 0x0050..0x0057 */
+#define RTL8366RB_STP_STATE_DISABLED 0x0
+#define RTL8366RB_STP_STATE_BLOCKING 0x1
+#define RTL8366RB_STP_STATE_LEARNING 0x2
+#define RTL8366RB_STP_STATE_FORWARDING 0x3
+#define RTL8366RB_STP_MASK GENMASK(1, 0)
+#define RTL8366RB_STP_STATE(port, state) \
+ ((state) << ((port) * 2))
+#define RTL8366RB_STP_STATE_MASK(port) \
+ RTL8366RB_STP_STATE((port), RTL8366RB_STP_MASK)
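+
+/* For example (illustrative): RTL8366RB_STP_STATE(2, FORWARDING) is
+ * 0x3 << 4 == 0x30, so port 2 occupies bits 5:4 of each register.
+ */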
+
/* CPU port control reg */
#define RTL8368RB_CPU_CTRL_REG 0x0061
#define RTL8368RB_CPU_PORTS_MSK 0x00FF
@@ -143,6 +159,21 @@
#define RTL8366RB_PHY_NO_OFFSET 9
#define RTL8366RB_PHY_NO_MASK (0x1f << 9)
+/* VLAN Ingress Control Register 1, one bit per port.
+ * bit 0 .. 5 will make the switch drop ingress frames without a
+ * VID, such as untagged or priority-tagged frames, for the
+ * respective port.
+ * bit 6 .. 11 will make the switch drop ingress frames carrying
+ * a C-tag with VID != 0 for the respective port.
+ */
+#define RTL8366RB_VLAN_INGRESS_CTRL1_REG 0x037E
+#define RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) (BIT((port)) | BIT((port) + 6))
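+/* For example (illustrative): RTL8366RB_VLAN_INGRESS_CTRL1_DROP(1) is
+ * BIT(1) | BIT(7) == 0x0082.
+ */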
+
+/* VLAN Ingress Control Register 2, one bit per port.
+ * bit0 .. bit5 will make the switch drop all ingress frames with
+ * a VLAN classification that does not include the port is in its
+ * member set.
+ */
#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
/* LED control registers */
@@ -215,6 +246,7 @@
#define RTL8366RB_NUM_LEDGROUPS 4
#define RTL8366RB_NUM_VIDS 4096
#define RTL8366RB_PRIORITYMAX 7
+#define RTL8366RB_NUM_FIDS 8
#define RTL8366RB_FIDMAX 7
#define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */
@@ -300,6 +332,13 @@
#define RTL8366RB_INTERRUPT_STATUS_REG 0x0442
#define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */
+/* Port isolation registers */
+#define RTL8366RB_PORT_ISO_BASE 0x0F08
+#define RTL8366RB_PORT_ISO(pnum) (RTL8366RB_PORT_ISO_BASE + (pnum))
+#define RTL8366RB_PORT_ISO_EN BIT(0)
+#define RTL8366RB_PORT_ISO_PORTS_MASK GENMASK(7, 1)
+#define RTL8366RB_PORT_ISO_PORTS(pmask) ((pmask) << 1)
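+/* For example (illustrative): a member mask of BIT(n) lands in bit
+ * n + 1 of the register, bit 0 being the enable bit.
+ */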
+
/* bits 0..5 enable force when cleared */
#define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11
@@ -314,9 +353,11 @@
/**
* struct rtl8366rb - RTL8366RB-specific data
* @max_mtu: per-port max MTU setting
+ * @pvid_enabled: if PVID is set for respective port
*/
struct rtl8366rb {
unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+ bool pvid_enabled[RTL8366RB_NUM_PORTS];
};
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
@@ -835,6 +876,21 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
return ret;
+ /* Isolate all user ports so they can only send packets to themselves
+ * and the CPU port
+ */
+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+ ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i),
+ RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) |
+ RTL8366RB_PORT_ISO_EN);
+ if (ret)
+ return ret;
+ }
+ /* CPU port can send packets to all ports */
+ ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
+ RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) |
+ RTL8366RB_PORT_ISO_EN);
+ if (ret)
+ return ret;
+
/* Set up the "green ethernet" feature */
ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
ARRAY_SIZE(rtl8366rb_green_jam), smi, false);
@@ -888,13 +944,14 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
/* layer 2 size, see rtl8366rb_change_mtu() */
rb->max_mtu[i] = 1532;
- /* Enable learning for all ports */
- ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0);
+ /* Disable learning for all ports */
+ ret = regmap_write(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ RTL8366RB_PORT_ALL);
if (ret)
return ret;
/* Enable auto ageing for all ports */
- ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0);
+ ret = regmap_write(smi->map, RTL8366RB_SECURITY_CTRL, 0);
if (ret)
return ret;
@@ -911,11 +968,13 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
return ret;
- /* Discard VLAN tagged packets if the port is not a member of
- * the VLAN with which the packets is associated.
- */
+ /* Accept all packets by default; we enable filtering on demand */
+ ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ 0);
+ if (ret)
+ return ret;
ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
- RTL8366RB_PORT_ALL);
+ 0);
if (ret)
return ret;
@@ -963,7 +1022,7 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return ret;
}
- ret = rtl8366_init_vlan(smi);
+ ret = rtl8366_reset_vlan(smi);
if (ret)
return ret;
@@ -977,8 +1036,6 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return -ENODEV;
}
- ds->configure_vlan_while_not_filtering = false;
-
return 0;
}
@@ -1127,6 +1184,190 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)
rb8366rb_set_port_led(smi, port, false);
}
+static int
+rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct realtek_smi *smi = ds->priv;
+ unsigned int port_bitmap = 0;
+ int ret, i;
+
+ /* Loop over all other ports than the current one */
+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+ /* Current port handled last */
+ if (i == port)
+ continue;
+ /* Not on this bridge */
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ continue;
+ /* Join this port to each other port on the bridge */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ RTL8366RB_PORT_ISO_PORTS(BIT(port)),
+ RTL8366RB_PORT_ISO_PORTS(BIT(port)));
+ if (ret)
+ dev_err(smi->dev, "failed to join port %d\n", port);
+
+ port_bitmap |= BIT(i);
+ }
+
+ /* Set the bits for the ports we can access */
+ return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ RTL8366RB_PORT_ISO_PORTS(port_bitmap),
+ RTL8366RB_PORT_ISO_PORTS(port_bitmap));
+}
+
+static void
+rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct realtek_smi *smi = ds->priv;
+ unsigned int port_bitmap = 0;
+ int ret, i;
+
+ /* Loop over all other ports than this one */
+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+ /* Current port handled last */
+ if (i == port)
+ continue;
+ /* Not on this bridge */
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ continue;
+ /* Remove this port from any other port on the bridge */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0);
+ if (ret)
+ dev_err(smi->dev, "failed to leave port %d\n", port);
+
+ port_bitmap |= BIT(i);
+ }
+
+ /* Clear the bits for the ports we cannot access; leave ourselves */
+ regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0);
+}
+
+/**
+ * rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames
+ * @smi: SMI state container
+ * @port: the port to drop untagged and C-tagged frames on
+ * @drop: whether to drop or pass untagged and C-tagged frames
+ */
+static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop)
+{
+ return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port),
+ drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0);
+}
+
+static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8366rb *rb;
+ int ret;
+
+ rb = smi->chip_data;
+
+ dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port,
+ vlan_filtering ? "enable" : "disable");
+
+ /* If the port is not in the member set, the frame will be dropped */
+ ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ BIT(port), vlan_filtering ? BIT(port) : 0);
+ if (ret)
+ return ret;
+
+ /* If VLAN filtering is enabled and PVID is also enabled, we must
+ * not drop any untagged or C-tagged frames. If we turn off VLAN
+ * filtering on a port, we need to accept any frames.
+ */
+ if (vlan_filtering)
+ ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]);
+ else
+ ret = rtl8366rb_drop_untagged(smi, port, false);
+
+ return ret;
+}
+
+static int
+rtl8366rb_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ /* We support enabling/disabling learning */
+ if (flags.mask & ~(BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ if (flags.mask & BR_LEARNING) {
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ BIT(port),
+ (flags.val & BR_LEARNING) ? 0 : BIT(port));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct realtek_smi *smi = ds->priv;
+ u32 val;
+ int i;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ val = RTL8366RB_STP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val = RTL8366RB_STP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ val = RTL8366RB_STP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ val = RTL8366RB_STP_STATE_FORWARDING;
+ break;
+ default:
+ dev_err(smi->dev, "unknown bridge state requested\n");
+ return;
+ }
+
+ /* Set the same status for the port on all the FIDs */
+ for (i = 0; i < RTL8366RB_NUM_FIDS; i++) {
+ regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + i,
+ RTL8366RB_STP_STATE_MASK(port),
+ RTL8366RB_STP_STATE(port, val));
+ }
+}
+
+static void
+rtl8366rb_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct realtek_smi *smi = ds->priv;
+
+ /* This will age out any learned L2 entries */
+ regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ BIT(port), BIT(port));
+ /* Restore the normal state of things */
+ regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ BIT(port), 0);
+}
+
static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct realtek_smi *smi = ds->priv;
@@ -1338,24 +1579,44 @@ static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
{
+ struct rtl8366rb *rb;
+ bool pvid_enabled;
+ int ret;
+
+ rb = smi->chip_data;
+ pvid_enabled = !!index;
+
if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
return -EINVAL;
- return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
RTL8366RB_PORT_VLAN_CTRL_MASK <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
(index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
+ if (ret)
+ return ret;
+
+ rb->pvid_enabled[port] = pvid_enabled;
+
+ /* If VLAN filtering is enabled and PVID is also enabled, we must
+ * not drop any untagged or C-tagged frames. Make sure to update the
+ * filtering setting.
+ */
+ if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port)))
+ ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled);
+
+ return ret;
}
static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
{
- unsigned int max = RTL8366RB_NUM_VLANS;
+ unsigned int max = RTL8366RB_NUM_VLANS - 1;
if (smi->vlan4k_enabled)
max = RTL8366RB_NUM_VIDS - 1;
- if (vlan == 0 || vlan > max)
+ if (vlan > max)
return false;
return true;
@@ -1510,11 +1771,17 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_strings = rtl8366_get_strings,
.get_ethtool_stats = rtl8366_get_ethtool_stats,
.get_sset_count = rtl8366_get_sset_count,
- .port_vlan_filtering = rtl8366_vlan_filtering,
+ .port_bridge_join = rtl8366rb_port_bridge_join,
+ .port_bridge_leave = rtl8366rb_port_bridge_leave,
+ .port_vlan_filtering = rtl8366rb_vlan_filtering,
.port_vlan_add = rtl8366_vlan_add,
.port_vlan_del = rtl8366_vlan_del,
.port_enable = rtl8366rb_port_enable,
.port_disable = rtl8366rb_port_disable,
+ .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
+ .port_bridge_flags = rtl8366rb_port_bridge_flags,
+ .port_stp_state_set = rtl8366rb_port_stp_state_set,
+ .port_fast_age = rtl8366rb_port_fast_age,
.port_change_mtu = rtl8366rb_change_mtu,
.port_max_mtu = rtl8366rb_max_mtu,
};
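
The new bridge-flags callbacks wired up above follow DSA's two-phase pattern: the pre callback only validates the requested mask, and the main callback touches hardware. A minimal sketch of the validation half (plain C, simplified types and a stand-in flag value, not the kernel signatures):

#include <errno.h>
#include <stdio.h>

#define BR_LEARNING (1UL << 0) /* stand-in for the kernel's BR_LEARNING bit */

/* Validation half: reject any flag outside the supported set before
 * the apply half ever touches hardware.
 */
static int pre_bridge_flags(unsigned long mask)
{
    return (mask & ~BR_LEARNING) ? -EINVAL : 0;
}

int main(void)
{
    printf("learning only: %d\n", pre_bridge_flags(BR_LEARNING)); /* 0 */
    printf("unknown flag:  %d\n", pre_bridge_flags(1UL << 5));    /* -22 */
    return 0;
}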
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 5e5d24e7c02b..808419f3b808 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -20,6 +20,27 @@
#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
#define SJA1105_NUM_L2_POLICERS SJA1110_MAX_L2_POLICING_COUNT
+/* Calculated assuming 1 Gbps, where the clock is 125 MHz (8 ns period).
+ * To avoid floating point operations, we'll multiply the degrees by 10
+ * to get a "phase" with 1 decimal point of precision.
+ */
+#define SJA1105_RGMII_DELAY_PS_TO_PHASE(ps) \
+ (((ps) * 360) / 800)
+#define SJA1105_RGMII_DELAY_PHASE_TO_PS(phase) \
+ ((800 * (phase)) / 360)
+#define SJA1105_RGMII_DELAY_PHASE_TO_HW(phase) \
+ (((phase) - 738) / 9)
+#define SJA1105_RGMII_DELAY_PS_TO_HW(ps) \
+ SJA1105_RGMII_DELAY_PHASE_TO_HW(SJA1105_RGMII_DELAY_PS_TO_PHASE(ps))
+
+/* Valid range in degrees is 73.8 to 101.7,
+ * in 0.9 degree increments
+ */
+#define SJA1105_RGMII_DELAY_MIN_PS \
+ SJA1105_RGMII_DELAY_PHASE_TO_PS(738)
+#define SJA1105_RGMII_DELAY_MAX_PS \
+ SJA1105_RGMII_DELAY_PHASE_TO_PS(1017)
+
typedef enum {
SPI_READ = 0,
SPI_WRITE = 1,
@@ -222,16 +243,14 @@ struct sja1105_flow_block {
struct sja1105_private {
struct sja1105_static_config static_config;
- bool rgmii_rx_delay[SJA1105_MAX_NUM_PORTS];
- bool rgmii_tx_delay[SJA1105_MAX_NUM_PORTS];
+ int rgmii_rx_delay_ps[SJA1105_MAX_NUM_PORTS];
+ int rgmii_tx_delay_ps[SJA1105_MAX_NUM_PORTS];
phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS];
bool fixed_link[SJA1105_MAX_NUM_PORTS];
- bool vlan_aware;
unsigned long ucast_egress_floods;
unsigned long bcast_egress_floods;
const struct sja1105_info *info;
size_t max_xfer_len;
- struct gpio_desc *reset_gpio;
struct spi_device *spidev;
struct dsa_switch *ds;
u16 bridge_pvid[SJA1105_MAX_NUM_PORTS];
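
The fixed-point macros in this header can be checked in isolation. A standalone sketch reproducing the arithmetic, exercised with 2000 ps, the value the old code expressed as a hardcoded 90 degrees (equivalences assumed per the comments above):

#include <stdio.h>

/* Same fixed-point math as the header macros: "phase" is in tenths of
 * a degree, so one 8000 ps RGMII clock period maps to 3600 phase units.
 */
#define PS_TO_PHASE(ps)    (((ps) * 360) / 800)
#define PHASE_TO_PS(phase) ((800 * (phase)) / 360)
#define PHASE_TO_HW(phase) (((phase) - 738) / 9) /* 73.8 deg + n * 0.9 deg */

int main(void)
{
    int ps = 2000; /* the legacy fixed delay used for RGMII_ID */

    printf("%d ps -> phase %d (%d.%d deg) -> hw %d\n",
           ps, PS_TO_PHASE(ps), PS_TO_PHASE(ps) / 10,
           PS_TO_PHASE(ps) % 10, PHASE_TO_HW(PS_TO_PHASE(ps)));
    printf("valid range: %d..%d ps\n", PHASE_TO_PS(738), PHASE_TO_PS(1017));
    return 0;
}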
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 387a1f2f161c..e3699f76f6d7 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
*/
#include <linux/packing.h>
@@ -498,17 +498,6 @@ sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
}
-/* Valid range in degrees is an integer between 73.8 and 101.7 */
-static u64 sja1105_rgmii_delay(u64 phase)
-{
- /* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9.
- * To avoid floating point operations we'll multiply by 10
- * and get 1 decimal point precision.
- */
- phase *= 10;
- return (phase - 738) / 9;
-}
-
/* The RGMII delay setup procedure is 2-step and gets called upon each
* .phylink_mac_config. Both are strategic.
* The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues
@@ -521,13 +510,15 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
const struct sja1105_private *priv = ctx;
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ int rx_delay = priv->rgmii_rx_delay_ps[port];
+ int tx_delay = priv->rgmii_tx_delay_ps[port];
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
int rc;
- if (priv->rgmii_rx_delay[port])
- pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
- if (priv->rgmii_tx_delay[port])
- pad_mii_id.txc_delay = sja1105_rgmii_delay(90);
+ if (rx_delay)
+ pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
+ if (tx_delay)
+ pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
/* Stage 1: Turn the RGMII delay lines off. */
pad_mii_id.rxc_bypass = 1;
@@ -542,11 +533,11 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
return rc;
/* Stage 2: Turn the RGMII delay lines on. */
- if (priv->rgmii_rx_delay[port]) {
+ if (rx_delay) {
pad_mii_id.rxc_bypass = 0;
pad_mii_id.rxc_pd = 0;
}
- if (priv->rgmii_tx_delay[port]) {
+ if (tx_delay) {
pad_mii_id.txc_bypass = 0;
pad_mii_id.txc_pd = 0;
}
@@ -561,20 +552,22 @@ int sja1110_setup_rgmii_delay(const void *ctx, int port)
const struct sja1105_private *priv = ctx;
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ int rx_delay = priv->rgmii_rx_delay_ps[port];
+ int tx_delay = priv->rgmii_tx_delay_ps[port];
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
pad_mii_id.rxc_pd = 1;
pad_mii_id.txc_pd = 1;
- if (priv->rgmii_rx_delay[port]) {
- pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
+ if (rx_delay) {
+ pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
/* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */
pad_mii_id.rxc_bypass = 1;
pad_mii_id.rxc_pd = 0;
}
- if (priv->rgmii_tx_delay[port]) {
- pad_mii_id.txc_delay = sja1105_rgmii_delay(90);
+ if (tx_delay) {
+ pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
pad_mii_id.txc_bypass = 1;
pad_mii_id.txc_pd = 0;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 05c7f4ca3b1a..0569ff066634 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
- * Copyright 2020 NXP Semiconductors
+ * Copyright 2020 NXP
*/
#include "sja1105.h"
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 6c10ffa968ce..72b9b39b0989 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020, NXP Semiconductors
+/* Copyright 2020 NXP
*/
#include "sja1105.h"
#include "sja1105_vl.h"
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 2f8cc6686c38..1832d4bd3440 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -27,15 +27,29 @@
#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
-static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
- unsigned int startup_delay)
+/* Configure the optional reset pin and bring up the switch */
+static int sja1105_hw_reset(struct device *dev, unsigned int pulse_len,
+ unsigned int startup_delay)
{
+ struct gpio_desc *gpio;
+
+ gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ if (!gpio)
+ return 0;
+
gpiod_set_value_cansleep(gpio, 1);
/* Wait for minimum reset pulse length */
msleep(pulse_len);
gpiod_set_value_cansleep(gpio, 0);
/* Wait until chip is ready after reset */
msleep(startup_delay);
+
+ gpiod_put(gpio);
+
+ return 0;
}
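
The reworked reset helper is an instance of the optional-resource idiom: a NULL descriptor means "no reset pin wired up" and is not an error, while a real failure propagates. Stripped of the gpiod specifics, the shape is roughly (hypothetical names, sketch only):

#include <stdio.h>

struct pin { int dummy; };

/* Hypothetical acquire: returns NULL when the optional pin is absent,
 * which is a valid, successful outcome rather than an error.
 */
static struct pin *acquire_optional_reset(int wired)
{
    static struct pin p;
    return wired ? &p : NULL;
}

static int hw_reset(int wired)
{
    struct pin *pin = acquire_optional_reset(wired);

    if (!pin)
        return 0; /* no pin to pulse: done, successfully */

    printf("pulse reset, wait, release pin\n");
    return 0;
}

int main(void)
{
    hw_reset(0); /* absent pin: silently succeeds */
    hw_reset(1); /* wired pin: pulses and releases */
    return 0;
}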
static void
@@ -1095,27 +1109,78 @@ static int sja1105_static_config_load(struct sja1105_private *priv)
return sja1105_static_config_upload(priv);
}
-static int sja1105_parse_rgmii_delays(struct sja1105_private *priv)
+/* This is the "new way" for a MAC driver to configure its RGMII delay lines,
+ * based on the explicit "rx-internal-delay-ps" and "tx-internal-delay-ps"
+ * properties. It has the advantage of working with fixed links and with PHYs
+ * that apply RGMII delays too, and the MAC driver need not perform any
+ * special checks.
+ *
+ * Previously we were acting upon the "phy-mode" property when we were
+ * operating in fixed-link, basically acting as a PHY, but with a reversed
+ * interpretation: PHY_INTERFACE_MODE_RGMII_TXID means that the MAC should
+ * behave as if it is connected to a PHY which has applied RGMII delays in the
+ * TX direction. So if anything, RX delays should have been added by the MAC,
+ * but we were adding TX delays.
+ *
+ * If the "{rx,tx}-internal-delay-ps" properties are not specified, we fall
+ * back to the legacy behavior and apply delays on fixed-link ports based on
+ * the reverse interpretation of the phy-mode. This is a deviation from the
+ * expected default behavior, which is to simply apply no delays. To achieve
+ * that behavior with the new bindings, it is mandatory to specify
+ * "{rx,tx}-internal-delay-ps" with a value of 0.
+ */
+static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, int port,
+ struct device_node *port_dn)
{
- struct dsa_switch *ds = priv->ds;
- int port;
+ phy_interface_t phy_mode = priv->phy_mode[port];
+ struct device *dev = &priv->spidev->dev;
+ int rx_delay = -1, tx_delay = -1;
- for (port = 0; port < ds->num_ports; port++) {
- if (!priv->fixed_link[port])
- continue;
+ if (!phy_interface_mode_is_rgmii(phy_mode))
+ return 0;
- if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_RXID ||
- priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
- priv->rgmii_rx_delay[port] = true;
+ of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
+ of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
- if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_TXID ||
- priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
- priv->rgmii_tx_delay[port] = true;
+ if (rx_delay == -1 && tx_delay == -1 && priv->fixed_link[port]) {
+ dev_warn(dev,
+ "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, "
+ "please update device tree to specify \"rx-internal-delay-ps\" and "
+ "\"tx-internal-delay-ps\"",
+ port);
- if ((priv->rgmii_rx_delay[port] || priv->rgmii_tx_delay[port]) &&
- !priv->info->setup_rgmii_delay)
- return -EINVAL;
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ rx_delay = 2000;
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ tx_delay = 2000;
+ }
+
+ if (rx_delay < 0)
+ rx_delay = 0;
+ if (tx_delay < 0)
+ tx_delay = 0;
+
+ if ((rx_delay || tx_delay) && !priv->info->setup_rgmii_delay) {
+ dev_err(dev, "Chip cannot apply RGMII delays\n");
+ return -EINVAL;
+ }
+
+ if ((rx_delay && rx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
+ (tx_delay && tx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
+ (rx_delay > SJA1105_RGMII_DELAY_MAX_PS) ||
+ (tx_delay > SJA1105_RGMII_DELAY_MAX_PS)) {
+ dev_err(dev,
+ "port %d RGMII delay values out of range, must be between %d and %d ps\n",
+ port, SJA1105_RGMII_DELAY_MIN_PS, SJA1105_RGMII_DELAY_MAX_PS);
+ return -ERANGE;
}
+
+ priv->rgmii_rx_delay_ps[port] = rx_delay;
+ priv->rgmii_tx_delay_ps[port] = tx_delay;
+
return 0;
}
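
The precedence implemented above (explicit DT values win; the reversed phy-mode fallback applies only to fixed links with no properties; explicit zeros opt out) can be condensed into a pure function. A sketch, with -1 standing for an absent property as in the driver:

#include <stdio.h>

enum { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

/* dt_rx/dt_tx are -1 when the DT property is absent, mirroring the driver. */
static void resolve_delays(int mode, int fixed_link, int dt_rx, int dt_tx,
                           int *rx, int *tx)
{
    if (dt_rx == -1 && dt_tx == -1 && fixed_link) {
        /* legacy fallback: reversed phy-mode interpretation */
        if (mode == RGMII_RXID || mode == RGMII_ID)
            dt_rx = 2000;
        if (mode == RGMII_TXID || mode == RGMII_ID)
            dt_tx = 2000;
    }
    *rx = dt_rx < 0 ? 0 : dt_rx;
    *tx = dt_tx < 0 ? 0 : dt_tx;
}

int main(void)
{
    int rx, tx;

    resolve_delays(RGMII_ID, 1, -1, -1, &rx, &tx);
    printf("legacy fixed-link rgmii-id: rx=%d tx=%d\n", rx, tx);
    resolve_delays(RGMII_ID, 1, 0, 0, &rx, &tx);
    printf("explicit zeros:            rx=%d tx=%d\n", rx, tx);
    return 0;
}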
@@ -1166,6 +1231,10 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
}
priv->phy_mode[index] = phy_mode;
+
+ err = sja1105_parse_rgmii_delays(priv, index, child);
+ if (err)
+ return err;
}
return 0;
@@ -1766,6 +1835,7 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
int i;
@@ -1802,7 +1872,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
- if (!priv->vlan_aware)
+ if (!dsa_port_is_vlan_filtering(dp))
l2_lookup.vlanid = 0;
rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
if (rc)
@@ -2295,11 +2365,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
tpid2 = ETH_P_SJA1105;
}
- if (priv->vlan_aware == enabled)
- return 0;
-
- priv->vlan_aware = enabled;
-
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
@@ -2332,7 +2397,7 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
*/
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
l2_lookup_params = table->entries;
- l2_lookup_params->shared_learn = !priv->vlan_aware;
+ l2_lookup_params->shared_learn = !enabled;
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
@@ -2965,7 +3030,6 @@ static int sja1105_setup_ports(struct sja1105_private *priv)
continue;
dp->priv = sp;
- sp->dp = dp;
sp->data = tagger_data;
slave = dp->slave;
kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
@@ -3117,7 +3181,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
sja1105_static_config_free(&priv->static_config);
}
-const struct dsa_switch_ops sja1105_switch_ops = {
+static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
.setup = sja1105_setup,
.teardown = sja1105_teardown,
@@ -3166,7 +3230,6 @@ const struct dsa_switch_ops sja1105_switch_ops = {
.port_bridge_tx_fwd_offload = dsa_tag_8021q_bridge_tx_fwd_offload,
.port_bridge_tx_fwd_unoffload = dsa_tag_8021q_bridge_tx_fwd_unoffload,
};
-EXPORT_SYMBOL_GPL(sja1105_switch_ops);
static const struct of_device_id sja1105_dt_ids[];
@@ -3230,17 +3293,14 @@ static int sja1105_probe(struct spi_device *spi)
return -EINVAL;
}
+ rc = sja1105_hw_reset(dev, 1, 1);
+ if (rc)
+ return rc;
+
priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- /* Configure the optional reset pin and bring up switch */
- priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(priv->reset_gpio))
- dev_dbg(dev, "reset-gpios not defined, ignoring\n");
- else
- sja1105_hw_reset(priv->reset_gpio, 1, 1);
-
/* Populate our driver private structure (priv) based on
* the device tree node that was probed (spi)
*/
@@ -3312,15 +3372,6 @@ static int sja1105_probe(struct spi_device *spi)
return rc;
}
- /* Error out early if internal delays are required through DT
- * and we can't apply them.
- */
- rc = sja1105_parse_rgmii_delays(priv);
- if (rc < 0) {
- dev_err(ds->dev, "RGMII delay not supported\n");
- return rc;
- }
-
if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
sizeof(struct sja1105_cbs_entry),
@@ -3335,13 +3386,29 @@ static int sja1105_probe(struct spi_device *spi)
static int sja1105_remove(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
- struct dsa_switch *ds = priv->ds;
- dsa_unregister_switch(ds);
+ if (!priv)
+ return 0;
+
+ dsa_unregister_switch(priv->ds);
+
+ spi_set_drvdata(spi, NULL);
return 0;
}
+static void sja1105_shutdown(struct spi_device *spi)
+{
+ struct sja1105_private *priv = spi_get_drvdata(spi);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ spi_set_drvdata(spi, NULL);
+}
+
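
This remove/shutdown pairing recurs in every bus driver touched later in the patch: whichever callback runs first tears the switch down and clears drvdata, so the other degenerates to a no-op. A bus-agnostic sketch of the idiom (hypothetical names):

#include <stdio.h>

static int drvdata_set = 1; /* stand-in for non-NULL bus drvdata */

static void teardown(const char *who)
{
    if (!drvdata_set)
        return; /* the other callback already ran */
    printf("%s: unregistering switch\n", who);
    drvdata_set = 0; /* make the second invocation a no-op */
}

int main(void)
{
    teardown("shutdown"); /* e.g. called at reboot */
    teardown("remove");   /* now a no-op */
    return 0;
}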
static const struct of_device_id sja1105_dt_ids[] = {
{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
@@ -3365,6 +3432,7 @@ static struct spi_driver sja1105_driver = {
},
.probe = sja1105_probe,
.remove = sja1105_remove,
+ .shutdown = sja1105_shutdown,
};
module_spi_driver(sja1105_driver);
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 705d3900e43a..215dd17ca790 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2021, NXP Semiconductors
+/* Copyright 2021 NXP
*/
#include <linux/pcs/pcs-xpcs.h>
#include <linux/of_mdio.h>
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 691f6dd7e669..54396992a919 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -64,6 +64,7 @@ enum sja1105_ptp_clk_mode {
static int sja1105_change_rxtstamping(struct sja1105_private *priv,
bool on)
{
+ struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct sja1105_general_params_entry *general_params;
struct sja1105_table *table;
@@ -79,7 +80,7 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
priv->tagger_data.stampable_skb = NULL;
}
ptp_cancel_worker_sync(ptp_data->clock);
- skb_queue_purge(&ptp_data->skb_txtstamp_queue);
+ skb_queue_purge(&tagger_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
@@ -452,40 +453,6 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
return priv->info->rxtstamp(ds, port, skb);
}
-void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
- enum sja1110_meta_tstamp dir, u64 tstamp)
-{
- struct sja1105_private *priv = ds->priv;
- struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
- struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
- struct skb_shared_hwtstamps shwt = {0};
-
- /* We don't care about RX timestamps on the CPU port */
- if (dir == SJA1110_META_TSTAMP_RX)
- return;
-
- spin_lock(&ptp_data->skb_txtstamp_queue.lock);
-
- skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) {
- if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
- continue;
-
- __skb_unlink(skb, &ptp_data->skb_txtstamp_queue);
- skb_match = skb;
-
- break;
- }
-
- spin_unlock(&ptp_data->skb_txtstamp_queue.lock);
-
- if (WARN_ON(!skb_match))
- return;
-
- shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
- skb_complete_tx_timestamp(skb_match, &shwt);
-}
-EXPORT_SYMBOL_GPL(sja1110_process_meta_tstamp);
-
/* In addition to cloning the skb which is done by the common
* sja1105_port_txtstamp, we need to generate a timestamp ID and save the
* packet to the TX timestamping queue.
@@ -494,7 +461,6 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
struct sja1105_private *priv = ds->priv;
- struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct sja1105_port *sp = &priv->ports[port];
u8 ts_id;
@@ -510,7 +476,7 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
spin_unlock(&sp->data->meta_lock);
- skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone);
+ skb_queue_tail(&sp->data->skb_txtstamp_queue, clone);
}
/* Called from dsa_skb_tx_timestamp. This callback is just to clone
@@ -953,7 +919,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
/* Only used on SJA1105 */
skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
/* Only used on SJA1110 */
- skb_queue_head_init(&ptp_data->skb_txtstamp_queue);
+ skb_queue_head_init(&tagger_data->skb_txtstamp_queue);
spin_lock_init(&tagger_data->meta_lock);
ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
@@ -971,6 +937,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
+ struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
if (IS_ERR_OR_NULL(ptp_data->clock))
@@ -978,7 +945,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
del_timer_sync(&ptp_data->extts_timer);
ptp_cancel_worker_sync(ptp_data->clock);
- skb_queue_purge(&ptp_data->skb_txtstamp_queue);
+ skb_queue_purge(&tagger_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
ptp_clock_unregister(ptp_data->clock);
ptp_data->clock = NULL;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 3c874bb4c17b..3ae6b9fdd492 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -8,21 +8,6 @@
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
-/* Timestamps are in units of 8 ns clock ticks (equivalent to
- * a fixed 125 MHz clock).
- */
-#define SJA1105_TICK_NS 8
-
-static inline s64 ns_to_sja1105_ticks(s64 ns)
-{
- return ns / SJA1105_TICK_NS;
-}
-
-static inline s64 sja1105_ticks_to_ns(s64 ticks)
-{
- return ticks * SJA1105_TICK_NS;
-}
-
/* Calculate the first base_time in the future that satisfies this
* relationship:
*
@@ -77,10 +62,6 @@ struct sja1105_ptp_data {
struct timer_list extts_timer;
/* Used only on SJA1105 to reconstruct partial timestamps */
struct sk_buff_head skb_rxtstamp_queue;
- /* Used on SJA1110 where meta frames are generated only for
- * 2-step TX timestamps
- */
- struct sk_buff_head skb_txtstamp_queue;
struct ptp_clock_info caps;
struct ptp_clock *clock;
struct sja1105_ptp_cmd cmd;
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index d60a530d0272..d3c9ad6d39d4 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
*/
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index 7a422ef4deb6..baba204ad62f 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
*/
#include "sja1105_static_config.h"
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
index bce0f5c03d0b..6a372d5f22ae 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
*/
#ifndef _SJA1105_STATIC_CONFIG_H
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index ec7b65daec20..f5dca6a9b0f9 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020, NXP Semiconductors
+/* Copyright 2020 NXP
*/
#include <net/tc_act/tc_gate.h>
#include <linux/dsa/8021q.h>
@@ -394,7 +394,8 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv,
vl_lookup[k].vlanid = rule->key.vl.vid;
vl_lookup[k].vlanprior = rule->key.vl.pcp;
} else {
- u16 vid = dsa_8021q_rx_vid(priv->ds, port);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ u16 vid = dsa_tag_8021q_rx_vid(dp);
vl_lookup[k].vlanid = vid;
vl_lookup[k].vlanprior = 0;
@@ -494,13 +495,15 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port,
bool append)
{
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ bool vlan_aware = dsa_port_is_vlan_filtering(dp);
int rc;
- if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on DMAC");
return -EOPNOTSUPP;
- } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
@@ -568,6 +571,8 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,
u32 num_entries, struct action_gate_entry *entries)
{
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ bool vlan_aware = dsa_port_is_vlan_filtering(dp);
int ipv = -1;
int i, rc;
s32 rem;
@@ -592,11 +597,11 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,
return -ERANGE;
}
- if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on DMAC");
return -EOPNOTSUPP;
- } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.h b/drivers/net/dsa/sja1105/sja1105_vl.h
index 173d78963fed..51fba0dce91a 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.h
+++ b/drivers/net/dsa/sja1105/sja1105_vl.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright 2020, NXP Semiconductors
+/* Copyright 2020 NXP
*/
#ifndef _SJA1105_VL_H
#define _SJA1105_VL_H
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 19ce4aa0973b..a4b1447ff055 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -1225,6 +1225,12 @@ int vsc73xx_remove(struct vsc73xx *vsc)
}
EXPORT_SYMBOL(vsc73xx_remove);
+void vsc73xx_shutdown(struct vsc73xx *vsc)
+{
+ dsa_switch_shutdown(vsc->ds);
+}
+EXPORT_SYMBOL(vsc73xx_shutdown);
+
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c
index 2a57f337b2a2..fe4b154a0a57 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-platform.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c
@@ -116,7 +116,26 @@ static int vsc73xx_platform_remove(struct platform_device *pdev)
{
struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
- return vsc73xx_remove(&vsc_platform->vsc);
+ if (!vsc_platform)
+ return 0;
+
+ vsc73xx_remove(&vsc_platform->vsc);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void vsc73xx_platform_shutdown(struct platform_device *pdev)
+{
+ struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
+
+ if (!vsc_platform)
+ return;
+
+ vsc73xx_shutdown(&vsc_platform->vsc);
+
+ platform_set_drvdata(pdev, NULL);
}
static const struct vsc73xx_ops vsc73xx_platform_ops = {
@@ -144,6 +163,7 @@ MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
static struct platform_driver vsc73xx_platform_driver = {
.probe = vsc73xx_platform_probe,
.remove = vsc73xx_platform_remove,
+ .shutdown = vsc73xx_platform_shutdown,
.driver = {
.name = "vsc73xx-platform",
.of_match_table = vsc73xx_of_match,
diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
index 81eca4a5781d..645398901e05 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-spi.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
@@ -163,7 +163,26 @@ static int vsc73xx_spi_remove(struct spi_device *spi)
{
struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
- return vsc73xx_remove(&vsc_spi->vsc);
+ if (!vsc_spi)
+ return 0;
+
+ vsc73xx_remove(&vsc_spi->vsc);
+
+ spi_set_drvdata(spi, NULL);
+
+ return 0;
+}
+
+static void vsc73xx_spi_shutdown(struct spi_device *spi)
+{
+ struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
+
+ if (!vsc_spi)
+ return;
+
+ vsc73xx_shutdown(&vsc_spi->vsc);
+
+ spi_set_drvdata(spi, NULL);
}
static const struct vsc73xx_ops vsc73xx_spi_ops = {
@@ -191,6 +210,7 @@ MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
static struct spi_driver vsc73xx_spi_driver = {
.probe = vsc73xx_spi_probe,
.remove = vsc73xx_spi_remove,
+ .shutdown = vsc73xx_spi_shutdown,
.driver = {
.name = "vsc73xx-spi",
.of_match_table = vsc73xx_of_match,
diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h
index 7478f8d4e0a9..30b951504e65 100644
--- a/drivers/net/dsa/vitesse-vsc73xx.h
+++ b/drivers/net/dsa/vitesse-vsc73xx.h
@@ -27,3 +27,4 @@ struct vsc73xx_ops {
int vsc73xx_is_addr_valid(u8 block, u8 subblock);
int vsc73xx_probe(struct vsc73xx *vsc);
int vsc73xx_remove(struct vsc73xx *vsc);
+void vsc73xx_shutdown(struct vsc73xx *vsc);
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 130abb0f1438..469420941054 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -822,6 +822,12 @@ void xrs700x_switch_remove(struct xrs700x *priv)
}
EXPORT_SYMBOL(xrs700x_switch_remove);
+void xrs700x_switch_shutdown(struct xrs700x *priv)
+{
+ dsa_switch_shutdown(priv->ds);
+}
+EXPORT_SYMBOL(xrs700x_switch_shutdown);
+
MODULE_AUTHOR("George McCollister <[email protected]>");
MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/xrs700x/xrs700x.h b/drivers/net/dsa/xrs700x/xrs700x.h
index ff62cf61b091..4d58257471d2 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.h
+++ b/drivers/net/dsa/xrs700x/xrs700x.h
@@ -40,3 +40,4 @@ struct xrs700x {
struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv);
int xrs700x_switch_register(struct xrs700x *priv);
void xrs700x_switch_remove(struct xrs700x *priv);
+void xrs700x_switch_shutdown(struct xrs700x *priv);
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
index 489d9385b4f0..6deae388a0d6 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_i2c.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -109,11 +109,28 @@ static int xrs700x_i2c_remove(struct i2c_client *i2c)
{
struct xrs700x *priv = i2c_get_clientdata(i2c);
+ if (!priv)
+ return 0;
+
xrs700x_switch_remove(priv);
+ i2c_set_clientdata(i2c, NULL);
+
return 0;
}
+static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
+{
+ struct xrs700x *priv = i2c_get_clientdata(i2c);
+
+ if (!priv)
+ return;
+
+ xrs700x_switch_shutdown(priv);
+
+ i2c_set_clientdata(i2c, NULL);
+}
+
static const struct i2c_device_id xrs700x_i2c_id[] = {
{ "xrs700x-switch", 0 },
{},
@@ -137,6 +154,7 @@ static struct i2c_driver xrs700x_i2c_driver = {
},
.probe = xrs700x_i2c_probe,
.remove = xrs700x_i2c_remove,
+ .shutdown = xrs700x_i2c_shutdown,
.id_table = xrs700x_i2c_id,
};
diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
index 44f58bee04a4..d01cf1073d49 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_mdio.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
@@ -136,7 +136,24 @@ static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
{
struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
+ if (!priv)
+ return;
+
xrs700x_switch_remove(priv);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void xrs700x_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ xrs700x_switch_shutdown(priv);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id __maybe_unused xrs700x_mdio_dt_ids[] = {
@@ -155,6 +172,7 @@ static struct mdio_driver xrs700x_mdio_driver = {
},
.probe = xrs700x_mdio_probe,
.remove = xrs700x_mdio_remove,
+ .shutdown = xrs700x_mdio_shutdown,
};
mdio_module_driver(xrs700x_mdio_driver);
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 87c906e744fb..846fa3af4504 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -270,7 +270,7 @@ static void el3_dev_fill(struct net_device *dev, __be16 *phys_addr, int ioaddr,
{
struct el3_private *lp = netdev_priv(dev);
- memcpy(dev->dev_addr, phys_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, (u8 *)phys_addr);
dev->base_addr = ioaddr;
dev->irq = irq;
dev->if_port = if_port;
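
The long run of conversions that begins here is mechanical: dev->dev_addr is no longer written in place, so drivers assemble the address into a local buffer and commit it through eth_hw_addr_set() in one call. A self-contained sketch of the shape of the change (the struct and setter below are stand-ins, not the real netdev definitions):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct net_device { unsigned char dev_addr[ETH_ALEN]; };

/* Stand-in for the real eth_hw_addr_set(): the single place where
 * dev->dev_addr may be written once the field is otherwise const.
 */
static void eth_hw_addr_set(struct net_device *dev, const unsigned char *addr)
{
    memcpy(dev->dev_addr, addr, ETH_ALEN);
}

int main(void)
{
    struct net_device dev;
    unsigned char addr[ETH_ALEN];

    for (int i = 0; i < ETH_ALEN; i++)
        addr[i] = i; /* e.g. bytes read from an EEPROM */
    eth_hw_addr_set(&dev, addr); /* instead of dev.dev_addr[i] = ... */
    printf("%02x:%02x\n", dev.dev_addr[0], dev.dev_addr[5]);
    return 0;
}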
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 6f0ea2facea9..1d124b0f65e7 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -567,6 +567,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
{
struct corkscrew_private *vp = netdev_priv(dev);
unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
+ __be16 addr[ETH_ALEN / 2];
int i;
int irq;
@@ -619,7 +620,6 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
/* Read the station address from the EEPROM. */
EL3WINDOW(0);
for (i = 0; i < 0x18; i++) {
- __be16 *phys_addr = (__be16 *) dev->dev_addr;
int timer;
outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd);
/* Pause for at least 162 us for the read to take place. */
@@ -631,8 +631,9 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
eeprom[i] = inw(ioaddr + Wn0EepromData);
checksum ^= eeprom[i];
if (i < 3)
- phys_addr[i] = htons(eeprom[i]);
+ addr[i] = htons(eeprom[i]);
}
+ eth_hw_addr_set(dev, (u8 *)addr);
checksum = (checksum ^ (checksum >> 8)) & 0xff;
if (checksum != 0x00)
pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index dd4d3c48b98d..dc3b7c960611 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -305,15 +305,13 @@ static int tc574_config(struct pcmcia_device *link)
struct net_device *dev = link->priv;
struct el3_private *lp = netdev_priv(dev);
int ret, i, j;
+ __be16 addr[ETH_ALEN / 2];
unsigned int ioaddr;
- __be16 *phys_addr;
char *cardname;
__u32 config;
u8 *buf;
size_t len;
- phys_addr = (__be16 *)dev->dev_addr;
-
dev_dbg(&link->dev, "3c574_config()\n");
link->io_lines = 16;
@@ -347,19 +345,20 @@ static int tc574_config(struct pcmcia_device *link)
len = pcmcia_get_tuple(link, 0x88, &buf);
if (buf && len >= 6) {
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(le16_to_cpu(buf[i * 2]));
+ addr[i] = htons(le16_to_cpu(buf[i * 2]));
kfree(buf);
} else {
kfree(buf); /* 0 < len < 6 */
EL3WINDOW(0);
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(read_eeprom(ioaddr, i + 10));
- if (phys_addr[0] == htons(0x6060)) {
+ addr[i] = htons(read_eeprom(ioaddr, i + 10));
+ if (addr[0] == htons(0x6060)) {
pr_notice("IO port conflict at 0x%03lx-0x%03lx\n",
dev->base_addr, dev->base_addr+15);
goto failed;
}
}
+ eth_hw_addr_set(dev, (u8 *)addr);
if (link->prod_id[1])
cardname = link->prod_id[1];
else
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 09816e84314d..4673bc1604e7 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -237,8 +237,8 @@ static void tc589_detach(struct pcmcia_device *link)
static int tc589_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- __be16 *phys_addr;
int ret, i, j, multi = 0, fifo;
+ __be16 addr[ETH_ALEN / 2];
unsigned int ioaddr;
static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
u8 *buf;
@@ -246,7 +246,6 @@ static int tc589_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "3c589_config\n");
- phys_addr = (__be16 *)dev->dev_addr;
/* Is this a 3c562? */
if (link->manf_id != MANFID_3COM)
dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
@@ -285,18 +284,19 @@ static int tc589_config(struct pcmcia_device *link)
len = pcmcia_get_tuple(link, 0x88, &buf);
if (buf && len >= 6) {
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
+ addr[i] = htons(le16_to_cpu(buf[i*2]));
kfree(buf);
} else {
kfree(buf); /* 0 < len < 6 */
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(read_eeprom(ioaddr, i));
- if (phys_addr[0] == htons(0x6060)) {
+ addr[i] = htons(read_eeprom(ioaddr, i));
+ if (addr[0] == htons(0x6060)) {
dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
dev->base_addr, dev->base_addr+15);
goto failed;
}
}
+ eth_hw_addr_set(dev, (u8 *)addr);
/* The address and resource configuration registers aren't loaded from
* the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version.
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 7b0ae9efc004..ccf07667aa5e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1091,6 +1091,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
struct vortex_private *vp;
int option;
unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
+ __be16 addr[ETH_ALEN / 2];
int i, step;
struct net_device *dev;
static int printed_version;
@@ -1284,7 +1285,8 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
for (i = 0; i < 3; i++)
- ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
+ addr[i] = htons(eeprom[i + 10]);
+ eth_hw_addr_set(dev, (u8 *)addr);
if (print_info)
pr_cont(" %pM", dev->dev_addr);
/* Unfortunately an all zero eeprom passes the checksum and this
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index da1ae37a9d73..991ad953aa79 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -320,8 +320,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev);
if (i) return i;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = SA_prom[i];
+ eth_hw_addr_set(dev, SA_prom);
pr_cont(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 6c6bdd5913ec..1f8acbba5b6b 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -716,7 +716,7 @@ static int ax_init_dev(struct net_device *dev)
for (i = 0; i < 16; i++)
SA_prom[i] = SA_prom[i+i];
- memcpy(dev->dev_addr, SA_prom, ETH_ALEN);
+ eth_hw_addr_set(dev, SA_prom);
}
#ifdef CONFIG_AX88796_93CX6
@@ -733,7 +733,7 @@ static int ax_init_dev(struct net_device *dev)
(__le16 __force *)mac_addr,
sizeof(mac_addr) >> 1);
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, mac_addr);
}
#endif
if (ax->plat->wordlength == 2) {
@@ -748,16 +748,18 @@ static int ax_init_dev(struct net_device *dev)
/* load the mac-address from the device */
if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
+ u8 addr[ETH_ALEN];
+
ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
ei_local->mem + E8390_CMD); /* 0x61 */
for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] =
- ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
+ addr[i] = ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
+ eth_hw_addr_set(dev, addr);
}
if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
ax->plat->mac_addr)
- memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, ax->plat->mac_addr);
if (!is_valid_ether_addr(dev->dev_addr)) {
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 3c370e686ec3..3aef959fc25b 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -187,6 +187,7 @@ static int get_prom(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
unsigned int ioaddr = dev->base_addr;
+ u8 addr[ETH_ALEN];
int i, j;
/* This is based on drivers/net/ethernet/8390/ne.c */
@@ -220,9 +221,11 @@ static int get_prom(struct pcmcia_device *link)
for (i = 0; i < 6; i += 2) {
j = inw(ioaddr + AXNET_DATAPORT);
- dev->dev_addr[i] = j & 0xff;
- dev->dev_addr[i+1] = j >> 8;
+ addr[i] = j & 0xff;
+ addr[i+1] = j >> 8;
}
+ eth_hw_addr_set(dev, addr);
+
return 1;
} /* get_prom */
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 4ad8031ab669..e320cccba61a 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -374,8 +374,7 @@ static int mcf8390_init(struct net_device *dev)
if (ret)
return ret;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = SA_prom[i];
+ eth_hw_addr_set(dev, SA_prom);
netdev_dbg(dev, "Found ethernet address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 9afc712f5948..0a9118b8be0c 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -500,9 +500,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
dev->base_addr = ioaddr;
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = SA_prom[i];
- }
+ eth_hw_addr_set(dev, SA_prom);
pr_cont("%pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index d6715008e04d..6a0a2039600a 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -390,7 +390,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
dev->ethtool_ops = &ne2k_pci_ethtool_ops;
NS8390_init(dev, 0);
- memcpy(dev->dev_addr, SA_prom, dev->addr_len);
+ eth_hw_addr_set(dev, SA_prom);
i = register_netdev(dev);
if (i)
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 96ad72abd373..0f07fe03da98 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -278,6 +278,7 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
u_char __iomem *base, *virt;
+ u8 addr[ETH_ALEN];
int i, j;
/* Allocate a small memory window */
@@ -302,7 +303,8 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link)
(readb(base+2) == hw_info[i].a1) &&
(readb(base+4) == hw_info[i].a2)) {
for (j = 0; j < 6; j++)
- dev->dev_addr[j] = readb(base + (j<<1));
+ addr[j] = readb(base + (j<<1));
+ eth_hw_addr_set(dev, addr);
break;
}
}
@@ -324,6 +326,7 @@ static struct hw_info *get_prom(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
unsigned int ioaddr = dev->base_addr;
+ u8 addr[ETH_ALEN];
u_char prom[32];
int i, j;
@@ -362,7 +365,8 @@ static struct hw_info *get_prom(struct pcmcia_device *link)
}
if ((i < NR_INFO) || ((prom[28] == 0x57) && (prom[30] == 0x57))) {
for (j = 0; j < 6; j++)
- dev->dev_addr[j] = prom[j<<1];
+ addr[j] = prom[j<<1];
+ eth_hw_addr_set(dev, addr);
return (i < NR_INFO) ? hw_info+i : &default_info;
}
return NULL;
@@ -377,6 +381,7 @@ static struct hw_info *get_prom(struct pcmcia_device *link)
static struct hw_info *get_dl10019(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ u8 addr[ETH_ALEN];
int i;
u_char sum;
@@ -385,7 +390,8 @@ static struct hw_info *get_dl10019(struct pcmcia_device *link)
if (sum != 0xff)
return NULL;
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb_p(dev->base_addr + 0x14 + i);
+ addr[i] = inb_p(dev->base_addr + 0x14 + i);
+ eth_hw_addr_set(dev, addr);
i = inb(dev->base_addr + 0x1f);
return ((i == 0x91)||(i == 0x99)) ? &dl10022_info : &dl10019_info;
}
@@ -400,6 +406,7 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
unsigned int ioaddr = dev->base_addr;
+ u8 addr[ETH_ALEN];
int i, j;
/* Not much of a test, but the alternatives are messy */
@@ -413,9 +420,10 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link)
for (i = 0; i < 6; i += 2) {
j = inw(ioaddr + PCNET_DATAPORT);
- dev->dev_addr[i] = j & 0xff;
- dev->dev_addr[i+1] = j >> 8;
+ addr[i] = j & 0xff;
+ addr[i+1] = j >> 8;
}
+ eth_hw_addr_set(dev, addr);
return NULL;
}
@@ -430,6 +438,7 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link)
static struct hw_info *get_hwired(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ u8 addr[ETH_ALEN];
int i;
for (i = 0; i < 6; i++)
@@ -438,7 +447,8 @@ static struct hw_info *get_hwired(struct pcmcia_device *link)
return NULL;
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = hw_addr[i];
+ addr[i] = hw_addr[i];
+ eth_hw_addr_set(dev, addr);
return &default_info;
} /* get_hwired */
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index fbbd7f22c142..bd89ca8a92df 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -104,8 +104,8 @@ STNIC_WRITE (int reg, byte val)
static int __init stnic_probe(void)
{
struct net_device *dev;
- int i, err;
struct ei_device *ei_local;
+ int err;
/* If we are not running on a SolutionEngine, give up now */
if (! MACH_SE)
@@ -119,8 +119,7 @@ static int __init stnic_probe(void)
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_get_node_addr (stnic_eadr);
#endif
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = stnic_eadr[i];
+ eth_hw_addr_set(dev, stnic_eadr);
/* Set the base address to point to the NIC, not the "real" base! */
dev->base_addr = 0x1000;
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 35a500a21521..e8b4fe813a08 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -364,8 +364,7 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
if (i)
return i;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = SA_prom[i];
+ eth_hw_addr_set(dev, SA_prom);
pr_debug("Found ethernet address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index d796684ec9ca..4601b38f532a 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -33,6 +33,7 @@ source "drivers/net/ethernet/apm/Kconfig"
source "drivers/net/ethernet/apple/Kconfig"
source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
+source "drivers/net/ethernet/asix/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
@@ -100,6 +101,7 @@ config JME
config KORINA
tristate "Korina (IDT RC32434) Ethernet support"
depends on MIKROTIK_RB532 || COMPILE_TEST
+ select CRC32
select MII
help
If you have a Mikrotik RouterBoard 500 or IDT RC32434
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index aaa5078cd7d1..fdd8c6c17451 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_NET_XGENE) += apm/
obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
+obj-$(CONFIG_NET_VENDOR_ASIX) += asix/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index c4ecf4fcadf8..1cfdd01b4c2e 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -342,7 +342,7 @@ static u32 owl_emac_dma_cmd_stop(struct owl_emac_priv *priv)
static void owl_emac_set_hw_mac_addr(struct net_device *netdev)
{
struct owl_emac_priv *priv = netdev_priv(netdev);
- u8 *mac_addr = netdev->dev_addr;
+ const u8 *mac_addr = netdev->dev_addr;
u32 addr_high, addr_low;
addr_high = mac_addr[0] << 8 | mac_addr[1];
@@ -1173,7 +1173,7 @@ static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, skaddr->sa_data);
owl_emac_set_hw_mac_addr(netdev);
return owl_emac_setup_frame_xmit(netdev_priv(netdev));
@@ -1385,7 +1385,7 @@ static void owl_emac_get_mac_addr(struct net_device *netdev)
struct device *dev = netdev->dev.parent;
int ret;
- ret = eth_platform_get_mac_address(dev, netdev->dev_addr);
+ ret = platform_get_ethdev_address(dev, netdev);
if (!ret && is_valid_ether_addr(netdev->dev_addr))
return;
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index e0f6cc910bd2..c6982f7caf9b 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -641,6 +641,7 @@ static int starfire_init_one(struct pci_dev *pdev,
struct netdev_private *np;
int i, irq, chip_idx = ent->driver_data;
struct net_device *dev;
+ u8 addr[ETH_ALEN];
long ioaddr;
void __iomem *base;
int drv_flags, io_size;
@@ -696,7 +697,8 @@ static int starfire_init_one(struct pci_dev *pdev,
/* Serial EEPROM reads are hidden by the hardware. */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);
+ addr[i] = readb(base + EEPROMCtrl + 20 - i);
+ eth_hw_addr_set(dev, addr);
#if ! defined(final_version) /* Dump the EEPROM contents during development. */
if (debug > 4)
@@ -955,7 +957,7 @@ static int netdev_open(struct net_device *dev)
writew(0, ioaddr + PerfFilterTable + 4);
writew(0, ioaddr + PerfFilterTable + 8);
for (i = 1; i < 16; i++) {
- __be16 *eaddrs = (__be16 *)dev->dev_addr;
+ const __be16 *eaddrs = (const __be16 *)dev->dev_addr;
void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
@@ -1787,14 +1789,14 @@ static void set_rx_mode(struct net_device *dev)
} else if (netdev_mc_count(dev) <= 14) {
/* Use the 16 element perfect filter, skip first two entries. */
void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
- __be16 *eaddrs;
+ const __be16 *eaddrs;
netdev_for_each_mc_addr(ha, dev) {
eaddrs = (__be16 *) ha->addr;
writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
}
- eaddrs = (__be16 *)dev->dev_addr;
+ eaddrs = (const __be16 *)dev->dev_addr;
i = netdev_mc_count(dev) + 2;
while (i++ < 16) {
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
@@ -1805,7 +1807,7 @@ static void set_rx_mode(struct net_device *dev)
} else {
/* Must use a multicast hash table. */
void __iomem *filter_addr;
- __be16 *eaddrs;
+ const __be16 *eaddrs;
__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
memset(mc_filter, 0, sizeof(mc_filter));
@@ -1819,7 +1821,7 @@ static void set_rx_mode(struct net_device *dev)
}
/* Clear the perfect filter list, skip first two entries. */
filter_addr = ioaddr + PerfFilterTable + 2 * 16;
- eaddrs = (__be16 *)dev->dev_addr;
+ eaddrs = (const __be16 *)dev->dev_addr;
for (i = 2; i < 16; i++) {
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index c560ad06f0be..447dc64a17e5 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1025,7 +1025,7 @@ static int greth_set_mac_add(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
dev->dev_addr[4] << 8 | dev->dev_addr[5]);
@@ -1346,6 +1346,7 @@ static int greth_of_probe(struct platform_device *ofdev)
int i;
int err;
int tmp;
+ u8 addr[ETH_ALEN];
unsigned long timeout;
dev = alloc_etherdev(sizeof(struct greth_private));
@@ -1449,8 +1450,6 @@ static int greth_of_probe(struct platform_device *ofdev)
break;
}
if (i == 6) {
- u8 addr[ETH_ALEN];
-
err = of_get_mac_address(ofdev->dev.of_node, addr);
if (!err) {
for (i = 0; i < 6; i++)
@@ -1464,7 +1463,8 @@ static int greth_of_probe(struct platform_device *ofdev)
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = macaddr[i];
+ addr[i] = macaddr[i];
+ eth_hw_addr_set(dev, addr);
macaddr[5]++;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 920633161174..f4edc616388c 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3863,7 +3863,7 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
et131x_init_send(adapter);
et131x_hwaddr_init(adapter);
- ether_addr_copy(netdev->dev_addr, adapter->addr);
+ eth_hw_addr_set(netdev, adapter->addr);
/* Init the device with the new settings */
et131x_adapter_setup(adapter);
@@ -3966,7 +3966,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
- ether_addr_copy(netdev->dev_addr, adapter->addr);
+ eth_hw_addr_set(netdev, adapter->addr);
rc = -ENOMEM;
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 696517eae77f..1fc9a1cd3ef8 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1008,7 +1008,7 @@ static void slic_set_link_autoneg(struct slic_device *sdev)
static void slic_set_mac_address(struct slic_device *sdev)
{
- u8 *addr = sdev->netdev->dev_addr;
+ const u8 *addr = sdev->netdev->dev_addr;
u32 val;
val = addr[5] | addr[4] << 8 | addr[3] << 16 | addr[2] << 24;
@@ -1660,7 +1660,7 @@ static int slic_read_eeprom(struct slic_device *sdev)
goto free_eeprom;
}
/* set mac address */
- ether_addr_copy(sdev->netdev->dev_addr, mac[devfn]);
+ eth_hw_addr_set(sdev->netdev, mac[devfn]);
free_eeprom:
dma_free_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, eeprom, paddr);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 037baea1c738..800ee022388f 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -356,7 +356,7 @@ static int emac_set_mac_address(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
writel(dev->dev_addr[0] << 16 | dev->dev_addr[1] << 8 | dev->
dev_addr[2], db->membase + EMAC_MAC_A1_REG);
@@ -852,7 +852,7 @@ static int emac_probe(struct platform_device *pdev)
}
/* Read MAC-address from DT */
- ret = of_get_mac_address(np, ndev->dev_addr);
+ ret = of_get_ethdev_address(np, ndev);
if (ret) {
/* if the MAC address is invalid get a random one */
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 9dc12b13061f..732da15a3827 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -869,6 +869,7 @@ static int ace_init(struct net_device *dev)
int board_idx, ecode = 0;
short i;
unsigned char cache_size;
+ u8 addr[ETH_ALEN];
ap = netdev_priv(dev);
regs = ap->regs;
@@ -988,12 +989,13 @@ static int ace_init(struct net_device *dev)
writel(mac1, &regs->MacAddrHi);
writel(mac2, &regs->MacAddrLo);
- dev->dev_addr[0] = (mac1 >> 8) & 0xff;
- dev->dev_addr[1] = mac1 & 0xff;
- dev->dev_addr[2] = (mac2 >> 24) & 0xff;
- dev->dev_addr[3] = (mac2 >> 16) & 0xff;
- dev->dev_addr[4] = (mac2 >> 8) & 0xff;
- dev->dev_addr[5] = mac2 & 0xff;
+ addr[0] = (mac1 >> 8) & 0xff;
+ addr[1] = mac1 & 0xff;
+ addr[2] = (mac2 >> 24) & 0xff;
+ addr[3] = (mac2 >> 16) & 0xff;
+ addr[4] = (mac2 >> 8) & 0xff;
+ addr[5] = mac2 & 0xff;
+ eth_hw_addr_set(dev, addr);
printk("MAC: %pM\n", dev->dev_addr);
@@ -2712,15 +2714,15 @@ static int ace_set_mac_addr(struct net_device *dev, void *p)
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
struct sockaddr *addr=p;
- u8 *da;
+ const u8 *da;
struct cmd cmd;
if(netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
- da = (u8 *)dev->dev_addr;
+ da = (const u8 *)dev->dev_addr;
writel(da[0] << 8 | da[1], &regs->MacAddrHi);
writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 1c00d719e5d7..d75d95a97dd9 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -849,7 +849,7 @@ static int init_phy(struct net_device *dev)
return 0;
}
-static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
+static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr)
{
u32 msb;
u32 lsb;
@@ -1524,7 +1524,7 @@ static int altera_tse_probe(struct platform_device *pdev)
priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
/* get default MAC address from device tree */
- ret = of_get_mac_address(pdev->dev.of_node, ndev->dev_addr);
+ ret = of_get_ethdev_address(pdev->dev.of_node, ndev);
if (ret)
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 0e43000614ab..7d5d885d85d5 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -4073,7 +4073,7 @@ static void ena_set_conf_feat_params(struct ena_adapter *adapter,
ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
} else {
ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
- ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
+ eth_hw_addr_set(netdev, adapter->mac_addr);
}
/* Set offload features */
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 4786f0504691..899c8a2a34b6 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -168,7 +168,7 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM
+ depends on (OF_ADDRESS || ACPI || PCI) && HAS_IOMEM
depends on X86 || ARM64 || COMPILE_TEST
depends on PTP_1588_CLOCK_OPTIONAL
select BITREVERSE
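(The OF_NET symbol can be dropped from the dependency list because the of_net helpers were moved into the networking core in this cycle and are available whenever OF and the net core are built; the arc/Kconfig hunk further down makes the same simplification.)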
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 92e4246dc359..9421afb950f7 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1500,7 +1500,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p)
int i;
struct sockaddr *addr = p;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&lp->lock);
/* Setting the MAC address to the device */
for (i = 0; i < ETH_ALEN; i++)
@@ -1743,6 +1743,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
unsigned long reg_addr, reg_len;
struct amd8111e_priv *lp;
struct net_device *dev;
+ u8 addr[ETH_ALEN];
err = pci_enable_device(pdev);
if (err) {
@@ -1809,7 +1810,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
/* Initializing MAC address */
for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = readb(lp->mmio + PADR + i);
+ addr[i] = readb(lp->mmio + PADR + i);
+ eth_hw_addr_set(dev, addr);
/* Setting user-defined parameters */
lp->ext_phy_option = speed_duplex[card_idx];
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 9d2f49fd945e..9c7d9690d00c 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -582,7 +582,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
switch( lp->cardtype ) {
case OLD_RIEBL:
/* No ethernet address! (Set some default address) */
- memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, OldRieblDefHwaddr);
break;
case NEW_RIEBL:
lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
@@ -1123,7 +1123,7 @@ static int lance_set_mac_address( struct net_device *dev, void *addr )
return -EIO;
}
- memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len );
+ eth_hw_addr_set(dev, saddr->sa_data);
for( i = 0; i < 6; i++ )
MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 );
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 9c1636222b99..c6f003975621 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1178,7 +1178,7 @@ static int au1000_probe(struct platform_device *pdev)
aup->phy1_search_mac0 = 1;
} else {
if (is_valid_ether_addr(pd->mac)) {
- memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->mac);
} else {
/* Set a random MAC since no valid one was provided by platform_data. */
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 4019cab87505..30ee5329bd7c 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -529,7 +529,8 @@ static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
mace_init
Resets the MACE chip.
---------------------------------------------------------------------------- */
-static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
+static int mace_init(mace_private *lp, unsigned int ioaddr,
+ const char *enet_addr)
{
int i;
int ct = 0;
@@ -635,7 +636,7 @@ static int nmclan_config(struct pcmcia_device *link)
kfree(buf);
goto failed;
}
- memcpy(dev->dev_addr, buf, ETH_ALEN);
+ eth_hw_addr_set(dev, buf);
kfree(buf);
/* Verify configuration by reading the MACE ID. */
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 70d76fdb9f56..f5c50ff377ff 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1595,6 +1595,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
struct net_device *dev;
const struct pcnet32_access *a = NULL;
u8 promaddr[ETH_ALEN];
+ u8 addr[ETH_ALEN];
int ret = -ENODEV;
/* reset the chip */
@@ -1760,9 +1761,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
unsigned int val;
val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
/* There may be endianness issues here. */
- dev->dev_addr[2 * i] = val & 0x0ff;
- dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
+ addr[2 * i] = val & 0x0ff;
+ addr[2 * i + 1] = (val >> 8) & 0x0ff;
}
+ eth_hw_addr_set(dev, addr);
/* read PROM address and compare with CSR address */
for (i = 0; i < ETH_ALEN; i++)
@@ -1775,13 +1777,16 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
pr_cont(" warning: CSR address invalid,\n");
pr_info(" using instead PROM address of");
}
- memcpy(dev->dev_addr, promaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, promaddr);
}
}
/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
- if (!is_valid_ether_addr(dev->dev_addr))
- eth_zero_addr(dev->dev_addr);
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ static const u8 zero_addr[ETH_ALEN] = {};
+
+ eth_hw_addr_set(dev, zero_addr);
+ }
if (pcnet32_debug & NETIF_MSG_PROBE) {
pr_cont(" %pM", dev->dev_addr);
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 4a845bc071b2..007bd7787291 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -305,7 +305,6 @@ static int __init lance_probe( struct net_device *dev)
unsigned long ioaddr;
struct lance_private *lp;
- int i;
static int did_version;
volatile unsigned short *ioaddr_probe;
unsigned short tmp1, tmp2;
@@ -373,8 +372,7 @@ static int __init lance_probe( struct net_device *dev)
dev->irq);
/* copy in the ethernet address from the prom */
- for(i = 0; i < 6 ; i++)
- dev->dev_addr[i] = idprom->id_ethaddr[i];
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
/* tell the card its ether address, bytes swapped */
MEM->init.hwaddr[0] = dev->dev_addr[1];
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index ddece276ae23..22d609563af8 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1301,7 +1301,6 @@ static int sparc_lance_probe_one(struct platform_device *op,
struct device_node *dp = op->dev.of_node;
struct lance_private *lp;
struct net_device *dev;
- int i;
dev = alloc_etherdev(sizeof(struct lance_private) + 8);
if (!dev)
@@ -1315,8 +1314,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
* will copy the address in the device structure to the lance
* initialization block.
*/
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = idprom->id_ethaddr[i];
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
/* Get the IO region */
lp->lregs = of_ioremap(&op->resource[0], 0,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index d5fd49dd25f3..3936543a74d8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1080,7 +1080,7 @@ static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
return 0;
}
-static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
+static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, const u8 *addr)
{
unsigned int mac_addr_hi, mac_addr_lo;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 17a585adfb49..30d24d19f40d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1912,10 +1912,8 @@ static int xgbe_close(struct net_device *netdev)
clk_disable_unprepare(pdata->ptpclk);
clk_disable_unprepare(pdata->sysclk);
- flush_workqueue(pdata->an_workqueue);
destroy_workqueue(pdata->an_workqueue);
- flush_workqueue(pdata->dev_workqueue);
destroy_workqueue(pdata->dev_workqueue);
set_bit(XGBE_DOWN, &pdata->dev_state);
@@ -2016,7 +2014,7 @@ static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, saddr->sa_data);
hw_if->set_mac_address(pdata, netdev->dev_addr);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index a218dc6f2edd..0e8698928e4d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -267,7 +267,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
- memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, pdata->mac_addr);
/* Initialize ECC timestamps */
pdata->tx_sec_period = jiffies;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 3305979a9f7c..607a2c90513b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -729,7 +729,7 @@ struct xgbe_ext_stats {
struct xgbe_hw_if {
int (*tx_complete)(struct xgbe_ring_desc *);
- int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
+ int (*set_mac_address)(struct xgbe_prv_data *, const u8 *addr);
int (*config_rx_mode)(struct xgbe_prv_data *);
int (*enable_rx_csum)(struct xgbe_prv_data *);
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.c b/drivers/net/ethernet/apm/xgene-v2/mac.c
index 2da979e4fad1..6423e22e05b2 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mac.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mac.c
@@ -65,7 +65,7 @@ void xge_mac_set_speed(struct xge_pdata *pdata)
void xge_mac_set_station_addr(struct xge_pdata *pdata)
{
- u8 *dev_addr = pdata->ndev->dev_addr;
+ const u8 *dev_addr = pdata->ndev->dev_addr;
u32 addr0, addr1;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 80399c8980bd..d022b6db9e06 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -36,7 +36,7 @@ static int xge_get_resources(struct xge_pdata *pdata)
return -ENOMEM;
}
- if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
+ if (device_get_ethdev_address(dev, ndev))
eth_hw_addr_random(ndev);
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
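Note the inverted test: the old device_get_mac_address() returned the buffer pointer (NULL on failure), whereas device_get_ethdev_address() follows errno conventions, so failure is now the non-zero case:

    /* 0 on success, negative errno if no usable address was found */
    if (device_get_ethdev_address(dev, ndev))
        eth_hw_addr_random(ndev);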
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 5f657879134e..e641dbbea1e2 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -378,8 +378,8 @@ u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr)
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
+ const u8 *dev_addr = pdata->ndev->dev_addr;
u32 addr0, addr1;
- u8 *dev_addr = pdata->ndev->dev_addr;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
(dev_addr[1] << 8) | dev_addr[0];
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 5f1fc6582d74..220dc42af31a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1731,7 +1731,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
xgene_get_port_id_acpi(dev, pdata);
#endif
- if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
+ if (device_get_ethdev_address(dev, ndev))
eth_hw_addr_random(ndev);
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index f482ced2cadd..72b5e8eb0ec7 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -165,8 +165,8 @@ static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
{
+ const u8 *dev_addr = p->ndev->dev_addr;
u32 addr0, addr1;
- u8 *dev_addr = p->ndev->dev_addr;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
(dev_addr[1] << 8) | dev_addr[0];
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 304b5d43f236..86607b79c09f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -207,8 +207,8 @@ static void xgene_pcs_reset(struct xgene_enet_pdata *pdata)
static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
+ const u8 *dev_addr = pdata->ndev->dev_addr;
u32 addr0, addr1;
- u8 *dev_addr = pdata->ndev->dev_addr;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
(dev_addr[1] << 8) | dev_addr[0];
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index a989d2df59ad..9a650d1c1bdd 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -308,7 +308,7 @@ bmac_init_registers(struct net_device *dev)
{
struct bmac_data *bp = netdev_priv(dev);
volatile unsigned short regValue;
- unsigned short *pWord16;
+ const unsigned short *pWord16;
int i;
/* XXDEBUG(("bmac: enter init_registers\n")); */
@@ -371,7 +371,7 @@ bmac_init_registers(struct net_device *dev)
bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
- pWord16 = (unsigned short *)dev->dev_addr;
+ pWord16 = (const unsigned short *)dev->dev_addr;
bmwrite(dev, MADD0, *pWord16++);
bmwrite(dev, MADD1, *pWord16++);
bmwrite(dev, MADD2, *pWord16);
@@ -521,19 +521,16 @@ static int bmac_resume(struct macio_dev *mdev)
static int bmac_set_address(struct net_device *dev, void *addr)
{
struct bmac_data *bp = netdev_priv(dev);
- unsigned char *p = addr;
- unsigned short *pWord16;
+ const unsigned short *pWord16;
unsigned long flags;
- int i;
XXDEBUG(("bmac: enter set_address\n"));
spin_lock_irqsave(&bp->lock, flags);
- for (i = 0; i < 6; ++i) {
- dev->dev_addr[i] = p[i];
- }
+ eth_hw_addr_set(dev, addr);
+
/* load up the hardware address */
- pWord16 = (unsigned short *)dev->dev_addr;
+ pWord16 = (const unsigned short *)dev->dev_addr;
bmwrite(dev, MADD0, *pWord16++);
bmwrite(dev, MADD1, *pWord16++);
bmwrite(dev, MADD2, *pWord16);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index bed481816ea3..062a300a566a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -217,7 +217,7 @@ struct aq_hw_ops {
int (*hw_ring_tx_head_update)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
- int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
+ int (*hw_set_mac_address)(struct aq_hw_s *self, const u8 *mac_addr);
int (*hw_soft_reset)(struct aq_hw_s *self);
@@ -226,7 +226,7 @@ struct aq_hw_ops {
int (*hw_reset)(struct aq_hw_s *self);
- int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr);
+ int (*hw_init)(struct aq_hw_s *self, const u8 *mac_addr);
int (*hw_start)(struct aq_hw_s *self);
@@ -373,7 +373,7 @@ struct aq_fw_ops {
int (*set_phyloopback)(struct aq_hw_s *self, u32 mode, bool enable);
int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
- u8 *mac);
+ const u8 *mac);
int (*send_fw_request)(struct aq_hw_s *self,
const struct hw_fw_request_iface *fw_req,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index 4a6dfac857ca..02058fe79f52 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -35,7 +35,7 @@ static int aq_apply_macsec_cfg(struct aq_nic_s *nic);
static int aq_apply_secy_cfg(struct aq_nic_s *nic,
const struct macsec_secy *secy);
-static void aq_ether_addr_to_mac(u32 mac[2], unsigned char *emac)
+static void aq_ether_addr_to_mac(u32 mac[2], const unsigned char *emac)
{
u32 tmp[2] = { 0 };
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 6c049864dac0..1acf544afeb4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -300,6 +300,7 @@ static bool aq_nic_is_valid_ether_addr(const u8 *addr)
int aq_nic_ndev_register(struct aq_nic_s *self)
{
+ u8 addr[ETH_ALEN];
int err = 0;
if (!self->ndev) {
@@ -316,12 +317,13 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
#endif
mutex_lock(&self->fwreq_mutex);
- err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
- self->ndev->dev_addr);
+ err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
mutex_unlock(&self->fwreq_mutex);
if (err)
goto err_exit;
+ eth_hw_addr_set(self->ndev, addr);
+
if (!is_valid_ether_addr(self->ndev->dev_addr) ||
!aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
netdev_warn(self->ndev, "MAC is invalid, will use random.");
@@ -332,7 +334,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
{
static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;
- ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
+ eth_hw_addr_set(self->ndev, mac_addr_permanent);
}
#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index dee9ff74d6d6..d4b1976ee69b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -413,13 +413,13 @@ static int atl_resume_common(struct device *dev, bool deep)
if (deep) {
/* Reinitialize Nic/Vecs objects */
aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+ }
+ if (netif_running(nic->ndev)) {
ret = aq_nic_init(nic);
if (ret)
goto err_exit;
- }
- if (netif_running(nic->ndev)) {
ret = aq_nic_start(nic);
if (ret)
goto err_exit;
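(The atlantic resume hunk above decouples the two conditions: aq_nic_init() now runs whenever the interface was up at suspend time, rather than only on a deep resume, so a shallow resume no longer restarts the NIC without reinitializing it.)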
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 611875ef2cd1..4625ccb79499 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -322,7 +322,7 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr)
{
unsigned int h = 0U;
unsigned int l = 0U;
@@ -348,7 +348,7 @@ err_exit:
return err;
}
-static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl_a0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 9f1b15077e7d..d875ce3ec759 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -533,7 +533,7 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr)
{
unsigned int h = 0U;
unsigned int l = 0U;
@@ -558,7 +558,7 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl_b0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index d8db972113ec..5298846dd9f7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -58,7 +58,7 @@ int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self);
-int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr);
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr);
int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc);
int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 404cbf60d3f2..fc0e66006644 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -944,7 +944,7 @@ u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self)
}
static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
- u8 *mac)
+ const u8 *mac)
{
struct hw_atl_utils_fw_rpc *prpc = NULL;
unsigned int rpc_size = 0U;
@@ -987,7 +987,7 @@ err_exit:
}
static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
- u8 *mac)
+ const u8 *mac)
{
struct hw_atl_utils_fw_rpc *prpc = NULL;
unsigned int rpc_size = 0U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index ee0c22d04935..eac631c45c56 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -358,7 +358,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
return 0;
}
-static int aq_fw2x_set_wol(struct aq_hw_s *self, u8 *mac)
+static int aq_fw2x_set_wol(struct aq_hw_s *self, const u8 *mac)
{
struct hw_atl_utils_fw_rpc *rpc = NULL;
struct offload_info *info = NULL;
@@ -404,7 +404,7 @@ err_exit:
}
static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state,
- u8 *mac)
+ const u8 *mac)
{
int err = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index 92f64048bf69..c98708bb044c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -516,7 +516,7 @@ static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-static int hw_atl2_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl2_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 37a41773dd43..0a67612af228 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -21,11 +21,12 @@ config ARC_EMAC_CORE
depends on ARC || ARCH_ROCKCHIP || COMPILE_TEST
select MII
select PHYLIB
+ select CRC32
config ARC_EMAC
tristate "ARC EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET
+ depends on OF_IRQ
depends on ARC || COMPILE_TEST
help
On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
@@ -35,7 +36,7 @@ config ARC_EMAC
config EMAC_ROCKCHIP
tristate "Rockchip EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET && REGULATOR
+ depends on OF_IRQ && REGULATOR
depends on ARCH_ROCKCHIP || COMPILE_TEST
help
Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers.
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 38c288ec9059..c642c3d3e600 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -773,7 +773,7 @@ static int arc_emac_set_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
arc_emac_set_address_internal(ndev);
@@ -941,7 +941,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
}
/* Get MAC address from device tree */
- err = of_get_mac_address(dev->of_node, ndev->dev_addr);
+ err = of_get_ethdev_address(dev->of_node, ndev);
if (err)
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/asix/Kconfig b/drivers/net/ethernet/asix/Kconfig
new file mode 100644
index 000000000000..eed02453314c
--- /dev/null
+++ b/drivers/net/ethernet/asix/Kconfig
@@ -0,0 +1,35 @@
+#
+# Asix network device configuration
+#
+
+config NET_VENDOR_ASIX
+ bool "Asix devices"
+ default y
+ help
+ If you have a network (Ethernet, non-USB, not NE2000 compatible)
+ interface based on a chip from ASIX, say Y.
+
+if NET_VENDOR_ASIX
+
+config SPI_AX88796C
+ tristate "Asix AX88796C-SPI support"
+ select PHYLIB
+ depends on SPI
+ depends on GPIOLIB
+ help
+ Say Y here if you intend to use an ASIX AX88796C attached in SPI mode.
+
+config SPI_AX88796C_COMPRESSION
+ bool "SPI transfer compression"
+ default n
+ depends on SPI_AX88796C
+ help
+ Say Y here to enable SPI transfer compression. It saves up
+ to 24 dummy cycles during each transfer, which may noticeably
+ speed up short transfers. This sets the default value that is
+ inherited by network interfaces during probe. It can be
+ changed at run time via the spi-compression ethtool tunable.
+
+ If unsure, say N.
+
+endif # NET_VENDOR_ASIX
diff --git a/drivers/net/ethernet/asix/Makefile b/drivers/net/ethernet/asix/Makefile
new file mode 100644
index 000000000000..0bfbbb042634
--- /dev/null
+++ b/drivers/net/ethernet/asix/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Asix network device drivers.
+#
+
+obj-$(CONFIG_SPI_AX88796C) += ax88796c.o
+ax88796c-y := ax88796c_main.o ax88796c_ioctl.o ax88796c_spi.o
diff --git a/drivers/net/ethernet/asix/ax88796c_ioctl.c b/drivers/net/ethernet/asix/ax88796c_ioctl.c
new file mode 100644
index 000000000000..916ae380a004
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_ioctl.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#define pr_fmt(fmt) "ax88796c: " fmt
+
+#include <linux/bitmap.h>
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+#include "ax88796c_main.h"
+#include "ax88796c_ioctl.h"
+
+static const char ax88796c_priv_flag_names[][ETH_GSTRING_LEN] = {
+ "SPICompression",
+};
+
+static void
+ax88796c_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
+{
+ /* Inherit standard device info */
+ strncpy(info->driver, DRV_NAME, sizeof(info->driver));
+}
+
+static u32 ax88796c_get_msglevel(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ return ax_local->msg_enable;
+}
+
+static void ax88796c_set_msglevel(struct net_device *ndev, u32 level)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ ax_local->msg_enable = level;
+}
+
+static void
+ax88796c_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ pause->tx_pause = !!(ax_local->flowctrl & AX_FC_TX);
+ pause->rx_pause = !!(ax_local->flowctrl & AX_FC_RX);
+ pause->autoneg = (ax_local->flowctrl & AX_FC_ANEG) ?
+ AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
+}
+
+static int
+ax88796c_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ int fc;
+
+ /* The following logic comes from phylink_ethtool_set_pauseparam() */
+ fc = pause->tx_pause ? AX_FC_TX : 0;
+ fc |= pause->rx_pause ? AX_FC_RX : 0;
+ fc |= pause->autoneg ? AX_FC_ANEG : 0;
+
+ ax_local->flowctrl = fc;
+
+ if (pause->autoneg) {
+ phy_set_asym_pause(ax_local->phydev, pause->tx_pause,
+ pause->rx_pause);
+ } else {
+ int maccr = 0;
+
+ phy_set_asym_pause(ax_local->phydev, 0, 0);
+ maccr |= (ax_local->flowctrl & AX_FC_RX) ? MACCR_RXFC_ENABLE : 0;
+ maccr |= (ax_local->flowctrl & AX_FC_TX) ? MACCR_TXFC_ENABLE : 0;
+
+ mutex_lock(&ax_local->spi_lock);
+
+ maccr |= AX_READ(&ax_local->ax_spi, P0_MACCR) &
+ ~(MACCR_TXFC_ENABLE | MACCR_RXFC_ENABLE);
+ AX_WRITE(&ax_local->ax_spi, maccr, P0_MACCR);
+
+ mutex_unlock(&ax_local->spi_lock);
+ }
+
+ return 0;
+}
+
+static int ax88796c_get_regs_len(struct net_device *ndev)
+{
+ return AX88796C_REGDUMP_LEN + AX88796C_PHY_REGDUMP_LEN;
+}
+
+static void
+ax88796c_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *_p)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ int offset, i;
+ u16 *p = _p;
+
+ memset(p, 0, ax88796c_get_regs_len(ndev));
+
+ mutex_lock(&ax_local->spi_lock);
+
+ for (offset = 0; offset < AX88796C_REGDUMP_LEN; offset += 2) {
+ if (!test_bit(offset / 2, ax88796c_no_regs_mask))
+ *p = AX_READ(&ax_local->ax_spi, offset);
+ p++;
+ }
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ for (i = 0; i < AX88796C_PHY_REGDUMP_LEN / 2; i++) {
+ *p = phy_read(ax_local->phydev, i);
+ p++;
+ }
+}
+
+static void
+ax88796c_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, ax88796c_priv_flag_names,
+ sizeof(ax88796c_priv_flag_names));
+ break;
+ }
+}
+
+static int
+ax88796c_get_sset_count(struct net_device *ndev, int stringset)
+{
+ int ret = 0;
+
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ ret = ARRAY_SIZE(ax88796c_priv_flag_names);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int ax88796c_set_priv_flags(struct net_device *ndev, u32 flags)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ if (flags & ~AX_PRIV_FLAGS_MASK)
+ return -EOPNOTSUPP;
+
+ if ((ax_local->priv_flags ^ flags) & AX_CAP_COMP)
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ ax_local->priv_flags = flags;
+
+ return 0;
+}
+
+static u32 ax88796c_get_priv_flags(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ return ax_local->priv_flags;
+}
+
+int ax88796c_mdio_read(struct mii_bus *mdiobus, int phy_id, int loc)
+{
+ struct ax88796c_device *ax_local = mdiobus->priv;
+ int ret;
+
+ mutex_lock(&ax_local->spi_lock);
+ AX_WRITE(&ax_local->ax_spi, MDIOCR_RADDR(loc)
+ | MDIOCR_FADDR(phy_id) | MDIOCR_READ, P2_MDIOCR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ ((ret & MDIOCR_VALID) != 0), /* wait for command completion, matching the write path */
+ 0, jiffies_to_usecs(HZ / 100), false,
+ &ax_local->ax_spi, P2_MDIOCR);
+ if (!ret)
+ ret = AX_READ(&ax_local->ax_spi, P2_MDIODR);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ return ret;
+}
+
+int
+ax88796c_mdio_write(struct mii_bus *mdiobus, int phy_id, int loc, u16 val)
+{
+ struct ax88796c_device *ax_local = mdiobus->priv;
+ int ret;
+
+ mutex_lock(&ax_local->spi_lock);
+ AX_WRITE(&ax_local->ax_spi, val, P2_MDIODR);
+
+ AX_WRITE(&ax_local->ax_spi,
+ MDIOCR_RADDR(loc) | MDIOCR_FADDR(phy_id)
+ | MDIOCR_WRITE, P2_MDIOCR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ ((ret & MDIOCR_VALID) != 0), 0,
+ jiffies_to_usecs(HZ / 100), false,
+ &ax_local->ax_spi, P2_MDIOCR);
+ mutex_unlock(&ax_local->spi_lock);
+
+ return ret;
+}
+
+const struct ethtool_ops ax88796c_ethtool_ops = {
+ .get_drvinfo = ax88796c_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = ax88796c_get_msglevel,
+ .set_msglevel = ax88796c_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_pauseparam = ax88796c_get_pauseparam,
+ .set_pauseparam = ax88796c_set_pauseparam,
+ .get_regs_len = ax88796c_get_regs_len,
+ .get_regs = ax88796c_get_regs,
+ .get_strings = ax88796c_get_strings,
+ .get_sset_count = ax88796c_get_sset_count,
+ .get_priv_flags = ax88796c_get_priv_flags,
+ .set_priv_flags = ax88796c_set_priv_flags,
+};
+
+int ax88796c_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
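The SPICompression private flag declared at the top of this file is the "spi-compression ethtool tunable" mentioned in the Kconfig help text; since ax88796c_set_priv_flags() returns -EBUSY while the interface is running, it is toggled with the device down, e.g. "ethtool --set-priv-flags eth0 SPICompression on".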
diff --git a/drivers/net/ethernet/asix/ax88796c_ioctl.h b/drivers/net/ethernet/asix/ax88796c_ioctl.h
new file mode 100644
index 000000000000..34d2a7dcc5ef
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_ioctl.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _AX88796C_IOCTL_H
+#define _AX88796C_IOCTL_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "ax88796c_main.h"
+
+extern const struct ethtool_ops ax88796c_ethtool_ops;
+
+bool ax88796c_check_power(const struct ax88796c_device *ax_local);
+bool ax88796c_check_power_and_wake(struct ax88796c_device *ax_local);
+void ax88796c_set_power_saving(struct ax88796c_device *ax_local, u8 ps_level);
+int ax88796c_mdio_read(struct mii_bus *mdiobus, int phy_id, int loc);
+int ax88796c_mdio_write(struct mii_bus *mdiobus, int phy_id, int loc, u16 val);
+int ax88796c_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+#endif
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
new file mode 100644
index 000000000000..cfc597f72e3d
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -0,0 +1,1163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#define pr_fmt(fmt) "ax88796c: " fmt
+
+#include "ax88796c_main.h"
+#include "ax88796c_ioctl.h"
+
+#include <linux/bitmap.h>
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <linux/lockdep.h>
+#include <linux/mdio.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/skbuff.h>
+#include <linux/spi/spi.h>
+
+static int comp = IS_ENABLED(CONFIG_SPI_AX88796C_COMPRESSION);
+static int msg_enable = NETIF_MSG_PROBE |
+ NETIF_MSG_LINK |
+ NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR;
+
+static const char *no_regs_list = "80018001,e1918001,8001a001,fc0d0000";
+unsigned long ax88796c_no_regs_mask[AX88796C_REGDUMP_LEN / (sizeof(unsigned long) * 8)];
+
+module_param(msg_enable, int, 0444);
+MODULE_PARM_DESC(msg_enable, "Message mask (see linux/netdevice.h for bitmap)");
+
+static int ax88796c_soft_reset(struct ax88796c_device *ax_local)
+{
+ u16 temp;
+ int ret;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ AX_WRITE(&ax_local->ax_spi, PSR_RESET, P0_PSR);
+ AX_WRITE(&ax_local->ax_spi, PSR_RESET_CLR, P0_PSR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ (ret & PSR_DEV_READY),
+ 0, jiffies_to_usecs(160 * HZ / 1000), false,
+ &ax_local->ax_spi, P0_PSR);
+ if (ret)
+ return ret;
+
+ temp = AX_READ(&ax_local->ax_spi, P4_SPICR);
+ if (ax_local->priv_flags & AX_CAP_COMP) {
+ AX_WRITE(&ax_local->ax_spi,
+ (temp | SPICR_RCEN | SPICR_QCEN), P4_SPICR);
+ ax_local->ax_spi.comp = 1;
+ } else {
+ AX_WRITE(&ax_local->ax_spi,
+ (temp & ~(SPICR_RCEN | SPICR_QCEN)), P4_SPICR);
+ ax_local->ax_spi.comp = 0;
+ }
+
+ return 0;
+}
+
+static int ax88796c_reload_eeprom(struct ax88796c_device *ax_local)
+{
+ int ret;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ AX_WRITE(&ax_local->ax_spi, EECR_RELOAD, P3_EECR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ (ret & PSR_DEV_READY),
+ 0, jiffies_to_usecs(2 * HZ / 1000), false,
+ &ax_local->ax_spi, P0_PSR);
+ if (ret) {
+ dev_err(&ax_local->spi->dev,
+ "timeout waiting for reload eeprom\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ax88796c_set_hw_multicast(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ int mc_count = netdev_mc_count(ndev);
+ u16 rx_ctl = RXCR_AB;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ memset(ax_local->multi_filter, 0, AX_MCAST_FILTER_SIZE);
+
+ if (ndev->flags & IFF_PROMISC) {
+ rx_ctl |= RXCR_PRO;
+
+ } else if (ndev->flags & IFF_ALLMULTI || mc_count > AX_MAX_MCAST) {
+ rx_ctl |= RXCR_AMALL;
+
+ } else if (mc_count == 0) {
+ /* just broadcast and directed */
+ } else {
+ u32 crc_bits;
+ int i;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr);
+ ax_local->multi_filter[crc_bits >> 29] |=
+ (1 << ((crc_bits >> 26) & 7));
+ }
+
+ for (i = 0; i < 4; i++) {
+ AX_WRITE(&ax_local->ax_spi,
+ ((ax_local->multi_filter[i * 2 + 1] << 8) |
+ ax_local->multi_filter[i * 2]), P3_MFAR(i));
+ }
+ }
+
+ AX_WRITE(&ax_local->ax_spi, rx_ctl, P2_RXCR);
+}
+
+static void ax88796c_set_mac_addr(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[4] << 8) |
+ (u16)ndev->dev_addr[5]), P3_MACASR0);
+ AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[2] << 8) |
+ (u16)ndev->dev_addr[3]), P3_MACASR1);
+ AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[0] << 8) |
+ (u16)ndev->dev_addr[1]), P3_MACASR2);
+}
+
+static void ax88796c_load_mac_addr(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u16 temp;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ /* Try the device tree first */
+ if (!eth_platform_get_mac_address(&ax_local->spi->dev, ndev->dev_addr) &&
+ is_valid_ether_addr(ndev->dev_addr)) {
+ if (netif_msg_probe(ax_local))
+ dev_info(&ax_local->spi->dev,
+ "MAC address read from device tree\n");
+ return;
+ }
+
+ /* Read the MAC address from AX88796C */
+ temp = AX_READ(&ax_local->ax_spi, P3_MACASR0);
+ ndev->dev_addr[5] = (u8)temp;
+ ndev->dev_addr[4] = (u8)(temp >> 8);
+
+ temp = AX_READ(&ax_local->ax_spi, P3_MACASR1);
+ ndev->dev_addr[3] = (u8)temp;
+ ndev->dev_addr[2] = (u8)(temp >> 8);
+
+ temp = AX_READ(&ax_local->ax_spi, P3_MACASR2);
+ ndev->dev_addr[1] = (u8)temp;
+ ndev->dev_addr[0] = (u8)(temp >> 8);
+
+ if (is_valid_ether_addr(ndev->dev_addr)) {
+ if (netif_msg_probe(ax_local))
+ dev_info(&ax_local->spi->dev,
+ "MAC address read from ASIX chip\n");
+ return;
+ }
+
+ /* Use random address if none found */
+ if (netif_msg_probe(ax_local))
+ dev_info(&ax_local->spi->dev, "Use random MAC address\n");
+ eth_hw_addr_random(ndev);
+}
+
+static void ax88796c_proc_tx_hdr(struct tx_pkt_info *info, u8 ip_summed)
+{
+ u16 pkt_len_bar = (~info->pkt_len & TX_HDR_SOP_PKTLENBAR);
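+ /* bit-inverted copy of the length; the MAC pairs len and ~len to validate each header */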
+
+ /* Prepare SOP header */
+ info->sop.flags_len = info->pkt_len |
+ ((ip_summed == CHECKSUM_NONE) ||
+ (ip_summed == CHECKSUM_UNNECESSARY) ? TX_HDR_SOP_DICF : 0);
+
+ info->sop.seq_lenbar = ((info->seq_num << 11) & TX_HDR_SOP_SEQNUM)
+ | pkt_len_bar;
+ cpu_to_be16s(&info->sop.flags_len);
+ cpu_to_be16s(&info->sop.seq_lenbar);
+
+ /* Prepare Segment header */
+ info->seg.flags_seqnum_seglen = TX_HDR_SEG_FS | TX_HDR_SEG_LS
+ | info->pkt_len;
+
+ info->seg.eo_so_seglenbar = pkt_len_bar;
+
+ cpu_to_be16s(&info->seg.flags_seqnum_seglen);
+ cpu_to_be16s(&info->seg.eo_so_seglenbar);
+
+ /* Prepare EOP header */
+ info->eop.seq_len = ((info->seq_num << 11) &
+ TX_HDR_EOP_SEQNUM) | info->pkt_len;
+ info->eop.seqbar_lenbar = ((~info->seq_num << 11) &
+ TX_HDR_EOP_SEQNUMBAR) | pkt_len_bar;
+
+ cpu_to_be16s(&info->eop.seq_len);
+ cpu_to_be16s(&info->eop.seqbar_lenbar);
+}
+
+static int
+ax88796c_check_free_pages(struct ax88796c_device *ax_local, u8 need_pages)
+{
+ u8 free_pages;
+ u16 tmp;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ free_pages = AX_READ(&ax_local->ax_spi, P0_TFBFCR) & TX_FREEBUF_MASK;
+ if (free_pages < need_pages) {
+ /* schedule free page interrupt */
+ tmp = AX_READ(&ax_local->ax_spi, P0_TFBFCR)
+ & TFBFCR_SCHE_FREE_PAGE;
+ AX_WRITE(&ax_local->ax_spi, tmp | TFBFCR_TX_PAGE_SET |
+ TFBFCR_SET_FREE_PAGE(need_pages),
+ P0_TFBFCR);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *
+ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u8 spi_len = ax_local->ax_spi.comp ? 1 : 4;
+ struct sk_buff *skb;
+ struct tx_pkt_info info;
+ struct skb_data *entry;
+ u16 pkt_len;
+ u8 padlen, seq_num;
+ u8 need_pages;
+ int headroom;
+ int tailroom;
+
+ if (skb_queue_empty(q))
+ return NULL;
+
+ skb = skb_peek(q);
+ pkt_len = skb->len;
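+ /* the chip's TX buffer is managed in 128-byte pages; account for the SOP/SEG header overhead too */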
+ need_pages = (pkt_len + TX_OVERHEAD + 127) >> 7;
+ if (ax88796c_check_free_pages(ax_local, need_pages) != 0)
+ return NULL;
+
+ headroom = skb_headroom(skb);
+ tailroom = skb_tailroom(skb);
+ padlen = round_up(pkt_len, 4) - pkt_len;
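+ /* 5-bit sequence number, echoed in the SOP and EOP headers */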
+ seq_num = ++ax_local->seq_num & 0x1F;
+
+ info.pkt_len = pkt_len;
+
+ if (skb_cloned(skb) ||
+ (headroom < (TX_OVERHEAD + spi_len)) ||
+ (tailroom < (padlen + TX_EOP_SIZE))) {
+ size_t h = max((TX_OVERHEAD + spi_len) - headroom, 0);
+ size_t t = max((padlen + TX_EOP_SIZE) - tailroom, 0);
+
+ if (pskb_expand_head(skb, h, t, GFP_KERNEL))
+ return NULL;
+ }
+
+ info.seq_num = seq_num;
+ ax88796c_proc_tx_hdr(&info, skb->ip_summed);
+
+ /* SOP and SEG header */
+ memcpy(skb_push(skb, TX_OVERHEAD), &info.sop, TX_OVERHEAD);
+
+ /* Write SPI TXQ header */
+ memcpy(skb_push(skb, spi_len), ax88796c_tx_cmd_buf, spi_len);
+
+ /* Make 32-bit alignment */
+ skb_put(skb, padlen);
+
+ /* EOP header */
+ memcpy(skb_put(skb, TX_EOP_SIZE), &info.eop, TX_EOP_SIZE);
+
+ skb_unlink(skb, q);
+
+ entry = (struct skb_data *)skb->cb;
+ memset(entry, 0, sizeof(*entry));
+ entry->len = pkt_len;
+
+ if (netif_msg_pktdata(ax_local)) {
+ char pfx[IFNAMSIZ + 7];
+
+ snprintf(pfx, sizeof(pfx), "%s: ", ndev->name);
+
+ netdev_info(ndev, "TX packet len %d, total len %d, seq %d\n",
+ pkt_len, skb->len, seq_num);
+
+ netdev_info(ndev, " SPI Header:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, 4, 0);
+
+ netdev_info(ndev, " TX SOP:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data + 4, TX_OVERHEAD, 0);
+
+ netdev_info(ndev, " TX packet:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data + 4 + TX_OVERHEAD,
+ skb->len - TX_EOP_SIZE - 4 - TX_OVERHEAD, 0);
+
+ netdev_info(ndev, " TX EOP:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data + skb->len - 4, 4, 0);
+ }
+
+ return skb;
+}
+
+static int ax88796c_hard_xmit(struct ax88796c_device *ax_local)
+{
+ struct ax88796c_pcpu_stats *stats;
+ struct sk_buff *tx_skb;
+ struct skb_data *entry;
+ unsigned long flags;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ stats = this_cpu_ptr(ax_local->stats);
+ tx_skb = ax88796c_tx_fixup(ax_local->ndev, &ax_local->tx_wait_q);
+
+ if (!tx_skb) {
+ this_cpu_inc(ax_local->stats->tx_dropped);
+ return 0;
+ }
+ entry = (struct skb_data *)tx_skb->cb;
+
+ AX_WRITE(&ax_local->ax_spi,
+ (TSNR_TXB_START | TSNR_PKT_CNT(1)), P0_TSNR);
+
+ axspi_write_txq(&ax_local->ax_spi, tx_skb->data, tx_skb->len);
+
+ if (((AX_READ(&ax_local->ax_spi, P0_TSNR) & TXNR_TXB_IDLE) == 0) ||
+ ((ISR_TXERR & AX_READ(&ax_local->ax_spi, P0_ISR)) != 0)) {
+ /* Ack tx error int */
+ AX_WRITE(&ax_local->ax_spi, ISR_TXERR, P0_ISR);
+
+ this_cpu_inc(ax_local->stats->tx_dropped);
+
+ if (net_ratelimit())
+ netif_err(ax_local, tx_err, ax_local->ndev,
+ "TX FIFO error, re-initialize the TX bridge\n");
+
+ /* Reinitialize the TX bridge */
+ AX_WRITE(&ax_local->ax_spi, TXNR_TXB_REINIT |
+ AX_READ(&ax_local->ax_spi, P0_TSNR), P0_TSNR);
+ ax_local->seq_num = 0;
+ } else {
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, entry->len);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+ }
+
+ entry->state = tx_done;
+ dev_kfree_skb(tx_skb);
+
+ return 1;
+}
+
+static int
+ax88796c_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ skb_queue_tail(&ax_local->tx_wait_q, skb);
+ if (skb_queue_len(&ax_local->tx_wait_q) > TX_QUEUE_HIGH_WATER)
+ netif_stop_queue(ndev);
+
+ set_bit(EVENT_TX, &ax_local->flags);
+ schedule_work(&ax_local->ax_work);
+
+ return NETDEV_TX_OK;
+}
+
+static void
+ax88796c_skb_return(struct ax88796c_device *ax_local,
+ struct sk_buff *skb, struct rx_header *rxhdr)
+{
+ struct net_device *ndev = ax_local->ndev;
+ struct ax88796c_pcpu_stats *stats;
+ unsigned long flags;
+ int status;
+
+ stats = this_cpu_ptr(ax_local->stats);
+
+ do {
+ if (!(ndev->features & NETIF_F_RXCSUM))
+ break;
+
+ /* checksum error bit is set */
+ if ((rxhdr->flags & RX_HDR3_L3_ERR) ||
+ (rxhdr->flags & RX_HDR3_L4_ERR))
+ break;
+
+ /* Other types may be indicated by more than one bit. */
+ if ((rxhdr->flags & RX_HDR3_L4_TYPE_TCP) ||
+ (rxhdr->flags & RX_HDR3_L4_TYPE_UDP))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } while (0);
+
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, skb->len);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+
+ skb->dev = ndev;
+ skb->protocol = eth_type_trans(skb, ax_local->ndev);
+
+ netif_info(ax_local, rx_status, ndev, "< rx, len %zu, type 0x%x\n",
+ skb->len + sizeof(struct ethhdr), skb->protocol);
+
+ status = netif_rx_ni(skb);
+ if (status != NET_RX_SUCCESS && net_ratelimit())
+ netif_info(ax_local, rx_err, ndev,
+ "netif_rx status %d\n", status);
+}
+
+static void
+ax88796c_rx_fixup(struct ax88796c_device *ax_local, struct sk_buff *rx_skb)
+{
+ struct rx_header *rxhdr = (struct rx_header *)rx_skb->data;
+ struct net_device *ndev = ax_local->ndev;
+ u16 len;
+
+ be16_to_cpus(&rxhdr->flags_len);
+ be16_to_cpus(&rxhdr->seq_lenbar);
+ be16_to_cpus(&rxhdr->flags);
+
+ if ((rxhdr->flags_len & RX_HDR1_PKT_LEN) !=
+ (~rxhdr->seq_lenbar & 0x7FF)) {
+ netif_err(ax_local, rx_err, ndev, "Header error\n");
+
+ this_cpu_inc(ax_local->stats->rx_frame_errors);
+ kfree_skb(rx_skb);
+ return;
+ }
+
+ if ((rxhdr->flags_len & RX_HDR1_MII_ERR) ||
+ (rxhdr->flags_len & RX_HDR1_CRC_ERR)) {
+ netif_err(ax_local, rx_err, ndev, "CRC or MII error\n");
+
+ this_cpu_inc(ax_local->stats->rx_crc_errors);
+ kfree_skb(rx_skb);
+ return;
+ }
+
+ len = rxhdr->flags_len & RX_HDR1_PKT_LEN;
+ if (netif_msg_pktdata(ax_local)) {
+ char pfx[IFNAMSIZ + 7];
+
+ snprintf(pfx, sizeof(pfx), "%s: ", ndev->name);
+ netdev_info(ndev, "RX data, total len %d, packet len %d\n",
+ rx_skb->len, len);
+
+ netdev_info(ndev, " Dump RX packet header:");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ rx_skb->data, sizeof(*rxhdr), 0);
+
+ netdev_info(ndev, " Dump RX packet:");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ rx_skb->data + sizeof(*rxhdr), len, 0);
+ }
+
+ skb_pull(rx_skb, sizeof(*rxhdr));
+ pskb_trim(rx_skb, len);
+
+ ax88796c_skb_return(ax_local, rx_skb, rxhdr);
+}
+
+static int ax88796c_receive(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ struct skb_data *entry;
+ u16 w_count, pkt_len;
+ struct sk_buff *skb;
+ u8 pkt_cnt;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ /* check rx packet and total word count */
+ AX_WRITE(&ax_local->ax_spi, AX_READ(&ax_local->ax_spi, P0_RTWCR)
+ | RTWCR_RX_LATCH, P0_RTWCR);
+
+ pkt_cnt = AX_READ(&ax_local->ax_spi, P0_RXBCR2) & RXBCR2_PKT_MASK;
+ if (!pkt_cnt)
+ return 0;
+
+ pkt_len = AX_READ(&ax_local->ax_spi, P0_RCPHR) & 0x7FF;
+
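+ /* RX bridge transfers 16-bit words: 6-byte RX header plus packet, rounded up to a dword */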
+ w_count = round_up(pkt_len + 6, 4) >> 1;
+
+ skb = netdev_alloc_skb(ndev, w_count * 2);
+ if (!skb) {
+ AX_WRITE(&ax_local->ax_spi, RXBCR1_RXB_DISCARD, P0_RXBCR1);
+ this_cpu_inc(ax_local->stats->rx_dropped);
+ return 0;
+ }
+ entry = (struct skb_data *)skb->cb;
+
+ AX_WRITE(&ax_local->ax_spi, RXBCR1_RXB_START | w_count, P0_RXBCR1);
+
+ axspi_read_rxq(&ax_local->ax_spi,
+ skb_put(skb, w_count * 2), skb->len);
+
+ /* Check if rx bridge is idle */
+ if ((AX_READ(&ax_local->ax_spi, P0_RXBCR2) & RXBCR2_RXB_IDLE) == 0) {
+ if (net_ratelimit())
+ netif_err(ax_local, rx_err, ndev,
+ "Rx Bridge is not idle\n");
+ AX_WRITE(&ax_local->ax_spi, RXBCR2_RXB_REINIT, P0_RXBCR2);
+
+ entry->state = rx_err;
+ } else {
+ entry->state = rx_done;
+ }
+
+ AX_WRITE(&ax_local->ax_spi, ISR_RXPKT, P0_ISR);
+
+ ax88796c_rx_fixup(ax_local, skb);
+
+ return 1;
+}
+
+static int ax88796c_process_isr(struct ax88796c_device *ax_local)
+{
+ struct net_device *ndev = ax_local->ndev;
+ int todo = 0;
+ u16 isr;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ isr = AX_READ(&ax_local->ax_spi, P0_ISR);
+ AX_WRITE(&ax_local->ax_spi, isr, P0_ISR);
+
+ netif_dbg(ax_local, intr, ndev, " ISR 0x%04x\n", isr);
+
+ if (isr & ISR_TXERR) {
+ netif_dbg(ax_local, intr, ndev, " TXERR interrupt\n");
+ AX_WRITE(&ax_local->ax_spi, TXNR_TXB_REINIT, P0_TSNR);
+ ax_local->seq_num = 0x1f;
+ }
+
+ if (isr & ISR_TXPAGES) {
+ netif_dbg(ax_local, intr, ndev, " TXPAGES interrupt\n");
+ set_bit(EVENT_TX, &ax_local->flags);
+ }
+
+ if (isr & ISR_LINK) {
+ netif_dbg(ax_local, intr, ndev, " Link change interrupt\n");
+ phy_mac_interrupt(ax_local->ndev->phydev);
+ }
+
+ if (isr & ISR_RXPKT) {
+ netif_dbg(ax_local, intr, ndev, " RX interrupt\n");
+ todo = ax88796c_receive(ax_local->ndev);
+ }
+
+ return todo;
+}
+
+static irqreturn_t ax88796c_interrupt(int irq, void *dev_instance)
+{
+ struct ax88796c_device *ax_local;
+ struct net_device *ndev;
+
+ ndev = dev_instance;
+ if (!ndev) {
+ pr_err("irq %d for unknown device.\n", irq);
+ return IRQ_RETVAL(0);
+ }
+ ax_local = to_ax88796c_device(ndev);
+
+ disable_irq_nosync(irq);
+
+ netif_dbg(ax_local, intr, ndev, "Interrupt occurred\n");
+
+ set_bit(EVENT_INTR, &ax_local->flags);
+ schedule_work(&ax_local->ax_work);
+
+ return IRQ_HANDLED;
+}
+
+static void ax88796c_work(struct work_struct *work)
+{
+ struct ax88796c_device *ax_local =
+ container_of(work, struct ax88796c_device, ax_work);
+
+ mutex_lock(&ax_local->spi_lock);
+
+ if (test_bit(EVENT_SET_MULTI, &ax_local->flags)) {
+ ax88796c_set_hw_multicast(ax_local->ndev);
+ clear_bit(EVENT_SET_MULTI, &ax_local->flags);
+ }
+
+ if (test_bit(EVENT_INTR, &ax_local->flags)) {
+ AX_WRITE(&ax_local->ax_spi, IMR_MASKALL, P0_IMR);
+
+ while (ax88796c_process_isr(ax_local))
+ /* nothing */;
+
+ clear_bit(EVENT_INTR, &ax_local->flags);
+
+ AX_WRITE(&ax_local->ax_spi, IMR_DEFAULT, P0_IMR);
+
+ enable_irq(ax_local->ndev->irq);
+ }
+
+ if (test_bit(EVENT_TX, &ax_local->flags)) {
+ while (skb_queue_len(&ax_local->tx_wait_q)) {
+ if (!ax88796c_hard_xmit(ax_local))
+ break;
+ }
+
+ clear_bit(EVENT_TX, &ax_local->flags);
+
+ if (netif_queue_stopped(ax_local->ndev) &&
+ (skb_queue_len(&ax_local->tx_wait_q) < TX_QUEUE_LOW_WATER))
+ netif_wake_queue(ax_local->ndev);
+ }
+
+ mutex_unlock(&ax_local->spi_lock);
+}
+
+static void ax88796c_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u32 rx_frame_errors = 0, rx_crc_errors = 0;
+ u32 rx_dropped = 0, tx_dropped = 0;
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct ax88796c_pcpu_stats *s;
+ u64 rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes;
+
+ s = per_cpu_ptr(ax_local->stats, cpu);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&s->syncp);
+ rx_packets = u64_stats_read(&s->rx_packets);
+ rx_bytes = u64_stats_read(&s->rx_bytes);
+ tx_packets = u64_stats_read(&s->tx_packets);
+ tx_bytes = u64_stats_read(&s->tx_bytes);
+ } while (u64_stats_fetch_retry_irq(&s->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+
+ rx_dropped += s->rx_dropped;
+ tx_dropped += s->tx_dropped;
+ rx_frame_errors += s->rx_frame_errors;
+ rx_crc_errors += s->rx_crc_errors;
+ }
+
+ stats->rx_dropped = rx_dropped;
+ stats->tx_dropped = tx_dropped;
+ stats->rx_frame_errors = rx_frame_errors;
+ stats->rx_crc_errors = rx_crc_errors;
+}
+
+static void ax88796c_set_mac(struct ax88796c_device *ax_local)
+{
+ u16 maccr;
+
+ maccr = (ax_local->link) ? MACCR_RXEN : 0;
+
+ switch (ax_local->speed) {
+ case SPEED_100:
+ maccr |= MACCR_SPEED_100;
+ break;
+ case SPEED_10:
+ case SPEED_UNKNOWN:
+ break;
+ default:
+ return;
+ }
+
+ switch (ax_local->duplex) {
+ case DUPLEX_FULL:
+ maccr |= MACCR_DUPLEX_FULL; /* was MACCR_SPEED_100, a copy-paste slip; the mask below clears MACCR_DUPLEX_FULL */
+ break;
+ case DUPLEX_HALF:
+ case DUPLEX_UNKNOWN:
+ break;
+ default:
+ return;
+ }
+
+ if (ax_local->flowctrl & AX_FC_ANEG &&
+ ax_local->phydev->autoneg) {
+ maccr |= ax_local->pause ? MACCR_RXFC_ENABLE : 0;
+ maccr |= !ax_local->pause != !ax_local->asym_pause ?
+ MACCR_TXFC_ENABLE : 0;
+ } else {
+ maccr |= (ax_local->flowctrl & AX_FC_RX) ? MACCR_RXFC_ENABLE : 0;
+ maccr |= (ax_local->flowctrl & AX_FC_TX) ? MACCR_TXFC_ENABLE : 0;
+ }
+
+ mutex_lock(&ax_local->spi_lock);
+
+ maccr |= AX_READ(&ax_local->ax_spi, P0_MACCR) &
+ ~(MACCR_DUPLEX_FULL | MACCR_SPEED_100 |
+ MACCR_TXFC_ENABLE | MACCR_RXFC_ENABLE);
+ AX_WRITE(&ax_local->ax_spi, maccr, P0_MACCR);
+
+ mutex_unlock(&ax_local->spi_lock);
+}
+
+static void ax88796c_handle_link_change(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ bool update = false;
+
+ if (phydev->link && (ax_local->speed != phydev->speed ||
+ ax_local->duplex != phydev->duplex ||
+ ax_local->pause != phydev->pause ||
+ ax_local->asym_pause != phydev->asym_pause)) {
+ ax_local->speed = phydev->speed;
+ ax_local->duplex = phydev->duplex;
+ ax_local->pause = phydev->pause;
+ ax_local->asym_pause = phydev->asym_pause;
+ update = true;
+ }
+
+ if (phydev->link != ax_local->link) {
+ if (!phydev->link) {
+ ax_local->speed = SPEED_UNKNOWN;
+ ax_local->duplex = DUPLEX_UNKNOWN;
+ }
+
+ ax_local->link = phydev->link;
+ update = true;
+ }
+
+ if (update)
+ ax88796c_set_mac(ax_local);
+
+ if (net_ratelimit())
+ phy_print_status(ndev->phydev);
+}
+
+static void ax88796c_set_csums(struct ax88796c_device *ax_local)
+{
+ struct net_device *ndev = ax_local->ndev;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ if (ndev->features & NETIF_F_RXCSUM) {
+ AX_WRITE(&ax_local->ax_spi, COERCR0_DEFAULT, P4_COERCR0);
+ AX_WRITE(&ax_local->ax_spi, COERCR1_DEFAULT, P4_COERCR1);
+ } else {
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COERCR0);
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COERCR1);
+ }
+
+ if (ndev->features & NETIF_F_HW_CSUM) {
+ AX_WRITE(&ax_local->ax_spi, COETCR0_DEFAULT, P4_COETCR0);
+ AX_WRITE(&ax_local->ax_spi, COETCR1_TXPPPE, P4_COETCR1);
+ } else {
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COETCR0);
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COETCR1);
+ }
+}
+
+static int
+ax88796c_open(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ unsigned long irq_flag = 0;
+ int fc = AX_FC_NONE;
+ int ret;
+ u16 t;
+
+ ret = request_irq(ndev->irq, ax88796c_interrupt,
+ irq_flag, ndev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "unable to get IRQ %d (errno=%d).\n",
+ ndev->irq, ret);
+ return ret;
+ }
+
+ mutex_lock(&ax_local->spi_lock);
+
+ ret = ax88796c_soft_reset(ax_local);
+ if (ret < 0) {
+ free_irq(ndev->irq, ndev);
+ mutex_unlock(&ax_local->spi_lock);
+ return ret;
+ }
+ ax_local->seq_num = 0x1f;
+
+ ax88796c_set_mac_addr(ndev);
+ ax88796c_set_csums(ax_local);
+
+ /* Disable packet stuffing */
+ t = AX_READ(&ax_local->ax_spi, P1_RXBSPCR);
+ t &= ~RXBSPCR_STUF_ENABLE;
+ AX_WRITE(&ax_local->ax_spi, t, P1_RXBSPCR);
+
+ /* Enable RX packet process */
+ AX_WRITE(&ax_local->ax_spi, RPPER_RXEN, P1_RPPER);
+
+ t = AX_READ(&ax_local->ax_spi, P0_FER);
+ t |= FER_RXEN | FER_TXEN | FER_BSWAP | FER_IRQ_PULL;
+ AX_WRITE(&ax_local->ax_spi, t, P0_FER);
+
+ /* Setup LED mode */
+ AX_WRITE(&ax_local->ax_spi,
+ (LCR_LED0_EN | LCR_LED0_DUPLEX | LCR_LED1_EN |
+ LCR_LED1_100MODE), P2_LCR0);
+ AX_WRITE(&ax_local->ax_spi,
+ (AX_READ(&ax_local->ax_spi, P2_LCR1) & LCR_LED2_MASK) |
+ LCR_LED2_EN | LCR_LED2_LINK, P2_LCR1);
+
+ /* Disable PHY auto-polling */
+ AX_WRITE(&ax_local->ax_spi, PCR_PHYID(AX88796C_PHY_ID), P2_PCR);
+
+ /* Enable MAC interrupts */
+ AX_WRITE(&ax_local->ax_spi, IMR_DEFAULT, P0_IMR);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ /* Setup flow-control configuration */
+ phy_support_asym_pause(ax_local->phydev);
+
+ if (ax_local->phydev->advertising &&
+ (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ ax_local->phydev->advertising) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ax_local->phydev->advertising)))
+ fc |= AX_FC_ANEG;
+
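+ /* Advertised Pause enables RX flow control; TX flow control is
+ * used when exactly one of Pause/Asym_Pause is advertised.
+ */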
+ fc |= linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ ax_local->phydev->advertising) ? AX_FC_RX : 0;
+ fc |= (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ ax_local->phydev->advertising) !=
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ax_local->phydev->advertising)) ? AX_FC_TX : 0;
+ ax_local->flowctrl = fc;
+
+ phy_start(ax_local->ndev->phydev);
+
+ netif_start_queue(ndev);
+
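+ /* Initialize the SPI message that axspi_read_rxq() reuses for RX
+ * transfers.
+ */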
+ spi_message_init(&ax_local->ax_spi.rx_msg);
+
+ return 0;
+}
+
+static int
+ax88796c_close(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ phy_stop(ndev->phydev);
+
+ /* We take the mutex early not only to protect the device
+ * against concurrent access, but also to avoid waking up the
+ * queue in ax88796c_work(). phy_stop() must be called first
+ * because it takes the same mutex to access the SPI bus.
+ */
+ mutex_lock(&ax_local->spi_lock);
+
+ netif_stop_queue(ndev);
+
+ /* No more work can be scheduled now. Make any pending work,
+ * including one already waiting for the mutex to be unlocked,
+ * NOP.
+ */
+ netif_dbg(ax_local, ifdown, ndev, "clearing bits\n");
+ clear_bit(EVENT_SET_MULTI, &ax_local->flags);
+ clear_bit(EVENT_INTR, &ax_local->flags);
+ clear_bit(EVENT_TX, &ax_local->flags);
+
+ /* Disable MAC interrupts */
+ AX_WRITE(&ax_local->ax_spi, IMR_MASKALL, P0_IMR);
+ __skb_queue_purge(&ax_local->tx_wait_q);
+ ax88796c_soft_reset(ax_local);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ cancel_work_sync(&ax_local->ax_work);
+
+ free_irq(ndev->irq, ndev);
+
+ return 0;
+}
+
+static int
+ax88796c_set_features(struct net_device *ndev, netdev_features_t features)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ netdev_features_t changed = features ^ ndev->features;
+
+ if (!(changed & (NETIF_F_RXCSUM | NETIF_F_HW_CSUM)))
+ return 0;
+
+ ndev->features = features;
+
+ /* ax88796c_set_csums() asserts that spi_lock is held */
+ mutex_lock(&ax_local->spi_lock);
+ ax88796c_set_csums(ax_local);
+ mutex_unlock(&ax_local->spi_lock);
+
+ return 0;
+}
+
+static const struct net_device_ops ax88796c_netdev_ops = {
+ .ndo_open = ax88796c_open,
+ .ndo_stop = ax88796c_close,
+ .ndo_start_xmit = ax88796c_start_xmit,
+ .ndo_get_stats64 = ax88796c_get_stats64,
+ .ndo_do_ioctl = ax88796c_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_features = ax88796c_set_features,
+};
+
+static int ax88796c_hard_reset(struct ax88796c_device *ax_local)
+{
+ struct device *dev = (struct device *)&ax_local->spi->dev;
+ struct gpio_desc *reset_gpio;
+
+ /* Get the reset GPIO */
+ reset_gpio = gpiod_get(dev, "reset", 0);
+ if (IS_ERR(reset_gpio)) {
+ dev_err(dev, "Could not get 'reset' GPIO: %ld\n", PTR_ERR(reset_gpio));
+ return PTR_ERR(reset_gpio);
+ }
+
+ /* Assert reset for 100 ms, then release it and let the chip settle */
+ gpiod_direction_output(reset_gpio, 1);
+ msleep(100);
+ gpiod_direction_output(reset_gpio, 0);
+ gpiod_put(reset_gpio);
+ msleep(20);
+
+ return 0;
+}
+
+static int ax88796c_probe(struct spi_device *spi)
+{
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ struct ax88796c_device *ax_local;
+ struct net_device *ndev;
+ u16 temp;
+ int ret;
+
+ ndev = devm_alloc_etherdev(&spi->dev, sizeof(*ax_local));
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, &spi->dev);
+
+ ax_local = to_ax88796c_device(ndev);
+
+ dev_set_drvdata(&spi->dev, ax_local);
+ ax_local->spi = spi;
+ ax_local->ax_spi.spi = spi;
+
+ ax_local->stats =
+ devm_netdev_alloc_pcpu_stats(&spi->dev,
+ struct ax88796c_pcpu_stats);
+ if (!ax_local->stats)
+ return -ENOMEM;
+
+ ax_local->ndev = ndev;
+ ax_local->priv_flags |= comp ? AX_CAP_COMP : 0;
+ ax_local->msg_enable = msg_enable;
+ mutex_init(&ax_local->spi_lock);
+
+ ax_local->mdiobus = devm_mdiobus_alloc(&spi->dev);
+ if (!ax_local->mdiobus)
+ return -ENOMEM;
+
+ ax_local->mdiobus->priv = ax_local;
+ ax_local->mdiobus->read = ax88796c_mdio_read;
+ ax_local->mdiobus->write = ax88796c_mdio_write;
+ ax_local->mdiobus->name = "ax88796c-mdiobus";
+ ax_local->mdiobus->phy_mask = (u32)~BIT(AX88796C_PHY_ID);
+ ax_local->mdiobus->parent = &spi->dev;
+
+ snprintf(ax_local->mdiobus->id, MII_BUS_ID_SIZE,
+ "ax88796c-%s.%u", dev_name(&spi->dev), spi->chip_select);
+
+ ret = devm_mdiobus_register(&spi->dev, ax_local->mdiobus);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Could not register MDIO bus\n");
+ return ret;
+ }
+
+ if (netif_msg_probe(ax_local)) {
+ dev_info(&spi->dev, "AX88796C-SPI Configuration:\n");
+ dev_info(&spi->dev, " Compression : %s\n",
+ ax_local->priv_flags & AX_CAP_COMP ? "ON" : "OFF");
+ }
+
+ ndev->irq = spi->irq;
+ ndev->netdev_ops = &ax88796c_netdev_ops;
+ ndev->ethtool_ops = &ax88796c_ethtool_ops;
+ ndev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ ndev->needed_headroom = TX_OVERHEAD;
+ ndev->needed_tailroom = TX_EOP_SIZE;
+
+ mutex_lock(&ax_local->spi_lock);
+
+ /* ax88796c gpio reset */
+ ax88796c_hard_reset(ax_local);
+
+ /* Reset AX88796C */
+ ret = ax88796c_soft_reset(ax_local);
+ if (ret < 0) {
+ ret = -ENODEV;
+ mutex_unlock(&ax_local->spi_lock);
+ goto err;
+ }
+ /* Check board revision */
+ temp = AX_READ(&ax_local->ax_spi, P2_CRIR);
+ if ((temp & 0xF) != 0x0) {
+ dev_err(&spi->dev, "spi read failed: %d\n", temp);
+ ret = -ENODEV;
+ mutex_unlock(&ax_local->spi_lock);
+ goto err;
+ }
+
+ /* Reload EEPROM */
+ ax88796c_reload_eeprom(ax_local);
+
+ ax88796c_load_mac_addr(ndev);
+
+ if (netif_msg_probe(ax_local))
+ dev_info(&spi->dev, "irq %d, MAC addr %pM\n",
+ ndev->irq, ndev->dev_addr);
+
+ /* Disable power saving */
+ AX_WRITE(&ax_local->ax_spi, (AX_READ(&ax_local->ax_spi, P0_PSCR)
+ & PSCR_PS_MASK) | PSCR_PS_D0, P0_PSCR);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ INIT_WORK(&ax_local->ax_work, ax88796c_work);
+
+ skb_queue_head_init(&ax_local->tx_wait_q);
+
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
+ ax_local->mdiobus->id, AX88796C_PHY_ID);
+ ax_local->phydev = phy_connect(ax_local->ndev, phy_id,
+ ax88796c_handle_link_change,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(ax_local->phydev)) {
+ ret = PTR_ERR(ax_local->phydev);
+ goto err;
+ }
+ ax_local->phydev->irq = PHY_POLL;
+
+ ret = devm_register_netdev(&spi->dev, ndev);
+ if (ret) {
+ dev_err(&spi->dev, "failed to register a network device\n");
+ goto err_phy_dis;
+ }
+
+ netif_info(ax_local, probe, ndev, "%s %s registered\n",
+ dev_driver_string(&spi->dev),
+ dev_name(&spi->dev));
+ phy_attached_info(ax_local->phydev);
+
+ return 0;
+
+err_phy_dis:
+ phy_disconnect(ax_local->phydev);
+err:
+ return ret;
+}
+
+static int ax88796c_remove(struct spi_device *spi)
+{
+ struct ax88796c_device *ax_local = dev_get_drvdata(&spi->dev);
+ struct net_device *ndev = ax_local->ndev;
+
+ phy_disconnect(ndev->phydev);
+
+ netif_info(ax_local, probe, ndev, "removing network device %s %s\n",
+ dev_driver_string(&spi->dev),
+ dev_name(&spi->dev));
+
+ return 0;
+}
+
+static const struct of_device_id ax88796c_dt_ids[] = {
+ { .compatible = "asix,ax88796c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ax88796c_dt_ids);
+
+static const struct spi_device_id asix_id[] = {
+ { "ax88796c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, asix_id);
+
+static struct spi_driver ax88796c_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(ax88796c_dt_ids),
+ },
+ .probe = ax88796c_probe,
+ .remove = ax88796c_remove,
+ .id_table = asix_id,
+};
+
+static __init int ax88796c_spi_init(void)
+{
+ int ret;
+
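+ /* Turn the no_regs_list range string into a bitmap of register
+ * addresses that must be skipped during a register dump.
+ */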
+ bitmap_zero(ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
+ ret = bitmap_parse(no_regs_list, 35,
+ ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
+ if (ret) {
+ bitmap_fill(ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
+ pr_err("Invalid bitmap description, masking all registers\n");
+ }
+
+ return spi_register_driver(&ax88796c_spi_driver);
+}
+
+static __exit void ax88796c_spi_exit(void)
+{
+ spi_unregister_driver(&ax88796c_spi_driver);
+}
+
+module_init(ax88796c_spi_init);
+module_exit(ax88796c_spi_exit);
+
+MODULE_AUTHOR("Łukasz Stelmach <[email protected]>");
+MODULE_DESCRIPTION("ASIX AX88796C SPI Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/asix/ax88796c_main.h b/drivers/net/ethernet/asix/ax88796c_main.h
new file mode 100644
index 000000000000..80263c3cef75
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_main.h
@@ -0,0 +1,568 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _AX88796C_MAIN_H
+#define _AX88796C_MAIN_H
+
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+
+#include "ax88796c_spi.h"
+
+/* These identify the driver base version and may not be removed. */
+#define DRV_NAME "ax88796c"
+#define ADP_NAME "ASIX AX88796C SPI Ethernet Adapter"
+
+#define TX_QUEUE_HIGH_WATER 45 /* Tx queue high water mark */
+#define TX_QUEUE_LOW_WATER 20 /* Tx queue low water mark */
+
+#define AX88796C_REGDUMP_LEN 256
+#define AX88796C_PHY_REGDUMP_LEN 14
+#define AX88796C_PHY_ID 0x10
+
+#define TX_OVERHEAD 8
+#define TX_EOP_SIZE 4
+
+#define AX_MCAST_FILTER_SIZE 8
+#define AX_MAX_MCAST 64
+#define AX_MAX_CLK 80000000
+#define TX_HDR_SOP_DICF 0x8000
+#define TX_HDR_SOP_CPHI 0x4000
+#define TX_HDR_SOP_INT 0x2000
+#define TX_HDR_SOP_MDEQ 0x1000
+#define TX_HDR_SOP_PKTLEN 0x07FF
+#define TX_HDR_SOP_SEQNUM 0xF800
+#define TX_HDR_SOP_PKTLENBAR 0x07FF
+
+#define TX_HDR_SEG_FS 0x8000
+#define TX_HDR_SEG_LS 0x4000
+#define TX_HDR_SEG_SEGNUM 0x3800
+#define TX_HDR_SEG_SEGLEN 0x0700
+#define TX_HDR_SEG_EOFST 0xC000
+#define TX_HDR_SEG_SOFST 0x3800
+#define TX_HDR_SEG_SEGLENBAR 0x07FF
+
+#define TX_HDR_EOP_SEQNUM 0xF800
+#define TX_HDR_EOP_PKTLEN 0x07FF
+#define TX_HDR_EOP_SEQNUMBAR 0xF800
+#define TX_HDR_EOP_PKTLENBAR 0x07FF
+
+/* Rx header fields mask */
+#define RX_HDR1_MCBC 0x8000
+#define RX_HDR1_STUFF_PKT 0x4000
+#define RX_HDR1_MII_ERR 0x2000
+#define RX_HDR1_CRC_ERR 0x1000
+#define RX_HDR1_PKT_LEN 0x07FF
+
+#define RX_HDR2_SEQ_NUM 0xF800
+#define RX_HDR2_PKT_LEN_BAR 0x7FFF
+
+#define RX_HDR3_PE 0x8000
+#define RX_HDR3_L3_TYPE_IPV4V6 0x6000
+#define RX_HDR3_L3_TYPE_IP 0x4000
+#define RX_HDR3_L3_TYPE_IPV6 0x2000
+#define RX_HDR3_L4_TYPE_ICMPV6 0x1400
+#define RX_HDR3_L4_TYPE_TCP 0x1000
+#define RX_HDR3_L4_TYPE_IGMP 0x0c00
+#define RX_HDR3_L4_TYPE_ICMP 0x0800
+#define RX_HDR3_L4_TYPE_UDP 0x0400
+#define RX_HDR3_L3_ERR 0x0200
+#define RX_HDR3_L4_ERR 0x0100
+#define RX_HDR3_PRIORITY(x) ((x) << 4)
+#define RX_HDR3_STRIP 0x0008
+#define RX_HDR3_VLAN_ID 0x0007
+
+struct ax88796c_pcpu_stats {
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 rx_dropped;
+ u32 tx_dropped;
+ u32 rx_frame_errors;
+ u32 rx_crc_errors;
+};
+
+struct ax88796c_device {
+ struct spi_device *spi;
+ struct net_device *ndev;
+ struct ax88796c_pcpu_stats __percpu *stats;
+
+ struct work_struct ax_work;
+
+ struct mutex spi_lock; /* device access */
+
+ struct sk_buff_head tx_wait_q;
+
+ struct axspi_data ax_spi;
+
+ struct mii_bus *mdiobus;
+ struct phy_device *phydev;
+
+ int msg_enable;
+
+ u16 seq_num;
+
+ u8 multi_filter[AX_MCAST_FILTER_SIZE];
+
+ int link;
+ int speed;
+ int duplex;
+ int pause;
+ int asym_pause;
+ int flowctrl;
+ #define AX_FC_NONE 0
+ #define AX_FC_RX BIT(0)
+ #define AX_FC_TX BIT(1)
+ #define AX_FC_ANEG BIT(2)
+
+ u32 priv_flags;
+ #define AX_CAP_COMP BIT(0)
+ #define AX_PRIV_FLAGS_MASK (AX_CAP_COMP)
+
+ unsigned long flags;
+ #define EVENT_INTR BIT(0)
+ #define EVENT_TX BIT(1)
+ #define EVENT_SET_MULTI BIT(2)
+};
+
+#define to_ax88796c_device(ndev) ((struct ax88796c_device *)netdev_priv(ndev))
+
+enum skb_state {
+ illegal = 0,
+ tx_done,
+ rx_done,
+ rx_err,
+};
+
+struct skb_data {
+ enum skb_state state;
+ size_t len;
+};
+
+/* A88796C register definition */
+ /* Definition of PAGE0 */
+#define P0_PSR (0x00)
+ #define PSR_DEV_READY BIT(7)
+ #define PSR_RESET (0 << 15)
+ #define PSR_RESET_CLR BIT(15)
+#define P0_BOR (0x02)
+#define P0_FER (0x04)
+ #define FER_IPALM BIT(0)
+ #define FER_DCRC BIT(1)
+ #define FER_RH3M BIT(2)
+ #define FER_HEADERSWAP BIT(7)
+ #define FER_WSWAP BIT(8)
+ #define FER_BSWAP BIT(9)
+ #define FER_INTHI BIT(10)
+ #define FER_INTLO (0 << 10)
+ #define FER_IRQ_PULL BIT(11)
+ #define FER_RXEN BIT(14)
+ #define FER_TXEN BIT(15)
+#define P0_ISR (0x06)
+ #define ISR_RXPKT BIT(0)
+ #define ISR_MDQ BIT(4)
+ #define ISR_TXT BIT(5)
+ #define ISR_TXPAGES BIT(6)
+ #define ISR_TXERR BIT(8)
+ #define ISR_LINK BIT(9)
+#define P0_IMR (0x08)
+ #define IMR_RXPKT BIT(0)
+ #define IMR_MDQ BIT(4)
+ #define IMR_TXT BIT(5)
+ #define IMR_TXPAGES BIT(6)
+ #define IMR_TXERR BIT(8)
+ #define IMR_LINK BIT(9)
+ #define IMR_MASKALL (0xFFFF)
+ #define IMR_DEFAULT (IMR_TXERR)
+#define P0_WFCR (0x0A)
+ #define WFCR_PMEIND BIT(0) /* PME indication */
+ #define WFCR_PMETYPE BIT(1) /* PME I/O type */
+ #define WFCR_PMEPOL BIT(2) /* PME polarity */
+ #define WFCR_PMERST BIT(3) /* Reset PME */
+ #define WFCR_SLEEP BIT(4) /* Enable sleep mode */
+ #define WFCR_WAKEUP BIT(5) /* Enable wakeup mode */
+ #define WFCR_WAITEVENT BIT(6) /* Reserved */
+ #define WFCR_CLRWAKE BIT(7) /* Clear wakeup */
+ #define WFCR_LINKCH BIT(8) /* Enable link change */
+ #define WFCR_MAGICP BIT(9) /* Enable magic packet */
+ #define WFCR_WAKEF BIT(10) /* Enable wakeup frame */
+ #define WFCR_PMEEN BIT(11) /* Enable PME pin */
+ #define WFCR_LINKCHS BIT(12) /* Link change status */
+ #define WFCR_MAGICPS BIT(13) /* Magic packet status */
+ #define WFCR_WAKEFS BIT(14) /* Wakeup frame status */
+ #define WFCR_PMES BIT(15) /* PME pin status */
+#define P0_PSCR (0x0C)
+ #define PSCR_PS_MASK (0xFFF0)
+ #define PSCR_PS_D0 (0)
+ #define PSCR_PS_D1 BIT(0)
+ #define PSCR_PS_D2 BIT(1)
+ #define PSCR_FPS BIT(3) /* Enable fiber mode PS */
+ #define PSCR_SWPS BIT(4) /* Enable software */
+ /* PS control */
+ #define PSCR_WOLPS BIT(5) /* Enable WOL PS */
+ #define PSCR_SWWOL BIT(6) /* Enable software select */
+ /* WOL PS */
+ #define PSCR_PHYOSC BIT(7) /* Internal PHY OSC control */
+ #define PSCR_FOFEF BIT(8) /* Force PHY generate FEF */
+ #define PSCR_FOF BIT(9) /* Force PHY in fiber mode */
+ #define PSCR_PHYPD BIT(10) /* PHY power down. */
+ /* Active high */
+ #define PSCR_PHYRST BIT(11) /* PHY reset signal. */
+ /* Active low */
+ #define PSCR_PHYCSIL BIT(12) /* PHY cable energy detect */
+ #define PSCR_PHYCOFF BIT(13) /* PHY cable off */
+ #define PSCR_PHYLINK BIT(14) /* PHY link status */
+ #define PSCR_EEPOK BIT(15) /* EEPROM load complete */
+#define P0_MACCR (0x0E)
+ #define MACCR_RXEN BIT(0) /* Enable RX */
+ #define MACCR_DUPLEX_FULL BIT(1) /* 1: Full, 0: Half */
+ #define MACCR_SPEED_100 BIT(2) /* 1: 100Mbps, 0: 10Mbps */
+ #define MACCR_RXFC_ENABLE BIT(3)
+ #define MACCR_RXFC_MASK 0xFFF7
+ #define MACCR_TXFC_ENABLE BIT(4)
+ #define MACCR_TXFC_MASK 0xFFEF
+ #define MACCR_PSI BIT(6) /* Software Cable-Off */
+ /* Power Saving Interrupt */
+ #define MACCR_PF BIT(7)
+ #define MACCR_PMM_BITS 8
+ #define MACCR_PMM_MASK (0x1F00)
+ #define MACCR_PMM_RESET BIT(8)
+ #define MACCR_PMM_WAIT (2 << 8)
+ #define MACCR_PMM_READY (3 << 8)
+ #define MACCR_PMM_D1 (4 << 8)
+ #define MACCR_PMM_D2 (5 << 8)
+ #define MACCR_PMM_WAKE (7 << 8)
+ #define MACCR_PMM_D1_WAKE (8 << 8)
+ #define MACCR_PMM_D2_WAKE (9 << 8)
+ #define MACCR_PMM_SLEEP (10 << 8)
+ #define MACCR_PMM_PHY_RESET (11 << 8)
+ #define MACCR_PMM_SOFT_D1 (16 << 8)
+ #define MACCR_PMM_SOFT_D2 (17 << 8)
+#define P0_TFBFCR (0x10)
+ #define TFBFCR_SCHE_FREE_PAGE 0xE07F
+ #define TFBFCR_FREE_PAGE_BITS 0x07
+ #define TFBFCR_FREE_PAGE_LATCH BIT(6)
+ #define TFBFCR_SET_FREE_PAGE(x) (((x) & 0x3F) << TFBFCR_FREE_PAGE_BITS)
+ #define TFBFCR_TX_PAGE_SET BIT(13)
+ #define TFBFCR_MANU_ENTX BIT(15)
+ #define TX_FREEBUF_MASK 0x003F
+ #define TX_DPTSTART 0x4000
+
+#define P0_TSNR (0x12)
+ #define TXNR_TXB_ERR BIT(5)
+ #define TXNR_TXB_IDLE BIT(6)
+ #define TSNR_PKT_CNT(x) (((x) & 0x3F) << 8)
+ #define TXNR_TXB_REINIT BIT(14)
+ #define TSNR_TXB_START BIT(15)
+#define P0_RTDPR (0x14)
+#define P0_RXBCR1 (0x16)
+ #define RXBCR1_RXB_DISCARD BIT(14)
+ #define RXBCR1_RXB_START BIT(15)
+#define P0_RXBCR2 (0x18)
+ #define RXBCR2_PKT_MASK (0xFF)
+ #define RXBCR2_RXPC_MASK (0x7F)
+ #define RXBCR2_RXB_READY BIT(13)
+ #define RXBCR2_RXB_IDLE BIT(14)
+ #define RXBCR2_RXB_REINIT BIT(15)
+#define P0_RTWCR (0x1A)
+ #define RTWCR_RXWC_MASK (0x3FFF)
+ #define RTWCR_RX_LATCH BIT(15)
+#define P0_RCPHR (0x1C)
+
+ /* Definition of PAGE1 */
+#define P1_RPPER (0x22)
+ #define RPPER_RXEN BIT(0)
+#define P1_MRCR (0x28)
+#define P1_MDR (0x2A)
+#define P1_RMPR (0x2C)
+#define P1_TMPR (0x2E)
+#define P1_RXBSPCR (0x30)
+ #define RXBSPCR_STUF_WORD_CNT(x) (((x) & 0x7000) >> 12)
+ #define RXBSPCR_STUF_ENABLE BIT(15)
+#define P1_MCR (0x32)
+ #define MCR_SBP BIT(8)
+ #define MCR_SM BIT(9)
+ #define MCR_CRCENLAN BIT(11)
+ #define MCR_STP BIT(12)
+ /* Definition of PAGE2 */
+#define P2_CIR (0x42)
+#define P2_PCR (0x44)
+ #define PCR_POLL_EN BIT(0)
+ #define PCR_POLL_FLOWCTRL BIT(1)
+ #define PCR_POLL_BMCR BIT(2)
+ #define PCR_PHYID(x) ((x) << 8)
+#define P2_PHYSR (0x46)
+#define P2_MDIODR (0x48)
+#define P2_MDIOCR (0x4A)
+ #define MDIOCR_RADDR(x) ((x) & 0x1F)
+ #define MDIOCR_FADDR(x) (((x) & 0x1F) << 8)
+ #define MDIOCR_VALID BIT(13)
+ #define MDIOCR_READ BIT(14)
+ #define MDIOCR_WRITE BIT(15)
+#define P2_LCR0 (0x4C)
+ #define LCR_LED0_EN BIT(0)
+ #define LCR_LED0_100MODE BIT(1)
+ #define LCR_LED0_DUPLEX BIT(2)
+ #define LCR_LED0_LINK BIT(3)
+ #define LCR_LED0_ACT BIT(4)
+ #define LCR_LED0_COL BIT(5)
+ #define LCR_LED0_10MODE BIT(6)
+ #define LCR_LED0_DUPCOL BIT(7)
+ #define LCR_LED1_EN BIT(8)
+ #define LCR_LED1_100MODE BIT(9)
+ #define LCR_LED1_DUPLEX BIT(10)
+ #define LCR_LED1_LINK BIT(11)
+ #define LCR_LED1_ACT BIT(12)
+ #define LCR_LED1_COL BIT(13)
+ #define LCR_LED1_10MODE BIT(14)
+ #define LCR_LED1_DUPCOL BIT(15)
+#define P2_LCR1 (0x4E)
+ #define LCR_LED2_MASK (0xFF00)
+ #define LCR_LED2_EN BIT(0)
+ #define LCR_LED2_100MODE BIT(1)
+ #define LCR_LED2_DUPLEX BIT(2)
+ #define LCR_LED2_LINK BIT(3)
+ #define LCR_LED2_ACT BIT(4)
+ #define LCR_LED2_COL BIT(5)
+ #define LCR_LED2_10MODE BIT(6)
+ #define LCR_LED2_DUPCOL BIT(7)
+#define P2_IPGCR (0x50)
+#define P2_CRIR (0x52)
+#define P2_FLHWCR (0x54)
+#define P2_RXCR (0x56)
+ #define RXCR_PRO BIT(0)
+ #define RXCR_AMALL BIT(1)
+ #define RXCR_SEP BIT(2)
+ #define RXCR_AB BIT(3)
+ #define RXCR_AM BIT(4)
+ #define RXCR_AP BIT(5)
+ #define RXCR_ARP BIT(6)
+#define P2_JLCR (0x58)
+#define P2_MPLR (0x5C)
+
+ /* Definition of PAGE3 */
+#define P3_MACASR0 (0x62)
+ #define P3_MACASR(x) (P3_MACASR0 + 2 * (x))
+ #define MACASR_LOWBYTE_MASK 0x00FF
+ #define MACASR_HIGH_BITS 0x08
+#define P3_MACASR1 (0x64)
+#define P3_MACASR2 (0x66)
+#define P3_MFAR01 (0x68)
+#define P3_MFAR_BASE (0x68)
+ #define P3_MFAR(x) (P3_MFAR_BASE + 2 * (x))
+
+#define P3_MFAR23 (0x6A)
+#define P3_MFAR45 (0x6C)
+#define P3_MFAR67 (0x6E)
+#define P3_VID0FR (0x70)
+#define P3_VID1FR (0x72)
+#define P3_EECSR (0x74)
+#define P3_EEDR (0x76)
+#define P3_EECR (0x78)
+ #define EECR_ADDR_MASK (0x00FF)
+ #define EECR_READ_ACT BIT(8)
+ #define EECR_WRITE_ACT BIT(9)
+ #define EECR_WRITE_DISABLE BIT(10)
+ #define EECR_WRITE_ENABLE BIT(11)
+ #define EECR_EE_READY BIT(13)
+ #define EECR_RELOAD BIT(14)
+ #define EECR_RESET BIT(15)
+#define P3_TPCR (0x7A)
+ #define TPCR_PATT_MASK (0xFF)
+ #define TPCR_RAND_PKT_EN BIT(14)
+ #define TPCR_FIXED_PKT_EN BIT(15)
+#define P3_TPLR (0x7C)
+ /* Definition of PAGE4 */
+#define P4_SPICR (0x8A)
+ #define SPICR_RCEN BIT(0)
+ #define SPICR_QCEN BIT(1)
+ #define SPICR_RBRE BIT(3)
+ #define SPICR_PMM BIT(4)
+ #define SPICR_LOOPBACK BIT(8)
+ #define SPICR_CORE_RES_CLR BIT(10)
+ #define SPICR_SPI_RES_CLR BIT(11)
+#define P4_SPIISMR (0x8C)
+
+#define P4_COERCR0 (0x92)
+ #define COERCR0_RXIPCE BIT(0)
+ #define COERCR0_RXIPVE BIT(1)
+ #define COERCR0_RXV6PE BIT(2)
+ #define COERCR0_RXTCPE BIT(3)
+ #define COERCR0_RXUDPE BIT(4)
+ #define COERCR0_RXICMP BIT(5)
+ #define COERCR0_RXIGMP BIT(6)
+ #define COERCR0_RXICV6 BIT(7)
+
+ #define COERCR0_RXTCPV6 BIT(8)
+ #define COERCR0_RXUDPV6 BIT(9)
+ #define COERCR0_RXICMV6 BIT(10)
+ #define COERCR0_RXIGMV6 BIT(11)
+ #define COERCR0_RXICV6V6 BIT(12)
+
+ #define COERCR0_DEFAULT (COERCR0_RXIPCE | COERCR0_RXV6PE | \
+ COERCR0_RXTCPE | COERCR0_RXUDPE | \
+ COERCR0_RXTCPV6 | COERCR0_RXUDPV6)
+#define P4_COERCR1 (0x94)
+ #define COERCR1_IPCEDP BIT(0)
+ #define COERCR1_IPVEDP BIT(1)
+ #define COERCR1_V6VEDP BIT(2)
+ #define COERCR1_TCPEDP BIT(3)
+ #define COERCR1_UDPEDP BIT(4)
+ #define COERCR1_ICMPDP BIT(5)
+ #define COERCR1_IGMPDP BIT(6)
+ #define COERCR1_ICV6DP BIT(7)
+ #define COERCR1_RX64TE BIT(8)
+ #define COERCR1_RXPPPE BIT(9)
+ #define COERCR1_TCP6DP BIT(10)
+ #define COERCR1_UDP6DP BIT(11)
+ #define COERCR1_IC6DP BIT(12)
+ #define COERCR1_IG6DP BIT(13)
+ #define COERCR1_ICV66DP BIT(14)
+ #define COERCR1_RPCE BIT(15)
+
+ #define COERCR1_DEFAULT (COERCR1_RXPPPE)
+
+#define P4_COETCR0 (0x96)
+ #define COETCR0_TXIP BIT(0)
+ #define COETCR0_TXTCP BIT(1)
+ #define COETCR0_TXUDP BIT(2)
+ #define COETCR0_TXICMP BIT(3)
+ #define COETCR0_TXIGMP BIT(4)
+ #define COETCR0_TXICV6 BIT(5)
+ #define COETCR0_TXTCPV6 BIT(8)
+ #define COETCR0_TXUDPV6 BIT(9)
+ #define COETCR0_TXICMV6 BIT(10)
+ #define COETCR0_TXIGMV6 BIT(11)
+ #define COETCR0_TXICV6V6 BIT(12)
+
+ #define COETCR0_DEFAULT (COETCR0_TXIP | COETCR0_TXTCP | \
+ COETCR0_TXUDP | COETCR0_TXTCPV6 | \
+ COETCR0_TXUDPV6)
+#define P4_COETCR1 (0x98)
+ #define COETCR1_TX64TE BIT(0)
+ #define COETCR1_TXPPPE BIT(1)
+
+#define P4_COECEDR (0x9A)
+#define P4_L2CECR (0x9C)
+
+ /* Definition of PAGE5 */
+#define P5_WFTR (0xA2)
+ #define WFTR_2MS (0x01)
+ #define WFTR_4MS (0x02)
+ #define WFTR_8MS (0x03)
+ #define WFTR_16MS (0x04)
+ #define WFTR_32MS (0x05)
+ #define WFTR_64MS (0x06)
+ #define WFTR_128MS (0x07)
+ #define WFTR_256MS (0x08)
+ #define WFTR_512MS (0x09)
+ #define WFTR_1024MS (0x0A)
+ #define WFTR_2048MS (0x0B)
+ #define WFTR_4096MS (0x0C)
+ #define WFTR_8192MS (0x0D)
+ #define WFTR_16384MS (0x0E)
+ #define WFTR_32768MS (0x0F)
+#define P5_WFCCR (0xA4)
+#define P5_WFCR03 (0xA6)
+ #define WFCR03_F0_EN BIT(0)
+ #define WFCR03_F1_EN BIT(4)
+ #define WFCR03_F2_EN BIT(8)
+ #define WFCR03_F3_EN BIT(12)
+#define P5_WFCR47 (0xA8)
+ #define WFCR47_F4_EN BIT(0)
+ #define WFCR47_F5_EN BIT(4)
+ #define WFCR47_F6_EN BIT(8)
+ #define WFCR47_F7_EN BIT(12)
+#define P5_WF0BMR0 (0xAA)
+#define P5_WF0BMR1 (0xAC)
+#define P5_WF0CR (0xAE)
+#define P5_WF0OBR (0xB0)
+#define P5_WF1BMR0 (0xB2)
+#define P5_WF1BMR1 (0xB4)
+#define P5_WF1CR (0xB6)
+#define P5_WF1OBR (0xB8)
+#define P5_WF2BMR0 (0xBA)
+#define P5_WF2BMR1 (0xBC)
+
+ /* Definition of PAGE6 */
+#define P6_WF2CR (0xC2)
+#define P6_WF2OBR (0xC4)
+#define P6_WF3BMR0 (0xC6)
+#define P6_WF3BMR1 (0xC8)
+#define P6_WF3CR (0xCA)
+#define P6_WF3OBR (0xCC)
+#define P6_WF4BMR0 (0xCE)
+#define P6_WF4BMR1 (0xD0)
+#define P6_WF4CR (0xD2)
+#define P6_WF4OBR (0xD4)
+#define P6_WF5BMR0 (0xD6)
+#define P6_WF5BMR1 (0xD8)
+#define P6_WF5CR (0xDA)
+#define P6_WF5OBR (0xDC)
+
+/* Definition of PAGE7 */
+#define P7_WF6BMR0 (0xE2)
+#define P7_WF6BMR1 (0xE4)
+#define P7_WF6CR (0xE6)
+#define P7_WF6OBR (0xE8)
+#define P7_WF7BMR0 (0xEA)
+#define P7_WF7BMR1 (0xEC)
+#define P7_WF7CR (0xEE)
+#define P7_WF7OBR (0xF0)
+#define P7_WFR01 (0xF2)
+#define P7_WFR23 (0xF4)
+#define P7_WFR45 (0xF6)
+#define P7_WFR67 (0xF8)
+#define P7_WFPC0 (0xFA)
+#define P7_WFPC1 (0xFC)
+
+/* Tx headers structure */
+struct tx_sop_header {
+ /* bit 15-11: flags, bit 10-0: packet length */
+ u16 flags_len;
+ /* bit 15-11: sequence number, bit 10-0: packet length bar */
+ u16 seq_lenbar;
+};
+
+struct tx_segment_header {
+ /* bit 15-14: flags, bit 13-11: segment number */
+ /* bit 10-0: segment length */
+ u16 flags_seqnum_seglen;
+ /* bit 15-14: end offset, bit 13-11: start offset */
+ /* bit 10-0: segment length bar */
+ u16 eo_so_seglenbar;
+};
+
+struct tx_eop_header {
+ /* bit 15-11: sequence number, bit 10-0: packet length */
+ u16 seq_len;
+ /* bit 15-11: sequence number bar, bit 10-0: packet length bar */
+ u16 seqbar_lenbar;
+};
+
+struct tx_pkt_info {
+ struct tx_sop_header sop;
+ struct tx_segment_header seg;
+ struct tx_eop_header eop;
+ u16 pkt_len;
+ u16 seq_num;
+};
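+
+/* The *_bar fields appear to carry bit-inverted copies of their
+ * counterparts (e.g. for a 100-byte packet, pkt_len = 0x064 and the
+ * PKTLENBAR field holds ~0x064 & 0x07FF), which lets the MAC
+ * sanity-check each header word.
+ */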
+
+/* Rx headers structure */
+struct rx_header {
+ u16 flags_len;
+ u16 seq_lenbar;
+ u16 flags;
+};
+
+extern unsigned long ax88796c_no_regs_mask[];
+
+#endif /* #ifndef _AX88796C_MAIN_H */
diff --git a/drivers/net/ethernet/asix/ax88796c_spi.c b/drivers/net/ethernet/asix/ax88796c_spi.c
new file mode 100644
index 000000000000..94df4f96d2be
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_spi.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#define pr_fmt(fmt) "ax88796c: " fmt
+
+#include <linux/string.h>
+#include <linux/spi/spi.h>
+
+#include "ax88796c_spi.h"
+
+const u8 ax88796c_rx_cmd_buf[5] = {AX_SPICMD_READ_RXQ, 0xFF, 0xFF, 0xFF, 0xFF};
+const u8 ax88796c_tx_cmd_buf[4] = {AX_SPICMD_WRITE_TXQ, 0xFF, 0xFF, 0xFF};
+
+/* driver bus management functions */
+int axspi_wakeup(struct axspi_data *ax_spi)
+{
+ int ret;
+
+ ax_spi->cmd_buf[0] = AX_SPICMD_EXIT_PWD; /* OP */
+ ret = spi_write(ax_spi->spi, ax_spi->cmd_buf, 1);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+ return ret;
+}
+
+int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status)
+{
+ int ret;
+
+ /* OP */
+ ax_spi->cmd_buf[0] = AX_SPICMD_READ_STATUS;
+ ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)status, 3);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+ else
+ le16_to_cpus(&status->isr);
+
+ return ret;
+}
+
+int axspi_read_rxq(struct axspi_data *ax_spi, void *data, int len)
+{
+ struct spi_transfer *xfer = ax_spi->spi_rx_xfer;
+ int ret;
+
+ memcpy(ax_spi->cmd_buf, ax88796c_rx_cmd_buf, 5);
+
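+ /* First transfer: the RX queue read command (2 bytes in compressed
+ * mode, 5 otherwise).
+ */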
+ xfer->tx_buf = ax_spi->cmd_buf;
+ xfer->rx_buf = NULL;
+ xfer->len = ax_spi->comp ? 2 : 5;
+ xfer->bits_per_word = 8;
+ spi_message_add_tail(xfer, &ax_spi->rx_msg);
+
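+ /* Second transfer: the packet data itself */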
+ xfer++;
+ xfer->rx_buf = data;
+ xfer->tx_buf = NULL;
+ xfer->len = len;
+ xfer->bits_per_word = 8;
+ spi_message_add_tail(xfer, &ax_spi->rx_msg);
+ ret = spi_sync(ax_spi->spi, &ax_spi->rx_msg);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+
+ return ret;
+}
+
+int axspi_write_txq(const struct axspi_data *ax_spi, void *data, int len)
+{
+ return spi_write(ax_spi->spi, data, len);
+}
+
+u16 axspi_read_reg(struct axspi_data *ax_spi, u8 reg)
+{
+ int ret;
+ int len = ax_spi->comp ? 3 : 4;
+
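+ /* Compressed mode sends one dummy byte fewer after the command */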
+ ax_spi->cmd_buf[0] = AX_SPICMD_READ_REG; /* OP code: read register */
+ ax_spi->cmd_buf[1] = reg; /* register address */
+ ax_spi->cmd_buf[2] = 0xFF; /* dummy cycle */
+ ax_spi->cmd_buf[3] = 0xFF; /* dummy cycle */
+ ret = spi_write_then_read(ax_spi->spi,
+ ax_spi->cmd_buf, len,
+ ax_spi->rx_buf, 2);
+ if (ret) {
+ dev_err(&ax_spi->spi->dev,
+ "%s() failed: ret = %d\n", __func__, ret);
+ return 0xFFFF;
+ }
+
+ le16_to_cpus((u16 *)ax_spi->rx_buf);
+
+ return *(u16 *)ax_spi->rx_buf;
+}
+
+int axspi_write_reg(struct axspi_data *ax_spi, u8 reg, u16 value)
+{
+ int ret;
+
+ memset(ax_spi->cmd_buf, 0, sizeof(ax_spi->cmd_buf));
+ ax_spi->cmd_buf[0] = AX_SPICMD_WRITE_REG; /* OP code: write register */
+ ax_spi->cmd_buf[1] = reg; /* register address */
+ ax_spi->cmd_buf[2] = value;
+ ax_spi->cmd_buf[3] = value >> 8;
+
+ ret = spi_write(ax_spi->spi, ax_spi->cmd_buf, 4);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+ return ret;
+}
diff --git a/drivers/net/ethernet/asix/ax88796c_spi.h b/drivers/net/ethernet/asix/ax88796c_spi.h
new file mode 100644
index 000000000000..5bcf91f603fb
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_spi.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _AX88796C_SPI_H
+#define _AX88796C_SPI_H
+
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+/* Definition of SPI command */
+#define AX_SPICMD_WRITE_TXQ 0x02
+#define AX_SPICMD_READ_REG 0x03
+#define AX_SPICMD_READ_STATUS 0x05
+#define AX_SPICMD_READ_RXQ 0x0B
+#define AX_SPICMD_BIDIR_WRQ 0xB2
+#define AX_SPICMD_WRITE_REG 0xD8
+#define AX_SPICMD_EXIT_PWD 0xAB
+
+extern const u8 ax88796c_rx_cmd_buf[];
+extern const u8 ax88796c_tx_cmd_buf[];
+
+struct axspi_data {
+ struct spi_device *spi;
+ struct spi_message rx_msg;
+ struct spi_transfer spi_rx_xfer[2];
+ u8 cmd_buf[6];
+ u8 rx_buf[6];
+ u8 comp; /* compressed SPI transfers enabled */
+};
+
+struct spi_status {
+ u16 isr;
+ u8 status;
+# define AX_STATUS_READY 0x80
+};
+
+int axspi_read_rxq(struct axspi_data *ax_spi, void *data, int len);
+int axspi_write_txq(const struct axspi_data *ax_spi, void *data, int len);
+u16 axspi_read_reg(struct axspi_data *ax_spi, u8 reg);
+int axspi_write_reg(struct axspi_data *ax_spi, u8 reg, u16 value);
+int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status);
+int axspi_wakeup(struct axspi_data *ax_spi);
+
+static inline u16 AX_READ(struct axspi_data *ax_spi, u8 offset)
+{
+ return axspi_read_reg(ax_spi, offset);
+}
+
+static inline int AX_WRITE(struct axspi_data *ax_spi, u16 value, u8 offset)
+{
+ return axspi_write_reg(ax_spi, offset, value);
+}
+
+static inline int AX_READ_STATUS(struct axspi_data *ax_spi,
+ struct spi_status *status)
+{
+ return axspi_read_status(ax_spi, status);
+}
+
+static inline int AX_WAKEUP(struct axspi_data *ax_spi)
+{
+ return axspi_wakeup(ax_spi);
+}
+#endif /* _AX88796C_SPI_H */
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 02ae98aabf91..ada3a9f0c8c8 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1968,10 +1968,10 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc->ctrl = 0;
ag->stop_desc->next = (u32)ag->stop_desc_dma;
- err = of_get_mac_address(np, ndev->dev_addr);
+ err = of_get_ethdev_address(np, ndev);
if (err) {
netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
}
err = of_get_phy_mode(np, &ag->phy_if_mode);
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 4ea157efca86..4ad3fc72e74e 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -607,7 +607,7 @@ static int alx_set_mac_address(struct net_device *netdev, void *data)
if (netdev->addr_assign_type & NET_ADDR_RANDOM)
netdev->addr_assign_type ^= NET_ADDR_RANDOM;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
alx_set_macaddr(hw, hw->mac_addr);
@@ -1832,7 +1832,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
- memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(netdev, hw->mac_addr);
memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);
hw->mdio.prtad = 0;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 20c032ab631b..da595242bc13 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -482,7 +482,7 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
@@ -1847,7 +1847,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
buffer_info->skb = NULL;
buffer_info->length = 0;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
- netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed");
+ netif_warn(adapter, rx_err, adapter->netdev, "RX dma_map_single failed");
break;
}
buffer_info->dma = mapping;
@@ -2767,7 +2767,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* got a random MAC address, set NET_ADDR_RANDOM to netdev */
netdev->addr_assign_type = NET_ADDR_RANDOM;
}
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
if (netif_msg_probe(adapter))
dev_dbg(&pdev->dev, "mac address : %pM\n",
adapter->hw.mac_addr);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 2e22483a9040..56e5f440e666 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -374,7 +374,7 @@ static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atl1e_hw_set_mac_addr(&adapter->hw);
@@ -2390,7 +2390,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 68f6c0bbd945..b4c9e805e981 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3027,7 +3027,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* mark random mac */
netdev->addr_assign_type = NET_ADDR_RANDOM;
}
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
err = -EIO;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index b69298ddb647..bbc4d7b08a49 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -931,7 +931,7 @@ static int atl2_set_mac(struct net_device *netdev, void *p)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atl2_set_mac_addr(&adapter->hw);
@@ -1405,7 +1405,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* copy the MAC address out of the EEPROM */
atl2_read_mac_addr(&adapter->hw);
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
err = -EIO;
goto err_eeprom;
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 0941d07d0833..e8cfbf4ff1b5 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -69,7 +69,7 @@ static int atlx_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atlx_set_mac_addr(&adapter->hw);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index fa784953c601..969591bbc066 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -218,7 +218,8 @@ static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index
data[1] = (val >> 0) & 0xFF;
}
-static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
+static inline void __b44_cam_write(struct b44 *bp,
+ const unsigned char *data, int index)
{
u32 val;
@@ -1200,7 +1201,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
&bp->rx_ring_dma, gfp);
if (!bp->rx_ring) {
- /* Allocation may have failed due to pci_alloc_consistent
+ /* Allocation may have failed due to dma_alloc_coherent
insisting on use of GFP_DMA, which is more restrictive
than necessary... */
struct dma_desc *rx_ring;
@@ -1383,7 +1384,7 @@ static int b44_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&bp->lock);
@@ -1507,7 +1508,8 @@ static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
}
}
-static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
+static int b44_magic_pattern(const u8 *macaddr, u8 *ppattern, u8 *pmask,
+ int offset)
{
int magicsync = 6;
int k, j, len = offset;
@@ -2171,7 +2173,7 @@ static int b44_get_invariants(struct b44 *bp)
* valid PHY address. */
bp->phy_addr &= 0x1F;
- memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, addr);
if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
pr_err("Invalid MAC address found in EEPROM\n");
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 02a569500234..7cc5213c575a 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -170,7 +170,7 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
goto err_free_buf_descs;
}
- ring->slots = kzalloc(ring->length * sizeof(*ring->slots), GFP_KERNEL);
+ ring->slots = kcalloc(ring->length, sizeof(*ring->slots), GFP_KERNEL);
if (!ring->slots)
goto err_free_buf_descs;
@@ -715,7 +715,7 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
return err;
SET_NETDEV_DEV(netdev, &pdev->dev);
- err = of_get_mac_address(dev->of_node, netdev->dev_addr);
+ err = of_get_ethdev_address(dev->of_node, netdev);
if (err)
eth_hw_addr_random(netdev);
netdev->netdev_ops = &bcm4908_enet_netdev_ops;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index d56886300ecf..a568994a03a6 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -670,7 +670,7 @@ static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
u32 val;
priv = netdev_priv(dev);
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
/* use perfect match register 0 to store my mac address */
val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
@@ -1762,7 +1762,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
pd = dev_get_platdata(&pdev->dev);
if (pd) {
- memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->mac_addr);
priv->has_phy = pd->has_phy;
priv->phy_id = pd->phy_id;
priv->has_phy_interrupt = pd->has_phy_interrupt;
@@ -2665,7 +2665,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
pd = dev_get_platdata(&pdev->dev);
if (pd) {
- memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->mac_addr);
memcpy(priv->used_ports, pd->used_ports,
sizeof(pd->used_ports));
priv->num_ports = pd->num_ports;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 7fa1b695400d..40933bf5a710 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1818,7 +1818,7 @@ static inline void umac_reset(struct bcm_sysport_priv *priv)
}
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
- unsigned char *addr)
+ const unsigned char *addr)
{
u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
addr[3];
@@ -1850,7 +1850,7 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
/* interface is disabled, changes to MAC will be reflected on next
* open call
@@ -2555,7 +2555,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
}
/* Initialize netdevice members */
- ret = of_get_mac_address(dn, dev->dev_addr);
+ ret = of_get_ethdev_address(dn, dev);
if (ret) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 6ce80cbcb48e..086739e4f40a 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -10,6 +10,7 @@
#include <linux/bcma/bcma.h>
#include <linux/brcmphy.h>
+#include <linux/of_mdio.h>
#include "bgmac.h"
static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask,
@@ -211,6 +212,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
{
struct bcma_device *core = bgmac->bcma.core;
struct mii_bus *mii_bus;
+ struct device_node *np;
int err;
mii_bus = mdiobus_alloc();
@@ -229,7 +231,9 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
mii_bus->parent = &core->dev;
mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
- err = mdiobus_register(mii_bus);
+ np = of_get_child_by_name(core->dev.of_node, "mdio");
+
+ err = of_mdiobus_register(mii_bus, np);
if (err) {
dev_err(&core->dev, "Registration of mii bus failed\n");
goto err_free_bus;
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 85fa0ab7201c..e6f48786949c 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -11,6 +11,7 @@
#include <linux/bcma/bcma.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "bgmac.h"
@@ -86,17 +87,28 @@ static int bcma_phy_connect(struct bgmac *bgmac)
struct phy_device *phy_dev;
char bus_id[MII_BUS_ID_SIZE + 3];
+ /* DT info should be the most accurate */
+ phy_dev = of_phy_get_and_connect(bgmac->net_dev, bgmac->dev->of_node,
+ bgmac_adjust_link);
+ if (phy_dev)
+ return 0;
+
/* Connect to the PHY */
- snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
- bgmac->phyaddr);
- phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link,
- PHY_INTERFACE_MODE_MII);
- if (IS_ERR(phy_dev)) {
- dev_err(bgmac->dev, "PHY connection failed\n");
- return PTR_ERR(phy_dev);
+ if (bgmac->mii_bus && bgmac->phyaddr != BGMAC_PHY_NOREGS) {
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
+ bgmac->phyaddr);
+ phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phy_dev)) {
+ dev_err(bgmac->dev, "PHY connection failed\n");
+ return PTR_ERR(phy_dev);
+ }
+
+ return 0;
}
- return 0;
+ /* Assume a fixed link to the switch port */
+ return bgmac_phy_connect_direct(bgmac);
}
static const struct bcma_device_id bgmac_bcma_tbl[] = {
@@ -128,7 +140,9 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
- err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr);
+ err = of_get_ethdev_address(bgmac->dev->of_node, bgmac->net_dev);
+ if (err == -EPROBE_DEFER)
+ return err;
/* If no MAC address assigned via device tree, check SPROM */
if (err) {
@@ -148,7 +162,7 @@ static int bgmac_probe(struct bcma_device *core)
err = -ENOTSUPP;
goto err;
}
- ether_addr_copy(bgmac->net_dev->dev_addr, mac);
+ eth_hw_addr_set(bgmac->net_dev, mac);
}
/* On BCM4706 we need common core to access PHY */
@@ -295,10 +309,7 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->cco_ctl_maskset = bcma_bgmac_cco_ctl_maskset;
bgmac->get_bus_clock = bcma_bgmac_get_bus_clock;
bgmac->cmn_maskset32 = bcma_bgmac_cmn_maskset32;
- if (bgmac->mii_bus)
- bgmac->phy_connect = bcma_phy_connect;
- else
- bgmac->phy_connect = bgmac_phy_connect_direct;
+ bgmac->phy_connect = bcma_phy_connect;
err = bgmac_enet_probe(bgmac);
if (err)
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 4ab5bf64d353..c6412c523637 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -191,7 +191,10 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->dev = &pdev->dev;
bgmac->dma_dev = &pdev->dev;
- ret = of_get_mac_address(np, bgmac->net_dev->dev_addr);
+ ret = of_get_ethdev_address(np, bgmac->net_dev);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
if (ret)
dev_warn(&pdev->dev,
"MAC address not present in device tree\n");
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index fe4d99abd548..7b525c65bacb 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -768,7 +768,7 @@ static void bgmac_umac_cmd_maskset(struct bgmac *bgmac, u32 mask, u32 set,
udelay(2);
}
-static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
+static void bgmac_write_mac_address(struct bgmac *bgmac, const u8 *addr)
{
u32 tmp;
@@ -1241,7 +1241,7 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
if (ret < 0)
return ret;
- ether_addr_copy(net_dev->dev_addr, sa->sa_data);
+ eth_hw_addr_set(net_dev, sa->sa_data);
bgmac_write_mac_address(bgmac, net_dev->dev_addr);
eth_commit_mac_addr_change(net_dev, addr);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 8c83973adca5..babc955ba64e 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2704,7 +2704,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
}
static void
-bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
+bnx2_set_mac_addr(struct bnx2 *bp, const u8 *mac_addr, u32 pos)
{
u32 val;
@@ -7910,7 +7910,7 @@ bnx2_change_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev))
bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
@@ -8574,7 +8574,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (is_kdump_kernel())
bnx2_wait_dma_complete(bp);
- memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, bp->mac_addr);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e789430f407c..2b06d78baa08 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1994,7 +1994,7 @@ int bnx2x_idle_chk(struct bnx2x *bp);
* operation has been successfully scheduled and a negative - if a requested
* operations has failed.
*/
-int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index b5d954cb409a..e8e8c2d593c5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4336,7 +4336,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev))
rc = bnx2x_set_eth_mac(bp, true);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ae87296ae1ff..aec666e97683 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8417,7 +8417,7 @@ alloc_mem_err:
* Init service functions
*/
-int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags)
{
@@ -9146,7 +9146,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
else if (bp->wol) {
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
- u8 *mac_addr = bp->dev->dev_addr;
+ const u8 *mac_addr = bp->dev->dev_addr;
struct pci_dev *pdev = bp->pdev;
u32 val;
u16 pmc;
@@ -11790,7 +11790,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
* as the SAN mac was copied from the primary MAC.
*/
if (IS_MF_FCOE_AFEX(bp))
- memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, fip_mac);
} else {
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_upper);
@@ -11823,9 +11823,10 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
u32 val, val2;
int func = BP_ABS_FUNC(bp);
int port = BP_PORT(bp);
+ u8 addr[ETH_ALEN] = {};
/* Zero primary MAC configuration */
- eth_zero_addr(bp->dev->dev_addr);
+ eth_hw_addr_set(bp->dev, addr);
if (BP_NOMCP(bp)) {
BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -11834,8 +11835,10 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
- (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+ (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
+ bnx2x_set_mac_buf(addr, val, val2);
+ eth_hw_addr_set(bp->dev, addr);
+ }
if (CNIC_SUPPORT(bp))
bnx2x_get_cnic_mac_hwinfo(bp);
@@ -11843,7 +11846,8 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
/* in SF read MACs from port configuration */
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+ bnx2x_set_mac_buf(addr, val, val2);
+ eth_hw_addr_set(bp->dev, addr);
if (CNIC_SUPPORT(bp))
bnx2x_get_cnic_mac_hwinfo(bp);
@@ -12291,7 +12295,9 @@ static int bnx2x_init_bp(struct bnx2x *bp)
if (rc)
return rc;
} else {
- eth_zero_addr(bp->dev->dev_addr);
+ static const u8 zero_addr[ETH_ALEN] = {};
+
+ eth_hw_addr_set(bp->dev, zero_addr);
}
bnx2x_set_modes_bitmap(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6fbf735fca31..74a8931ce1d1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -3058,7 +3058,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
!ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
/* update new mac to net device */
- memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bulletin->mac);
}
if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 966d5722c5e2..8c2cf5519787 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -508,7 +508,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bool is_leading);
-int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid,
+ bool set);
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *params);
int bnx2x_vfpf_set_mcast(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index ea0e9394f898..c9129b9ba446 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -384,9 +384,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
sizeof(bp->fw_ver));
if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
- memcpy(bp->dev->dev_addr,
- bp->acquire_resp.resc.current_mac_addr,
- ETH_ALEN);
+ eth_hw_addr_set(bp->dev,
+ bp->acquire_resp.resc.current_mac_addr);
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
@@ -722,7 +721,7 @@ out:
}
/* request pf to add a mac for the vf */
-int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid, bool set)
{
struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
@@ -767,7 +766,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
"vfpf SET MAC failed. Check bulletin board for new posts\n");
/* copy mac from bulletin to device */
- memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bulletin.mac);
/* check if bulletin board was updated */
if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 037767b370d5..66263aa0d96b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -391,7 +391,7 @@ static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
* netif_tx_queue_stopped().
*/
smp_mb();
- if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
+ if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
netif_tx_wake_queue(txq);
return false;
}
@@ -764,7 +764,7 @@ next_tx_int:
smp_mb();
if (unlikely(netif_tx_queue_stopped(txq)) &&
- bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+ bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
netif_tx_wake_queue(txq);
}
@@ -2416,7 +2416,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
+ if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
rx_pkts = budget;
raw_cons = NEXT_RAW_CMP(raw_cons);
if (budget)
@@ -3640,7 +3640,7 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
u16 i;
bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
- MAX_SKB_FRAGS + 1);
+ BNXT_MIN_TX_DESC_CNT);
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
@@ -4869,7 +4869,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
#endif
static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
- u8 *mac_addr)
+ const u8 *mac_addr)
{
struct hwrm_cfa_l2_filter_alloc_output *resp;
struct hwrm_cfa_l2_filter_alloc_input *req;
@@ -6366,7 +6366,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (rx_rings != bp->rx_nr_rings) {
netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
rx_rings, bp->rx_nr_rings);
- if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
+ if (netif_is_rxfh_configured(bp->dev) &&
(bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
bnxt_get_max_rss_ring(bp) >= rx_rings)) {
@@ -12369,7 +12369,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
if (rc)
return rc;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -13103,7 +13103,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
int rc = 0;
if (BNXT_PF(bp)) {
- memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
@@ -13111,7 +13111,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
if (is_valid_ether_addr(vf->mac_addr)) {
/* overwrite netdev dev_addr with admin VF MAC */
- memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, vf->mac_addr);
/* Older PF driver or firmware may not approve this
* correctly.
*/
@@ -13370,7 +13370,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
bnxt_inv_fw_health_reg(bp);
- bnxt_dl_register(bp);
+ rc = bnxt_dl_register(bp);
+ if (rc)
+ goto init_err_dl;
rc = register_netdev(dev);
if (rc)
@@ -13390,6 +13392,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
init_err_cleanup:
bnxt_dl_unregister(bp);
+init_err_dl:
bnxt_shutdown_tc(bp);
bnxt_clear_int_mode(bp);
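The bnxt.c hunks above are part of the tree-wide move to eth_hw_addr_set(): netdev->dev_addr is becoming const, so drivers may no longer memcpy() into it and must go through the helper. A minimal sketch of the pattern for a .ndo_set_mac_address handler, with the hypothetical foo_ name standing in for a real driver:

    #include <linux/etherdevice.h>

    /* Validate the requested address, then commit it via the helper
     * instead of writing netdev->dev_addr directly.
     */
    static int foo_set_mac_address(struct net_device *dev, void *p)
    {
            struct sockaddr *addr = p;

            if (!is_valid_ether_addr(addr->sa_data))
                    return -EADDRNOTAVAIL;

            eth_hw_addr_set(dev, addr->sa_data);    /* copies ETH_ALEN bytes */
            return 0;
    }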
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index ec046e7a2484..19fe6478e9b4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -629,6 +629,11 @@ struct nqe_cn {
#define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
#define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
+ * BD because the first TX BD is always a long BD.
+ */
+#define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
+
#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
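The new BNXT_MIN_TX_DESC_CNT pairs with the '>' to '>=' changes in bnxt.c: a maximally fragmented packet needs MAX_SKB_FRAGS + 1 buffers plus one extra slot for the leading long BD, and with the smallest legal ring the free count can at best equal the wake threshold, so a strict '>' would never wake the queue. A small sketch of the arithmetic, assuming these shapes:

    #include <linux/skbuff.h>

    /* Worst case for one packet: linear part + MAX_SKB_FRAGS fragments,
     * plus one extra descriptor because the first TX BD is a long BD.
     */
    #define FOO_MIN_TX_DESC_CNT     (MAX_SKB_FRAGS + 2)

    /* Inclusive wake check: with a ring of FOO_MIN_TX_DESC_CNT entries
     * the available count can equal, but never exceed, the threshold.
     */
    static bool foo_tx_can_wake(unsigned int avail, unsigned int thresh)
    {
            return avail >= thresh;
    }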
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index bf7d3c17049b..951c0c00cc95 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -134,7 +134,7 @@ void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
struct bnxt_fw_health *health = bp->fw_health;
- if (!bp->dl || !health)
+ if (!health)
return;
if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter)
@@ -188,7 +188,7 @@ void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
{
struct bnxt_fw_health *health = bp->fw_health;
- if (!bp->dl || !health)
+ if (!health)
return;
if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) &&
@@ -736,9 +736,6 @@ static const struct devlink_param bnxt_dl_params[] = {
NULL),
};
-static const struct devlink_param bnxt_dl_port_params[] = {
-};
-
static int bnxt_dl_params_register(struct bnxt *bp)
{
int rc;
@@ -748,22 +745,10 @@ static int bnxt_dl_params_register(struct bnxt *bp)
rc = devlink_params_register(bp->dl, bnxt_dl_params,
ARRAY_SIZE(bnxt_dl_params));
- if (rc) {
+ if (rc)
netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n",
rc);
- return rc;
- }
- rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
- if (rc) {
- netdev_err(bp->dev, "devlink_port_params_register failed\n");
- devlink_params_unregister(bp->dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
- return rc;
- }
- devlink_params_publish(bp->dl);
-
- return 0;
+ return rc;
}
static void bnxt_dl_params_unregister(struct bnxt *bp)
@@ -773,14 +758,13 @@ static void bnxt_dl_params_unregister(struct bnxt *bp)
devlink_params_unregister(bp->dl, bnxt_dl_params,
ARRAY_SIZE(bnxt_dl_params));
- devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
}
int bnxt_dl_register(struct bnxt *bp)
{
const struct devlink_ops *devlink_ops;
struct devlink_port_attrs attrs = {};
+ struct bnxt_dl *bp_dl;
struct devlink *dl;
int rc;
@@ -795,16 +779,17 @@ int bnxt_dl_register(struct bnxt *bp)
return -ENOMEM;
}
- bnxt_link_bp_to_dl(bp, dl);
+ bp->dl = dl;
+ bp_dl = devlink_priv(dl);
+ bp_dl->bp = bp;
/* Add switchdev eswitch mode setting, if SRIOV supported */
if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
bp->hwrm_spec_code > 0x10803)
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
- devlink_register(dl);
if (!BNXT_PF(bp))
- return 0;
+ goto out;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = bp->pf.port_id;
@@ -821,12 +806,13 @@ int bnxt_dl_register(struct bnxt *bp)
if (rc)
goto err_dl_port_unreg;
+out:
+ devlink_register(dl);
return 0;
err_dl_port_unreg:
devlink_port_unregister(&bp->dl_port);
err_dl_free:
- bnxt_link_bp_to_dl(bp, NULL);
devlink_free(dl);
return rc;
}
@@ -835,13 +821,10 @@ void bnxt_dl_unregister(struct bnxt *bp)
{
struct devlink *dl = bp->dl;
- if (!dl)
- return;
-
+ devlink_unregister(dl);
if (BNXT_PF(bp)) {
bnxt_dl_params_unregister(bp);
devlink_port_unregister(&bp->dl_port);
}
- devlink_unregister(dl);
devlink_free(dl);
}
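bnxt_dl_register() now follows the "register last" devlink convention: the instance is allocated, its private back-pointer and ports are set up, and devlink_register() — which may immediately expose the instance to user space — runs only when everything is ready; teardown reverses the order. A minimal sketch under that convention, with hypothetical foo_ names:

    #include <net/devlink.h>

    struct foo { struct devlink *dl; };              /* hypothetical driver */
    struct foo_dl_priv { struct foo *foo; };         /* devlink private data */
    static const struct devlink_ops foo_devlink_ops; /* hypothetical, empty */

    static int foo_devlink_init(struct foo *foo, struct device *dev)
    {
            struct foo_dl_priv *priv;
            struct devlink *dl;

            dl = devlink_alloc(&foo_devlink_ops, sizeof(*priv), dev);
            if (!dl)
                    return -ENOMEM;

            priv = devlink_priv(dl);
            priv->foo = foo;                /* back-pointer, as in bnxt */
            foo->dl = dl;

            devlink_register(dl);           /* last step: instance goes live */
            return 0;
    }

    static void foo_devlink_fini(struct foo *foo)
    {
            devlink_unregister(foo->dl);    /* first step of teardown */
            devlink_free(foo->dl);
    }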
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index d889f240da2b..406dc655a5fc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -20,19 +20,6 @@ static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
return ((struct bnxt_dl *)devlink_priv(dl))->bp;
}
-/* To clear devlink pointer from bp, pass NULL dl */
-static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
-{
- bp->dl = dl;
-
- /* add a back pointer in dl to bp */
- if (dl) {
- struct bnxt_dl *bp_dl = devlink_priv(dl);
-
- bp_dl->bp = bp;
- }
-}
-
#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114
#define NVM_OFF_IGNORE_ARI 164
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b056e3c29bbd..fbb56b1f70fd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -798,7 +798,7 @@ static int bnxt_set_ringparam(struct net_device *dev,
if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
(ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
- (ering->tx_pending <= MAX_SKB_FRAGS))
+ (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
return -EINVAL;
if (netif_running(dev))
@@ -909,7 +909,7 @@ static int bnxt_set_channels(struct net_device *dev,
if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
- (dev->priv_flags & IFF_RXFH_CONFIGURED)) {
+ netif_is_rxfh_configured(dev)) {
netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
return -EINVAL;
}
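The open-coded IFF_RXFH_CONFIGURED tests in bnxt.c and bnxt_ethtool.c are replaced by netif_is_rxfh_configured(), the core helper for "user space has installed a non-default RSS indirection table". A minimal sketch of the guard pattern; the bound parameter is a hypothetical driver-provided value:

    #include <linux/netdevice.h>

    /* Reject an RX queue-count change that would strand entries of a
     * user-configured RSS table; the table must be reset to default first.
     */
    static int foo_check_channels(struct net_device *dev, unsigned int new_rx,
                                  unsigned int max_used_rss_ring)
    {
            if (netif_is_rxfh_configured(dev) && max_used_rss_ring >= new_rx)
                    return -EINVAL;
            return 0;
    }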
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 70d8ca3039dc..1d177fed44a6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1151,7 +1151,7 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
}
}
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
+int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
struct hwrm_func_vf_cfg_input *req;
int rc = 0;
@@ -1217,7 +1217,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
/* overwrite netdev dev_addr with admin VF MAC */
if (is_valid_ether_addr(bp->vf.mac_addr))
- memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bp->vf.mac_addr);
update_vf_mac_exit:
hwrm_req_drop(bp, req);
if (inform_pf)
@@ -1246,7 +1246,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
{
}
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
+int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 995535e4c11b..9a4bacba477b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -41,5 +41,5 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
-int bnxt_approve_mac(struct bnxt *, u8 *, bool);
+int bnxt_approve_mac(struct bnxt *, const u8 *, bool);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 9401936b74fa..8eb28e088582 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -475,7 +475,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
dev->features |= pf_dev->features;
bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx,
dev->perm_addr);
- ether_addr_copy(dev->dev_addr, dev->perm_addr);
+ eth_hw_addr_set(dev, dev->perm_addr);
/* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */
if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu))
dev->max_mtu = max_mtu;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 23c7595d2a1d..5da9c00b43b1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -935,6 +935,48 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
return 0;
}
+static void bcmgenet_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bcmgenet_priv *priv;
+ u32 umac_cmd;
+
+ priv = netdev_priv(dev);
+
+ epause->autoneg = priv->autoneg_pause;
+
+ if (netif_carrier_ok(dev)) {
+ /* report active state when link is up */
+ umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD);
+ epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE);
+ epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE);
+ } else {
+ /* otherwise report stored settings */
+ epause->tx_pause = priv->tx_pause;
+ epause->rx_pause = priv->rx_pause;
+ }
+}
+
+static int bcmgenet_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (!dev->phydev)
+ return -ENODEV;
+
+ if (!phy_validate_pause(dev->phydev, epause))
+ return -EINVAL;
+
+ priv->autoneg_pause = !!epause->autoneg;
+ priv->tx_pause = !!epause->tx_pause;
+ priv->rx_pause = !!epause->rx_pause;
+
+ bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
+
+ return 0;
+}
+
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
BCMGENET_STAT_NETDEV = -1,
@@ -1587,6 +1629,8 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_rxnfc = bcmgenet_get_rxnfc,
.set_rxnfc = bcmgenet_set_rxnfc,
+ .get_pauseparam = bcmgenet_get_pauseparam,
+ .set_pauseparam = bcmgenet_set_pauseparam,
};
/* Power down the unimac, based on mode. */
@@ -3222,7 +3266,7 @@ static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
}
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
- unsigned char *addr)
+ const unsigned char *addr)
{
bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
@@ -3364,6 +3408,8 @@ static int bcmgenet_open(struct net_device *dev)
goto err_irq1;
}
+ bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
+
bcmgenet_netif_start(dev);
netif_tx_start_all_queues(dev);
@@ -3408,11 +3454,6 @@ static void bcmgenet_netif_stop(struct net_device *dev)
*/
cancel_work_sync(&priv->bcmgenet_irq_work);
- priv->old_link = -1;
- priv->old_speed = -1;
- priv->old_duplex = -1;
- priv->old_pause = -1;
-
/* tx reclaim */
bcmgenet_tx_reclaim_all(dev);
bcmgenet_fini_dma(priv);
@@ -3519,7 +3560,7 @@ static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
#define MAX_MDF_FILTER 17
static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
- unsigned char *addr,
+ const unsigned char *addr,
int *i)
{
bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
@@ -3592,7 +3633,7 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -3950,6 +3991,11 @@ static int bcmgenet_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
+ /* Set default pause parameters */
+ priv->autoneg_pause = 1;
+ priv->tx_pause = 1;
+ priv->rx_pause = 1;
+
SET_NETDEV_DEV(dev, &pdev->dev);
dev_set_drvdata(&pdev->dev, dev);
dev->watchdog_timeo = 2 * HZ;
@@ -4036,11 +4082,15 @@ static int bcmgenet_probe(struct platform_device *pdev)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
if (pd && !IS_ERR_OR_NULL(pd->mac_address))
- ether_addr_copy(dev->dev_addr, pd->mac_address);
+ eth_hw_addr_set(dev, pd->mac_address);
else
- if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN))
- if (has_acpi_companion(&pdev->dev))
- bcmgenet_get_hw_addr(priv, dev->dev_addr);
+ if (device_get_ethdev_address(&pdev->dev, dev))
+ if (has_acpi_companion(&pdev->dev)) {
+ u8 addr[ETH_ALEN];
+
+ bcmgenet_get_hw_addr(priv, addr);
+ eth_hw_addr_set(dev, addr);
+ }
if (!is_valid_ether_addr(dev->dev_addr)) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
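The bcmgenet probe rework keeps the usual MAC-source precedence — platform data, then firmware (DT/ACPI) properties via device_get_ethdev_address(), then hardware registers, then a random address — but routes every source through eth_hw_addr_set(). A condensed sketch of that fallback chain; foo_read_mac_from_hw() is hypothetical:

    #include <linux/etherdevice.h>

    static int foo_read_mac_from_hw(u8 *addr);      /* hypothetical HW read */

    static void foo_init_mac(struct device *dev, struct net_device *ndev,
                             const u8 *pdata_mac)
    {
            u8 addr[ETH_ALEN];

            if (pdata_mac && is_valid_ether_addr(pdata_mac))
                    eth_hw_addr_set(ndev, pdata_mac);
            else if (device_get_ethdev_address(dev, ndev))  /* 0 on success */
                    if (!foo_read_mac_from_hw(addr))
                            eth_hw_addr_set(ndev, addr);

            if (!is_valid_ether_addr(ndev->dev_addr))
                    eth_hw_addr_random(ndev);       /* last resort */
    }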
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 0a6d91b0f0aa..1cc2838e52c6 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -594,6 +594,9 @@ struct bcmgenet_priv {
/* other misc variables */
struct bcmgenet_hw_params *hw_params;
+ unsigned autoneg_pause:1;
+ unsigned tx_pause:1;
+ unsigned rx_pause:1;
/* MDIO bus variables */
wait_queue_head_t wq;
@@ -606,10 +609,6 @@ struct bcmgenet_priv {
bool clk_eee_enabled;
/* PHY device variables */
- int old_link;
- int old_speed;
- int old_duplex;
- int old_pause;
phy_interface_t phy_interface;
int phy_addr;
int ext_phy;
@@ -690,6 +689,7 @@ int bcmgenet_mii_init(struct net_device *dev);
int bcmgenet_mii_config(struct net_device *dev, bool init);
int bcmgenet_mii_probe(struct net_device *dev);
void bcmgenet_mii_exit(struct net_device *dev);
+void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx);
void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
void bcmgenet_mii_setup(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index ff1efd52ce16..ad56f54eda0a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -25,92 +25,80 @@
#include "bcmgenet.h"
-/* setup netdev link state when PHY link status change and
- * update UMAC and RGMII block when link up
- */
-void bcmgenet_mii_setup(struct net_device *dev)
+static void bcmgenet_mac_config(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
u32 reg, cmd_bits = 0;
- bool status_changed = false;
- if (priv->old_link != phydev->link) {
- status_changed = true;
- priv->old_link = phydev->link;
- }
+ /* speed */
+ if (phydev->speed == SPEED_1000)
+ cmd_bits = CMD_SPEED_1000;
+ else if (phydev->speed == SPEED_100)
+ cmd_bits = CMD_SPEED_100;
+ else
+ cmd_bits = CMD_SPEED_10;
+ cmd_bits <<= CMD_SPEED_SHIFT;
- if (phydev->link) {
- /* check speed/duplex/pause changes */
- if (priv->old_speed != phydev->speed) {
- status_changed = true;
- priv->old_speed = phydev->speed;
- }
+ /* duplex */
+ if (phydev->duplex != DUPLEX_FULL) {
+ cmd_bits |= CMD_HD_EN |
+ CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+ } else {
+ /* pause capability defaults to Symmetric */
+ if (priv->autoneg_pause) {
+ bool tx_pause = 0, rx_pause = 0;
- if (priv->old_duplex != phydev->duplex) {
- status_changed = true;
- priv->old_duplex = phydev->duplex;
- }
+ if (phydev->autoneg)
+ phy_get_pause(phydev, &tx_pause, &rx_pause);
- if (priv->old_pause != phydev->pause) {
- status_changed = true;
- priv->old_pause = phydev->pause;
+ if (!tx_pause)
+ cmd_bits |= CMD_TX_PAUSE_IGNORE;
+ if (!rx_pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE;
}
- /* done if nothing has changed */
- if (!status_changed)
- return;
-
- /* speed */
- if (phydev->speed == SPEED_1000)
- cmd_bits = CMD_SPEED_1000;
- else if (phydev->speed == SPEED_100)
- cmd_bits = CMD_SPEED_100;
- else
- cmd_bits = CMD_SPEED_10;
- cmd_bits <<= CMD_SPEED_SHIFT;
-
- /* duplex */
- if (phydev->duplex != DUPLEX_FULL)
- cmd_bits |= CMD_HD_EN;
-
- /* pause capability */
- if (!phydev->pause)
- cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
-
- /*
- * Program UMAC and RGMII block based on established
- * link speed, duplex, and pause. The speed set in
- * umac->cmd tell RGMII block which clock to use for
- * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps).
- * Receive clock is provided by the PHY.
- */
- reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
- reg &= ~OOB_DISABLE;
- reg |= RGMII_LINK;
- bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ /* Manual override */
+ if (!priv->rx_pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE;
+ if (!priv->tx_pause)
+ cmd_bits |= CMD_TX_PAUSE_IGNORE;
+ }
- reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
- CMD_HD_EN |
- CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
- reg |= cmd_bits;
- if (reg & CMD_SW_RESET) {
- reg &= ~CMD_SW_RESET;
- bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- udelay(2);
- reg |= CMD_TX_EN | CMD_RX_EN;
- }
+ /* Program UMAC and RGMII block based on established
+ * link speed, duplex, and pause. The speed set in
+ * umac->cmd tells the RGMII block which clock to use for
+ * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps).
+ * Receive clock is provided by the PHY.
+ */
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~OOB_DISABLE;
+ reg |= RGMII_LINK;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ CMD_HD_EN |
+ CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
+ if (reg & CMD_SW_RESET) {
+ reg &= ~CMD_SW_RESET;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- } else {
- /* done if nothing has changed */
- if (!status_changed)
- return;
-
- /* needed for MoCA fixed PHY to reflect correct link status */
- netif_carrier_off(dev);
+ udelay(2);
+ reg |= CMD_TX_EN | CMD_RX_EN;
}
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+}
+/* setup netdev link state when PHY link status change and
+ * update UMAC and RGMII block when link up
+ */
+void bcmgenet_mii_setup(struct net_device *dev)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ if (phydev->link)
+ bcmgenet_mac_config(dev);
phy_print_status(phydev);
}
@@ -130,6 +118,21 @@ static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
return 0;
}
+void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising, rx);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising,
+ rx | tx);
+ phy_start_aneg(phydev);
+
+ mutex_lock(&phydev->lock);
+ if (phydev->link)
+ bcmgenet_mac_config(dev);
+ mutex_unlock(&phydev->lock);
+}
+
void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -297,12 +300,6 @@ int bcmgenet_mii_probe(struct net_device *dev)
if (priv->internal_phy)
phy_flags = priv->gphy_rev;
- /* Initialize link state variables that bcmgenet_mii_setup() uses */
- priv->old_link = -1;
- priv->old_speed = -1;
- priv->old_duplex = -1;
- priv->old_pause = -1;
-
/* This is an ugly quirk but we have not been correctly interpreting
* the phy_interface values and we have done that across different
* drivers, so at least we are consistent in our mistakes.
@@ -386,8 +383,6 @@ int bcmgenet_mii_probe(struct net_device *dev)
return ret;
}
- linkmode_copy(phydev->advertising, phydev->supported);
-
/* The internal PHY has its link interrupts routed to the
* Ethernet MAC ISRs. On GENETv5 there is a hardware issue
* that prevents the signaling of link UP interrupts when
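The linkmode_mod_bit() pair in bcmgenet_phy_pause_set() encodes the 802.3 pause advertisement rules: the Pause bit tracks the RX request, Asym_Pause is set whenever either direction is requested, and autonegotiation is then restarted. A standalone sketch of the same mapping:

    #include <linux/phy.h>

    /* Pause tracks rx; Asym_Pause is set if either direction is wanted:
     *   rx (any tx) -> Pause | Asym_Pause
     *   tx only     -> Asym_Pause
     *   neither     -> no pause bits advertised
     */
    static void foo_advertise_pause(struct phy_device *phydev, bool rx, bool tx)
    {
            linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
                             phydev->advertising, rx);
            linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
                             phydev->advertising, rx | tx);
            phy_start_aneg(phydev);
    }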
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5e0e0e70d801..b1328c5524b5 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3942,7 +3942,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
}
/* tp->lock is held. */
-static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
+static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
+ int index)
{
u32 addr_high, addr_low;
@@ -5746,7 +5747,6 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
udelay(40);
- current_link_up = false;
tp->link_config.rmt_adv = 0;
mac_status = tr32(MAC_STATUS);
@@ -9366,7 +9366,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (!netif_running(dev))
return 0;
@@ -10273,8 +10273,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
- if (tg3_flag(tp, TSO_CAPABLE) &&
- tg3_asic_rev(tp) == ASIC_REV_5705) {
+ if (tg3_flag(tp, TSO_CAPABLE)) {
rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
!tg3_flag(tp, IS_5788)) {
@@ -11213,12 +11212,8 @@ static void tg3_reset_task(struct work_struct *work)
}
tg3_netif_start(tp);
-
tg3_full_unlock(tp);
-
- if (!err)
- tg3_phy_start(tp);
-
+ tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
rtnl_unlock();
@@ -16915,19 +16910,18 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
return err;
}
-static int tg3_get_device_address(struct tg3 *tp)
+static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
{
- struct net_device *dev = tp->dev;
u32 hi, lo, mac_offset;
int addr_ok = 0;
int err;
- if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
+ if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
return 0;
if (tg3_flag(tp, IS_SSB_CORE)) {
- err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
- if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
+ err = ssb_gige_get_macaddr(tp->pdev, addr);
+ if (!err && is_valid_ether_addr(addr))
return 0;
}
@@ -16951,41 +16945,41 @@ static int tg3_get_device_address(struct tg3 *tp)
/* First try to get it from MAC address mailbox. */
tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
if ((hi >> 16) == 0x484b) {
- dev->dev_addr[0] = (hi >> 8) & 0xff;
- dev->dev_addr[1] = (hi >> 0) & 0xff;
+ addr[0] = (hi >> 8) & 0xff;
+ addr[1] = (hi >> 0) & 0xff;
tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
- dev->dev_addr[2] = (lo >> 24) & 0xff;
- dev->dev_addr[3] = (lo >> 16) & 0xff;
- dev->dev_addr[4] = (lo >> 8) & 0xff;
- dev->dev_addr[5] = (lo >> 0) & 0xff;
+ addr[2] = (lo >> 24) & 0xff;
+ addr[3] = (lo >> 16) & 0xff;
+ addr[4] = (lo >> 8) & 0xff;
+ addr[5] = (lo >> 0) & 0xff;
/* Some old bootcode may report a 0 MAC address in SRAM */
- addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
+ addr_ok = is_valid_ether_addr(addr);
}
if (!addr_ok) {
/* Next, try NVRAM. */
if (!tg3_flag(tp, NO_NVRAM) &&
!tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
!tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
- memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
- memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
+ memcpy(&addr[0], ((char *)&hi) + 2, 2);
+ memcpy(&addr[2], (char *)&lo, sizeof(lo));
}
/* Finally just fetch it out of the MAC control regs. */
else {
hi = tr32(MAC_ADDR_0_HIGH);
lo = tr32(MAC_ADDR_0_LOW);
- dev->dev_addr[5] = lo & 0xff;
- dev->dev_addr[4] = (lo >> 8) & 0xff;
- dev->dev_addr[3] = (lo >> 16) & 0xff;
- dev->dev_addr[2] = (lo >> 24) & 0xff;
- dev->dev_addr[1] = hi & 0xff;
- dev->dev_addr[0] = (hi >> 8) & 0xff;
+ addr[5] = lo & 0xff;
+ addr[4] = (lo >> 8) & 0xff;
+ addr[3] = (lo >> 16) & 0xff;
+ addr[2] = (lo >> 24) & 0xff;
+ addr[1] = hi & 0xff;
+ addr[0] = (hi >> 8) & 0xff;
}
}
- if (!is_valid_ether_addr(&dev->dev_addr[0]))
+ if (!is_valid_ether_addr(addr))
return -EINVAL;
return 0;
}
@@ -17561,6 +17555,7 @@ static int tg3_init_one(struct pci_dev *pdev,
char str[40];
u64 dma_mask, persist_dma_mask;
netdev_features_t features = 0;
+ u8 addr[ETH_ALEN] __aligned(2);
err = pci_enable_device(pdev);
if (err) {
@@ -17783,12 +17778,13 @@ static int tg3_init_one(struct pci_dev *pdev,
tp->rx_pending = 63;
}
- err = tg3_get_device_address(tp);
+ err = tg3_get_device_address(tp, addr);
if (err) {
dev_err(&pdev->dev,
"Could not obtain valid ethernet address, aborting\n");
goto err_out_apeunmap;
}
+ eth_hw_addr_set(dev, addr);
intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
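tg3_get_device_address() now assembles the address in a stack buffer and commits it once; the __aligned(2) attribute satisfies ether_addr_copy()'s requirement that both operands be 16-bit aligned. A minimal sketch of the same pattern; foo_read_addr() is hypothetical:

    #include <linux/etherdevice.h>

    static void foo_read_addr(u8 *addr);    /* hypothetical EEPROM/register read */

    static int foo_init_addr(struct net_device *dev)
    {
            u8 addr[ETH_ALEN] __aligned(2); /* ether_addr_copy() does u16 accesses */

            foo_read_addr(addr);
            if (!is_valid_ether_addr(addr))
                    return -EINVAL;

            eth_hw_addr_set(dev, addr);     /* single commit point */
            return 0;
    }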
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ba47777d9cff..bbdc829c3524 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -875,7 +875,7 @@ bnad_set_netdev_perm_addr(struct bnad *bnad)
ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
if (is_zero_ether_addr(netdev->dev_addr))
- ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
+ eth_hw_addr_set(netdev, bnad->perm_addr);
}
/* Control Path Handlers */
@@ -3249,7 +3249,7 @@ bnad_set_mac_address(struct net_device *netdev, void *addr)
err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
if (!err)
- ether_addr_copy(netdev->dev_addr, sa->sa_data);
+ eth_hw_addr_set(netdev, sa->sa_data);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3515,7 +3515,6 @@ static void
bnad_uninit(struct bnad *bnad)
{
if (bnad->work_q) {
- flush_workqueue(bnad->work_q);
destroy_workqueue(bnad->work_q);
bnad->work_q = NULL;
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index e2730b3e1a57..029dea2873e3 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -313,7 +313,7 @@ static void macb_get_hwaddr(struct macb *bp)
addr[5] = (top >> 8) & 0xff;
if (is_valid_ether_addr(addr)) {
- memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+ eth_hw_addr_set(bp->dev, addr);
return;
}
}
@@ -547,13 +547,8 @@ static void macb_validate(struct phylink_config *config,
if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
(state->interface == PHY_INTERFACE_MODE_NA ||
state->interface == PHY_INTERFACE_MODE_10GBASER)) {
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseER_Full);
+ phylink_set_10g_modes(mask);
phylink_set(mask, 10000baseKR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseT_Full);
if (state->interface != PHY_INTERFACE_MODE_NA)
goto out;
}
@@ -4779,7 +4774,7 @@ static int macb_probe(struct platform_device *pdev)
if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
bp->rx_intr_mask |= MACB_BIT(RXUBR);
- err = of_get_mac_address(np, bp->dev->dev_addr);
+ err = of_get_ethdev_address(np, bp->dev);
if (err == -EPROBE_DEFER)
goto err_out_free_netdev;
else if (err)
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index c2e1f163bb14..095c5a2144a7 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -38,7 +38,8 @@ static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp,
return NULL;
}
-static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
unsigned long flags;
@@ -46,7 +47,9 @@ static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
u32 secl, sech;
spin_lock_irqsave(&bp->tsu_clk_lock, flags);
+ ptp_read_system_prets(sts);
first = gem_readl(bp, TN);
+ ptp_read_system_postts(sts);
secl = gem_readl(bp, TSL);
sech = gem_readl(bp, TSH);
second = gem_readl(bp, TN);
@@ -56,7 +59,9 @@ static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
/* if so, use later read & re-read seconds
* (assume all done within 1s)
*/
+ ptp_read_system_prets(sts);
ts->tv_nsec = gem_readl(bp, TN);
+ ptp_read_system_postts(sts);
secl = gem_readl(bp, TSL);
sech = gem_readl(bp, TSH);
} else {
@@ -161,7 +166,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
}
if (delta > TSU_NSEC_MAX_VAL) {
- gem_tsu_get_time(&bp->ptp_clock_info, &now);
+ gem_tsu_get_time(&bp->ptp_clock_info, &now, NULL);
now = timespec64_add(now, then);
gem_tsu_set_time(&bp->ptp_clock_info,
@@ -192,7 +197,7 @@ static const struct ptp_clock_info gem_ptp_caps_template = {
.pps = 1,
.adjfine = gem_ptp_adjfine,
.adjtime = gem_ptp_adjtime,
- .gettime64 = gem_tsu_get_time,
+ .gettimex64 = gem_tsu_get_time,
.settime64 = gem_tsu_set_time,
.enable = gem_ptp_enable,
};
@@ -251,7 +256,7 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
* The timestamp only contains lower few bits of seconds,
* so add value from 1588 timer
*/
- gem_tsu_get_time(&bp->ptp_clock_info, &tsu);
+ gem_tsu_get_time(&bp->ptp_clock_info, &tsu, NULL);
/* If the top bit is set in the timestamp,
* but not in 1588 timer, it has rolled over,
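The macb PTP change converts .gettime64 to .gettimex64, bracketing the device-time read with system-clock snapshots so tools like phc2sys can bound the cross-timestamping error; internal callers pass a NULL sts, which the helpers tolerate. A minimal sketch of the shape; foo_read_dev_ns() is hypothetical:

    #include <linux/ptp_clock_kernel.h>
    #include <linux/time64.h>

    static u64 foo_read_dev_ns(struct ptp_clock_info *ptp); /* hypothetical */

    static int foo_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts,
                              struct ptp_system_timestamp *sts)
    {
            ptp_read_system_prets(sts);     /* system time just before */
            *ts = ns_to_timespec64(foo_read_dev_ns(ptp));
            ptp_read_system_postts(sts);    /* system time just after */
            return 0;
    }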
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index b6a066404f4b..457cb7121000 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -607,7 +607,7 @@ static inline void xgmac_mac_disable(void __iomem *ioaddr)
writel(value, ioaddr + XGMAC_CONTROL);
}
-static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+static void xgmac_set_mac_addr(void __iomem *ioaddr, const unsigned char *addr,
int num)
{
u32 data;
@@ -1479,7 +1479,7 @@ static int xgmac_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
@@ -1693,6 +1693,7 @@ static int xgmac_probe(struct platform_device *pdev)
struct resource *res;
struct net_device *ndev = NULL;
struct xgmac_priv *priv = NULL;
+ u8 addr[ETH_ALEN];
u32 uid;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1785,7 +1786,8 @@ static int xgmac_probe(struct platform_device *pdev)
ndev->max_mtu = XGMAC_MAX_MTU;
/* Get the MAC address */
- xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
+ xgmac_get_mac_addr(priv->base, addr, 0);
+ eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr))
netdev_warn(ndev, "MAC address %pM not valid",
ndev->dev_addr);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 2a0d64e5797c..73cb03266549 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -411,7 +411,7 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
if (!ether_addr_equal(netdev->dev_addr, mac)) {
macaddr_changed = true;
- ether_addr_copy(netdev->dev_addr, mac);
+ eth_hw_addr_set(netdev, mac);
ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
}
@@ -490,7 +490,6 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev)
wq = &lio->rxq_status_wq[q_no];
if (wq->wq) {
cancel_delayed_work_sync(&wq->wk.work);
- flush_workqueue(wq->wq);
destroy_workqueue(wq->wq);
wq->wq = NULL;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index a34b3bb2dd4f..1daf63e437ce 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1279,6 +1279,14 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
struct lio *lio;
dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
+ device_lock(&oct->pci_dev->dev);
+ if (oct->devlink) {
+ devlink_unregister(oct->devlink);
+ devlink_free(oct->devlink);
+ oct->devlink = NULL;
+ }
+ device_unlock(&oct->pci_dev->dev);
+
if (!oct->ifcount) {
dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
return 1;
@@ -1300,12 +1308,6 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
for (i = 0; i < oct->ifcount; i++)
liquidio_destroy_nic_device(oct, i);
- if (oct->devlink) {
- devlink_unregister(oct->devlink);
- devlink_free(oct->devlink);
- oct->devlink = NULL;
- }
-
dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
return 0;
}
@@ -2022,7 +2024,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
return -EIO;
}
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
return 0;
@@ -3632,7 +3634,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
/* Copy MAC Address to OS network device structure */
- ether_addr_copy(netdev->dev_addr, mac);
+ eth_hw_addr_set(netdev, mac);
/* By default all interfaces on a single Octeon uses the same
* tx and rx queues
@@ -3749,10 +3751,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
}
}
+ device_lock(&octeon_dev->pci_dev->dev);
devlink = devlink_alloc(&liquidio_devlink_ops,
sizeof(struct lio_devlink_priv),
&octeon_dev->pci_dev->dev);
if (!devlink) {
+ device_unlock(&octeon_dev->pci_dev->dev);
dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
goto setup_nic_dev_free;
}
@@ -3760,9 +3764,10 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio_devlink = devlink_priv(devlink);
lio_devlink->oct = octeon_dev;
- devlink_register(devlink);
octeon_dev->devlink = devlink;
octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ devlink_register(devlink);
+ device_unlock(&octeon_dev->pci_dev->dev);
return 0;
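The liquidio changes follow the same "register last, unregister first" devlink ordering as bnxt, with device_lock() added around both paths — plausibly because this code runs from delayed NIC-setup work rather than probe(), so the lock recreates the exclusion probe context would normally provide. A sketch of the teardown side under that reading, with hypothetical names:

    #include <linux/device.h>
    #include <net/devlink.h>

    static void foo_devlink_teardown(struct device *dev, struct devlink **dlp)
    {
            device_lock(dev);               /* mimic probe-context exclusion */
            if (*dlp) {
                    devlink_unregister(*dlp);  /* make it invisible first */
                    devlink_free(*dlp);
                    *dlp = NULL;               /* later paths see it as gone */
            }
            device_unlock(dev);
    }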
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index f6396ac64006..c607756b731f 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1168,7 +1168,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
return -EPERM;
}
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);
return 0;
@@ -2148,7 +2148,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
/* Copy MAC Address to OS network device structure */
- ether_addr_copy(netdev->dev_addr, mac);
+ eth_hw_addr_set(netdev, mac);
if (liquidio_setup_io_queues(octeon_dev, i,
lio->linfo.num_txpciq,
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 30463a6d1f8c..4e39d712e121 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1501,7 +1501,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
- result = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr);
+ result = of_get_ethdev_address(pdev->dev.of_node, netdev);
if (result)
eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index b3d7d1afced7..f2f1ce81fd9c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -1193,7 +1193,7 @@ static int nic_register_interrupts(struct nicpf *nic)
dev_err(&nic->pdev->dev,
"Request for #%d msix vectors failed, returned %d\n",
nic->num_vec, ret);
- return 1;
+ return ret;
}
/* Register mailbox interrupt handler */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 2b87565781a0..bb45d5df2856 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -221,8 +221,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
nic->node = mbx.nic_cfg.node_id;
if (!nic->set_mac_pending)
- ether_addr_copy(nic->netdev->dev_addr,
- mbx.nic_cfg.mac_addr);
+ eth_hw_addr_set(nic->netdev, mbx.nic_cfg.mac_addr);
nic->sqs_mode = mbx.nic_cfg.sqs_mode;
nic->loopback_supported = mbx.nic_cfg.loopback_supported;
nic->link_up = false;
@@ -1224,7 +1223,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
if (ret < 0) {
netdev_err(nic->netdev,
"Req for #%d msix vectors failed\n", nic->num_vec);
- return 1;
+ return ret;
}
sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
@@ -1243,7 +1242,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
if (!nicvf_check_pf_ready(nic)) {
nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
nicvf_unregister_interrupts(nic);
- return 1;
+ return -EIO;
}
return 0;
@@ -1612,7 +1611,7 @@ static int nicvf_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
if (nic->pdev->msix_enabled) {
if (nicvf_hw_set_mac_addr(nic, netdev))
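The thunder fixes replace bare "return 1" failures with negative errno values, so callers can propagate or report a meaningful error. A minimal sketch of the convention; the foo_ helpers are hypothetical:

    #include <linux/errno.h>

    struct foo;
    static int foo_enable_msix(struct foo *foo);    /* hypothetical: 0 or -errno */
    static bool foo_handshake_ok(struct foo *foo);  /* hypothetical */

    static int foo_register_irqs(struct foo *foo)
    {
            int ret;

            ret = foo_enable_msix(foo);
            if (ret < 0)
                    return ret;     /* propagate errno, not a bare 1 */

            if (!foo_handshake_ok(foo))
                    return -EIO;    /* pick a specific errno */

            return 0;
    }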
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index db66d4beb28a..574a32f23f96 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1387,10 +1387,10 @@ static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
u8 *dst)
{
u8 mac[ETH_ALEN];
- u8 *addr;
+ int ret;
- addr = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac, ETH_ALEN);
- if (!addr) {
+ ret = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac);
+ if (ret) {
dev_err(dev, "MAC address invalid: %pM\n", mac);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index d246eee4b6d5..609820e214a3 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -853,7 +853,7 @@ static int t1_set_mac_addr(struct net_device *dev, void *p)
if (!mac->ops->macaddress_set)
return -EOPNOTSUPP;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
mac->ops->macaddress_set(mac, dev->dev_addr);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
index dfa77491a910..5913eaf442b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/gmac.h
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
@@ -117,7 +117,7 @@ struct cmac_ops {
const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
- int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
+ int (*macaddress_set)(struct cmac *, const u8 mac_addr[6]);
};
typedef struct _cmac_instance cmac_instance;
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index c27908e66f5e..0bb37e4680c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -496,7 +496,7 @@ static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
return 0;
}
-static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
+static int pm3393_macaddress_set(struct cmac *cmac, const u8 ma[6])
{
u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index 310add28fcf5..007c591b8bf5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -1140,7 +1140,7 @@ int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi)
adapter->port[i].dev->name);
goto error;
}
- memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN);
+ eth_hw_addr_set(adapter->port[i].dev, hw_addr);
init_link_config(&adapter->port[i].link_config, bi);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
index 873c1c7b4ca0..2ad3efb550c2 100644
--- a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
+++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
@@ -379,7 +379,7 @@ static int mac_intr_clear(struct cmac *mac)
}
/* Expect MAC address to be in network byte order. */
-static int mac_set_address(struct cmac* mac, u8 addr[6])
+static int mac_set_address(struct cmac* mac, const u8 addr[6])
{
u32 val;
int port = mac->instance->index;
@@ -591,7 +591,7 @@ static void port_stats_update(struct cmac *mac)
} hw_stats[] = {
#define HW_STAT(reg, stat_name) \
- { reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
+ { reg, offsetof(struct cmac_statistics, stat_name) / sizeof(u64) }
/* Rx stats */
HW_STAT(RxUnicast, RxUnicastFramesOK),
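The HW_STAT change in vsc7326.c replaces member-offset computation via null-pointer arithmetic — undefined behaviour in C — with offsetof(), which yields the same u64 index portably. A tiny illustration of the equivalence:

    #include <linux/stddef.h>
    #include <linux/types.h>

    struct foo_stats {
            u64 rx_ok;
            u64 tx_ok;
    };

    /* Index of a u64 member within the stats block, no pointer tricks:
     * FOO_STAT_INDEX(tx_ok) == 1.
     */
    #define FOO_STAT_INDEX(member) \
            (offsetof(struct foo_stats, member) / sizeof(u64))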
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index b706f2fbe4f4..a309016f7f8c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -710,7 +710,7 @@ int t3_mac_enable(struct cmac *mac, int which);
int t3_mac_disable(struct cmac *mac, int which);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev);
-int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, int n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 38e47703f9ab..9cf9e33664e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2586,7 +2586,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
if (offload_running(adapter))
write_smt_entry(adapter, pi->port_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 7ff31d1026fb..53feac8da503 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include <linux/etherdevice.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
@@ -3758,8 +3759,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
- memcpy(adapter->port[i]->dev_addr, hw_addr,
- ETH_ALEN);
+ eth_hw_addr_set(adapter->port[i], hw_addr);
init_link_config(&p->link_config, p->phy.caps);
p->phy.ops->power_down(&p->phy, 1);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
index 3af19a550372..1bdc6cad1e49 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
@@ -240,7 +240,7 @@ static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
}
/* Set one of the station's unicast MAC addresses. */
-int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6])
{
if (idx >= mac->nucast)
return -EINVAL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ecea3cdd30b3..5657ac8cfca0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1545,7 +1545,7 @@ static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
u8 hw_addr[])
{
- ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr);
+ eth_hw_addr_set(adapter->port[port_idx], hw_addr);
ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0d9cda4ab303..dde1cf51d0ab 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3468,7 +3468,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (ret < 0)
return ret;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 64144b6171d7..e7b4e3ed056c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -9706,7 +9706,7 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
if (ret)
return ret;
- memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(adap->port[i], addr);
j++;
}
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index f55105a4112f..03cb1410d6fc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -40,6 +40,7 @@
#ifndef __CXGB4VF_ADAPTER_H__
#define __CXGB4VF_ADAPTER_H__
+#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
@@ -507,7 +508,7 @@ static inline const char *port_name(struct adapter *adapter, int pidx)
static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
u8 hw_addr[])
{
- memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
+ eth_hw_addr_set(adapter->port[pidx], hw_addr);
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 4920a80a0460..64479c464b4e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1218,7 +1218,7 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
if (ret < 0)
return ret;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index bcad69c48074..4af5561cbfc5 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -870,7 +870,7 @@ static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
* created only after 3 way handshake is done.
*/
sock_orphan(child);
- percpu_counter_inc((child)->sk_prot->orphan_count);
+ INC_ORPHAN_COUNT(child);
chtls_release_resources(child);
chtls_conn_done(child);
} else {
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
index b1161bdeda4d..f61ca657601c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
@@ -95,7 +95,7 @@ struct deferred_skb_cb {
#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
-#define INC_ORPHAN_COUNT(sk) percpu_counter_inc((sk)->sk_prot->orphan_count)
+#define INC_ORPHAN_COUNT(sk) this_cpu_inc(*(sk)->sk_prot->orphan_count)
/* TLS SKB */
#define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
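The chtls change tracks the core TCP rework that turned sk_prot->orphan_count from a percpu_counter into a plain per-cpu unsigned int: writers use this_cpu_inc(), a cheap preemption-safe local increment, and readers sum all CPUs for an approximate total. A standalone sketch of that counter pattern:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned int, foo_orphans);

    static void foo_orphan_inc(void)
    {
            this_cpu_inc(foo_orphans);      /* no shared cacheline, no lock */
    }

    static unsigned long foo_orphan_total(void)
    {
            unsigned long sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)      /* approximate, racy by design */
                    sum += per_cpu(foo_orphans, cpu);
            return sum;
    }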
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index d0c4c8b7a15a..4a97aa8e1387 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1227,7 +1227,7 @@ static int set_mac_address(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
cs89_dbg(0, debug, "%s: Setting MAC address to %pM\n",
dev->name, dev->dev_addr);
@@ -1314,6 +1314,7 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
int tmp;
unsigned rev_type = 0;
int eeprom_buff[CHKSUM_LEN];
+ u8 addr[ETH_ALEN];
int retval;
/* Initialize the device structure. */
@@ -1387,9 +1388,10 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
for (i = 0; i < ETH_ALEN / 2; i++) {
unsigned int Addr;
Addr = readreg(dev, PP_IA + i * 2);
- dev->dev_addr[i * 2] = Addr & 0xFF;
- dev->dev_addr[i * 2 + 1] = Addr >> 8;
+ addr[i * 2] = Addr & 0xFF;
+ addr[i * 2 + 1] = Addr >> 8;
}
+ eth_hw_addr_set(dev, addr);
/* Load the Adapter Configuration.
* Note: Barring any more specific information from some
@@ -1464,9 +1466,10 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
/* eeprom_buff has 32-bit ints, so we can't just memcpy it */
/* store the initial memory base address */
for (i = 0; i < ETH_ALEN / 2; i++) {
- dev->dev_addr[i * 2] = eeprom_buff[i];
- dev->dev_addr[i * 2 + 1] = eeprom_buff[i] >> 8;
+ addr[i * 2] = eeprom_buff[i];
+ addr[i * 2 + 1] = eeprom_buff[i] >> 8;
}
+ eth_hw_addr_set(dev, addr);
cs89_dbg(1, debug, "%s: new adapter_cnf: 0x%x\n",
dev->name, lp->adapter_cnf);
}
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 072fac5f5d24..21ba6e893072 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -746,7 +746,7 @@ static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
if (dev == NULL)
return NULL;
- memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, data->dev_addr);
dev->ethtool_ops = &ep93xx_ethtool_ops;
dev->netdev_ops = &ep93xx_netdev_ops;
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 6324e80960c3..84251b85fc93 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -541,7 +541,7 @@ static int set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, saddr->sa_data);
netdev_info(dev, "Setting MAC address to %pM\n", dev->dev_addr);
/* set the Ethernet address */
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 12ffc14fbecd..6ded4d9fa32a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -139,7 +139,7 @@ static void enic_get_drvinfo(struct net_device *netdev,
int err;
err = enic_dev_fw_info(enic, &fw_info);
- /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
+ /* return only when dma_alloc_coherent fails in vnic_dev_fw_info
* For other failures, like devcmd failure, we return previously
* recorded info.
*/
@@ -270,7 +270,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
int err;
err = enic_dev_stats_dump(enic, &vstats);
- /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
+ /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
* For other failures, like devcmd failure, we return previously
* recorded stats.
*/
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index d0a8f7106958..aacf141986d5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -882,7 +882,7 @@ static void enic_get_stats(struct net_device *netdev,
int err;
err = enic_dev_stats_dump(enic, &stats);
- /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
+ /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
* For other failures, like devcmd failure, we return previously
* recorded stats.
*/
@@ -985,7 +985,7 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
return -EADDRNOTAVAIL;
}
- memcpy(netdev->dev_addr, addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr);
return 0;
}
@@ -1098,6 +1098,7 @@ static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
static int enic_set_vf_port(struct net_device *netdev, int vf,
struct nlattr *port[])
{
+ static const u8 zero_addr[ETH_ALEN] = {};
struct enic *enic = netdev_priv(netdev);
struct enic_port_profile prev_pp;
struct enic_port_profile *pp;
@@ -1162,7 +1163,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
} else {
memset(pp, 0, sizeof(*pp));
if (vf == PORT_SELF_VF)
- eth_zero_addr(netdev->dev_addr);
+ eth_hw_addr_set(netdev, zero_addr);
}
} else {
/* Set flag to indicate that the port assoc/disassoc
@@ -1174,7 +1175,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
if (pp->request == PORT_REQUEST_DISASSOCIATE) {
eth_zero_addr(pp->mac_addr);
if (vf == PORT_SELF_VF)
- eth_zero_addr(netdev->dev_addr);
+ eth_hw_addr_set(netdev, zero_addr);
}
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index e6a83198c3dd..80f46dbd5117 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -73,9 +73,9 @@ static int enic_set_port_profile(struct enic *enic, int vf)
struct vic_provinfo *vp;
const u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
const __be16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
+ const u8 *client_mac;
char uuid_str[38];
char client_mac_str[18];
- u8 *client_mac;
int err;
ENIC_PP_BY_INDEX(enic, vf, pp, &err);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 6e745ca4c433..941f175fb911 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1889,7 +1889,7 @@ static int gmac_set_mac_address(struct net_device *netdev, void *addr)
{
struct sockaddr *sa = addr;
- memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, sa->sa_data);
gmac_write_mac_address(netdev);
return 0;
@@ -2467,13 +2467,13 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
DEFAULT_NAPI_WEIGHT);
if (is_valid_ether_addr((void *)port->mac_addr)) {
- memcpy(netdev->dev_addr, port->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(netdev, (u8 *)port->mac_addr);
} else {
dev_dbg(dev, "ethernet address 0x%08x%08x%08x invalid\n",
port->mac_addr[0], port->mac_addr[1],
port->mac_addr[2]);
dev_info(dev, "using a random ethernet address\n");
- eth_random_addr(netdev->dev_addr);
+ eth_hw_addr_random(netdev);
}
gmac_write_mac_address(netdev);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index e842de6f6635..0985ab216566 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1425,6 +1425,7 @@ dm9000_probe(struct platform_device *pdev)
enum of_gpio_flags flags;
struct regulator *power;
bool inv_mac_addr = false;
+ u8 addr[ETH_ALEN];
power = devm_regulator_get(dev, "vcc");
if (IS_ERR(power)) {
@@ -1666,11 +1667,12 @@ dm9000_probe(struct platform_device *pdev)
/* try reading the node address from the attached EEPROM */
for (i = 0; i < 6; i += 2)
- dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
+ dm9000_read_eeprom(db, i / 2, addr + i);
+ eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
mac_src = "platform data";
- memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, pdata->dev_addr);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
@@ -1678,7 +1680,8 @@ dm9000_probe(struct platform_device *pdev)
mac_src = "chip";
for (i = 0; i < 6; i++)
- ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
+ addr[i] = ior(db, i + DM9000_PAR);
+ eth_hw_addr_set(ndev, addr);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 117c26fa5909..d51b3d24a0c8 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -666,8 +666,8 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
struct de_private *de = netdev_priv(dev);
u16 hash_table[32];
struct netdev_hw_addr *ha;
+ const u16 *eaddrs;
int i;
- u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
__set_bit_le(255, hash_table); /* Broadcast entry */
@@ -685,7 +685,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
setup_frm = &de->setup_frame[13*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -695,7 +695,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
struct netdev_hw_addr *ha;
- u16 *eaddrs;
+ const u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
@@ -710,7 +710,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
setup_frm = &de->setup_frame[15*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -1713,6 +1713,7 @@ static const struct ethtool_ops de_ethtool_ops = {
static void de21040_get_mac_address(struct de_private *de)
{
+ u8 addr[ETH_ALEN];
unsigned i;
dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
@@ -1724,12 +1725,13 @@ static void de21040_get_mac_address(struct de_private *de)
value = dr32(ROMCmd);
rmb();
} while (value < 0 && --boguscnt > 0);
- de->dev->dev_addr[i] = value;
+ addr[i] = value;
udelay(1);
if (boguscnt <= 0)
pr_warn("timeout reading 21040 MAC address byte %u\n",
i);
}
+ eth_hw_addr_set(de->dev, addr);
}
static void de21040_get_media_info(struct de_private *de)
@@ -1821,8 +1823,7 @@ static void de21041_get_srom_info(struct de_private *de)
#endif
/* store MAC address */
- for (i = 0; i < 6; i ++)
- de->dev->dev_addr[i] = ee_data[i + sa_offset];
+ eth_hw_addr_set(de->dev, &ee_data[sa_offset]);
/* get offset of controller 0 info leaf. ignore 2nd byte. */
ofs = ee_data[SROMC0InfoLeaf];
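
The de2104x changes above only add const to the setup-frame casts; the underlying layout stores each 16-bit word of the station address twice in the frame's final entry. A compilable toy of that duplication, using a const-safe memcpy() where the driver casts, with made-up sizes and values:

/* Toy Tulip-style setup-frame fill: every address word appears twice. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const uint8_t dev_addr[6] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
	uint16_t setup_frm[6];
	uint16_t eaddrs[3];
	int i;

	memcpy(eaddrs, dev_addr, sizeof(eaddrs)); /* no cast away from const */
	for (i = 0; i < 3; i++) {
		setup_frm[2 * i] = eaddrs[i];     /* word written once... */
		setup_frm[2 * i + 1] = eaddrs[i]; /* ...and duplicated */
	}
	for (i = 0; i < 6; i++)
		printf("%04x\n", (unsigned int)setup_frm[i]);
	return 0;
}
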
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 36ab4cbf2ad0..13121c4dcfe6 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -4031,6 +4031,7 @@ get_hw_addr(struct net_device *dev)
int broken, i, k, tmp, status = 0;
u_short j,chksum;
struct de4x5_private *lp = netdev_priv(dev);
+ u8 addr[ETH_ALEN];
broken = de4x5_bad_srom(lp);
@@ -4042,28 +4043,30 @@ get_hw_addr(struct net_device *dev)
if (lp->chipset == DC21040) {
while ((tmp = inl(DE4X5_APROM)) < 0);
k += (u_char) tmp;
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
while ((tmp = inl(DE4X5_APROM)) < 0);
k += (u_short) (tmp << 8);
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
} else if (!broken) {
- dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
- dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
} else if ((broken == SMC) || (broken == ACCTON)) {
- dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
- dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
+ addr[i] = *((u_char *)&lp->srom + i); i++;
+ addr[i] = *((u_char *)&lp->srom + i); i++;
}
} else {
k += (u_char) (tmp = inb(EISA_APROM));
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
}
if (k > 0xffff) k-=0xffff;
}
if (k == 0xffff) k=0;
+ eth_hw_addr_set(dev, addr);
+
if (lp->bus == PCI) {
if (lp->chipset == DC21040) {
while ((tmp = inl(DE4X5_APROM)) < 0);
@@ -4095,8 +4098,9 @@ get_hw_addr(struct net_device *dev)
int x = dev->dev_addr[i];
x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
- dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
+ addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
}
+ eth_hw_addr_set(dev, addr);
}
#endif /* CONFIG_PPC_PMAC */
@@ -4158,12 +4162,9 @@ test_bad_enet(struct net_device *dev, int status)
if ((tmp == 0) || (tmp == 0x5fa)) {
if ((lp->chipset == last.chipset) &&
(lp->bus_num == last.bus) && (lp->bus_num > 0)) {
- for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
- for (i=ETH_ALEN-1; i>2; --i) {
- dev->dev_addr[i] += 1;
- if (dev->dev_addr[i] != 0) break;
- }
- for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
+ eth_addr_inc(last.addr);
+ eth_hw_addr_set(dev, last.addr);
+
if (!an_exception(lp)) {
dev->irq = last.irq;
}
@@ -5391,9 +5392,7 @@ de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data
if (netif_queue_stopped(dev))
return -EBUSY;
netif_stop_queue(dev);
- for (i=0; i<ETH_ALEN; i++) {
- dev->dev_addr[i] = tmp.addr[i];
- }
+ eth_hw_addr_set(dev, tmp.addr);
build_setup_frame(dev, PHYS_ADDR_ONLY);
/* Set up the descriptor and give ownership to the card */
load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
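
test_bad_enet() now uses eth_addr_inc(), which treats the address as a 48-bit big-endian integer. Worth noting: the removed open-coded loop only carried within the last three bytes, so behaviour can differ once those bytes wrap past ff:ff:ff. A small runnable reimplementation of the increment (not the kernel helper itself, which works on a u64):

/* Increment a MAC address with full carry propagation. */
#include <stdio.h>

#define ETH_ALEN 6

static void addr_inc(unsigned char *addr)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; i--)
		if (++addr[i]) /* stop once a byte does not wrap to zero */
			break;
}

int main(void)
{
	unsigned char last[ETH_ALEN] = {0x00, 0x11, 0x22, 0xff, 0xff, 0xff};

	addr_inc(last); /* carries across the OUI: prints 00:11:23:00:00:00 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       last[0], last[1], last[2], last[3], last[4], last[5]);
	return 0;
}
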
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index c763b692e164..83f1727d1423 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -476,8 +476,7 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Set Node address */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = db->srom[20 + i];
+ eth_hw_addr_set(dev, &db->srom[20]);
err = register_netdev (dev);
if (err)
@@ -1436,9 +1435,9 @@ static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
static void dm9132_id_table(struct net_device *dev)
{
+ const u16 *addrptr = (const u16 *)dev->dev_addr;
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr + 0xc0;
- u16 *addrptr = (u16 *)dev->dev_addr;
struct netdev_hw_addr *ha;
u16 i, hash_table[4];
@@ -1477,7 +1476,7 @@ static void send_filter_frame(struct net_device *dev)
struct dmfe_board_info *db = netdev_priv(dev);
struct netdev_hw_addr *ha;
struct tx_desc *txptr;
- u16 * addrptr;
+ const u16 * addrptr;
u32 * suptr;
int i;
@@ -1487,7 +1486,7 @@ static void send_filter_frame(struct net_device *dev)
suptr = (u32 *) txptr->tx_buf_ptr;
/* Node address */
- addrptr = (u16 *) dev->dev_addr;
+ addrptr = (const u16 *) dev->dev_addr;
*suptr++ = addrptr[0];
*suptr++ = addrptr[1];
*suptr++ = addrptr[2];
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index fcedd733bacb..79df5a72877b 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -339,7 +339,7 @@ static void tulip_up(struct net_device *dev)
}
} else {
/* This is set_rx_mode(), but without starting the transmitter. */
- u16 *eaddrs = (u16 *)dev->dev_addr;
+ const u16 *eaddrs = (const u16 *)dev->dev_addr;
u16 *setup_frm = &tp->setup_frame[15*6];
dma_addr_t mapping;
@@ -1001,8 +1001,8 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
struct tulip_private *tp = netdev_priv(dev);
u16 hash_table[32];
struct netdev_hw_addr *ha;
+ const u16 *eaddrs;
int i;
- u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
__set_bit_le(255, hash_table); /* Broadcast entry */
@@ -1019,7 +1019,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
setup_frm = &tp->setup_frame[13*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -1029,7 +1029,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
struct netdev_hw_addr *ha;
- u16 *eaddrs;
+ const u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
@@ -1044,7 +1044,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
setup_frm = &tp->setup_frame[15*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -1305,6 +1305,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int chip_idx = ent->driver_data;
const char *chip_name = tulip_tbl[chip_idx].chip_name;
unsigned int eeprom_missing = 0;
+ u8 addr[ETH_ALEN] __aligned(2);
unsigned int force_csr0 = 0;
board_idx++;
@@ -1506,13 +1507,15 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
do {
value = ioread32(ioaddr + CSR9);
} while (value < 0 && --boguscnt > 0);
- put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
+ put_unaligned_le16(value, ((__le16 *)addr) + i);
sum += value & 0xffff;
}
+ eth_hw_addr_set(dev, addr);
} else if (chip_idx == COMET) {
/* No need to read the EEPROM. */
- put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
- put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
+ put_unaligned_le32(ioread32(ioaddr + 0xA4), addr);
+ put_unaligned_le16(ioread32(ioaddr + 0xA8), addr + 4);
+ eth_hw_addr_set(dev, addr);
for (i = 0; i < 6; i ++)
sum += dev->dev_addr[i];
} else {
@@ -1575,20 +1578,23 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
for (i = 0; i < 6; i ++) {
- dev->dev_addr[i] = ee_data[i + sa_offset];
+ addr[i] = ee_data[i + sa_offset];
sum += ee_data[i + sa_offset];
}
+ eth_hw_addr_set(dev, addr);
}
/* Lite-On boards have the address byte-swapped. */
if ((dev->dev_addr[0] == 0xA0 ||
dev->dev_addr[0] == 0xC0 ||
dev->dev_addr[0] == 0x02) &&
- dev->dev_addr[1] == 0x00)
+ dev->dev_addr[1] == 0x00) {
for (i = 0; i < 6; i+=2) {
- char tmp = dev->dev_addr[i];
- dev->dev_addr[i] = dev->dev_addr[i+1];
- dev->dev_addr[i+1] = tmp;
+ addr[i] = dev->dev_addr[i+1];
+ addr[i+1] = dev->dev_addr[i];
}
+ eth_hw_addr_set(dev, addr);
+ }
+
/* On the Zynx 315 Etherarray and other multiport boards only the
first Tulip has an EEPROM.
On Sparc systems the mac address is held in the OBP property
@@ -1599,17 +1605,18 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (sum == 0 || sum == 6*0xff) {
#if defined(CONFIG_SPARC)
struct device_node *dp = pci_device_to_OF_node(pdev);
- const unsigned char *addr;
+ const unsigned char *addr2;
int len;
#endif
eeprom_missing = 1;
for (i = 0; i < 5; i++)
- dev->dev_addr[i] = last_phys_addr[i];
- dev->dev_addr[i] = last_phys_addr[i] + 1;
+ addr[i] = last_phys_addr[i];
+ addr[i] = last_phys_addr[i] + 1;
+ eth_hw_addr_set(dev, addr);
#if defined(CONFIG_SPARC)
- addr = of_get_property(dp, "local-mac-address", &len);
- if (addr && len == ETH_ALEN)
- memcpy(dev->dev_addr, addr, ETH_ALEN);
+ addr2 = of_get_property(dp, "local-mac-address", &len);
+ if (addr2 && len == ETH_ALEN)
+ eth_hw_addr_set(dev, addr2);
#endif
#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
if (last_irq)
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index d67ef7d02d6b..77d9058431e3 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -272,6 +272,7 @@ static int uli526x_init_one(struct pci_dev *pdev,
struct uli526x_board_info *db; /* board information structure */
struct net_device *dev;
void __iomem *ioaddr;
+ u8 addr[ETH_ALEN];
int i, err;
ULI526X_DBUG(0, "uli526x_init_one()", 0);
@@ -379,7 +380,7 @@ static int uli526x_init_one(struct pci_dev *pdev,
uw32(DCR13, 0x1b0); //Select ID Table access port
//Read MAC address from CR14
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = ur32(DCR14);
+ addr[i] = ur32(DCR14);
//Read end
uw32(DCR13, 0); //Clear CR13
uw32(DCR0, 0); //Clear CR0
@@ -388,8 +389,10 @@ static int uli526x_init_one(struct pci_dev *pdev,
else /*Exist SROM*/
{
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = db->srom[20 + i];
+ addr[i] = db->srom[20 + i];
}
+ eth_hw_addr_set(dev, addr);
+
err = register_netdev (dev);
if (err)
goto err_out_unmap;
@@ -1343,7 +1346,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
void __iomem *ioaddr = db->ioaddr;
struct netdev_hw_addr *ha;
struct tx_desc *txptr;
- u16 * addrptr;
+ const u16 * addrptr;
u32 * suptr;
int i;
@@ -1353,7 +1356,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
suptr = (u32 *) txptr->tx_buf_ptr;
/* Node address */
- addrptr = (u16 *) dev->dev_addr;
+ addrptr = (const u16 *) dev->dev_addr;
*suptr++ = addrptr[0] << FLT_SHIFT;
*suptr++ = addrptr[1] << FLT_SHIFT;
*suptr++ = addrptr[2] << FLT_SHIFT;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 85b99099c6b9..86b1d23eba83 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -355,6 +355,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
int chip_idx = ent->driver_data;
int irq;
int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+ __le16 addr[ETH_ALEN / 2];
void __iomem *ioaddr;
i = pcim_enable_device(pdev);
@@ -382,7 +383,8 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_netdev;
for (i = 0; i < 3; i++)
- ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
+ addr[i] = cpu_to_le16(eeprom_read(ioaddr, i));
+ eth_hw_addr_set(dev, (u8 *)addr);
/* Reset the chip to erase previous misconfiguration.
No hold time required! */
@@ -877,7 +879,7 @@ static void init_registers(struct net_device *dev)
8000 16 longwords 0200 2 longwords 2000 32 longwords
C000 32 longwords 0400 4 longwords */
-#if defined (__i386__) && !defined(MODULE)
+#if defined (__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
/* When not a module we can work around broken '486 PCI boards. */
if (boot_cpu_data.x86 <= 4) {
i |= 0x4800;
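
The winbond-840 hunk stages the EEPROM words in a __le16 array so the byte order is fixed regardless of host endianness before the single eth_hw_addr_set() call. A userspace sketch of the same idea, with an invented eeprom_read_word() in place of the driver's accessor:

/* Endian-stable staging of 16-bit EEPROM words into a MAC buffer. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t eeprom_read_word(int i)
{
	static const uint16_t words[3] = {0x1100, 0x3322, 0x5544};

	return words[i];
}

int main(void)
{
	uint8_t le[6], addr[6];
	int i;

	for (i = 0; i < 3; i++) {
		uint16_t w = eeprom_read_word(i);

		le[2 * i] = w & 0xff;      /* cpu_to_le16(): low byte first */
		le[2 * i + 1] = w >> 8;
	}
	memcpy(addr, le, sizeof(addr)); /* eth_hw_addr_set() equivalent */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}
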
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index a8de79355578..8759f9f76b62 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -1015,12 +1015,14 @@ static void read_mac_address(struct xircom_private *card)
xw32(CSR10, i + 3);
data_count = xr32(CSR9);
if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
+ u8 addr[ETH_ALEN];
int j;
for (j = 0; j < 6; j++) {
xw32(CSR10, i + j + 4);
- card->dev->dev_addr[j] = xr32(CSR9) & 0xff;
+ addr[j] = xr32(CSR9) & 0xff;
}
+ eth_hw_addr_set(card->dev, addr);
break;
} else if (link == 0) {
break;
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 202ecb132053..a301f7e6a440 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -349,8 +349,7 @@ parse_eeprom (struct net_device *dev)
}
/* Set MAC address */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = psrom->mac_addr[i];
+ eth_hw_addr_set(dev, psrom->mac_addr);
if (np->chip_id == CHIP_IP1000A) {
np->led_mode = psrom->led_mode;
@@ -567,7 +566,7 @@ static void rio_hw_init(struct net_device *dev)
*/
for (i = 0; i < 3; i++)
dw16(StationAddr0 + 2 * i,
- cpu_to_le16(((u16 *)dev->dev_addr)[i]));
+ cpu_to_le16(((const u16 *)dev->dev_addr)[i]));
set_multicast (dev);
if (np->coalesce) {
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index c36d186dffed..c710dc17be90 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -508,6 +508,7 @@ static int sundance_probe1(struct pci_dev *pdev,
int bar = 1;
#endif
int phy, phy_end, phy_idx = 0;
+ __le16 addr[ETH_ALEN / 2];
if (pci_enable_device(pdev))
return -EIO;
@@ -528,8 +529,9 @@ static int sundance_probe1(struct pci_dev *pdev,
goto err_out_res;
for (i = 0; i < 3; i++)
- ((__le16 *)dev->dev_addr)[i] =
+ addr[i] =
cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
+ eth_hw_addr_set(dev, (u8 *)addr);
np = netdev_priv(dev);
np->ndev = dev;
@@ -1611,7 +1613,7 @@ static int sundance_set_mac_addr(struct net_device *dev, void *data)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
__set_mac_addr(dev);
return 0;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 6c51cf991dad..92462ed87bc4 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -60,11 +60,11 @@ static void __dnet_set_hwaddr(struct dnet *bp)
{
u16 tmp;
- tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
+ tmp = be16_to_cpup((const __be16 *)bp->dev->dev_addr);
dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
- tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
+ tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 2));
dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
- tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
+ tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 4));
dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
}
@@ -93,7 +93,7 @@ static void dnet_get_hwaddr(struct dnet *bp)
*((__be16 *)(addr + 4)) = cpu_to_be16(tmp);
if (is_valid_ether_addr(addr))
- memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+ eth_hw_addr_set(bp->dev, addr);
}
static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index b2d4fb3feb74..46e3a05e9582 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -479,6 +479,7 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct net_device *net_dev;
struct ec_bhf_priv *priv;
void __iomem *dma_io;
+ u8 addr[ETH_ALEN];
void __iomem *io;
int err = 0;
@@ -539,7 +540,8 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err < 0)
goto err_free_net_dev;
- memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
+ memcpy_fromio(addr, priv->mii_io + MII_MAC_ADDR, ETH_ALEN);
+ eth_hw_addr_set(net_dev, addr);
err = register_netdev(net_dev);
if (err < 0)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 649c5c429bd7..528eb0f223b1 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1080,7 +1080,7 @@ err:
}
/* Uses synchronous MCCQ */
-int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
+int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
u32 if_id, u32 *pmac_id, u32 domain)
{
struct be_mcc_wrb *wrb;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index c30d6d6f0f3a..db1f3b908582 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2385,7 +2385,7 @@ int be_pci_fnum_get(struct be_adapter *adapter);
int be_fw_wait_ready(struct be_adapter *adapter);
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
bool permanent, u32 if_handle, u32 pmac_id);
-int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id,
+int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, u32 if_id,
u32 *pmac_id, u32 domain);
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id,
u32 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 361c1c87c183..d51f24c9e1b8 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -272,7 +272,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
-static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
+static int be_dev_mac_add(struct be_adapter *adapter, const u8 *mac)
{
int i;
@@ -369,7 +369,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
/* Remember currently programmed MAC */
ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
err:
@@ -4599,7 +4599,7 @@ static int be_mac_setup(struct be_adapter *adapter)
if (status)
return status;
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ eth_hw_addr_set(adapter->netdev, mac);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
/* Initial MAC for BE3 VFs is already programmed by PF */
@@ -4621,7 +4621,6 @@ static void be_destroy_err_recovery_workq(void)
if (!be_err_recovery_workq)
return;
- flush_workqueue(be_err_recovery_workq);
destroy_workqueue(be_err_recovery_workq);
be_err_recovery_workq = NULL;
}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 0064ebdaf4b4..b1c8ffea6ad2 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -802,8 +802,8 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ethoc_do_set_mac_address(struct net_device *dev)
{
+ const unsigned char *mac = dev->dev_addr;
struct ethoc *priv = netdev_priv(dev);
- unsigned char *mac = dev->dev_addr;
ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
(mac[4] << 8) | (mac[5] << 0));
@@ -816,7 +816,7 @@ static int ethoc_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
ethoc_do_set_mac_address(dev);
return 0;
}
@@ -1144,18 +1144,22 @@ static int ethoc_probe(struct platform_device *pdev)
/* Allow the platform setup code to pass in a MAC address. */
if (pdata) {
- ether_addr_copy(netdev->dev_addr, pdata->hwaddr);
+ eth_hw_addr_set(netdev, pdata->hwaddr);
priv->phy_id = pdata->phy_id;
} else {
- of_get_mac_address(pdev->dev.of_node, netdev->dev_addr);
+ of_get_ethdev_address(pdev->dev.of_node, netdev);
priv->phy_id = -1;
}
/* Check that the given MAC address is valid. If it isn't, read the
* current MAC from the controller.
*/
- if (!is_valid_ether_addr(netdev->dev_addr))
- ethoc_get_mac_address(netdev, netdev->dev_addr);
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ u8 addr[ETH_ALEN];
+
+ ethoc_get_mac_address(netdev, addr);
+ eth_hw_addr_set(netdev, addr);
+ }
/* Check the MAC again for validity, if it still isn't choose and
* program a random one.
diff --git a/drivers/net/ethernet/ezchip/Kconfig b/drivers/net/ethernet/ezchip/Kconfig
index 38aa824efb25..9241b9b1c7a3 100644
--- a/drivers/net/ethernet/ezchip/Kconfig
+++ b/drivers/net/ethernet/ezchip/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_EZCHIP
config EZCHIP_NPS_MANAGEMENT_ENET
tristate "EZchip NPS management enet support"
- depends on OF_IRQ && OF_NET
+ depends on OF_IRQ
depends on HAS_IOMEM
help
Simple LAN device for debug or management purposes.
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index f9a288a6ec8c..323340826dab 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -421,7 +421,7 @@ static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p)
res = eth_mac_addr(ndev, p);
if (!res) {
- ether_addr_copy(ndev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(ndev, addr->sa_data);
nps_enet_set_hw_mac_address(ndev);
}
@@ -601,7 +601,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base);
/* set kernel MAC address to dev */
- err = of_get_mac_address(dev->of_node, ndev->dev_addr);
+ err = of_get_ethdev_address(dev->of_node, ndev);
if (err)
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index ff76e401a014..97c5d70de76e 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -182,13 +182,10 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv)
u8 mac[ETH_ALEN];
unsigned int m;
unsigned int l;
- void *addr;
- addr = device_get_mac_address(priv->dev, mac, ETH_ALEN);
- if (addr) {
- ether_addr_copy(priv->netdev->dev_addr, mac);
+ if (!device_get_ethdev_address(priv->dev, priv->netdev)) {
dev_info(priv->dev, "Read MAC address %pM from device tree\n",
- mac);
+ priv->netdev->dev_addr);
return;
}
@@ -203,7 +200,7 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv)
mac[5] = l & 0xff;
if (is_valid_ether_addr(mac)) {
- ether_addr_copy(priv->netdev->dev_addr, mac);
+ eth_hw_addr_set(priv->netdev, mac);
dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
} else {
eth_hw_addr_random(priv->netdev);
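
ftgmac100_initial_mac() keeps a three-level fallback: device tree/firmware first, then the chip's own registers, then a random locally-administered address. A toy version of that ordering follows; the two getters are stubs, and the random path only roughly reimplements what eth_hw_addr_random() provides:

/* MAC source fallback: firmware -> chip -> random LAA. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static int get_fw_mac(unsigned char *mac) { (void)mac; return -1; }   /* none found */
static int get_chip_mac(unsigned char *mac) { (void)mac; return -1; } /* invalid */

int main(void)
{
	unsigned char mac[6];
	int i;

	if (get_fw_mac(mac) && get_chip_mac(mac)) {
		srand((unsigned int)time(NULL));
		for (i = 0; i < 6; i++)
			mac[i] = (unsigned char)(rand() & 0xff);
		mac[0] &= 0xfe; /* clear the multicast bit */
		mac[0] |= 0x02; /* mark it locally administered */
	}
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
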
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 25c91b3c5fd3..b3939a5f7b03 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -482,6 +482,7 @@ static int fealnx_init_one(struct pci_dev *pdev,
struct net_device *dev;
void *ring_space;
dma_addr_t ring_dma;
+ u8 addr[ETH_ALEN];
#ifdef USE_IO_OPS
int bar = 0;
#else
@@ -525,7 +526,8 @@ static int fealnx_init_one(struct pci_dev *pdev,
/* read ethernet id */
for (i = 0; i < 6; ++i)
- dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);
+ addr[i] = ioread8(ioaddr + PAR0 + i);
+ eth_hw_addr_set(dev, addr);
/* Reset the chip to erase previous misconfiguration. */
iowrite32(0x00000001, ioaddr + BCR);
@@ -827,7 +829,7 @@ static int netdev_open(struct net_device *dev)
return -EAGAIN;
for (i = 0; i < 3; i++)
- iowrite16(((unsigned short*)dev->dev_addr)[i],
+ iowrite16(((const unsigned short *)dev->dev_addr)[i],
ioaddr + PAR0 + i*2);
init_ring(dev);
@@ -857,7 +859,7 @@ static int netdev_open(struct net_device *dev)
np->bcrvalue |= 0x04; /* big-endian */
#endif
-#if defined(__i386__) && !defined(MODULE)
+#if defined(__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
if (boot_cpu_data.x86 <= 4)
np->crvalue = 0xa00;
else
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 685d2d8a3b36..6b2927d863e2 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -268,11 +268,11 @@ static int dpaa_netdev_init(struct net_device *net_dev,
if (is_valid_ether_addr(mac_addr)) {
memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, mac_addr);
} else {
eth_hw_addr_random(net_dev);
err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
- (enet_addr_t *)net_dev->dev_addr);
+ (const enet_addr_t *)net_dev->dev_addr);
if (err) {
dev_err(dev, "Failed to set random MAC address\n");
return -EINVAL;
@@ -452,7 +452,7 @@ static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
mac_dev = priv->mac_dev;
err = mac_dev->change_addr(mac_dev->fman_mac,
- (enet_addr_t *)net_dev->dev_addr);
+ (const enet_addr_t *)net_dev->dev_addr);
if (err < 0) {
netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
err);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
index 426926fb6fc6..7fefe1574b6a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
@@ -189,7 +189,7 @@ static const struct devlink_ops dpaa2_eth_devlink_ops = {
.trap_group_action_set = dpaa2_eth_dl_trap_group_action_set,
};
-int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
+int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
struct device *dev = net_dev->dev.parent;
@@ -203,15 +203,23 @@ int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
}
dl_priv = devlink_priv(priv->devlink);
dl_priv->dpaa2_priv = priv;
+ return 0;
+}
+
+void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv)
+{
+ devlink_free(priv->devlink);
+}
+
+void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
+{
devlink_register(priv->devlink);
- return 0;
}
void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv)
{
devlink_unregister(priv->devlink);
- devlink_free(priv->devlink);
}
int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv)
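
The dpaa2 devlink rework splits the old register function into an early allocation step and a late publication step, so the instance only becomes visible once the device is fully set up, and teardown runs in the reverse order. A minimal model of that probe/remove ordering; the names are illustrative:

/* Alloc early, publish last; unpublish first, free last. */
#include <stdio.h>
#include <stdlib.h>

struct dl { int ready; };
static struct dl *visible; /* stands in for the registered handle */

static struct dl *dl_alloc(void) { return calloc(1, sizeof(struct dl)); }
static void dl_register(struct dl *d) { d->ready = 1; visible = d; }
static void dl_unregister(struct dl *d) { visible = NULL; d->ready = 0; }
static void dl_free(struct dl *d) { free(d); }

int main(void)
{
	struct dl *d = dl_alloc();   /* probe: allocate early */

	if (!d)
		return 1;
	/* ... finish all setup while nothing can see the object ... */
	dl_register(d);              /* probe: publish last */
	printf("visible: %d\n", visible != NULL);
	dl_unregister(d);            /* remove: unpublish first */
	dl_free(d);                  /* remove: free last */
	return 0;
}
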
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 7065c71ed7b8..714e961e7a77 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -533,6 +533,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
list_add_tail(&skb->list, ch->rx_list);
@@ -641,6 +642,7 @@ static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
fq->stats.frames += cleaned;
ch->stats.frames += cleaned;
+ ch->stats.frames_per_cdan += cleaned;
/* A dequeue operation only pulls frames from a single queue
* into the store. Return the frame queue as an out param.
@@ -1264,7 +1266,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch __always_unused,
+ struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
struct dpaa2_eth_fq *fq)
{
@@ -1279,6 +1281,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
percpu_extras->tx_conf_frames++;
percpu_extras->tx_conf_bytes += fd_len;
+ ch->stats.bytes_per_cdan += fd_len;
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
@@ -1601,6 +1604,12 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
}
} while (store_cleaned);
+ /* Update NET DIM with the values for this CDAN */
+ dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
+ ch->stats.bytes_per_cdan);
+ ch->stats.frames_per_cdan = 0;
+ ch->stats.bytes_per_cdan = 0;
+
/* We didn't consume the entire budget, so finish napi and
* re-enable data availability notifications
*/
@@ -4013,7 +4022,7 @@ static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
return err;
}
}
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, mac_addr);
} else if (is_zero_ether_addr(dpni_mac_addr)) {
/* No MAC address configured, fill in net_dev->dev_addr
* with a random one
@@ -4038,7 +4047,7 @@ static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
/* NET_ADDR_PERM is default, all we have to do is
* fill in the device addr.
*/
- memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, dpni_mac_addr);
}
return 0;
@@ -4431,7 +4440,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_connect_mac;
- err = dpaa2_eth_dl_register(priv);
+ err = dpaa2_eth_dl_alloc(priv);
if (err)
goto err_dl_register;
@@ -4453,6 +4462,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
dpaa2_dbg_add(priv);
#endif
+ dpaa2_eth_dl_register(priv);
dev_info(dev, "Probed interface %s\n", net_dev->name);
return 0;
@@ -4461,7 +4471,7 @@ err_netdev_reg:
err_dl_port_add:
dpaa2_eth_dl_traps_unregister(priv);
err_dl_trap_register:
- dpaa2_eth_dl_unregister(priv);
+ dpaa2_eth_dl_free(priv);
err_dl_register:
dpaa2_eth_disconnect_mac(priv);
err_connect_mac:
@@ -4508,6 +4518,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
net_dev = dev_get_drvdata(dev);
priv = netdev_priv(net_dev);
+ dpaa2_eth_dl_unregister(priv);
+
#ifdef CONFIG_DEBUG_FS
dpaa2_dbg_remove(priv);
#endif
@@ -4519,7 +4531,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
dpaa2_eth_dl_port_del(priv);
dpaa2_eth_dl_traps_unregister(priv);
- dpaa2_eth_dl_unregister(priv);
+ dpaa2_eth_dl_free(priv);
if (priv->do_link_poll)
kthread_stop(priv->poll_thread);
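
The new frames_per_cdan/bytes_per_cdan counters accumulate while one data-availability notification is serviced, the totals are handed to the interrupt-moderation (DIM) code, and the counters are cleared for the next notification. A toy model of that sample-and-reset cycle, with a printf standing in for dpaa2_io_update_net_dim():

/* Per-notification byte/frame sampling for interrupt moderation. */
#include <stdio.h>

struct ch_stats { unsigned long long frames_per_cdan, bytes_per_cdan; };

static void dim_update(unsigned long long f, unsigned long long b)
{
	printf("dim sample: %llu frames, %llu bytes\n", f, b);
}

int main(void)
{
	struct ch_stats st = {0, 0};
	int i;

	for (i = 0; i < 16; i++) {      /* frames handled for this CDAN */
		st.frames_per_cdan++;
		st.bytes_per_cdan += 1500;
	}
	dim_update(st.frames_per_cdan, st.bytes_per_cdan);
	st.frames_per_cdan = 0;         /* reset for the next CDAN */
	st.bytes_per_cdan = 0;
	return 0;
}
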
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index cdb623d5f2c1..2085844227fe 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -384,6 +384,8 @@ struct dpaa2_eth_ch_stats {
__u64 xdp_redirect;
/* Must be last, does not show up in ethtool stats */
__u64 frames;
+ __u64 frames_per_cdan;
+ __u64 bytes_per_cdan;
};
/* Maximum number of queues associated with a DPNI */
@@ -725,7 +727,10 @@ void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops;
-int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv);
+int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv);
+
+void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv);
void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv);
int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 2da5f881f630..adb8ce5306ee 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -820,7 +820,63 @@ static int dpaa2_eth_set_tunable(struct net_device *net_dev,
return err;
}
+static int dpaa2_eth_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ic,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_io *dpio = priv->channel[0]->dpio;
+
+ dpaa2_io_get_irq_coalescing(dpio, &ic->rx_coalesce_usecs);
+ ic->use_adaptive_rx_coalesce = dpaa2_io_get_adaptive_coalescing(dpio);
+
+ return 0;
+}
+
+static int dpaa2_eth_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ic,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_io *dpio;
+ int prev_adaptive;
+ u32 prev_rx_usecs;
+ int i, j, err;
+
+ /* Keep track of the previous value, just in case we fail */
+ dpio = priv->channel[0]->dpio;
+ dpaa2_io_get_irq_coalescing(dpio, &prev_rx_usecs);
+ prev_adaptive = dpaa2_io_get_adaptive_coalescing(dpio);
+
+ /* Set up the new rx coalescing value */
+ for (i = 0; i < priv->num_channels; i++) {
+ dpio = priv->channel[i]->dpio;
+
+ dpaa2_io_set_adaptive_coalescing(dpio,
+ ic->use_adaptive_rx_coalesce);
+ err = dpaa2_io_set_irq_coalescing(dpio, ic->rx_coalesce_usecs);
+ if (err)
+ goto restore_rx_usecs;
+ }
+
+ return 0;
+
+restore_rx_usecs:
+ for (j = 0; j < i; j++) {
+ dpio = priv->channel[j]->dpio;
+
+ dpaa2_io_set_irq_coalescing(dpio, prev_rx_usecs);
+ dpaa2_io_set_adaptive_coalescing(dpio, prev_adaptive);
+ }
+
+ return err;
+}
+
const struct ethtool_ops dpaa2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = dpaa2_eth_get_drvinfo,
.nway_reset = dpaa2_eth_nway_reset,
.get_link = ethtool_op_get_link,
@@ -836,4 +892,6 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_ts_info = dpaa2_eth_get_ts_info,
.get_tunable = dpaa2_eth_get_tunable,
.set_tunable = dpaa2_eth_set_tunable,
+ .get_coalesce = dpaa2_eth_get_coalesce,
+ .set_coalesce = dpaa2_eth_set_coalesce,
};
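
dpaa2_eth_set_coalesce() applies the new rx_coalesce_usecs to every channel and, if channel i fails, restores the previously read value on channels 0..i-1. A compilable sketch of that apply-with-rollback loop, under the simplifying assumption that restoring an old value cannot fail:

/* Apply a setting across N channels; roll back the first i on error. */
#include <stdio.h>

#define NCH 4
static unsigned int usecs[NCH] = {8, 8, 8, 8};

static int apply(int ch, unsigned int val)
{
	if (ch == 2 && val > 100) /* simulated failure on channel 2 */
		return -1;
	usecs[ch] = val;
	return 0;
}

int main(void)
{
	unsigned int prev = usecs[0], want = 200;
	int i, j, err = 0;

	for (i = 0; i < NCH; i++) {
		err = apply(i, want);
		if (err)
			goto restore;
	}
	return 0;
restore:
	for (j = 0; j < i; j++)
		apply(j, prev); /* put back the previously read value */
	printf("rolled back, err=%d\n", err);
	return err;
}
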
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 543c1f202420..ef8f0a055024 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -139,12 +139,7 @@ static void dpaa2_mac_validate(struct phylink_config *config,
case PHY_INTERFACE_MODE_NA:
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_USXGMII:
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
+ phylink_set_10g_modes(mask);
if (state->interface == PHY_INTERFACE_MODE_10GBASER)
break;
phylink_set(mask, 5000baseT_Full);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 175f15c46842..d039457928b0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -980,7 +980,7 @@ static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
/* First check if firmware has any address configured by bootloader */
if (!is_zero_ether_addr(mac_addr)) {
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, mac_addr);
} else {
/* No MAC address configured, fill in net_dev->dev_addr
* with a random one
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 0c69409fe0d0..504e12554079 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -7,7 +7,9 @@
#include <linux/udp.h>
#include <linux/vmalloc.h>
#include <linux/ptp_classify.h>
+#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
+#include <net/tso.h>
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
@@ -314,12 +316,261 @@ dma_err:
return 0;
}
+static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, int *i, int hdr_len,
+ int data_len)
+{
+ union enetc_tx_bd txbd_tmp;
+ u8 flags = 0, e_flags = 0;
+ dma_addr_t addr;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
+
+ if (skb_vlan_tag_present(skb))
+ flags |= ENETC_TXBD_FLAGS_EX;
+
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.buf_len = cpu_to_le16(hdr_len);
+
+ /* first BD needs frm_len and offload flags set */
+ txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len);
+ txbd_tmp.flags = flags;
+
+ /* For the TSO header we do not set the dma address since we do not
+ * want it unmapped when we do cleanup. We still set len so that we
+ * count the bytes sent.
+ */
+ tx_swbd->len = hdr_len;
+ tx_swbd->do_twostep_tstamp = false;
+ tx_swbd->check_wb = false;
+
+ /* Actually write the header in the BD */
+ *txbd = txbd_tmp;
+
+ /* Add extension BD for VLAN */
+ if (flags & ENETC_TXBD_FLAGS_EX) {
+ /* Get the next BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ /* Set up the VLAN fields */
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+ txbd_tmp.ext.tpid = 0; /* < C-TAG */
+ e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
+
+ /* Write the BD */
+ txbd_tmp.ext.e_flags = e_flags;
+ *txbd = txbd_tmp;
+ }
+}
+
+static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, char *data,
+ int size, bool last_bd)
+{
+ union enetc_tx_bd txbd_tmp;
+ dma_addr_t addr;
+ u8 flags = 0;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+
+ addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, addr))) {
+ netdev_err(tx_ring->ndev, "DMA map error\n");
+ return -ENOMEM;
+ }
+
+ if (last_bd) {
+ flags |= ENETC_TXBD_FLAGS_F;
+ tx_swbd->is_eof = 1;
+ }
+
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.buf_len = cpu_to_le16(size);
+ txbd_tmp.flags = flags;
+
+ tx_swbd->dma = addr;
+ tx_swbd->len = size;
+ tx_swbd->dir = DMA_TO_DEVICE;
+
+ *txbd = txbd_tmp;
+
+ return 0;
+}
+
+static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb,
+ char *hdr, int hdr_len, int *l4_hdr_len)
+{
+ char *l4_hdr = hdr + skb_transport_offset(skb);
+ int mac_hdr_len = skb_network_offset(skb);
+
+ if (tso->tlen != sizeof(struct udphdr)) {
+ struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
+
+ tcph->check = 0;
+ } else {
+ struct udphdr *udph = (struct udphdr *)(l4_hdr);
+
+ udph->check = 0;
+ }
+
+ /* Compute the IP checksum. This is necessary since tso_build_hdr()
+ * already incremented the IP ID field.
+ */
+ if (!tso->ipv6) {
+ struct iphdr *iph = (void *)(hdr + mac_hdr_len);
+
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ }
+
+ /* Compute the checksum over the L4 header. */
+ *l4_hdr_len = hdr_len - skb_transport_offset(skb);
+ return csum_partial(l4_hdr, *l4_hdr_len, 0);
+}
+
+static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso,
+ struct sk_buff *skb, char *hdr, int len,
+ __wsum sum)
+{
+ char *l4_hdr = hdr + skb_transport_offset(skb);
+ __sum16 csum_final;
+
+ /* Complete the L4 checksum by appending the pseudo-header to the
+ * already computed checksum.
+ */
+ if (!tso->ipv6)
+ csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ len, ip_hdr(skb)->protocol, sum);
+ else
+ csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ len, ipv6_hdr(skb)->nexthdr, sum);
+
+ if (tso->tlen != sizeof(struct udphdr)) {
+ struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
+
+ tcph->check = csum_final;
+ } else {
+ struct udphdr *udph = (struct udphdr *)(l4_hdr);
+
+ udph->check = csum_final;
+ }
+}
+
+static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+ int hdr_len, total_len, data_len;
+ struct enetc_tx_swbd *tx_swbd;
+ union enetc_tx_bd *txbd;
+ struct tso_t tso;
+ __wsum csum, csum2;
+ int count = 0, pos;
+ int err, i, bd_data_num;
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ hdr_len = tso_start(skb, &tso);
+ total_len = skb->len - hdr_len;
+ i = tx_ring->next_to_use;
+
+ while (total_len > 0) {
+ char *hdr;
+
+ /* Get the BD */
+ txbd = ENETC_TXBD(*tx_ring, i);
+ tx_swbd = &tx_ring->tx_swbd[i];
+ prefetchw(txbd);
+
+ /* Determine the length of this packet */
+ data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_len;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0);
+
+ /* compute the csum over the L4 header */
+ csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
+ enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
+ bd_data_num = 0;
+ count++;
+
+ while (data_len > 0) {
+ int size;
+
+ size = min_t(int, tso.size, data_len);
+
+ /* Advance the index in the BDR */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ txbd = ENETC_TXBD(*tx_ring, i);
+ tx_swbd = &tx_ring->tx_swbd[i];
+ prefetchw(txbd);
+
+ /* Compute the checksum over this segment of data and
+ * add it to the csum already computed (over the L4
+ * header and possible other data segments).
+ */
+ csum2 = csum_partial(tso.data, size, 0);
+ csum = csum_block_add(csum, csum2, pos);
+ pos += size;
+
+ err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
+ tso.data, size,
+ size == data_len);
+ if (err)
+ goto err_map_data;
+
+ data_len -= size;
+ count++;
+ bd_data_num++;
+ tso_build_data(skb, &tso, size);
+
+ if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
+ goto err_chained_bd;
+ }
+
+ enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum);
+
+ if (total_len == 0)
+ tx_swbd->skb = skb;
+
+ /* Go to the next BD */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ }
+
+ tx_ring->next_to_use = i;
+ enetc_update_tx_ring_tail(tx_ring);
+
+ return count;
+
+err_map_data:
+ dev_err(tx_ring->dev, "DMA map error\n");
+
+err_chained_bd:
+ do {
+ tx_swbd = &tx_ring->tx_swbd[i];
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ } while (count--);
+
+ return 0;
+}
+
static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
- int count;
+ int count, err;
/* Queue one-step Sync packet if already locked */
if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
@@ -332,19 +583,35 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
tx_ring = priv->tx_ring[skb->queue_mapping];
- if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
- if (unlikely(skb_linearize(skb)))
- goto drop_packet_err;
+ if (skb_is_gso(skb)) {
+ if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
- count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
- netif_stop_subqueue(ndev, tx_ring->index);
- return NETDEV_TX_BUSY;
- }
+ enetc_lock_mdio();
+ count = enetc_map_tx_tso_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ } else {
+ if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_linearize(skb)))
+ goto drop_packet_err;
+
+ count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
- enetc_lock_mdio();
- count = enetc_map_tx_buffs(tx_ring, skb);
- enetc_unlock_mdio();
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ err = skb_checksum_help(skb);
+ if (err)
+ goto drop_packet_err;
+ }
+ enetc_lock_mdio();
+ count = enetc_map_tx_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ }
if (unlikely(!count))
goto drop_packet_err;
@@ -419,7 +686,7 @@ static void enetc_rx_dim_work(struct work_struct *w)
static void enetc_rx_net_dim(struct enetc_int_vector *v)
{
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = {};
v->comp_cnt++;
@@ -546,10 +813,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
bool is_eof = tx_swbd->is_eof;
if (unlikely(tx_swbd->check_wb)) {
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- union enetc_tx_bd *txbd;
-
- txbd = ENETC_TXBD(*tx_ring, i);
+ union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
if (txbd->flags & ENETC_TXBD_FLAGS_W &&
tx_swbd->do_twostep_tstamp) {
@@ -567,8 +831,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (xdp_frame) {
xdp_return_frame(xdp_frame);
} else if (skb) {
- if (unlikely(tx_swbd->skb->cb[0] &
- ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
+ if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
/* Start work to release lock for next one-step
* timestamping packet. And send one skb in
* tx_skbs queue if has.
@@ -1493,15 +1756,32 @@ static int enetc_alloc_txbdr(struct enetc_bdr *txr)
return -ENOMEM;
err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
- if (err) {
- vfree(txr->tx_swbd);
- return err;
+ if (err)
+ goto err_alloc_bdr;
+
+ txr->tso_headers = dma_alloc_coherent(txr->dev,
+ txr->bd_count * TSO_HEADER_SIZE,
+ &txr->tso_headers_dma,
+ GFP_KERNEL);
+ if (!txr->tso_headers) {
+ err = -ENOMEM;
+ goto err_alloc_tso;
}
txr->next_to_clean = 0;
txr->next_to_use = 0;
return 0;
+
+err_alloc_tso:
+ dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd),
+ txr->bd_base, txr->bd_dma_base);
+ txr->bd_base = NULL;
+err_alloc_bdr:
+ vfree(txr->tx_swbd);
+ txr->tx_swbd = NULL;
+
+ return err;
}
static void enetc_free_txbdr(struct enetc_bdr *txr)
@@ -1513,6 +1793,10 @@ static void enetc_free_txbdr(struct enetc_bdr *txr)
size = txr->bd_count * sizeof(union enetc_tx_bd);
+ dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE,
+ txr->tso_headers, txr->tso_headers_dma);
+ txr->tso_headers = NULL;
+
dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
txr->bd_base = NULL;
@@ -1879,7 +2163,6 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
- cpumask_t cpu_mask;
int i, j, err;
for (i = 0; i < priv->bdr_int_num; i++) {
@@ -1908,9 +2191,7 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(irq, &cpu_mask);
+ irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
}
return 0;
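
Because this is software TSO, the driver assembles each segment's L4 checksum itself: csum_partial() over the rewritten header, csum_block_add() for every payload block, and the pseudo-header folded in last. The runnable sketch below redoes that arithmetic in plain RFC 1071 terms; even-length blocks and big-endian word sums are assumed, and it is not the kernel's csum_* API:

/* Incremental one's-complement checksum over header plus data blocks. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t sum16(const uint8_t *p, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1]; /* big-endian 16-bit words */
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;
	return sum;
}

static uint16_t fold(uint32_t sum)
{
	while (sum >> 16) /* fold carries back into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t l4hdr[8] = {0}, seg1[4] = {1, 2, 3, 4}, seg2[4] = {5, 6, 7, 8};
	uint32_t sum;

	sum = sum16(l4hdr, sizeof(l4hdr), 0); /* csum over the L4 header */
	sum = sum16(seg1, sizeof(seg1), sum); /* add payload block 1 */
	sum = sum16(seg2, sizeof(seg2), sum); /* add payload block 2 */
	/* the pseudo-header (addresses, proto, length) would be added here */
	printf("csum=0x%04x\n", fold(sum));
	return 0;
}
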
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 08b283347d9c..fb39e406b7fc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -112,6 +112,10 @@ struct enetc_bdr {
dma_addr_t bd_dma_base;
u8 tsd_enable; /* Time specific departure */
bool ext_en; /* enable h/w descriptor extensions */
+
+ /* DMA buffer for TSO headers */
+ char *tso_headers;
+ dma_addr_t tso_headers_dma;
} ____cacheline_aligned_in_smp;
static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 9690e36e9e85..910b9f722504 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -157,7 +157,7 @@ static const struct {
{ ENETC_PM0_TFRM, "MAC tx frames" },
{ ENETC_PM0_TFCS, "MAC tx fcs errors" },
{ ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
- { ENETC_PM0_TERR, "MAC tx frames" },
+ { ENETC_PM0_TERR, "MAC tx frame errors" },
{ ENETC_PM0_TUCA, "MAC tx unicast frames" },
{ ENETC_PM0_TMCA, "MAC tx multicast frames" },
{ ENETC_PM0_TBCA, "MAC tx broadcast frames" },
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 0f5f081a5baf..1514e6a4a3ff 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -635,10 +635,14 @@ struct enetc_cmd_rfse {
#define ENETC_RFSE_EN BIT(15)
#define ENETC_RFSE_MODE_BD 2
-static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr)
+static inline void enetc_load_primary_mac_addr(struct enetc_hw *hw,
+ struct net_device *ndev)
{
+ u8 addr[ETH_ALEN] __aligned(4);
+
*(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0);
*(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1);
+ eth_hw_addr_set(ndev, addr);
}
#define ENETC_SI_INT_IDX 0
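
enetc_load_primary_mac_addr() now reads the 4-byte and 2-byte halves of the MAC into a stack buffer aligned for the u32 store and commits the result once. A userspace approximation with invented register values; memcpy() replaces the kernel's direct u32/u16 stores, and the printed byte order assumes a little-endian host:

/* Assemble a MAC from a 32-bit and a 16-bit register read. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t sipmar0 = 0x33221100; /* stand-in for ENETC_SIPMAR0 */
	uint16_t sipmar1 = 0x5544;     /* stand-in for ENETC_SIPMAR1 */
	uint8_t addr[6] __attribute__((aligned(4)));

	memcpy(addr, &sipmar0, 4);     /* kernel does *(u32 *)addr = ... */
	memcpy(addr + 4, &sipmar1, 2);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n", /* 00:11:22:33:44:55 on LE */
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}
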
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
index ee1468e3eaa3..91f02c505028 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/* Copyright 2021 NXP Semiconductors
+/* Copyright 2021 NXP
*
* The Integrated Endpoint Register Block (IERB) is configured by pre-boot
* software and is supposed to be to ENETC what a NVRAM is to a 'real' PCIe
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.h b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
index b3b774e0998a..c2ce47c4be9f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/* Copyright 2021 NXP Semiconductors */
+/* Copyright 2021 NXP */
#include <linux/pci.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index f0ff95096846..64f92770691f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -40,7 +40,7 @@ static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, saddr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, saddr->sa_data);
enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data);
return 0;
@@ -517,10 +517,13 @@ static void enetc_port_si_configure(struct enetc_si *si)
static void enetc_configure_port_mac(struct enetc_hw *hw)
{
+ int tc;
+
enetc_port_wr(hw, ENETC_PM0_MAXFRM,
ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
- enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+ for (tc = 0; tc < 8; tc++)
+ enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
@@ -541,8 +544,7 @@ static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
if (phy_interface_mode_is_rgmii(phy_mode)) {
val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
- val &= ~ENETC_PM0_IFM_EN_AUTO;
- val &= ENETC_PM0_IFM_IFMODE_MASK;
+ val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);
val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;
enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
}
@@ -760,10 +762,14 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK;
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
@@ -780,7 +786,7 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
}
/* pick up primary MAC address from SI */
- enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+ enetc_load_primary_mac_addr(&si->hw, ndev);
}
static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 4577226d3c6a..0536d2c76fbc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -486,14 +486,16 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
data_size = sizeof(struct streamid_data);
si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!si_data)
+ return -ENOMEM;
cbd.length = cpu_to_le16(data_size);
dma = dma_map_single(&priv->si->pdev->dev, si_data,
data_size, DMA_FROM_DEVICE);
if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- kfree(si_data);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out;
}
cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
@@ -512,12 +514,10 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
err = enetc_send_cmd(priv->si, &cbd);
if (err)
- return -EINVAL;
+ goto out;
- if (!enable) {
- kfree(si_data);
- return 0;
- }
+ if (!enable)
+ goto out;
/* Enable the entry overwrite again incase space flushed by hardware */
memset(&cbd, 0, sizeof(cbd));
@@ -560,6 +560,10 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
}
err = enetc_send_cmd(priv->si, &cbd);
+out:
+ if (!dma_mapping_error(&priv->si->pdev->dev, dma))
+ dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE);
+
kfree(si_data);
return err;
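
The enetc_qos change funnels every failure path through one out: label that unmaps only when a mapping exists and always frees the buffer, rather than duplicating cleanup in each branch. A small model of that single-exit style, with stub functions in place of the DMA API:

/* goto-based cleanup: conditional unmap, unconditional free. */
#include <stdio.h>
#include <stdlib.h>

static int map_ok; /* stand-in for !dma_mapping_error() */
static int do_map(void) { map_ok = 1; return 0; }
static void do_unmap(void) { map_ok = 0; }
static int send_cmd(int fail) { return fail ? -22 : 0; }

int main(void)
{
	char *buf = calloc(1, 64);
	int err;

	if (!buf)
		return -12;
	err = do_map();
	if (err)
		goto out;
	err = send_cmd(1); /* simulate a failure after mapping */
out:
	if (map_ok)
		do_unmap(); /* unmap only when the mapping exists */
	free(buf);          /* the buffer is freed on every path */
	printf("err=%d\n", err);
	return err;
}
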
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 0704f6bf12fd..17924305afa2 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -122,16 +122,20 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
/* pick up primary MAC address from SI */
- enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+ enetc_load_primary_mac_addr(&si->hw, ndev);
}
static int enetc_vf_probe(struct pci_dev *pdev,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 80bd5c629fa0..bc418b910999 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1768,11 +1768,8 @@ static int fec_get_mac(struct net_device *ndev)
return 0;
}
- memcpy(ndev->dev_addr, iap, ETH_ALEN);
-
/* Adjust MAC if using macaddr */
- if (iap == macaddr)
- ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
+ eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);
return 0;
}
@@ -3326,7 +3323,7 @@ fec_set_mac_address(struct net_device *ndev, void *p)
if (addr) {
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
}
/* Add netif status check here to avoid system hang in below case:
@@ -4176,5 +4173,4 @@ static struct platform_driver fec_driver = {
module_platform_driver(fec_driver);
-MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");
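
fec_get_mac() now derives the per-port address with eth_hw_addr_gen(), i.e. base address plus dev_id with the carry propagated across bytes. A runnable reimplementation of that arithmetic follows; the kernel helper works on a u64, and this byte-wise version is believed to match it for offsets that fit in the address:

/* Derive a per-port MAC: base address plus an integer id, with carry. */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static void hw_addr_gen(unsigned char *dst, const unsigned char *base,
			unsigned int id)
{
	int i;

	memcpy(dst, base, ETH_ALEN);
	for (i = ETH_ALEN - 1; i >= 0 && id; i--) {
		unsigned int v = dst[i] + (id & 0xff);

		dst[i] = v & 0xff;
		id >>= 8;
		id += v >> 8; /* propagate the carry into the next byte */
	}
}

int main(void)
{
	const unsigned char base[ETH_ALEN] = {0x02, 0x00, 0x00, 0x00, 0x00, 0xff};
	unsigned char out[ETH_ALEN];

	hw_addr_gen(out, base, 1); /* dev_id 1: last byte wraps, carry moves up */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       out[0], out[1], out[2], out[3], out[4], out[5]);
	return 0;
}
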
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 73ff359a15f1..bbbde9f701c2 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -112,7 +112,7 @@ static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sock = addr;
- memcpy(dev->dev_addr, sock->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, sock->sa_data);
mpc52xx_fec_set_paddr(dev, sock->sa_data);
return 0;
@@ -890,7 +890,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
*
* First try to read MAC address from DT
*/
- rv = of_get_mac_address(np, ndev->dev_addr);
+ rv = of_get_ethdev_address(np, ndev);
if (rv) {
struct mpc52xx_fec __iomem *fec = priv->fec;
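of_get_ethdev_address() is the netdev-aware variant of of_get_mac_address(): it stores the result through the const-safe setter instead of writing into a raw u8 pointer. The usual probe-time shape, sketched (np, ndev and dev are assumed from the surrounding probe function):

err = of_get_ethdev_address(np, ndev);
if (err) {
	/* no usable address in the device tree: fall back to random */
	eth_hw_addr_random(ndev);
	dev_warn(dev, "using random MAC address %pM\n", ndev->dev_addr);
}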
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index bce3c9398887..1950a8936bc0 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -366,7 +366,7 @@ static void set_dflts(struct dtsec_cfg *cfg)
cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
}
-static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
+static void set_mac_address(struct dtsec_regs __iomem *regs, const u8 *adr)
{
u32 tmp;
@@ -516,7 +516,7 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
if (addr) {
MAKE_ENET_ADDR_FROM_UINT64(addr, eth_addr);
- set_mac_address(regs, (u8 *)eth_addr);
+ set_mac_address(regs, (const u8 *)eth_addr);
}
/* HASH */
@@ -1022,7 +1022,7 @@ int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
return 0;
}
-int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
+int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
enum comm_mode mode = COMM_MODE_NONE;
@@ -1041,7 +1041,7 @@ int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
 * Station address has to be swapped (big endian to little endian)
*/
dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
- set_mac_address(dtsec->regs, (u8 *)(*enet_addr));
+ set_mac_address(dtsec->regs, (const u8 *)(*enet_addr));
graceful_start(dtsec, mode);
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
index 5149d96ec2c1..68512c3bd6e5 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -37,7 +37,7 @@
struct fman_mac *dtsec_config(struct fman_mac_params *params);
int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
-int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr);
+int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr);
int dtsec_adjust_link(struct fman_mac *dtsec,
u16 speed);
int dtsec_restart_autoneg(struct fman_mac *dtsec);
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 62f42921933d..2216b7f51d26 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -354,7 +354,7 @@ struct fman_mac {
bool allmulti_enabled;
};
-static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
+static void add_addr_in_paddr(struct memac_regs __iomem *regs, const u8 *adr,
u8 paddr_num)
{
u32 tmp0, tmp1;
@@ -897,12 +897,12 @@ int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
return 0;
}
-int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr)
+int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr)
{
if (!is_init_done(memac->memac_drv_param))
return -EINVAL;
- add_addr_in_paddr(memac->regs, (u8 *)(*enet_addr), 0);
+ add_addr_in_paddr(memac->regs, (const u8 *)(*enet_addr), 0);
return 0;
}
@@ -1058,7 +1058,7 @@ int memac_init(struct fman_mac *memac)
/* MAC Address */
if (memac->addr != 0) {
MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
- add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
+ add_addr_in_paddr(memac->regs, (const u8 *)eth_addr, 0);
}
fixed_link = memac_drv_param->fixed_link;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index b2c671ec0ce7..3820f7a22983 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -40,7 +40,7 @@
struct fman_mac *memac_config(struct fman_mac_params *params);
int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
-int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr);
+int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr);
int memac_adjust_link(struct fman_mac *memac, u16 speed);
int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 41946b16f6c7..311c1906e044 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -221,7 +221,7 @@ struct fman_mac {
bool allmulti_enabled;
};
-static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
+static void set_mac_address(struct tgec_regs __iomem *regs, const u8 *adr)
{
u32 tmp0, tmp1;
@@ -514,13 +514,13 @@ int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
return 0;
}
-int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *p_enet_addr)
+int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_addr)
{
if (!is_init_done(tgec->cfg))
return -EINVAL;
tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr);
- set_mac_address(tgec->regs, (u8 *)(*p_enet_addr));
+ set_mac_address(tgec->regs, (const u8 *)(*p_enet_addr));
return 0;
}
@@ -704,7 +704,7 @@ int tgec_init(struct fman_mac *tgec)
if (tgec->addr) {
MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
- set_mac_address(tgec->regs, (u8 *)eth_addr);
+ set_mac_address(tgec->regs, (const u8 *)eth_addr);
}
/* interrupts */
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
index 3bfd1062b386..b28b20b26148 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -37,7 +37,7 @@
struct fman_mac *tgec_config(struct fman_mac_params *params);
int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
-int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *enet_addr);
+int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *enet_addr);
int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index 824a81a9f350..daa285a9b8b2 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -66,7 +66,7 @@ struct mac_device {
int (*stop)(struct mac_device *mac_dev);
void (*adjust_link)(struct mac_device *mac_dev);
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
- int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
+ int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr);
int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
int (*set_multi)(struct net_device *net_dev,
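The fman hunks are pure const propagation, but they are load-bearing: netdev->dev_addr is being turned into a const pointer, so any helper that only reads the address has to say so in its prototype, or every caller ends up casting the qualifier away. Sketched effect at a call site (names illustrative):

/* was: void set_mac_address(struct regs __iomem *regs, u8 *adr); */
void set_mac_address(struct regs __iomem *regs, const u8 *adr);

set_mac_address(priv->regs, ndev->dev_addr);	/* compiles without a cast */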
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 2db6e38a772e..bacf25318f87 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1005,7 +1005,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
spin_lock_init(&fep->lock);
spin_lock_init(&fep->tx_lock);
- of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr);
+ of_get_ethdev_address(ofdev->dev.of_node, ndev);
ret = fep->ops->allocate_bd(ndev);
if (ret)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index af6ad94bf24a..acab58fd3db3 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -753,7 +753,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
if (stash_len || stash_idx)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
- err = of_get_mac_address(np, dev->dev_addr);
+ err = of_get_ethdev_address(np, dev);
if (err) {
eth_hw_addr_random(dev);
dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 3eb288d10b0c..823221c912ab 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3205,7 +3205,7 @@ static int ucc_geth_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
/*
* If device is not running, we will set mac addr register
@@ -3731,7 +3731,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
goto err_free_netdev;
}
- of_get_mac_address(np, dev->dev_addr);
+ of_get_ethdev_address(np, dev);
ugeth->ug_info = ug_info;
ugeth->dev = device;
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 62c0bed82ced..b0d733e9a7c6 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -334,6 +334,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
u8 *buf;
size_t len;
u_char buggybuf[32];
+ u8 addr[ETH_ALEN];
dev_dbg(&link->dev, "fmvj18x_config\n");
@@ -468,8 +469,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
goto failed;
}
/* Read MACID from CIS */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = buf[i + 5];
+ eth_hw_addr_set(dev, &buf[5]);
kfree(buf);
} else {
if (pcmcia_get_mac_from_cis(link, dev))
@@ -490,7 +490,8 @@ static int fmvj18x_config(struct pcmcia_device *link)
case UNGERMANN:
/* Read MACID from register */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i);
+ addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i);
+ eth_hw_addr_set(dev, addr);
card_name = "Access/CARD";
break;
case XXX10304:
@@ -499,16 +500,15 @@ static int fmvj18x_config(struct pcmcia_device *link)
pr_notice("unable to read hardware net address\n");
goto failed;
}
- for (i = 0 ; i < 6; i++) {
- dev->dev_addr[i] = buggybuf[i];
- }
+ eth_hw_addr_set(dev, buggybuf);
card_name = "FMV-J182";
break;
case MBH10302:
default:
/* Read MACID from register */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + MAC_ID + i);
+ addr[i] = inb(ioaddr + MAC_ID + i);
+ eth_hw_addr_set(dev, addr);
card_name = "FMV-J181";
break;
}
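With dev->dev_addr read-only, drivers that assemble an address one byte at a time (here from PCMCIA I/O ports) must stage it in a local buffer and commit it in one call. The generic shape, sketched (MAC_ID_BASE stands in for the card's register offset; ioaddr and dev come from the caller):

u8 addr[ETH_ALEN];
int i;

for (i = 0; i < ETH_ALEN; i++)
	addr[i] = inb(ioaddr + MAC_ID_BASE + i);	/* one byte per port read */
eth_hw_addr_set(dev, addr);			/* single copy into the netdev */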
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 1d3188e8e3b3..51ed8fe71d2d 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -30,7 +30,7 @@
#define GVE_MIN_MSIX 3
/* Numbers of gve tx/rx stats in stats report. */
-#define GVE_TX_STATS_REPORT_NUM 5
+#define GVE_TX_STATS_REPORT_NUM 6
#define GVE_RX_STATS_REPORT_NUM 2
/* Interval to schedule a stats report update, 20000ms. */
@@ -224,11 +224,6 @@ struct gve_tx_iovec {
u32 iov_padding; /* padding associated with this segment */
};
-struct gve_tx_dma_buf {
- DEFINE_DMA_UNMAP_ADDR(dma);
- DEFINE_DMA_UNMAP_LEN(len);
-};
-
/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
* ring entry but only used for a pkt_desc not a seg_desc
*/
@@ -236,7 +231,10 @@ struct gve_tx_buffer_state {
struct sk_buff *skb; /* skb for this pkt */
union {
struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
- struct gve_tx_dma_buf buf;
+ struct {
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ };
};
};
@@ -280,7 +278,8 @@ struct gve_tx_pending_packet_dqo {
* All others correspond to `skb`'s frags and should be unmapped with
* `dma_unmap_page`.
*/
- struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1];
+ DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
+ DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
u16 num_bufs;
/* Linked list index to next element in the list, or -1 if none */
@@ -342,8 +341,8 @@ struct gve_tx_ring {
union {
/* GQI fields */
struct {
- /* NIC tail pointer */
- __be32 last_nic_done;
+ /* Spinlock for when cleanup in progress */
+ spinlock_t clean_lock;
};
/* DQO fields. */
@@ -414,7 +413,9 @@ struct gve_tx_ring {
u32 q_num ____cacheline_aligned; /* queue idx */
u32 stop_queue; /* count of queue stops */
u32 wake_queue; /* count of queue wakes */
+ u32 queue_timeout; /* count of queue timeouts */
u32 ntfy_id; /* notification block index */
+ u32 last_kick_msec; /* Last time the queue was kicked */
dma_addr_t bus; /* dma address of the descr ring */
dma_addr_t q_resources_bus; /* dma address of the queue resources */
dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
@@ -780,7 +781,7 @@ struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
gve_num_tx_qpls(priv));
/* we are out of rx qpls */
- if (id == priv->qpl_cfg.qpl_map_size)
+ if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
return NULL;
set_bit(id, priv->qpl_cfg.qpl_id_map);
@@ -822,15 +823,15 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
-__be32 gve_tx_load_event_counter(struct gve_priv *priv,
- struct gve_tx_ring *tx);
+u32 gve_tx_load_event_counter(struct gve_priv *priv,
+ struct gve_tx_ring *tx);
+bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
-bool gve_rx_poll(struct gve_notify_block *block, int budget);
+int gve_rx_poll(struct gve_notify_block *block, int budget);
+bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
-bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
- netdev_features_t feat);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
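Dropping struct gve_tx_dma_buf in favor of an anonymous struct works because DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() declare plain fields and the dma_unmap_* accessors are token-splicing macros; the anonymous struct keeps info->dma and info->len addressable without the extra ->buf hop. Roughly what dma-mapping.h provides when CONFIG_NEED_DMA_MAP_STATE is set (quoted from memory, so treat as a sketch):

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
/* fields vanish and the accessors become (0) / no-ops */
#endif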
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index f089d33dd48e..af2c1d1535f5 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -733,7 +733,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
}
priv->dev->max_mtu = mtu;
priv->num_event_counters = be16_to_cpu(descriptor->counters);
- ether_addr_copy(priv->dev->dev_addr, descriptor->mac);
+ eth_hw_addr_set(priv->dev, descriptor->mac);
mac = descriptor->mac;
dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 47c3d8f313fc..3953f6f7a427 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -270,6 +270,7 @@ enum gve_stat_names {
TX_LAST_COMPLETION_PROCESSED = 5,
RX_NEXT_EXPECTED_SEQUENCE = 6,
RX_BUFFERS_POSTED = 7,
+ TX_TIMEOUT_CNT = 8,
// stats from NIC
RX_QUEUE_DROP_CNT = 65,
RX_NO_BUFFERS_POSTED = 66,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 716e6240305d..618a3e1d858e 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -330,8 +330,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = tmp_tx_bytes;
data[i++] = tx->wake_queue;
data[i++] = tx->stop_queue;
- data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
- tx));
+ data[i++] = gve_tx_load_event_counter(priv, tx);
data[i++] = tx->dma_mapping_error;
/* stats from NIC */
if (skip_nic_stats) {
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 099a2bc5ae67..7647cd05b1d2 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -24,6 +24,9 @@
#define GVE_VERSION "1.0.0"
#define GVE_VERSION_PREFIX "GVE-"
+// Minimum amount of time between queue kicks in msec (10 seconds)
+#define MIN_TX_TIMEOUT_GAP (1000 * 10)
+
const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
@@ -41,6 +44,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
{
struct gve_priv *priv = netdev_priv(dev);
unsigned int start;
+ u64 packets, bytes;
int ring;
if (priv->rx) {
@@ -48,10 +52,12 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
do {
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
- s->rx_packets += priv->rx[ring].rpackets;
- s->rx_bytes += priv->rx[ring].rbytes;
+ packets = priv->rx[ring].rpackets;
+ bytes = priv->rx[ring].rbytes;
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
+ s->rx_packets += packets;
+ s->rx_bytes += bytes;
}
}
if (priv->tx) {
@@ -59,10 +65,12 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
do {
start =
u64_stats_fetch_begin(&priv->tx[ring].statss);
- s->tx_packets += priv->tx[ring].pkt_done;
- s->tx_bytes += priv->tx[ring].bytes_done;
+ packets = priv->tx[ring].pkt_done;
+ bytes = priv->tx[ring].bytes_done;
} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
start));
+ s->tx_packets += packets;
+ s->tx_bytes += bytes;
}
}
}
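The get_stats fix matters because u64_stats_fetch_retry() can loop: anything accumulated inside the loop body runs again on a retry and double-counts. The corrected shape snapshots inside the loop and accumulates once outside (ring and s as in the hunk above):

unsigned int start;
u64 packets, bytes;

do {
	start = u64_stats_fetch_begin(&ring->statss);
	packets = ring->rpackets;	/* snapshot only, inside the loop */
	bytes = ring->rbytes;
} while (u64_stats_fetch_retry(&ring->statss, start));

s->rx_packets += packets;		/* accumulate exactly once */
s->rx_bytes += bytes;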
@@ -82,6 +90,9 @@ static int gve_alloc_counter_array(struct gve_priv *priv)
static void gve_free_counter_array(struct gve_priv *priv)
{
+ if (!priv->counter_array)
+ return;
+
dma_free_coherent(&priv->pdev->dev,
priv->num_event_counters *
sizeof(*priv->counter_array),
@@ -142,6 +153,9 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
static void gve_free_stats_report(struct gve_priv *priv)
{
+ if (!priv->stats_report)
+ return;
+
del_timer_sync(&priv->stats_report_timer);
dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
priv->stats_report, priv->stats_report_bus);
@@ -181,34 +195,40 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
__be32 __iomem *irq_doorbell;
bool reschedule = false;
struct gve_priv *priv;
+ int work_done = 0;
block = container_of(napi, struct gve_notify_block, napi);
priv = block->priv;
if (block->tx)
reschedule |= gve_tx_poll(block, budget);
- if (block->rx)
- reschedule |= gve_rx_poll(block, budget);
+ if (block->rx) {
+ work_done = gve_rx_poll(block, budget);
+ reschedule |= work_done == budget;
+ }
if (reschedule)
return budget;
- napi_complete(napi);
- irq_doorbell = gve_irq_doorbell(priv, block);
- iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
+ /* Complete processing - don't unmask irq if busy polling is enabled */
+ if (likely(napi_complete_done(napi, work_done))) {
+ irq_doorbell = gve_irq_doorbell(priv, block);
+ iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
- /* Double check we have no extra work.
- * Ensure unmask synchronizes with checking for work.
- */
- mb();
- if (block->tx)
- reschedule |= gve_tx_poll(block, -1);
- if (block->rx)
- reschedule |= gve_rx_poll(block, -1);
- if (reschedule && napi_reschedule(napi))
- iowrite32be(GVE_IRQ_MASK, irq_doorbell);
+ /* Ensure the IRQ ACK is visible before we check for pending work.
+ * If the queue posted new work before the ACK, the barrier makes
+ * sure we observe it here.
+ */
+ mb();
- return 0;
+ if (block->tx)
+ reschedule |= gve_tx_clean_pending(priv, block->tx);
+ if (block->rx)
+ reschedule |= gve_rx_work_pending(block->rx);
+
+ if (reschedule && napi_reschedule(napi))
+ iowrite32be(GVE_IRQ_MASK, irq_doorbell);
+ }
+ return work_done;
}
static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
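The poll rewrite follows the canonical NAPI contract: return budget while work remains; otherwise report the true work_done via napi_complete_done(), which returns false (keeping the IRQ masked) when busy polling owns the queue; then unmask and re-check for work that raced with the unmask. A condensed sketch of that shape, where clean_queues(), more_work(), unmask_irq() and mask_irq() stand in for the driver's specifics:

static int napi_poll_sketch(struct napi_struct *napi, int budget)
{
	int work_done = clean_queues(napi, budget);

	if (work_done == budget)
		return budget;	/* more work: stay scheduled, IRQ stays masked */

	if (napi_complete_done(napi, work_done)) {
		unmask_irq(napi);	/* e.g. a doorbell write */
		mb();			/* order the unmask against the re-check */
		if (more_work(napi) && napi_reschedule(napi))
			mask_irq(napi);
	}
	return work_done;
}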
@@ -268,7 +288,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
int i, j;
int err;
- priv->msix_vectors = kvzalloc(num_vecs_requested *
+ priv->msix_vectors = kvcalloc(num_vecs_requested,
sizeof(*priv->msix_vectors), GFP_KERNEL);
if (!priv->msix_vectors)
return -ENOMEM;
@@ -370,18 +390,19 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
{
int i;
- if (priv->msix_vectors) {
- /* Free the irqs */
- for (i = 0; i < priv->num_ntfy_blks; i++) {
- struct gve_notify_block *block = &priv->ntfy_blocks[i];
- int msix_idx = i;
+ if (!priv->msix_vectors)
+ return;
- irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
- NULL);
- free_irq(priv->msix_vectors[msix_idx].vector, block);
- }
- free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
+ /* Free the irqs */
+ for (i = 0; i < priv->num_ntfy_blks; i++) {
+ struct gve_notify_block *block = &priv->ntfy_blocks[i];
+ int msix_idx = i;
+
+ irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
+ NULL);
+ free_irq(priv->msix_vectors[msix_idx].vector, block);
}
+ free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
dma_free_coherent(&priv->pdev->dev,
priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
priv->ntfy_blocks, priv->ntfy_block_bus);
@@ -628,7 +649,7 @@ static int gve_alloc_rings(struct gve_priv *priv)
int err;
/* Setup tx rings */
- priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
+ priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx),
GFP_KERNEL);
if (!priv->tx)
return -ENOMEM;
@@ -641,7 +662,7 @@ static int gve_alloc_rings(struct gve_priv *priv)
goto free_tx;
/* Setup rx rings */
- priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
+ priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx),
GFP_KERNEL);
if (!priv->rx) {
err = -ENOMEM;
@@ -764,12 +785,11 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
qpl->id = id;
qpl->num_entries = 0;
- qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
+ qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
/* caller handles clean up */
if (!qpl->pages)
return -ENOMEM;
- qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
- GFP_KERNEL);
+ qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
/* caller handles clean up */
if (!qpl->page_buses)
return -ENOMEM;
@@ -828,7 +848,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
if (priv->queue_format == GVE_GQI_RDA_FORMAT)
return 0;
- priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
+ priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
if (!priv->qpls)
return -ENOMEM;
@@ -847,7 +867,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
sizeof(unsigned long) * BITS_PER_BYTE;
- priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
+ priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls),
sizeof(unsigned long), GFP_KERNEL);
if (!priv->qpl_cfg.qpl_id_map) {
err = -ENOMEM;
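The kvzalloc() to kvcalloc() conversions are overflow hardening: kvzalloc(n * size, ...) trusts the caller's multiplication, while kvcalloc(n, size, ...) performs it internally with a checked multiply, so a bug-controlled count cannot wrap into a too-small allocation. In spirit (the real helper routes through kvmalloc_array() with __GFP_ZERO):

static inline void *kvcalloc_sketch(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;	/* n * size would wrap */
	return kvzalloc(bytes, flags);
}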
@@ -1104,9 +1124,47 @@ static void gve_turnup(struct gve_priv *priv)
static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
- struct gve_priv *priv = netdev_priv(dev);
+ struct gve_notify_block *block;
+ struct gve_tx_ring *tx = NULL;
+ struct gve_priv *priv;
+ u32 last_nic_done;
+ u32 current_time;
+ u32 ntfy_idx;
+
+ netdev_info(dev, "Timeout on tx queue %d", txqueue);
+ priv = netdev_priv(dev);
+ if (txqueue >= priv->tx_cfg.num_queues)
+ goto reset;
+
+ ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
+ if (ntfy_idx >= priv->num_ntfy_blks)
+ goto reset;
+ block = &priv->ntfy_blocks[ntfy_idx];
+ tx = block->tx;
+
+ current_time = jiffies_to_msecs(jiffies);
+ if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
+ goto reset;
+
+ /* Check for missed completions; if there are any, we can kick the
+ * queue instead of resetting.
+ */
+ last_nic_done = gve_tx_load_event_counter(priv, tx);
+ if (last_nic_done - tx->done) {
+ netdev_info(dev, "Kicking queue %d", txqueue);
+ iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
+ napi_schedule(&block->napi);
+ tx->last_kick_msec = current_time;
+ goto out;
+ } // Else reset.
+
+reset:
gve_schedule_reset(priv);
+
+out:
+ if (tx)
+ tx->queue_timeout++;
priv->tx_timeo_cnt++;
}
@@ -1185,9 +1243,10 @@ static void gve_handle_reset(struct gve_priv *priv)
void gve_handle_report_stats(struct gve_priv *priv)
{
- int idx, stats_idx = 0, tx_bytes;
- unsigned int start = 0;
struct stats *stats = priv->stats_report->stats;
+ int idx, stats_idx = 0;
+ unsigned int start = 0;
+ u64 tx_bytes;
if (!gve_get_report_stats(priv))
return;
@@ -1234,6 +1293,11 @@ void gve_handle_report_stats(struct gve_priv *priv)
.value = cpu_to_be64(last_completion),
.queue_id = cpu_to_be32(idx),
};
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
+ .value = cpu_to_be64(priv->tx[idx].queue_timeout),
+ .queue_id = cpu_to_be32(idx),
+ };
}
}
/* rx stats */
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index bb8261368250..95bc4d8a1811 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -16,19 +16,23 @@ static void gve_rx_free_buffer(struct device *dev,
dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) &
GVE_DATA_SLOT_ADDR_PAGE_MASK);
+ page_ref_sub(page_info->page, page_info->pagecnt_bias - 1);
gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
}
static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
{
- if (rx->data.raw_addressing) {
- u32 slots = rx->mask + 1;
- int i;
+ u32 slots = rx->mask + 1;
+ int i;
+ if (rx->data.raw_addressing) {
for (i = 0; i < slots; i++)
gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
&rx->data.data_ring[i]);
} else {
+ for (i = 0; i < slots; i++)
+ page_ref_sub(rx->data.page_info[i].page,
+ rx->data.page_info[i].pagecnt_bias - 1);
gve_unassign_qpl(priv, rx->data.qpl->id);
rx->data.qpl = NULL;
}
@@ -69,6 +73,9 @@ static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
page_info->page_offset = 0;
page_info->page_address = page_address(page);
*slot_addr = cpu_to_be64(addr);
+ /* The page already has 1 ref */
+ page_ref_add(page, INT_MAX - 1);
+ page_info->pagecnt_bias = INT_MAX;
}
static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
@@ -104,8 +111,14 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
if (!rx->data.page_info)
return -ENOMEM;
- if (!rx->data.raw_addressing)
+ if (!rx->data.raw_addressing) {
rx->data.qpl = gve_assign_rx_qpl(priv);
+ if (!rx->data.qpl) {
+ kvfree(rx->data.page_info);
+ rx->data.page_info = NULL;
+ return -ENOMEM;
+ }
+ }
for (i = 0; i < slots; i++) {
if (!rx->data.raw_addressing) {
struct page *page = rx->data.qpl->pages[i];
@@ -289,21 +302,22 @@ static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *sl
static bool gve_rx_can_flip_buffers(struct net_device *netdev)
{
- return PAGE_SIZE == 4096
+ return PAGE_SIZE >= 4096
? netdev->mtu + GVE_RX_PAD + ETH_HLEN <= PAGE_SIZE / 2 : false;
}
-static int gve_rx_can_recycle_buffer(struct page *page)
+static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info)
{
- int pagecount = page_count(page);
+ int pagecount = page_count(page_info->page);
/* This page is not being used by any SKBs - reuse */
- if (pagecount == 1)
+ if (pagecount == page_info->pagecnt_bias)
return 1;
/* This page is still being used by an SKB - we can't reuse */
- else if (pagecount >= 2)
+ else if (pagecount > page_info->pagecnt_bias)
return 0;
- WARN(pagecount < 1, "Pagecount should never be < 1");
+ WARN(pagecount < page_info->pagecnt_bias,
+ "Pagecount should never be less than the bias.");
return -1;
}
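The recycling test now compares page_count() against a per-buffer bias rather than the constants 1 and 2. The scheme: grab a large block of page references once at setup, hand them to the stack by decrementing a local counter (no atomic in the hot path), and call the buffer free when the kernel's refcount has fallen back to the bias. Minimal sketch, names illustrative:

struct buf_info {
	struct page *page;
	int pagecnt_bias;	/* references the driver still owns */
};

static void buf_init(struct buf_info *bi, struct page *page)
{
	page_ref_add(page, INT_MAX - 1);	/* page arrives with one ref */
	bi->pagecnt_bias = INT_MAX;
	bi->page = page;
}

/* Hand one reference to the stack without touching the atomic refcount. */
static void buf_give_to_stack(struct buf_info *bi)
{
	bi->pagecnt_bias--;
}

/* Recyclable iff no SKB outside the driver still holds a reference. */
static bool buf_can_recycle(const struct buf_info *bi)
{
	return page_count(bi->page) == bi->pagecnt_bias;
}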
@@ -319,11 +333,11 @@ gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
if (!skb)
return NULL;
- /* Optimistically stop the kernel from freeing the page by increasing
- * the page bias. We will check the refcount in refill to determine if
- * we need to alloc a new page.
+ /* Optimistically stop the kernel from freeing the page.
+ * We will check again in refill to determine if we need to alloc a
+ * new page.
*/
- get_page(page_info->page);
+ gve_dec_pagecnt_bias(page_info);
return skb;
}
@@ -346,7 +360,7 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
/* No point in recycling if we didn't get the skb */
if (skb) {
/* Make sure that the page isn't freed. */
- get_page(page_info->page);
+ gve_dec_pagecnt_bias(page_info);
gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
}
} else {
@@ -370,8 +384,18 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
union gve_rx_data_slot *data_slot;
struct sk_buff *skb = NULL;
dma_addr_t page_bus;
+ void *va;
u16 len;
+ /* Prefetch two packet pages ahead; we will need them soon. */
+ page_info = &rx->data.page_info[(idx + 2) & rx->mask];
+ va = page_info->page_address + GVE_RX_PAD +
+ page_info->page_offset;
+
+ prefetch(page_info->page); /* Kernel page struct. */
+ prefetch(va); /* Packet header. */
+ prefetch(va + 64); /* Next cacheline too. */
+
/* drop this packet */
if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) {
u64_stats_update_begin(&rx->statss);
@@ -402,7 +426,7 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
int recycle = 0;
if (can_flip) {
- recycle = gve_rx_can_recycle_buffer(page_info->page);
+ recycle = gve_rx_can_recycle_buffer(page_info);
if (recycle < 0) {
if (!rx->data.raw_addressing)
gve_schedule_reset(priv);
@@ -450,7 +474,7 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
return true;
}
-static bool gve_rx_work_pending(struct gve_rx_ring *rx)
+bool gve_rx_work_pending(struct gve_rx_ring *rx)
{
struct gve_rx_desc *desc;
__be16 flags_seq;
@@ -493,7 +517,7 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
* owns half the page it is impossible to tell which half. Either
* the whole page is free or it needs to be replaced.
*/
- int recycle = gve_rx_can_recycle_buffer(page_info->page);
+ int recycle = gve_rx_can_recycle_buffer(page_info);
if (recycle < 0) {
if (!rx->data.raw_addressing)
@@ -508,8 +532,13 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
gve_rx_free_buffer(dev, page_info, data_slot);
page_info->page = NULL;
- if (gve_rx_alloc_buffer(priv, dev, page_info, data_slot))
+ if (gve_rx_alloc_buffer(priv, dev, page_info,
+ data_slot)) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_buf_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
break;
+ }
}
}
fill_cnt++;
@@ -518,8 +547,8 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
return true;
}
-bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
- netdev_features_t feat)
+static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
+ netdev_features_t feat)
{
struct gve_priv *priv = rx->gve;
u32 work_done = 0, packets = 0;
@@ -540,6 +569,10 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
"[%d] seqno=%d rx->desc.seqno=%d\n",
rx->q_num, GVE_SEQNO(desc->flags_seq),
rx->desc.seqno);
+
+ /* prefetch two descriptors ahead */
+ prefetch(rx->desc.desc_ring + ((cnt + 2) & rx->mask));
+
dropped = !gve_rx(rx, desc, feat, idx);
if (!dropped) {
bytes += be16_to_cpu(desc->len) - GVE_RX_PAD;
@@ -553,13 +586,15 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
}
if (!work_done && rx->fill_cnt - cnt > rx->db_threshold)
- return false;
+ return 0;
- u64_stats_update_begin(&rx->statss);
- rx->rpackets += packets;
- rx->rbytes += bytes;
- u64_stats_update_end(&rx->statss);
- rx->cnt = cnt;
+ if (work_done) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rpackets += packets;
+ rx->rbytes += bytes;
+ u64_stats_update_end(&rx->statss);
+ rx->cnt = cnt;
+ }
/* restock ring slots */
if (!rx->data.raw_addressing) {
@@ -570,26 +605,26 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
* falls below a threshold.
*/
if (!gve_rx_refill_buffers(priv, rx))
- return false;
+ return 0;
/* If we were not able to completely refill buffers, we'll want
* to schedule this queue for work again to refill buffers.
*/
if (rx->fill_cnt - cnt <= rx->db_threshold) {
gve_rx_write_doorbell(priv, rx);
- return true;
+ return budget;
}
}
gve_rx_write_doorbell(priv, rx);
- return gve_rx_work_pending(rx);
+ return work_done;
}
-bool gve_rx_poll(struct gve_notify_block *block, int budget)
+int gve_rx_poll(struct gve_notify_block *block, int budget)
{
struct gve_rx_ring *rx = block->rx;
netdev_features_t feat;
- bool repoll = false;
+ int work_done = 0;
feat = block->napi.dev->features;
@@ -598,8 +633,7 @@ bool gve_rx_poll(struct gve_notify_block *block, int budget)
budget = INT_MAX;
if (budget > 0)
- repoll |= gve_clean_rx_done(rx, budget, feat);
- else
- repoll |= gve_rx_work_pending(rx);
- return repoll;
+ work_done = gve_clean_rx_done(rx, budget, feat);
+
+ return work_done;
}
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 665ac795a1ad..a9cb241fedf4 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -144,7 +144,7 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)
gve_tx_remove_from_block(priv, idx);
slots = tx->mask + 1;
- gve_clean_tx_done(priv, tx, tx->req, false);
+ gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
netdev_tx_reset_queue(tx->netdev_txq);
dma_free_coherent(hdev, sizeof(*tx->q_resources),
@@ -176,6 +176,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
/* Make sure everything is zeroed to start */
memset(tx, 0, sizeof(*tx));
+ spin_lock_init(&tx->clean_lock);
tx->q_num = idx;
tx->mask = slots - 1;
@@ -303,15 +304,15 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{
if (info->skb) {
- dma_unmap_single(dev, dma_unmap_addr(&info->buf, dma),
- dma_unmap_len(&info->buf, len),
+ dma_unmap_single(dev, dma_unmap_addr(info, dma),
+ dma_unmap_len(info, len),
DMA_TO_DEVICE);
- dma_unmap_len_set(&info->buf, len, 0);
+ dma_unmap_len_set(info, len, 0);
} else {
- dma_unmap_page(dev, dma_unmap_addr(&info->buf, dma),
- dma_unmap_len(&info->buf, len),
+ dma_unmap_page(dev, dma_unmap_addr(info, dma),
+ dma_unmap_len(info, len),
DMA_TO_DEVICE);
- dma_unmap_len_set(&info->buf, len, 0);
+ dma_unmap_len_set(info, len, 0);
}
}
@@ -328,10 +329,16 @@ static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
}
+static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);
+
/* Stops the queue if the skb cannot be transmitted. */
-static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
+static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct sk_buff *skb)
{
int bytes_required = 0;
+ u32 nic_done;
+ u32 to_do;
+ int ret;
if (!tx->raw_addressing)
bytes_required = gve_skb_fifo_bytes_required(tx, skb);
@@ -339,29 +346,28 @@ static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
if (likely(gve_can_tx(tx, bytes_required)))
return 0;
- /* No space, so stop the queue */
- tx->stop_queue++;
- netif_tx_stop_queue(tx->netdev_txq);
- smp_mb(); /* sync with restarting queue in gve_clean_tx_done() */
-
- /* Now check for resources again, in case gve_clean_tx_done() freed
- * resources after we checked and we stopped the queue after
- * gve_clean_tx_done() checked.
- *
- * gve_maybe_stop_tx() gve_clean_tx_done()
- * nsegs/can_alloc test failed
- * gve_tx_free_fifo()
- * if (tx queue stopped)
- * netif_tx_queue_wake()
- * netif_tx_stop_queue()
- * Need to check again for space here!
- */
- if (likely(!gve_can_tx(tx, bytes_required)))
- return -EBUSY;
+ ret = -EBUSY;
+ spin_lock(&tx->clean_lock);
+ nic_done = gve_tx_load_event_counter(priv, tx);
+ to_do = nic_done - tx->done;
- netif_tx_start_queue(tx->netdev_txq);
- tx->wake_queue++;
- return 0;
+ /* Only try to clean if there is hope for TX */
+ if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
+ if (to_do > 0) {
+ to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);
+ gve_clean_tx_done(priv, tx, to_do, false);
+ }
+ if (likely(gve_can_tx(tx, bytes_required)))
+ ret = 0;
+ }
+ if (ret) {
+ /* No space, so stop the queue */
+ tx->stop_queue++;
+ netif_tx_stop_queue(tx->netdev_txq);
+ }
+ spin_unlock(&tx->clean_lock);
+
+ return ret;
}
static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
@@ -491,7 +497,6 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
struct gve_tx_buffer_state *info;
bool is_gso = skb_is_gso(skb);
u32 idx = tx->req & tx->mask;
- struct gve_tx_dma_buf *buf;
u64 addr;
u32 len;
int i;
@@ -515,9 +520,8 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dma_mapping_error++;
goto drop;
}
- buf = &info->buf;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
+ dma_unmap_len_set(info, len, len);
+ dma_unmap_addr_set(info, dma, addr);
payload_nfrags = shinfo->nr_frags;
if (hlen < len) {
@@ -549,10 +553,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dma_mapping_error++;
goto unmap_drop;
}
- buf = &tx->info[idx].buf;
tx->info[idx].skb = NULL;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
+ dma_unmap_len_set(&tx->info[idx], len, len);
+ dma_unmap_addr_set(&tx->info[idx], dma, addr);
gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
}
@@ -579,7 +582,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
"skb queue index out of range");
tx = &priv->tx[skb_get_queue_mapping(skb)];
- if (unlikely(gve_maybe_stop_tx(tx, skb))) {
+ if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
/* We need to ring the txq doorbell -- we have stopped the Tx
* queue for want of resources, but prior calls to gve_tx()
* may have added descriptors without ringing the doorbell.
@@ -675,19 +678,19 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
return pkts;
}
-__be32 gve_tx_load_event_counter(struct gve_priv *priv,
- struct gve_tx_ring *tx)
+u32 gve_tx_load_event_counter(struct gve_priv *priv,
+ struct gve_tx_ring *tx)
{
- u32 counter_index = be32_to_cpu((tx->q_resources->counter_index));
+ u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
+ __be32 counter = READ_ONCE(priv->counter_array[counter_index]);
- return READ_ONCE(priv->counter_array[counter_index]);
+ return be32_to_cpu(counter);
}
bool gve_tx_poll(struct gve_notify_block *block, int budget)
{
struct gve_priv *priv = block->priv;
struct gve_tx_ring *tx = block->tx;
- bool repoll = false;
u32 nic_done;
u32 to_do;
@@ -695,17 +698,23 @@ bool gve_tx_poll(struct gve_notify_block *block, int budget)
if (budget == 0)
budget = INT_MAX;
+ /* The TX path may clean completed packets itself in order to xmit
+ * more. To avoid a cleaning conflict, take the spinlock; it yields
+ * better xmit/clean concurrency than the netif lock.
+ */
+ spin_lock(&tx->clean_lock);
/* Find out how much work there is to be done */
- tx->last_nic_done = gve_tx_load_event_counter(priv, tx);
- nic_done = be32_to_cpu(tx->last_nic_done);
- if (budget > 0) {
- /* Do as much work as we have that the budget will
- * allow
- */
- to_do = min_t(u32, (nic_done - tx->done), budget);
- gve_clean_tx_done(priv, tx, to_do, true);
- }
+ nic_done = gve_tx_load_event_counter(priv, tx);
+ to_do = min_t(u32, (nic_done - tx->done), budget);
+ gve_clean_tx_done(priv, tx, to_do, true);
+ spin_unlock(&tx->clean_lock);
/* If we still have work we want to repoll */
- repoll |= (nic_done != tx->done);
- return repoll;
+ return nic_done != tx->done;
+}
+
+bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
+{
+ u32 nic_done = gve_tx_load_event_counter(priv, tx);
+
+ return nic_done != tx->done;
}
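The new clean_lock is the design pivot here: the xmit path and the NAPI poll now share one spinlock around descriptor cleanup, so a sender that finds the ring full can reap completed descriptors on the spot instead of stopping the queue and waiting for the next interrupt. The xmit-side fallback, sketched with illustrative helpers (descs_avail(), nic_completions(), clean_tx_done()):

static int maybe_stop_tx_sketch(struct priv *priv, struct tx_ring *tx,
				int descs_needed)
{
	int ret = -EBUSY;
	u32 to_do;

	if (descs_avail(tx) >= descs_needed)
		return 0;			/* fast path, lock-free */

	spin_lock(&tx->clean_lock);
	to_do = nic_completions(priv, tx) - tx->done;
	if (to_do + descs_avail(tx) >= descs_needed) {
		clean_tx_done(priv, tx, min_t(u32, to_do, NAPI_POLL_WEIGHT));
		if (descs_avail(tx) >= descs_needed)
			ret = 0;
	}
	if (ret) {
		tx->stop_queue++;
		netif_tx_stop_queue(tx->netdev_txq);
	}
	spin_unlock(&tx->clean_lock);
	return ret;
}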
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 05ddb6a75c38..ec394d991668 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -85,18 +85,16 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
int j;
for (j = 0; j < cur_state->num_bufs; j++) {
- struct gve_tx_dma_buf *buf = &cur_state->bufs[j];
-
if (j == 0) {
dma_unmap_single(tx->dev,
- dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len),
- DMA_TO_DEVICE);
+ dma_unmap_addr(cur_state, dma[j]),
+ dma_unmap_len(cur_state, len[j]),
+ DMA_TO_DEVICE);
} else {
dma_unmap_page(tx->dev,
- dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len),
- DMA_TO_DEVICE);
+ dma_unmap_addr(cur_state, dma[j]),
+ dma_unmap_len(cur_state, len[j]),
+ DMA_TO_DEVICE);
}
}
if (cur_state->skb) {
@@ -457,15 +455,15 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
const bool is_gso = skb_is_gso(skb);
u32 desc_idx = tx->dqo_tx.tail;
- struct gve_tx_pending_packet_dqo *pending_packet;
+ struct gve_tx_pending_packet_dqo *pkt;
struct gve_tx_metadata_dqo metadata;
s16 completion_tag;
int i;
- pending_packet = gve_alloc_pending_packet(tx);
- pending_packet->skb = skb;
- pending_packet->num_bufs = 0;
- completion_tag = pending_packet - tx->dqo.pending_packets;
+ pkt = gve_alloc_pending_packet(tx);
+ pkt->skb = skb;
+ pkt->num_bufs = 0;
+ completion_tag = pkt - tx->dqo.pending_packets;
gve_extract_tx_metadata_dqo(skb, &metadata);
if (is_gso) {
@@ -493,8 +491,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
/* Map the linear portion of skb */
{
- struct gve_tx_dma_buf *buf =
- &pending_packet->bufs[pending_packet->num_bufs];
u32 len = skb_headlen(skb);
dma_addr_t addr;
@@ -502,9 +498,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
if (unlikely(dma_mapping_error(tx->dev, addr)))
goto err;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
- ++pending_packet->num_bufs;
+ dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
+ dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ ++pkt->num_bufs;
gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
completion_tag,
@@ -512,8 +508,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
}
for (i = 0; i < shinfo->nr_frags; i++) {
- struct gve_tx_dma_buf *buf =
- &pending_packet->bufs[pending_packet->num_bufs];
const skb_frag_t *frag = &shinfo->frags[i];
bool is_eop = i == (shinfo->nr_frags - 1);
u32 len = skb_frag_size(frag);
@@ -523,9 +517,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
if (unlikely(dma_mapping_error(tx->dev, addr)))
goto err;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
- ++pending_packet->num_bufs;
+ dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
+ dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ ++pkt->num_bufs;
gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
completion_tag, is_eop, is_gso);
@@ -552,22 +546,23 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
return 0;
err:
- for (i = 0; i < pending_packet->num_bufs; i++) {
- struct gve_tx_dma_buf *buf = &pending_packet->bufs[i];
-
+ for (i = 0; i < pkt->num_bufs; i++) {
if (i == 0) {
- dma_unmap_single(tx->dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len),
+ dma_unmap_single(tx->dev,
+ dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
DMA_TO_DEVICE);
} else {
- dma_unmap_page(tx->dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len), DMA_TO_DEVICE);
+ dma_unmap_page(tx->dev,
+ dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
+ DMA_TO_DEVICE);
}
}
- pending_packet->skb = NULL;
- pending_packet->num_bufs = 0;
- gve_free_pending_packet(tx, pending_packet);
+ pkt->skb = NULL;
+ pkt->num_bufs = 0;
+ gve_free_pending_packet(tx, pkt);
return -1;
}
@@ -725,12 +720,12 @@ static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
static void remove_from_list(struct gve_tx_ring *tx,
struct gve_index_list *list,
- struct gve_tx_pending_packet_dqo *pending_packet)
+ struct gve_tx_pending_packet_dqo *pkt)
{
s16 prev_index, next_index;
- prev_index = pending_packet->prev;
- next_index = pending_packet->next;
+ prev_index = pkt->prev;
+ next_index = pkt->next;
if (prev_index == -1) {
/* Node is head */
@@ -747,21 +742,18 @@ static void remove_from_list(struct gve_tx_ring *tx,
}
static void gve_unmap_packet(struct device *dev,
- struct gve_tx_pending_packet_dqo *pending_packet)
+ struct gve_tx_pending_packet_dqo *pkt)
{
- struct gve_tx_dma_buf *buf;
int i;
/* SKB linear portion is guaranteed to be mapped */
- buf = &pending_packet->bufs[0];
- dma_unmap_single(dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len), DMA_TO_DEVICE);
- for (i = 1; i < pending_packet->num_bufs; i++) {
- buf = &pending_packet->bufs[i];
- dma_unmap_page(dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len), DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
+ dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
+ for (i = 1; i < pkt->num_bufs; i++) {
+ dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
}
- pending_packet->num_bufs = 0;
+ pkt->num_bufs = 0;
}
/* Completion types and expected behavior:
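On the DQO side the per-fragment unmap state becomes arrays of the same macros. That is legal because dma_unmap_addr(pkt, dma[i]) is pure token splicing: the macro pastes whatever it is given after "->", so dma[i] indexes the array declared by DEFINE_DMA_UNMAP_ADDR(dma[N]). Sketch:

struct pending_pkt_sketch {
	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);	/* dma_addr_t dma[N] */
	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);	/* __u32 len[N] */
	u16 num_bufs;
};

/* With map state compiled in, these expand to pkt->dma[i] = (addr)
 * and pkt->len[i] = (len); without it, they compile away entirely. */
dma_unmap_addr_set(pkt, dma[i], addr);
dma_unmap_len_set(pkt, len[i], len);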
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 93f3dcbeeea9..45ff7a9ab5f9 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -18,12 +18,16 @@ void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
{
+ unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2,
+ num_online_cpus());
int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
struct gve_tx_ring *tx = &priv->tx[queue_idx];
block->tx = tx;
tx->ntfy_id = ntfy_idx;
+ netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus),
+ queue_idx);
}
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 37b605fed32c..c84ef494bd60 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -998,7 +998,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
hip04_config_fifo(priv);
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
hip04_update_mac_address(ndev);
ret = hip04_alloc_ring(ndev, d);
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 22bf914f2dbd..a6c18b6527f9 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -427,7 +427,7 @@ static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv)
}
static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv,
- unsigned char *mac)
+ const unsigned char *mac)
{
u32 reg;
@@ -555,7 +555,7 @@ static int hisi_femac_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(skaddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, skaddr->sa_data);
dev->addr_assign_type &= ~NET_ADDR_RANDOM;
hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
@@ -841,7 +841,7 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
(unsigned long)phy->phy_id,
phy_modes(phy->interface));
- ret = of_get_mac_address(node, ndev->dev_addr);
+ ret = of_get_ethdev_address(node, ndev);
if (ret) {
eth_hw_addr_random(ndev);
dev_warn(dev, "using random MAC address %pM\n",
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index c1aae0fca5e9..d7e62eca050f 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -429,7 +429,7 @@ static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
static void hix5hd2_hw_set_mac_addr(struct net_device *dev)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
- unsigned char *mac = dev->dev_addr;
+ const unsigned char *mac = dev->dev_addr;
u32 val;
val = mac[1] | (mac[0] << 8);
@@ -1219,7 +1219,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
goto out_phy_node;
}
- ret = of_get_mac_address(node, ndev->dev_addr);
+ ret = of_get_ethdev_address(node, ndev);
if (ret) {
eth_hw_addr_random(ndev);
netdev_warn(ndev, "using random MAC address %pM\n",
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 2b7db1c22321..d72657444ef3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -499,7 +499,7 @@ struct hnae_ae_ops {
u32 *tx_usecs_high, u32 *rx_usecs_high);
void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
int (*get_mac_addr)(struct hnae_handle *handle, void **p);
- int (*set_mac_addr)(struct hnae_handle *handle, void *p);
+ int (*set_mac_addr)(struct hnae_handle *handle, const void *p);
int (*add_uc_addr)(struct hnae_handle *handle,
const unsigned char *addr);
int (*rm_uc_addr)(struct hnae_handle *handle,
@@ -558,7 +558,7 @@ struct hnae_handle {
enum hnae_media_type media_type;
struct list_head node; /* list to hnae_ae_dev->handle_list */
struct hnae_buf_ops *bops; /* operation for the buffer */
- struct hnae_queue **qs; /* array base of all queues */
+ struct hnae_queue *qs[]; /* flexible array of all queues */
};
#define ring_to_dev(ring) ((ring)->q->dev->dev)
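Turning qs into a flexible array member pairs with the struct_size() allocation in the hns_ae_adapt.c hunk below: it replaces the old trick of over-allocating and pointing qs just past the struct, which aliased memory the type system knew nothing about. The idiom, sketched:

#include <linux/overflow.h>

struct handle_sketch {
	int num_qs;
	struct hnae_queue *qs[];	/* flexible array member, must be last */
};

/* struct_size(h, qs, n) == sizeof(*h) + n * sizeof(h->qs[0]),
 * with the multiply and add checked for overflow. */
struct handle_sketch *h = kzalloc(struct_size(h, qs, n), GFP_KERNEL);

if (!h)
	return -ENOMEM;
h->num_qs = n;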
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 75e4ec569da8..bc3e406f0139 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -81,8 +81,8 @@ static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id);
qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id);
- vf_cb = kzalloc(sizeof(*vf_cb) +
- qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
+ vf_cb = kzalloc(struct_size(vf_cb, ae_handle.qs, qnum_per_vf),
+ GFP_KERNEL);
if (unlikely(!vf_cb)) {
dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n");
ae_handle = ERR_PTR(-ENOMEM);
@@ -108,7 +108,6 @@ static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
goto vf_id_err;
}
- ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);
for (i = 0; i < qnum_per_vf; i++) {
ae_handle->qs[i] = &ring_pair_cb->q;
ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
@@ -207,7 +206,7 @@ static void hns_ae_fini_queue(struct hnae_queue *q)
hns_rcb_reset_ring_hw(q);
}
-static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p)
+static int hns_ae_set_mac_address(struct hnae_handle *handle, const void *p)
{
int ret;
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index f387a859a201..8f391e2adcc0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -450,7 +450,7 @@ static void hns_gmac_update_stats(void *mac_drv)
+= dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
}
-static void hns_gmac_set_mac_addr(void *mac_drv, char *mac_addr)
+static void hns_gmac_set_mac_addr(void *mac_drv, const char *mac_addr)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index f41379de2186..7edf8569514c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -240,7 +240,7 @@ int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num)
*@addr:mac address
*/
int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb,
- u32 vmid, char *addr)
+ u32 vmid, const char *addr)
{
int ret;
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 8943ffab4418..e3bb05959ba9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -348,7 +348,7 @@ struct mac_driver {
/*disable mac when disable nic or dsaf*/
void (*mac_disable)(void *mac_drv, enum mac_commom_mode mode);
/* config mac address*/
- void (*set_mac_addr)(void *mac_drv, char *mac_addr);
+ void (*set_mac_addr)(void *mac_drv, const char *mac_addr);
/*adjust mac mode of port,include speed and duplex*/
int (*adjust_link)(void *mac_drv, enum mac_speed speed,
u32 full_duplex);
@@ -425,7 +425,8 @@ int hns_mac_init(struct dsaf_device *dsaf_dev);
void mac_adjust_link(struct net_device *net_dev);
bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
-int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid,
+ const char *addr);
int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
u32 port_num, char *addr, bool enable);
int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index cba04bfa0b3f..5526a10caac5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -210,7 +210,7 @@ struct hnae_vf_cb {
u8 port_index;
struct hns_mac_cb *mac_cb;
struct dsaf_device *dsaf_dev;
- struct hnae_handle ae_handle; /* must be the last number */
+ struct hnae_handle ae_handle; /* must be the last member */
};
struct dsaf_int_xge_src {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 401fef5f1d07..fc26ffaae620 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -255,7 +255,7 @@ static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en)
dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin);
}
-static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr)
+static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, const char *mac_addr)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 343c605c4be8..22a463e15678 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1194,7 +1194,7 @@ static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
return ret;
}
- memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, mac_addr->sa_data);
return 0;
}
@@ -1212,7 +1212,7 @@ static void hns_init_mac_addr(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
+ if (device_get_ethdev_address(priv->dev, ndev)) {
eth_hw_addr_random(ndev);
dev_warn(priv->dev, "No valid mac, use random mac %pM",
ndev->dev_addr);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index eef1b2764d34..67b0bf310daa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -10,6 +10,27 @@ static LIST_HEAD(hnae3_ae_algo_list);
static LIST_HEAD(hnae3_client_list);
static LIST_HEAD(hnae3_ae_dev_list);
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
+{
+ const struct pci_device_id *pci_id;
+ struct hnae3_ae_dev *ae_dev;
+
+ if (!ae_algo)
+ return;
+
+ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
+ pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+ if (!pci_id)
+ continue;
+ if (IS_ENABLED(CONFIG_PCI_IOV))
+ pci_disable_sriov(ae_dev->pdev);
+ }
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
+
/* we are keeping things simple and using a single lock for all the
* lists. This is non-critical code, so other updates, if they happen
* in parallel, can wait.
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 6ccb0109412b..30a3954b78e0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -298,6 +298,7 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_MAC_TNL_STATUS,
HNAE3_DBG_CMD_SERV_INFO,
HNAE3_DBG_CMD_UMV_INFO,
+ HNAE3_DBG_CMD_PAGE_POOL_INFO,
HNAE3_DBG_CMD_UNKNOWN,
};
@@ -594,7 +595,7 @@ struct hnae3_ae_ops {
u32 *tx_usecs_high, u32 *rx_usecs_high);
void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p);
- int (*set_mac_addr)(struct hnae3_handle *handle, void *p,
+ int (*set_mac_addr)(struct hnae3_handle *handle, const void *p,
bool is_first);
int (*do_ioctl)(struct hnae3_handle *handle,
struct ifreq *ifr, int cmd);
@@ -758,7 +759,6 @@ struct hnae3_tc_info {
u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
u16 tqp_count[HNAE3_MAX_TC];
u16 tqp_offset[HNAE3_MAX_TC];
- unsigned long tc_en; /* bitmap of TC enabled */
u8 num_tc; /* Total number of enabled TCs */
bool mqprio_active;
};
@@ -860,6 +860,7 @@ struct hnae3_handle {
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index a1555f074e06..b26d43c9c088 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -336,6 +336,13 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.buf_len = HNS3_DBG_READ_LEN,
.init = hns3_dbg_common_file_init,
},
+ {
+ .name = "page_pool_info",
+ .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
};
static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
@@ -941,6 +948,69 @@ static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
return 0;
}
+static const struct hns3_dbg_item page_pool_info_items[] = {
+ { "QUEUE_ID", 2 },
+ { "ALLOCATE_CNT", 2 },
+ { "FREE_CNT", 6 },
+ { "POOL_SIZE(PAGE_NUM)", 2 },
+ { "ORDER", 2 },
+ { "NUMA_ID", 2 },
+ { "MAX_LEN", 2 },
+};
+
+static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
+ char **result, u32 index)
+{
+ u32 j = 0;
+
+ sprintf(result[j++], "%u", index);
+ sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt);
+ sprintf(result[j++], "%u",
+ atomic_read(&ring->page_pool->pages_state_release_cnt));
+ sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
+ sprintf(result[j++], "%u", ring->page_pool->p.order);
+ sprintf(result[j++], "%d", ring->page_pool->p.nid);
+ sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024);
+}
+
+static int
+hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(page_pool_info_items)];
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ struct hns3_enet_ring *ring;
+ int pos = 0;
+ u32 i;
+
+ if (!priv->ring) {
+ dev_err(&h->pdev->dev, "priv->ring is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items,
+ NULL, ARRAY_SIZE(page_pool_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EPERM;
+ ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
+ hns3_dump_page_pool_info(ring, result, i);
+ hns3_dbg_fill_content(content, sizeof(content),
+ page_pool_info_items,
+ (const char **)result,
+ ARRAY_SIZE(page_pool_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ return 0;
+}
+
static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index)
{
u32 i;
@@ -982,6 +1052,10 @@ static const struct hns3_dbg_func hns3_dbg_cmd_func[] = {
.cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
.dbg_dump = hns3_dbg_tx_queue_info,
},
+ {
+ .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ .dbg_dump = hns3_dbg_page_pool_info,
+ },
};
static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
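The two counters dumped per queue form a producer/consumer pair: pages_state_hold_cnt counts pages the pool has handed out and pages_state_release_cnt counts pages returned to it, so their unsigned difference is the number of in-flight pages. A sketch of that derivation, assuming the page_pool field layout of this kernel generation:

#include <net/page_pool.h>

static u32 foo_page_pool_inflight(const struct page_pool *pool)
{
        u32 hold = READ_ONCE(pool->pages_state_hold_cnt);
        u32 release = atomic_read(&pool->pages_state_release_cnt);

        /* unsigned subtraction stays correct across counter wrap */
        return hold - release;
}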
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index adc54a726661..a2b993d62822 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -623,13 +623,9 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
return ret;
}
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (!test_bit(i, &tc_info->tc_en))
- continue;
-
+ for (i = 0; i < tc_info->num_tc; i++)
netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
tc_info->tqp_offset[i]);
- }
}
ret = netif_set_real_num_tx_queues(netdev, queue_size);
@@ -779,6 +775,11 @@ static int hns3_nic_net_open(struct net_device *netdev)
if (hns3_nic_resetting(netdev))
return -EBUSY;
+ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
+ netdev_warn(netdev, "netdev already open, ignoring repeated open\n");
+ return 0;
+ }
+
netif_carrier_off(netdev);
ret = hns3_nic_set_real_num_queue(netdev);
@@ -1846,7 +1847,6 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
static int hns3_skb_linearize(struct hns3_enet_ring *ring,
struct sk_buff *skb,
- u8 max_non_tso_bd_num,
unsigned int bd_num)
{
/* 'bd_num == UINT_MAX' means the skb's fraglist has a
@@ -1863,8 +1863,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
* will not help.
*/
if (skb->len > HNS3_MAX_TSO_SIZE ||
- (!skb_is_gso(skb) && skb->len >
- HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
+ (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.hw_limitation++;
u64_stats_update_end(&ring->syncp);
@@ -1899,8 +1898,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
goto out;
}
- if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
- bd_num))
+ if (hns3_skb_linearize(ring, skb, bd_num))
return -ENOMEM;
bd_num = hns3_tx_bd_count(skb->len);
@@ -2286,7 +2284,7 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
return ret;
}
- ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
+ eth_hw_addr_set(netdev, mac_addr->sa_data);
return 0;
}
@@ -3257,6 +3255,7 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
{
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc[i].addr = 0;
+ ring->desc_cb[i].refill = 0;
}
static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
@@ -3335,6 +3334,7 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
ring->desc_cb[i].page_offset);
+ ring->desc_cb[i].refill = 1;
return 0;
}
@@ -3364,6 +3364,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
{
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc_cb[i] = *res_cb;
+ ring->desc_cb[i].refill = 1;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
@@ -3372,6 +3373,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
ring->desc_cb[i].reuse_flag = 0;
+ ring->desc_cb[i].refill = 1;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
@@ -3478,10 +3480,14 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
int ntc = ring->next_to_clean;
int ntu = ring->next_to_use;
+ if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
+ return ring->desc_num;
+
return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
-static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+/* Return true if there is any allocation failure */
+static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
int cleand_count)
{
struct hns3_desc_cb *desc_cb;
@@ -3506,7 +3512,10 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
hns3_rl_err(ring_to_netdev(ring),
"alloc rx buffer failed: %d\n",
ret);
- break;
+
+ writel(i, ring->tqp->io_base +
+ HNS3_RING_RX_RING_HEAD_REG);
+ return true;
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
@@ -3519,6 +3528,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
}
writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+ return false;
}
static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
@@ -3823,6 +3833,7 @@ static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
{
ring->desc[ring->next_to_clean].rx.bd_base_info &=
cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+ ring->desc_cb[ring->next_to_clean].refill = 0;
ring->next_to_clean += 1;
if (unlikely(ring->next_to_clean == ring->desc_num))
@@ -4169,6 +4180,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int unused_count = hns3_desc_unused(ring);
+ bool failure = false;
int recv_pkts = 0;
int err;
@@ -4177,9 +4189,9 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
while (recv_pkts < budget) {
/* Reuse or realloc buffers */
if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
- hns3_nic_alloc_rx_buffers(ring, unused_count);
- unused_count = hns3_desc_unused(ring) -
- ring->pending_buf;
+ failure = failure ||
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
+ unused_count = 0;
}
/* Poll one pkt */
@@ -4198,11 +4210,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
}
out:
- /* Make all data has been write before submit */
- if (unused_count > 0)
- hns3_nic_alloc_rx_buffers(ring, unused_count);
-
- return recv_pkts;
+ return failure ? budget : recv_pkts;
}
static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
@@ -4865,12 +4873,9 @@ static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
struct hnae3_tc_info *tc_info = &kinfo->tc_info;
int i;
- for (i = 0; i < HNAE3_MAX_TC; i++) {
+ for (i = 0; i < tc_info->num_tc; i++) {
int j;
- if (!test_bit(i, &tc_info->tc_en))
- continue;
-
for (j = 0; j < tc_info->tqp_count[i]; j++) {
struct hnae3_queue *q;
@@ -4935,7 +4940,7 @@ static int hns3_init_mac_addr(struct net_device *netdev)
dev_warn(priv->dev, "using random MAC address %pM\n",
netdev->dev_addr);
} else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
- ether_addr_copy(netdev->dev_addr, mac_addr_temp);
+ eth_hw_addr_set(netdev, mac_addr_temp);
ether_addr_copy(netdev->perm_addr, mac_addr_temp);
} else {
return 0;
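The refill bit added in these hunks resolves a classic ring-buffer ambiguity: once next_to_clean catches up with next_to_use, the two indices alone cannot distinguish an entirely drained ring from an entirely full one, which matters now that a failed buffer allocation can leave slots without buffers. Marking whether the descriptor under next_to_clean still has a buffer attached breaks the tie. A self-contained toy model, assuming simplified fields rather than the real hns3 layout:

struct toy_ring {
        unsigned int desc_num;          /* ring size */
        unsigned int next_to_use;       /* producer index */
        unsigned int next_to_clean;     /* consumer index */
        unsigned char refill[64];       /* 1 = buffer attached to slot */
};

static unsigned int toy_desc_unused(const struct toy_ring *r)
{
        unsigned int ntc = r->next_to_clean;
        unsigned int ntu = r->next_to_use;

        /* equal indices with no buffer under ntc: fully drained */
        if (ntc == ntu && !r->refill[ntc])
                return r->desc_num;

        return ((ntc >= ntu) ? 0 : r->desc_num) + ntc - ntu;
}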
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 6162d9f88e37..f09a61d9c626 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -186,11 +186,9 @@ enum hns3_nic_state {
#define HNS3_MAX_BD_SIZE 65535
#define HNS3_MAX_TSO_BD_NUM 63U
-#define HNS3_MAX_TSO_SIZE \
- (HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
+#define HNS3_MAX_TSO_SIZE 1048576U
+#define HNS3_MAX_NON_TSO_SIZE 9728U
-#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
- (HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))
#define HNS3_VECTOR_GL0_OFFSET 0x100
#define HNS3_VECTOR_GL1_OFFSET 0x200
@@ -332,6 +330,7 @@ struct hns3_desc_cb {
u32 length; /* length of the buffer */
u16 reuse_flag;
+ u16 refill;
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 7ea511d59e91..5ebd96f6833d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -334,7 +334,8 @@ static void hns3_selftest_prepare(struct net_device *ndev,
#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* Disable the vlan filter because selftest does not support it */
- if (h->ae_algo->ops->enable_vlan_filter)
+ if (h->ae_algo->ops->enable_vlan_filter &&
+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
h->ae_algo->ops->enable_vlan_filter(h, false);
#endif
@@ -359,7 +360,8 @@ static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
h->ae_algo->ops->halt_autoneg(h, false);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
- if (h->ae_algo->ops->enable_vlan_filter)
+ if (h->ae_algo->ops->enable_vlan_filter &&
+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
h->ae_algo->ops->enable_vlan_filter(h, true);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index ac9b69513332..9c2eeaa82294 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -467,7 +467,7 @@ err_csq:
return ret;
}
-static int hclge_firmware_compat_config(struct hclge_dev *hdev)
+static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en)
{
struct hclge_firmware_compat_cmd *req;
struct hclge_desc desc;
@@ -475,13 +475,16 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
- req = (struct hclge_firmware_compat_cmd *)desc.data;
+ if (en) {
+ req = (struct hclge_firmware_compat_cmd *)desc.data;
- hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
- hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
- if (hnae3_dev_phy_imp_supported(hdev))
- hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
- req->compat = cpu_to_le32(compat);
+ hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
+ if (hnae3_dev_phy_imp_supported(hdev))
+ hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
+
+ req->compat = cpu_to_le32(compat);
+ }
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -538,7 +541,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
/* ask the firmware to enable some features, the driver can work
* without them.
*/
- ret = hclge_firmware_compat_config(hdev);
+ ret = hclge_firmware_compat_config(hdev, true);
if (ret)
dev_warn(&hdev->pdev->dev,
"Firmware compatible features not enabled(%d).\n",
@@ -568,6 +571,8 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
void hclge_cmd_uninit(struct hclge_dev *hdev)
{
+ hclge_firmware_compat_config(hdev, false);
+
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
/* wait to ensure that the firmware completes the possible left
* over commands.
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 4a619e5d3f35..91cb578f56b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -137,6 +137,15 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
*changed = true;
break;
case IEEE_8021QAZ_TSA_ETS:
+ /* The hardware will switch to sp mode if a TC's bandwidth is
+ * 0, so an ets TC's bandwidth must be greater than 0.
+ */
+ if (!ets->tc_tx_bw[i]) {
+ dev_err(&hdev->pdev->dev,
+ "tc%u ets bw cannot be 0\n", i);
+ return -EINVAL;
+ }
+
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
HCLGE_SCH_MODE_DWRR)
*changed = true;
@@ -247,6 +256,10 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
}
hclge_tm_schd_info_update(hdev, num_tc);
+ if (num_tc > 1)
+ hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+ else
+ hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
ret = hclge_ieee_ets_to_tm_info(hdev, ets);
if (ret)
@@ -306,8 +319,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
u8 i, j, pfc_map, *prio_tc;
int ret;
- if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
- hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
if (pfc->pfc_en == hdev->tm_info.pfc_en)
@@ -441,8 +453,6 @@ static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
struct tc_mqprio_qopt_offload *mqprio_qopt)
{
- int i;
-
memset(tc_info, 0, sizeof(*tc_info));
tc_info->num_tc = mqprio_qopt->qopt.num_tc;
memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
@@ -451,9 +461,6 @@ static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
sizeof_field(struct hnae3_tc_info, tqp_count));
memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
sizeof_field(struct hnae3_tc_info, tqp_offset));
-
- for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
- set_bit(tc_info->prio_tc[i], &tc_info->tc_en);
}
static int hclge_config_tc(struct hclge_dev *hdev,
@@ -519,12 +526,17 @@ static int hclge_setup_tc(struct hnae3_handle *h,
return hclge_notify_init_up(hdev);
err_out:
- /* roll-back */
- memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
- if (hclge_config_tc(hdev, &kinfo->tc_info))
- dev_err(&hdev->pdev->dev,
- "failed to roll back tc configuration\n");
-
+ if (!tc) {
+ dev_warn(&hdev->pdev->dev,
+ "failed to destroy mqprio, will active after reset, ret = %d\n",
+ ret);
+ } else {
+ /* roll-back */
+ memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
+ if (hclge_config_tc(hdev, &kinfo->tc_info))
+ dev_err(&hdev->pdev->dev,
+ "failed to roll back tc configuration\n");
+ }
hclge_notify_init_up(hdev);
return ret;
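For context, the zero-bandwidth rule added above composes with the usual 802.1Qaz constraint that the ETS weights of the enabled TCs sum to 100. A hedged sketch over dcbnl's struct ieee_ets (foo_validate_ets_bw is hypothetical; the driver's full validation also covers TC counts and priority maps):

#include <linux/dcbnl.h>
#include <linux/errno.h>

static int foo_validate_ets_bw(const struct ieee_ets *ets, u8 num_tc)
{
        unsigned int total = 0;
        u8 i;

        for (i = 0; i < num_tc; i++) {
                if (ets->tc_tsa[i] != IEEE_8021QAZ_TSA_ETS)
                        continue;
                /* hardware falls back to strict priority on weight 0 */
                if (!ets->tc_tx_bw[i])
                        return -EINVAL;
                total += ets->tc_tx_bw[i];
        }

        /* if any TC uses ETS, the shares must sum to 100 percent */
        return (total == 0 || total == 100) ? 0 : -EINVAL;
}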
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index a983d012e26f..f0aa4fbd2200 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -719,9 +719,9 @@ static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
sprintf(result[(*index)++], "%6u", para->rate);
}
-static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
+static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
+ char *buf, int len)
{
- char data_str[ARRAY_SIZE(tm_pg_items)][HCLGE_DBG_DATA_STR_LEN];
struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
@@ -729,8 +729,10 @@ static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
int pos = 0;
int ret;
- for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++)
- result[i] = &data_str[i][0];
+ for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
+ result[i] = data_str;
+ data_str += HCLGE_DBG_DATA_STR_LEN;
+ }
hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
NULL, ARRAY_SIZE(tm_pg_items));
@@ -781,6 +783,24 @@ static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
return 0;
}
+static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
+{
+ char *data_str;
+ int ret;
+
+ data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
+ HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
+
+ if (!data_str)
+ return -ENOMEM;
+
+ ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
+
+ kfree(data_str);
+
+ return ret;
+}
+
static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
{
struct hclge_tm_shaper_para shaper_para;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
index 59b0ae7d59e0..4c441e6a5082 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -119,8 +119,8 @@ int hclge_devlink_init(struct hclge_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
- devlink_reload_enable(devlink);
return 0;
}
@@ -128,8 +128,6 @@ void hclge_devlink_uninit(struct hclge_dev *hdev)
{
struct devlink *devlink = hdev->devlink;
- devlink_reload_disable(devlink);
-
devlink_unregister(devlink);
devlink_free(devlink);
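These devlink hunks track the core API rework: reload capability is now declared up front via devlink_set_features(), and devlink_register() simply goes last in probe, replacing the old devlink_reload_enable()/devlink_reload_disable() pair. A sketch of the resulting init sequence under those assumptions (foo_devlink_ops is a placeholder whose reload hooks are elided):

#include <net/devlink.h>

static const struct devlink_ops foo_devlink_ops; /* reload_down/up elided */

static struct devlink *foo_devlink_init(struct device *dev)
{
        struct devlink *devlink;

        devlink = devlink_alloc(&foo_devlink_ops, 0, dev);
        if (!devlink)
                return NULL;

        devlink_set_features(devlink, DEVLINK_F_RELOAD);
        devlink_register(devlink); /* last step: instance becomes visible */
        return devlink;
}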
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 718c16d686fa..93aa7f2bdc13 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1560,8 +1560,11 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure TM QCN hw errors */
hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false);
- if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE);
+ if (en) {
+ desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN);
desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
+ }
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -2445,12 +2448,12 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
return;
}
- dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n",
+ dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vport(%u), queue_id(%u)\n",
vf_id, q_id);
if (vf_id) {
if (vf_id >= hdev->num_alloc_vport) {
- dev_err(dev, "invalid vf id(%u)\n", vf_id);
+ dev_err(dev, "invalid vport(%u)\n", vf_id);
return;
}
@@ -2463,8 +2466,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
if (ret)
- dev_err(dev, "inform reset to vf(%u) failed %d!\n",
- hdev->vport->vport_id, ret);
+ dev_err(dev, "inform reset to vport(%u) failed %d!\n",
+ vf_id, ret);
} else {
set_bit(HNAE3_FUNC_RESET, reset_requests);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 07987fb8332e..d811eeefe2c0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -50,6 +50,8 @@
#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F
#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F
#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3
+#define HCLGE_TM_QCN_ERR_INT_TYPE 0x29
+#define HCLGE_TM_QCN_FIFO_INT_EN 0xFFFF00
#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
#define HCLGE_NCSI_ERR_INT_EN 0x3
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
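Taken together with the hclge_err.c hunk above, the two new constants make the QCN configuration command always select the error interrupt type while the FIFO-enable bits are ORed in only on enable. A sketch of how the payload word is composed:

static __le32 foo_qcn_cfg_word(bool en)
{
        u32 data0 = HCLGE_TM_QCN_ERR_INT_TYPE;          /* 0x29: type select */

        if (en)
                data0 |= HCLGE_TM_QCN_FIFO_INT_EN;      /* 0xFFFF00 */

        return cpu_to_le32(data0);
}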
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index a0d0fa4f6a24..be6f0a6229aa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3667,7 +3667,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
if (ret) {
dev_err(&hdev->pdev->dev,
"set vf(%u) rst failed %d!\n",
- vport->vport_id, ret);
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+ ret);
return ret;
}
@@ -3682,7 +3683,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
if (ret)
dev_warn(&hdev->pdev->dev,
"inform reset to vf(%u) failed %d!\n",
- vport->vport_id, ret);
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+ ret);
}
return 0;
@@ -4747,6 +4749,24 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
return 0;
}
+static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
+ u8 *hash_algo)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
+ return 0;
+ case ETH_RSS_HASH_XOR:
+ *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
+ return 0;
+ case ETH_RSS_HASH_NO_CHANGE:
+ *hash_algo = vport->rss_algo;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
@@ -4756,30 +4776,27 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
u8 hash_algo;
int ret, i;
+ ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
+ return ret;
+ }
+
/* Set the RSS Hash Key if specified by the user */
if (key) {
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- break;
- case ETH_RSS_HASH_XOR:
- hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
- break;
- case ETH_RSS_HASH_NO_CHANGE:
- hash_algo = vport->rss_algo;
- break;
- default:
- return -EINVAL;
- }
-
ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
if (ret)
return ret;
/* Update the shadow RSS key with the user specified key */
memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
- vport->rss_algo = hash_algo;
+ } else {
+ ret = hclge_set_rss_algo_key(hdev, hash_algo,
+ vport->rss_hash_key);
+ if (ret)
+ return ret;
}
+ vport->rss_algo = hash_algo;
/* Update the shadow RSS table with user specified qids */
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
@@ -6633,10 +6650,13 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
u16 tqps;
+ /* To stay consistent with the user's configuration, subtract 1 when
+ * printing 'vf', because the vf id from ethtool is offset by 1 for vfs.
+ */
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
- "Error: vf id (%u) > max vf num (%u)\n",
- vf, hdev->num_req_vfs);
+ "Error: vf id (%u) should be less than %u\n",
+ vf - 1, hdev->num_req_vfs);
return -EINVAL;
}
@@ -8699,15 +8719,8 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
}
/* check if we just hit the duplicate */
- if (!ret) {
- dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
- vport->vport_id, addr);
- return 0;
- }
-
- dev_err(&hdev->pdev->dev,
- "PF failed to add unicast entry(%pM) in the MAC table\n",
- addr);
+ if (!ret)
+ return -EEXIST;
return ret;
}
@@ -8876,7 +8889,13 @@ static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
} else {
set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
&vport->state);
- break;
+
+ /* If one unicast mac address already exists in hardware,
+ * we still need to check whether the other unicast mac
+ * addresses are new ones that can be added.
+ */
+ if (ret != -EEXIST)
+ break;
}
}
}
@@ -9423,7 +9442,7 @@ int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
return 0;
}
-static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
+static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
bool is_first)
{
const unsigned char *new_addr = (const unsigned char *)p;
@@ -9825,6 +9844,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (is_kill && !vlan_id)
return 0;
+ if (vlan_id >= VLAN_N_VID)
+ return -EINVAL;
+
ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -10731,7 +10753,8 @@ static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
return 0;
}
-static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
+static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
+ u8 *reset_status)
{
struct hclge_reset_tqp_queue_cmd *req;
struct hclge_desc desc;
@@ -10749,7 +10772,9 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return ret;
}
- return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+ *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+
+ return 0;
}
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
@@ -10768,7 +10793,7 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u16 reset_try_times = 0;
- int reset_status;
+ u8 reset_status;
u16 queue_gid;
int ret;
u16 i;
@@ -10784,7 +10809,11 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
}
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- reset_status = hclge_get_reset_status(hdev, queue_gid);
+ ret = hclge_get_reset_status(hdev, queue_gid,
+ &reset_status);
+ if (ret)
+ return ret;
+
if (reset_status)
break;
@@ -11477,11 +11506,11 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
struct hclge_vport *vport = &hdev->vport[i];
int ret;
- /* Send cmd to clear VF's FUNC_RST_ING */
+ /* Send cmd to clear vport's FUNC_RST_ING */
ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
if (ret)
dev_warn(&hdev->pdev->dev,
- "clear vf(%u) rst failed %d!\n",
+ "clear vport(%u) rst failed %d!\n",
vport->vport_id, ret);
}
}
@@ -12795,8 +12824,12 @@ static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
continue;
if (vport->vf_info.trusted) {
- uc_en = vport->vf_info.request_uc_en > 0;
- mc_en = vport->vf_info.request_mc_en > 0;
+ uc_en = vport->vf_info.request_uc_en > 0 ||
+ vport->overflow_promisc_flags &
+ HNAE3_OVERFLOW_UPE;
+ mc_en = vport->vf_info.request_mc_en > 0 ||
+ vport->overflow_promisc_flags &
+ HNAE3_OVERFLOW_MPE;
}
bc_en = vport->vf_info.request_bc_en > 0;
@@ -13060,6 +13093,7 @@ static int hclge_init(void)
static void hclge_exit(void)
{
+ hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
destroy_workqueue(hclge_wq);
}
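Back in the RSS hunks of this file, the refactor splits hfunc parsing from key programming so the hardware is reprogrammed even when only the hash algorithm changes (reusing the shadow key), and the shadow rss_algo is committed only after the command succeeds. A condensed sketch of that flow; foo_* helpers, struct foo_vport, and FOO_RSS_KEY_SIZE are hypothetical stand-ins:

static int foo_set_rxfh(struct foo_vport *vport, const u8 *key, u8 hfunc)
{
        u8 algo;
        int ret;

        ret = foo_parse_rss_hfunc(vport, hfunc, &algo);
        if (ret)
                return ret;

        /* program hw with the new key, or re-program the shadow key
         * when only the algorithm changed
         */
        ret = foo_set_rss_algo_key(vport, algo,
                                   key ? key : vport->rss_hash_key);
        if (ret)
                return ret;

        if (key)
                memcpy(vport->rss_hash_key, key, FOO_RSS_KEY_SIZE);
        vport->rss_algo = algo; /* commit shadow state only on success */
        return 0;
}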
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 2ce5302c5956..65d78ee4d65a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -566,7 +566,7 @@ static int hclge_reset_vf(struct hclge_vport *vport)
struct hclge_dev *hdev = vport->back;
dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
- vport->vport_id);
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
return hclge_func_reset_cmd(hdev, vport->vport_id);
}
@@ -590,9 +590,17 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
struct hclge_respond_to_vf_msg *resp_msg)
{
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
u16 queue_id, qid_in_pf;
memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
+ if (queue_id >= handle->kinfo.num_tqps) {
+ dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
+ queue_id, mbx_req->mbx_src_vfid);
+ return;
+ }
+
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
resp_msg->len = sizeof(qid_in_pf);
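The queue-id check above applies the standing rule for PF/VF mailboxes: any index arriving from a VF is untrusted input and must be validated against that VF's own resource range before being translated to a global id. A minimal sketch of the check in isolation:

static bool foo_vf_queue_id_valid(u16 queue_id, u16 vf_num_tqps)
{
        /* mailbox payloads originate in an untrusted VF driver */
        return queue_id < vf_num_tqps;
}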
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 78d5bf1ea561..95074e91a846 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -581,7 +581,7 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
+ "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
vport->vport_id, shap_cfg_cmd->qs_id,
max_tx_rate, ret);
return ret;
@@ -687,12 +687,10 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
for (i = 0; i < HNAE3_MAX_TC; i++) {
if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
- set_bit(i, &kinfo->tc_info.tc_en);
kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
} else {
/* Set to default queue if TC is disabled */
- clear_bit(i, &kinfo->tc_info.tc_en);
kinfo->tc_info.tqp_offset[i] = 0;
kinfo->tc_info.tqp_count[i] = 1;
}
@@ -729,14 +727,6 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
hdev->tm_info.prio_tc[i] =
(i >= hdev->tm_info.num_tc) ? 0 : i;
-
- /* DCB is enabled if we have more than 1 TC or pfc_en is
- * non-zero.
- */
- if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
- else
- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
@@ -762,15 +752,17 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
for (k = 0; k < hdev->tm_info.num_tc; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
+ for (; k < HNAE3_MAX_TC; k++)
+ hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
}
}
static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
{
- if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
+ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
dev_warn(&hdev->pdev->dev,
- "DCB is disable, but last mode is FC_PFC\n");
+ "Only 1 tc used, but last mode is FC_PFC\n");
hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
@@ -796,7 +788,7 @@ static void hclge_update_fc_mode(struct hclge_dev *hdev)
}
}
-static void hclge_pfc_info_init(struct hclge_dev *hdev)
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
hclge_update_fc_mode(hdev);
@@ -812,7 +804,7 @@ static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
hclge_tm_vport_info_update(hdev);
- hclge_pfc_info_init(hdev);
+ hclge_tm_pfc_info_update(hdev);
}
static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
@@ -1558,19 +1550,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
hclge_tm_schd_info_init(hdev);
}
-void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
-{
- /* DCB is enabled if we have more than 1 TC or pfc_en is
- * non-zero.
- */
- if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
- else
- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
-
- hclge_pfc_info_init(hdev);
-}
-
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
int ret;
@@ -1616,7 +1595,7 @@ int hclge_tm_vport_map_update(struct hclge_dev *hdev)
if (ret)
return ret;
- if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
+ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
return 0;
return hclge_tm_bp_setup(hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
index d60cc9426f70..fdc19868b818 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
@@ -121,8 +121,8 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
- devlink_reload_enable(devlink);
return 0;
}
@@ -130,8 +130,6 @@ void hclgevf_devlink_uninit(struct hclgevf_dev *hdev)
{
struct devlink *devlink = hdev->devlink;
- devlink_reload_disable(devlink);
-
devlink_unregister(devlink);
devlink_free(devlink);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index a69e892277b3..3306050ad72c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -816,40 +816,56 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
return 0;
}
+static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
+ u8 *hash_algo)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+ return 0;
+ case ETH_RSS_HASH_XOR:
+ *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+ return 0;
+ case ETH_RSS_HASH_NO_CHANGE:
+ *hash_algo = hdev->rss_cfg.hash_algo;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u8 hash_algo;
int ret, i;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
+ if (ret)
+ return ret;
+
+ /* Set the RSS Hash Key if specified by the user */
if (key) {
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- rss_cfg->hash_algo =
- HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
- break;
- case ETH_RSS_HASH_XOR:
- rss_cfg->hash_algo =
- HCLGEVF_RSS_HASH_ALGO_SIMPLE;
- break;
- case ETH_RSS_HASH_NO_CHANGE:
- break;
- default:
- return -EINVAL;
- }
-
- ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
- key);
- if (ret)
+ ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "invalid hfunc type %u\n", hfunc);
return ret;
+ }
/* Update the shadow RSS key with the user specified key */
memcpy(rss_cfg->rss_hash_key, key,
HCLGEVF_RSS_KEY_SIZE);
+ } else {
+ ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
+ rss_cfg->rss_hash_key);
+ if (ret)
+ return ret;
}
+ rss_cfg->hash_algo = hash_algo;
}
/* update the shadow RSS table with user specified qids */
@@ -1333,7 +1349,7 @@ static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
-static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
+static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
bool is_first)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -2257,9 +2273,9 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
hdev->reset_attempts = 0;
hdev->last_reset_time = jiffies;
- while ((hdev->reset_type =
- hclgevf_get_reset_level(hdev, &hdev->reset_pending))
- != HNAE3_NONE_RESET)
+ hdev->reset_type =
+ hclgevf_get_reset_level(hdev, &hdev->reset_pending);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
hclgevf_reset(hdev);
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 3e54017a2a5b..07fdab58001d 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -354,7 +354,7 @@ static int hns_mdio_reset(struct mii_bus *bus)
if (dev_of_node(bus->parent)) {
if (!mdio_dev->subctrl_vbase) {
- dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");
+ dev_err(&bus->dev, "mdio sys ctl reg has not been mapped\n");
return -ENODEV;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index b2ece3adbc72..657a15447bd0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -754,11 +754,9 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
return err;
}
- hinic_devlink_register(hwdev->devlink_dev);
err = hinic_func_to_func_init(hwdev);
if (err) {
dev_err(&hwif->pdev->dev, "Failed to init mailbox\n");
- hinic_devlink_unregister(hwdev->devlink_dev);
hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
return err;
}
@@ -781,7 +779,7 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
}
hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE);
-
+ hinic_devlink_register(hwdev->devlink_dev);
return 0;
}
@@ -793,6 +791,7 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
{
struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
+ hinic_devlink_unregister(hwdev->devlink_dev);
hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT);
if (!HINIC_IS_VF(hwdev->hwif)) {
@@ -810,8 +809,6 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
hinic_func_to_func_free(hwdev);
- hinic_devlink_unregister(hwdev->devlink_dev);
-
hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 9965e8d5d0a9..f9a766b8ac43 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -656,7 +656,7 @@ static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
err = change_mac_addr(netdev, new_mac);
if (!err)
- memcpy(netdev->dev_addr, new_mac, ETH_ALEN);
+ eth_hw_addr_set(netdev, new_mac);
return err;
}
@@ -1181,6 +1181,7 @@ static int nic_dev_init(struct pci_dev *pdev)
struct net_device *netdev;
struct hinic_hwdev *hwdev;
struct devlink *devlink;
+ u8 addr[ETH_ALEN];
int err, num_qps;
devlink = hinic_devlink_alloc(&pdev->dev);
@@ -1259,11 +1260,12 @@ static int nic_dev_init(struct pci_dev *pdev)
pci_set_drvdata(pdev, netdev);
- err = hinic_port_get_mac(nic_dev, netdev->dev_addr);
+ err = hinic_port_get_mac(nic_dev, addr);
if (err) {
dev_err(&pdev->dev, "Failed to get mac address\n");
goto err_get_mac;
}
+ eth_hw_addr_set(netdev, addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 0696f723228a..3909c6a0af89 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -339,14 +339,13 @@ static const struct net_device_ops sun3_82586_netdev_ops = {
static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr)
{
- int i, size, retval;
+ int size, retval;
if (!request_region(ioaddr, SUN3_82586_TOTAL_SIZE, DRV_NAME))
return -EBUSY;
/* copy in the ethernet address from the prom */
- for(i = 0; i < 6 ; i++)
- dev->dev_addr[i] = idprom->id_ethaddr[i];
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
printk("%s: SUN3 Intel 82586 found at %lx, ",dev->name,dev->base_addr);
@@ -461,7 +460,7 @@ static int init586(struct net_device *dev)
ias_cmd->cmd_cmd = swab16(CMD_IASETUP | CMD_LAST);
ias_cmd->cmd_link = 0xffff;
- memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN);
+ memcpy((char *)&ias_cmd->iaddr,(const char *) dev->dev_addr,ETH_ALEN);
p->scb->cbl_offset = make16(ias_cmd);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index d5df131b183c..bad94e4d50f4 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1741,7 +1741,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
goto out_free;
}
- memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, mac_addr->sa_data);
/* Deregister old MAC in pHYP */
if (port->state == EHEA_PORT_UP) {
@@ -2986,7 +2986,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
SET_NETDEV_DEV(dev, port_dev);
/* initialize net_device structure */
- memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, (u8 *)&port->mac_addr);
dev->netdev_ops = &ehea_netdev_ops;
ehea_set_ethtool_ops(dev);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 664a91af662d..6b3fc8823c54 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1013,7 +1013,7 @@ static int emac_set_mac_address(struct net_device *ndev, void *sa)
mutex_lock(&dev->link_lock);
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
emac_rx_disable(dev);
emac_tx_disable(dev);
@@ -2848,7 +2848,6 @@ static int emac_init_phy(struct emac_instance *dev)
static int emac_init_config(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
- const void *p;
int err;
/* Read config from device-tree */
@@ -2976,13 +2975,12 @@ static int emac_init_config(struct emac_instance *dev)
}
/* Read MAC-address */
- p = of_get_property(np, "local-mac-address", NULL);
- if (p == NULL) {
- printk(KERN_ERR "%pOF: Can't find local-mac-address property\n",
- np);
- return -ENXIO;
+ err = of_get_ethdev_address(np, dev->ndev);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&dev->ofdev->dev, "Can't get valid [local-]mac-address from OF !\n");
+ return err;
}
- memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
/* IAHT and GAHT filter parameterization */
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
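The emac conversion above swaps the open-coded local-mac-address read for of_get_ethdev_address(), which also covers the nvmem-cell MAC source and can therefore legitimately return -EPROBE_DEFER. A sketch of the expected handling; the random-MAC fallback shown is a hypothetical policy, not what emac does:

#include <linux/etherdevice.h>
#include <linux/of_net.h>

static int foo_init_mac(struct device_node *np, struct net_device *ndev)
{
        int err = of_get_ethdev_address(np, ndev);

        if (err == -EPROBE_DEFER)
                return err; /* MAC source not ready yet: retry probe */
        if (err)
                eth_hw_addr_random(ndev); /* driver-specific fallback */
        return 0;
}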
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 3d9b4f99d357..45ba40cf4d07 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -483,17 +483,6 @@ retry:
return rc;
}
-static u64 ibmveth_encode_mac_addr(u8 *mac)
-{
- int i;
- u64 encoded = 0;
-
- for (i = 0; i < ETH_ALEN; i++)
- encoded = (encoded << 8) | mac[i];
-
- return encoded;
-}
-
static int ibmveth_open(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
@@ -553,7 +542,7 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
- mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);
+ mac_address = ether_addr_to_u64(netdev->dev_addr);
rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
adapter->rx_queue.queue_len;
@@ -605,17 +594,13 @@ static int ibmveth_open(struct net_device *netdev)
}
rc = -ENOMEM;
- adapter->bounce_buffer =
- kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
- if (!adapter->bounce_buffer)
- goto out_free_irq;
- adapter->bounce_buffer_dma =
- dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
- netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
- netdev_err(netdev, "unable to map bounce buffer\n");
- goto out_free_bounce_buffer;
+ adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev,
+ netdev->mtu + IBMVETH_BUFF_OH,
+ &adapter->bounce_buffer_dma, GFP_KERNEL);
+ if (!adapter->bounce_buffer) {
+ netdev_err(netdev, "unable to alloc bounce buffer\n");
+ goto out_free_irq;
}
netdev_dbg(netdev, "initial replenish cycle\n");
@@ -627,8 +612,6 @@ static int ibmveth_open(struct net_device *netdev)
return 0;
-out_free_bounce_buffer:
- kfree(adapter->bounce_buffer);
out_free_irq:
free_irq(netdev->irq, netdev);
out_free_buffer_pools:
@@ -702,10 +685,9 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
- dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
- adapter->netdev->mtu + IBMVETH_BUFF_OH,
- DMA_BIDIRECTIONAL);
- kfree(adapter->bounce_buffer);
+ dma_free_coherent(&adapter->vdev->dev,
+ adapter->netdev->mtu + IBMVETH_BUFF_OH,
+ adapter->bounce_buffer, adapter->bounce_buffer_dma);
netdev_dbg(netdev, "close complete\n");
@@ -1483,7 +1465,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
netdev_for_each_mc_addr(ha, netdev) {
/* add the multicast address to the filter table */
u64 mcast_addr;
- mcast_addr = ibmveth_encode_mac_addr(ha->addr);
+ mcast_addr = ether_addr_to_u64(ha->addr);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
@@ -1613,14 +1595,14 @@ static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- mac_address = ibmveth_encode_mac_addr(addr->sa_data);
+ mac_address = ether_addr_to_u64(addr->sa_data);
rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
if (rc) {
netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
return rc;
}
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -1727,7 +1709,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->min_mtu = IBMVETH_MIN_MTU;
netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
- memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
+ eth_hw_addr_set(netdev, mac_addr_p);
if (firmware_has_feature(FW_FEATURE_CMO))
memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
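The removed ibmveth_encode_mac_addr() was an open-coded copy of a generic helper: ether_addr_to_u64() folds the six MAC bytes, big-endian, into the low 48 bits of a u64, which is the representation the hypervisor calls expect here. A sketch of the equivalence:

#include <linux/etherdevice.h>

static u64 foo_mac_to_u64(const u8 mac[ETH_ALEN])
{
        u64 v = 0;
        int i;

        for (i = 0; i < ETH_ALEN; i++)
                v = (v << 8) | mac[i];

        return v; /* identical to ether_addr_to_u64(mac) */
}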
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 8f17096e614d..9d61167ba767 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -4766,8 +4766,7 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
/* crq->change_mac_addr.mac_addr is the requested one
* crq->change_mac_addr_rsp.mac_addr is the returned valid one.
*/
- ether_addr_copy(netdev->dev_addr,
- &crq->change_mac_addr_rsp.mac_addr[0]);
+ eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
ether_addr_copy(adapter->mac_addr,
&crq->change_mac_addr_rsp.mac_addr[0]);
out:
@@ -4898,14 +4897,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
return 0;
}
- if (adapter->failover_pending) {
- adapter->init_done_rc = -EAGAIN;
- netdev_dbg(netdev, "Failover pending, ignoring login response\n");
- complete(&adapter->init_done);
- /* login response buffer will be released on reset */
- return 0;
- }
-
netdev->mtu = adapter->req_mtu - ETH_HLEN;
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
@@ -5731,7 +5722,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
bitmap_set(adapter->map_ids, 0, 1);
ether_addr_copy(adapter->mac_addr, mac_addr_p);
- ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
+ eth_hw_addr_set(netdev, adapter->mac_addr);
netdev->irq = dev->irq;
netdev->netdev_ops = &ibmvnic_netdev_ops;
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index b0b6f90deb7d..0b274d8fa45b 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -313,6 +313,20 @@ config ICE
To compile this driver as a module, choose M here. The module
will be called ice.
+config ICE_SWITCHDEV
+ bool "Switchdev Support"
+ default y
+ depends on ICE && NET_SWITCHDEV
+ help
+ Switchdev support provides internal SRIOV packet steering and switching.
+
+ To enable it on a running kernel, use the devlink tool:
+ # devlink dev eswitch set pci/0000:XX:XX.X mode switchdev
+
+ Say Y here if you want to use Switchdev in the driver.
+
+ If unsure, say N.
+
config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
@@ -335,6 +349,7 @@ config IGC
tristate "Intel(R) Ethernet Controller I225-LM/I225-V support"
default n
depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This driver supports Intel(R) Ethernet Controller I225-LM/I225-V
family of adapters.
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 373eb027b925..5039a2536951 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2259,7 +2259,7 @@ static int e100_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
e100_exec_cb(nic, NULL, e100_setup_iaaddr);
return 0;
@@ -2437,11 +2437,15 @@ static void e100_get_drvinfo(struct net_device *netdev,
sizeof(info->bus_info));
}
-#define E100_PHY_REGS 0x1C
+#define E100_PHY_REGS 0x1D
static int e100_get_regs_len(struct net_device *netdev)
{
struct nic *nic = netdev_priv(netdev);
- return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
+
+ /* We know the number of registers, and the size of the dump buffer.
+ * Calculate the total size in bytes.
+ */
+ return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);
}
static void e100_get_regs(struct net_device *netdev,
@@ -2455,14 +2459,18 @@ static void e100_get_regs(struct net_device *netdev,
buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
ioread8(&nic->csr->scb.cmd_lo) << 16 |
ioread16(&nic->csr->scb.status);
- for (i = E100_PHY_REGS; i >= 0; i--)
- buff[1 + E100_PHY_REGS - i] =
- mdio_read(netdev, nic->mii.phy_id, i);
+ for (i = 0; i < E100_PHY_REGS; i++)
+ /* Note that we read the registers in reverse order. This
+ * ordering is the ABI apparently used by ethtool and other
+ * applications.
+ */
+ buff[1 + i] = mdio_read(netdev, nic->mii.phy_id,
+ E100_PHY_REGS - 1 - i);
memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
e100_exec_cb(nic, NULL, e100_dump);
msleep(10);
- memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
- sizeof(nic->mem->dump_buf));
+ memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf,
+ sizeof(nic->mem->dump_buf));
}
static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2913,7 +2921,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e100_phy_init(nic);
- memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
+ eth_hw_addr_set(netdev, (u8 *)nic->eeprom);
if (!is_valid_ether_addr(netdev->dev_addr)) {
if (!eeprom_bad_csum_allow) {
netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
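The regs-length change above fixes a units bug: the old value added the register count (one SCB word plus the PHY registers) where ethtool expects a byte count, so the reported dump was undersized once the window grew to 0x1D registers. A sketch of the corrected arithmetic, with dump_buf_size standing in for sizeof(nic->mem->dump_buf):

#include <linux/types.h>

#define E100_PHY_REGS 0x1D /* number of MII registers dumped */

static int foo_regs_len(size_t dump_buf_size)
{
        /* one u32 SCB word plus E100_PHY_REGS u32 MII registers,
         * in bytes, followed by the raw dump buffer
         */
        return (1 + E100_PHY_REGS) * sizeof(u32) + dump_buf_size;
}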
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index bed4f040face..669060a2e6aa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1103,7 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e_err(probe, "EEPROM Read Error\n");
}
/* don't block initialization here due to bad MAC address */
- memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac_addr);
if (!is_valid_ether_addr(netdev->dev_addr))
e_err(probe, "Invalid MAC Address\n");
@@ -2209,7 +2209,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
if (hw->mac_type == e1000_82542_rev2_0)
e1000_enter_82542_rst(adapter);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
e1000_rar_set(hw, hw->mac_addr, 0);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index f3424255bd2b..c3def0ee7788 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -114,7 +114,8 @@ enum e1000_boards {
board_pch2lan,
board_pch_lpt,
board_pch_spt,
- board_pch_cnp
+ board_pch_cnp,
+ board_pch_tgp
};
struct e1000_ps_page {
@@ -500,6 +501,7 @@ extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_pch_spt_info;
extern const struct e1000_info e1000_pch_cnp_info;
+extern const struct e1000_info e1000_pch_tgp_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 60c582a16821..5e4fc9b4e2ad 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -4813,7 +4813,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- u32 ctrl_ext, txdctl, snoop;
+ u32 ctrl_ext, txdctl, snoop, fflt_dbg;
s32 ret_val;
u16 i;
@@ -4872,6 +4872,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
snoop = (u32)~(PCIE_NO_SNOOP_ALL);
e1000e_set_pcie_no_snoop(hw, snoop);
+ /* Enable workaround for packet loss issue on TGP PCH
+ * Do not gate DMA clock from the modPHY block
+ */
+ if (mac->type >= e1000_pch_tgp) {
+ fflt_dbg = er32(FFLT_DBG);
+ fflt_dbg |= E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK;
+ ew32(FFLT_DBG, fflt_dbg);
+ }
+
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
ew32(CTRL_EXT, ctrl_ext);
@@ -5992,3 +6001,23 @@ const struct e1000_info e1000_pch_cnp_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};
+
+const struct e1000_info e1000_pch_tgp_info = {
+ .mac = e1000_pch_tgp,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9022,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index d6a092e5ee74..2504b11c3169 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -289,6 +289,9 @@
/* Proprietary Latency Tolerance Reporting PCI Capability */
#define E1000_PCI_LTR_CAP_LPT 0xA8
+/* Don't gate wake DMA clock */
+#define E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK 0x1000
+
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 900b3ab998bd..44e2dc8328a2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -51,6 +51,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pch_lpt] = &e1000_pch_lpt_info,
[board_pch_spt] = &e1000_pch_spt_info,
[board_pch_cnp] = &e1000_pch_cnp_info,
+ [board_pch_tgp] = &e1000_pch_tgp_info,
};
struct e1000_reg_info {
@@ -2549,7 +2550,6 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
if (adapter->link_speed != SPEED_1000) {
- current_itr = 0;
new_itr = 4000;
goto set_itr_now;
}
@@ -4786,7 +4786,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
@@ -7589,7 +7589,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev,
"NVM Read Error while reading MAC address\n");
- memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
@@ -7896,28 +7896,28 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
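The memcpy()/ether_addr_copy() writes to netdev->dev_addr in this series are consistently replaced by eth_hw_addr_set(), which funnels the assignment through one core helper so that dev_addr can later be made const. A minimal sketch of the conversion pattern, with example_assign_mac() as a hypothetical caller (not a driver function):

#include <linux/etherdevice.h>

/* Hypothetical helper showing the conversion: the driver stops writing
 * netdev->dev_addr directly and lets the core perform the copy.
 */
static int example_assign_mac(struct net_device *netdev, const u8 *mac)
{
	if (!is_valid_ether_addr(mac))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, mac);	/* was: memcpy(netdev->dev_addr, ...) */
	return 0;
}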
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 2fb52bd6fc0e..2cca9e84e31e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -990,7 +990,7 @@ static int fm10k_set_mac(struct net_device *dev, void *p)
}
if (!err) {
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
ether_addr_copy(hw->mac.addr, addr->sa_data);
dev->addr_assign_type &= ~NET_ADDR_RANDOM;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index adfa2768f024..b473cb7d7c57 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -300,7 +300,7 @@ static int fm10k_handle_reset(struct fm10k_intfc *interface)
if (is_valid_ether_addr(hw->mac.perm_addr)) {
ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
- ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
+ eth_hw_addr_set(netdev, hw->mac.perm_addr);
netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
}
@@ -2045,7 +2045,7 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
netdev->addr_assign_type |= NET_ADDR_RANDOM;
}
- ether_addr_copy(netdev->dev_addr, hw->mac.addr);
+ eth_hw_addr_set(netdev, hw->mac.addr);
ether_addr_copy(netdev->perm_addr, hw->mac.addr);
if (!is_valid_ether_addr(netdev->perm_addr)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 39fb3d57c057..3d528fba754b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -435,7 +435,7 @@ static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch)
return !!ch->fwd;
}
-static inline u8 *i40e_channel_mac(struct i40e_channel *ch)
+static inline const u8 *i40e_channel_mac(struct i40e_channel *ch)
{
if (i40e_is_channel_macvlan(ch))
return ch->fwd->netdev->dev_addr;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2f20980dd9a5..ba862131b9bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1587,7 +1587,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, netdev->dev_addr);
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
i40e_add_mac_filter(vsi, netdev->dev_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -4871,7 +4871,8 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
int i;
- i40e_free_misc_vector(pf);
+ if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
+ i40e_free_misc_vector(pf);
i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
I40E_IWARP_IRQ_PILE_ID);
@@ -10113,7 +10114,7 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
/* retry with a larger buffer */
buf_len = data_size;
- } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
+ } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
dev_info(&pf->pdev->dev,
"capability discovery failed, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, err),
@@ -13424,7 +13425,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
i40e_add_mac_filter(vsi, broadcast);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- ether_addr_copy(netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(netdev, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
/* i40iw_net_event() reads 16 bytes from neigh->primary_key */
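The test_bit() guard added to i40e_clear_interrupt_scheme() avoids freeing the misc IRQ vector when it was never requested or has already been released. A sketch of the intended pairing, hedged since the request side lives elsewhere in the driver:

	/* the request path (i40e_setup_misc_vector()) is assumed to do: */
	set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);

	/* teardown now only frees what was actually requested: */
	if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
		i40e_free_misc_vector(pf);	/* clears the bit again */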
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index e7e778ca074c..6f85879ba993 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -193,42 +193,40 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
- struct xdp_buff **bi, *xdp;
+ struct xdp_buff **xdp;
+ u32 nb_buffs, i;
dma_addr_t dma;
- bool ok = true;
rx_desc = I40E_RX_DESC(rx_ring, ntu);
- bi = i40e_rx_bi(rx_ring, ntu);
- do {
- xdp = xsk_buff_alloc(rx_ring->xsk_pool);
- if (!xdp) {
- ok = false;
- goto no_buffers;
- }
- *bi = xdp;
- dma = xsk_buff_xdp_get_dma(xdp);
+ xdp = i40e_rx_bi(rx_ring, ntu);
+
+ nb_buffs = min_t(u16, count, rx_ring->count - ntu);
+ nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
+ if (!nb_buffs)
+ return false;
+
+ i = nb_buffs;
+ while (i--) {
+ dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->read.hdr_addr = 0;
rx_desc++;
- bi++;
- ntu++;
-
- if (unlikely(ntu == rx_ring->count)) {
- rx_desc = I40E_RX_DESC(rx_ring, 0);
- bi = i40e_rx_bi(rx_ring, 0);
- ntu = 0;
- }
- } while (--count);
+ xdp++;
+ }
-no_buffers:
- if (rx_ring->next_to_use != ntu) {
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.qword1.status_error_len = 0;
- i40e_release_rx_desc(rx_ring, ntu);
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count) {
+ rx_desc = I40E_RX_DESC(rx_ring, 0);
+ xdp = i40e_rx_bi(rx_ring, 0);
+ ntu = 0;
}
- return ok;
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
+ i40e_release_rx_desc(rx_ring, ntu);
+
+ return count == nb_buffs;
}
/**
@@ -365,7 +363,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
break;
bi = *i40e_rx_bi(rx_ring, next_to_clean);
- bi->data_end = bi->data + size;
+ xsk_buff_set_size(bi, size);
xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
xdp_res = i40e_run_xdp_zc(rx_ring, bi);
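The rewritten refill path above relies on xsk_buff_alloc_batch(), which fills a contiguous array of xdp_buff pointers in one call instead of looping over xsk_buff_alloc(). A minimal sketch of the batched pattern under that API; example_refill_zc() and its ring layout are illustrative, not driver code:

#include <net/xdp_sock_drv.h>

static bool example_refill_zc(struct xsk_buff_pool *pool,
			      struct xdp_buff **bufs, u32 wanted)
{
	u32 got, i;

	/* one call replaces the per-buffer xsk_buff_alloc() loop */
	got = xsk_buff_alloc_batch(pool, bufs, wanted);

	for (i = 0; i < got; i++) {
		dma_addr_t dma = xsk_buff_xdp_get_dma(bufs[i]);

		/* write dma into the i-th Rx descriptor here */
		(void)dma;
	}

	return got == wanted;	/* a short allocation makes the caller retry */
}

As in the hunk above, the batch is capped at the space left before the ring wraps, so the descriptor writes never straddle the ring boundary.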
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 68c80f04113c..e0b88ff76466 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -177,6 +177,7 @@ enum iavf_state_t {
__IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
__IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
__IAVF_INIT_SW, /* got resources, setting up structs */
+ __IAVF_INIT_FAILED, /* init failed, restarting procedure */
__IAVF_RESETTING, /* in reset */
__IAVF_COMM_FAILED, /* communication with PF failed */
/* Below here, watchdog is running */
@@ -225,7 +226,6 @@ struct iavf_adapter {
struct work_struct reset_task;
struct work_struct adminq_task;
struct delayed_work client_task;
- struct delayed_work init_task;
wait_queue_head_t down_waitqueue;
struct iavf_q_vector *q_vectors;
struct list_head vlan_filter_list;
@@ -312,6 +312,7 @@ struct iavf_adapter {
struct iavf_hw hw; /* defined in iavf_type.h */
enum iavf_state_t state;
+ enum iavf_state_t last_state;
unsigned long crit_section;
struct delayed_work watchdog_task;
@@ -393,6 +394,15 @@ struct iavf_device {
extern char iavf_driver_name[];
extern struct workqueue_struct *iavf_wq;
+static inline void iavf_change_state(struct iavf_adapter *adapter,
+ enum iavf_state_t state)
+{
+ if (adapter->state != state) {
+ adapter->last_state = adapter->state;
+ adapter->state = state;
+ }
+}
+
int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
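iavf_change_state() is what makes the new __IAVF_INIT_FAILED handling work: because it records last_state on every transition, the watchdog can bounce back to whichever init step failed. Illustrative flow (a sketch of the usage, not verbatim driver code):

	/* a failing init step parks the adapter in the failed state */
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
	/* adapter->last_state is now e.g. __IAVF_INIT_VERSION_CHECK */

	/* ...later, the watchdog retries from exactly that step */
	iavf_change_state(adapter, adapter->last_state);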
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 23762a7ef740..56956e449c97 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -14,7 +14,7 @@
static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
-static int iavf_init_get_resources(struct iavf_adapter *adapter);
+static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);
char iavf_driver_name[] = "iavf";
@@ -960,7 +960,7 @@ static void iavf_configure(struct iavf_adapter *adapter)
**/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
- adapter->state = __IAVF_RUNNING;
+ iavf_change_state(adapter, __IAVF_RUNNING);
clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
iavf_napi_enable_all(adapter);
@@ -1688,9 +1688,9 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
*
* Function process __IAVF_STARTUP driver state.
* When success the state is changed to __IAVF_INIT_VERSION_CHECK
- * when fails it returns -EAGAIN
+ * when it fails the state is changed to __IAVF_INIT_FAILED
**/
-static int iavf_startup(struct iavf_adapter *adapter)
+static void iavf_startup(struct iavf_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
@@ -1729,9 +1729,10 @@ static int iavf_startup(struct iavf_adapter *adapter)
iavf_shutdown_adminq(hw);
goto err;
}
- adapter->state = __IAVF_INIT_VERSION_CHECK;
+ iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
+ return;
err:
- return err;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
@@ -1740,9 +1741,9 @@ err:
*
* Function process __IAVF_INIT_VERSION_CHECK driver state.
* When success the state is changed to __IAVF_INIT_GET_RESOURCES
- * when fails it returns -EAGAIN
+ * when it fails the state is changed to __IAVF_INIT_FAILED
**/
-static int iavf_init_version_check(struct iavf_adapter *adapter)
+static void iavf_init_version_check(struct iavf_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
@@ -1753,7 +1754,7 @@ static int iavf_init_version_check(struct iavf_adapter *adapter)
if (!iavf_asq_done(hw)) {
dev_err(&pdev->dev, "Admin queue command never completed\n");
iavf_shutdown_adminq(hw);
- adapter->state = __IAVF_STARTUP;
+ iavf_change_state(adapter, __IAVF_STARTUP);
goto err;
}
@@ -1776,10 +1777,10 @@ static int iavf_init_version_check(struct iavf_adapter *adapter)
err);
goto err;
}
- adapter->state = __IAVF_INIT_GET_RESOURCES;
-
+ iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
+ return;
err:
- return err;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
@@ -1789,9 +1790,9 @@ err:
* Function process __IAVF_INIT_GET_RESOURCES driver state and
* finishes driver initialization procedure.
* When success the state is changed to __IAVF_DOWN
- * when fails it returns -EAGAIN
+ * when it fails the state is changed to __IAVF_INIT_FAILED
**/
-static int iavf_init_get_resources(struct iavf_adapter *adapter)
+static void iavf_init_get_resources(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -1819,7 +1820,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
*/
iavf_shutdown_adminq(hw);
dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
- return 0;
+ return;
}
if (err) {
dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
@@ -1847,7 +1848,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
eth_hw_addr_random(netdev);
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
} else {
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
@@ -1893,7 +1894,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
if (netdev->features & NETIF_F_GRO)
dev_info(&pdev->dev, "GRO is enabled\n");
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
rtnl_unlock();
@@ -1911,7 +1912,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
else
iavf_init_rss(adapter);
- return err;
+ return;
err_mem:
iavf_free_rss(adapter);
err_register:
@@ -1922,7 +1923,7 @@ err_alloc:
kfree(adapter->vf_res);
adapter->vf_res = NULL;
err:
- return err;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
@@ -1941,9 +1942,50 @@ static void iavf_watchdog_task(struct work_struct *work)
goto restart_watchdog;
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
- adapter->state = __IAVF_COMM_FAILED;
+ iavf_change_state(adapter, __IAVF_COMM_FAILED);
+
+ if (adapter->flags & IAVF_FLAG_RESET_NEEDED &&
+ adapter->state != __IAVF_RESETTING) {
+ iavf_change_state(adapter, __IAVF_RESETTING);
+ adapter->aq_required = 0;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ }
switch (adapter->state) {
+ case __IAVF_STARTUP:
+ iavf_startup(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ return;
+ case __IAVF_INIT_VERSION_CHECK:
+ iavf_init_version_check(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ return;
+ case __IAVF_INIT_GET_RESOURCES:
+ iavf_init_get_resources(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(1));
+ return;
+ case __IAVF_INIT_FAILED:
+ if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to communicate with PF; waiting before retry\n");
+ adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
+ iavf_shutdown_adminq(hw);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq,
+ &adapter->watchdog_task, (5 * HZ));
+ return;
+ }
+ /* Try again from the failed step */
+ iavf_change_state(adapter, adapter->last_state);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
+ return;
case __IAVF_COMM_FAILED:
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
@@ -1952,24 +1994,19 @@ static void iavf_watchdog_task(struct work_struct *work)
/* A chance for redemption! */
dev_err(&adapter->pdev->dev,
"Hardware came out of reset. Attempting reinit.\n");
- adapter->state = __IAVF_STARTUP;
- adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
- queue_delayed_work(iavf_wq, &adapter->init_task, 10);
- mutex_unlock(&adapter->crit_lock);
- /* Don't reschedule the watchdog, since we've restarted
- * the init task. When init_task contacts the PF and
+ /* When the init sequence contacts the PF and
* gets everything set up again, it'll restart the
* watchdog for us. Down, boy. Sit. Stay. Woof.
*/
- return;
+ iavf_change_state(adapter, __IAVF_STARTUP);
+ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
}
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
- goto watchdog_done;
+ return;
case __IAVF_RESETTING:
mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
@@ -1992,38 +2029,40 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->state == __IAVF_RUNNING)
iavf_request_stats(adapter);
}
+ if (adapter->state == __IAVF_RUNNING)
+ iavf_detect_recover_hung(&adapter->vsi);
break;
case __IAVF_REMOVE:
mutex_unlock(&adapter->crit_lock);
return;
default:
- goto restart_watchdog;
+ return;
}
- /* check for hw reset */
+ /* check for hw reset */
reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
if (!reg_val) {
+ iavf_change_state(adapter, __IAVF_RESETTING);
adapter->flags |= IAVF_FLAG_RESET_PENDING;
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
queue_work(iavf_wq, &adapter->reset_task);
- goto watchdog_done;
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq,
+ &adapter->watchdog_task, HZ * 2);
+ return;
}
schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
-watchdog_done:
- if (adapter->state == __IAVF_RUNNING ||
- adapter->state == __IAVF_COMM_FAILED)
- iavf_detect_recover_hung(&adapter->vsi);
mutex_unlock(&adapter->crit_lock);
restart_watchdog:
+ queue_work(iavf_wq, &adapter->adminq_task);
if (adapter->aq_required)
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(20));
else
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
- queue_work(iavf_wq, &adapter->adminq_task);
}
static void iavf_disable_vf(struct iavf_adapter *adapter)
@@ -2082,7 +2121,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
adapter->netdev->flags &= ~IFF_UP;
mutex_unlock(&adapter->crit_lock);
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}
@@ -2192,7 +2231,7 @@ continue_reset:
}
iavf_irq_disable(adapter);
- adapter->state = __IAVF_RESETTING;
+ iavf_change_state(adapter, __IAVF_RESETTING);
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
/* free the Tx/Rx rings and descriptors, might be better to just
@@ -2292,11 +2331,14 @@ continue_reset:
iavf_configure(adapter);
+ /* iavf_up_complete() will switch the device back
+ * to __IAVF_RUNNING
+ */
iavf_up_complete(adapter);
iavf_irq_enable(adapter, true);
} else {
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
}
mutex_unlock(&adapter->client_lock);
@@ -2306,6 +2348,8 @@ continue_reset:
reset_err:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
+ if (running)
+ iavf_change_state(adapter, __IAVF_RUNNING);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
iavf_close(netdev);
}
@@ -3298,7 +3342,7 @@ static int iavf_close(struct net_device *netdev)
adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
iavf_down(adapter);
- adapter->state = __IAVF_DOWN_PENDING;
+ iavf_change_state(adapter, __IAVF_DOWN_PENDING);
iavf_free_traffic_irqs(adapter);
mutex_unlock(&adapter->crit_lock);
@@ -3632,64 +3676,6 @@ int iavf_process_config(struct iavf_adapter *adapter)
}
/**
- * iavf_init_task - worker thread to perform delayed initialization
- * @work: pointer to work_struct containing our data
- *
- * This task completes the work that was begun in probe. Due to the nature
- * of VF-PF communications, we may need to wait tens of milliseconds to get
- * responses back from the PF. Rather than busy-wait in probe and bog down the
- * whole system, we'll do it in a task so we can sleep.
- * This task only runs during driver init. Once we've established
- * communications with the PF driver and set up our netdev, the watchdog
- * takes over.
- **/
-static void iavf_init_task(struct work_struct *work)
-{
- struct iavf_adapter *adapter = container_of(work,
- struct iavf_adapter,
- init_task.work);
- struct iavf_hw *hw = &adapter->hw;
-
- if (iavf_lock_timeout(&adapter->crit_lock, 5000)) {
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
- return;
- }
- switch (adapter->state) {
- case __IAVF_STARTUP:
- if (iavf_startup(adapter) < 0)
- goto init_failed;
- break;
- case __IAVF_INIT_VERSION_CHECK:
- if (iavf_init_version_check(adapter) < 0)
- goto init_failed;
- break;
- case __IAVF_INIT_GET_RESOURCES:
- if (iavf_init_get_resources(adapter) < 0)
- goto init_failed;
- goto out;
- default:
- goto init_failed;
- }
-
- queue_delayed_work(iavf_wq, &adapter->init_task,
- msecs_to_jiffies(30));
- goto out;
-init_failed:
- if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
- dev_err(&adapter->pdev->dev,
- "Failed to communicate with PF; waiting before retry\n");
- adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
- iavf_shutdown_adminq(hw);
- adapter->state = __IAVF_STARTUP;
- queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
- goto out;
- }
- queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
-out:
- mutex_unlock(&adapter->crit_lock);
-}
-
-/**
* iavf_shutdown - Shutdown the device in preparation for a reboot
* @pdev: pci device structure
**/
@@ -3706,7 +3692,7 @@ static void iavf_shutdown(struct pci_dev *pdev)
if (iavf_lock_timeout(&adapter->crit_lock, 5000))
dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
/* Prevent the watchdog from running. */
- adapter->state = __IAVF_REMOVE;
+ iavf_change_state(adapter, __IAVF_REMOVE);
adapter->aq_required = 0;
mutex_unlock(&adapter->crit_lock);
@@ -3779,7 +3765,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->back = adapter;
adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
- adapter->state = __IAVF_STARTUP;
+ iavf_change_state(adapter, __IAVF_STARTUP);
/* Call save state here because it relies on the adapter struct. */
pci_save_state(pdev);
@@ -3823,8 +3809,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
- INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
- queue_delayed_work(iavf_wq, &adapter->init_task,
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Setup the wait queue for indicating transition to down status */
@@ -3930,8 +3915,8 @@ static void iavf_remove(struct pci_dev *pdev)
int err;
/* Indicate we are in remove and not to run reset_task */
mutex_lock(&adapter->remove_lock);
- cancel_delayed_work_sync(&adapter->init_task);
cancel_work_sync(&adapter->reset_task);
+ cancel_delayed_work_sync(&adapter->watchdog_task);
cancel_delayed_work_sync(&adapter->client_task);
if (adapter->netdev_registered) {
unregister_netdev(netdev);
@@ -3955,7 +3940,7 @@ static void iavf_remove(struct pci_dev *pdev)
dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
/* Shut down all the garbage mashers on the detention level */
- adapter->state = __IAVF_REMOVE;
+ iavf_change_state(adapter, __IAVF_REMOVE);
adapter->aq_required = 0;
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_free_all_tx_resources(adapter);
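Taken together, the iavf_main.c changes fold the deleted init_task into the watchdog: each init-state handler now changes state instead of returning an error, and the watchdog dispatches on state and reschedules itself. A condensed sketch of the resulting loop (locking and the exact delays are as in the hunks above; delay is a placeholder):

	switch (adapter->state) {
	case __IAVF_STARTUP:
		iavf_startup(adapter);		/* -> VERSION_CHECK or INIT_FAILED */
		break;
	case __IAVF_INIT_VERSION_CHECK:
		iavf_init_version_check(adapter);
		break;
	case __IAVF_INIT_GET_RESOURCES:
		iavf_init_get_resources(adapter);	/* -> __IAVF_DOWN on success */
		break;
	case __IAVF_INIT_FAILED:
		iavf_change_state(adapter, adapter->last_state);	/* retry */
		break;
	default:
		break;
	}
	queue_delayed_work(iavf_wq, &adapter->watchdog_task, delay);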
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 3c735968e1b8..8c3f0f77cb57 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1685,7 +1685,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (!v_retval)
iavf_mac_add_ok(adapter);
if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
break;
case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats =
@@ -1716,7 +1716,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
} else {
/* refresh current mac address if changed */
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr,
adapter->hw.mac.addr);
}
@@ -1735,7 +1735,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_free_all_tx_resources(adapter);
iavf_free_all_rx_resources(adapter);
if (adapter->state == __IAVF_DOWN_PENDING) {
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
}
break;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 4f538cdf42c1..c36faa7d1471 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -26,10 +26,13 @@ ice-y := ice_main.o \
ice_devlink.o \
ice_fw_update.o \
ice_lag.o \
- ice_ethtool.o
+ ice_ethtool.o \
+ ice_repr.o \
+ ice_tc_lib.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o
ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
+ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 3c4f08d20414..967a90efcb11 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -38,6 +38,10 @@
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/ip.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
@@ -55,6 +59,7 @@
#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
+#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
#include "ice_virtchnl_pf.h"
@@ -63,6 +68,8 @@
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
+#include "ice_repr.h"
+#include "ice_eswitch.h"
#include "ice_lag.h"
#define ICE_BAR0 0
@@ -84,6 +91,7 @@
#define ICE_FDIR_MSIX 2
#define ICE_RDMA_NUM_AEQ_MSIX 4
#define ICE_MIN_RDMA_MSIX 2
+#define ICE_ESWITCH_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -101,6 +109,10 @@
#define ICE_INVAL_VFID 256
#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */
+
+#define ICE_CHNL_START_TC 1
+#define ICE_CHNL_MAX_TC 16
+
#define ICE_MAX_RESET_WAIT 20
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
@@ -118,14 +130,24 @@
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))
+/* Minimum BW limit is 500 Kbps for any scheduler node */
+#define ICE_MIN_BW_LIMIT 500
+/* The user can specify BW in Kbit/Mbit/Gbit and the OS converts it to bytes
+ * per second; use this divisor to convert the user-specified BW limit into
+ * Kbps (1 Kbit/s = 125 bytes/s)
+ */
+#define ICE_BW_KBPS_DIVISOR 125
+
/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
-/* Macros for each Tx/Rx ring in a VSI */
+/* Macros for each Tx/Xdp/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
+#define ice_for_each_xdp_txq(vsi, i) \
+ for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++)
+
#define ice_for_each_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
@@ -139,6 +161,9 @@
#define ice_for_each_q_vector(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
+#define ice_for_each_chnl_tc(i) \
+ for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
+
#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \
ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX)
@@ -158,6 +183,29 @@
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
+enum ice_feature {
+ ICE_F_DSCP,
+ ICE_F_SMA_CTRL,
+ ICE_F_MAX
+};
+
+DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);
+
+struct ice_channel {
+ struct list_head list;
+ u8 type;
+ u16 sw_id;
+ u16 base_q;
+ u16 num_rxq;
+ u16 num_txq;
+ u16 vsi_num;
+ u8 ena_tc;
+ struct ice_aqc_vsi_props info;
+ u64 max_tx_rate;
+ u64 min_tx_rate;
+ struct ice_vsi *ch_vsi;
+};
+
struct ice_txq_meta {
u32 q_teid; /* Tx-scheduler element identifier */
u16 q_id; /* Entry in VSI's txq_map bitmap */
@@ -175,7 +223,7 @@ struct ice_tc_info {
struct ice_tc_cfg {
u8 numtc; /* Total number of enabled TCs */
- u8 ena_tc; /* Tx map */
+ u16 ena_tc; /* Tx map */
struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};
@@ -266,8 +314,8 @@ struct ice_vsi {
struct ice_sw *vsw; /* switch this VSI is on */
struct ice_pf *back; /* back pointer to PF */
struct ice_port_info *port_info; /* back pointer to port_info */
- struct ice_ring **rx_rings; /* Rx ring array */
- struct ice_ring **tx_rings; /* Tx ring array */
+ struct ice_rx_ring **rx_rings; /* Rx ring array */
+ struct ice_tx_ring **tx_rings; /* Tx ring array */
struct ice_q_vector **q_vectors; /* q_vector array */
irqreturn_t (*irq_handler)(int irq, void *data);
@@ -306,10 +354,6 @@ struct ice_vsi {
spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
atomic_t *arfs_last_fltr_id;
- /* devlink port data */
- struct devlink_port devlink_port;
- bool devlink_port_registered;
-
u16 max_frame;
u16 rx_buf_len;
@@ -344,11 +388,42 @@ struct ice_vsi {
u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
- struct ice_ring **xdp_rings; /* XDP ring array */
+ struct ice_tx_ring **xdp_rings; /* XDP ring array */
unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ struct net_device **target_netdevs;
+
+ struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */
+
+ /* Channel Specific Fields */
+ struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC];
+ u16 cnt_q_avail;
+ u16 next_base_q; /* next queue to be used for channel setup */
+ struct list_head ch_list;
+ u16 num_chnl_rxq;
+ u16 num_chnl_txq;
+ u16 ch_rss_size;
+ u16 num_chnl_fltr;
+ /* store away the RSS size info before configuring ADQ channels so
+ * that it can be used after tc-qdisc delete to restore the RSS
+ * settings as they were before
+ */
+ u16 orig_rss_size;
+ /* this keeps track of all enabled TCs with and without DCB and
+ * inclusive of ADQ; vsi->mqprio_qopt keeps track of the queue
+ * information
+ */
+ u8 all_numtc;
+ u16 all_enatc;
+
+ /* store away TC info, to be used for rebuild logic */
+ u8 old_numtc;
+ u16 old_ena_tc;
+
+ struct ice_channel *ch;
+
/* setup back reference, to which aggregator node this VSI
* corresponds to
*/
@@ -377,6 +452,8 @@ struct ice_q_vector {
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
+ struct ice_channel *ch;
+
char name[ICE_INT_NAME_STR_LEN];
u16 total_events; /* net_dim(): number of interrupts processed */
@@ -395,6 +472,8 @@ enum ice_pf_flags {
ICE_FLAG_PTP, /* PTP is enabled by software */
ICE_FLAG_AUX_ENA,
ICE_FLAG_ADV_FEATURES,
+ ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */
+ ICE_FLAG_CLS_FLOWER,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
ICE_FLAG_NO_MEDIA,
@@ -408,6 +487,12 @@ enum ice_pf_flags {
ICE_PF_FLAGS_NBITS /* must be last */
};
+struct ice_switchdev_info {
+ struct ice_vsi *control_vsi;
+ struct ice_vsi *uplink_vsi;
+ bool is_running;
+};
+
struct ice_agg_node {
u32 agg_id;
#define ICE_MAX_VSIS_IN_AGG_NODE 64
@@ -421,6 +506,9 @@ struct ice_pf {
struct devlink_region *nvm_region;
struct devlink_region *devcaps_region;
+ /* devlink port data */
+ struct devlink_port devlink_port;
+
/* OS reserved IRQ details */
struct msix_entry *msix_entries;
struct ice_res_tracker *irq_tracker;
@@ -434,6 +522,7 @@ struct ice_pf {
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
+ u16 eswitch_mode; /* current mode of eswitch */
/* Virtchnl/SR-IOV config info */
struct ice_vf *vf;
u16 num_alloc_vfs; /* actual number of VFs allocated */
@@ -443,6 +532,7 @@ struct ice_pf {
/* used to ratelimit the MDD event logging */
unsigned long last_printed_mdd_jiffies;
DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT);
+ DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
@@ -495,12 +585,19 @@ struct ice_pf {
struct auxiliary_device *adev;
int aux_idx;
u32 sw_int_count;
+ /* count of tc_flower filters specific to channel (aka where filter
+ * action is "hw_tc <tc_num>")
+ */
+ u16 num_dmac_chnl_fltrs;
+ struct hlist_head tc_flower_fltr_list;
__le64 nvm_phy_type_lo; /* NVM PHY type low */
__le64 nvm_phy_type_hi; /* NVM PHY type high */
struct ice_link_default_override_tlv link_dflt_override;
struct ice_lag *lag; /* Link Aggregation information */
+ struct ice_switchdev_info switchdev;
+
#define ICE_INVALID_AGG_NODE_ID 0
#define ICE_PF_AGG_NODE_ID_START 1
#define ICE_MAX_PF_AGG_NODES 32
@@ -512,9 +609,21 @@ struct ice_pf {
struct ice_netdev_priv {
struct ice_vsi *vsi;
+ struct ice_repr *repr;
};
/**
+ * ice_vector_ch_enabled
+ * @qv: pointer to q_vector (dereferenced, so it must not be NULL)
+ *
+ * Returns true if the vector is channel enabled, false otherwise
+ */
+static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
+{
+ return !!qv->ch; /* Enable it to run with TC */
+}
+
+/**
* ice_irq_dynamic_ena - Enable default interrupt generation settings
* @hw: pointer to HW struct
* @vsi: pointer to VSI struct, can be NULL
@@ -556,25 +665,42 @@ static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
return !!vsi->xdp_prog;
}
-static inline void ice_set_ring_xdp(struct ice_ring *ring)
+static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
{
ring->flags |= ICE_TX_FLAGS_RING_XDP;
}
/**
* ice_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: ring to use
+ * @ring: Rx ring to use
*
* Returns a pointer to xdp_umem structure if there is a buffer pool present,
* NULL otherwise.
*/
-static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
+static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;
- if (ice_ring_is_xdp(ring))
- qid -= vsi->num_xdp_txq;
+ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
+ return NULL;
+
+ return xsk_get_pool_from_qid(vsi->netdev, qid);
+}
+
+/**
+ * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
+ * @ring: Tx ring to use
+ *
+ * Returns a pointer to the xsk_buff_pool structure if there is a buffer pool
+ * present, NULL otherwise. Tx equivalent of ice_xsk_pool.
+ */
+static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ u16 qid;
+
+ qid = ring->q_index - vsi->num_xdp_txq;
if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
return NULL;
@@ -597,6 +723,19 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
}
/**
+ * ice_get_netdev_priv_vsi - return VSI associated with netdev priv.
+ * @np: private netdev structure
+ */
+static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np)
+{
+ /* In case of port representor return source port VSI. */
+ if (np->repr)
+ return np->repr->src_vsi;
+ else
+ return np->vsi;
+}
+
+/**
* ice_get_ctrl_vsi - Get the control VSI
* @pf: PF instance
*/
@@ -610,6 +749,18 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
}
/**
+ * ice_is_switchdev_running - check if switchdev is configured
+ * @pf: pointer to PF structure
+ *
+ * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV
+ * and switchdev is configured, false otherwise.
+ */
+static inline bool ice_is_switchdev_running(struct ice_pf *pf)
+{
+ return pf->switchdev.is_running;
+}
+
+/**
* ice_set_sriov_cap - enable SRIOV in PF flags
* @pf: PF struct
*/
@@ -633,11 +784,37 @@ static inline void ice_clear_sriov_cap(struct ice_pf *pf)
((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
+/**
+ * ice_is_adq_active - any active ADQs
+ * @pf: pointer to PF
+ *
+ * Returns true if any ADQs are configured, as determined from the main
+ * (VSI_PF type) VSI's numtc and the TC_MQPRIO flag; false otherwise
+ */
+static inline bool ice_is_adq_active(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return false;
+
+ /* is ADQ configured */
+ if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ return true;
+
+ return false;
+}
+
bool netif_is_ice(struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
+int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
+void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
@@ -648,6 +825,7 @@ int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
int ice_destroy_xdp_rings(struct ice_vsi *vsi);
int
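A worked example of the ICE_BW_KBPS_DIVISOR arithmetic defined above: mqprio hands the driver a max rate in bytes per second, and since 1 Kbit/s equals 125 bytes/s, dividing by 125 recovers Kbps. Sketch only; example_rate_to_kbps() is illustrative and div_u64() comes from linux/math64.h:

#include <linux/math64.h>

/* mqprio expresses max_rate in bytes/s */
static u64 example_rate_to_kbps(u64 bytes_per_sec)
{
	return div_u64(bytes_per_sec, ICE_BW_KBPS_DIVISOR);	/* /125 */
}

/* 10 Mbit/s arrives as 1250000 bytes/s -> 1250000 / 125 = 10000 Kbps,
 * comfortably above ICE_MIN_BW_LIMIT (500 Kbps).
 */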
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 21b4c7cd6f05..a5425f0dce3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -233,6 +233,7 @@ struct ice_aqc_get_sw_cfg_resp_elem {
*/
#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
+#define ICE_AQC_RES_TYPE_RECIPE 0x05
#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21
#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22
#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23
@@ -241,6 +242,7 @@ struct ice_aqc_get_sw_cfg_resp_elem {
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61
+#define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7)
#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
@@ -474,6 +476,53 @@ struct ice_aqc_vsi_props {
#define ICE_MAX_NUM_RECIPES 64
+/* Add/Get Recipe (indirect 0x0290/0x0292) */
+struct ice_aqc_add_get_recipe {
+ __le16 num_sub_recipes; /* Input in Add cmd, Output in Get cmd */
+ __le16 return_index; /* Input, used for Get cmd only */
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_recipe_content {
+ u8 rid;
+#define ICE_AQ_RECIPE_ID_IS_ROOT BIT(7)
+#define ICE_AQ_SW_ID_LKUP_IDX 0
+ u8 lkup_indx[5];
+#define ICE_AQ_RECIPE_LKUP_IGNORE BIT(7)
+#define ICE_AQ_SW_ID_LKUP_MASK 0x00FF
+ __le16 mask[5];
+ u8 result_indx;
+#define ICE_AQ_RECIPE_RESULT_DATA_S 0
+#define ICE_AQ_RECIPE_RESULT_DATA_M (0x3F << ICE_AQ_RECIPE_RESULT_DATA_S)
+#define ICE_AQ_RECIPE_RESULT_EN BIT(7)
+ u8 rsvd0[3];
+ u8 act_ctrl_join_priority;
+ u8 act_ctrl_fwd_priority;
+ u8 act_ctrl;
+#define ICE_AQ_RECIPE_ACT_INV_ACT BIT(2)
+ u8 rsvd1;
+ __le32 dflt_act;
+};
+
+struct ice_aqc_recipe_data_elem {
+ u8 recipe_indx;
+ u8 resp_bits;
+ u8 rsvd0[2];
+ u8 recipe_bitmap[8];
+ u8 rsvd1[4];
+ struct ice_aqc_recipe_content content;
+ u8 rsvd2[20];
+};
+
+/* Set/Get Recipes to Profile Association (direct 0x0291/0x0293) */
+struct ice_aqc_recipe_to_profile {
+ __le16 profile_id;
+ u8 rsvd[6];
+ DECLARE_BITMAP(recipe_assoc, ICE_MAX_NUM_RECIPES);
+};
+
/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
*/
struct ice_aqc_sw_rules {
@@ -671,6 +720,16 @@ struct ice_aqc_sw_rules_elem {
} __packed pdata;
};
+/* Query PFC Mode (direct 0x0302)
+ * Set PFC Mode (direct 0x0303)
+ */
+struct ice_aqc_set_query_pfc_mode {
+ u8 pfc_mode;
+/* For Query Command response, reserved in all other cases */
+#define ICE_AQC_PFC_VLAN_BASED_PFC 1
+#define ICE_AQC_PFC_DSCP_BASED_PFC 2
+ u8 rsvd[15];
+};
/* Get Default Topology (indirect 0x0400) */
struct ice_aqc_get_topo {
u8 port_num;
@@ -1220,7 +1279,7 @@ struct ice_aqc_set_mac_lb {
u8 reserved[15];
};
-struct ice_aqc_link_topo_addr {
+struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
#define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0)
@@ -1246,6 +1305,10 @@ struct ice_aqc_link_topo_addr {
#define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4
#define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5
u8 index;
+};
+
+struct ice_aqc_link_topo_addr {
+ struct ice_aqc_link_topo_params topo_params;
__le16 handle;
#define ICE_AQC_LINK_TOPO_HANDLE_S 0
#define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S)
@@ -1268,6 +1331,7 @@ struct ice_aqc_link_topo_addr {
struct ice_aqc_get_link_topo {
struct ice_aqc_link_topo_addr addr;
u8 node_part_num;
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
u8 rsvd[9];
};
@@ -1281,6 +1345,16 @@ struct ice_aqc_set_port_id_led {
u8 rsvd[13];
};
+/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
+struct ice_aqc_gpio {
+ __le16 gpio_ctrl_handle;
+#define ICE_AQC_GPIO_HANDLE_S 0
+#define ICE_AQC_GPIO_HANDLE_M (0x3FF << ICE_AQC_GPIO_HANDLE_S)
+ u8 gpio_num;
+ u8 gpio_val;
+ u8 rsvd[12];
+};
+
/* Read/Write SFF EEPROM command (indirect 0x06EE) */
struct ice_aqc_sff_eeprom {
u8 lport_num;
@@ -1922,10 +1996,13 @@ struct ice_aq_desc {
struct ice_aqc_get_phy_caps get_phy;
struct ice_aqc_set_phy_cfg set_phy;
struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
struct ice_aqc_get_sw_cfg get_sw_conf;
struct ice_aqc_sw_rules sw_rules;
+ struct ice_aqc_add_get_recipe add_get_recipe;
+ struct ice_aqc_recipe_to_profile recipe_to_profile;
struct ice_aqc_get_topo get_topo;
struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
@@ -1936,6 +2013,7 @@ struct ice_aq_desc {
struct ice_aqc_nvm_pkg_data pkg_data;
struct ice_aqc_nvm_pass_comp_tbl pass_comp_tbl;
struct ice_aqc_pf_vf_msg virt;
+ struct ice_aqc_set_query_pfc_mode set_query_pfc_mode;
struct ice_aqc_lldp_get_mib lldp_get_mib;
struct ice_aqc_lldp_set_mib_change lldp_set_event;
struct ice_aqc_lldp_stop lldp_stop;
@@ -2033,6 +2111,12 @@ enum ice_adminq_opc {
ice_aqc_opc_update_vsi = 0x0211,
ice_aqc_opc_free_vsi = 0x0213,
+ /* recipe commands */
+ ice_aqc_opc_add_recipe = 0x0290,
+ ice_aqc_opc_recipe_to_profile = 0x0291,
+ ice_aqc_opc_get_recipe = 0x0292,
+ ice_aqc_opc_get_recipe_to_profile = 0x0293,
+
/* switch rules population commands */
ice_aqc_opc_add_sw_rules = 0x02A0,
ice_aqc_opc_update_sw_rules = 0x02A1,
@@ -2040,6 +2124,10 @@ enum ice_adminq_opc {
ice_aqc_opc_clear_pf_cfg = 0x02A4,
+ /* DCB commands */
+ ice_aqc_opc_query_pfc_mode = 0x0302,
+ ice_aqc_opc_set_pfc_mode = 0x0303,
+
/* transmit scheduler commands */
ice_aqc_opc_get_dflt_topo = 0x0400,
ice_aqc_opc_add_sched_elems = 0x0401,
@@ -2064,6 +2152,8 @@ enum ice_adminq_opc {
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_set_port_id_led = 0x06E9,
+ ice_aqc_opc_set_gpio = 0x06EC,
+ ice_aqc_opc_get_gpio = 0x06ED,
ice_aqc_opc_sff_eeprom = 0x06EE,
/* NVM commands */
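The new admin-queue structs above plug into the driver's existing descriptor plumbing. As a hedged sketch of how the direct 0x06EC set-GPIO command would be issued — the wrapper name is hypothetical, and the field usage is inferred from the descriptor layout plus the ice_fill_dflt_direct_cmd_desc()/ice_aq_send_cmd() helpers visible later in this series:

static enum ice_status
example_aq_set_gpio(struct ice_hw *hw, u16 handle, u8 pin, bool val)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(handle);	/* __le16 on the wire */
	cmd->gpio_num = pin;
	cmd->gpio_val = val ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}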
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 88d98c9e5f91..5daade32ea62 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -513,7 +513,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
if (!vsi || vsi->type != ICE_VSI_PF)
return;
- arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST,
+ arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
GFP_KERNEL);
if (!arfs_fltr_list)
return;
@@ -614,7 +614,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
return -EINVAL;
base_idx = vsi->base_vector;
- for (i = 0; i < vsi->num_q_vectors; i++)
+ ice_for_each_q_vector(vsi, i)
if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
pf->msix_entries[base_idx + i].vector)) {
ice_free_cpu_rx_rmap(vsi);
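Both ice_arfs.c changes are hardening/readability fixes: kcalloc() performs an overflow-checked n * size allocation where the open-coded multiplication inside kzalloc() could silently wrap, and ice_for_each_q_vector() reuses the iterator already defined in ice.h. Minimal sketch of the allocation idiom, with example_alloc() as a hypothetical caller:

#include <linux/slab.h>

/* kcalloc(n, size, flags) returns NULL if n * size would overflow,
 * whereas kzalloc(n * size, flags) would quietly allocate a short buffer.
 */
static struct hlist_head *example_alloc(unsigned int n)
{
	return kcalloc(n, sizeof(struct hlist_head), GFP_KERNEL);
}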
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index c36057efc7ae..fa6cd63cbf1f 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -115,6 +115,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
q_vector->tx.itr_mode = ITR_DYNAMIC;
q_vector->rx.itr_mode = ITR_DYNAMIC;
+ q_vector->tx.type = ICE_TX_CONTAINER;
+ q_vector->rx.type = ICE_RX_CONTAINER;
if (vsi->type == ICE_VSI_VF)
goto out;
@@ -146,7 +148,8 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
struct ice_q_vector *q_vector;
struct ice_pf *pf = vsi->back;
- struct ice_ring *ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
struct device *dev;
dev = ice_pf_to_dev(pf);
@@ -156,10 +159,10 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
}
q_vector = vsi->q_vectors[v_idx];
- ice_for_each_ring(ring, q_vector->tx)
- ring->q_vector = NULL;
- ice_for_each_ring(ring, q_vector->rx)
- ring->q_vector = NULL;
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ tx_ring->q_vector = NULL;
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ rx_ring->q_vector = NULL;
/* only VSI with an associated netdev is set up with NAPI */
if (vsi->netdev)
@@ -201,15 +204,18 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
}
/**
- * ice_calc_q_handle - calculate the queue handle
+ * ice_calc_txq_handle - calculate the queue handle
* @vsi: VSI that ring belongs to
* @ring: ring to get the absolute queue index
* @tc: traffic class number
*/
-static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
+static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
+ if (ring->ch)
+ return ring->q_index - ring->ch->base_q;
+
/* Idea here for calculation is that we subtract the number of queue
* count from TC that ring belongs to from it's absolute queue index
* and as a result we get the queue's index within TC.
@@ -218,13 +224,37 @@ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
}
/**
+ * ice_eswitch_calc_txq_handle
+ * @ring: pointer to the ring whose unique index is needed
+ *
+ * Tx rings on a switchdev VSI serve many netdevs, so their ring->q_index
+ * values can repeat, while hardware ring setup requires a unique q_index.
+ * Calculate one here by finding this ring's index in vsi->tx_rings.
+ *
+ * Returns ICE_INVAL_Q_INDEX when the ring wasn't found. This should never
+ * happen, because the VSI is taken from ring->vsi, so the ring has to be
+ * present in it.
+ */
+static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ int i;
+
+ ice_for_each_txq(vsi, i) {
+ if (vsi->tx_rings[i] == ring)
+ return i;
+ }
+
+ return ICE_INVAL_Q_INDEX;
+}
+
+/**
* ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
* @ring: The Tx ring to configure
*
* This enables/disables XPS for a given Tx descriptor ring
* based on the TCs enabled for the VSI that ring belongs to.
*/
-static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
+static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
{
if (!ring->q_vector || !ring->netdev)
return;
@@ -246,7 +276,7 @@ static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
* Configure the Tx descriptor ring in TLAN context.
*/
static void
-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
struct ice_vsi *vsi = ring->vsi;
struct ice_hw *hw = &vsi->back->hw;
@@ -258,7 +288,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
/* Transmit Queue Length */
tlan_ctx->qlen = ring->count;
- ice_set_cgd_num(tlan_ctx, ring);
+ ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
/* PF number */
tlan_ctx->pf_num = hw->pf_id;
@@ -273,19 +303,28 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
case ICE_VSI_LB:
case ICE_VSI_CTRL:
case ICE_VSI_PF:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ if (ring->ch)
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ else
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF ID */
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ break;
default:
return;
}
/* make sure the context is associated with the right VSI */
- tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+ if (ring->ch)
+ tlan_ctx->src_vsi = ring->ch->vsi_num;
+ else
+ tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
/* Restrict Tx timestamps to the PF VSI */
switch (vsi->type) {
@@ -312,7 +351,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
*
* Returns the offset value for ring into the data buffer.
*/
-static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
+static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
if (ice_ring_uses_build_skb(rx_ring))
return ICE_SKB_PAD;
@@ -328,7 +367,7 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
*
* Configure the Rx descriptor ring in RLAN context.
*/
-static int ice_setup_rx_ctx(struct ice_ring *ring)
+static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
{
int chain_len = ICE_MAX_CHAINED_RX_BUFS;
struct ice_vsi *vsi = ring->vsi;
@@ -439,7 +478,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
*
* Return 0 on success and a negative value on error.
*/
-int ice_vsi_cfg_rxq(struct ice_ring *ring)
+int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
u16 num_bufs = ICE_DESC_UNUSED(ring);
@@ -660,16 +699,16 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
q_vectors - v_id);
q_vector->num_ring_tx = tx_rings_per_v;
- q_vector->tx.ring = NULL;
+ q_vector->tx.tx_ring = NULL;
q_vector->tx.itr_idx = ICE_TX_ITR;
q_base = vsi->num_txq - tx_rings_rem;
for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
- struct ice_ring *tx_ring = vsi->tx_rings[q_id];
+ struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
tx_ring->q_vector = q_vector;
- tx_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = tx_ring;
+ tx_ring->next = q_vector->tx.tx_ring;
+ q_vector->tx.tx_ring = tx_ring;
}
tx_rings_rem -= tx_rings_per_v;
@@ -677,16 +716,16 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
q_vectors - v_id);
q_vector->num_ring_rx = rx_rings_per_v;
- q_vector->rx.ring = NULL;
+ q_vector->rx.rx_ring = NULL;
q_vector->rx.itr_idx = ICE_RX_ITR;
q_base = vsi->num_rxq - rx_rings_rem;
for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
- struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
rx_ring->q_vector = q_vector;
- rx_ring->next = q_vector->rx.ring;
- q_vector->rx.ring = rx_ring;
+ rx_ring->next = q_vector->rx.rx_ring;
+ q_vector->rx.rx_ring = rx_ring;
}
rx_rings_rem -= rx_rings_per_v;
}
@@ -711,12 +750,13 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @qg_buf: queue group buffer
*/
int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
u8 buf_len = struct_size(qg_buf, txqs, 1);
struct ice_tlan_ctx tlan_ctx = { 0 };
struct ice_aqc_add_txqs_perq *txq;
+ struct ice_channel *ch = ring->ch;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
@@ -746,10 +786,23 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
/* Add unique software queue handle of the Tx queue per
* TC into the VSI Tx ring
*/
- ring->q_handle = ice_calc_q_handle(vsi, ring, tc);
+ if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
+ ring->q_handle = ice_eswitch_calc_txq_handle(ring);
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
- 1, qg_buf, buf_len, NULL);
+ if (ring->q_handle == ICE_INVAL_Q_INDEX)
+ return -ENODEV;
+ } else {
+ ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
+ }
+
+ if (ch)
+ status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
+ ring->q_handle, 1, qg_buf, buf_len,
+ NULL);
+ else
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+ ring->q_handle, 1, qg_buf, buf_len,
+ NULL);
if (status) {
dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
ice_stat_str(status));
@@ -870,7 +923,7 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
*/
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
+ u16 rel_vmvf_num, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta)
{
struct ice_pf *pf = vsi->back;
@@ -927,9 +980,10 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* are needed for stopping Tx queue
*/
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta)
{
+ struct ice_channel *ch = ring->ch;
u8 tc;
if (IS_ENABLED(CONFIG_DCB))
@@ -940,6 +994,11 @@ ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
txq_meta->q_id = ring->reg_idx;
txq_meta->q_teid = ring->txq_teid;
txq_meta->q_handle = ring->q_handle;
- txq_meta->vsi_idx = vsi->idx;
- txq_meta->tc = tc;
+ if (ch) {
+ txq_meta->vsi_idx = ch->ch_vsi->idx;
+ txq_meta->tc = 0;
+ } else {
+ txq_meta->vsi_idx = vsi->idx;
+ txq_meta->tc = tc;
+ }
}
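The channel-aware pieces of ice_base.c all follow from one rule visible in ice_calc_txq_handle(): a ring owned by a channel is addressed relative to that channel's VSI, so its handle is q_index - ch->base_q and its TC is 0. A worked example under that rule (the numbers and the helper name are illustrative):

static u16 example_chnl_handle(const struct ice_channel *ch, u16 q_index)
{
	/* e.g. base_q = 8 and q_index = 10 -> handle 2: the third queue of
	 * the channel VSI. Queue enable and teardown then target
	 * ch->ch_vsi->idx with tc = 0, rather than the PF VSI and its
	 * DCB TC, which is exactly what ice_vsi_cfg_txq() and
	 * ice_fill_txq_meta() above do when ring->ch is set.
	 */
	return q_index - ch->base_q;
}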
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index 20e1c29aa68a..b67dca417acb 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -6,7 +6,7 @@
#include "ice.h"
-int ice_vsi_cfg_rxq(struct ice_ring *ring);
+int ice_vsi_cfg_rxq(struct ice_rx_ring *ring);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
@@ -15,7 +15,7 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf);
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
void
@@ -25,9 +25,9 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
+ u16 rel_vmvf_num, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
#endif /* _ICE_BASE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2fb81e359cdf..b3066d0fea8b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -25,6 +25,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E810C_BACKPLANE:
case ICE_DEV_ID_E810C_QSFP:
case ICE_DEV_ID_E810C_SFP:
+ case ICE_DEV_ID_E810_XXV_BACKPLANE:
+ case ICE_DEV_ID_E810_XXV_QSFP:
case ICE_DEV_ID_E810_XXV_SFP:
hw->mac_type = ICE_MAC_E810;
break;
@@ -70,6 +72,27 @@ bool ice_is_e810(struct ice_hw *hw)
}
/**
+ * ice_is_e810t
+ * @hw: pointer to the hardware structure
+ *
+ * Returns true if the device is E810T based, false if not.
+ */
+bool ice_is_e810t(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_SFP:
+ if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
+ hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
+ return true;
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -240,11 +263,13 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
- cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
- ICE_AQC_LINK_TOPO_NODE_CTX_S);
+ cmd->addr.topo_params.node_type_ctx =
+ (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
+ ICE_AQC_LINK_TOPO_NODE_CTX_S);
/* set node type */
- cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
+ cmd->addr.topo_params.node_type_ctx |=
+ (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}
@@ -568,6 +593,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
return ICE_ERR_NO_MEMORY;
INIT_LIST_HEAD(&sw->vsi_list_map_head);
+ sw->prof_res_bm_init = 0;
status = ice_init_def_sw_recp(hw);
if (status) {
@@ -594,17 +620,42 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
list_del(&v_pos_map->list_entry);
devm_kfree(ice_hw_to_dev(hw), v_pos_map);
}
- recps = hw->switch_info->recp_list;
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
- struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
+ recps = sw->recp_list;
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+ struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
recps[i].root_rid = i;
- mutex_destroy(&recps[i].filt_rule_lock);
- list_for_each_entry_safe(lst_itr, tmp_entry,
- &recps[i].filt_rules, list_entry) {
- list_del(&lst_itr->list_entry);
- devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ list_for_each_entry_safe(rg_entry, tmprg_entry,
+ &recps[i].rg_list, l_entry) {
+ list_del(&rg_entry->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), rg_entry);
+ }
+
+ if (recps[i].adv_rule) {
+ struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
+ struct ice_adv_fltr_mgmt_list_entry *lst_itr;
+
+ mutex_destroy(&recps[i].filt_rule_lock);
+ list_for_each_entry_safe(lst_itr, tmp_entry,
+ &recps[i].filt_rules,
+ list_entry) {
+ list_del(&lst_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ }
+ } else {
+ struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
+
+ mutex_destroy(&recps[i].filt_rule_lock);
+ list_for_each_entry_safe(lst_itr, tmp_entry,
+ &recps[i].filt_rules,
+ list_entry) {
+ list_del(&lst_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ }
}
+ if (recps[i].root_buf)
+ devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
}
ice_rm_all_sw_replay_rule_info(hw);
devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
@@ -4767,6 +4818,64 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
}
/**
+ * ice_aq_set_gpio
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO number of the GPIO that needs to be set
+ * @value: SW provided IO value to set in the LSB
+ * @cd: pointer to command details structure or NULL
+ *
+ * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
+ */
+int
+ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_gpio *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+ cmd->gpio_val = value ? 1 : 0;
+
+ return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
+}
+
+/**
+ * ice_aq_get_gpio
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO number of the GPIO whose value is to be read
+ * @value: IO value read
+ * @cd: pointer to command details structure or NULL
+ *
+ * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
+ * the topology
+ */
+int
+ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_gpio *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (status)
+ return ice_status_to_errno(status);
+
+ *value = !!cmd->gpio_val;
+ return 0;
+}
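
A minimal user-space sketch of the gpio_val convention shared by ice_aq_set_gpio() and ice_aq_get_gpio() above: the pin state travels in the least significant bit of the command's gpio_val byte, written as value ? 1 : 0 and read back as !!gpio_val. The struct below is a hypothetical stand-in for the AQ payload, not the real ice_aqc_gpio layout.

#include <stdbool.h>
#include <stdio.h>

struct mock_gpio_cmd {
        unsigned short gpio_ctrl_handle;
        unsigned char gpio_num;
        unsigned char gpio_val;
};

static void mock_set_gpio(struct mock_gpio_cmd *cmd, bool value)
{
        cmd->gpio_val = value ? 1 : 0;  /* only the LSB is meaningful */
}

static bool mock_get_gpio(const struct mock_gpio_cmd *cmd)
{
        return !!cmd->gpio_val;         /* normalize whatever FW wrote back */
}

int main(void)
{
        struct mock_gpio_cmd cmd = { .gpio_ctrl_handle = 0, .gpio_num = 3 };

        mock_set_gpio(&cmd, true);
        printf("pin %d -> %d\n", cmd.gpio_num, mock_get_gpio(&cmd));
        return 0;
}
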
+
+/**
* ice_fw_supports_link_override
* @hw: pointer to the hardware structure
*
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index fb16070f02e2..65c1b3244264 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -183,6 +183,7 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
+bool ice_is_e810t(struct ice_hw *hw);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
@@ -192,6 +193,12 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
int
ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
u32 *value, struct ice_sq_cd *cd);
+int
+ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
+ struct ice_sq_cd *cd);
+int
+ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 849fcf605479..241427cd9bc0 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_common.h"
+#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_dcb.h"
@@ -736,6 +737,45 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
}
/**
+ * ice_aq_set_pfc_mode - Set PFC mode
+ * @hw: pointer to the HW struct
+ * @pfc_mode: value of PFC mode to set
+ * @cd: pointer to command details structure or NULL
+ *
+ * This AQ call configures the PFC mode to DSCP-based PFC mode or
+ * VLAN-based PFC (0x0303)
+ */
+int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_query_pfc_mode *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
+ return -EINVAL;
+
+ cmd = &desc.params.set_query_pfc_mode;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_pfc_mode);
+
+ cmd->pfc_mode = pfc_mode;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (status)
+ return ice_status_to_errno(status);
+
+ /* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is
+ * disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has
+ * been executed, check if cmd->pfc_mode is what was requested. If not,
+ * return an error.
+ */
+ if (cmd->pfc_mode != pfc_mode)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
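
A user-space sketch of the write-back check performed by ice_aq_set_pfc_mode() above: firmware echoes the accepted mode into the command buffer, or writes back 0 when DCB is disabled, so a mismatch after the AQ call means the request was not honored. mock_fw_set_pfc_mode() is a hypothetical stand-in for the firmware side.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MOCK_PFC_VLAN_BASED 0
#define MOCK_PFC_DSCP_BASED 1

/* hypothetical firmware side: keeps the requested mode, or writes 0
 * when DCB is disabled
 */
static void mock_fw_set_pfc_mode(unsigned char *pfc_mode, bool dcb_enabled)
{
        if (!dcb_enabled)
                *pfc_mode = 0;
}

static int mock_set_pfc_mode(unsigned char requested, bool dcb_enabled)
{
        unsigned char cmd = requested;

        mock_fw_set_pfc_mode(&cmd, dcb_enabled);
        if (cmd != requested)
                return -EOPNOTSUPP;     /* FW did not honor the request */
        return 0;
}

int main(void)
{
        printf("DCB on:  %d\n", mock_set_pfc_mode(MOCK_PFC_DSCP_BASED, true));
        printf("DCB off: %d\n", mock_set_pfc_mode(MOCK_PFC_DSCP_BASED, false));
        return 0;
}
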
+
+/**
* ice_cee_to_dcb_cfg
* @cee_cfg: pointer to CEE configuration struct
* @pi: port information structure
@@ -1207,7 +1247,140 @@ ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv,
}
/**
- * ice_add_dcb_tlv - Add all IEEE TLVs
+ * ice_add_dscp_up_tlv - Prepare DSCP to UP TLV
+ * @tlv: location to build the TLV data
+ * @dcbcfg: location of data to convert to TLV
+ */
+static void
+ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+ int i;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_UP_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_DSCP2UP);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* bytes 0 - 63 - IPv4 DSCP2UP LUT */
+ for (i = 0; i < ICE_DSCP_NUM_VAL; i++) {
+ /* IPv4 mapping */
+ buf[i] = dcbcfg->dscp_map[i];
+ /* IPv6 mapping */
+ buf[i + ICE_DSCP_IPV6_OFFSET] = dcbcfg->dscp_map[i];
+ }
+
+ /* byte 64 - IPv4 untagged traffic */
+ buf[i] = 0;
+
+ /* byte 144 - IPv6 untagged traffic */
+ buf[i + ICE_DSCP_IPV6_OFFSET] = 0;
+}
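
A user-space sketch of the DSCP-to-UP LUT layout built by ice_add_dscp_up_tlv() above: 64 IPv4 entries at offset 0, the same 64 entries mirrored for IPv6 at offset 80, and one untagged-traffic byte after each block (bytes 64 and 144). The constants mirror the patch; the mapping itself (UP = DSCP / 8) is illustrative only.

#include <stdio.h>
#include <string.h>

#define DSCP_NUM_VAL    64
#define DSCP_IPV6_OFF   80
#define DSCP_UP_TLV_LEN 148

int main(void)
{
        unsigned char dscp_map[DSCP_NUM_VAL];
        unsigned char buf[DSCP_UP_TLV_LEN];
        int i;

        /* illustrative mapping: user priority = DSCP value / 8 */
        for (i = 0; i < DSCP_NUM_VAL; i++)
                dscp_map[i] = i / 8;

        memset(buf, 0, sizeof(buf));
        for (i = 0; i < DSCP_NUM_VAL; i++) {
                buf[i] = dscp_map[i];                   /* IPv4 LUT */
                buf[i + DSCP_IPV6_OFF] = dscp_map[i];   /* IPv6 LUT */
        }
        buf[i] = 0;                     /* byte 64: IPv4 untagged traffic */
        buf[i + DSCP_IPV6_OFF] = 0;     /* byte 144: IPv6 untagged traffic */

        printf("DSCP 46 -> UP %d (IPv4), UP %d (IPv6)\n",
               buf[46], buf[46 + DSCP_IPV6_OFF]);
        return 0;
}
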
+
+#define ICE_BYTES_PER_TC 8
+/**
+ * ice_add_dscp_enf_tlv - Prepare DSCP Enforcement TLV
+ * @tlv: location to build the TLV data
+ */
+static void
+ice_add_dscp_enf_tlv(struct ice_lldp_org_tlv *tlv)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_ENF_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_ENFORCE);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* Allow all DSCP values to be valid for all TC's (IPv4 and IPv6) */
+ memset(buf, 0, 2 * (ICE_MAX_TRAFFIC_CLASS * ICE_BYTES_PER_TC));
+}
+
+/**
+ * ice_add_dscp_tc_bw_tlv - Prepare DSCP BW for TC TLV
+ * @tlv: location to build the TLV data
+ * @dcbcfg: location of the data to convert to TLV
+ */
+static void
+ice_add_dscp_tc_bw_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u8 offset = 0;
+ u16 typelen;
+ int i;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_TC_BW_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_TCBW);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* First octet after subtype
+ * ----------------------------
+ * | RSV | CBS | RSV | Max TCs |
+ * | 1b | 1b | 3b | 3b |
+ * ----------------------------
+ */
+ etscfg = &dcbcfg->etscfg;
+ buf[0] = etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M;
+
+ /* bytes 1 - 4 reserved */
+ offset = 5;
+
+ /* TC BW table
+ * bytes 0 - 7 for TC 0 - 7
+ *
+ * TSA Assignment table
+ * bytes 8 - 15 for TC 0 - 7
+ */
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ buf[offset] = etscfg->tcbwtable[i];
+ buf[offset + ICE_MAX_TRAFFIC_CLASS] = etscfg->tsatable[i];
+ offset++;
+ }
+}
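
A user-space sketch of the byte layout produced by ice_add_dscp_tc_bw_tlv() above: byte 0 carries Max TCs in its low three bits, bytes 1-4 are reserved, and the per-TC bandwidth and TSA tables occupy the following 8 + 8 bytes. All values here are illustrative.

#include <stdio.h>
#include <string.h>

#define NUM_TC          8
#define TCBW_OFFSET     5       /* byte 0 = Max TCs, bytes 1-4 reserved */

int main(void)
{
        unsigned char tcbw[NUM_TC] = { 50, 30, 20, 0, 0, 0, 0, 0 };
        unsigned char tsa[NUM_TC]  = {  2,  2,  2, 2, 2, 2, 2, 2 };
        unsigned char buf[TCBW_OFFSET + 2 * NUM_TC];
        unsigned char maxtcs = 4;
        int i;

        memset(buf, 0, sizeof(buf));
        buf[0] = maxtcs & 0x7;          /* Max TCs in the low 3 bits */
        for (i = 0; i < NUM_TC; i++) {
                buf[TCBW_OFFSET + i] = tcbw[i];         /* TC BW table */
                buf[TCBW_OFFSET + NUM_TC + i] = tsa[i]; /* TSA table   */
        }

        printf("TC0: %d%% BW, TSA %d\n",
               buf[TCBW_OFFSET], buf[TCBW_OFFSET + NUM_TC]);
        return 0;
}
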
+
+/**
+ * ice_add_dscp_pfc_tlv - Prepare DSCP PFC TLV
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store which holds the PFC CFG data
+ */
+static void
+ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_PFC_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_PFC);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ buf[0] = dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcena & 0xF;
+}
+
+/**
+ * ice_add_dcb_tlv - Add all IEEE or DSCP TLVs
* @tlv: Fill TLV data in IEEE format
* @dcbcfg: Local store which holds the DCB Config
* @tlvid: Type of IEEE TLV
@@ -1218,21 +1391,41 @@ static void
ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg,
u16 tlvid)
{
- switch (tlvid) {
- case ICE_IEEE_TLV_ID_ETS_CFG:
- ice_add_ieee_ets_tlv(tlv, dcbcfg);
- break;
- case ICE_IEEE_TLV_ID_ETS_REC:
- ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
- break;
- case ICE_IEEE_TLV_ID_PFC_CFG:
- ice_add_ieee_pfc_tlv(tlv, dcbcfg);
- break;
- case ICE_IEEE_TLV_ID_APP_PRI:
- ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
- break;
- default:
- break;
+ if (dcbcfg->pfc_mode == ICE_QOS_MODE_VLAN) {
+ switch (tlvid) {
+ case ICE_IEEE_TLV_ID_ETS_CFG:
+ ice_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_ETS_REC:
+ ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_PFC_CFG:
+ ice_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_APP_PRI:
+ ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* pfc_mode == ICE_QOS_MODE_DSCP */
+ switch (tlvid) {
+ case ICE_TLV_ID_DSCP_UP:
+ ice_add_dscp_up_tlv(tlv, dcbcfg);
+ break;
+ case ICE_TLV_ID_DSCP_ENF:
+ ice_add_dscp_enf_tlv(tlv);
+ break;
+ case ICE_TLV_ID_DSCP_TC_BW:
+ ice_add_dscp_tc_bw_tlv(tlv, dcbcfg);
+ break;
+ case ICE_TLV_ID_DSCP_TO_PFC:
+ ice_add_dscp_pfc_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index d7e5e6178a21..9b6f87a889a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -22,6 +22,14 @@
#define ICE_CEE_DCBX_OUI 0x001B21
#define ICE_CEE_DCBX_TYPE 2
+
+#define ICE_DSCP_OUI 0xFFFFFF
+#define ICE_DSCP_SUBTYPE_DSCP2UP 0x41
+#define ICE_DSCP_SUBTYPE_ENFORCE 0x42
+#define ICE_DSCP_SUBTYPE_TCBW 0x43
+#define ICE_DSCP_SUBTYPE_PFC 0x44
+#define ICE_DSCP_IPV6_OFFSET 80
+
#define ICE_CEE_SUBTYPE_PG_CFG 2
#define ICE_CEE_SUBTYPE_PFC_CFG 3
#define ICE_CEE_SUBTYPE_APP_PRI 4
@@ -78,11 +86,20 @@
#define ICE_IEEE_TLV_ID_APP_PRI 6
#define ICE_TLV_ID_END_OF_LLDPPDU 7
#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
+#define ICE_TLV_ID_DSCP_UP 3
+#define ICE_TLV_ID_DSCP_ENF 4
+#define ICE_TLV_ID_DSCP_TC_BW 5
+#define ICE_TLV_ID_DSCP_TO_PFC 6
#define ICE_IEEE_ETS_TLV_LEN 25
#define ICE_IEEE_PFC_TLV_LEN 6
#define ICE_IEEE_APP_TLV_LEN 11
+#define ICE_DSCP_UP_TLV_LEN 148
+#define ICE_DSCP_ENF_TLV_LEN 132
+#define ICE_DSCP_TC_BW_TLV_LEN 25
+#define ICE_DSCP_PFC_TLV_LEN 6
+
/* IEEE 802.1AB LLDP Organization specific TLV */
struct ice_lldp_org_tlv {
__be16 typelen;
@@ -120,6 +137,7 @@ struct ice_cee_app_prio {
u8 prio_map;
} __packed;
+int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 926cf748c5ec..a72e18320a22 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -5,52 +5,10 @@
#include "ice_dcb_nl.h"
/**
- * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
- * @vsi: the VSI being configured
- * @ena_tc: TC map to be enabled
- */
-void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
-{
- struct net_device *netdev = vsi->netdev;
- struct ice_pf *pf = vsi->back;
- struct ice_dcbx_cfg *dcbcfg;
- u8 netdev_tc;
- int i;
-
- if (!netdev)
- return;
-
- if (!ena_tc) {
- netdev_reset_tc(netdev);
- return;
- }
-
- if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
- return;
-
- dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
-
- ice_for_each_traffic_class(i)
- if (vsi->tc_cfg.ena_tc & BIT(i))
- netdev_set_tc_queue(netdev,
- vsi->tc_cfg.tc_info[i].netdev_tc,
- vsi->tc_cfg.tc_info[i].qcount_tx,
- vsi->tc_cfg.tc_info[i].qoffset);
-
- for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
- u8 ets_tc = dcbcfg->etscfg.prio_table[i];
-
- /* Get the mapped netdev TC# for the UP */
- netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
- netdev_set_prio_tc_map(netdev, i, netdev_tc);
- }
-}
-
-/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
* @dcbcfg: DCB config to evaluate for enabled TCs
*/
-u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
+static u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
{
u8 i, num_tc, ena_tc = 1;
@@ -179,6 +137,67 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
}
/**
+ * ice_get_first_droptc - returns index of the first drop TC
+ * @vsi: used to find the first droptc
+ *
+ * This function returns the value of first_droptc.
+ * When DCB is enabled, the first drop TC is derived from the enabled_tc
+ * and PFC enabled bits. Otherwise this function returns 0, as there is
+ * only one TC without DCB (TC0).
+ */
+static u8 ice_get_first_droptc(struct ice_vsi *vsi)
+{
+ struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ u8 num_tc, ena_tc_map, pfc_ena_map;
+ u8 i;
+
+ num_tc = ice_dcb_get_num_tc(cfg);
+
+ /* get bitmap of enabled TCs */
+ ena_tc_map = ice_dcb_get_ena_tc(cfg);
+
+ /* get bitmap of PFC enabled TCs */
+ pfc_ena_map = cfg->pfc.pfcena;
+
+ /* get first TC that is not PFC enabled */
+ for (i = 0; i < num_tc; i++) {
+ if ((ena_tc_map & BIT(i)) && (!(pfc_ena_map & BIT(i)))) {
+ dev_dbg(dev, "first drop tc = %d\n", i);
+ return i;
+ }
+ }
+
+ dev_dbg(dev, "first drop tc = 0\n");
+ return 0;
+}
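
A user-space sketch of the selection logic in ice_get_first_droptc() above: the first TC that is enabled but not PFC-protected is where lossy (drop) traffic lands; if every enabled TC is PFC-protected, TC0 is reported.

#include <stdio.h>

static unsigned char first_droptc(unsigned char num_tc,
                                  unsigned char ena_tc_map,
                                  unsigned char pfc_ena_map)
{
        unsigned char i;

        for (i = 0; i < num_tc; i++)
                if ((ena_tc_map & (1u << i)) && !(pfc_ena_map & (1u << i)))
                        return i;       /* enabled but not PFC-protected */
        return 0;
}

int main(void)
{
        /* TCs 0-3 enabled; PFC protects TC0 and TC1 -> first drop TC is 2 */
        printf("first drop TC = %d\n", first_droptc(4, 0x0F, 0x03));
        return 0;
}
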
+
+/**
+ * ice_vsi_set_dcb_tc_cfg - Set VSI's TC based on DCB configuration
+ * @vsi: pointer to the VSI instance
+ */
+void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
+{
+ struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
+ vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
+ break;
+ case ICE_VSI_CHNL:
+ vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi));
+ vsi->tc_cfg.numtc = 1;
+ break;
+ case ICE_VSI_CTRL:
+ case ICE_VSI_LB:
+ default:
+ vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
+ vsi->tc_cfg.numtc = 1;
+ }
+}
+
+/**
* ice_dcb_get_tc - Get the TC associated with the queue
* @vsi: ptr to the VSI
* @queue_index: queue number associated with VSI
@@ -194,17 +213,18 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index)
*/
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
{
- struct ice_ring *tx_ring, *rx_ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
u16 qoffset, qcount;
int i, n;
if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
/* Reset the TC information */
- for (i = 0; i < vsi->num_txq; i++) {
+ ice_for_each_txq(vsi, i) {
tx_ring = vsi->tx_rings[i];
tx_ring->dcb_tc = 0;
}
- for (i = 0; i < vsi->num_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
rx_ring = vsi->rx_rings[i];
rx_ring->dcb_tc = 0;
}
@@ -217,11 +237,68 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
qoffset = vsi->tc_cfg.tc_info[n].qoffset;
qcount = vsi->tc_cfg.tc_info[n].qcount_tx;
- for (i = qoffset; i < (qoffset + qcount); i++) {
- tx_ring = vsi->tx_rings[i];
- rx_ring = vsi->rx_rings[i];
- tx_ring->dcb_tc = n;
- rx_ring->dcb_tc = n;
+ for (i = qoffset; i < (qoffset + qcount); i++)
+ vsi->tx_rings[i]->dcb_tc = n;
+
+ qcount = vsi->tc_cfg.tc_info[n].qcount_rx;
+ for (i = qoffset; i < (qoffset + qcount); i++)
+ vsi->rx_rings[i]->dcb_tc = n;
+ }
+ /* applicable only if "all_enatc" is set, which will be set from
+ * setup_tc method as part of configuring channels
+ */
+ if (vsi->all_enatc) {
+ u8 first_droptc = ice_get_first_droptc(vsi);
+
+ /* When DCB is configured, TC for ADQ queues (which are really
+ * PF queues) should be the first drop TC of the main VSI
+ */
+ ice_for_each_chnl_tc(n) {
+ if (!(vsi->all_enatc & BIT(n)))
+ break;
+
+ qoffset = vsi->mqprio_qopt.qopt.offset[n];
+ qcount = vsi->mqprio_qopt.qopt.count[n];
+ for (i = qoffset; i < (qoffset + qcount); i++) {
+ vsi->tx_rings[i]->dcb_tc = first_droptc;
+ vsi->rx_rings[i]->dcb_tc = first_droptc;
+ }
+ }
+ }
+}
+
+/**
+ * ice_dcb_ena_dis_vsi - disable certain VSIs for DCB config/reconfig
+ * @pf: pointer to the PF instance
+ * @ena: true to enable VSIs, false to disable
+ * @locked: true if caller holds RTNL lock, false otherwise
+ *
+ * Before a new DCB configuration can be applied, VSIs of type PF, SWITCHDEV
+ * and CHNL need to be brought down. Following completion of DCB configuration
+ * the VSIs that were downed need to be brought up again. This helper function
+ * does both.
+ */
+static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked)
+{
+ int i;
+
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vsi = pf->vsi[i];
+
+ if (!vsi)
+ continue;
+
+ switch (vsi->type) {
+ case ICE_VSI_CHNL:
+ case ICE_VSI_SWITCHDEV_CTRL:
+ case ICE_VSI_PF:
+ if (ena)
+ ice_ena_vsi(vsi, locked);
+ else
+ ice_dis_vsi(vsi, locked);
+ break;
+ default:
+ continue;
}
}
}
@@ -330,7 +407,9 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
*/
if (!locked)
rtnl_lock();
- ice_dis_vsi(pf_vsi, true);
+
+ /* disable VSIs affected by DCB changes */
+ ice_dcb_ena_dis_vsi(pf, false, true);
memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
@@ -358,7 +437,8 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
ice_pf_dcb_recfg(pf);
out:
- ice_ena_vsi(pf_vsi, true);
+ /* enable previously downed VSIs */
+ ice_dcb_ena_dis_vsi(pf, true, true);
if (!locked)
rtnl_unlock();
free_cfg:
@@ -544,7 +624,7 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
* @ets_willing: configure ETS willing
* @locked: was this function called with RTNL held
*/
-static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
+int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
{
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *dcbcfg;
@@ -673,6 +753,8 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
tc_map = ICE_DFLT_TRAFFIC_CLASS;
ice_dcb_noncontig_cfg(pf);
}
+ } else if (vsi->type == ICE_VSI_CHNL) {
+ tc_map = BIT(ice_get_first_droptc(vsi));
} else {
tc_map = ICE_DFLT_TRAFFIC_CLASS;
}
@@ -683,6 +765,12 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
vsi->idx);
continue;
}
+ /* no need to proceed with remaining cfg if it is CHNL
+ * or switchdev VSI
+ */
+ if (vsi->type == ICE_VSI_CHNL ||
+ vsi->type == ICE_VSI_SWITCHDEV_CTRL)
+ continue;
ice_vsi_map_rings_to_vectors(vsi);
if (vsi->type == ICE_VSI_PF)
@@ -726,6 +814,11 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
/* FW LLDP is disabled, activate SW DCBX/LLDP mode */
dev_info(dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
+ err = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_VLAN_BASED_PFC,
+ NULL);
+ if (err)
+ dev_info(dev, "Failed to set VLAN PFC mode\n");
+
err = ice_dcb_sw_dflt_cfg(pf, true, locked);
if (err) {
dev_err(dev, "Failed to set local DCB config %d\n",
@@ -814,7 +907,7 @@ void ice_update_dcb_stats(struct ice_pf *pf)
* tag will already be configured with the correct ID and priority bits
*/
void
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
@@ -851,7 +944,6 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_dcbx_cfg tmp_dcbx_cfg;
bool need_reconfig = false;
struct ice_port_info *pi;
- struct ice_vsi *pf_vsi;
u8 mib_type;
int ret;
@@ -927,14 +1019,9 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
- pf_vsi = ice_get_main_vsi(pf);
- if (!pf_vsi) {
- dev_dbg(dev, "PF VSI doesn't exist\n");
- goto out;
- }
-
rtnl_lock();
- ice_dis_vsi(pf_vsi, true);
+ /* disable VSIs affected by DCB changes */
+ ice_dcb_ena_dis_vsi(pf, false, true);
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
@@ -945,7 +1032,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* changes in configuration update VSI */
ice_pf_dcb_recfg(pf);
- ice_ena_vsi(pf_vsi, true);
+ /* enable previously downed VSIs */
+ ice_dcb_ena_dis_vsi(pf, true, true);
unlock_rtnl:
rtnl_unlock();
out:
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 261b6e2ed7bc..4c421c842a13 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -15,7 +15,7 @@
#define ICE_DCB_HW_CHG 2 /* DCB configuration changed, no reset */
void ice_dcb_rebuild(struct ice_pf *pf);
-u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
+int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi);
bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue);
@@ -28,13 +28,11 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
void
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first);
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
-void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
-
/**
* ice_find_q_in_range
* @low: start of queue range for a TC i.e. offset of TC
@@ -49,9 +47,9 @@ static inline bool ice_find_q_in_range(u16 low, u16 high, unsigned int tx_q)
}
static inline void
-ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
+ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc)
{
- tlan_ctx->cgd_num = ring->dcb_tc;
+ tlan_ctx->cgd_num = dcb_tc;
}
static inline bool ice_is_dcb_active(struct ice_pf *pf)
@@ -59,9 +57,21 @@ static inline bool ice_is_dcb_active(struct ice_pf *pf)
return (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) ||
test_bit(ICE_FLAG_DCB_ENA, pf->flags));
}
+
+static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
+{
+ return pf->hw.port_info->qos_cfg.local_dcbx_cfg.pfc_mode;
+}
+
#else
static inline void ice_dcb_rebuild(struct ice_pf *pf) { }
+static inline void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
+{
+ vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
+ vsi->tc_cfg.numtc = 1;
+}
+
static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
{
return ICE_DFLT_TRAFFIC_CLASS;
@@ -95,7 +105,7 @@ ice_pf_dcb_cfg(struct ice_pf __always_unused *pf,
}
static inline int
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring __always_unused *tx_ring,
struct ice_tx_buf __always_unused *first)
{
return 0;
@@ -113,12 +123,16 @@ ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
return false;
}
+static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
+{
+ return 0;
+}
+
static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { }
static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }
static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) { }
-static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { }
+static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { }
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 4180f1f35fb8..7fdeb411b6df 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -64,7 +64,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_dcbx_cfg *new_cfg;
int bwcfg = 0, bwrec = 0;
- int err, i, max_tc = 0;
+ int err, i;
if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -80,13 +80,14 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
new_cfg->etscfg.tcbwtable[i] = ets->tc_tx_bw[i];
bwcfg += ets->tc_tx_bw[i];
new_cfg->etscfg.tsatable[i] = ets->tc_tsa[i];
- new_cfg->etscfg.prio_table[i] = ets->prio_tc[i];
- if (ets->prio_tc[i] > max_tc)
- max_tc = ets->prio_tc[i];
+ if (new_cfg->pfc_mode == ICE_QOS_MODE_VLAN) {
+ /* in DSCP mode up->tc mapping cannot change */
+ new_cfg->etscfg.prio_table[i] = ets->prio_tc[i];
+ new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
+ }
new_cfg->etsrec.tcbwtable[i] = ets->tc_reco_bw[i];
bwrec += ets->tc_reco_bw[i];
new_cfg->etsrec.tsatable[i] = ets->tc_reco_tsa[i];
- new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
}
if (ice_dcb_bwchk(pf, new_cfg)) {
@@ -94,12 +95,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
goto ets_out;
}
- max_tc = pf->hw.func_caps.common_cap.maxtc;
-
- new_cfg->etscfg.maxtcs = max_tc;
-
- if (!bwcfg)
- new_cfg->etscfg.tcbwtable[0] = 100;
+ new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;
if (!bwrec)
new_cfg->etsrec.tcbwtable[0] = 100;
@@ -173,10 +169,13 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
pf->dcbx_cap = mode;
qos_cfg = &pf->hw.port_info->qos_cfg;
- if (mode & DCB_CAP_DCBX_VER_CEE)
+ if (mode & DCB_CAP_DCBX_VER_CEE) {
+ if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
+ return ICE_DCB_NO_HW_CHG;
qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
- else
+ } else {
qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
+ }
dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
return ICE_DCB_HW_CHG_RST;
@@ -683,6 +682,8 @@ ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg,
return false;
}
+#define ICE_BYTES_PER_DSCP_VAL 8
+
/**
* ice_dcbnl_setapp - set local IEEE App config
* @netdev: relevant netdev struct
@@ -693,42 +694,117 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_dcb_app_priority_table new_app;
struct ice_dcbx_cfg *old_cfg, *new_cfg;
+ u8 max_tc;
int ret;
- if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
- !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ /* ONLY DSCP APP TLVs have operational significance */
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
return -EINVAL;
- mutex_lock(&pf->tc_mutex);
+ /* only allow APP TLVs in SW Mode */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
+ netdev_err(netdev, "can't do DSCP QoS when FW DCB agent active\n");
+ return -EINVAL;
+ }
- new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
- old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ if (!ice_is_feature_supported(pf, ICE_F_DSCP))
+ return -EOPNOTSUPP;
- if (old_cfg->numapps == ICE_DCBX_MAX_APPS) {
- ret = -EINVAL;
- goto setapp_out;
+ if (app->protocol >= ICE_DSCP_NUM_VAL) {
+ netdev_err(netdev, "DSCP value 0x%04X out of range\n",
+ app->protocol);
+ return -EINVAL;
+ }
+
+ max_tc = pf->hw.func_caps.common_cap.maxtc;
+ if (app->priority >= max_tc) {
+ netdev_err(netdev, "TC %d out of range, max TC %d\n",
+ app->priority, max_tc);
+ return -EINVAL;
}
+ /* grab TC mutex */
+ mutex_lock(&pf->tc_mutex);
+
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+
ret = dcb_ieee_setapp(netdev, app);
if (ret)
goto setapp_out;
+ if (test_and_set_bit(app->protocol, new_cfg->dscp_mapped)) {
+ netdev_err(netdev, "DSCP value 0x%04X already user mapped\n",
+ app->protocol);
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ netdev_err(netdev, "Failed to delete re-mapping TLV\n");
+ ret = -EINVAL;
+ goto setapp_out;
+ }
+
new_app.selector = app->selector;
new_app.prot_id = app->protocol;
new_app.priority = app->priority;
- if (ice_dcbnl_find_app(old_cfg, &new_app)) {
- ret = 0;
- goto setapp_out;
- }
+ /* If port is not in DSCP mode, need to set */
+ if (old_cfg->pfc_mode == ICE_QOS_MODE_VLAN) {
+ int i, j;
+
+ /* set DSCP mode */
+ ret = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_DSCP_BASED_PFC,
+ NULL);
+ if (ret) {
+ netdev_err(netdev, "Failed to set DSCP PFC mode %d\n",
+ ret);
+ goto setapp_out;
+ }
+ netdev_info(netdev, "Switched QoS to L3 DSCP mode\n");
+
+ new_cfg->pfc_mode = ICE_QOS_MODE_DSCP;
+
+ /* set default DSCP QoS values */
+ new_cfg->etscfg.willing = 0;
+ new_cfg->pfc.pfccap = max_tc;
+ new_cfg->pfc.willing = 0;
+
+ for (i = 0; i < max_tc; i++)
+ for (j = 0; j < ICE_BYTES_PER_DSCP_VAL; j++) {
+ int dscp, offset;
+
+ dscp = (i * max_tc) + j;
+ offset = max_tc * ICE_BYTES_PER_DSCP_VAL;
+
+ new_cfg->dscp_map[dscp] = i;
+ /* if fewer than 8 TCs are supported */
+ if (max_tc < ICE_MAX_TRAFFIC_CLASS)
+ new_cfg->dscp_map[dscp + offset] = i;
+ }
+
+ new_cfg->etscfg.tcbwtable[0] = 100;
+ new_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+ new_cfg->etscfg.prio_table[0] = 0;
+
+ for (i = 1; i < max_tc; i++) {
+ new_cfg->etscfg.tcbwtable[i] = 0;
+ new_cfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS;
+ new_cfg->etscfg.prio_table[i] = i;
+ }
+ } /* end of switching to DSCP mode */
+
+ /* apply new mapping for this DSCP value */
+ new_cfg->dscp_map[app->protocol] = app->priority;
new_cfg->app[new_cfg->numapps++] = new_app;
+
ret = ice_pf_dcb_cfg(pf, new_cfg, true);
/* return of zero indicates new cfg applied */
if (ret == ICE_DCB_HW_CHG_RST)
ice_dcbnl_devreset(netdev);
- if (ret == ICE_DCB_NO_HW_CHG)
- ret = ICE_DCB_HW_CHG_RST;
+ else
+ ret = ICE_DCB_NO_HW_CHG;
setapp_out:
mutex_unlock(&pf->tc_mutex);
@@ -749,22 +825,21 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
unsigned int i, j;
int ret = 0;
- if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
+ netdev_err(netdev, "can't delete DSCP netlink app when FW DCB agent is active\n");
return -EINVAL;
+ }
mutex_lock(&pf->tc_mutex);
old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- if (old_cfg->numapps <= 1)
- goto delapp_out;
-
ret = dcb_ieee_delapp(netdev, app);
if (ret)
goto delapp_out;
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
- for (i = 1; i < new_cfg->numapps; i++) {
+ for (i = 0; i < new_cfg->numapps; i++) {
if (app->selector == new_cfg->app[i].selector &&
app->protocol == new_cfg->app[i].prot_id &&
app->priority == new_cfg->app[i].priority) {
@@ -784,17 +859,58 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
new_cfg->numapps--;
for (j = i; j < new_cfg->numapps; j++) {
- new_cfg->app[i].selector = old_cfg->app[j + 1].selector;
- new_cfg->app[i].prot_id = old_cfg->app[j + 1].prot_id;
- new_cfg->app[i].priority = old_cfg->app[j + 1].priority;
+ new_cfg->app[j].selector = old_cfg->app[j + 1].selector;
+ new_cfg->app[j].prot_id = old_cfg->app[j + 1].prot_id;
+ new_cfg->app[j].priority = old_cfg->app[j + 1].priority;
}
- ret = ice_pf_dcb_cfg(pf, new_cfg, true);
- /* return of zero indicates new cfg applied */
+ /* if not a DSCP APP TLV or DSCP is not supported, we are done */
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ !ice_is_feature_supported(pf, ICE_F_DSCP)) {
+ ret = ICE_DCB_HW_CHG;
+ goto delapp_out;
+ }
+
+ /* if DSCP TLV, then need to address change in mapping */
+ clear_bit(app->protocol, new_cfg->dscp_mapped);
+ /* remap this DSCP value to default value */
+ new_cfg->dscp_map[app->protocol] = app->protocol %
+ ICE_BYTES_PER_DSCP_VAL;
+
+ /* if the last DSCP mapping just got deleted, need to switch
+ * to L2 VLAN QoS mode
+ */
+ if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) &&
+ new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) {
+ ret = ice_aq_set_pfc_mode(&pf->hw,
+ ICE_AQC_PFC_VLAN_BASED_PFC,
+ NULL);
+ if (ret) {
+ netdev_info(netdev, "Failed to set VLAN PFC mode %d\n",
+ ret);
+ goto delapp_out;
+ }
+ netdev_info(netdev, "Switched QoS to L2 VLAN mode\n");
+
+ new_cfg->pfc_mode = ICE_QOS_MODE_VLAN;
+
+ ret = ice_dcb_sw_dflt_cfg(pf, true, true);
+ } else {
+ ret = ice_pf_dcb_cfg(pf, new_cfg, true);
+ }
+
+ /* return of ICE_DCB_HW_CHG_RST indicates new cfg applied
+ * and reset needs to be performed
+ */
if (ret == ICE_DCB_HW_CHG_RST)
ice_dcbnl_devreset(netdev);
+
+ /* if the change was not significant enough to actually call
+ * the reconfiguration flow, we still need to tell the caller that
+ * their request was successfully handled
+ */
if (ret == ICE_DCB_NO_HW_CHG)
- ret = ICE_DCB_HW_CHG_RST;
+ ret = ICE_DCB_HW_CHG;
delapp_out:
mutex_unlock(&pf->tc_mutex);
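
A user-space sketch of the DSCP teardown logic in ice_dcbnl_delapp() above: a deleted mapping reverts to the default user priority (DSCP % 8), and once the user-mapped bitmap is empty the port falls back to L2 VLAN QoS mode. The 64-bit mask below is a stand-in for the kernel's dscp_mapped bitmap.

#include <stdint.h>
#include <stdio.h>

#define DSCP_NUM_VAL 64

struct mock_cfg {
        uint64_t dscp_mapped;           /* one bit per user-mapped DSCP value */
        unsigned char dscp_map[DSCP_NUM_VAL];
        int dscp_mode;                  /* 1 = L3 DSCP QoS, 0 = L2 VLAN QoS */
};

static void mock_delapp(struct mock_cfg *cfg, unsigned int dscp)
{
        cfg->dscp_mapped &= ~(1ULL << dscp);
        cfg->dscp_map[dscp] = dscp % 8;         /* revert to default UP */

        if (!cfg->dscp_mapped && cfg->dscp_mode) {
                cfg->dscp_mode = 0;             /* last mapping gone */
                printf("switched QoS back to L2 VLAN mode\n");
        }
}

int main(void)
{
        struct mock_cfg cfg = { .dscp_mapped = 1ULL << 46, .dscp_mode = 1 };

        cfg.dscp_map[46] = 5;           /* user mapping: DSCP 46 -> UP 5 */
        mock_delapp(&cfg, 46);
        printf("DSCP 46 now maps to UP %d\n", cfg.dscp_map[46]);
        return 0;
}
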
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 9d8194671f6a..61dd2f18dee8 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -21,6 +21,12 @@
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
+#define ICE_SUBDEV_ID_E810T 0x000E
+#define ICE_SUBDEV_ID_E810T2 0x000F
+/* Intel(R) Ethernet Controller E810-XXV for backplane */
+#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
+/* Intel(R) Ethernet Controller E810-XXV for QSFP */
+#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
/* Intel(R) Ethernet Controller E810-XXV for SFP */
#define ICE_DEV_ID_E810_XXV_SFP 0x159B
/* Intel(R) Ethernet Connection E823-C for backplane */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index ab3d876fa624..b9bd9f9472f6 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -4,6 +4,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
+#include "ice_eswitch.h"
#include "ice_fw_update.h"
/* context for devlink info version reporting */
@@ -22,7 +23,7 @@ struct ice_info_ctx {
*
* If a version does not exist, for example when attempting to get the
* inactive version of flash when there is no pending update, the function
- * should leave the buffer in the ctx structure empty and return 0.
+ * should leave the buffer in the ctx structure empty.
*/
static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
@@ -35,7 +36,7 @@ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
}
-static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
enum ice_status status;
@@ -45,148 +46,127 @@ static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
/* We failed to locate the PBA, so just skip this entry */
dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n",
ice_stat_str(status));
-
- return 0;
}
-static int ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver,
- hw->fw_patch);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch);
}
-static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
+ hw->api_min_ver, hw->api_patch);
}
-static int ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
-
- return 0;
}
-static int ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_orom_info *orom = &pf->hw.flash.orom;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", orom->major, orom->build, orom->patch);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ orom->major, orom->build, orom->patch);
}
-static int
-ice_info_pending_orom_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_orom_ver(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_orom_info *orom = &ctx->pending_orom;
if (ctx->dev_caps.common_cap.nvm_update_pending_orom)
snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
orom->major, orom->build, orom->patch);
-
- return 0;
}
-static int ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &pf->hw.flash.nvm;
snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
-
- return 0;
}
-static int
-ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &ctx->pending_nvm;
if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
- snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x",
+ nvm->major, nvm->minor);
}
-static int ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &pf->hw.flash.nvm;
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
-
- return 0;
}
-static int
-ice_info_pending_eetrack(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &ctx->pending_nvm;
if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
-
- return 0;
}
-static int ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name);
-
- return 0;
}
-static int ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update,
- pkg->draft);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u",
+ pkg->major, pkg->minor, pkg->update, pkg->draft);
}
-static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id);
-
- return 0;
}
-static int ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &pf->hw.flash.netlist;
/* The netlist version fields are BCD formatted */
- snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor,
- netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
- netlist->cust_ver);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
+ netlist->major, netlist->minor,
+ netlist->type >> 16, netlist->type & 0xFFFF,
+ netlist->rev, netlist->cust_ver);
}
-static int ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &pf->hw.flash.netlist;
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
-
- return 0;
}
-static int
-ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &ctx->pending_netlist;
@@ -194,21 +174,18 @@ ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_
if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
netlist->major, netlist->minor,
- netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
- netlist->cust_ver);
-
- return 0;
+ netlist->type >> 16, netlist->type & 0xFFFF,
+ netlist->rev, netlist->cust_ver);
}
-static int
-ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &ctx->pending_netlist;
if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
-
- return 0;
}
#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
@@ -238,8 +215,8 @@ enum ice_version_type {
static const struct ice_devlink_version {
enum ice_version_type type;
const char *key;
- int (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
- int (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
+ void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
+ void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
} ice_devlink_versions[] = {
fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
@@ -351,24 +328,15 @@ static int ice_devlink_info_get(struct devlink *devlink,
memset(ctx->buf, 0, sizeof(ctx->buf));
- err = ice_devlink_versions[i].getter(pf, ctx);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
- goto out_free_ctx;
- }
+ ice_devlink_versions[i].getter(pf, ctx);
/* If the default getter doesn't report a version, use the
* fallback function. This is primarily useful in the case of
* "stored" versions that want to report the same value as the
* running version in the normal case of no pending update.
*/
- if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback) {
- err = ice_devlink_versions[i].fallback(pf, ctx);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
- goto out_free_ctx;
- }
- }
+ if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback)
+ ice_devlink_versions[i].fallback(pf, ctx);
/* Do not report missing versions */
if (ctx->buf[0] == '\0')
@@ -456,6 +424,8 @@ ice_devlink_flash_update(struct devlink *devlink,
static const struct devlink_ops ice_devlink_ops = {
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .eswitch_mode_get = ice_eswitch_mode_get,
+ .eswitch_mode_set = ice_eswitch_mode_set,
.info_get = ice_devlink_info_get,
.flash_update = ice_devlink_flash_update,
};
@@ -482,10 +452,8 @@ struct ice_pf *ice_allocate_pf(struct device *dev)
return NULL;
/* Add an action to teardown the devlink when unwinding the driver */
- if (devm_add_action(dev, ice_devlink_free, devlink)) {
- devlink_free(devlink);
+ if (devm_add_action_or_reset(dev, ice_devlink_free, devlink))
return NULL;
- }
return devlink_priv(devlink);
}
@@ -517,60 +485,115 @@ void ice_devlink_unregister(struct ice_pf *pf)
}
/**
- * ice_devlink_create_port - Create a devlink port for this VSI
- * @vsi: the VSI to create a port for
+ * ice_devlink_create_pf_port - Create a devlink port for this PF
+ * @pf: the PF to create a devlink port for
*
- * Create and register a devlink_port for this VSI.
+ * Create and register a devlink_port for this PF.
*
* Return: zero on success or an error code on failure.
*/
-int ice_devlink_create_port(struct ice_vsi *vsi)
+int ice_devlink_create_pf_port(struct ice_pf *pf)
{
struct devlink_port_attrs attrs = {};
- struct ice_port_info *pi;
+ struct devlink_port *devlink_port;
struct devlink *devlink;
+ struct ice_vsi *vsi;
struct device *dev;
- struct ice_pf *pf;
int err;
- /* Currently we only create devlink_port instances for PF VSIs */
- if (vsi->type != ICE_VSI_PF)
- return -EINVAL;
-
- pf = vsi->back;
- devlink = priv_to_devlink(pf);
dev = ice_pf_to_dev(pf);
- pi = pf->hw.port_info;
+
+ devlink_port = &pf->devlink_port;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return -EIO;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pi->lport;
- devlink_port_attrs_set(&vsi->devlink_port, &attrs);
- err = devlink_port_register(devlink, &vsi->devlink_port, vsi->idx);
+ attrs.phys.port_number = pf->hw.bus.func;
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(pf);
+
+ err = devlink_port_register(devlink, devlink_port, vsi->idx);
if (err) {
- dev_err(dev, "devlink_port_register failed: %d\n", err);
+ dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
+ pf->hw.pf_id, err);
return err;
}
- vsi->devlink_port_registered = true;
+ return 0;
+}
+
+/**
+ * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
+ * @pf: the PF to cleanup
+ *
+ * Unregisters the devlink_port structure associated with this PF.
+ */
+void ice_devlink_destroy_pf_port(struct ice_pf *pf)
+{
+ struct devlink_port *devlink_port;
+
+ devlink_port = &pf->devlink_port;
+
+ devlink_port_type_clear(devlink_port);
+ devlink_port_unregister(devlink_port);
+}
+
+/**
+ * ice_devlink_create_vf_port - Create a devlink port for this VF
+ * @vf: the VF to create a port for
+ *
+ * Create and register a devlink_port for this VF.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_vf_port(struct ice_vf *vf)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_pf *pf;
+ int err;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ vsi = ice_get_vf_vsi(vf);
+ devlink_port = &vf->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
+ attrs.pci_vf.pf = pf->hw.bus.func;
+ attrs.pci_vf.vf = vf->vf_id;
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(pf);
+
+ err = devlink_port_register(devlink, devlink_port, vsi->idx);
+ if (err) {
+ dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
return 0;
}
/**
- * ice_devlink_destroy_port - Destroy the devlink_port for this VSI
- * @vsi: the VSI to cleanup
+ * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
+ * @vf: the VF to cleanup
*
- * Unregisters the devlink_port structure associated with this VSI.
+ * Unregisters the devlink_port structure associated with this VF.
*/
-void ice_devlink_destroy_port(struct ice_vsi *vsi)
+void ice_devlink_destroy_vf_port(struct ice_vf *vf)
{
- if (!vsi->devlink_port_registered)
- return;
+ struct devlink_port *devlink_port;
- devlink_port_type_clear(&vsi->devlink_port);
- devlink_port_unregister(&vsi->devlink_port);
+ devlink_port = &vf->devlink_port;
- vsi->devlink_port_registered = false;
+ devlink_port_type_clear(devlink_port);
+ devlink_port_unregister(devlink_port);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index e721d7b0d627..b7f9551e4fc4 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -8,8 +8,10 @@ struct ice_pf *ice_allocate_pf(struct device *dev);
void ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
-int ice_devlink_create_port(struct ice_vsi *vsi);
-void ice_devlink_destroy_port(struct ice_vsi *vsi);
+int ice_devlink_create_pf_port(struct ice_pf *pf);
+void ice_devlink_destroy_pf_port(struct ice_pf *pf);
+int ice_devlink_create_vf_port(struct ice_vf *vf);
+void ice_devlink_destroy_vf_port(struct ice_vf *vf);
void ice_devlink_init_regions(struct ice_pf *pf);
void ice_devlink_destroy_regions(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
new file mode 100644
index 000000000000..6cb50653b18d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -0,0 +1,649 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_eswitch.h"
+#include "ice_fltr.h"
+#include "ice_repr.h"
+#include "ice_devlink.h"
+#include "ice_tc_lib.h"
+
+/**
+ * ice_eswitch_setup_env - configure switchdev HW filters
+ * @pf: pointer to PF struct
+ *
+ * This function adds the HW filter configuration specific to switchdev
+ * mode.
+ */
+static int ice_eswitch_setup_env(struct ice_pf *pf)
+{
+ struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_port_info *pi = pf->hw.port_info;
+ bool rule_added = false;
+
+ ice_vsi_manage_vlan_stripping(ctrl_vsi, false);
+
+ ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
+
+ if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI))
+ goto err_def_rx;
+
+ if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
+ if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
+ goto err_def_rx;
+ rule_added = true;
+ }
+
+ if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX))
+ goto err_def_tx;
+
+ if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
+ goto err_override_uplink;
+
+ if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
+ goto err_override_control;
+
+ if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id,
+ ICE_FLTR_TX,
+ ICE_SINGLE_ACT_LB_ENABLE))
+ goto err_update_action;
+
+ return 0;
+
+err_update_action:
+ ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
+err_override_control:
+ ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
+err_override_uplink:
+ ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
+err_def_tx:
+ if (rule_added)
+ ice_clear_dflt_vsi(uplink_vsi->vsw);
+err_def_rx:
+ ice_fltr_add_mac_and_broadcast(uplink_vsi,
+ uplink_vsi->port_info->mac.perm_addr,
+ ICE_FWD_TO_VSI);
+ return -ENODEV;
+}
+
+/**
+ * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
+ * @pf: pointer to PF struct
+ *
+ * In switchdev number of allocated Tx/Rx rings is equal.
+ *
+ * This function fills q_vectors structures associated with representor and
+ * move each ring pairs to port representor netdevs. Each port representor
+ * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to
+ * number of VFs.
+ */
+static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = pf->switchdev.control_vsi;
+ int q_id;
+
+ ice_for_each_txq(vsi, q_id) {
+ struct ice_repr *repr = pf->vf[q_id].repr;
+ struct ice_q_vector *q_vector = repr->q_vector;
+ struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
+
+ q_vector->vsi = vsi;
+ q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
+
+ q_vector->num_ring_tx = 1;
+ q_vector->tx.tx_ring = tx_ring;
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = NULL;
+ tx_ring->netdev = repr->netdev;
+ /* In switchdev mode, from the OS stack perspective, there is only
+ * one queue for a given netdev, so it needs to be indexed as 0.
+ */
+ tx_ring->q_index = 0;
+
+ q_vector->num_ring_rx = 1;
+ q_vector->rx.rx_ring = rx_ring;
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = NULL;
+ rx_ring->netdev = repr->netdev;
+ }
+}
+
+/**
+ * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
+ * @pf: pointer to PF struct
+ */
+static int ice_eswitch_setup_reprs(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ int max_vsi_num = 0;
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!vf->repr->dst) {
+ ice_fltr_add_mac_and_broadcast(vsi,
+ vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ goto err;
+ }
+
+ if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
+ ice_fltr_add_mac_and_broadcast(vsi,
+ vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ metadata_dst_free(vf->repr->dst);
+ goto err;
+ }
+
+ if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) {
+ ice_fltr_add_mac_and_broadcast(vsi,
+ vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ metadata_dst_free(vf->repr->dst);
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ goto err;
+ }
+
+ if (max_vsi_num < vsi->vsi_num)
+ max_vsi_num = vsi->vsi_num;
+
+ netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
+ NAPI_POLL_WEIGHT);
+
+ netif_keep_dst(vf->repr->netdev);
+ }
+
+ kfree(ctrl_vsi->target_netdevs);
+
+ ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1,
+ sizeof(*ctrl_vsi->target_netdevs),
+ GFP_KERNEL);
+ if (!ctrl_vsi->target_netdevs)
+ goto err;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_repr *repr = pf->vf[i].repr;
+ struct ice_vsi *vsi = repr->src_vsi;
+ struct metadata_dst *dst;
+
+ ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev;
+
+ dst = repr->dst;
+ dst->u.port_info.port_id = vsi->vsi_num;
+ dst->u.port_info.lower_dev = repr->netdev;
+ ice_repr_set_traffic_vsi(repr, ctrl_vsi);
+ }
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--) {
+ struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ metadata_dst_free(vf->repr->dst);
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * ice_eswitch_release_reprs - clear port representor VSIs configuration
+ * @pf: pointer to PF struct
+ * @ctrl_vsi: pointer to switchdev control VSI
+ */
+static void
+ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+{
+ int i;
+
+ kfree(ctrl_vsi->target_netdevs);
+ ice_for_each_vf(pf, i) {
+ struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ metadata_dst_free(vf->repr->dst);
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+
+ netif_napi_del(&vf->repr->q_vector->napi);
+ }
+}
+
+/**
+ * ice_eswitch_update_repr - reconfigure VF port representor
+ * @vsi: VF VSI for which port representor is configured
+ */
+void ice_eswitch_update_repr(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_repr *repr;
+ struct ice_vf *vf;
+ int ret;
+
+ if (!ice_is_switchdev_running(pf))
+ return;
+
+ vf = &pf->vf[vsi->vf_id];
+ repr = vf->repr;
+ repr->src_vsi = vsi;
+ repr->dst->u.port_info.port_id = vsi->vsi_num;
+
+ ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
+ if (ret) {
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
+ dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
+ }
+}
+
+/**
+ * ice_eswitch_port_start_xmit - callback for packet transmit
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ */
+netdev_tx_t
+ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ice_netdev_priv *np;
+ struct ice_repr *repr;
+ struct ice_vsi *vsi;
+
+ np = netdev_priv(netdev);
+ vsi = np->vsi;
+
+ if (ice_is_reset_in_progress(vsi->back->state))
+ return NETDEV_TX_BUSY;
+
+ repr = ice_netdev_to_repr(netdev);
+ skb_dst_drop(skb);
+ dst_hold((struct dst_entry *)repr->dst);
+ skb_dst_set(skb, (struct dst_entry *)repr->dst);
+ skb->queue_mapping = repr->vf->vf_id;
+
+ return ice_start_xmit(skb, netdev);
+}
+
+/**
+ * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
+ * @skb: pointer to send buffer
+ * @off: pointer to offload struct
+ */
+void
+ice_eswitch_set_target_vsi(struct sk_buff *skb,
+ struct ice_tx_offload_params *off)
+{
+ struct metadata_dst *dst = skb_metadata_dst(skb);
+ u64 cd_cmd, dst_vsi;
+
+ if (!dst) {
+ cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
+ off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
+ } else {
+ cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
+ dst_vsi = ((u64)dst->u.port_info.port_id <<
+ ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
+ off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
+ }
+}
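+
+/* Worked example (illustrative only, derived from the masks used above):
+ * for a metadata dst carrying port_id 5, QW1 of the Tx context descriptor
+ * is assembled as
+ *
+ *   cd_cmd  = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
+ *   dst_vsi = ((u64)5 << ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
+ *   cd_qw1  = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
+ *
+ * placing the 10-bit target VSI number at bit 50 of QW1 and selecting
+ * VSI forwarding instead of the uplink.
+ */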
+
+/**
+ * ice_eswitch_release_env - clear switchdev HW filters
+ * @pf: pointer to PF struct
+ *
+ * This function removes the HW filter configuration specific to switchdev
+ * mode and restores the default legacy mode settings.
+ */
+static void ice_eswitch_release_env(struct ice_pf *pf)
+{
+ struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+
+ ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
+ ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
+ ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
+ ice_clear_dflt_vsi(uplink_vsi->vsw);
+ ice_fltr_add_mac_and_broadcast(uplink_vsi,
+ uplink_vsi->port_info->mac.perm_addr,
+ ICE_FWD_TO_VSI);
+}
+
+/**
+ * ice_eswitch_vsi_setup - configure switchdev control VSI
+ * @pf: pointer to PF structure
+ * @pi: pointer to port_info structure
+ */
+static struct ice_vsi *
+ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL);
+}
+
+/**
+ * ice_eswitch_napi_del - remove NAPI handle for all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_napi_del(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i)
+ netif_napi_del(&pf->vf[i].repr->q_vector->napi);
+}
+
+/**
+ * ice_eswitch_napi_enable - enable NAPI for all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_napi_enable(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i)
+ napi_enable(&pf->vf[i].repr->q_vector->napi);
+}
+
+/**
+ * ice_eswitch_napi_disable - disable NAPI for all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_napi_disable(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i)
+ napi_disable(&pf->vf[i].repr->q_vector->napi);
+}
+
+/**
+ * ice_eswitch_set_rxdid - configure rxdid on all Rx queues from VSI
+ * @vsi: VSI to setup rxdid on
+ * @rxdid: flex descriptor id
+ */
+static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ int i;
+
+ ice_for_each_rxq(vsi, i) {
+ struct ice_rx_ring *ring = vsi->rx_rings[i];
+ u16 pf_q = vsi->rxq_map[ring->q_index];
+
+ ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
+ }
+}
+
+/**
+ * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
+ * @pf: pointer to PF structure
+ */
+static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi;
+
+ pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
+ if (!pf->switchdev.control_vsi)
+ return -ENODEV;
+
+ ctrl_vsi = pf->switchdev.control_vsi;
+ pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
+ if (!pf->switchdev.uplink_vsi)
+ goto err_vsi;
+
+ if (ice_eswitch_setup_env(pf))
+ goto err_vsi;
+
+ if (ice_repr_add_for_all_vfs(pf))
+ goto err_repr_add;
+
+ if (ice_eswitch_setup_reprs(pf))
+ goto err_setup_reprs;
+
+ ice_eswitch_remap_rings_to_vectors(pf);
+
+ if (ice_vsi_open(ctrl_vsi))
+ goto err_setup_reprs;
+
+ ice_eswitch_napi_enable(pf);
+
+ ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
+
+ return 0;
+
+err_setup_reprs:
+ ice_repr_rem_from_all_vfs(pf);
+err_repr_add:
+ ice_eswitch_release_env(pf);
+err_vsi:
+ ice_vsi_release(ctrl_vsi);
+ return -ENODEV;
+}
+
+/**
+ * ice_eswitch_disable_switchdev - disable switchdev resources
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+
+ ice_eswitch_napi_disable(pf);
+ ice_eswitch_release_env(pf);
+ ice_eswitch_release_reprs(pf, ctrl_vsi);
+ ice_vsi_release(ctrl_vsi);
+ ice_repr_rem_from_all_vfs(pf);
+}
+
+/**
+ * ice_eswitch_mode_set - set new eswitch mode
+ * @devlink: pointer to devlink structure
+ * @mode: eswitch mode to switch to
+ * @extack: pointer to extack structure
+ */
+int
+ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (pf->eswitch_mode == mode)
+ return 0;
+
+ if (pf->num_alloc_vfs) {
+ dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there are no VFs created");
+ NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there are no VFs created");
+ return -EOPNOTSUPP;
+ }
+
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
+ pf->hw.pf_id);
+ NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
+ break;
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
+ pf->hw.pf_id);
+ NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
+ return -EINVAL;
+ }
+
+ pf->eswitch_mode = mode;
+ return 0;
+}
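+
+/* Usage sketch (illustrative only; the PCI address below is an example):
+ * this callback is reached through the standard devlink eswitch API,
+ * e.g. from userspace:
+ *
+ *   # devlink dev eswitch set pci/0000:03:00.0 mode switchdev
+ *   # devlink dev eswitch show pci/0000:03:00.0
+ *
+ * The mode change is rejected with -EOPNOTSUPP while any VFs are
+ * allocated, as checked above.
+ */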
+
+/**
+ * ice_eswitch_get_target_netdev - return port representor netdev
+ * @rx_ring: pointer to Rx ring
+ * @rx_desc: pointer to Rx descriptor
+ *
+ * When working in switchdev mode context (when the control VSI is used),
+ * this function returns the netdev of the appropriate port representor. In
+ * the non-switchdev context, the regular netdev associated with the Rx ring
+ * is returned.
+ */
+struct net_device *
+ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ struct ice_32b_rx_flex_desc_nic_2 *desc;
+ struct ice_vsi *vsi = rx_ring->vsi;
+ struct ice_vsi *control_vsi;
+ u16 target_vsi_id;
+
+ control_vsi = vsi->back->switchdev.control_vsi;
+ if (vsi != control_vsi)
+ return rx_ring->netdev;
+
+ desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
+ target_vsi_id = le16_to_cpu(desc->src_vsi);
+
+ return vsi->target_netdevs[target_vsi_id];
+}
+
+/**
+ * ice_eswitch_mode_get - get current eswitch mode
+ * @devlink: pointer to devlink structure
+ * @mode: output parameter for current eswitch mode
+ */
+int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ *mode = pf->eswitch_mode;
+ return 0;
+}
+
+/**
+ * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
+ * @pf: pointer to PF structure
+ *
+ * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
+ * false otherwise.
+ */
+bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
+{
+ return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
+}
+
+/**
+ * ice_eswitch_release - cleanup eswitch
+ * @pf: pointer to PF structure
+ */
+void ice_eswitch_release(struct ice_pf *pf)
+{
+ if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ return;
+
+ ice_eswitch_disable_switchdev(pf);
+ pf->switchdev.is_running = false;
+}
+
+/**
+ * ice_eswitch_configure - configure eswitch
+ * @pf: pointer to PF structure
+ */
+int ice_eswitch_configure(struct ice_pf *pf)
+{
+ int status;
+
+ if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
+ return 0;
+
+ status = ice_eswitch_enable_switchdev(pf);
+ if (status)
+ return status;
+
+ pf->switchdev.is_running = true;
+ return 0;
+}
+
+/**
+ * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
+{
+ struct ice_repr *repr;
+ int i;
+
+ if (test_bit(ICE_DOWN, pf->state))
+ return;
+
+ ice_for_each_vf(pf, i) {
+ repr = pf->vf[i].repr;
+ if (repr)
+ ice_repr_start_tx_queues(repr);
+ }
+}
+
+/**
+ * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
+ * @pf: pointer to PF structure
+ */
+void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
+{
+ struct ice_repr *repr;
+ int i;
+
+ if (test_bit(ICE_DOWN, pf->state))
+ return;
+
+ ice_for_each_vf(pf, i) {
+ repr = pf->vf[i].repr;
+ if (repr)
+ ice_repr_stop_tx_queues(repr);
+ }
+}
+
+/**
+ * ice_eswitch_rebuild - rebuild eswitch
+ * @pf: pointer to PF structure
+ */
+int ice_eswitch_rebuild(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ int status;
+
+ ice_eswitch_napi_disable(pf);
+ ice_eswitch_napi_del(pf);
+
+ status = ice_eswitch_setup_env(pf);
+ if (status)
+ return status;
+
+ status = ice_eswitch_setup_reprs(pf);
+ if (status)
+ return status;
+
+ ice_eswitch_remap_rings_to_vectors(pf);
+
+ ice_replay_tc_fltrs(pf);
+
+ status = ice_vsi_open(ctrl_vsi);
+ if (status)
+ return status;
+
+ ice_eswitch_napi_enable(pf);
+ ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
+ ice_eswitch_start_all_tx_queues(pf);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
new file mode 100644
index 000000000000..364cd2a79c37
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_ESWITCH_H_
+#define _ICE_ESWITCH_H_
+
+#include <net/devlink.h>
+
+#ifdef CONFIG_ICE_SWITCHDEV
+void ice_eswitch_release(struct ice_pf *pf);
+int ice_eswitch_configure(struct ice_pf *pf);
+int ice_eswitch_rebuild(struct ice_pf *pf);
+
+int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
+int
+ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack);
+bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
+
+void ice_eswitch_update_repr(struct ice_vsi *vsi);
+
+void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
+
+struct net_device *
+ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc);
+
+void ice_eswitch_set_target_vsi(struct sk_buff *skb,
+ struct ice_tx_offload_params *off);
+netdev_tx_t
+ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+#else /* CONFIG_ICE_SWITCHDEV */
+static inline void ice_eswitch_release(struct ice_pf *pf) { }
+
+static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
+
+static inline void
+ice_eswitch_set_target_vsi(struct sk_buff *skb,
+ struct ice_tx_offload_params *off) { }
+
+static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
+
+static inline int ice_eswitch_configure(struct ice_pf *pf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_eswitch_rebuild(struct ice_pf *pf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ /* only legacy mode is possible without CONFIG_ICE_SWITCHDEV */
+ *mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ return 0;
+}
+
+static inline int
+ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
+{
+ return false;
+}
+
+static inline struct net_device *
+ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ return rx_ring->netdev;
+}
+
+static inline netdev_tx_t
+ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ return NETDEV_TX_BUSY;
+}
+#endif /* CONFIG_ICE_SWITCHDEV */
+#endif /* _ICE_ESWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index c451cf401e63..cfe96a127ed4 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -170,10 +170,9 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
static void
-ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+__ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo,
+ struct ice_vsi *vsi)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct ice_orom_info *orom;
@@ -193,6 +192,15 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
strscpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
+}
+
+static void
+ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ __ice_get_drvinfo(netdev, drvinfo, np->vsi);
+
drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}
@@ -584,7 +592,7 @@ static bool ice_lbtest_check_frame(u8 *frame)
*
* Function sends loopback packets on a test Tx ring.
*/
-static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size)
+static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size)
{
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
@@ -637,7 +645,7 @@ static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size)
* Function receives loopback packets and verify their correctness.
* Returns number of received valid frames.
*/
-static int ice_lbtest_receive_frames(struct ice_ring *rx_ring)
+static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
{
struct ice_rx_buf *rx_buf;
int valid_frames, i;
@@ -676,9 +684,10 @@ static u64 ice_loopback_test(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
struct ice_pf *pf = orig_vsi->back;
- struct ice_ring *tx_ring, *rx_ring;
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
struct device *dev;
u8 *tx_frame;
int i;
@@ -866,10 +875,10 @@ skip_ol_tests:
netdev_info(netdev, "testing finished\n");
}
-static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+static void
+__ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
+ struct ice_vsi *vsi)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
unsigned int i;
u8 *p = data;
@@ -879,6 +888,9 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
ethtool_sprintf(&p,
ice_gstrings_vsi_stats[i].stat_string);
+ if (ice_is_port_repr_netdev(netdev))
+ return;
+
ice_for_each_alloc_txq(vsi, i) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -917,6 +929,13 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
+static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ __ice_get_strings(netdev, stringset, data, np->vsi);
+}
+
static int
ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
@@ -1215,6 +1234,13 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
enum ice_status status;
bool dcbx_agent_status;
+ if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) {
+ clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
+ dev_err(dev, "QoS in L3 DSCP mode, FW Agent not allowed to start\n");
+ ret = -EOPNOTSUPP;
+ goto ethtool_exit;
+ }
+
/* Remove rule to direct LLDP packets to default VSI.
* The FW LLDP engine will now be consuming them.
*/
@@ -1312,13 +1338,13 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
}
static void
-ice_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats __always_unused *stats, u64 *data)
+__ice_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats, u64 *data,
+ struct ice_vsi *vsi)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- struct ice_ring *ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
unsigned int j;
int i = 0;
char *p;
@@ -1332,14 +1358,17 @@ ice_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ if (ice_is_port_repr_netdev(netdev))
+ return;
+
/* populate per queue stats */
rcu_read_lock();
ice_for_each_alloc_txq(vsi, j) {
- ring = READ_ONCE(vsi->tx_rings[j]);
- if (ring) {
- data[i++] = ring->stats.pkts;
- data[i++] = ring->stats.bytes;
+ tx_ring = READ_ONCE(vsi->tx_rings[j]);
+ if (tx_ring) {
+ data[i++] = tx_ring->stats.pkts;
+ data[i++] = tx_ring->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
@@ -1347,10 +1376,10 @@ ice_get_ethtool_stats(struct net_device *netdev,
}
ice_for_each_alloc_rxq(vsi, j) {
- ring = READ_ONCE(vsi->rx_rings[j]);
- if (ring) {
- data[i++] = ring->stats.pkts;
- data[i++] = ring->stats.bytes;
+ rx_ring = READ_ONCE(vsi->rx_rings[j]);
+ if (rx_ring) {
+ data[i++] = rx_ring->stats.pkts;
+ data[i++] = rx_ring->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
@@ -1379,6 +1408,15 @@ ice_get_ethtool_stats(struct net_device *netdev,
}
}
+static void
+ice_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats, u64 *data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ __ice_get_ethtool_stats(netdev, stats, data, np->vsi);
+}
+
#define ICE_PHY_TYPE_LOW_MASK_MIN_1G (ICE_PHY_TYPE_LOW_100BASE_TX | \
ICE_PHY_TYPE_LOW_100M_SGMII)
@@ -2667,9 +2705,10 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
static int
ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
- struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_ring *xdp_rings = NULL;
+ struct ice_tx_ring *xdp_rings = NULL;
+ struct ice_tx_ring *tx_rings = NULL;
+ struct ice_rx_ring *rx_rings = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
@@ -2718,12 +2757,12 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
- for (i = 0; i < vsi->alloc_txq; i++)
+ ice_for_each_alloc_txq(vsi, i)
vsi->tx_rings[i]->count = new_tx_cnt;
- for (i = 0; i < vsi->alloc_rxq; i++)
+ ice_for_each_alloc_rxq(vsi, i)
vsi->rx_rings[i]->count = new_rx_cnt;
if (ice_is_xdp_ena_vsi(vsi))
- for (i = 0; i < vsi->num_xdp_txq; i++)
+ ice_for_each_xdp_txq(vsi, i)
vsi->xdp_rings[i]->count = new_tx_cnt;
vsi->num_tx_desc = (u16)new_tx_cnt;
vsi->num_rx_desc = (u16)new_rx_cnt;
@@ -2772,7 +2811,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
goto free_tx;
}
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
/* clone ring and setup updated count */
xdp_rings[i] = *vsi->xdp_rings[i];
xdp_rings[i].count = new_tx_cnt;
@@ -2866,7 +2905,7 @@ process_link:
}
if (xdp_rings) {
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
ice_free_tx_ring(vsi->xdp_rings[i]);
*vsi->xdp_rings[i] = xdp_rings[i];
}
@@ -3155,6 +3194,11 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return -EIO;
}
+ if (ice_is_adq_active(pf)) {
+ netdev_err(netdev, "Cannot change RSS params with ADQ configured.\n");
+ return -EOPNOTSUPP;
+ }
+
if (key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user =
@@ -3255,7 +3299,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.ring && q_vector->tx.ring)
+ if (q_vector->rx.rx_ring && q_vector->tx.tx_ring)
combined++;
}
@@ -3365,6 +3409,11 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1U : 0U))
return -EINVAL;
+ if (ice_is_adq_active(pf)) {
+ netdev_err(dev, "Cannot set channels with ADQ configured.\n");
+ return -EOPNOTSUPP;
+ }
+
if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) {
netdev_err(dev, "Cannot set channels when Flow Director filters are active\n");
return -EOPNOTSUPP;
@@ -3466,15 +3515,9 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
-enum ice_container_type {
- ICE_RX_CONTAINER,
- ICE_TX_CONTAINER,
-};
-
/**
* ice_get_rc_coalesce - get ITR values for specific ring container
* @ec: ethtool structure to fill with driver's coalesce settings
- * @c_type: container type, Rx or Tx
* @rc: ring container that the ITR values will come from
*
* Query the device for ice_ring_container specific ITR values. This is
@@ -3484,24 +3527,23 @@ enum ice_container_type {
* Returns 0 on success, negative otherwise.
*/
static int
-ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
- struct ice_ring_container *rc)
+ice_get_rc_coalesce(struct ethtool_coalesce *ec, struct ice_ring_container *rc)
{
- if (!rc->ring)
+ if (!rc->rx_ring)
return -EINVAL;
- switch (c_type) {
+ switch (rc->type) {
case ICE_RX_CONTAINER:
ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc);
ec->rx_coalesce_usecs = rc->itr_setting;
- ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl;
+ ec->rx_coalesce_usecs_high = rc->rx_ring->q_vector->intrl;
break;
case ICE_TX_CONTAINER:
ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc);
ec->tx_coalesce_usecs = rc->itr_setting;
break;
default:
- dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", c_type);
+ dev_dbg(ice_pf_to_dev(rc->rx_ring->vsi->back), "Invalid c_type %d\n", rc->type);
return -EINVAL;
}
@@ -3522,18 +3564,18 @@ static int
ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx))
return -EINVAL;
- if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx))
return -EINVAL;
} else if (q_num < vsi->num_rxq) {
- if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx))
return -EINVAL;
} else if (q_num < vsi->num_txq) {
- if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx))
return -EINVAL;
} else {
@@ -3585,7 +3627,6 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
/**
* ice_set_rc_coalesce - set ITR values for specific ring container
- * @c_type: container type, Rx or Tx
* @ec: ethtool structure from user to update ITR settings
* @rc: ring container that the ITR values will come from
* @vsi: VSI associated to the ring container
@@ -3597,19 +3638,22 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
* Returns 0 on success, negative otherwise.
*/
static int
-ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
+ice_set_rc_coalesce(struct ethtool_coalesce *ec,
struct ice_ring_container *rc, struct ice_vsi *vsi)
{
- const char *c_type_str = (c_type == ICE_RX_CONTAINER) ? "rx" : "tx";
+ const char *c_type_str = (rc->type == ICE_RX_CONTAINER) ? "rx" : "tx";
u32 use_adaptive_coalesce, coalesce_usecs;
struct ice_pf *pf = vsi->back;
u16 itr_setting;
- if (!rc->ring)
+ if (!rc->rx_ring)
return -EINVAL;
- switch (c_type) {
+ switch (rc->type) {
case ICE_RX_CONTAINER:
+ {
+ struct ice_q_vector *q_vector = rc->rx_ring->q_vector;
+
if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
(ec->rx_coalesce_usecs_high &&
ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
@@ -3618,22 +3662,20 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
ICE_MAX_INTRL);
return -EINVAL;
}
- if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl &&
+ if (ec->rx_coalesce_usecs_high != q_vector->intrl &&
(ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) {
netdev_info(vsi->netdev, "Invalid value, %s-usecs-high cannot be changed if adaptive-tx or adaptive-rx is enabled\n",
c_type_str);
return -EINVAL;
}
- if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) {
- rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
- ice_write_intrl(rc->ring->q_vector,
- ec->rx_coalesce_usecs_high);
- }
+ if (ec->rx_coalesce_usecs_high != q_vector->intrl)
+ q_vector->intrl = ec->rx_coalesce_usecs_high;
use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
coalesce_usecs = ec->rx_coalesce_usecs;
break;
+ }
case ICE_TX_CONTAINER:
use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
coalesce_usecs = ec->tx_coalesce_usecs;
@@ -3641,7 +3683,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
break;
default:
dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
- c_type);
+ rc->type);
return -EINVAL;
}
@@ -3690,22 +3732,22 @@ static int
ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi))
return -EINVAL;
- if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi))
return -EINVAL;
} else if (q_num < vsi->num_rxq) {
- if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi))
return -EINVAL;
} else if (q_num < vsi->num_txq) {
- if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi))
return -EINVAL;
@@ -3778,6 +3820,8 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
if (ice_set_q_coalesce(vsi, ec, v_idx))
return -EINVAL;
+
+ ice_set_q_vector_intrl(vsi->q_vectors[v_idx]);
}
goto set_complete;
}
@@ -3785,6 +3829,8 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
if (ice_set_q_coalesce(vsi, ec, q_num))
return -EINVAL;
+ ice_set_q_vector_intrl(vsi->q_vectors[q_num]);
+
set_complete:
return 0;
}
@@ -3804,6 +3850,54 @@ ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
return __ice_set_coalesce(netdev, ec, q_num);
}
+static void
+ice_repr_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ if (ice_check_vf_ready_for_cfg(repr->vf))
+ return;
+
+ __ice_get_drvinfo(netdev, drvinfo, repr->src_vsi);
+}
+
+static void
+ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ /* for port representors only ETH_SS_STATS is supported */
+ if (ice_check_vf_ready_for_cfg(repr->vf) ||
+ stringset != ETH_SS_STATS)
+ return;
+
+ __ice_get_strings(netdev, stringset, data, repr->src_vsi);
+}
+
+static void
+ice_repr_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ if (ice_check_vf_ready_for_cfg(repr->vf))
+ return;
+
+ __ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi);
+}
+
+static int ice_repr_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ICE_VSI_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
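+
+/* Usage sketch (illustrative only; the netdev name is an example): these
+ * reduced ethtool ops back the VF port representor netdevs, so
+ *
+ *   # ethtool -S eth0
+ *
+ * run against a representor reports only the ICE_VSI_STATS_LEN VSI-level
+ * counters; __ice_get_strings() and __ice_get_ethtool_stats() return
+ * early for port representor netdevs, so no per-queue statistics appear.
+ */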
+
#define ICE_I2C_EEPROM_DEV_ADDR 0xA0
#define ICE_I2C_EEPROM_DEV_ADDR2 0xA2
#define ICE_MODULE_TYPE_SFP 0x03
@@ -4055,6 +4149,23 @@ void ice_set_ethtool_safe_mode_ops(struct net_device *netdev)
netdev->ethtool_ops = &ice_ethtool_safe_mode_ops;
}
+static const struct ethtool_ops ice_ethtool_repr_ops = {
+ .get_drvinfo = ice_repr_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ice_repr_get_strings,
+ .get_ethtool_stats = ice_repr_get_ethtool_stats,
+ .get_sset_count = ice_repr_get_sset_count,
+};
+
+/**
+ * ice_set_ethtool_repr_ops - setup VF's port representor ethtool ops
+ * @netdev: network interface device structure
+ */
+void ice_set_ethtool_repr_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &ice_ethtool_repr_ops;
+}
+
/**
* ice_set_ethtool_ops - setup netdev ethtool ops
* @netdev: network interface device structure
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 16de603b280c..38960bcc384c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -706,7 +706,7 @@ ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
if (!seg)
return -ENOMEM;
- tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
@@ -1068,7 +1068,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
if (!seg)
return -ENOMEM;
- tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 59ef68f072c0..cbd8424631e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -952,7 +952,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
if (frag)
- loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF;
+ loc[20] = ICE_FDIR_IPV4_PKT_FLAG_MF;
break;
case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index d2d40e18ae8a..da4163856f4c 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -48,7 +48,7 @@
* requests that the packet not be fragmented. MF indicates that a packet has
* been fragmented.
*/
-#define ICE_FDIR_IPV4_PKT_FLAG_DF 0x20
+#define ICE_FDIR_IPV4_PKT_FLAG_MF 0x20
enum ice_fltr_prgm_desc_dest {
ICE_FLTR_PRGM_DESC_DEST_DROP_PKT,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 06ac9badee77..e731b46270c3 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -735,7 +735,7 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw)
*
* This function will request ownership of the change lock.
*/
-static enum ice_status
+enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
@@ -748,7 +748,7 @@ ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
*
* This function will release the change lock using the proper Admin Command.
*/
-static void ice_release_change_lock(struct ice_hw *hw)
+void ice_release_change_lock(struct ice_hw *hw)
{
ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}
@@ -1330,6 +1330,86 @@ fw_ddp_compat_free_alloc:
}
/**
+ * ice_sw_fv_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * It treats the given section as being of type ice_sw_fv_section and
+ * enumerates its offset field; "offset" is an index into the field vector
+ * table.
+ */
+static void *
+ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_sw_fv_section *fv_section = section;
+
+ if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+ return NULL;
+ if (index >= le16_to_cpu(fv_section->count))
+ return NULL;
+ if (offset)
+ /* The "index" passed in to this function is relative to a
+ * given 4k block. To get the true index into the field
+ * vector table, add the relative index to the base_offset
+ * field of this section.
+ */
+ *offset = le16_to_cpu(fv_section->base_offset) + index;
+ return fv_section->fv + index;
+}
+
+/**
+ * ice_get_prof_index_max - get the max profile index of the used profiles
+ * @hw: pointer to the HW struct
+ *
+ * Calling this function finds the max profile index among the profiles in
+ * use and stores that index in struct ice_switch_info *switch_info in the
+ * HW struct for later use.
+ */
+static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
+{
+ u16 prof_index = 0, j, max_prof_index = 0;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ bool flag = false;
+ struct ice_fv *fv;
+ u32 offset;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+
+ do {
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* in a profile that is not used, prot_id is set to 0xff and
+ * off is set to 0x1ff for all of its field vector words.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
+ fv->ew[j].off != ICE_FV_OFFSET_INVAL)
+ flag = true;
+ if (flag && prof_index > max_prof_index)
+ max_prof_index = prof_index;
+
+ prof_index++;
+ flag = false;
+ } while (fv);
+
+ hw->switch_info->max_used_prof_index = max_prof_index;
+
+ return 0;
+}
+
+/**
* ice_init_pkg - initialize/download package
* @hw: pointer to the hardware structure
* @buf: pointer to the package buffer
@@ -1408,6 +1488,7 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
*/
ice_init_pkg_regs(hw);
ice_fill_blk_tbls(hw);
+ ice_get_prof_index_max(hw);
} else {
ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
status);
@@ -1485,6 +1566,167 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
}
/**
+ * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
+ * @hw: pointer to hardware structure
+ * @req_profs: type of profiles requested
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+void
+ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
+ unsigned long *bm)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ if (req_profs == ICE_PROF_ALL) {
+ bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
+ return;
+ }
+
+ memset(&state, 0, sizeof(state));
+ bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
+ ice_seg = hw->seg;
+ do {
+ u32 offset;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ ice_seg = NULL;
+
+ if (fv) {
+ if (req_profs & ICE_PROF_NON_TUN)
+ set_bit((u16)offset, bm);
+ }
+ } while (fv);
+}
+
+/**
+ * ice_get_sw_fv_list
+ * @hw: pointer to the HW structure
+ * @prot_ids: field vector to search for with a given protocol ID
+ * @ids_cnt: lookup/protocol count
+ * @bm: bitmap of field vectors to consider
+ * @fv_list: Head of a list to which matching entries are added
+ *
+ * Finds all the field vector entries from switch block that contain
+ * a given protocol ID and returns a list of structures of type
+ * "ice_sw_fv_list_entry". Every structure in the list has a field vector
+ * definition and profile ID information.
+ * NOTE: The caller of the function is responsible for freeing the memory
+ * allocated for every list entry.
+ */
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ unsigned long *bm, struct list_head *fv_list)
+{
+ struct ice_sw_fv_list_entry *fvl;
+ struct ice_sw_fv_list_entry *tmp;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+ u32 offset;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!ids_cnt || !hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+ do {
+ u16 i;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* If field vector is not in the bitmap list, then skip this
+ * profile.
+ */
+ if (!test_bit((u16)offset, bm))
+ continue;
+
+ for (i = 0; i < ids_cnt; i++) {
+ int j;
+
+ /* This code assumes that if a switch field vector line
+ * has a matching protocol, then this line will contain
+ * the entries necessary to represent every field in
+ * that protocol header.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id == prot_ids[i])
+ break;
+ if (j >= hw->blk[ICE_BLK_SW].es.fvw)
+ break;
+ if (i + 1 == ids_cnt) {
+ fvl = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*fvl), GFP_KERNEL);
+ if (!fvl)
+ goto err;
+ fvl->fv_ptr = fv;
+ fvl->profile_id = offset;
+ list_add(&fvl->list_entry, fv_list);
+ break;
+ }
+ }
+ } while (fv);
+ if (list_empty(fv_list))
+ return ICE_ERR_CFG;
+ return 0;
+
+err:
+ list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
+ list_del(&fvl->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fvl);
+ }
+
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_init_prof_result_bm - Initialize the profile result index bitmap
+ * @hw: pointer to hardware structure
+ */
+void ice_init_prof_result_bm(struct ice_hw *hw)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!hw->seg)
+ return;
+
+ ice_seg = hw->seg;
+ do {
+ u32 off;
+ u16 i;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &off, ice_sw_fv_handler);
+ ice_seg = NULL;
+ if (!fv)
+ break;
+
+ bitmap_zero(hw->switch_info->prof_res_bm[off],
+ ICE_MAX_FV_WORDS);
+
+ /* Determine empty field vector indices, these can be
+ * used for recipe results. Skip index 0, since it is
+ * always used for Switch ID.
+ */
+ for (i = 1; i < ICE_MAX_FV_WORDS; i++)
+ if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
+ fv->ew[i].off == ICE_FV_OFFSET_INVAL)
+ set_bit(i, hw->switch_info->prof_res_bm[off]);
+ } while (fv);
+}
+
+/**
* ice_pkg_buf_free
* @hw: pointer to the HW structure
* @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
@@ -1668,7 +1910,7 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].valid &&
hw->tnl.tbl[i].type == type &&
- idx--)
+ idx-- == 0)
return i;
WARN_ON_ONCE(1);
@@ -1828,7 +2070,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
u16 index;
tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
- index = ice_tunnel_idx_to_entry(&pf->hw, idx, tnl_type);
+ index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
if (status) {
@@ -1863,6 +2105,35 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
return 0;
}
+/**
+ * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
+ * @hw: pointer to the hardware structure
+ * @blk: hardware block
+ * @prof: profile ID
+ * @fv_idx: field vector word index
+ * @prot: variable to receive the protocol ID
+ * @off: variable to receive the protocol offset
+ */
+enum ice_status
+ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
+ u8 *prot, u16 *off)
+{
+ struct ice_fv_word *fv_ext;
+
+ if (prof >= hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ if (fv_idx >= hw->blk[blk].es.fvw)
+ return ICE_ERR_PARAM;
+
+ fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
+
+ *prot = fv_ext[fv_idx].prot_id;
+ *off = fv_ext[fv_idx].off;
+
+ return 0;
+}
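+
+/* Worked example (illustrative only): with es.fvw == 48, prof == 3 and
+ * fv_idx == 2 resolve to entry 3 * 48 + 2 == 146 of the extraction
+ * sequence table es.t, from which prot_id and off are read back.
+ */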
+
/* PTG Management */
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 8a58e79729b9..344c2637facd 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -18,6 +18,20 @@
#define ICE_PKG_CNT 4
+enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
+void ice_release_change_lock(struct ice_hw *hw);
+enum ice_status
+ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
+ u8 *prot, u16 *off);
+void
+ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
+ unsigned long *bm);
+void
+ice_init_prof_result_bm(struct ice_hw *hw);
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ unsigned long *bm, struct list_head *fv_list);
bool
ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port);
int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 7d8b517a63c9..120bcebaa080 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -13,6 +13,8 @@ struct ice_fv_word {
u8 resvrd;
} __packed;
+#define ICE_MAX_NUM_PROFILES 256
+
#define ICE_MAX_FV_WORDS 48
struct ice_fv {
struct ice_fv_word ew[ICE_MAX_FV_WORDS];
@@ -279,6 +281,12 @@ struct ice_sw_fv_section {
struct ice_fv fv[];
};
+struct ice_sw_fv_list_entry {
+ struct list_head list_entry;
+ u32 profile_id;
+ struct ice_fv *fv_ptr;
+};
+
/* The BOOST TCAM stores the match packet header in reverse order, meaning
* the fields are reversed; in addition, this means that the normally big endian
* fields of the packet are now little endian.
@@ -603,4 +611,9 @@ struct ice_chs_chg {
};
#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
+
+enum ice_prof_type {
+ ICE_PROF_NON_TUN = 0x1,
+ ICE_PROF_ALL = 0xFF,
+};
#endif /* _ICE_FLEX_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index 2418d4fff037..c2e78eaf4ccb 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -395,3 +395,83 @@ enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype,
return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
ice_fltr_remove_eth_list);
}
+
+/**
+ * ice_fltr_update_rule_flags - update lan_en/lb_en flags
+ * @hw: pointer to hw
+ * @rule_id: id of rule being updated
+ * @recipe_id: recipe id of rule
+ * @act: current action field
+ * @type: Rx or Tx
+ * @src: source VSI
+ * @new_flags: combinations of lb_en and lan_en
+ */
+static enum ice_status
+ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id,
+ u32 act, u16 type, u16 src, u32 new_flags)
+{
+ struct ice_aqc_sw_rules_elem *s_rule;
+ enum ice_status err;
+ u32 flags_mask;
+
+ s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
+ act &= ~flags_mask;
+ act |= (flags_mask & new_flags);
+
+ s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(recipe_id);
+ s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
+ s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+
+ if (type & ICE_FLTR_RX) {
+ s_rule->pdata.lkup_tx_rx.src =
+ cpu_to_le16(hw->port_info->lport);
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+
+ } else {
+ s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(src);
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+ }
+
+ err = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
+ ice_aqc_opc_update_sw_rules, NULL);
+
+ kfree(s_rule);
+ return err;
+}
+
+/**
+ * ice_fltr_build_action - build action for a rule
+ * @vsi_id: ID of the VSI used to build the action
+ */
+static u32 ice_fltr_build_action(u16 vsi_id)
+{
+ return ((vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M) |
+ ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
+}
+
+/**
+ * ice_fltr_update_flags_dflt_rule - update flags on default rule
+ * @vsi: pointer to VSI
+ * @rule_id: id of rule
+ * @direction: Tx or Rx
+ * @new_flags: flags to update
+ *
+ * Function updates flags on the default rule that uses ICE_SW_LKUP_DFLT.
+ *
+ * Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and
+ * ICE_SINGLE_ACT_LAN_ENABLE.
+ */
+enum ice_status
+ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
+ u32 new_flags)
+{
+ u32 action = ice_fltr_build_action(vsi->vsi_num);
+ struct ice_hw *hw = &vsi->back->hw;
+
+ return ice_fltr_update_rule_flags(hw, rule_id, ICE_SW_LKUP_DFLT, action,
+ direction, vsi->vsi_num, new_flags);
+}
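+
+/* Usage sketch (illustrative only): the switchdev setup path uses this
+ * helper to force looped-back delivery on the control VSI's default Tx
+ * rule:
+ *
+ *   ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id,
+ *                                   ICE_FLTR_TX,
+ *                                   ICE_SINGLE_ACT_LB_ENABLE);
+ *
+ * Passing ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE would
+ * allow both loopback and LAN transmit on the same rule.
+ */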
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h
index 361cb4da9b43..8eec4febead1 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.h
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.h
@@ -36,4 +36,7 @@ enum ice_status
ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
void ice_fltr_remove_all(struct ice_vsi *vsi);
+enum ice_status
+ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
+ u32 new_flags);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 76021d977b60..a49082485642 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -182,6 +182,7 @@
#define GLINT_DYN_CTL_INTERVAL_S 5
#define GLINT_DYN_CTL_INTERVAL_M ICE_M(0xFFF, 5)
#define GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24)
+#define GLINT_DYN_CTL_SW_ITR_INDX_S 25
#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
#define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30)
#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 80736e0ec0dc..d981dc6f2323 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -301,6 +301,46 @@ struct ice_32b_rx_flex_desc_nic {
} flex_ts;
};
+/* Rx Flex Descriptor NIC Profile
+ * RxDID Profile ID 6
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Source VSI
+ * Flex-field 4: reserved, VLAN ID taken from L2Tag
+ */
+struct ice_32b_rx_flex_desc_nic_2 {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flexi_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le32 rss_hash;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flexi_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le16 flow_id;
+ __le16 src_vsi;
+ union {
+ struct {
+ __le16 rsvd;
+ __le16 flow_id_ipv6;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+};
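+
+/* Illustrative sketch (not part of this change): a receive path running
+ * on the switchdev control VSI can recover the source VSI from this
+ * profile with
+ *
+ *   struct ice_32b_rx_flex_desc_nic_2 *desc =
+ *           (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
+ *   u16 src_vsi = le16_to_cpu(desc->src_vsi);
+ *
+ * which is how ice_eswitch_get_target_netdev() maps a descriptor back to
+ * the owning port representor netdev.
+ */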
+
/* Receive Flex Descriptor profile IDs: There are a total
* of 64 profiles where profile IDs 0/1 are for legacy; and
* profiles 2-63 are flex profiles that can be programmed
@@ -529,6 +569,9 @@ struct ice_tx_ctx_desc {
#define ICE_TXD_CTX_QW1_MSS_S 50
+#define ICE_TXD_CTX_QW1_VSI_S 50
+#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S)
+
enum ice_tx_ctx_desc_cmd_bits {
ICE_TX_CTX_DESC_TSO = 0x01,
ICE_TX_CTX_DESC_TSYN = 0x02,
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index dde9802c6c72..77dceab9fbbe 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -22,8 +22,12 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_VF";
case ICE_VSI_CTRL:
return "ICE_VSI_CTRL";
+ case ICE_VSI_CHNL:
+ return "ICE_VSI_CHNL";
case ICE_VSI_LB:
return "ICE_VSI_LB";
+ case ICE_VSI_SWITCHDEV_CTRL:
+ return "ICE_VSI_SWITCHDEV_CTRL";
default:
return "unknown";
}
@@ -44,12 +48,12 @@ static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
int ret = 0;
u16 i;
- for (i = 0; i < vsi->num_rxq; i++)
+ ice_for_each_rxq(vsi, i)
ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
ice_flush(&vsi->back->hw);
- for (i = 0; i < vsi->num_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
if (ret)
break;
@@ -71,6 +75,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
struct device *dev;
dev = ice_pf_to_dev(pf);
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
/* allocate memory for both Tx and Rx ring pointers */
vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
@@ -132,6 +138,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
/* a user could change the values of num_[tr]x_desc using
@@ -200,6 +207,14 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
max_t(int, vsi->alloc_rxq,
vsi->alloc_txq));
break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ /* The number of queues for ctrl VSI is equal to number of VFs.
+ * Each ring is associated to the corresponding VF_PR netdev.
+ */
+ vsi->alloc_txq = pf->num_alloc_vfs;
+ vsi->alloc_rxq = pf->num_alloc_vfs;
+ vsi->num_q_vectors = 1;
+ break;
case ICE_VSI_VF:
vf = &pf->vf[vsi->vf_id];
if (vf->num_req_qs)
@@ -218,6 +233,10 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vsi->alloc_rxq = 1;
vsi->num_q_vectors = 1;
break;
+ case ICE_VSI_CHNL:
+ vsi->alloc_txq = 0;
+ vsi->alloc_rxq = 0;
+ break;
case ICE_VSI_LB:
vsi->alloc_txq = 1;
vsi->alloc_rxq = 1;
@@ -263,7 +282,7 @@ static int ice_get_free_slot(void *array, int size, int curr)
* ice_vsi_delete - delete a VSI from the switch
* @vsi: pointer to VSI being removed
*/
-static void ice_vsi_delete(struct ice_vsi *vsi)
+void ice_vsi_delete(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctxt;
@@ -334,7 +353,7 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
*
* Returns 0 on success, negative on failure
*/
-static int ice_vsi_clear(struct ice_vsi *vsi)
+int ice_vsi_clear(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
struct device *dev;
@@ -379,12 +398,12 @@ static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
- if (!q_vector->tx.ring)
+ if (!q_vector->tx.tx_ring)
return IRQ_HANDLED;
#define FDIR_RX_DESC_CLEAN_BUDGET 64
- ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET);
- ice_clean_ctrl_tx_irq(q_vector->tx.ring);
+ ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
+ ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
return IRQ_HANDLED;
}
@@ -398,7 +417,7 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
- if (!q_vector->tx.ring && !q_vector->rx.ring)
+ if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
return IRQ_HANDLED;
q_vector->total_events++;
@@ -408,16 +427,33 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
+{
+ struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
+ struct ice_pf *pf = q_vector->vsi->back;
+ int i;
+
+ if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
+ return IRQ_HANDLED;
+
+ ice_for_each_vf(pf, i)
+ napi_schedule(&pf->vf[i].repr->q_vector->napi);
+
+ return IRQ_HANDLED;
+}
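+
+/* Note (descriptive, not part of this change): the switchdev control VSI
+ * is allocated with a single q_vector, so this one MSI-X handler simply
+ * schedules the NAPI instance of every VF port representor instead of
+ * servicing a per-queue interrupt.
+ */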
+
/**
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
* @vsi_type: type of VSI
+ * @ch: ptr to channel
* @vf_id: ID of the VF being configured
*
* returns a pointer to a VSI on success, NULL on failure.
*/
static struct ice_vsi *
-ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
+ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
+ struct ice_channel *ch, u16 vf_id)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
@@ -444,10 +480,17 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
if (vsi_type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf_id);
- else
+ else if (vsi_type != ICE_VSI_CHNL)
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
switch (vsi->type) {
+ case ICE_VSI_SWITCHDEV_CTRL:
+ if (ice_vsi_alloc_arrays(vsi))
+ goto err_rings;
+
+ /* Setup eswitch MSIX irq handler for VSI */
+ vsi->irq_handler = ice_eswitch_msix_clean_rings;
+ break;
case ICE_VSI_PF:
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
@@ -466,6 +509,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
break;
+ case ICE_VSI_CHNL:
+ if (!ch)
+ goto err_rings;
+ vsi->num_rxq = ch->num_rxq;
+ vsi->num_txq = ch->num_txq;
+ vsi->next_base_q = ch->base_q;
+ break;
case ICE_VSI_LB:
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
@@ -582,6 +632,9 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
};
int ret;
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
+
ret = __ice_vsi_get_qs(&tx_qs_cfg);
if (ret)
return ret;
@@ -606,12 +659,12 @@ static void ice_vsi_put_qs(struct ice_vsi *vsi)
mutex_lock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_alloc_txq(vsi, i) {
clear_bit(vsi->txq_map[i], pf->avail_txqs);
vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
}
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_alloc_rxq(vsi, i) {
clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
}
@@ -700,12 +753,23 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
cap = &pf->hw.func_caps.common_cap;
switch (vsi->type) {
+ case ICE_VSI_CHNL:
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
vsi->rss_table_size = (u16)cap->rss_table_size;
+ if (vsi->type == ICE_VSI_CHNL)
+ vsi->rss_size = min_t(u16, vsi->num_rxq,
+ BIT(cap->rss_table_entry_width));
+ else
+ vsi->rss_size = min_t(u16, num_online_cpus(),
+ BIT(cap->rss_table_entry_width));
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+ break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
vsi->rss_size = min_t(u16, num_online_cpus(),
BIT(cap->rss_table_entry_width));
- vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
break;
case ICE_VSI_VF:
/* VF VSI will get a small RSS table.
@@ -775,21 +839,13 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
u16 num_txq_per_tc, num_rxq_per_tc;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
- bool ena_tc0 = false;
u8 netdev_tc = 0;
int i;
- /* at least TC0 should be enabled by default */
- if (vsi->tc_cfg.numtc) {
- if (!(vsi->tc_cfg.ena_tc & BIT(0)))
- ena_tc0 = true;
- } else {
- ena_tc0 = true;
- }
-
- if (ena_tc0) {
- vsi->tc_cfg.numtc++;
- vsi->tc_cfg.ena_tc |= 1;
+ if (!vsi->tc_cfg.numtc) {
+ /* at least TC0 should be enabled by default */
+ vsi->tc_cfg.numtc = 1;
+ vsi->tc_cfg.ena_tc = 1;
}
num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
@@ -931,6 +987,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
switch (vsi->type) {
+ case ICE_VSI_CHNL:
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
@@ -953,6 +1010,28 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}
+static void
+ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+{
+ struct ice_pf *pf = vsi->back;
+ u16 qcount, qmap;
+ u8 offset = 0;
+ int pow;
+
+ qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
+
+ pow = order_base_2(qcount);
+ qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
+ ICE_AQ_VSI_TC_Q_OFFSET_M) |
+ ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
+ ICE_AQ_VSI_TC_Q_NUM_M);
+
+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+ ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
+ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
+ ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
+}
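
The TC 0 queue map packed above squeezes a queue offset and a log2 queue
count into a single 16-bit word. Below is a minimal standalone sketch of
that packing; the 11-bit offset / 4-bit power-of-2 layout is an assumption
modeled on the ICE_AQ_VSI_TC_Q_* masks, so check ice_adminq_cmd.h for the
authoritative values.

#include <stdint.h>
#include <stdio.h>

/* assumed layout: bits 0-10 = first queue, bits 11-14 = log2(queue count) */
#define TC_Q_OFFSET_S 0
#define TC_Q_OFFSET_M (0x7FF << TC_Q_OFFSET_S)
#define TC_Q_NUM_S    11
#define TC_Q_NUM_M    (0xF << TC_Q_NUM_S)

static uint16_t pack_qmap(uint16_t offset, unsigned int qcount)
{
	unsigned int pow = 0;

	while ((1u << pow) < qcount) /* order_base_2() equivalent */
		pow++;

	return ((offset << TC_Q_OFFSET_S) & TC_Q_OFFSET_M) |
	       ((pow << TC_Q_NUM_S) & TC_Q_NUM_M);
}

int main(void)
{
	/* a channel VSI with 4 queues starting at queue 0 -> qmap 0x1000 */
	printf("qmap = 0x%04x\n", pack_qmap(0, 4));
	return 0;
}
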
+
/**
* ice_vsi_init - Create and initialize a VSI
* @vsi: the VSI being configured
@@ -980,6 +1059,10 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ case ICE_VSI_CHNL:
+ ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
+ break;
case ICE_VSI_VF:
ctxt->flags = ICE_AQ_VSI_TYPE_VF;
/* VF number here is the absolute VF number (0-255) */
@@ -990,6 +1073,21 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
goto out;
}
+ /* Handle VLAN pruning for channel VSI if main VSI has VLAN
+ * prune enabled
+ */
+ if (vsi->type == ICE_VSI_CHNL) {
+ struct ice_vsi *main_vsi;
+
+ main_vsi = ice_get_main_vsi(pf);
+ if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi))
+ ctxt->info.sw_flags2 |=
+ ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ else
+ ctxt->info.sw_flags2 &=
+ ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ }
+
ice_set_dflt_vsi_ctx(ctxt);
if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
ice_set_fd_vsi_ctx(ctxt, vsi);
@@ -1010,13 +1108,17 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
}
ctxt->info.sw_id = vsi->port_info->sw_id;
- ice_vsi_setup_q_map(vsi, ctxt);
- if (!init_vsi) /* means VSI being updated */
- /* must to indicate which section of VSI context are
- * being modified
- */
- ctxt->info.valid_sections |=
- cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
+ if (vsi->type == ICE_VSI_CHNL) {
+ ice_chnl_vsi_setup_q_map(vsi, ctxt);
+ } else {
+ ice_vsi_setup_q_map(vsi, ctxt);
+ if (!init_vsi) /* means VSI being updated */
+ /* must indicate which sections of the VSI context are
+ * being modified
+ */
+ ctxt->info.valid_sections |=
+ cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
+ }
/* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
* respectively
@@ -1195,6 +1297,8 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
/* SRIOV doesn't grab irq_tracker entries for each VSI */
if (vsi->type == ICE_VSI_VF)
return 0;
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
if (vsi->base_vector) {
dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
@@ -1249,14 +1353,14 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
struct ice_q_vector *q_vector = vsi->q_vectors[i];
if (q_vector) {
- q_vector->tx.ring = NULL;
- q_vector->rx.ring = NULL;
+ q_vector->tx.tx_ring = NULL;
+ q_vector->rx.rx_ring = NULL;
}
}
}
if (vsi->tx_rings) {
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_alloc_txq(vsi, i) {
if (vsi->tx_rings[i]) {
kfree_rcu(vsi->tx_rings[i], rcu);
WRITE_ONCE(vsi->tx_rings[i], NULL);
@@ -1264,7 +1368,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
}
}
if (vsi->rx_rings) {
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_alloc_rxq(vsi, i) {
if (vsi->rx_rings[i]) {
kfree_rcu(vsi->rx_rings[i], rcu);
WRITE_ONCE(vsi->rx_rings[i], NULL);
@@ -1285,8 +1389,8 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
/* Allocate Tx rings */
- for (i = 0; i < vsi->alloc_txq; i++) {
- struct ice_ring *ring;
+ ice_for_each_alloc_txq(vsi, i) {
+ struct ice_tx_ring *ring;
/* allocate with kzalloc(), free with kfree_rcu() */
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -1296,7 +1400,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->q_index = i;
ring->reg_idx = vsi->txq_map[i];
- ring->ring_active = false;
ring->vsi = vsi;
ring->tx_tstamps = &pf->ptp.port.tx;
ring->dev = dev;
@@ -1305,8 +1408,8 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
}
/* Allocate Rx rings */
- for (i = 0; i < vsi->alloc_rxq; i++) {
- struct ice_ring *ring;
+ ice_for_each_alloc_rxq(vsi, i) {
+ struct ice_rx_ring *ring;
/* allocate with kzalloc(), free with kfree_rcu() */
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -1315,7 +1418,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->q_index = i;
ring->reg_idx = vsi->rxq_map[i];
- ring->ring_active = false;
ring->vsi = vsi;
ring->netdev = vsi->netdev;
ring->dev = dev;
@@ -1363,7 +1465,7 @@ void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
* ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
* @vsi: VSI to be configured
*/
-static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
+int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct device *dev;
@@ -1371,7 +1473,25 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
int err;
dev = ice_pf_to_dev(pf);
- vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
+ if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
+ (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
+ vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
+ } else {
+ vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
+
+ /* If orig_rss_size is valid and it is less than the determined
+ * main VSI's rss_size, update the main VSI's rss_size to be
+ * orig_rss_size so that when tc-qdisc is deleted, the main VSI
+ * RSS table gets programmed back to what it was to begin with
+ * (prior to setup-tc for ADQ config)
+ */
+ if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
+ vsi->orig_rss_size <= vsi->num_rxq) {
+ vsi->rss_size = vsi->orig_rss_size;
+ /* now orig_rss_size is used, reset it to zero */
+ vsi->orig_rss_size = 0;
+ }
+ }
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
@@ -1710,7 +1830,7 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
}
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx)
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
int err;
@@ -1766,7 +1886,7 @@ setup_rings:
* Configure the Tx VSI for operation.
*/
static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
u16 q_idx = 0;
@@ -1817,8 +1937,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
if (ret)
return ret;
- for (i = 0; i < vsi->num_xdp_txq; i++)
- vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]);
+ ice_for_each_xdp_txq(vsi, i)
+ vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
return ret;
}
@@ -1853,6 +1973,23 @@ void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
}
+static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
+{
+ switch (rc->type) {
+ case ICE_RX_CONTAINER:
+ if (rc->rx_ring)
+ return rc->rx_ring->q_vector;
+ break;
+ case ICE_TX_CONTAINER:
+ if (rc->tx_ring)
+ return rc->tx_ring->q_vector;
+ break;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
/**
* __ice_write_itr - write throttle rate to register
* @q_vector: pointer to interrupt data structure
@@ -1877,15 +2014,39 @@ void ice_write_itr(struct ice_ring_container *rc, u16 itr)
{
struct ice_q_vector *q_vector;
- if (!rc->ring)
+ q_vector = ice_pull_qvec_from_rc(rc);
+ if (!q_vector)
return;
- q_vector = rc->ring->q_vector;
-
__ice_write_itr(q_vector, rc, itr);
}
/**
+ * ice_set_q_vector_intrl - set up interrupt rate limiting
+ * @q_vector: the vector to be configured
+ *
+ * Interrupt rate limiting is local to the vector, not per-queue, so we must
+ * detect if either ring container has dynamic moderation enabled to decide
+ * what interrupt rate limit to program via the INTRL settings. If dynamic
+ * moderation is disabled on both, write the cached value to make sure the
+ * INTRL register matches the user-visible value.
+ */
+void ice_set_q_vector_intrl(struct ice_q_vector *q_vector)
+{
+ if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {
+ /* in the case of dynamic enabled, cap each vector to no more
+ * than (4 us) 250,000 ints/sec, which allows low latency
+ * but still less than 500,000 interrupts per second, which
+ * reduces CPU a bit in the case of the lowest latency
+ * setting. The 4 here is a value in microseconds.
+ */
+ ice_write_intrl(q_vector, 4);
+ } else {
+ ice_write_intrl(q_vector, q_vector->intrl);
+ }
+}
+
+/**
* ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
* @vsi: the VSI being configured
*
@@ -1899,7 +2060,7 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
u16 txq = 0, rxq = 0;
int i, q;
- for (i = 0; i < vsi->num_q_vectors; i++) {
+ ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
u16 reg_idx = q_vector->reg_idx;
@@ -2057,7 +2218,7 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
*/
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
+ u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count)
{
u16 q_idx;
@@ -2179,10 +2340,14 @@ err_out:
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
- struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
+ if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
+ vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
+ vsi->tc_cfg.numtc = 1;
+ return;
+ }
- vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
- vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
+ /* set VSI TC information based on DCB config */
+ ice_vsi_set_dcb_tc_cfg(vsi);
}
/**
@@ -2295,8 +2460,10 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_CTRL:
+ case ICE_VSI_CHNL:
case ICE_VSI_LB:
case ICE_VSI_PF:
+ case ICE_VSI_SWITCHDEV_CTRL:
max_agg_nodes = ICE_MAX_PF_AGG_NODES;
agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
agg_node_iter = &pf->pf_agg_node[0];
@@ -2393,6 +2560,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
* @vf_id: defines VF ID to which this VSI connects. This field is meant to be
* used only for ICE_VSI_VF VSI type. For other VSI types, should
* fill-in ICE_INVAL_VFID as input.
+ * @ch: ptr to channel
*
* This allocates the sw VSI structure and its queue resources.
*
@@ -2401,7 +2569,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
*/
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id)
+ enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
@@ -2409,10 +2577,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
- if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
- vsi = ice_vsi_alloc(pf, vsi_type, vf_id);
+ if (vsi_type == ICE_VSI_CHNL)
+ vsi = ice_vsi_alloc(pf, vsi_type, ch, ICE_INVAL_VFID);
+ else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
+ vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf_id);
else
- vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID);
+ vsi = ice_vsi_alloc(pf, vsi_type, NULL, ICE_INVAL_VFID);
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
@@ -2429,10 +2599,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ice_alloc_fd_res(vsi);
- if (ice_vsi_get_qs(vsi)) {
- dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
- vsi->idx);
- goto unroll_vsi_alloc;
+ if (vsi_type != ICE_VSI_CHNL) {
+ if (ice_vsi_get_qs(vsi)) {
+ dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
+ vsi->idx);
+ goto unroll_vsi_alloc;
+ }
}
/* set RSS capabilities */
@@ -2448,6 +2620,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
switch (vsi->type) {
case ICE_VSI_CTRL:
+ case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2490,6 +2663,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
}
ice_init_arfs(vsi);
break;
+ case ICE_VSI_CHNL:
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_rss_flow_fld(vsi);
+ }
+ break;
case ICE_VSI_VF:
/* VF driver will take care of creating netdev for this type and
* map queues to vectors through Virtchnl, PF driver only
@@ -2528,9 +2707,21 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
}
/* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->alloc_txq;
+ ice_for_each_traffic_class(i) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(i)))
+ continue;
+
+ if (vsi->type == ICE_VSI_CHNL) {
+ if (!vsi->alloc_txq && vsi->num_txq)
+ max_txqs[i] = vsi->num_txq;
+ else
+ max_txqs[i] = pf->num_lan_tx;
+ } else {
+ max_txqs[i] = vsi->alloc_txq;
+ }
+ }
+ dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
@@ -2591,7 +2782,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
u32 rxq = 0;
int i, q;
- for (i = 0; i < vsi->num_q_vectors; i++) {
+ ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
ice_write_intrl(q_vector, 0);
@@ -2757,7 +2948,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
} else {
ice_vsi_close(vsi);
}
- } else if (vsi->type == ICE_VSI_CTRL) {
+ } else if (vsi->type == ICE_VSI_CTRL ||
+ vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
ice_vsi_close(vsi);
}
}
@@ -2841,6 +3033,7 @@ void ice_napi_del(struct ice_vsi *vsi)
*/
int ice_vsi_release(struct ice_vsi *vsi)
{
+ enum ice_status err;
struct ice_pf *pf;
if (!vsi->back)
@@ -2859,7 +3052,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
- ice_devlink_destroy_port(vsi);
+ if (vsi->type == ICE_VSI_PF)
+ ice_devlink_destroy_pf_port(pf);
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
@@ -2912,6 +3106,10 @@ int ice_vsi_release(struct ice_vsi *vsi)
ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+ vsi->vsi_num, err);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
@@ -3036,7 +3234,7 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
}
vsi->q_vectors[i]->intrl = coalesce[i].intrl;
- ice_write_intrl(vsi->q_vectors[i], coalesce[i].intrl);
+ ice_set_q_vector_intrl(vsi->q_vectors[i]);
}
/* the number of queue vectors increased so write whatever is in
@@ -3054,7 +3252,7 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
ice_write_itr(rc, rc->itr_setting);
vsi->q_vectors[i]->intrl = coalesce[0].intrl;
- ice_write_intrl(vsi->q_vectors[i], coalesce[0].intrl);
+ ice_set_q_vector_intrl(vsi->q_vectors[i]);
}
}
@@ -3092,6 +3290,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ if (ret)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+ vsi->vsi_num, ret);
ice_vsi_free_q_vectors(vsi);
/* SR-IOV determines needed MSIX resources all at once instead of per
@@ -3135,6 +3337,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
switch (vtype) {
case ICE_VSI_CTRL:
+ case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -3154,7 +3357,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_map_rings_to_vectors(vsi);
if (ice_is_xdp_ena_vsi(vsi)) {
- vsi->num_xdp_txq = vsi->alloc_rxq;
+ ret = ice_vsi_determine_xdp_res(vsi);
+ if (ret)
+ goto err_vectors;
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
if (ret)
goto err_vectors;
@@ -3182,20 +3387,42 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
goto err_vectors;
break;
+ case ICE_VSI_CHNL:
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_rss_flow_fld(vsi);
+ }
+ break;
default:
break;
}
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++) {
- max_txqs[i] = vsi->alloc_txq;
+ /* ADQ creates VSIs for each TC/Channel but doesn't
+ * allocate queues; instead it reconfigures the PF queues
+ * as per the TC command. So max_txqs should point to the
+ * PF Tx queues.
+ */
+ if (vtype == ICE_VSI_CHNL)
+ max_txqs[i] = pf->num_lan_tx;
+ else
+ max_txqs[i] = vsi->alloc_txq;
if (ice_is_xdp_ena_vsi(vsi))
max_txqs[i] += vsi->num_xdp_txq;
}
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ /* If MQPRIO is set, we are on the channel code path, so
+ * use a single TC for the main VSI
+ */
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
+ else
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
+ vsi->tc_cfg.ena_tc, max_txqs);
+
if (status) {
dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n",
vsi->vsi_num, ice_stat_str(status));
@@ -3267,7 +3494,6 @@ int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout)
return 0;
}
-#ifdef CONFIG_DCB
/**
* ice_vsi_update_q_map - update our copy of the VSI info with new queue map
* @vsi: VSI being configured
@@ -3283,6 +3509,146 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
}
/**
+ * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
+ * @vsi: the VSI being configured
+ * @ena_tc: TC map to be enabled
+ */
+void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct ice_pf *pf = vsi->back;
+ int numtc = vsi->tc_cfg.numtc;
+ struct ice_dcbx_cfg *dcbcfg;
+ u8 netdev_tc;
+ int i;
+
+ if (!netdev)
+ return;
+
+ /* CHNL VSI doesn't have its own netdev, hence no netdev_tc */
+ if (vsi->type == ICE_VSI_CHNL)
+ return;
+
+ if (!ena_tc) {
+ netdev_reset_tc(netdev);
+ return;
+ }
+
+ if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf))
+ numtc = vsi->all_numtc;
+
+ if (netdev_set_num_tc(netdev, numtc))
+ return;
+
+ dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+
+ ice_for_each_traffic_class(i)
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ netdev_set_tc_queue(netdev,
+ vsi->tc_cfg.tc_info[i].netdev_tc,
+ vsi->tc_cfg.tc_info[i].qcount_tx,
+ vsi->tc_cfg.tc_info[i].qoffset);
+ /* setup TC queue map for CHNL TCs */
+ ice_for_each_chnl_tc(i) {
+ if (!(vsi->all_enatc & BIT(i)))
+ break;
+ if (!vsi->mqprio_qopt.qopt.count[i])
+ break;
+ netdev_set_tc_queue(netdev, i,
+ vsi->mqprio_qopt.qopt.count[i],
+ vsi->mqprio_qopt.qopt.offset[i]);
+ }
+
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ return;
+
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ u8 ets_tc = dcbcfg->etscfg.prio_table[i];
+
+ /* Get the mapped netdev TC# for the UP */
+ netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
+ netdev_set_prio_tc_map(netdev, i, netdev_tc);
+ }
+}
+
+/**
+ * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config
+ * @vsi: the VSI being configured,
+ * @ctxt: VSI context structure
+ * @ena_tc: number of traffic classes to enable
+ *
+ * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
+ */
+static void
+ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
+ u8 ena_tc)
+{
+ u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
+ u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
+ int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
+ u8 netdev_tc = 0;
+ int i;
+
+ vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
+
+ pow = order_base_2(tc0_qcount);
+ qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
+ ICE_AQ_VSI_TC_Q_OFFSET_M) |
+ ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
+
+ ice_for_each_traffic_class(i) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
+ /* TC is not enabled */
+ vsi->tc_cfg.tc_info[i].qoffset = 0;
+ vsi->tc_cfg.tc_info[i].qcount_rx = 1;
+ vsi->tc_cfg.tc_info[i].qcount_tx = 1;
+ vsi->tc_cfg.tc_info[i].netdev_tc = 0;
+ ctxt->info.tc_mapping[i] = 0;
+ continue;
+ }
+
+ offset = vsi->mqprio_qopt.qopt.offset[i];
+ qcount_rx = vsi->mqprio_qopt.qopt.count[i];
+ qcount_tx = vsi->mqprio_qopt.qopt.count[i];
+ vsi->tc_cfg.tc_info[i].qoffset = offset;
+ vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
+ vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx;
+ vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
+ }
+
+ if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) {
+ ice_for_each_chnl_tc(i) {
+ if (!(vsi->all_enatc & BIT(i)))
+ continue;
+ offset = vsi->mqprio_qopt.qopt.offset[i];
+ qcount_rx = vsi->mqprio_qopt.qopt.count[i];
+ qcount_tx = vsi->mqprio_qopt.qopt.count[i];
+ }
+ }
+
+ /* Set actual Tx/Rx queue pairs */
+ vsi->num_txq = offset + qcount_tx;
+ vsi->num_rxq = offset + qcount_rx;
+
+ /* Setup queue TC[0].qmap for given VSI context */
+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
+ ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount);
+
+ /* Find the queue count available for channel VSIs and their
+ * starting offset
+ */
+ if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
+ vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
+ vsi->next_base_q = tc0_qcount;
+ }
+ dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq);
+ dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
+ dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
+ vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
+}
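
To see the queue accounting above in action, here is a standalone sketch
with a hypothetical three-TC mqprio layout (4@0, 4@4, 8@8 in tc-mqprio
count@offset notation); the numbers are illustrative only.

#include <stdio.h>

int main(void)
{
	/* hypothetical mqprio config: TC0 = 4@0, TC1 = 4@4, TC2 = 8@8 */
	int count[]  = { 4, 4, 8 };
	int offset[] = { 0, 4, 8 };
	int num_tc = 3, i, num_q = 0;

	for (i = 0; i < num_tc; i++)
		num_q = offset[i] + count[i]; /* last TC wins: 8 + 8 = 16 */

	printf("num_txq = num_rxq = %d\n", num_q);
	/* everything past TC0 is what channel VSIs can draw from */
	printf("cnt_q_avail = %d, next_base_q = %d\n",
	       num_q - count[0], count[0]); /* 12 and 4 */
	return 0;
}
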
+
+/**
* ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
* @vsi: VSI to be configured
* @ena_tc: TC bitmap
@@ -3300,6 +3666,9 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
u8 num_tc = 0;
dev = ice_pf_to_dev(pf);
+ if (vsi->tc_cfg.ena_tc == ena_tc &&
+ vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
+ return ret;
ice_for_each_traffic_class(i) {
/* build bitmap of enabled TCs */
@@ -3307,6 +3676,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
num_tc++;
/* populate max_txqs per TC */
max_txqs[i] = vsi->alloc_txq;
+ /* Update max_txqs if it is a CHNL VSI, because alloc_txq and
+ * alloc_rxq are zero for CHNL VSIs, hence use num_txq as max_txqs
+ */
+ if (vsi->type == ICE_VSI_CHNL &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ max_txqs[i] = vsi->num_txq;
}
vsi->tc_cfg.ena_tc = ena_tc;
@@ -3319,7 +3694,11 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
ctx->vf_num = 0;
ctx->info = vsi->info;
- ice_vsi_setup_q_map(vsi, ctx);
+ if (vsi->type == ICE_VSI_PF &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
+ else
+ ice_vsi_setup_q_map(vsi, ctx);
/* must indicate which sections of the VSI context are being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
@@ -3330,8 +3709,13 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
goto out;
}
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
+ if (vsi->type == ICE_VSI_PF &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1,
+ max_txqs);
+ else
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
+ vsi->tc_cfg.ena_tc, max_txqs);
if (status) {
dev_err(dev, "VSI %d failed TC config, error %s\n",
@@ -3347,20 +3731,19 @@ out:
kfree(ctx);
return ret;
}
-#endif /* CONFIG_DCB */
/**
* ice_update_ring_stats - Update ring statistics
- * @ring: ring to update
+ * @stats: stats to be updated
* @pkts: number of processed packets
* @bytes: number of processed bytes
*
* This function assumes that caller has acquired a u64_stats_sync lock.
*/
-static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes)
+static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
{
- ring->stats.bytes += bytes;
- ring->stats.pkts += pkts;
+ stats->bytes += bytes;
+ stats->pkts += pkts;
}
/**
@@ -3369,10 +3752,10 @@ static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes)
* @pkts: number of processed packets
* @bytes: number of processed bytes
*/
-void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
+void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&tx_ring->syncp);
- ice_update_ring_stats(tx_ring, pkts, bytes);
+ ice_update_ring_stats(&tx_ring->stats, pkts, bytes);
u64_stats_update_end(&tx_ring->syncp);
}
@@ -3382,10 +3765,10 @@ void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
* @pkts: number of processed packets
* @bytes: number of processed bytes
*/
-void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
+void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&rx_ring->syncp);
- ice_update_ring_stats(rx_ring, pkts, bytes);
+ ice_update_ring_stats(&rx_ring->stats, pkts, bytes);
u64_stats_update_end(&rx_ring->syncp);
}
@@ -3538,6 +3921,180 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
}
/**
+ * ice_get_link_speed_mbps - get link speed in Mbps
+ * @vsi: the VSI whose link speed is being queried
+ *
+ * Return the current VSI link speed, or 0 if the speed is unknown.
+ */
+int ice_get_link_speed_mbps(struct ice_vsi *vsi)
+{
+ switch (vsi->port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_100GB:
+ return SPEED_100000;
+ case ICE_AQ_LINK_SPEED_50GB:
+ return SPEED_50000;
+ case ICE_AQ_LINK_SPEED_40GB:
+ return SPEED_40000;
+ case ICE_AQ_LINK_SPEED_25GB:
+ return SPEED_25000;
+ case ICE_AQ_LINK_SPEED_20GB:
+ return SPEED_20000;
+ case ICE_AQ_LINK_SPEED_10GB:
+ return SPEED_10000;
+ case ICE_AQ_LINK_SPEED_5GB:
+ return SPEED_5000;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ return SPEED_2500;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ return SPEED_1000;
+ case ICE_AQ_LINK_SPEED_100MB:
+ return SPEED_100;
+ case ICE_AQ_LINK_SPEED_10MB:
+ return SPEED_10;
+ case ICE_AQ_LINK_SPEED_UNKNOWN:
+ default:
+ return 0;
+ }
+}
+
+/**
+ * ice_get_link_speed_kbps - get link speed in Kbps
+ * @vsi: the VSI whose link speed is being queried
+ *
+ * Return the current VSI link speed, or 0 if the speed is unknown.
+ */
+int ice_get_link_speed_kbps(struct ice_vsi *vsi)
+{
+ int speed_mbps;
+
+ speed_mbps = ice_get_link_speed_mbps(vsi);
+
+ return speed_mbps * 1000;
+}
+
+/**
+ * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
+ * @vsi: VSI to be configured
+ * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
+ *
+ * If the min_tx_rate is specified as 0 that means to clear the minimum BW limit
+ * profile, otherwise a non-zero value will force a minimum BW limit for the VSI
+ * on TC 0.
+ */
+int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+ int speed;
+
+ dev = ice_pf_to_dev(pf);
+ if (!vsi->port_info) {
+ dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
+ vsi->idx, vsi->type);
+ return -EINVAL;
+ }
+
+ speed = ice_get_link_speed_kbps(vsi);
+ if (min_tx_rate > (u64)speed) {
+ dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
+ min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
+ speed);
+ return -EINVAL;
+ }
+
+ /* Configure min BW for VSI limit */
+ if (min_tx_rate) {
+ status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
+ ICE_MIN_BW, min_tx_rate);
+ if (status) {
+ dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
+ min_tx_rate, ice_vsi_type_str(vsi->type),
+ vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
+ min_tx_rate, ice_vsi_type_str(vsi->type));
+ } else {
+ status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
+ vsi->idx, 0,
+ ICE_MIN_BW);
+ if (status) {
+ dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
+ * @vsi: VSI to be configured
+ * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
+ *
+ * If the max_tx_rate is specified as 0 that means to clear the maximum BW limit
+ * profile, otherwise a non-zero value will force a maximum BW limit for the VSI
+ * on TC 0.
+ */
+int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+ int speed;
+
+ dev = ice_pf_to_dev(pf);
+ if (!vsi->port_info) {
+ dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
+ vsi->idx, vsi->type);
+ return -EINVAL;
+ }
+
+ speed = ice_get_link_speed_kbps(vsi);
+ if (max_tx_rate > (u64)speed) {
+ dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
+ max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
+ speed);
+ return -EINVAL;
+ }
+
+ /* Configure max BW for VSI limit */
+ if (max_tx_rate) {
+ status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
+ ICE_MAX_BW, max_tx_rate);
+ if (status) {
+ dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
+ max_tx_rate, ice_vsi_type_str(vsi->type),
+ vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
+ max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
+ } else {
+ status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
+ vsi->idx, 0,
+ ICE_MAX_BW);
+ if (status) {
+ dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ }
+
+ return 0;
+}
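
As a usage sketch (this call site is illustrative, not part of the patch):
callers pass a rate in Kbps, or 0 to fall back to the default scheduler
profile, so a hypothetical TC configuration path might do:

	/* cap TC0 of this VSI at 100 Mbps, then later clear the cap */
	err = ice_set_max_bw_limit(vsi, 100000); /* 100,000 Kbps */
	if (!err)
		err = ice_set_max_bw_limit(vsi, 0); /* restore default */
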
+
+/**
* ice_set_link - turn on/off physical link
* @vsi: VSI to modify physical link on
* @ena: turn on/off physical link
@@ -3573,3 +4130,126 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
return 0;
}
+
+/**
+ * ice_is_feature_supported
+ * @pf: pointer to the struct ice_pf instance
+ * @f: feature enum to be checked
+ *
+ * returns true if feature is supported, false otherwise
+ */
+bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
+{
+ if (f < 0 || f >= ICE_F_MAX)
+ return false;
+
+ return test_bit(f, pf->features);
+}
+
+/**
+ * ice_set_feature_support
+ * @pf: pointer to the struct ice_pf instance
+ * @f: feature enum to set
+ */
+static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
+{
+ if (f < 0 || f >= ICE_F_MAX)
+ return;
+
+ set_bit(f, pf->features);
+}
+
+/**
+ * ice_clear_feature_support
+ * @pf: pointer to the struct ice_pf instance
+ * @f: feature enum to clear
+ */
+void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
+{
+ if (f < 0 || f >= ICE_F_MAX)
+ return;
+
+ clear_bit(f, pf->features);
+}
+
+/**
+ * ice_init_feature_support
+ * @pf: pointer to the struct ice_pf instance
+ *
+ * called during init to set up the supported features
+ */
+void ice_init_feature_support(struct ice_pf *pf)
+{
+ switch (pf->hw.device_id) {
+ case ICE_DEV_ID_E810C_BACKPLANE:
+ case ICE_DEV_ID_E810C_QSFP:
+ case ICE_DEV_ID_E810C_SFP:
+ ice_set_feature_support(pf, ICE_F_DSCP);
+ if (ice_is_e810t(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_SMA_CTRL);
+ break;
+ default:
+ break;
+ }
+}
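
The intended consumer pattern for these feature bits is a simple guarded
check; this call site is illustrative, not from the patch:

	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
		dev_dbg(ice_pf_to_dev(pf), "SMA control pins supported\n");
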
+
+/**
+ * ice_vsi_update_security - update security block in VSI
+ * @vsi: pointer to VSI structure
+ * @fill: function pointer to fill ctx
+ */
+int
+ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
+{
+ struct ice_vsi_ctx ctx = { 0 };
+
+ ctx.info = vsi->info;
+ ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+ fill(&ctx);
+
+ if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
+ return -ENODEV;
+
+ vsi->info = ctx.info;
+ return 0;
+}
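
A sketch of the fill-callback pattern this enables, using one of the
helpers defined just below (the call site itself is illustrative):

	if (ice_vsi_update_security(vsi, ice_vsi_ctx_set_allow_override))
		dev_err(ice_pf_to_dev(vsi->back),
			"could not enable destination override\n");
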
+
+/**
+ * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+}
+
+/**
+ * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
+ ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+}
+
+/**
+ * ice_vsi_ctx_set_allow_override - allow destination override on VSI
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
+}
+
+/**
+ * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index d5a28bf0fc2c..e7f4ecbb8549 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -14,7 +14,7 @@ void ice_update_eth_stats(struct ice_vsi *vsi);
int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx);
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
@@ -51,13 +51,18 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
int ice_set_link(struct ice_vsi *vsi, bool ena);
-#ifdef CONFIG_DCB
+void ice_vsi_delete(struct ice_vsi *vsi);
+int ice_vsi_clear(struct ice_vsi *vsi);
+
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
-#endif /* CONFIG_DCB */
+
+int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi);
+
+void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id);
+ enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch);
void ice_napi_del(struct ice_vsi *vsi);
@@ -93,9 +98,9 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
-void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
-void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
@@ -103,6 +108,7 @@ int ice_status_to_errno(enum ice_status err);
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
+void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
@@ -116,4 +122,22 @@ bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
int ice_clear_dflt_vsi(struct ice_sw *sw);
+int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate);
+int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate);
+int ice_get_link_speed_kbps(struct ice_vsi *vsi);
+int ice_get_link_speed_mbps(struct ice_vsi *vsi);
+int
+ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *));
+
+void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx);
+
+void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
+
+void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
+
+void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
+
+bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);
+void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
+void ice_init_feature_support(struct ice_pf *pf);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 34e64533026a..9ba22778011d 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -19,6 +19,8 @@
*/
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
+#include "ice_eswitch.h"
+#include "ice_tc_lib.h"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -42,16 +44,20 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
static DEFINE_IDA(ice_aux_ida);
+DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
+EXPORT_SYMBOL(ice_xdp_locking_key);
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
-static int ice_vsi_open(struct ice_vsi *vsi);
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
static void ice_vsi_release_all(struct ice_pf *pf);
+static int ice_rebuild_channels(struct ice_pf *pf);
+static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
+
bool netif_is_ice(struct net_device *dev)
{
return dev && (dev->netdev_ops == &ice_netdev_ops);
@@ -61,7 +67,7 @@ bool netif_is_ice(struct net_device *dev)
* ice_get_tx_pending - returns number of Tx descriptors not processed
* @ring: the ring of descriptors
*/
-static u16 ice_get_tx_pending(struct ice_ring *ring)
+static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
u16 head, tail;
@@ -100,10 +106,15 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
hw = &vsi->back->hw;
- for (i = 0; i < vsi->num_txq; i++) {
- struct ice_ring *tx_ring = vsi->tx_rings[i];
+ ice_for_each_txq(vsi, i) {
+ struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
+
+ if (!tx_ring)
+ continue;
+ if (ice_ring_ch_enabled(tx_ring))
+ continue;
- if (tx_ring && tx_ring->desc) {
+ if (tx_ring->desc) {
/* If packet counter has not changed the queue is
* likely stalled, so force an interrupt for this
* queue.
@@ -455,17 +466,21 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
}
/**
- * ice_prepare_for_reset - prep for the core to reset
+ * ice_prepare_for_reset - prep for reset
* @pf: board private structure
+ * @reset_type: reset type requested
*
* Inform or close all dependent features in prep for reset.
*/
static void
-ice_prepare_for_reset(struct ice_pf *pf)
+ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct ice_hw *hw = &pf->hw;
+ struct ice_vsi *vsi;
unsigned int i;
+ dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
+
/* already prepared for reset */
if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
return;
@@ -480,6 +495,38 @@ ice_prepare_for_reset(struct ice_pf *pf)
ice_for_each_vf(pf, i)
ice_set_vf_state_qs_dis(&pf->vf[i]);
+ /* release ADQ specific HW and SW resources */
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ goto skip;
+
+ /* to be on the safe side, reset orig_rss_size so that the normal
+ * flow of deciding rss_size can take precedence
+ */
+ vsi->orig_rss_size = 0;
+
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
+ if (reset_type == ICE_RESET_PFR) {
+ vsi->old_ena_tc = vsi->all_enatc;
+ vsi->old_numtc = vsi->all_numtc;
+ } else {
+ ice_remove_q_channels(vsi, true);
+
+ /* other reset types do not support channel rebuild,
+ * hence reset the needed info
+ */
+ vsi->old_ena_tc = 0;
+ vsi->all_enatc = 0;
+ vsi->old_numtc = 0;
+ vsi->all_numtc = 0;
+ vsi->req_txq = 0;
+ vsi->req_rxq = 0;
+ clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
+ memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
+ }
+ }
+skip:
+
/* clear SW filtering DB */
ice_clear_hw_tbls(hw);
/* disable the VSIs and their queues that are not already DOWN */
@@ -499,8 +546,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
/**
* ice_do_reset - Initiate one of many types of resets
* @pf: board private structure
- * @reset_type: reset type requested
- * before this function was called.
+ * @reset_type: reset type requested before this function was called.
*/
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
@@ -509,7 +555,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, reset_type);
/* trigger the reset */
if (ice_reset(hw, reset_type)) {
@@ -567,7 +613,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
/* return if no valid reset type requested */
if (reset_type == ICE_RESET_INVAL)
return;
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, reset_type);
/* make sure we are ready to rebuild */
if (ice_check_reset(&pf->hw)) {
@@ -624,7 +670,10 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi)
netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
break;
case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
- netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+ if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
+ netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
+ else
+ netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
break;
default:
break;
@@ -1965,7 +2014,8 @@ static int ice_configure_phy(struct ice_vsi *vsi)
ice_print_topo_conflict(vsi);
- if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
+ if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
+ phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
return -EPERM;
if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
@@ -2302,14 +2352,14 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
irq_num = pf->msix_entries[base + vector].vector;
- if (q_vector->tx.ring && q_vector->rx.ring) {
+ if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "TxRx", rx_int_idx++);
tx_int_idx++;
- } else if (q_vector->rx.ring) {
+ } else if (q_vector->rx.rx_ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "rx", rx_int_idx++);
- } else if (q_vector->tx.ring) {
+ } else if (q_vector->tx.tx_ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "tx", tx_int_idx++);
} else {
@@ -2367,11 +2417,12 @@ free_q_irqs:
static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
{
struct device *dev = ice_pf_to_dev(vsi->back);
- int i;
+ struct ice_tx_desc *tx_desc;
+ int i, j;
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
u16 xdp_q_idx = vsi->alloc_txq + i;
- struct ice_ring *xdp_ring;
+ struct ice_tx_ring *xdp_ring;
xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
@@ -2380,16 +2431,29 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
xdp_ring->q_index = xdp_q_idx;
xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
- xdp_ring->ring_active = false;
xdp_ring->vsi = vsi;
xdp_ring->netdev = NULL;
+ xdp_ring->next_dd = ICE_TX_THRESH - 1;
+ xdp_ring->next_rs = ICE_TX_THRESH - 1;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
+ xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+ spin_lock_init(&xdp_ring->tx_lock);
+ for (j = 0; j < xdp_ring->count; j++) {
+ tx_desc = ICE_TX_DESC(xdp_ring, j);
+ tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
+ }
+ }
+
+ ice_for_each_rxq(vsi, i) {
+ if (static_key_enabled(&ice_xdp_locking_key))
+ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+ else
+ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
}
return 0;
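
When there are fewer XDP rings than Rx queues (the locking key is enabled),
the mapping above is a plain modulo. A standalone sketch with illustrative
sizes:

#include <stdio.h>

int main(void)
{
	unsigned int num_rxq = 8, num_xdp_txq = 4, i;

	for (i = 0; i < num_rxq; i++) /* rx ring i shares xdp ring i % 4 */
		printf("rx %u -> xdp %u\n", i, i % num_xdp_txq);
	return 0;
}
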
@@ -2455,6 +2519,10 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
if (__ice_vsi_get_qs(&xdp_qs_cfg))
goto err_map_xdp;
+ if (static_key_enabled(&ice_xdp_locking_key))
+ netdev_warn(vsi->netdev,
+ "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
+
if (ice_xdp_alloc_setup_rings(vsi))
goto clear_xdp_rings;
@@ -2468,11 +2536,11 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
q_base = vsi->num_xdp_txq - xdp_rings_rem;
for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
- struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
xdp_ring->q_vector = q_vector;
- xdp_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = xdp_ring;
+ xdp_ring->next = q_vector->tx.tx_ring;
+ q_vector->tx.tx_ring = xdp_ring;
}
xdp_rings_rem -= xdp_rings_per_v;
}
@@ -2501,7 +2569,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
return 0;
clear_xdp_rings:
- for (i = 0; i < vsi->num_xdp_txq; i++)
+ ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
kfree_rcu(vsi->xdp_rings[i], rcu);
vsi->xdp_rings[i] = NULL;
@@ -2509,7 +2577,7 @@ clear_xdp_rings:
err_map_xdp:
mutex_lock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
}
@@ -2542,25 +2610,25 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, v_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- struct ice_ring *ring;
+ struct ice_tx_ring *ring;
- ice_for_each_ring(ring, q_vector->tx)
+ ice_for_each_tx_ring(ring, q_vector->tx)
if (!ring->tx_buf || !ice_ring_is_xdp(ring))
break;
/* restore the value of last node prior to XDP setup */
- q_vector->tx.ring = ring;
+ q_vector->tx.tx_ring = ring;
}
free_qmap:
mutex_lock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
}
mutex_unlock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->num_xdp_txq; i++)
+ ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
if (vsi->xdp_rings[i]->desc)
ice_free_tx_ring(vsi->xdp_rings[i]);
@@ -2571,6 +2639,9 @@ free_qmap:
devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
vsi->xdp_rings = NULL;
+ if (static_key_enabled(&ice_xdp_locking_key))
+ static_branch_dec(&ice_xdp_locking_key);
+
if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
return 0;
@@ -2598,7 +2669,7 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
int i;
ice_for_each_rxq(vsi, i) {
- struct ice_ring *rx_ring = vsi->rx_rings[i];
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
if (rx_ring->xsk_pool)
napi_schedule(&rx_ring->q_vector->napi);
@@ -2606,6 +2677,29 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
+ * @vsi: VSI to determine the count of XDP Tx qs
+ *
+ * returns 0 if the available Tx queue count is at least half the CPU count,
+ * -ENOMEM otherwise
+ */
+int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
+{
+ u16 avail = ice_get_avail_txq_count(vsi->back);
+ u16 cpus = num_possible_cpus();
+
+ if (avail < cpus / 2)
+ return -ENOMEM;
+
+ vsi->num_xdp_txq = min_t(u16, avail, cpus);
+
+ if (vsi->num_xdp_txq < cpus)
+ static_branch_inc(&ice_xdp_locking_key);
+
+ return 0;
+}
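
A standalone sketch of that sizing rule with illustrative numbers: on a
16-CPU box, 12 available queues still succeed but force the locking key,
while 7 falls below the cpus/2 floor.

#include <stdio.h>

/* -1 stands in for -ENOMEM */
static int xdp_txq_count(unsigned int avail, unsigned int cpus, int *locked)
{
	if (avail < cpus / 2)
		return -1;
	*locked = avail < cpus; /* fewer rings than CPUs -> rings are shared */
	return avail < cpus ? (int)avail : (int)cpus;
}

int main(void)
{
	int locked = 0;

	printf("%d\n", xdp_txq_count(12, 16, &locked)); /* 12, locked = 1 */
	printf("%d\n", xdp_txq_count(7, 16, &locked));  /* -1 */
	return 0;
}
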
+
+/**
* ice_xdp_setup_prog - Add or remove XDP eBPF program
* @vsi: VSI to setup XDP for
* @prog: XDP program
@@ -2634,10 +2728,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
if (!ice_is_xdp_ena_vsi(vsi) && prog) {
- vsi->num_xdp_txq = vsi->alloc_rxq;
- xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
- if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
+ if (xdp_ring_err) {
+ NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+ } else {
+ xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ }
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
@@ -3103,6 +3201,9 @@ static void ice_set_netdev_features(struct net_device *netdev)
/* enable features */
netdev->features |= netdev->hw_features;
+
+ netdev->hw_features |= NETIF_F_HW_TC;
+
/* encap and VLAN devices inherit default, csumo and tso features */
netdev->hw_enc_features |= dflt_features | csumo_features |
tso_features;
@@ -3139,7 +3240,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_PF) {
SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
- ether_addr_copy(netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(netdev, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
}
@@ -3182,7 +3283,14 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
+ return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL);
+}
+
+static struct ice_vsi *
+ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
+ struct ice_channel *ch)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch);
}
/**
@@ -3196,7 +3304,7 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
+ return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL);
}
/**
@@ -3210,7 +3318,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
+ return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL);
}
/**
@@ -3303,6 +3411,9 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
if (!vsi)
return -ENOMEM;
+ /* init channel list */
+ INIT_LIST_HEAD(&vsi->ch_list);
+
status = ice_cfg_netdev(vsi);
if (status) {
status = -ENODEV;
@@ -3538,6 +3649,13 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_left -= needed;
}
+ /* reserve for switchdev */
+ needed = ICE_ESWITCH_MSIX;
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
+ v_budget += needed;
+ v_left -= needed;
+
/* total used for non-traffic vectors */
v_other = v_budget;
@@ -4170,11 +4288,11 @@ static int ice_register_netdev(struct ice_pf *pf)
set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);
- err = ice_devlink_create_port(vsi);
+ err = ice_devlink_create_pf_port(pf);
if (err)
goto err_devlink_create;
- devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
+ devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
return 0;
err_devlink_create:
@@ -4224,6 +4342,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (!pf)
return -ENOMEM;
+ /* initialize Auxiliary index to invalid value */
+ pf->aux_idx = -1;
+
/* set up for high or low DMA */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err)
@@ -4258,8 +4379,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
- ice_devlink_register(pf);
-
#ifndef CONFIG_DYNAMIC_DEBUG
if (debug < -1)
hw->debug_mask = debug;
@@ -4272,6 +4391,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_exit_unroll;
}
+ ice_init_feature_support(pf);
+
ice_request_fw(pf);
/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
@@ -4493,6 +4614,7 @@ probe_done:
dev_warn(dev, "RDMA is not supported on this device\n");
}
+ ice_devlink_register(pf);
return 0;
err_init_aux_unroll:
@@ -4516,7 +4638,6 @@ err_init_pf_unroll:
ice_devlink_destroy_regions(pf);
ice_deinit_hw(hw);
err_exit_unroll:
- ice_devlink_unregister(pf);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
return err;
@@ -4593,9 +4714,7 @@ static void ice_remove(struct pci_dev *pdev)
struct ice_pf *pf = pci_get_drvdata(pdev);
int i;
- if (!pf)
- return;
-
+ ice_devlink_unregister(pf);
for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
if (!ice_is_reset_in_progress(pf->state))
break;
@@ -4611,7 +4730,8 @@ static void ice_remove(struct pci_dev *pdev)
ice_aq_cancel_waiting_tasks(pf);
ice_unplug_aux_dev(pf);
- ida_free(&ice_aux_ida, pf->aux_idx);
+ if (pf->aux_idx >= 0)
+ ida_free(&ice_aux_ida, pf->aux_idx);
set_bit(ICE_DOWN, pf->state);
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
@@ -4632,7 +4752,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_deinit_pf(pf);
ice_devlink_destroy_regions(pf);
ice_deinit_hw(&pf->hw);
- ice_devlink_unregister(pf);
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
@@ -4894,7 +5013,7 @@ ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
set_bit(ICE_PFR_REQ, pf->state);
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, ICE_RESET_PFR);
}
}
@@ -4986,7 +5105,7 @@ static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
set_bit(ICE_PFR_REQ, pf->state);
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, ICE_RESET_PFR);
}
}
}
@@ -5012,6 +5131,8 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
@@ -5140,10 +5261,16 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}
+ if (ice_chnl_dmac_fltr_cnt(pf)) {
+ netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
+ mac);
+ return -EAGAIN;
+ }
+
netif_addr_lock_bh(netdev);
ether_addr_copy(old_mac, netdev->dev_addr);
/* change the netdev's MAC address */
- memcpy(netdev->dev_addr, mac, netdev->addr_len);
+ eth_hw_addr_set(netdev, mac);
netif_addr_unlock_bh(netdev);
/* Clean up old MAC filter. Not an error if old filter doesn't exist */
@@ -5171,7 +5298,7 @@ err_update_filters:
netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
mac);
netif_addr_lock_bh(netdev);
- ether_addr_copy(netdev->dev_addr, old_mac);
+ eth_hw_addr_set(netdev, old_mac);
netif_addr_unlock_bh(netdev);
return err;
}
@@ -5391,6 +5518,18 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
ice_clear_arfs(vsi);
}
+ /* don't turn off hw_tc_offload when ADQ is already enabled */
+ if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
+ dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
+ return -EACCES;
+ }
+
+ if ((features & NETIF_F_HW_TC) &&
+ !(netdev->features & NETIF_F_HW_TC))
+ set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ else
+ clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+
return ret;
}
@@ -5440,77 +5579,59 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
}
/* THEORY OF MODERATION:
- * The below code creates custom DIM profiles for use by this driver, because
- * the ice driver hardware works differently than the hardware that DIMLIB was
+ * The ice driver hardware works differently than the hardware that DIMLIB was
* originally made for. ice hardware doesn't have packet count limits that
* can trigger an interrupt, but it *does* have interrupt rate limit support,
- * and this code adds that capability to be used by the driver when it's using
- * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver
- * for how to "respond" to traffic and interrupts, so this driver uses a
- * slightly different set of moderation parameters to get best performance.
+ * which is hard-coded to a limit of 250,000 ints/second.
+ * If not using dynamic moderation, the INTRL value can be modified
+ * by ethtool rx-usecs-high.
*/
struct ice_dim {
/* the throttle rate for interrupts, basically worst case delay before
* an initial interrupt fires, value is stored in microseconds.
*/
u16 itr;
- /* the rate limit for interrupts, which can cap a delay from a small
- * ITR at a certain amount of interrupts per second. f.e. a 2us ITR
- * could yield as much as 500,000 interrupts per second, but with a
- * 10us rate limit, it limits to 100,000 interrupts per second. Value
- * is stored in microseconds.
- */
- u16 intrl;
};
/* Make a different profile for Rx that doesn't allow quite so aggressive
- * moderation at the high end (it maxes out at 128us or about 8k interrupts a
- * second. The INTRL/rate parameters here are only useful to cap small ITR
- * values, which is why for larger ITR's - like 128, which can only generate
- * 8k interrupts per second, there is no point to rate limit and the values
- * are set to zero. The rate limit values do affect latency, and so must
- * be reasonably small so to not impact latency sensitive tests.
+ * moderation at the high end (it maxes out at 126us, or about 8k interrupts a
+ * second).
*/
static const struct ice_dim rx_profile[] = {
- {2, 10},
- {8, 16},
- {32, 0},
- {96, 0},
- {128, 0}
+ {2}, /* 500,000 ints/s, capped at 250K by INTRL */
+ {8}, /* 125,000 ints/s */
+ {16}, /* 62,500 ints/s */
+ {62}, /* 16,129 ints/s */
+ {126} /* 7,936 ints/s */
};
/* The transmit profile, which has the same sorts of values
* as the previous struct
*/
static const struct ice_dim tx_profile[] = {
- {2, 10},
- {8, 16},
- {64, 0},
- {128, 0},
- {256, 0}
+ {2}, /* 500,000 ints/s, capped at 250K by INTRL */
+ {8}, /* 125,000 ints/s */
+ {40}, /* 25,000 ints/s */
+ {128}, /* 7,812 ints/s */
+ {256} /* 3,906 ints/s */
};
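+
+/* For reference: with ITR expressed in microseconds, the worst-case interrupt
+ * rate implied by the tables above is roughly 1,000,000 / itr ints/s, e.g.
+ * 1,000,000 / 8 = 125,000 ints/s.
+ */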
static void ice_tx_dim_work(struct work_struct *work)
{
struct ice_ring_container *rc;
- struct ice_q_vector *q_vector;
struct dim *dim;
- u16 itr, intrl;
+ u16 itr;
dim = container_of(work, struct dim, work);
- rc = container_of(dim, struct ice_ring_container, dim);
- q_vector = container_of(rc, struct ice_q_vector, tx);
+ rc = (struct ice_ring_container *)dim->priv;
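+ /* ice_init_moderation() stores the ring container in dim->priv, so it
+ * can be recovered directly instead of via container_of()
+ */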
- if (dim->profile_ix >= ARRAY_SIZE(tx_profile))
- dim->profile_ix = ARRAY_SIZE(tx_profile) - 1;
+ WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
/* look up the values in our local table */
itr = tx_profile[dim->profile_ix].itr;
- intrl = tx_profile[dim->profile_ix].intrl;
- ice_trace(tx_dim_work, q_vector, dim);
+ ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
ice_write_itr(rc, itr);
- ice_write_intrl(q_vector, intrl);
dim->state = DIM_START_MEASURE;
}
@@ -5518,28 +5639,65 @@ static void ice_tx_dim_work(struct work_struct *work)
static void ice_rx_dim_work(struct work_struct *work)
{
struct ice_ring_container *rc;
- struct ice_q_vector *q_vector;
struct dim *dim;
- u16 itr, intrl;
+ u16 itr;
dim = container_of(work, struct dim, work);
- rc = container_of(dim, struct ice_ring_container, dim);
- q_vector = container_of(rc, struct ice_q_vector, rx);
+ rc = (struct ice_ring_container *)dim->priv;
- if (dim->profile_ix >= ARRAY_SIZE(rx_profile))
- dim->profile_ix = ARRAY_SIZE(rx_profile) - 1;
+ WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
/* look up the values in our local table */
itr = rx_profile[dim->profile_ix].itr;
- intrl = rx_profile[dim->profile_ix].intrl;
- ice_trace(rx_dim_work, q_vector, dim);
+ ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
ice_write_itr(rc, itr);
- ice_write_intrl(q_vector, intrl);
dim->state = DIM_START_MEASURE;
}
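+
+/* Index 1 in both profiles above is the 8us entry (125,000 ints/s), used as
+ * the starting point before DIM has taken any measurements.
+ */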
+#define ICE_DIM_DEFAULT_PROFILE_IX 1
+
+/**
+ * ice_init_moderation - set up interrupt moderation
+ * @q_vector: the vector containing rings to be configured
+ *
+ * Set up interrupt moderation registers, with the intent to do the right thing
+ * whether called from reset or from probe, and whether or not dynamic
+ * moderation is enabled. Take special care to write all the registers in both
+ * the dynamic and non-dynamic moderation modes so that hardware is left in a
+ * known state.
+ */
+static void ice_init_moderation(struct ice_q_vector *q_vector)
+{
+ struct ice_ring_container *rc;
+ bool tx_dynamic, rx_dynamic;
+
+ rc = &q_vector->tx;
+ INIT_WORK(&rc->dim.work, ice_tx_dim_work);
+ rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
+ rc->dim.priv = rc;
+ tx_dynamic = ITR_IS_DYNAMIC(rc);
+
+ /* set the initial TX ITR to match the above */
+ ice_write_itr(rc, tx_dynamic ?
+ tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
+
+ rc = &q_vector->rx;
+ INIT_WORK(&rc->dim.work, ice_rx_dim_work);
+ rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
+ rc->dim.priv = rc;
+ rx_dynamic = ITR_IS_DYNAMIC(rc);
+
+ /* set the initial RX ITR to match the above */
+ ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
+ rc->itr_setting);
+
+ ice_set_q_vector_intrl(q_vector);
+}
+
/**
* ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
* @vsi: the VSI being configured
@@ -5554,13 +5712,9 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work);
- q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
-
- INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work);
- q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ ice_init_moderation(q_vector);
- if (q_vector->rx.ring || q_vector->tx.ring)
+ if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
napi_enable(&q_vector->napi);
}
}
@@ -5620,7 +5774,8 @@ int ice_up(struct ice_vsi *vsi)
/**
* ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
- * @ring: Tx or Rx ring to read stats from
+ * @syncp: pointer to u64_stats_sync
+ * @stats: stats that pkts and bytes count will be taken from
* @pkts: packets stats counter
* @bytes: bytes stats counter
*
@@ -5628,19 +5783,16 @@ int ice_up(struct ice_vsi *vsi)
* that need to be performed to read u64 values on a 32-bit machine.
*/
static void
-ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
+ u64 *pkts, u64 *bytes)
{
unsigned int start;
- *pkts = 0;
- *bytes = 0;
- if (!ring)
- return;
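+ /* the begin/retry pair rereads the counters if a writer updated them
+ * mid-read, which matters on 32-bit machines where u64 reads can tear
+ */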
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
- *pkts = ring->stats.pkts;
- *bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ start = u64_stats_fetch_begin_irq(syncp);
+ *pkts = stats.pkts;
+ *bytes = stats.bytes;
+ } while (u64_stats_fetch_retry_irq(syncp, start));
}
/**
@@ -5650,18 +5802,19 @@ ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
* @count: number of rings
*/
static void
-ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
+ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
u16 count)
{
struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
u16 i;
for (i = 0; i < count; i++) {
- struct ice_ring *ring;
- u64 pkts, bytes;
+ struct ice_tx_ring *ring;
+ u64 pkts = 0, bytes = 0;
ring = READ_ONCE(rings[i]);
- ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
+ if (ring)
+ ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
vsi_stats->tx_packets += pkts;
vsi_stats->tx_bytes += bytes;
vsi->tx_restart += ring->tx_stats.restart_q;
@@ -5700,9 +5853,9 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
- struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]);
+ struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
- ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
+ ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
vsi_stats->rx_packets += pkts;
vsi_stats->rx_bytes += bytes;
vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
@@ -5966,7 +6119,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.ring || q_vector->tx.ring)
+ if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
napi_disable(&q_vector->napi);
cancel_work_sync(&q_vector->tx.dim.work);
@@ -5985,9 +6138,11 @@ int ice_down(struct ice_vsi *vsi)
/* Caller of this function is expected to set the
* vsi->state ICE_DOWN bit
*/
- if (vsi->netdev) {
+ if (vsi->netdev && vsi->type == ICE_VSI_PF) {
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
+ } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
+ ice_eswitch_stop_all_tx_queues(vsi->back);
}
ice_vsi_dis_irq(vsi);
@@ -6049,12 +6204,13 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
}
ice_for_each_txq(vsi, i) {
- struct ice_ring *ring = vsi->tx_rings[i];
+ struct ice_tx_ring *ring = vsi->tx_rings[i];
if (!ring)
return -EINVAL;
- ring->netdev = vsi->netdev;
+ if (vsi->netdev)
+ ring->netdev = vsi->netdev;
err = ice_setup_tx_ring(ring);
if (err)
break;
@@ -6080,12 +6236,13 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
ice_for_each_rxq(vsi, i) {
- struct ice_ring *ring = vsi->rx_rings[i];
+ struct ice_rx_ring *ring = vsi->rx_rings[i];
if (!ring)
return -EINVAL;
- ring->netdev = vsi->netdev;
+ if (vsi->netdev)
+ ring->netdev = vsi->netdev;
err = ice_setup_rx_ring(ring);
if (err)
break;
@@ -6158,7 +6315,7 @@ err_setup_tx:
*
* Returns 0 on success, negative value on error
*/
-static int ice_vsi_open(struct ice_vsi *vsi)
+int ice_vsi_open(struct ice_vsi *vsi)
{
char int_name[ICE_INT_NAME_STR_LEN];
struct ice_pf *pf = vsi->back;
@@ -6183,14 +6340,16 @@ static int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- /* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
- if (err)
- goto err_set_qs;
+ if (vsi->type == ICE_VSI_PF) {
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
+ if (err)
+ goto err_set_qs;
- err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
- if (err)
- goto err_set_qs;
+ err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
+ if (err)
+ goto err_set_qs;
+ }
err = ice_up_complete(vsi);
if (err)
@@ -6225,6 +6384,9 @@ static void ice_vsi_release_all(struct ice_pf *pf)
if (!pf->vsi[i])
continue;
+ if (pf->vsi[i]->type == ICE_VSI_CHNL)
+ continue;
+
err = ice_vsi_release(pf->vsi[i]);
if (err)
dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
@@ -6429,6 +6591,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
+ err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
+ if (err) {
+ dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
+ goto err_vsi_rebuild;
+ }
+
+ if (reset_type == ICE_RESET_PFR) {
+ err = ice_rebuild_channels(pf);
+ if (err) {
+ dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
+ err);
+ goto err_vsi_rebuild;
+ }
+ }
+
/* If Flow Director is active */
if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
@@ -6975,7 +7152,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_ring *tx_ring = NULL;
+ struct ice_tx_ring *tx_ring = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u32 i;
@@ -6993,7 +7170,7 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_txq; i++)
+ ice_for_each_txq(vsi, i)
if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
if (txqueue == vsi->tx_rings[i]->q_index) {
tx_ring = vsi->tx_rings[i];
@@ -7050,6 +7227,935 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
+ * ice_setup_tc_cls_flower - flower classifier offloads
+ * @np: net device to configure
+ * @filter_dev: device on which filter is added
+ * @cls_flower: offload data
+ */
+static int
+ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
+ struct net_device *filter_dev,
+ struct flow_cls_offload *cls_flower)
+{
+ struct ice_vsi *vsi = np->vsi;
+
+ if (cls_flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return ice_add_cls_flower(filter_dev, vsi, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return ice_del_cls_flower(vsi, cls_flower);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * ice_setup_tc_block_cb - callback handler registered for TC block
+ * @type: TC SETUP type
+ * @type_data: TC flower offload data that contains user input
+ * @cb_priv: netdev private data
+ */
+static int
+ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct ice_netdev_priv *np = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_setup_tc_cls_flower(np, np->vsi->netdev,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
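+/* Example (illustrative; interface and addresses hypothetical) of a flower
+ * filter that reaches ice_add_cls_flower() through this callback:
+ *   tc filter add dev eth0 protocol ip ingress flower \
+ *      dst_ip 192.168.1.1 ip_proto tcp dst_port 80 skip_sw hw_tc 1
+ */
+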
+/**
+ * ice_validate_mqprio_qopt - Validate TCF input parameters
+ * @vsi: Pointer to VSI
+ * @mqprio_qopt: input parameters for mqprio queue configuration
+ *
+ * This function validates MQPRIO params, such as qcount (which must be a
+ * power of 2 wherever needed), and makes sure the user doesn't specify qcount
+ * and BW rate limits for more TCs than "num_tc"
+ */
+static int
+ice_validate_mqprio_qopt(struct ice_vsi *vsi,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+ u64 sum_max_rate = 0, sum_min_rate = 0;
+ int non_power_of_2_qcount = 0;
+ struct ice_pf *pf = vsi->back;
+ int max_rss_q_cnt = 0;
+ struct device *dev;
+ int i, speed;
+ u8 num_tc;
+
+ if (vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ if (mqprio_qopt->qopt.offset[0] != 0 ||
+ mqprio_qopt->qopt.num_tc < 1 ||
+ mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
+ vsi->ch_rss_size = 0;
+ num_tc = mqprio_qopt->qopt.num_tc;
+
+ for (i = 0; num_tc; i++) {
+ int qcount = mqprio_qopt->qopt.count[i];
+ u64 max_rate, min_rate, rem;
+
+ if (!qcount)
+ return -EINVAL;
+
+ if (is_power_of_2(qcount)) {
+ if (non_power_of_2_qcount &&
+ qcount > non_power_of_2_qcount) {
+ dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
+ qcount, non_power_of_2_qcount);
+ return -EINVAL;
+ }
+ if (qcount > max_rss_q_cnt)
+ max_rss_q_cnt = qcount;
+ } else {
+ if (non_power_of_2_qcount &&
+ qcount != non_power_of_2_qcount) {
+ dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
+ qcount, non_power_of_2_qcount);
+ return -EINVAL;
+ }
+ if (qcount < max_rss_q_cnt) {
+ dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
+ qcount, max_rss_q_cnt);
+ return -EINVAL;
+ }
+ max_rss_q_cnt = qcount;
+ non_power_of_2_qcount = qcount;
+ }
+
+ /* TC command takes input in K/M/Gbps or K/M/Gbit etc but
+ * converts the bandwidth rate limit into Bytes/s when
+ * passing it down to the driver. So convert input bandwidth
+ * from Bytes/s to Kbps
+ */
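+ /* e.g. a 100Mbit limit arrives from tc as 12,500,000 Bytes/s; dividing
+ * by ICE_BW_KBPS_DIVISOR (assumed here to be 125) yields 100,000 Kbps
+ */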
+ max_rate = mqprio_qopt->max_rate[i];
+ max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
+ sum_max_rate += max_rate;
+
+ /* min_rate is the minimum guaranteed rate; if set, it can't be below ICE_MIN_BW_LIMIT */
+ min_rate = mqprio_qopt->min_rate[i];
+ min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
+ sum_min_rate += min_rate;
+
+ if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
+ dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
+ min_rate, ICE_MIN_BW_LIMIT);
+ return -EINVAL;
+ }
+
+ iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
+ if (rem) {
+ dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
+ i, ICE_MIN_BW_LIMIT);
+ return -EINVAL;
+ }
+
+ iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
+ if (rem) {
+ dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
+ i, ICE_MIN_BW_LIMIT);
+ return -EINVAL;
+ }
+
+ /* min_rate can't be more than max_rate, except when max_rate
+ * is zero (implies max_rate sought is max line rate). In such
+ * a case min_rate can be more than max.
+ */
+ if (max_rate && min_rate > max_rate) {
+ dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
+ min_rate, max_rate);
+ return -EINVAL;
+ }
+
+ if (i >= mqprio_qopt->qopt.num_tc - 1)
+ break;
+ if (mqprio_qopt->qopt.offset[i + 1] !=
+ (mqprio_qopt->qopt.offset[i] + qcount))
+ return -EINVAL;
+ }
+ if (vsi->num_rxq <
+ (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
+ return -EINVAL;
+ if (vsi->num_txq <
+ (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
+ return -EINVAL;
+
+ speed = ice_get_link_speed_kbps(vsi);
+ if (sum_max_rate && sum_max_rate > (u64)speed) {
+ dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
+ sum_max_rate, speed);
+ return -EINVAL;
+ }
+ if (sum_min_rate && sum_min_rate > (u64)speed) {
+ dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
+ sum_min_rate, speed);
+ return -EINVAL;
+ }
+
+ /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
+ vsi->ch_rss_size = max_rss_q_cnt;
+
+ return 0;
+}
+
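+/* Illustrative example (hypothetical values): with contiguous offsets and
+ * qcount = {4, 4, 6}, the single non-power-of-2 qcount (6) passes because it
+ * is not smaller than any power-of-2 qcount; qcount = {8, 4, 6} would be
+ * rejected since 6 < 8.
+ */
+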
+/**
+ * ice_add_channel - add a channel by adding VSI
+ * @pf: ptr to PF device
+ * @sw_id: underlying HW switching element ID
+ * @ch: ptr to channel structure
+ *
+ * Add a channel (VSI) using add_vsi and queue_map
+ */
+static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *vsi;
+
+ if (ch->type != ICE_VSI_CHNL) {
+ dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
+ return -EINVAL;
+ }
+
+ vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
+ if (!vsi || vsi->type != ICE_VSI_CHNL) {
+ dev_err(dev, "create chnl VSI failure\n");
+ return -EINVAL;
+ }
+
+ ch->sw_id = sw_id;
+ ch->vsi_num = vsi->vsi_num;
+ ch->info.mapping_flags = vsi->info.mapping_flags;
+ ch->ch_vsi = vsi;
+ /* set the back pointer of channel for newly created VSI */
+ vsi->ch = ch;
+
+ memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
+ sizeof(vsi->info.q_mapping));
+ memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+
+ return 0;
+}
+
+/**
+ * ice_chnl_cfg_res
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ *
+ * Configure channel specific resources such as rings and vectors.
+ */
+static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
+{
+ int i;
+
+ for (i = 0; i < ch->num_txq; i++) {
+ struct ice_q_vector *tx_q_vector, *rx_q_vector;
+ struct ice_ring_container *rc;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+
+ tx_ring = vsi->tx_rings[ch->base_q + i];
+ rx_ring = vsi->rx_rings[ch->base_q + i];
+ if (!tx_ring || !rx_ring)
+ continue;
+
+ /* setup ring being channel enabled */
+ tx_ring->ch = ch;
+ rx_ring->ch = ch;
+
+ /* following code block sets up vector specific attributes */
+ tx_q_vector = tx_ring->q_vector;
+ rx_q_vector = rx_ring->q_vector;
+ if (!tx_q_vector && !rx_q_vector)
+ continue;
+
+ if (tx_q_vector) {
+ tx_q_vector->ch = ch;
+ /* setup Tx and Rx ITR setting if DIM is off */
+ rc = &tx_q_vector->tx;
+ if (!ITR_IS_DYNAMIC(rc))
+ ice_write_itr(rc, rc->itr_setting);
+ }
+ if (rx_q_vector) {
+ rx_q_vector->ch = ch;
+ /* setup Tx and Rx ITR setting if DIM is off */
+ rc = &rx_q_vector->rx;
+ if (!ITR_IS_DYNAMIC(rc))
+ ice_write_itr(rc, rc->itr_setting);
+ }
+ }
+
+ /* it is safe to assume that, if the channel has a non-zero num_txq or
+ * num_rxq, then the GLINT_ITR register would have been written to
+ * perform an in-context update, hence perform a flush
+ */
+ if (ch->num_txq || ch->num_rxq)
+ ice_flush(&vsi->back->hw);
+}
+
+/**
+ * ice_cfg_chnl_all_res - configure channel resources
+ * @vsi: ptr to main_vsi
+ * @ch: ptr to channel structure
+ *
+ * This function configures channel specific resources such as flow-director
+ * counter index, and other resources such as queues, vectors, ITR settings
+ */
+static void
+ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
+{
+ /* configure channel (aka ADQ) resources such as queues, vectors,
+ * ITR settings for channel specific vectors and anything else
+ */
+ ice_chnl_cfg_res(vsi, ch);
+}
+
+/**
+ * ice_setup_hw_channel - setup new channel
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ * @sw_id: underlying HW switching element ID
+ * @type: type of channel to be created (VMDq2/VF)
+ *
+ * Setup new channel (VSI) based on specified type (VMDq2/VF)
+ * and configures Tx rings accordingly
+ */
+static int
+ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
+ struct ice_channel *ch, u16 sw_id, u8 type)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int ret;
+
+ ch->base_q = vsi->next_base_q;
+ ch->type = type;
+
+ ret = ice_add_channel(pf, sw_id, ch);
+ if (ret) {
+ dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
+ return ret;
+ }
+
+ /* configure/setup ADQ specific resources */
+ ice_cfg_chnl_all_res(vsi, ch);
+
+ /* make sure to update the next_base_q so that subsequent channel's
+ * (aka ADQ) VSI queue map is correct
+ */
+ vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
+ dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
+ ch->num_rxq);
+
+ return 0;
+}
+
+/**
+ * ice_setup_channel - setup new channel using uplink element
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ *
+ * Setup new channel (VSI) based on specified type (VMDq2/VF)
+ * and uplink switching element
+ */
+static bool
+ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
+ struct ice_channel *ch)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ u16 sw_id;
+ int ret;
+
+ if (vsi->type != ICE_VSI_PF) {
+ dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
+ return false;
+ }
+
+ sw_id = pf->first_sw->sw_id;
+
+ /* create channel (VSI) */
+ ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
+ if (ret) {
+ dev_err(dev, "failed to setup hw_channel\n");
+ return false;
+ }
+ dev_dbg(dev, "successfully created channel()\n");
+
+ return ch->ch_vsi ? true : false;
+}
+
+/**
+ * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
+ * @vsi: VSI to be configured
+ * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
+ * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
+ */
+static int
+ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
+{
+ int err;
+
+ err = ice_set_min_bw_limit(vsi, min_tx_rate);
+ if (err)
+ return err;
+
+ return ice_set_max_bw_limit(vsi, max_tx_rate);
+}
+
+/**
+ * ice_create_q_channel - function to create channel
+ * @vsi: VSI to be configured
+ * @ch: ptr to channel (it contains channel specific params)
+ *
+ * This function creates channel (VSI) using num_queues specified by user,
+ * reconfigs RSS if needed.
+ */
+static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+
+ if (!ch)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
+ if (!ch->num_txq || !ch->num_rxq) {
+ dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
+ return -EINVAL;
+ }
+
+ if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
+ dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
+ vsi->cnt_q_avail, ch->num_txq);
+ return -EINVAL;
+ }
+
+ if (!ice_setup_channel(pf, vsi, ch)) {
+ dev_info(dev, "Failed to setup channel\n");
+ return -EINVAL;
+ }
+ /* configure BW rate limit */
+ if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
+ int ret;
+
+ ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
+ ch->min_tx_rate);
+ if (ret)
+ dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
+ ch->max_tx_rate, ch->ch_vsi->vsi_num);
+ else
+ dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
+ ch->max_tx_rate, ch->ch_vsi->vsi_num);
+ }
+
+ vsi->cnt_q_avail -= ch->num_txq;
+
+ return 0;
+}
+
+/**
+ * ice_rem_all_chnl_fltrs - removes all channel filters
+ * @pf: ptr to PF, TC-flower based filter are tracked at PF level
+ *
+ * Remove all advanced switch filters only if they are channel specific
+ * tc-flower based filter
+ */
+static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct hlist_node *node;
+
+ /* to remove all channel filters, iterate an ordered list of filters */
+ hlist_for_each_entry_safe(fltr, node,
+ &pf->tc_flower_fltr_list,
+ tc_flower_node) {
+ struct ice_rule_query_data rule;
+ int status;
+
+ /* for now process only channel specific filters */
+ if (!ice_is_chnl_fltr(fltr))
+ continue;
+
+ rule.rid = fltr->rid;
+ rule.rule_id = fltr->rule_id;
+ rule.vsi_handle = fltr->dest_id;
+ status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
+ if (status) {
+ if (status == -ENOENT)
+ dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
+ rule.rule_id);
+ else
+ dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
+ status);
+ } else if (fltr->dest_vsi) {
+ /* update advanced switch filter count */
+ if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
+ u32 flags = fltr->flags;
+
+ fltr->dest_vsi->num_chnl_fltr--;
+ if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC))
+ pf->num_dmac_chnl_fltrs--;
+ }
+ }
+
+ hlist_del(&fltr->tc_flower_node);
+ kfree(fltr);
+ }
+}
+
+/**
+ * ice_remove_q_channels - Remove queue channels for the TCs
+ * @vsi: VSI to be configured
+ * @rem_fltr: delete advanced switch filter or not
+ *
+ * Remove queue channels for the TCs
+ */
+static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
+{
+ struct ice_channel *ch, *ch_tmp;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ /* remove all tc-flower based filters if they are channel filters only */
+ if (rem_fltr)
+ ice_rem_all_chnl_fltrs(pf);
+
+ /* perform cleanup for channels if they exist */
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
+ struct ice_vsi *ch_vsi;
+
+ list_del(&ch->list);
+ ch_vsi = ch->ch_vsi;
+ if (!ch_vsi) {
+ kfree(ch);
+ continue;
+ }
+
+ /* Reset queue contexts */
+ for (i = 0; i < ch->num_rxq; i++) {
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+
+ tx_ring = vsi->tx_rings[ch->base_q + i];
+ rx_ring = vsi->rx_rings[ch->base_q + i];
+ if (tx_ring) {
+ tx_ring->ch = NULL;
+ if (tx_ring->q_vector)
+ tx_ring->q_vector->ch = NULL;
+ }
+ if (rx_ring) {
+ rx_ring->ch = NULL;
+ if (rx_ring->q_vector)
+ rx_ring->q_vector->ch = NULL;
+ }
+ }
+
+ /* clear the VSI from scheduler tree */
+ ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
+
+ /* Delete VSI from FW */
+ ice_vsi_delete(ch->ch_vsi);
+
+ /* Delete VSI from PF and HW VSI arrays */
+ ice_vsi_clear(ch->ch_vsi);
+
+ /* free the channel */
+ kfree(ch);
+ }
+
+ /* clear the channel VSI map which is stored in main VSI */
+ ice_for_each_chnl_tc(i)
+ vsi->tc_map_vsi[i] = NULL;
+
+ /* reset main VSI's all TC information */
+ vsi->all_enatc = 0;
+ vsi->all_numtc = 0;
+}
+
+/**
+ * ice_rebuild_channels - rebuild channel VSIs
+ * @pf: ptr to PF
+ *
+ * Recreate channel VSIs and replay filters
+ */
+static int ice_rebuild_channels(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *main_vsi;
+ bool rem_adv_fltr = true;
+ struct ice_channel *ch;
+ struct ice_vsi *vsi;
+ int tc_idx = 1;
+ int i, err;
+
+ main_vsi = ice_get_main_vsi(pf);
+ if (!main_vsi)
+ return 0;
+
+ if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
+ main_vsi->old_numtc == 1)
+ return 0; /* nothing to be done */
+
+ /* reconfigure main VSI based on old value of TC and cached values
+ * for MQPRIO opts
+ */
+ err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
+ if (err) {
+ dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
+ main_vsi->old_ena_tc, main_vsi->vsi_num);
+ return err;
+ }
+
+ /* rebuild ADQ VSIs */
+ ice_for_each_vsi(pf, i) {
+ enum ice_vsi_type type;
+
+ vsi = pf->vsi[i];
+ if (!vsi || vsi->type != ICE_VSI_CHNL)
+ continue;
+
+ type = vsi->type;
+
+ /* rebuild ADQ VSI */
+ err = ice_vsi_rebuild(vsi, true);
+ if (err) {
+ dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
+ ice_vsi_type_str(type), vsi->idx, err);
+ goto cleanup;
+ }
+
+ /* Re-map HW VSI number, using the VSI handle that was
+ * validated by the ice_vsi_rebuild() call above
+ */
+ vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
+
+ /* replay filters for the VSI */
+ err = ice_replay_vsi(&pf->hw, vsi->idx);
+ if (err) {
+ dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
+ ice_vsi_type_str(type), err, vsi->idx);
+ rem_adv_fltr = false;
+ goto cleanup;
+ }
+ dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
+ ice_vsi_type_str(type), vsi->idx);
+
+ /* store ADQ VSI at correct TC index in main VSI's
+ * map of TC to VSI
+ */
+ main_vsi->tc_map_vsi[tc_idx++] = vsi;
+ }
+
+ /* ADQ VSI(s) have been rebuilt successfully, so setup
+ * channels for main VSI's Tx and Rx rings
+ */
+ list_for_each_entry(ch, &main_vsi->ch_list, list) {
+ struct ice_vsi *ch_vsi;
+
+ ch_vsi = ch->ch_vsi;
+ if (!ch_vsi)
+ continue;
+
+ /* reconfig channel resources */
+ ice_cfg_chnl_all_res(main_vsi, ch);
+
+ /* replay BW rate limit if it is non-zero */
+ if (!ch->max_tx_rate && !ch->min_tx_rate)
+ continue;
+
+ err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
+ ch->min_tx_rate);
+ if (err)
+ dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
+ err, ch->max_tx_rate, ch->min_tx_rate,
+ ch_vsi->vsi_num);
+ else
+ dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
+ ch->max_tx_rate, ch->min_tx_rate,
+ ch_vsi->vsi_num);
+ }
+
+ /* reconfig RSS for main VSI */
+ if (main_vsi->ch_rss_size)
+ ice_vsi_cfg_rss_lut_key(main_vsi);
+
+ return 0;
+
+cleanup:
+ ice_remove_q_channels(main_vsi, rem_adv_fltr);
+ return err;
+}
+
+/**
+ * ice_create_q_channels - Add queue channel for the given TCs
+ * @vsi: VSI to be configured
+ *
+ * Configures queue channel mapping to the given TCs
+ */
+static int ice_create_q_channels(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_channel *ch;
+ int ret = 0, i;
+
+ ice_for_each_chnl_tc(i) {
+ if (!(vsi->all_enatc & BIT(i)))
+ continue;
+
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (!ch) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+ INIT_LIST_HEAD(&ch->list);
+ ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
+ ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
+ ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
+ ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
+ ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
+
+ /* convert to Kbits/s */
+ if (ch->max_tx_rate)
+ ch->max_tx_rate = div_u64(ch->max_tx_rate,
+ ICE_BW_KBPS_DIVISOR);
+ if (ch->min_tx_rate)
+ ch->min_tx_rate = div_u64(ch->min_tx_rate,
+ ICE_BW_KBPS_DIVISOR);
+
+ ret = ice_create_q_channel(vsi, ch);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "failed creating channel TC:%d\n", i);
+ kfree(ch);
+ goto err_free;
+ }
+ list_add_tail(&ch->list, &vsi->ch_list);
+ vsi->tc_map_vsi[i] = ch->ch_vsi;
+ dev_dbg(ice_pf_to_dev(pf),
+ "successfully created channel: VSI %pK\n", ch->ch_vsi);
+ }
+ return 0;
+
+err_free:
+ ice_remove_q_channels(vsi, false);
+
+ return ret;
+}
+
+/**
+ * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
+ * @netdev: net device to configure
+ * @type_data: TC offload data
+ */
+static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u16 mode, ena_tc_qdisc = 0;
+ int cur_txq, cur_rxq;
+ u8 hw = 0, num_tcf;
+ struct device *dev;
+ int ret, i;
+
+ dev = ice_pf_to_dev(pf);
+ num_tcf = mqprio_qopt->qopt.num_tc;
+ hw = mqprio_qopt->qopt.hw;
+ mode = mqprio_qopt->mode;
+ if (!hw) {
+ clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
+ vsi->ch_rss_size = 0;
+ memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
+ goto config_tcf;
+ }
+
+ /* Generate queue region map for number of TCF requested */
+ for (i = 0; i < num_tcf; i++)
+ ena_tc_qdisc |= BIT(i);
+
+ switch (mode) {
+ case TC_MQPRIO_MODE_CHANNEL:
+
+ ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
+ if (ret) {
+ netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
+ ret);
+ return ret;
+ }
+ memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
+ set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
+ /* don't assume state of hw_tc_offload during driver load
+ * and set the flag for TC flower filter if hw_tc_offload
+ * already ON
+ */
+ if (vsi->netdev->features & NETIF_F_HW_TC)
+ set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+config_tcf:
+
+ /* Requesting same TCF configuration as already enabled */
+ if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
+ mode != TC_MQPRIO_MODE_CHANNEL)
+ return 0;
+
+ /* Pause VSI queues */
+ ice_dis_vsi(vsi, true);
+
+ if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ ice_remove_q_channels(vsi, true);
+
+ if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
+ vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
+ num_online_cpus());
+ vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
+ num_online_cpus());
+ } else {
+ /* logic to rebuild VSI, similar to ethtool -L */
+ u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
+
+ for (i = 0; i < num_tcf; i++) {
+ if (!(ena_tc_qdisc & BIT(i)))
+ continue;
+
+ offset = vsi->mqprio_qopt.qopt.offset[i];
+ qcount_rx = vsi->mqprio_qopt.qopt.count[i];
+ qcount_tx = vsi->mqprio_qopt.qopt.count[i];
+ }
+ vsi->req_txq = offset + qcount_tx;
+ vsi->req_rxq = offset + qcount_rx;
+
+ /* store away the original rss_size info, so that it can be reused by
+ * ice_vsi_rebuild during the tc-qdisc delete stage to determine what
+ * the rss_size for the main VSI should be
+ */
+ vsi->orig_rss_size = vsi->rss_size;
+ }
+
+ /* save current values of Tx and Rx queues before calling VSI rebuild
+ * for fallback option
+ */
+ cur_txq = vsi->num_txq;
+ cur_rxq = vsi->num_rxq;
+
+ /* proceed with rebuilding the main VSI using the correct number of queues */
+ ret = ice_vsi_rebuild(vsi, false);
+ if (ret) {
+ /* fallback to current number of queues */
+ dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
+ vsi->req_txq = cur_txq;
+ vsi->req_rxq = cur_rxq;
+ clear_bit(ICE_RESET_FAILED, pf->state);
+ if (ice_vsi_rebuild(vsi, false)) {
+ dev_err(dev, "Rebuild of main VSI failed again\n");
+ return ret;
+ }
+ }
+
+ vsi->all_numtc = num_tcf;
+ vsi->all_enatc = ena_tc_qdisc;
+ ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
+ if (ret) {
+ netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
+ vsi->vsi_num);
+ goto exit;
+ }
+
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
+ u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
+
+ /* set TC0 rate limit if specified */
+ if (max_tx_rate || min_tx_rate) {
+ /* convert to Kbits/s */
+ if (max_tx_rate)
+ max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
+ if (min_tx_rate)
+ min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
+
+ ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
+ if (!ret) {
+ dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
+ max_tx_rate, min_tx_rate, vsi->vsi_num);
+ } else {
+ dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
+ max_tx_rate, min_tx_rate, vsi->vsi_num);
+ goto exit;
+ }
+ }
+ ret = ice_create_q_channels(vsi);
+ if (ret) {
+ netdev_err(netdev, "failed configuring queue channels\n");
+ goto exit;
+ } else {
+ netdev_dbg(netdev, "successfully configured channels\n");
+ }
+ }
+
+ if (vsi->ch_rss_size)
+ ice_vsi_cfg_rss_lut_key(vsi);
+
+exit:
+ /* if error, reset the all_numtc and all_enatc */
+ if (ret) {
+ vsi->all_numtc = 0;
+ vsi->all_enatc = 0;
+ }
+ /* resume VSI */
+ ice_ena_vsi(vsi, true);
+
+ return ret;
+}
+
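+/* Example (illustrative; interface name and rates hypothetical) of an mqprio
+ * channel-mode qdisc serviced by this handler:
+ *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
+ *      queues 4@0 4@4 hw 1 mode channel \
+ *      shaper bw_rlimit max_rate 4Gbit 5Gbit
+ */
+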
+static LIST_HEAD(ice_block_cb_list);
+
+static int
+ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ int err;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple(type_data,
+ &ice_block_cb_list,
+ ice_setup_tc_block_cb,
+ np, np, true);
+ case TC_SETUP_QDISC_MQPRIO:
+ /* setup traffic classifier for receive side */
+ mutex_lock(&pf->tc_mutex);
+ err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
+ mutex_unlock(&pf->tc_mutex);
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return -EOPNOTSUPP;
+}
+
+/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
@@ -7235,6 +8341,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_open = ice_open,
.ndo_stop = ice_stop,
.ndo_start_xmit = ice_start_xmit,
+ .ndo_select_queue = ice_select_queue,
.ndo_features_check = ice_features_check,
.ndo_set_rx_mode = ice_set_rx_mode,
.ndo_set_mac_address = ice_set_mac_address,
@@ -7250,8 +8357,10 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_set_vf_vlan = ice_set_vf_port_vlan,
.ndo_set_vf_link_state = ice_set_vf_link_state,
.ndo_get_vf_stats = ice_get_vf_stats,
+ .ndo_set_vf_rate = ice_set_vf_bw,
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
+ .ndo_setup_tc = ice_setup_tc,
.ndo_set_features = ice_set_features,
.ndo_bridge_getlink = ice_bridge_getlink,
.ndo_bridge_setlink = ice_bridge_setlink,
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 199aa5b71540..0b220dfa7457 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -3,6 +3,44 @@
#ifndef _ICE_PROTOCOL_TYPE_H_
#define _ICE_PROTOCOL_TYPE_H_
+#define ICE_IPV6_ADDR_LENGTH 16
+
+/* Each recipe can match up to 5 different fields. Fields to match can be meta-
+ * data, values extracted from packet headers, or results from other recipes.
+ * One of the 5 fields is reserved for matching the switch ID. So, up to 4
+ * recipes can provide intermediate results to another one through chaining,
+ * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4.
+ */
+#define ICE_NUM_WORDS_RECIPE 4
+
+/* Max recipes that can be chained */
+#define ICE_MAX_CHAIN_RECIPE 5
+
+/* 1 word of the allowed 5 words is reserved for the switch ID, so a recipe
+ * can have at most 4 words. Up to 5 such recipes can be chained together, so
+ * the maximum number of words that can be programmed for a lookup is 5 * 4.
+ */
+#define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE)
+
+/* Field vector index corresponding to chaining */
+#define ICE_CHAIN_FV_INDEX_START 47
+
+enum ice_protocol_type {
+ ICE_MAC_OFOS = 0,
+ ICE_MAC_IL,
+ ICE_ETYPE_OL,
+ ICE_VLAN_OFOS,
+ ICE_IPV4_OFOS,
+ ICE_IPV4_IL,
+ ICE_IPV6_OFOS,
+ ICE_IPV6_IL,
+ ICE_TCP_IL,
+ ICE_UDP_OF,
+ ICE_UDP_ILOS,
+ ICE_SCTP_IL,
+ ICE_PROTOCOL_LAST
+};
+
/* Decoders for ice_prot_id:
* - F: First
* - I: Inner
@@ -35,4 +73,135 @@ enum ice_prot_id {
ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
+
+#define ICE_MAC_OFOS_HW 1
+#define ICE_MAC_IL_HW 4
+#define ICE_ETYPE_OL_HW 9
+#define ICE_VLAN_OF_HW 16
+#define ICE_VLAN_OL_HW 17
+#define ICE_IPV4_OFOS_HW 32
+#define ICE_IPV4_IL_HW 33
+#define ICE_IPV6_OFOS_HW 40
+#define ICE_IPV6_IL_HW 41
+#define ICE_TCP_IL_HW 49
+#define ICE_UDP_ILOS_HW 53
+
+#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
+
+#define ICE_TUN_FLAG_FV_IND 2
+
+/* Mapping of software defined protocol ID to hardware defined protocol ID */
+struct ice_protocol_entry {
+ enum ice_protocol_type type;
+ u8 protocol_id;
+};
+
+struct ice_ether_hdr {
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+};
+
+struct ice_ethtype_hdr {
+ __be16 ethtype_id;
+};
+
+struct ice_ether_vlan_hdr {
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+ __be32 vlan_id;
+};
+
+struct ice_vlan_hdr {
+ __be16 type;
+ __be16 vlan;
+};
+
+struct ice_ipv4_hdr {
+ u8 version;
+ u8 tos;
+ __be16 total_length;
+ __be16 id;
+ __be16 frag_off;
+ u8 time_to_live;
+ u8 protocol;
+ __be16 check;
+ __be32 src_addr;
+ __be32 dst_addr;
+};
+
+struct ice_ipv6_hdr {
+ __be32 be_ver_tc_flow;
+ __be16 payload_len;
+ u8 next_hdr;
+ u8 hop_limit;
+ u8 src_addr[ICE_IPV6_ADDR_LENGTH];
+ u8 dst_addr[ICE_IPV6_ADDR_LENGTH];
+};
+
+struct ice_sctp_hdr {
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 verification_tag;
+ __be32 check;
+};
+
+struct ice_l4_hdr {
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 len;
+ __be16 check;
+};
+
+union ice_prot_hdr {
+ struct ice_ether_hdr eth_hdr;
+ struct ice_ethtype_hdr ethertype;
+ struct ice_vlan_hdr vlan_hdr;
+ struct ice_ipv4_hdr ipv4_hdr;
+ struct ice_ipv6_hdr ipv6_hdr;
+ struct ice_l4_hdr l4_hdr;
+ struct ice_sctp_hdr sctp_hdr;
+};
+
+/* This is a mapping table entry that maps every word within a given protocol
+ * structure to the real byte offset as per the specification of that
+ * protocol header.
+ * e.g. the dst address is 3 words in the Ethernet header, at byte offsets
+ * 0, 2, and 4 in the actual packet header, and the src address is at 6, 8,
+ * and 10
+ */
+struct ice_prot_ext_tbl_entry {
+ enum ice_protocol_type prot_type;
+ /* Byte offset into header of given protocol type */
+ u8 offs[sizeof(union ice_prot_hdr)];
+};
+
+/* Extractions to be looked up for a given recipe */
+struct ice_prot_lkup_ext {
+ u16 prot_type;
+ u8 n_val_words;
+ /* create a buffer to hold max words per recipe */
+ u16 field_off[ICE_MAX_CHAIN_WORDS];
+ u16 field_mask[ICE_MAX_CHAIN_WORDS];
+
+ struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS];
+
+ /* Indicate field offsets that have field vector indices assigned */
+ DECLARE_BITMAP(done, ICE_MAX_CHAIN_WORDS);
+};
+
+struct ice_pref_recipe_group {
+ u8 n_val_pairs; /* Number of valid pairs */
+ struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE];
+ u16 mask[ICE_NUM_WORDS_RECIPE];
+};
+
+struct ice_recp_grp_entry {
+ struct list_head l_entry;
+
+#define ICE_INVAL_CHAIN_IND 0xFF
+ u16 rid;
+ u8 chain_idx;
+ u16 fv_idx[ICE_NUM_WORDS_RECIPE];
+ u16 fv_mask[ICE_NUM_WORDS_RECIPE];
+ struct ice_pref_recipe_group r_group;
+};
#endif /* _ICE_PROTOCOL_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 05cc5870e4ef..a1be0d04a2d0 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -6,6 +6,252 @@
#define E810_OUT_PROP_DELAY_NS 1
+static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
+ /* name idx func chan */
+ { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
+ { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } },
+ { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
+ { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } },
+ { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
+};
+
+/**
+ * ice_get_sma_config_e810t
+ * @hw: pointer to the hw struct
+ * @ptp_pins: pointer to the ptp_pin_desc structure
+ *
+ * Read the configuration of the SMA control logic and put it into the
+ * ptp_pin_desc structure
+ */
+static int
+ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
+{
+ u8 data, i;
+ int status;
+
+ /* Read initial pin state */
+ status = ice_read_sma_ctrl_e810t(hw, &data);
+ if (status)
+ return status;
+
+ /* initialize with defaults */
+ for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
+ snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
+ "%s", ice_pin_desc_e810t[i].name);
+ ptp_pins[i].index = ice_pin_desc_e810t[i].index;
+ ptp_pins[i].func = ice_pin_desc_e810t[i].func;
+ ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
+ }
+
+ /* Parse SMA1/UFL1 */
+ switch (data & ICE_SMA1_MASK_E810T) {
+ case ICE_SMA1_MASK_E810T:
+ default:
+ ptp_pins[SMA1].func = PTP_PF_NONE;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case ICE_SMA1_DIR_EN_E810T:
+ ptp_pins[SMA1].func = PTP_PF_PEROUT;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case ICE_SMA1_TX_EN_E810T:
+ ptp_pins[SMA1].func = PTP_PF_EXTTS;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case 0:
+ ptp_pins[SMA1].func = PTP_PF_EXTTS;
+ ptp_pins[UFL1].func = PTP_PF_PEROUT;
+ break;
+ }
+
+ /* Parse SMA2/UFL2 */
+ switch (data & ICE_SMA2_MASK_E810T) {
+ case ICE_SMA2_MASK_E810T:
+ default:
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
+ ptp_pins[SMA2].func = PTP_PF_EXTTS;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
+ ptp_pins[SMA2].func = PTP_PF_PEROUT;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+ ptp_pins[UFL2].func = PTP_PF_EXTTS;
+ break;
+ case ICE_SMA2_DIR_EN_E810T:
+ ptp_pins[SMA2].func = PTP_PF_PEROUT;
+ ptp_pins[UFL2].func = PTP_PF_EXTTS;
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_set_sma_config_e810t
+ * @hw: pointer to the hw struct
+ * @ptp_pins: pointer to the ptp_pin_desc structure
+ *
+ * Set the configuration of the SMA control logic based on the configuration in
+ * the ptp_pins parameter
+ */
+static int
+ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
+ const struct ptp_pin_desc *ptp_pins)
+{
+ int status;
+ u8 data;
+
+ /* SMA1 and UFL1 cannot be set to TX at the same time */
+ if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT)
+ return -EINVAL;
+
+ /* SMA2 and UFL2 cannot be set to RX at the same time */
+ if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS)
+ return -EINVAL;
+
+ /* Read initial pin state value */
+ status = ice_read_sma_ctrl_e810t(hw, &data);
+ if (status)
+ return status;
+
+ /* Set the right state based on the desired configuration */
+ data &= ~ICE_SMA1_MASK_E810T;
+ if (ptp_pins[SMA1].func == PTP_PF_NONE &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
+ data |= ICE_SMA1_MASK_E810T;
+ } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX");
+ data |= ICE_SMA1_TX_EN_E810T;
+ } else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT) {
+ /* U.FL 1 TX will always enable SMA 1 RX */
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
+ } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
+ } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 TX");
+ data |= ICE_SMA1_DIR_EN_E810T;
+ }
+
+ data &= ~ICE_SMA2_MASK_E810T;
+ if (ptp_pins[SMA2].func == PTP_PF_NONE &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
+ data |= ICE_SMA2_MASK_E810T;
+ } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 RX");
+ data |= (ICE_SMA2_TX_EN_E810T |
+ ICE_SMA2_UFL2_RX_DIS_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS) {
+ dev_info(ice_hw_to_dev(hw), "UFL2 RX");
+ data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 TX");
+ data |= (ICE_SMA2_DIR_EN_E810T |
+ ICE_SMA2_UFL2_RX_DIS_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
+ data |= ICE_SMA2_DIR_EN_E810T;
+ }
+
+ return ice_write_sma_ctrl_e810t(hw, data);
+}
+
+/**
+ * ice_ptp_set_sma_e810t
+ * @info: the driver's PTP info structure
+ * @pin: pin index in kernel structure
+ * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
+ *
+ * Set the configuration of a single SMA pin
+ */
+static int
+ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func)
+{
+ struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (pin < SMA1 || func > PTP_PF_PEROUT)
+ return -EOPNOTSUPP;
+
+ err = ice_get_sma_config_e810t(hw, ptp_pins);
+ if (err)
+ return err;
+
+ /* Disable the same function on the other pin sharing the channel */
+ if (pin == SMA1 && ptp_pins[UFL1].func == func)
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ if (pin == UFL1 && ptp_pins[SMA1].func == func)
+ ptp_pins[SMA1].func = PTP_PF_NONE;
+
+ if (pin == SMA2 && ptp_pins[UFL2].func == func)
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ if (pin == UFL2 && ptp_pins[SMA2].func == func)
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+
+ /* Set up new pin function in the temp table */
+ ptp_pins[pin].func = func;
+
+ return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
+}
+
+/**
+ * ice_verify_pin_e810t
+ * @info: the driver's PTP info structure
+ * @pin: Pin index
+ * @func: Assigned function
+ * @chan: Assigned channel
+ *
+ * Verify if the pin supports the requested pin function and check pin
+ * consistency. Reconfigure the SMA logic attached to the given pin to enable
+ * its desired functionality
+ */
+static int
+ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ /* Don't allow channel reassignment */
+ if (chan != ice_pin_desc_e810t[pin].chan)
+ return -EOPNOTSUPP;
+
+ /* Check if functions are properly assigned */
+ switch (func) {
+ case PTP_PF_NONE:
+ break;
+ case PTP_PF_EXTTS:
+ if (pin == UFL1)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_PEROUT:
+ if (pin == UFL2 || pin == GNSS)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_PHYSYNC:
+ return -EOPNOTSUPP;
+ }
+
+ return ice_ptp_set_sma_e810t(info, pin, func);
+}
+
/**
* ice_set_tx_tstamp - Enable or disable Tx timestamping
* @pf: The PF pointer to search in
@@ -735,17 +981,34 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
{
struct ice_pf *pf = ptp_info_to_pf(info);
struct ice_perout_channel clk_cfg = {0};
+ bool sma_pres = false;
unsigned int chan;
u32 gpio_pin;
int err;
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
+ sma_pres = true;
+
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
chan = rq->perout.index;
- if (chan == PPS_CLK_GEN_CHAN)
+ if (sma_pres) {
+ if (chan == ice_pin_desc_e810t[SMA1].chan)
+ clk_cfg.gpio_pin = GPIO_20;
+ else if (chan == ice_pin_desc_e810t[SMA2].chan)
+ clk_cfg.gpio_pin = GPIO_22;
+ else
+ return -1;
+ } else if (ice_is_e810t(&pf->hw)) {
+ if (chan == 0)
+ clk_cfg.gpio_pin = GPIO_20;
+ else
+ clk_cfg.gpio_pin = GPIO_22;
+ } else if (chan == PPS_CLK_GEN_CHAN) {
clk_cfg.gpio_pin = PPS_PIN_INDEX;
- else
+ } else {
clk_cfg.gpio_pin = chan;
+ }
clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
rq->perout.period.nsec);
@@ -757,7 +1020,19 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
break;
case PTP_CLK_REQ_EXTTS:
chan = rq->extts.index;
- gpio_pin = chan;
+ if (sma_pres) {
+ if (chan < ice_pin_desc_e810t[SMA2].chan)
+ gpio_pin = GPIO_21;
+ else
+ gpio_pin = GPIO_23;
+ } else if (ice_is_e810t(&pf->hw)) {
+ if (chan == 0)
+ gpio_pin = GPIO_21;
+ else
+ gpio_pin = GPIO_23;
+ } else {
+ gpio_pin = chan;
+ }
err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
rq->extts.flags);
@@ -1012,7 +1287,7 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
* The timestamp is in ns, so we must convert the result first.
*/
void
-ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
u32 ts_high;
@@ -1038,13 +1313,93 @@ ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
}
/**
+ * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
+ * @pf: pointer to the PF structure
+ * @info: PTP clock info structure
+ *
+ * Disable the OS access to the SMA pins. Called to clear out the OS
+ * indications of pin support when we fail to setup the E810-T SMA control
+ * register.
+ */
+static void
+ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
+
+ info->enable = NULL;
+ info->verify = NULL;
+ info->n_pins = 0;
+ info->n_ext_ts = 0;
+ info->n_per_out = 0;
+}
+
+/**
+ * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
+ * @pf: pointer to the PF structure
+ * @info: PTP clock info structure
+ *
+ * Finish setting up the SMA pins by allocating pin_config, and setting it up
+ * according to the current status of the SMA. On failure, disable all of the
+ * extended SMA pin support.
+ */
+static void
+ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ /* Allocate memory for kernel pins interface */
+ info->pin_config = devm_kcalloc(dev, info->n_pins,
+ sizeof(*info->pin_config), GFP_KERNEL);
+ if (!info->pin_config) {
+ ice_ptp_disable_sma_pins_e810t(pf, info);
+ return;
+ }
+
+ /* Read current SMA status */
+ err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
+ if (err)
+ ice_ptp_disable_sma_pins_e810t(pf, info);
+}
+
+/**
+ * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
+ * @pf: pointer to the PF instance
+ * @info: PTP clock capabilities
+ */
+static void
+ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ /* Check if SMA controller is in the netlist */
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
+ !ice_is_pca9575_present(&pf->hw))
+ ice_clear_feature_support(pf, ICE_F_SMA_CTRL);
+
+ if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
+ info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
+ info->n_per_out = N_PER_OUT_E810T_NO_SMA;
+ return;
+ }
+
+ info->n_per_out = N_PER_OUT_E810T;
+ info->n_ext_ts = N_EXT_TS_E810;
+ info->n_pins = NUM_PTP_PINS_E810T;
+ info->verify = ice_verify_pin_e810t;
+
+ /* Complete setup of the SMA pins */
+ ice_ptp_setup_sma_pins_e810t(pf, info);
+}
+
+/**
* ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
* @info: PTP clock capabilities
*/
static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
{
- info->n_per_out = E810_N_PER_OUT;
- info->n_ext_ts = E810_N_EXT_TS;
+ info->n_per_out = N_PER_OUT_E810;
+ info->n_ext_ts = N_EXT_TS_E810;
}
/**
@@ -1062,7 +1417,10 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
info->enable = ice_ptp_gpio_enable_e810;
- ice_ptp_setup_pins_e810(info);
+ if (ice_is_e810t(&pf->hw))
+ ice_ptp_setup_pins_e810t(pf, info);
+ else
+ ice_ptp_setup_pins_e810(info);
}
/**
@@ -1313,22 +1671,21 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
u8 idx;
- spin_lock(&tx->lock);
-
for (idx = 0; idx < tx->len; idx++) {
u8 phy_idx = idx + tx->quad_offset;
- /* Clear any potential residual timestamp in the PHY block */
- if (!pf->hw.reset_ongoing)
- ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
-
+ spin_lock(&tx->lock);
if (tx->tstamps[idx].skb) {
dev_kfree_skb_any(tx->tstamps[idx].skb);
tx->tstamps[idx].skb = NULL;
}
- }
+ clear_bit(idx, tx->in_use);
+ spin_unlock(&tx->lock);
- spin_unlock(&tx->lock);
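+ /* the lock is taken and released per index so that the PHY register
+ * access below happens outside the critical section
+ */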
+ /* Clear any potential residual timestamp in the PHY block */
+ if (!pf->hw.reset_ongoing)
+ ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
+ }
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index e1c787bd5b96..f71ad317d6c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -9,12 +9,21 @@
#include "ice_ptp_hw.h"
-enum ice_ptp_pin {
+enum ice_ptp_pin_e810 {
GPIO_20 = 0,
GPIO_21,
GPIO_22,
GPIO_23,
- NUM_ICE_PTP_PIN
+ NUM_PTP_PIN_E810
+};
+
+enum ice_ptp_pin_e810t {
+ GNSS = 0,
+ SMA1,
+ UFL1,
+ SMA2,
+ UFL2,
+ NUM_PTP_PINS_E810T
};
struct ice_perout_channel {
@@ -155,8 +164,11 @@ struct ice_ptp {
#define PPS_CLK_SRC_CHAN 2
#define PPS_PIN_INDEX 5
#define TIME_SYNC_PIN_INDEX 4
-#define E810_N_EXT_TS 3
-#define E810_N_PER_OUT 4
+#define N_EXT_TS_E810 3
+#define N_PER_OUT_E810 4
+#define N_PER_OUT_E810T 3
+#define N_PER_OUT_E810T_NO_SMA 2
+#define N_EXT_TS_E810_NO_SMA 2
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
struct ice_pf;
@@ -168,7 +180,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
void ice_ptp_process_ts(struct ice_pf *pf);
void
-ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
@@ -196,7 +208,7 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
static inline void
-ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 3eca0e4eab0b..29f947c0cd2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -649,3 +649,154 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
return ice_clear_phy_tstamp_e810(hw, block, idx);
}
+
+/* E810T SMA functions
+ *
+ * The following functions operate specifically on E810T hardware and are used
+ * to access the extended GPIOs available.
+ */
+
+/**
+ * ice_get_pca9575_handle - find the netlist handle of the PCA9575 expander
+ * @hw: pointer to the hw struct
+ * @pca9575_handle: GPIO controller's handle
+ *
+ * Find and return the GPIO controller's handle in the netlist.
+ * Once found, the value is cached in the hw structure and subsequent calls
+ * return the cached value.
+ */
+static int
+ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+ int status;
+ u8 idx;
+
+ /* If handle was read previously return cached value */
+ if (hw->io_expander_handle) {
+ *pca9575_handle = hw->io_expander_handle;
+ return 0;
+ }
+
+ /* If handle was not detected read it from the netlist */
+ cmd = &desc.params.get_link_topo;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+
+ /* Set node type to GPIO controller */
+ cmd->addr.topo_params.node_type_ctx =
+ (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
+ ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
+
+#define SW_PCA9575_SFP_TOPO_IDX 2
+#define SW_PCA9575_QSFP_TOPO_IDX 1
+
+ /* Check if the SW IO expander controlling SMA exists in the netlist. */
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP)
+ idx = SW_PCA9575_SFP_TOPO_IDX;
+ else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
+ idx = SW_PCA9575_QSFP_TOPO_IDX;
+ else
+ return -EOPNOTSUPP;
+
+ cmd->addr.topo_params.index = idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (status)
+ return -EOPNOTSUPP;
+
+ /* Verify if we found the right IO expander type */
+ if (desc.params.get_link_topo.node_part_num !=
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
+ return -EOPNOTSUPP;
+
+ /* If present save the handle and return it */
+ hw->io_expander_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *pca9575_handle = hw->io_expander_handle;
+
+ return 0;
+}
+
+/**
+ * ice_read_sma_ctrl_e810t - read the SMA controller state
+ * @hw: pointer to the hw struct
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
+ * PCA9575 expander, so only bits 3-7 in data are valid.
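+ *
+ * Note that the expander inputs are treated as active-low here: a pin that
+ * reads 0 sets the corresponding bit in data. For example (illustrative
+ * values only), if pins 4 and 6 read low, data comes back as 0x50.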
+ */
+int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
+{
+ int status;
+ u16 handle;
+ u8 i;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+ if (status)
+ return status;
+
+ *data = 0;
+
+ for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ bool pin;
+
+ status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
+ &pin, NULL);
+ if (status)
+ break;
+ *data |= (u8)(!pin) << i;
+ }
+
+ return status;
+}
+
+/**
+ * ice_write_sma_ctrl_e810t - write the SMA controller state
+ * @hw: pointer to the hw struct
+ * @data: data to be written to the GPIO controller
+ *
+ * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
+ * of the PCA9575 expander, so only bits 3-7 in data are valid.
+ */
+int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
+{
+ int status;
+ u16 handle;
+ u8 i;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+ if (status)
+ return status;
+
+ for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ bool pin;
+
+ pin = !(data & (1 << i));
+ status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
+ pin, NULL);
+ if (status)
+ break;
+ }
+
+ return status;
+}
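+
+/* Usage sketch (illustrative only, not part of the driver flow): read the
+ * current SMA state, toggle the SMA1 direction bit and write it back. Error
+ * handling is elided and "hw" is assumed to be an initialized E810-T ice_hw
+ * pointer.
+ *
+ *	u8 data;
+ *
+ *	if (!ice_read_sma_ctrl_e810t(hw, &data)) {
+ *		data ^= ICE_SMA1_DIR_EN_E810T;
+ *		ice_write_sma_ctrl_e810t(hw, data);
+ *	}
+ */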
+
+/**
+ * ice_is_pca9575_present - check if the PCA9575 expander is present
+ * @hw: pointer to the hw struct
+ *
+ * Check if the SW IO expander is present in the netlist
+ */
+bool ice_is_pca9575_present(struct ice_hw *hw)
+{
+ u16 handle = 0;
+ int status;
+
+ if (!ice_is_e810t(hw))
+ return false;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+
+ return !status && handle;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 55a414e87018..b2984b5c22c1 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -30,6 +30,9 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
/* E810 family functions */
int ice_ptp_init_phy_e810(struct ice_hw *hw);
+int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
+int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
+bool ice_is_pca9575_present(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
@@ -76,4 +79,23 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw);
#define LOW_TX_MEMORY_BANK_START 0x03090000
#define HIGH_TX_MEMORY_BANK_START 0x03090004
+/* E810T SMA controller pin control */
+#define ICE_SMA1_DIR_EN_E810T BIT(4)
+#define ICE_SMA1_TX_EN_E810T BIT(5)
+#define ICE_SMA2_UFL2_RX_DIS_E810T BIT(3)
+#define ICE_SMA2_DIR_EN_E810T BIT(6)
+#define ICE_SMA2_TX_EN_E810T BIT(7)
+
+#define ICE_SMA1_MASK_E810T (ICE_SMA1_DIR_EN_E810T | \
+ ICE_SMA1_TX_EN_E810T)
+#define ICE_SMA2_MASK_E810T (ICE_SMA2_UFL2_RX_DIS_E810T | \
+ ICE_SMA2_DIR_EN_E810T | \
+ ICE_SMA2_TX_EN_E810T)
+#define ICE_ALL_SMA_MASK_E810T (ICE_SMA1_MASK_E810T | \
+ ICE_SMA2_MASK_E810T)
+
+#define ICE_SMA_MIN_BIT_E810T 3
+#define ICE_SMA_MAX_BIT_E810T 7
+#define ICE_PCA9575_P1_OFFSET 8
+
#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
new file mode 100644
index 000000000000..c49eeea7cb67
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_eswitch.h"
+#include "ice_devlink.h"
+#include "ice_virtchnl_pf.h"
+#include "ice_tc_lib.h"
+
+/**
+ * ice_repr_get_sw_port_id - get port ID associated with representor
+ * @repr: pointer to port representor
+ */
+static int ice_repr_get_sw_port_id(struct ice_repr *repr)
+{
+ return repr->vf->pf->hw.port_info->lport;
+}
+
+/**
+ * ice_repr_get_phys_port_name - get phys port name
+ * @netdev: pointer to port representor netdev
+ * @buf: buffer into which the port name is written
+ * @len: max length of buf
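+ *
+ * Produces names of the form "pf<lport>vfr<vf_id>", e.g. "pf0vfr3" for VF 3
+ * on logical port 0 (illustrative values).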
+ */
+static int
+ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_repr *repr = np->repr;
+ int res;
+
+ /* Devlink port is registered and devlink core is taking care of name formatting. */
+ if (repr->vf->devlink_port.devlink)
+ return -EOPNOTSUPP;
+
+ res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
+ repr->vf->vf_id);
+ if (res <= 0)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+/**
+ * ice_repr_get_stats64 - get VF stats for VFPR use
+ * @netdev: pointer to port representor netdev
+ * @stats: pointer to struct where stats can be stored
+ */
+static void
+ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_eth_stats *eth_stats;
+ struct ice_vsi *vsi;
+
+ if (ice_is_vf_disabled(np->repr->vf))
+ return;
+ vsi = np->repr->src_vsi;
+
+ ice_update_vsi_stats(vsi);
+ eth_stats = &vsi->eth_stats;
+
+ stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
+ eth_stats->tx_multicast;
+ stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
+ eth_stats->rx_multicast;
+ stats->tx_bytes = eth_stats->tx_bytes;
+ stats->rx_bytes = eth_stats->rx_bytes;
+ stats->multicast = eth_stats->rx_multicast;
+ stats->tx_errors = eth_stats->tx_errors;
+ stats->tx_dropped = eth_stats->tx_discards;
+ stats->rx_dropped = eth_stats->rx_discards;
+}
+
+/**
+ * ice_netdev_to_repr - Get port representor for given netdevice
+ * @netdev: pointer to port representor netdev
+ */
+struct ice_repr *ice_netdev_to_repr(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ return np->repr;
+}
+
+/**
+ * ice_repr_open - Enable port representor's network interface
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a port representor's network
+ * interface is made active by the system (IFF_UP). The corresponding
+ * VF is notified about the link status change.
+ *
+ * Returns 0 on success
+ */
+static int ice_repr_open(struct net_device *netdev)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ struct ice_vf *vf;
+
+ vf = repr->vf;
+ vf->link_forced = true;
+ vf->link_up = true;
+ ice_vc_notify_vf_link_state(vf);
+
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+}
+
+/**
+ * ice_repr_stop - Disable port representor's network interface
+ * @netdev: network interface device structure
+ *
+ * The stop entry point is called when a port representor's network
+ * interface is deactivated by the system. The corresponding
+ * VF is notified about the link status change.
+ *
+ * Returns 0 on success
+ */
+static int ice_repr_stop(struct net_device *netdev)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ struct ice_vf *vf;
+
+ vf = repr->vf;
+ vf->link_forced = true;
+ vf->link_up = false;
+ ice_vc_notify_vf_link_state(vf);
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+}
+
+static struct devlink_port *
+ice_repr_get_devlink_port(struct net_device *netdev)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ return &repr->vf->devlink_port;
+}
+
+static int
+ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
+ struct flow_cls_offload *flower)
+{
+ switch (flower->command) {
+ case FLOW_CLS_REPLACE:
+ return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
+ case FLOW_CLS_DESTROY:
+ return ice_del_cls_flower(repr->src_vsi, flower);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
+ struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_repr_setup_tc_cls_flower(np->repr, flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(ice_repr_block_cb_list);
+
+static int
+ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple((struct flow_block_offload *)
+ type_data,
+ &ice_repr_block_cb_list,
+ ice_repr_setup_tc_block_cb,
+ np, np, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops ice_repr_netdev_ops = {
+ .ndo_get_phys_port_name = ice_repr_get_phys_port_name,
+ .ndo_get_stats64 = ice_repr_get_stats64,
+ .ndo_open = ice_repr_open,
+ .ndo_stop = ice_repr_stop,
+ .ndo_start_xmit = ice_eswitch_port_start_xmit,
+ .ndo_get_devlink_port = ice_repr_get_devlink_port,
+ .ndo_setup_tc = ice_repr_setup_tc,
+};
+
+/**
+ * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
+ * @netdev: pointer to netdev
+ */
+bool ice_is_port_repr_netdev(struct net_device *netdev)
+{
+ return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
+}
+
+/**
+ * ice_repr_reg_netdev - register port representor netdev
+ * @netdev: pointer to port representor netdev
+ */
+static int
+ice_repr_reg_netdev(struct net_device *netdev)
+{
+ eth_hw_addr_random(netdev);
+ netdev->netdev_ops = &ice_repr_netdev_ops;
+ ice_set_ethtool_repr_ops(netdev);
+
+ netdev->hw_features |= NETIF_F_HW_TC;
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return register_netdev(netdev);
+}
+
+/**
+ * ice_repr_add - add representor for VF
+ * @vf: pointer to VF structure
+ */
+static int ice_repr_add(struct ice_vf *vf)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_netdev_priv *np;
+ struct ice_repr *repr;
+ int err;
+
+ repr = kzalloc(sizeof(*repr), GFP_KERNEL);
+ if (!repr)
+ return -ENOMEM;
+
+ repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
+ if (!repr->netdev) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ repr->src_vsi = ice_get_vf_vsi(vf);
+ repr->vf = vf;
+ vf->repr = repr;
+ np = netdev_priv(repr->netdev);
+ np->repr = repr;
+
+ q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
+ if (!q_vector) {
+ err = -ENOMEM;
+ goto err_alloc_q_vector;
+ }
+ repr->q_vector = q_vector;
+
+ err = ice_devlink_create_vf_port(vf);
+ if (err)
+ goto err_devlink;
+
+ err = ice_repr_reg_netdev(repr->netdev);
+ if (err)
+ goto err_netdev;
+
+ devlink_port_type_eth_set(&vf->devlink_port, repr->netdev);
+
+ return 0;
+
+err_netdev:
+ ice_devlink_destroy_vf_port(vf);
+err_devlink:
+ kfree(repr->q_vector);
+ vf->repr->q_vector = NULL;
+err_alloc_q_vector:
+ free_netdev(repr->netdev);
+ repr->netdev = NULL;
+err_alloc:
+ kfree(repr);
+ vf->repr = NULL;
+ return err;
+}
+
+/**
+ * ice_repr_rem - remove representor from VF
+ * @vf: pointer to VF structure
+ */
+static void ice_repr_rem(struct ice_vf *vf)
+{
+ ice_devlink_destroy_vf_port(vf);
+ kfree(vf->repr->q_vector);
+ vf->repr->q_vector = NULL;
+ unregister_netdev(vf->repr->netdev);
+ free_netdev(vf->repr->netdev);
+ vf->repr->netdev = NULL;
+ kfree(vf->repr);
+ vf->repr = NULL;
+}
+
+/**
+ * ice_repr_add_for_all_vfs - add port representor for all VFs
+ * @pf: pointer to PF structure
+ */
+int ice_repr_add_for_all_vfs(struct ice_pf *pf)
+{
+ int err;
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ err = ice_repr_add(vf);
+ if (err)
+ goto err;
+
+ ice_vc_change_ops_to_repr(&vf->vc_ops);
+ }
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_repr_rem(vf);
+ ice_vc_set_dflt_vf_ops(&vf->vc_ops);
+ }
+
+ return err;
+}
+
+/**
+ * ice_repr_rem_from_all_vfs - remove port representor for all VFs
+ * @pf: pointer to PF structure
+ */
+void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_repr_rem(vf);
+ ice_vc_set_dflt_vf_ops(&vf->vc_ops);
+ }
+}
+
+/**
+ * ice_repr_start_tx_queues - start Tx queues of port representor
+ * @repr: pointer to repr structure
+ */
+void ice_repr_start_tx_queues(struct ice_repr *repr)
+{
+ netif_carrier_on(repr->netdev);
+ netif_tx_start_all_queues(repr->netdev);
+}
+
+/**
+ * ice_repr_stop_tx_queues - stop Tx queues of port representor
+ * @repr: pointer to repr structure
+ */
+void ice_repr_stop_tx_queues(struct ice_repr *repr)
+{
+ netif_carrier_off(repr->netdev);
+ netif_tx_stop_all_queues(repr->netdev);
+}
+
+/**
+ * ice_repr_set_traffic_vsi - set traffic VSI for port representor
+ * @repr: representor on which the VSI will be set
+ * @vsi: pointer to VSI that will be used by port representor to pass traffic
+ */
+void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi)
+{
+ struct ice_netdev_priv *np = netdev_priv(repr->netdev);
+
+ np->vsi = vsi;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
new file mode 100644
index 000000000000..806de22933c6
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_REPR_H_
+#define _ICE_REPR_H_
+
+#include <net/dst_metadata.h>
+#include "ice.h"
+
+struct ice_repr {
+ struct ice_vsi *src_vsi;
+ struct ice_vf *vf;
+ struct ice_q_vector *q_vector;
+ struct net_device *netdev;
+ struct metadata_dst *dst;
+};
+
+int ice_repr_add_for_all_vfs(struct ice_pf *pf);
+void ice_repr_rem_from_all_vfs(struct ice_pf *pf);
+
+void ice_repr_start_tx_queues(struct ice_repr *repr);
+void ice_repr_stop_tx_queues(struct ice_repr *repr);
+
+void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
+
+struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
+bool ice_is_port_repr_netdev(struct net_device *netdev);
+#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 9f07b6641705..ce3c7bded4cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -2071,6 +2071,19 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
}
/**
+ * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA child nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function clears the VSI and its RDMA child nodes from the scheduler
+ * tree for all TCs.
+ */
+enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
+{
+ return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
+}
+
+/**
* ice_get_agg_info - get the aggregator ID
* @hw: pointer to the hardware structure
* @agg_id: aggregator ID
@@ -2999,6 +3012,43 @@ static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
}
/**
+ * ice_sched_save_vsi_bw - save VSI node's BW information
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps (kilobits per second)
+ *
+ * Save BW information of VSI type node for post replay use.
+ */
+static int
+ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return -EINVAL;
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ return -EINVAL;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+ break;
+ case ICE_MAX_BW:
+ ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+ break;
+ case ICE_SHARED_BW:
+ ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
* ice_sched_calc_wakeup - calculate RL profile wakeup parameter
* @hw: pointer to the HW struct
* @bw: bandwidth in Kbps
@@ -3771,6 +3821,153 @@ ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
}
/**
+ * ice_sched_get_node_by_id_type - get node from ID type
+ * @pi: port information structure
+ * @id: identifier
+ * @agg_type: type of aggregator
+ * @tc: traffic class
+ *
+ * This function returns the node identified by @id and @agg_type for the
+ * given traffic class (TC). It must be called with the scheduler lock
+ * held.
+ */
+static struct ice_sched_node *
+ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
+ enum ice_agg_type agg_type, u8 tc)
+{
+ struct ice_sched_node *node = NULL;
+
+ switch (agg_type) {
+ case ICE_AGG_TYPE_VSI: {
+ struct ice_vsi_ctx *vsi_ctx;
+ u16 vsi_handle = (u16)id;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ break;
+ /* Get sched_vsi_info */
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ break;
+ node = vsi_ctx->sched.vsi_node[tc];
+ break;
+ }
+
+ case ICE_AGG_TYPE_AGG: {
+ struct ice_sched_node *tc_node;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (tc_node)
+ node = ice_sched_get_agg_node(pi, tc_node, id);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return node;
+}
+
+/**
+ * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC
+ * @pi: port information structure
+ * @id: ID (software VSI handle or AGG ID)
+ * @agg_type: aggregator type (VSI or AGG type node)
+ * @tc: traffic class
+ * @rl_type: min or max
+ * @bw: bandwidth in Kbps
+ *
+ * This function sets the BW limit of a VSI or aggregator scheduling node
+ * for the given TC, using the bandwidth passed in @bw.
+ */
+static enum ice_status
+ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
+ enum ice_agg_type agg_type, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_sched_node *node;
+
+ if (!pi)
+ return status;
+
+ if (rl_type == ICE_UNKNOWN_BW)
+ return status;
+
+ mutex_lock(&pi->sched_lock);
+ node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc);
+ if (!node) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n");
+ goto exit_set_node_bw_lmt_per_tc;
+ }
+ if (bw == ICE_SCHED_DFLT_BW)
+ status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
+ else
+ status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
+
+exit_set_node_bw_lmt_per_tc:
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: traffic class
+ * @rl_type: min or max
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures the BW limit of a VSI scheduling node for the
+ * given TC.
+ */
+enum ice_status
+ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ int status;
+
+ status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
+ ICE_AGG_TYPE_VSI,
+ tc, rl_type, bw);
+ if (!status) {
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
+ mutex_unlock(&pi->sched_lock);
+ }
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: traffic class
+ * @rl_type: min or max
+ *
+ * This function configures the default BW limit of a VSI scheduling node
+ * for the given TC.
+ */
+enum ice_status
+ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type)
+{
+ int status;
+
+ status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
+ ICE_AGG_TYPE_VSI,
+ tc, rl_type,
+ ICE_SCHED_DFLT_BW);
+ if (!status) {
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
+ ICE_SCHED_DFLT_BW);
+ mutex_unlock(&pi->sched_lock);
+ }
+ return status;
+}
+
+/**
* ice_cfg_rl_burst_size - Set burst size value
* @hw: pointer to the HW struct
* @bytes: burst size in bytes
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 9beef8f0ec76..6bddcbecaf5e 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -58,6 +58,8 @@ struct ice_sched_agg_info {
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
u32 agg_id;
enum ice_agg_type agg_type;
+ /* bw_t_info saves aggregator BW information */
+ struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
/* save aggregator TC bitmap */
DECLARE_BITMAP(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
};
@@ -89,6 +91,7 @@ enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
/* Tx scheduler rate limiter functions */
enum ice_status
@@ -103,6 +106,12 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_status
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type);
+enum ice_status
+ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u32 bw);
+enum ice_status
+ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type);
enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 3b6c1420aa7b..2742e1c1e337 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -8,6 +8,7 @@
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
#define ICE_MAX_VLAN_ID 0xFFF
+#define ICE_IPV6_ETHER_ID 0x86DD
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
* struct to configure any switch filter rules.
@@ -29,6 +30,290 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
+struct ice_dummy_pkt_offsets {
+ enum ice_protocol_type type;
+ u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
+};
+
+/* offset info for MAC + IPv4 + UDP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_ILOS, 34 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* Dummy packet for MAC + IPv4 + UDP */
+static const u8 dummy_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV4_OFOS, 18 },
+ { ICE_UDP_ILOS, 38 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv4:UDP dummy packet */
+static const u8 dummy_vlan_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 16 */
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
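+
+/* Note how the 4-byte VLAN tag shifts every subsequent header: IPv4 moves
+ * from offset 14 in the untagged packet above to 18 here, and the UDP header
+ * from 34 to 38, matching dummy_vlan_udp_packet_offsets.
+ */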
+
+/* offset info for MAC + IPv4 + TCP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_TCP_IL, 34 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* Dummy packet for MAC + IPv4 + TCP */
+static const u8 dummy_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV4_OFOS, 18 },
+ { ICE_TCP_IL, 38 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv4:TCP dummy packet */
+static const u8 dummy_vlan_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 16 */
+
+ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_TCP_IL, 54 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_tcp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* C-tag (802.1Q): IPv6 + TCP */
+static const struct ice_dummy_pkt_offsets
+dummy_vlan_tcp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV6_OFOS, 18 },
+ { ICE_TCP_IL, 58 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv6 + TCP dummy packet */
+static const u8 dummy_vlan_tcp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
+ 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* IPv6 + UDP */
+static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_ILOS, 54 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* IPv6 + UDP dummy packet */
+static const u8 dummy_udp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
+ 0x00, 0x10, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* C-tag (802.1Q): IPv6 + UDP */
+static const struct ice_dummy_pkt_offsets
+dummy_vlan_udp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV6_OFOS, 18 },
+ { ICE_UDP_ILOS, 58 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv6 + UDP dummy packet */
+static const u8 dummy_vlan_udp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
+ 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
(DUMMY_ETH_HDR_LEN * \
@@ -42,6 +327,14 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
+/* this is a recipe to profile association bitmap */
+static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
+ ICE_MAX_NUM_PROFILES);
+
+/* this is a profile to recipe association bitmap */
+static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
+ ICE_MAX_NUM_RECIPES);
+
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
* @hw: pointer to the HW struct
@@ -59,10 +352,11 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
if (!recps)
return ICE_ERR_NO_MEMORY;
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
recps[i].root_rid = i;
INIT_LIST_HEAD(&recps[i].filt_rules);
INIT_LIST_HEAD(&recps[i].filt_replay_rules);
+ INIT_LIST_HEAD(&recps[i].rg_list);
mutex_init(&recps[i].filt_rule_lock);
}
@@ -518,7 +812,7 @@ ice_aq_alloc_free_vsi_list_exit:
*
* Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
*/
-static enum ice_status
+enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
@@ -543,6 +837,358 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
return status;
}
+/**
+ * ice_aq_add_recipe - add switch recipe
+ * @hw: pointer to the HW struct
+ * @s_recipe_list: pointer to switch rule population list
+ * @num_recipes: number of switch recipes in the list
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add(0x0290)
+ */
+static enum ice_status
+ice_aq_add_recipe(struct ice_hw *hw,
+ struct ice_aqc_recipe_data_elem *s_recipe_list,
+ u16 num_recipes, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_get_recipe *cmd;
+ struct ice_aq_desc desc;
+ u16 buf_size;
+
+ cmd = &desc.params.add_get_recipe;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
+
+ cmd->num_sub_recipes = cpu_to_le16(num_recipes);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ buf_size = num_recipes * sizeof(*s_recipe_list);
+
+ return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
+}
+
+/**
+ * ice_aq_get_recipe - get switch recipe
+ * @hw: pointer to the HW struct
+ * @s_recipe_list: pointer to switch rule population list
+ * @num_recipes: pointer to the number of recipes (input and output)
+ * @recipe_root: root recipe number of recipe(s) to retrieve
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get(0x0292)
+ *
+ * On input, *num_recipes should equal the number of entries in s_recipe_list.
+ * On output, *num_recipes will equal the number of entries returned in
+ * s_recipe_list.
+ *
+ * The caller must supply enough space in s_recipe_list to hold all possible
+ * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
+ */
+static enum ice_status
+ice_aq_get_recipe(struct ice_hw *hw,
+ struct ice_aqc_recipe_data_elem *s_recipe_list,
+ u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_get_recipe *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ u16 buf_size;
+
+ if (*num_recipes != ICE_MAX_NUM_RECIPES)
+ return ICE_ERR_PARAM;
+
+ cmd = &desc.params.add_get_recipe;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
+
+ cmd->return_index = cpu_to_le16(recipe_root);
+ cmd->num_sub_recipes = 0;
+
+ buf_size = *num_recipes * sizeof(*s_recipe_list);
+
+ status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
+ *num_recipes = le16_to_cpu(cmd->num_sub_recipes);
+
+ return status;
+}
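+
+/* Usage sketch (illustrative): the caller allocates room for the maximum
+ * number of recipes and passes *num_recipes == ICE_MAX_NUM_RECIPES, e.g.:
+ *
+ *	u16 num_recps = ICE_MAX_NUM_RECIPES;
+ *
+ *	status = ice_aq_get_recipe(hw, buf, &num_recps, rid, NULL);
+ *
+ * On success, buf[0] through buf[num_recps - 1] hold the returned
+ * sub-recipes; "buf" is assumed to be an array of ICE_MAX_NUM_RECIPES
+ * ice_aqc_recipe_data_elem entries.
+ */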
+
+/**
+ * ice_aq_map_recipe_to_profile - Map recipe to packet profile
+ * @hw: pointer to the HW struct
+ * @profile_id: package profile ID to associate the recipe with
+ * @r_bitmap: recipe bitmap to associate with the profile
+ * @cd: pointer to command details structure or NULL
+ * Recipe to profile association (0x0291)
+ */
+static enum ice_status
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_recipe_to_profile *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.recipe_to_profile;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
+ cmd->profile_id = cpu_to_le16(profile_id);
+ /* Set the recipe ID bits in the bitmask to let the device know which
+ * recipes we are associating with this profile
+ */
+ memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_get_recipe_to_profile - Get recipes associated with a packet profile
+ * @hw: pointer to the HW struct
+ * @profile_id: package profile ID to associate the recipe with
+ * @r_bitmap: recipe bitmap filled in by the response
+ * @cd: pointer to command details structure or NULL
+ * Get the recipes associated with the given profile ID (0x0293)
+ */
+static enum ice_status
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_recipe_to_profile *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.recipe_to_profile;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
+ cmd->profile_id = cpu_to_le16(profile_id);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (!status)
+ memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
+
+ return status;
+}
+
+/**
+ * ice_alloc_recipe - add recipe resource
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID returned as response to AQ call
+ */
+static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
+{
+ struct ice_aqc_alloc_free_res_elem *sw_buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = struct_size(sw_buf, elem, 1);
+ sw_buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!sw_buf)
+ return ICE_ERR_NO_MEMORY;
+
+ sw_buf->num_elems = cpu_to_le16(1);
+ sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
+ ICE_AQC_RES_TYPE_S) |
+ ICE_AQC_RES_TYPE_FLAG_SHARED);
+ status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (!status)
+ *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
+ kfree(sw_buf);
+
+ return status;
+}
+
+/**
+ * ice_get_recp_to_prof_map - updates recipe to profile mapping
+ * @hw: pointer to hardware structure
+ *
+ * This function populates the recipe_to_profile matrix, where the index into
+ * the array is the recipe ID and the element is the bitmap of profiles that
+ * the recipe is mapped to, along with the reverse profile_to_recipe mapping.
+ */
+static void ice_get_recp_to_prof_map(struct ice_hw *hw)
+{
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u16 i;
+
+ for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
+ u16 j;
+
+ bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
+ bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
+ if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
+ continue;
+ bitmap_copy(profile_to_recipe[i], r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
+ set_bit(i, recipe_to_profile[j]);
+ }
+}
+
+/**
+ * ice_collect_result_idx - copy result index values
+ * @buf: buffer that contains the result index
+ * @recp: the recipe struct to copy data into
+ */
+static void
+ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
+ struct ice_sw_recipe *recp)
+{
+ if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
+ set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
+ recp->res_idxs);
+}
+
+/**
+ * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
+ * @hw: pointer to hardware structure
+ * @recps: struct that we need to populate
+ * @rid: recipe ID that we are populating
+ * @refresh_required: true if we should get recipe to profile mapping from FW
+ *
+ * This function is used to populate all the necessary entries into our
+ * bookkeeping so that we have a current list of all the recipes that are
+ * programmed in the firmware.
+ */
+static enum ice_status
+ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
+ bool *refresh_required)
+{
+ DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
+ struct ice_aqc_recipe_data_elem *tmp;
+ u16 num_recps = ICE_MAX_NUM_RECIPES;
+ struct ice_prot_lkup_ext *lkup_exts;
+ enum ice_status status;
+ u8 fv_word_idx = 0;
+ u16 sub_recps;
+
+ bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
+
+ /* we need a buffer big enough to accommodate all the recipes */
+ tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ICE_ERR_NO_MEMORY;
+
+ tmp[0].recipe_indx = rid;
+ status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
+ /* non-zero status meaning recipe doesn't exist */
+ if (status)
+ goto err_unroll;
+
+ /* Get recipe to profile map so that we can get the fv from lkups that
+ * we read for a recipe from FW. Since we want to minimize the number of
+ * times we make this FW call, just make one call and cache the copy
+ * until a new recipe is added. This operation is only required the
+ * first time to get the changes from FW. Then to search existing
+ * entries we don't need to update the cache again until another recipe
+ * gets added.
+ */
+ if (*refresh_required) {
+ ice_get_recp_to_prof_map(hw);
+ *refresh_required = false;
+ }
+
+ /* Start populating all the entries for recps[rid] based on lkups from
+ * firmware. Note that we are only creating the root recipe in our
+ * database.
+ */
+ lkup_exts = &recps[rid].lkup_exts;
+
+ for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
+ struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
+ struct ice_recp_grp_entry *rg_entry;
+ u8 i, prof, idx, prot = 0;
+ bool is_root;
+ u16 off = 0;
+
+ rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
+ GFP_KERNEL);
+ if (!rg_entry) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll;
+ }
+
+ idx = root_bufs.recipe_indx;
+ is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
+
+ /* Mark all result indices in this chain */
+ if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
+ set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
+ result_bm);
+
+ /* get the first profile that is associated with rid */
+ prof = find_first_bit(recipe_to_profile[idx],
+ ICE_MAX_NUM_PROFILES);
+ for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
+ u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
+
+ rg_entry->fv_idx[i] = lkup_indx;
+ rg_entry->fv_mask[i] =
+ le16_to_cpu(root_bufs.content.mask[i + 1]);
+
+ /* If the recipe is a chained recipe then all of its
+ * child recipes' results will have a result index.
+ * Those result indexes must not be used to fill
+ * fv_words; only the protocol IDs and offsets are
+ * needed. Skip every fv_idx that stores a result
+ * index, and also skip any fv_idx equal to
+ * ICE_AQ_RECIPE_LKUP_IGNORE or 0, since those are not
+ * valid offset values.
+ */
+ if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
+ rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
+ rg_entry->fv_idx[i] == 0)
+ continue;
+
+ ice_find_prot_off(hw, ICE_BLK_SW, prof,
+ rg_entry->fv_idx[i], &prot, &off);
+ lkup_exts->fv_words[fv_word_idx].prot_id = prot;
+ lkup_exts->fv_words[fv_word_idx].off = off;
+ lkup_exts->field_mask[fv_word_idx] =
+ rg_entry->fv_mask[i];
+ fv_word_idx++;
+ }
+ /* populate rg_list with the data from the child entry of this
+ * recipe
+ */
+ list_add(&rg_entry->l_entry, &recps[rid].rg_list);
+
+ /* Propagate some data to the recipe database */
+ recps[idx].is_root = !!is_root;
+ recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
+ bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
+ if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
+ recps[idx].chain_idx = root_bufs.content.result_indx &
+ ~ICE_AQ_RECIPE_RESULT_EN;
+ set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
+ } else {
+ recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
+ }
+
+ if (!is_root)
+ continue;
+
+ /* Only do the following for root recipes entries */
+ memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
+ sizeof(recps[idx].r_bitmap));
+ recps[idx].root_rid = root_bufs.content.rid &
+ ~ICE_AQ_RECIPE_ID_IS_ROOT;
+ recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
+ }
+
+ /* Complete initialization of the root recipe entry */
+ lkup_exts->n_val_words = fv_word_idx;
+ recps[rid].big_recp = (num_recps > 1);
+ recps[rid].n_grp_count = (u8)num_recps;
+ recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
+ recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
+ GFP_KERNEL);
+ if (!recps[rid].root_buf) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll;
+ }
+
+ /* Copy result indexes */
+ bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
+ recps[rid].recp_created = true;
+
+err_unroll:
+ kfree(tmp);
+ return status;
+}
+
/* ice_init_port_info - Initialize port_info with switch configuration data
* @pi: pointer to port_info
* @vsi_port_num: VSI number or port number
@@ -1627,6 +2273,125 @@ exit:
}
/**
+ * ice_mac_fltr_exist - does this MAC filter exist for given VSI
+ * @hw: pointer to the hardware structure
+ * @mac: MAC address to be checked (for MAC filter)
+ * @vsi_handle: check MAC filter for this VSI
+ */
+bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
+{
+ struct ice_fltr_mgmt_list_entry *entry;
+ struct list_head *rule_head;
+ struct ice_switch_info *sw;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ u16 hw_vsi_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return false;
+
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ sw = hw->switch_info;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
+ if (!rule_head)
+ return false;
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
+ mutex_lock(rule_lock);
+ list_for_each_entry(entry, rule_head, list_entry) {
+ struct ice_fltr_info *f_info = &entry->fltr_info;
+ u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
+
+ if (is_zero_ether_addr(mac_addr))
+ continue;
+
+ if (f_info->flag != ICE_FLTR_TX ||
+ f_info->src_id != ICE_SRC_ID_VSI ||
+ f_info->lkup_type != ICE_SW_LKUP_MAC ||
+ f_info->fltr_act != ICE_FWD_TO_VSI ||
+ hw_vsi_id != f_info->fwd_id.hw_vsi_id)
+ continue;
+
+ if (ether_addr_equal(mac, mac_addr)) {
+ mutex_unlock(rule_lock);
+ return true;
+ }
+ }
+ mutex_unlock(rule_lock);
+ return false;
+}
+
+/**
+ * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
+ * @hw: pointer to the hardware structure
+ * @vlan_id: VLAN ID
+ * @vsi_handle: check VLAN filter for this VSI
+ */
+bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
+{
+ struct ice_fltr_mgmt_list_entry *entry;
+ struct list_head *rule_head;
+ struct ice_switch_info *sw;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ u16 hw_vsi_id;
+
+ if (vlan_id > ICE_MAX_VLAN_ID)
+ return false;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return false;
+
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ sw = hw->switch_info;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
+ if (!rule_head)
+ return false;
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
+ mutex_lock(rule_lock);
+ list_for_each_entry(entry, rule_head, list_entry) {
+ struct ice_fltr_info *f_info = &entry->fltr_info;
+ u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
+ struct ice_vsi_list_map_info *map_info;
+
+ if (entry_vlan_id > ICE_MAX_VLAN_ID)
+ continue;
+
+ if (f_info->flag != ICE_FLTR_TX ||
+ f_info->src_id != ICE_SRC_ID_VSI ||
+ f_info->lkup_type != ICE_SW_LKUP_VLAN)
+ continue;
+
+ /* The only allowed filter actions are FWD_TO_VSI and FWD_TO_VSI_LIST */
+ if (f_info->fltr_act != ICE_FWD_TO_VSI &&
+ f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
+ continue;
+
+ if (f_info->fltr_act == ICE_FWD_TO_VSI) {
+ if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
+ continue;
+ } else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
+ /* If filter_action is FWD_TO_VSI_LIST, make sure
+ * that the VSI being checked is part of the VSI list
+ */
+ if (entry->vsi_count == 1 &&
+ entry->vsi_list_info) {
+ map_info = entry->vsi_list_info;
+ if (!test_bit(vsi_handle, map_info->vsi_map))
+ continue;
+ }
+ }
+
+ if (vlan_id == entry_vlan_id) {
+ mutex_unlock(rule_lock);
+ return true;
+ }
+ }
+ mutex_unlock(rule_lock);
+
+ return false;
+}
+
+/**
* ice_add_mac - Add a MAC address based filter rule
* @hw: pointer to the hardware structure
* @m_list: list of MAC addresses and forwarding information
@@ -2037,6 +2802,27 @@ ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
}
/**
+ * ice_rem_adv_rule_info - remove advanced rule information
+ * @hw: pointer to the hardware structure
+ * @rule_head: pointer to the switch list structure that we want to delete
+ */
+static void
+ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
+{
+ struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
+ struct ice_adv_fltr_mgmt_list_entry *lst_itr;
+
+ if (list_empty(rule_head))
+ return;
+
+ list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
+ list_del(&lst_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ }
+}
+
+/**
* ice_cfg_dflt_vsi - change state of VSI to set/clear default
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to set as default
@@ -2773,6 +3559,1459 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
return status;
}
+/* This is the mapping table that maps every word within a given protocol
+ * structure to the real byte offset as per the specification of that
+ * protocol header.
+ * For example, the destination address occupies 3 words in the Ethernet
+ * header; the corresponding bytes start at offsets 0, 2, 4 in the actual
+ * packet header, and the source address starts at offsets 6, 8, 10.
+ * IMPORTANT: Every structure that is part of the "ice_prot_hdr" union should
+ * have a matching entry describing its fields. This needs to be updated if a
+ * new structure is added to that union.
+ */
+static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
+ { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
+ { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
+ { ICE_ETYPE_OL, { 0 } },
+ { ICE_VLAN_OFOS, { 2, 0 } },
+ { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
+ { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
+ { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
+ 26, 28, 30, 32, 34, 36, 38 } },
+ { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
+ 26, 28, 30, 32, 34, 36, 38 } },
+ { ICE_TCP_IL, { 0, 2 } },
+ { ICE_UDP_OF, { 0, 2 } },
+ { ICE_UDP_ILOS, { 0, 2 } },
+};
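+
+/* For example, reading the table above: word index 3 of ICE_IPV4_OFOS
+ * extracts the 16-bit field at byte offset 6 of the outer IPv4 header.
+ */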
+
+static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
+ { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
+ { ICE_MAC_IL, ICE_MAC_IL_HW },
+ { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
+ { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
+ { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
+ { ICE_IPV4_IL, ICE_IPV4_IL_HW },
+ { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
+ { ICE_IPV6_IL, ICE_IPV6_IL_HW },
+ { ICE_TCP_IL, ICE_TCP_IL_HW },
+ { ICE_UDP_OF, ICE_UDP_OF_HW },
+ { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
+};
+
+/**
+ * ice_find_recp - find a recipe
+ * @hw: pointer to the hardware structure
+ * @lkup_exts: extension sequence to match
+ *
+ * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
+ */
+static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
+{
+ bool refresh_required = true;
+ struct ice_sw_recipe *recp;
+ u8 i;
+
+ /* Walk through existing recipes to find a match */
+ recp = hw->switch_info->recp_list;
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+ /* If a recipe was not created for this ID in SW bookkeeping,
+ * check if FW has an entry for it. If so, update our SW
+ * bookkeeping and continue with the matching.
+ */
+ if (!recp[i].recp_created)
+ if (ice_get_recp_frm_fw(hw,
+ hw->switch_info->recp_list, i,
+ &refresh_required))
+ continue;
+
+ /* Skip inverse action recipes */
+ if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_INV_ACT)
+ continue;
+
+ /* if number of words we are looking for match */
+ if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
+ struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
+ struct ice_fv_word *be = lkup_exts->fv_words;
+ u16 *cr = recp[i].lkup_exts.field_mask;
+ u16 *de = lkup_exts->field_mask;
+ bool found = true;
+ u8 pe, qr;
+
+ /* ar, cr, and qr are related to the recipe words, while
+ * be, de, and pe are related to the lookup words
+ */
+ for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
+ for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
+ qr++) {
+ if (ar[qr].off == be[pe].off &&
+ ar[qr].prot_id == be[pe].prot_id &&
+ cr[qr] == de[pe])
+ /* Found the "pe"th word in the
+ * given recipe
+ */
+ break;
+ }
+ /* After walking through all the words in the
+ * "i"th recipe, if the "pe"th word was not found
+ * then this recipe is not the one we are looking
+ * for. So break out of this loop and try the
+ * next recipe.
+ */
+ if (qr >= recp[i].lkup_exts.n_val_words) {
+ found = false;
+ break;
+ }
+ }
+ /* If for "i"th recipe the found was never set to false
+ * then it means we found our match
+ */
+ if (found)
+ return i; /* Return the recipe ID */
+ }
+ }
+ return ICE_MAX_NUM_RECIPES;
+}
+
+/**
+ * ice_prot_type_to_id - get protocol ID from protocol type
+ * @type: protocol type
+ * @id: pointer to variable that will receive the ID
+ *
+ * Returns true if found, false otherwise
+ */
+static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
+{
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
+ if (ice_prot_id_tbl[i].type == type) {
+ *id = ice_prot_id_tbl[i].protocol_id;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * ice_fill_valid_words - count valid words
+ * @rule: advanced rule with lookup information
+ * @lkup_exts: byte offset extractions of the words that are valid
+ *
+ * Calculate the valid words in a lookup rule using the mask value.
+ */
+static u8
+ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
+ struct ice_prot_lkup_ext *lkup_exts)
+{
+ u8 j, word, prot_id, ret_val;
+
+ if (!ice_prot_type_to_id(rule->type, &prot_id))
+ return 0;
+
+ word = lkup_exts->n_val_words;
+
+ for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
+ if (((u16 *)&rule->m_u)[j] &&
+ rule->type < ARRAY_SIZE(ice_prot_ext)) {
+ /* No more space to accommodate */
+ if (word >= ICE_MAX_CHAIN_WORDS)
+ return 0;
+ lkup_exts->fv_words[word].off =
+ ice_prot_ext[rule->type].offs[j];
+ lkup_exts->fv_words[word].prot_id =
+ ice_prot_id_tbl[rule->type].protocol_id;
+ lkup_exts->field_mask[word] =
+ be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
+ word++;
+ }
+
+ ret_val = word - lkup_exts->n_val_words;
+ lkup_exts->n_val_words = word;
+
+ return ret_val;
+}
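+
+/* Sketch (illustrative): a lookup element of type ICE_MAC_OFOS whose mask
+ * covers only the destination MAC address would add three valid words with
+ * byte offsets 0, 2 and 4, per the ice_prot_ext table above.
+ */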
+
+/**
+ * ice_create_first_fit_recp_def - Create a recipe grouping
+ * @hw: pointer to the hardware structure
+ * @lkup_exts: an array of protocol header extractions
+ * @rg_list: pointer to a list that stores new recipe groups
+ * @recp_cnt: pointer to a variable that stores returned number of recipe groups
+ *
+ * Using a first-fit algorithm, take all the words that are still not done
+ * and start grouping them in 4-word groups. Each group makes up one
+ * recipe.
+ */
+static enum ice_status
+ice_create_first_fit_recp_def(struct ice_hw *hw,
+ struct ice_prot_lkup_ext *lkup_exts,
+ struct list_head *rg_list,
+ u8 *recp_cnt)
+{
+ struct ice_pref_recipe_group *grp = NULL;
+ u8 j;
+
+ *recp_cnt = 0;
+
+ /* Walk through every word in the rule and check whether it is done;
+ * any word that is not done yet needs to be part of a new recipe.
+ */
+ for (j = 0; j < lkup_exts->n_val_words; j++)
+ if (!test_bit(j, lkup_exts->done)) {
+ if (!grp ||
+ grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
+ struct ice_recp_grp_entry *entry;
+
+ entry = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*entry),
+ GFP_KERNEL);
+ if (!entry)
+ return ICE_ERR_NO_MEMORY;
+ list_add(&entry->l_entry, rg_list);
+ grp = &entry->r_group;
+ (*recp_cnt)++;
+ }
+
+ grp->pairs[grp->n_val_pairs].prot_id =
+ lkup_exts->fv_words[j].prot_id;
+ grp->pairs[grp->n_val_pairs].off =
+ lkup_exts->fv_words[j].off;
+ grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
+ grp->n_val_pairs++;
+ }
+
+ return 0;
+}
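+
+/* A minimal standalone sketch of the first-fit grouping above: pending
+ * words are appended to the current group until it holds four pairs, then
+ * a fresh group is opened. The types and WORDS_PER_GRP are assumptions
+ * for the example:
+ *
+ *	#define WORDS_PER_GRP 4
+ *
+ *	struct grp { u8 n_pairs; u8 word[WORDS_PER_GRP]; };
+ *
+ *	static int group_first_fit(const u8 *word, u8 n_words,
+ *				   struct grp *g, u8 max_grps)
+ *	{
+ *		u8 i, n = 0;
+ *
+ *		for (i = 0; i < n_words; i++) {
+ *			if (!n || g[n - 1].n_pairs == WORDS_PER_GRP) {
+ *				if (n == max_grps)
+ *					return -1;
+ *				g[n++].n_pairs = 0;
+ *			}
+ *			g[n - 1].word[g[n - 1].n_pairs++] = word[i];
+ *		}
+ *		return n;	// e.g. 9 words -> 3 groups (4 + 4 + 1)
+ *	}
+ */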
+
+/**
+ * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
+ * @hw: pointer to the hardware structure
+ * @fv_list: field vector with the extraction sequence information
+ * @rg_list: recipe groupings with protocol-offset pairs
+ *
+ * Helper function to fill in the field vector indices for protocol-offset
+ * pairs. These indexes are then ultimately programmed into a recipe.
+ */
+static enum ice_status
+ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
+ struct list_head *rg_list)
+{
+ struct ice_sw_fv_list_entry *fv;
+ struct ice_recp_grp_entry *rg;
+ struct ice_fv_word *fv_ext;
+
+ if (list_empty(fv_list))
+ return 0;
+
+ fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
+ list_entry);
+ fv_ext = fv->fv_ptr->ew;
+
+ list_for_each_entry(rg, rg_list, l_entry) {
+ u8 i;
+
+ for (i = 0; i < rg->r_group.n_val_pairs; i++) {
+ struct ice_fv_word *pr;
+ bool found = false;
+ u16 mask;
+ u8 j;
+
+ pr = &rg->r_group.pairs[i];
+ mask = rg->r_group.mask[i];
+
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv_ext[j].prot_id == pr->prot_id &&
+ fv_ext[j].off == pr->off) {
+ found = true;
+
+ /* Store index of field vector */
+ rg->fv_idx[i] = j;
+ rg->fv_mask[i] = mask;
+ break;
+ }
+
+ /* Protocol/offset could not be found, caller gave an
+ * invalid pair
+ */
+ if (!found)
+ return ICE_ERR_PARAM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_find_free_recp_res_idx - find free result indexes for recipe
+ * @hw: pointer to hardware structure
+ * @profiles: bitmap of profiles that will be associated with the new recipe
+ * @free_idx: pointer to variable to receive the free index bitmap
+ *
+ * The algorithm used here is:
+ * 1. When creating a new recipe, create a set P which contains all
+ * Profiles that will be associated with our new recipe
+ *
+ * 2. For each Profile p in set P:
+ * a. Add all recipes associated with Profile p into set R
+ * b. Optional: PossibleIndexes &= profile[p].possibleIndexes
+ * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
+ * i. Or just assume they all have the same possible indexes:
+ * 44, 45, 46, 47
+ * i.e., PossibleIndexes = 0x0000F00000000000
+ *
+ * 3. For each Recipe r in set R:
+ * a. UsedIndexes |= (bitwise OR) recipe[r].res_indexes
+ * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
+ *
+ * FreeIndexes will contain the bits indicating the indexes free for use,
+ * then the code needs to update the recipe[r].used_result_idx_bits to
+ * indicate which indexes were selected for use by this recipe.
+ */
+static u16
+ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
+ unsigned long *free_idx)
+{
+ DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
+ DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
+ DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
+ u16 bit;
+
+ bitmap_zero(possible_idx, ICE_MAX_FV_WORDS);
+ bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
+ bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
+ bitmap_zero(free_idx, ICE_MAX_FV_WORDS);
+
+ bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
+
+ /* For each profile we are going to associate the recipe with, add the
+ * recipes that are associated with that profile. This will give us
+ * the set of recipes that our recipe may collide with. Also, determine
+ * what possible result indexes are usable given this set of profiles.
+ */
+ for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
+ bitmap_or(recipes, recipes, profile_to_recipe[bit],
+ ICE_MAX_NUM_RECIPES);
+ bitmap_and(possible_idx, possible_idx,
+ hw->switch_info->prof_res_bm[bit],
+ ICE_MAX_FV_WORDS);
+ }
+
+ /* For each recipe that our new recipe may collide with, determine
+ * which indexes have been used.
+ */
+ for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
+ bitmap_or(used_idx, used_idx,
+ hw->switch_info->recp_list[bit].res_idxs,
+ ICE_MAX_FV_WORDS);
+
+ bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
+
+ /* return number of free indexes */
+ return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
+}
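+
+/* For illustration, the bitmap algebra above on a plain 64-bit mask. The
+ * used set is expected to be a subset of the possible set here, so the
+ * XOR in the code behaves like possible & ~used:
+ *
+ *	u64 possible = 0x0000F00000000000ULL;	// indexes 44..47 usable
+ *	u64 used     = 0x0000500000000000ULL;	// indexes 44 and 46 taken
+ *	u64 free_idx = possible ^ used;		// 0x0000A00000000000ULL
+ *
+ * free_idx has bits 45 and 47 set, i.e. two result indexes remain free.
+ */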
+
+/**
+ * ice_add_sw_recipe - function to call AQ calls to create switch recipe
+ * @hw: pointer to hardware structure
+ * @rm: recipe management list entry
+ * @match_tun_mask: tunnel mask that needs to be programmed
+ * @profiles: bitmap of profiles that will be associated with this recipe
+ */
+static enum ice_status
+ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
+ u16 match_tun_mask, unsigned long *profiles)
+{
+ DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
+ struct ice_aqc_recipe_data_elem *tmp;
+ struct ice_aqc_recipe_data_elem *buf;
+ struct ice_recp_grp_entry *entry;
+ enum ice_status status;
+ u16 free_res_idx;
+ u16 recipe_count;
+ u8 chain_idx;
+ u8 recps = 0;
+
+ /* When more than one recipe are required, another recipe is needed to
+ * chain them together. Matching a tunnel metadata ID takes up one of
+ * the match fields in the chaining recipe reducing the number of
+ * chained recipes by one.
+ */
+ /* check number of free result indices */
+ bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
+ free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
+
+ ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
+ free_res_idx, rm->n_grp_count);
+
+ if (rm->n_grp_count > 1) {
+ if (rm->n_grp_count > free_res_idx)
+ return ICE_ERR_MAX_LIMIT;
+
+ rm->n_grp_count++;
+ }
+
+ if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
+ return ICE_ERR_MAX_LIMIT;
+
+ tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ICE_ERR_NO_MEMORY;
+
+ buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
+ GFP_KERNEL);
+ if (!buf) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_mem;
+ }
+
+ bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
+ recipe_count = ICE_MAX_NUM_RECIPES;
+ status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
+ NULL);
+ if (status || recipe_count == 0)
+ goto err_unroll;
+
+ /* Allocate the recipe resources, and configure them according to the
+ * match fields from protocol headers and extracted field vectors.
+ */
+ chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
+ list_for_each_entry(entry, &rm->rg_list, l_entry) {
+ u8 i;
+
+ status = ice_alloc_recipe(hw, &entry->rid);
+ if (status)
+ goto err_unroll;
+
+ /* Clear the result index of the located recipe, as this will be
+ * updated, if needed, later in the recipe creation process.
+ */
+ tmp[0].content.result_indx = 0;
+
+ buf[recps] = tmp[0];
+ buf[recps].recipe_indx = (u8)entry->rid;
+ /* if the recipe is a non-root recipe, RID should be programmed
+ * as 0 for the rules to be applied correctly.
+ */
+ buf[recps].content.rid = 0;
+ memset(&buf[recps].content.lkup_indx, 0,
+ sizeof(buf[recps].content.lkup_indx));
+
+ /* All recipes use look-up index 0 to match switch ID. */
+ buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ buf[recps].content.mask[0] =
+ cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
+ /* Set up lkup_indx 1..4 as INVALID/ignore and set the mask
+ * to 0
+ */
+ for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
+ buf[recps].content.lkup_indx[i] = 0x80;
+ buf[recps].content.mask[i] = 0;
+ }
+
+ for (i = 0; i < entry->r_group.n_val_pairs; i++) {
+ buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
+ buf[recps].content.mask[i + 1] =
+ cpu_to_le16(entry->fv_mask[i]);
+ }
+
+ if (rm->n_grp_count > 1) {
+ /* Check whether there really is a valid result index
+ * that can be used.
+ */
+ if (chain_idx >= ICE_MAX_FV_WORDS) {
+ ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
+ status = ICE_ERR_MAX_LIMIT;
+ goto err_unroll;
+ }
+
+ entry->chain_idx = chain_idx;
+ buf[recps].content.result_indx =
+ ICE_AQ_RECIPE_RESULT_EN |
+ ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
+ ICE_AQ_RECIPE_RESULT_DATA_M);
+ clear_bit(chain_idx, result_idx_bm);
+ chain_idx = find_first_bit(result_idx_bm,
+ ICE_MAX_FV_WORDS);
+ }
+
+ /* fill recipe dependencies */
+ bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ set_bit(buf[recps].recipe_indx,
+ (unsigned long *)buf[recps].recipe_bitmap);
+ buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+ recps++;
+ }
+
+ if (rm->n_grp_count == 1) {
+ rm->root_rid = buf[0].recipe_indx;
+ set_bit(buf[0].recipe_indx, rm->r_bitmap);
+ buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
+ if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
+ memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
+ sizeof(buf[0].recipe_bitmap));
+ } else {
+ status = ICE_ERR_BAD_PTR;
+ goto err_unroll;
+ }
+ /* Applicable only for ROOT_RECIPE: set the fwd_priority of the
+ * recipe being created if the user specified one. Any advanced
+ * switch filter that results in a new extraction sequence usually
+ * ends up creating a new recipe of type ROOT, and recipes are
+ * usually associated with profiles. A switch rule referring to the
+ * newly created recipe needs either a 'fwd' or a 'join' priority,
+ * otherwise switch rule evaluation will not happen correctly. In
+ * other words, if a switch rule is to be evaluated on a priority
+ * basis, then the recipe needs to have a priority; otherwise it
+ * will be evaluated last.
+ */
+ buf[0].content.act_ctrl_fwd_priority = rm->priority;
+ } else {
+ struct ice_recp_grp_entry *last_chain_entry;
+ u16 rid, i;
+
+ /* Allocate the last recipe that will chain the outcomes of the
+ * other recipes together
+ */
+ status = ice_alloc_recipe(hw, &rid);
+ if (status)
+ goto err_unroll;
+
+ buf[recps].recipe_indx = (u8)rid;
+ buf[recps].content.rid = (u8)rid;
+ buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
+ /* the new entry created should also be part of rg_list to
+ * make sure we have a complete recipe
+ */
+ last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*last_chain_entry),
+ GFP_KERNEL);
+ if (!last_chain_entry) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll;
+ }
+ last_chain_entry->rid = rid;
+ memset(&buf[recps].content.lkup_indx, 0,
+ sizeof(buf[recps].content.lkup_indx));
+ /* All recipes use look-up index 0 to match switch ID. */
+ buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ buf[recps].content.mask[0] =
+ cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
+ for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
+ buf[recps].content.lkup_indx[i] =
+ ICE_AQ_RECIPE_LKUP_IGNORE;
+ buf[recps].content.mask[i] = 0;
+ }
+
+ i = 1;
+ /* update r_bitmap with the recp that is used for chaining */
+ set_bit(rid, rm->r_bitmap);
+ /* this is the recipe that chains all the other recipes, so it
+ * should not itself have a chaining ID
+ */
+ last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
+ list_for_each_entry(entry, &rm->rg_list, l_entry) {
+ last_chain_entry->fv_idx[i] = entry->chain_idx;
+ buf[recps].content.lkup_indx[i] = entry->chain_idx;
+ buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
+ set_bit(entry->rid, rm->r_bitmap);
+ }
+ list_add(&last_chain_entry->l_entry, &rm->rg_list);
+ if (sizeof(buf[recps].recipe_bitmap) >=
+ sizeof(rm->r_bitmap)) {
+ memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
+ sizeof(buf[recps].recipe_bitmap));
+ } else {
+ status = ICE_ERR_BAD_PTR;
+ goto err_unroll;
+ }
+ buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+
+ /* To differentiate among different UDP tunnels, a metadata ID
+ * flag is used.
+ */
+ if (match_tun_mask) {
+ buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
+ buf[recps].content.mask[i] =
+ cpu_to_le16(match_tun_mask);
+ }
+
+ recps++;
+ rm->root_rid = (u8)rid;
+ }
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ goto err_unroll;
+
+ status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
+ ice_release_change_lock(hw);
+ if (status)
+ goto err_unroll;
+
+ /* Add every recipe that just got created to the recipe
+ * bookkeeping list
+ */
+ list_for_each_entry(entry, &rm->rg_list, l_entry) {
+ struct ice_switch_info *sw = hw->switch_info;
+ bool is_root, idx_found = false;
+ struct ice_sw_recipe *recp;
+ u16 idx, buf_idx = 0;
+
+ /* find buffer index for copying some data */
+ for (idx = 0; idx < rm->n_grp_count; idx++)
+ if (buf[idx].recipe_indx == entry->rid) {
+ buf_idx = idx;
+ idx_found = true;
+ }
+
+ if (!idx_found) {
+ status = ICE_ERR_OUT_OF_RANGE;
+ goto err_unroll;
+ }
+
+ recp = &sw->recp_list[entry->rid];
+ is_root = (rm->root_rid == entry->rid);
+ recp->is_root = is_root;
+
+ recp->root_rid = entry->rid;
+ recp->big_recp = (is_root && rm->n_grp_count > 1);
+
+ memcpy(&recp->ext_words, entry->r_group.pairs,
+ entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
+
+ memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
+ sizeof(recp->r_bitmap));
+
+ /* Copy non-result fv index values and masks to recipe. This
+ * call will also update the result recipe bitmask.
+ */
+ ice_collect_result_idx(&buf[buf_idx], recp);
+
+ /* for non-root recipes, also copy to the root; this allows
+ * easier matching of a complete chained recipe
+ */
+ if (!is_root)
+ ice_collect_result_idx(&buf[buf_idx],
+ &sw->recp_list[rm->root_rid]);
+
+ recp->n_ext_words = entry->r_group.n_val_pairs;
+ recp->chain_idx = entry->chain_idx;
+ recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
+ recp->n_grp_count = rm->n_grp_count;
+ recp->recp_created = true;
+ }
+ rm->root_buf = buf;
+ kfree(tmp);
+ return status;
+
+err_unroll:
+err_mem:
+ kfree(tmp);
+ devm_kfree(ice_hw_to_dev(hw), buf);
+ return status;
+}
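+
+/* For illustration, how the chaining result index is packed above. The
+ * shift/mask values here are assumptions for the example; the real ones
+ * come from the ICE_AQ_RECIPE_RESULT_* defines:
+ *
+ *	#define RES_EN		0x80	// assumed enable bit
+ *	#define RES_DATA_S	0	// assumed data shift
+ *	#define RES_DATA_M	0x3F	// assumed data mask
+ *
+ *	u8 chain_idx = 45;
+ *	u8 result_indx = RES_EN | ((chain_idx << RES_DATA_S) & RES_DATA_M);
+ *	// result_indx == 0xAD: enable bit set, chain index 45 in data field
+ */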
+
+/**
+ * ice_create_recipe_group - creates recipe group
+ * @hw: pointer to hardware structure
+ * @rm: recipe management list entry
+ * @lkup_exts: lookup elements
+ */
+static enum ice_status
+ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
+ struct ice_prot_lkup_ext *lkup_exts)
+{
+ enum ice_status status;
+ u8 recp_count = 0;
+
+ rm->n_grp_count = 0;
+
+ /* Create recipes for words that are marked not done, packing them
+ * using the first-fit algorithm.
+ */
+ status = ice_create_first_fit_recp_def(hw, lkup_exts,
+ &rm->rg_list, &recp_count);
+ if (!status) {
+ rm->n_grp_count += recp_count;
+ rm->n_ext_words = lkup_exts->n_val_words;
+ memcpy(&rm->ext_words, lkup_exts->fv_words,
+ sizeof(rm->ext_words));
+ memcpy(rm->word_masks, lkup_exts->field_mask,
+ sizeof(rm->word_masks));
+ }
+
+ return status;
+}
+
+/**
+ * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
+ * @hw: pointer to hardware structure
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @bm: bitmap of field vectors to consider
+ * @fv_list: pointer to a list that holds the returned field vectors
+ */
+static enum ice_status
+ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ unsigned long *bm, struct list_head *fv_list)
+{
+ enum ice_status status;
+ u8 *prot_ids;
+ u16 i;
+
+ prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
+ if (!prot_ids)
+ return ICE_ERR_NO_MEMORY;
+
+ for (i = 0; i < lkups_cnt; i++)
+ if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
+ status = ICE_ERR_CFG;
+ goto free_mem;
+ }
+
+ /* Find field vectors that include all specified protocol types */
+ status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
+
+free_mem:
+ kfree(prot_ids);
+ return status;
+}
+
+/**
+ * ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
+ * @hw: pointer to hardware structure
+ * @rinfo: other information regarding the rule e.g. priority and action info
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+static void
+ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
+ unsigned long *bm)
+{
+ bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
+
+ ice_get_sw_fv_bitmap(hw, ICE_PROF_NON_TUN, bm);
+}
+
+/**
+ * ice_add_adv_recipe - Add an advanced recipe that is not part of the default set
+ * @hw: pointer to hardware structure
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @rinfo: other information regarding the rule e.g. priority and action info
+ * @rid: return the recipe ID of the recipe created
+ */
+static enum ice_status
+ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
+{
+ DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
+ DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
+ struct ice_prot_lkup_ext *lkup_exts;
+ struct ice_recp_grp_entry *r_entry;
+ struct ice_sw_fv_list_entry *fvit;
+ struct ice_recp_grp_entry *r_tmp;
+ struct ice_sw_fv_list_entry *tmp;
+ enum ice_status status = 0;
+ struct ice_sw_recipe *rm;
+ u16 match_tun_mask = 0;
+ u8 i;
+
+ if (!lkups_cnt)
+ return ICE_ERR_PARAM;
+
+ lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
+ if (!lkup_exts)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Determine the number of words to be matched and whether it exceeds a
+ * recipe's restrictions
+ */
+ for (i = 0; i < lkups_cnt; i++) {
+ u16 count;
+
+ if (lkups[i].type >= ICE_PROTOCOL_LAST) {
+ status = ICE_ERR_CFG;
+ goto err_free_lkup_exts;
+ }
+
+ count = ice_fill_valid_words(&lkups[i], lkup_exts);
+ if (!count) {
+ status = ICE_ERR_CFG;
+ goto err_free_lkup_exts;
+ }
+ }
+
+ rm = kzalloc(sizeof(*rm), GFP_KERNEL);
+ if (!rm) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_free_lkup_exts;
+ }
+
+ /* Get field vectors that contain fields extracted from all the protocol
+ * headers being programmed.
+ */
+ INIT_LIST_HEAD(&rm->fv_list);
+ INIT_LIST_HEAD(&rm->rg_list);
+
+ /* Get bitmap of field vectors (profiles) that are compatible with the
+ * rule request; only these will be searched in the subsequent call to
+ * ice_get_fv.
+ */
+ ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
+
+ status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
+ if (status)
+ goto err_unroll;
+
+ /* Group match words into recipes using preferred recipe grouping
+ * criteria.
+ */
+ status = ice_create_recipe_group(hw, rm, lkup_exts);
+ if (status)
+ goto err_unroll;
+
+ /* set the recipe priority if specified */
+ rm->priority = (u8)rinfo->priority;
+
+ /* Find offsets from the field vector. Pick the first one for all the
+ * recipes.
+ */
+ status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
+ if (status)
+ goto err_unroll;
+
+ /* get bitmap of all profiles the recipe will be associated with */
+ bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
+ list_for_each_entry(fvit, &rm->fv_list, list_entry) {
+ ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
+ set_bit((u16)fvit->profile_id, profiles);
+ }
+
+ /* Look for a recipe which matches our requested fv / mask list */
+ *rid = ice_find_recp(hw, lkup_exts);
+ if (*rid < ICE_MAX_NUM_RECIPES)
+ /* Success if we found a recipe that matches the existing criteria */
+ goto err_unroll;
+
+ /* Recipe we need does not exist, add a recipe */
+ status = ice_add_sw_recipe(hw, rm, match_tun_mask, profiles);
+ if (status)
+ goto err_unroll;
+
+ /* Associate all the recipes created with all the profiles in the
+ * common field vector.
+ */
+ list_for_each_entry(fvit, &rm->fv_list, list_entry) {
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u16 j;
+
+ status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
+ (u8 *)r_bitmap, NULL);
+ if (status)
+ goto err_unroll;
+
+ bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ goto err_unroll;
+
+ status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
+ (u8 *)r_bitmap,
+ NULL);
+ ice_release_change_lock(hw);
+
+ if (status)
+ goto err_unroll;
+
+ /* Update profile to recipe bitmap array */
+ bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+
+ /* Update recipe to profile bitmap array */
+ for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
+ set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
+ }
+
+ *rid = rm->root_rid;
+ memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
+ sizeof(*lkup_exts));
+err_unroll:
+ list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
+ list_del(&r_entry->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), r_entry);
+ }
+
+ list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
+ list_del(&fvit->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fvit);
+ }
+
+ if (rm->root_buf)
+ devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
+
+ kfree(rm);
+
+err_free_lkup_exts:
+ kfree(lkup_exts);
+
+ return status;
+}
+
+/**
+ * ice_find_dummy_packet - find dummy packet
+ *
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @pkt: dummy packet to fill according to filter match criteria
+ * @pkt_len: packet length of dummy packet
+ * @offsets: pointer to receive the pointer to the offsets for the packet
+ */
+static void
+ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ const u8 **pkt, u16 *pkt_len,
+ const struct ice_dummy_pkt_offsets **offsets)
+{
+ bool tcp = false, udp = false, ipv6 = false, vlan = false;
+ u16 i;
+
+ for (i = 0; i < lkups_cnt; i++) {
+ if (lkups[i].type == ICE_UDP_ILOS)
+ udp = true;
+ else if (lkups[i].type == ICE_TCP_IL)
+ tcp = true;
+ else if (lkups[i].type == ICE_IPV6_OFOS)
+ ipv6 = true;
+ else if (lkups[i].type == ICE_VLAN_OFOS)
+ vlan = true;
+ else if (lkups[i].type == ICE_ETYPE_OL &&
+ lkups[i].h_u.ethertype.ethtype_id ==
+ cpu_to_be16(ICE_IPV6_ETHER_ID) &&
+ lkups[i].m_u.ethertype.ethtype_id ==
+ cpu_to_be16(0xFFFF))
+ ipv6 = true;
+ }
+
+ if (udp && !ipv6) {
+ if (vlan) {
+ *pkt = dummy_vlan_udp_packet;
+ *pkt_len = sizeof(dummy_vlan_udp_packet);
+ *offsets = dummy_vlan_udp_packet_offsets;
+ return;
+ }
+ *pkt = dummy_udp_packet;
+ *pkt_len = sizeof(dummy_udp_packet);
+ *offsets = dummy_udp_packet_offsets;
+ return;
+ } else if (udp && ipv6) {
+ if (vlan) {
+ *pkt = dummy_vlan_udp_ipv6_packet;
+ *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
+ *offsets = dummy_vlan_udp_ipv6_packet_offsets;
+ return;
+ }
+ *pkt = dummy_udp_ipv6_packet;
+ *pkt_len = sizeof(dummy_udp_ipv6_packet);
+ *offsets = dummy_udp_ipv6_packet_offsets;
+ return;
+ } else if ((tcp && ipv6) || ipv6) {
+ if (vlan) {
+ *pkt = dummy_vlan_tcp_ipv6_packet;
+ *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
+ *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
+ return;
+ }
+ *pkt = dummy_tcp_ipv6_packet;
+ *pkt_len = sizeof(dummy_tcp_ipv6_packet);
+ *offsets = dummy_tcp_ipv6_packet_offsets;
+ return;
+ }
+
+ if (vlan) {
+ *pkt = dummy_vlan_tcp_packet;
+ *pkt_len = sizeof(dummy_vlan_tcp_packet);
+ *offsets = dummy_vlan_tcp_packet_offsets;
+ } else {
+ *pkt = dummy_tcp_packet;
+ *pkt_len = sizeof(dummy_tcp_packet);
+ *offsets = dummy_tcp_packet_offsets;
+ }
+}
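+
+/* The selection above, summarized; TCP is the fallback L4 when nothing
+ * more specific matched, and the vlan flag only picks the _vlan_ variant
+ * of the chosen packet:
+ *
+ *	udp  ipv6	dummy packet
+ *	 1    0		dummy[_vlan]_udp_packet
+ *	 1    1		dummy[_vlan]_udp_ipv6_packet
+ *	 0    1		dummy[_vlan]_tcp_ipv6_packet
+ *	 0    0		dummy[_vlan]_tcp_packet
+ */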
+
+/**
+ * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
+ *
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @s_rule: stores rule information from the match criteria
+ * @dummy_pkt: dummy packet to fill according to filter match criteria
+ * @pkt_len: packet length of dummy packet
+ * @offsets: offset info for the dummy packet
+ */
+static enum ice_status
+ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ struct ice_aqc_sw_rules_elem *s_rule,
+ const u8 *dummy_pkt, u16 pkt_len,
+ const struct ice_dummy_pkt_offsets *offsets)
+{
+ u8 *pkt;
+ u16 i;
+
+ /* Start with a packet with a pre-defined/dummy content. Then, fill
+ * in the header values to be looked up or matched.
+ */
+ pkt = s_rule->pdata.lkup_tx_rx.hdr;
+
+ memcpy(pkt, dummy_pkt, pkt_len);
+
+ for (i = 0; i < lkups_cnt; i++) {
+ enum ice_protocol_type type;
+ u16 offset = 0, len = 0, j;
+ bool found = false;
+
+ /* find the start of this layer; it should be found since this
+ * was already checked when searching for the dummy packet
+ */
+ type = lkups[i].type;
+ for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
+ if (type == offsets[j].type) {
+ offset = offsets[j].offset;
+ found = true;
+ break;
+ }
+ }
+ /* this should never happen in a correct calling sequence */
+ if (!found)
+ return ICE_ERR_PARAM;
+
+ switch (lkups[i].type) {
+ case ICE_MAC_OFOS:
+ case ICE_MAC_IL:
+ len = sizeof(struct ice_ether_hdr);
+ break;
+ case ICE_ETYPE_OL:
+ len = sizeof(struct ice_ethtype_hdr);
+ break;
+ case ICE_VLAN_OFOS:
+ len = sizeof(struct ice_vlan_hdr);
+ break;
+ case ICE_IPV4_OFOS:
+ case ICE_IPV4_IL:
+ len = sizeof(struct ice_ipv4_hdr);
+ break;
+ case ICE_IPV6_OFOS:
+ case ICE_IPV6_IL:
+ len = sizeof(struct ice_ipv6_hdr);
+ break;
+ case ICE_TCP_IL:
+ case ICE_UDP_OF:
+ case ICE_UDP_ILOS:
+ len = sizeof(struct ice_l4_hdr);
+ break;
+ case ICE_SCTP_IL:
+ len = sizeof(struct ice_sctp_hdr);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+
+ /* the length should be a word multiple */
+ if (len % ICE_BYTES_PER_WORD)
+ return ICE_ERR_CFG;
+
+ /* We have the offset to the header start, the length, the
+ * caller's header values and mask. Use this information to
+ * copy the data into the dummy packet appropriately based on
+ * the mask. Note that we write only the bits indicated by the
+ * mask to make sure we don't improperly write over any
+ * significant packet data.
+ */
+ for (j = 0; j < len / sizeof(u16); j++)
+ if (((u16 *)&lkups[i].m_u)[j])
+ ((u16 *)(pkt + offset))[j] =
+ (((u16 *)(pkt + offset))[j] &
+ ~((u16 *)&lkups[i].m_u)[j]) |
+ (((u16 *)&lkups[i].h_u)[j] &
+ ((u16 *)&lkups[i].m_u)[j]);
+ }
+
+ s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);
+
+ return 0;
+}
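+
+/* For illustration, the read-modify-write above on one 16-bit word: only
+ * bits set in the caller's mask are taken from the header value, all
+ * other bits keep the dummy packet's content:
+ *
+ *	u16 pkt_word = 0x4500;	// from the dummy packet
+ *	u16 hdr_word = 0x1234;	// caller's match value
+ *	u16 mask     = 0x00FF;	// caller only cares about the low byte
+ *
+ *	pkt_word = (pkt_word & ~mask) | (hdr_word & mask);
+ *	// pkt_word == 0x4534
+ */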
+
+/**
+ * ice_find_adv_rule_entry - Search a rule entry
+ * @hw: pointer to the hardware structure
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @recp_id: recipe ID for which we are finding the rule
+ * @rinfo: other information regarding the rule e.g. priority and action info
+ *
+ * Helper function to search for a given advanced rule entry.
+ * Returns a pointer to the entry storing the rule if found.
+ */
+static struct ice_adv_fltr_mgmt_list_entry *
+ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, u16 recp_id,
+ struct ice_adv_rule_info *rinfo)
+{
+ struct ice_adv_fltr_mgmt_list_entry *list_itr;
+ struct ice_switch_info *sw = hw->switch_info;
+ int i;
+
+ list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
+ list_entry) {
+ bool lkups_matched = true;
+
+ if (lkups_cnt != list_itr->lkups_cnt)
+ continue;
+ for (i = 0; i < list_itr->lkups_cnt; i++)
+ if (memcmp(&list_itr->lkups[i], &lkups[i],
+ sizeof(*lkups))) {
+ lkups_matched = false;
+ break;
+ }
+ if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
+ lkups_matched)
+ return list_itr;
+ }
+ return NULL;
+}
+
+/**
+ * ice_adv_add_update_vsi_list
+ * @hw: pointer to the hardware structure
+ * @m_entry: pointer to current adv filter management list entry
+ * @cur_fltr: filter information from the book keeping entry
+ * @new_fltr: filter information with the new VSI to be added
+ *
+ * Call AQ command to add or update previously created VSI list with new VSI.
+ *
+ * Helper function to do the bookkeeping associated with adding filter
+ * information. The bookkeeping algorithm is described below:
+ * When a VSI needs to subscribe to a given advanced filter
+ * if only one VSI has been added till now
+ * Allocate a new VSI list and add two VSIs
+ * to this list using switch rule command
+ * Update the previously created switch rule with the
+ * newly created VSI list ID
+ * if a VSI list was previously created
+ * Add the new VSI to the previously created VSI list set
+ * using the update switch rule command
+ */
+static enum ice_status
+ice_adv_add_update_vsi_list(struct ice_hw *hw,
+ struct ice_adv_fltr_mgmt_list_entry *m_entry,
+ struct ice_adv_rule_info *cur_fltr,
+ struct ice_adv_rule_info *new_fltr)
+{
+ enum ice_status status;
+ u16 vsi_list_id = 0;
+
+ if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
+ cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
+ cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
+ return ICE_ERR_NOT_IMPL;
+
+ if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
+ new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
+ (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
+ cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
+ return ICE_ERR_NOT_IMPL;
+
+ if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
+ /* Only one entry existed in the mapping and it was not already
+ * a part of a VSI list. So, create a VSI list with the old and
+ * new VSIs.
+ */
+ struct ice_fltr_info tmp_fltr;
+ u16 vsi_handle_arr[2];
+
+ /* A rule already exists with the new VSI being added */
+ if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
+ new_fltr->sw_act.fwd_id.hw_vsi_id)
+ return ICE_ERR_ALREADY_EXISTS;
+
+ vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
+ vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
+ status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
+ &vsi_list_id,
+ ICE_SW_LKUP_LAST);
+ if (status)
+ return status;
+
+ memset(&tmp_fltr, 0, sizeof(tmp_fltr));
+ tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
+ tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
+ tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
+ tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
+
+ /* Update the previous switch rule of "forward to VSI" to
+ * "fwd to VSI list"
+ */
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
+ if (status)
+ return status;
+
+ cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
+ cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
+ m_entry->vsi_list_info =
+ ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
+ vsi_list_id);
+ } else {
+ u16 vsi_handle = new_fltr->sw_act.vsi_handle;
+
+ if (!m_entry->vsi_list_info)
+ return ICE_ERR_CFG;
+
+ /* A rule already exists with the new VSI being added */
+ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
+ return 0;
+
+ /* Update the previously created VSI list set with
+ * the new VSI ID passed in
+ */
+ vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
+
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
+ vsi_list_id, false,
+ ice_aqc_opc_update_sw_rules,
+ ICE_SW_LKUP_LAST);
+ /* update VSI list mapping info with new VSI ID */
+ if (!status)
+ set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
+ }
+ if (!status)
+ m_entry->vsi_count++;
+ return status;
+}
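+
+/* Example flow through the function above (hypothetical handles): rule R
+ * currently forwards to VSI 3 and VSI 5 asks to subscribe.
+ *
+ *	vsi_count == 1, no vsi_list_info yet
+ *	  -> create VSI list {3, 5}, rewrite R from ICE_FWD_TO_VSI to
+ *	     ICE_FWD_TO_VSI_LIST and remember the list in vsi_list_info
+ *	vsi_count >= 2, vsi_list_info present
+ *	  -> add VSI 5 to the existing list and set its bit in vsi_map
+ */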
+
+/**
+ * ice_add_adv_rule - helper function to create an advanced switch rule
+ * @hw: pointer to the hardware structure
+ * @lkups: information on the words that need to be looked up. All words
+ * together make one recipe
+ * @lkups_cnt: num of entries in the lkups array
+ * @rinfo: other information related to the rule that needs to be programmed
+ * @added_entry: this will return recipe_id, rule_id and vsi_handle. Should be
+ * ignored in case of error.
+ *
+ * This function can program only 1 rule at a time. The lkups is used to
+ * describe all the words that form the "lookup" portion of the recipe.
+ * These words can span multiple protocols. Callers to this function need to
+ * pass in a list of protocol headers with lookup information along with a
+ * mask that determines which words are valid in the given protocol header.
+ * rinfo describes other information related to this rule such as forwarding
+ * IDs, priority of this rule, etc.
+ */
+enum ice_status
+ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
+ struct ice_rule_query_data *added_entry)
+{
+ struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
+ u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
+ const struct ice_dummy_pkt_offsets *pkt_offsets;
+ struct ice_aqc_sw_rules_elem *s_rule = NULL;
+ struct list_head *rule_head;
+ struct ice_switch_info *sw;
+ enum ice_status status;
+ const u8 *pkt = NULL;
+ u16 word_cnt;
+ u32 act = 0;
+ u8 q_rgn;
+
+ /* Initialize profile to result index bitmap */
+ if (!hw->switch_info->prof_res_bm_init) {
+ hw->switch_info->prof_res_bm_init = 1;
+ ice_init_prof_result_bm(hw);
+ }
+
+ if (!lkups_cnt)
+ return ICE_ERR_PARAM;
+
+ /* get # of words we need to match */
+ word_cnt = 0;
+ for (i = 0; i < lkups_cnt; i++) {
+ u16 j, *ptr;
+
+ ptr = (u16 *)&lkups[i].m_u;
+ for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
+ if (ptr[j] != 0)
+ word_cnt++;
+ }
+
+ if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
+ return ICE_ERR_PARAM;
+
+ /* make sure that we can locate a dummy packet */
+ ice_find_dummy_packet(lkups, lkups_cnt, &pkt, &pkt_len,
+ &pkt_offsets);
+ if (!pkt) {
+ status = ICE_ERR_PARAM;
+ goto err_ice_add_adv_rule;
+ }
+
+ if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
+ rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
+ rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
+ rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
+ return ICE_ERR_CFG;
+
+ vsi_handle = rinfo->sw_act.vsi_handle;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
+ rinfo->sw_act.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, vsi_handle);
+ if (rinfo->sw_act.flag & ICE_FLTR_TX)
+ rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
+ if (status)
+ return status;
+ m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
+ if (m_entry) {
+ /* The rule already exists. Add the new VSI to it and increment
+ * vsi_count: if the rule currently forwards to a single VSI,
+ * create a VSI list holding the existing VSI ID and the new VSI
+ * ID; if a VSI list already exists, simply add the new VSI to
+ * that list.
+ */
+ status = ice_adv_add_update_vsi_list(hw, m_entry,
+ &m_entry->rule_info,
+ rinfo);
+ if (added_entry) {
+ added_entry->rid = rid;
+ added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
+ added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
+ }
+ return status;
+ }
+ rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
+ s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+ if (!rinfo->flags_info.act_valid) {
+ act |= ICE_SINGLE_ACT_LAN_ENABLE;
+ act |= ICE_SINGLE_ACT_LB_ENABLE;
+ } else {
+ act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
+ ICE_SINGLE_ACT_LB_ENABLE);
+ }
+
+ switch (rinfo->sw_act.fltr_act) {
+ case ICE_FWD_TO_VSI:
+ act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
+ ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
+ break;
+ case ICE_FWD_TO_Q:
+ act |= ICE_SINGLE_ACT_TO_Q;
+ act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ ICE_SINGLE_ACT_Q_INDEX_M;
+ break;
+ case ICE_FWD_TO_QGRP:
+ q_rgn = rinfo->sw_act.qgrp_size > 0 ?
+ (u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
+ act |= ICE_SINGLE_ACT_TO_Q;
+ act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
+ ICE_SINGLE_ACT_Q_REGION_M;
+ break;
+ case ICE_DROP_PACKET:
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
+ ICE_SINGLE_ACT_VALID_BIT;
+ break;
+ default:
+ status = ICE_ERR_CFG;
+ goto err_ice_add_adv_rule;
+ }
+
+ /* set the rule LOOKUP type based on the caller-specified 'rx' flag
+ * instead of hardcoding it to be either LOOKUP_TX/RX
+ *
+ * for 'Rx' set the source to be the port number
+ * for 'Tx' set the source to be the source HW VSI number (determined
+ * by caller)
+ */
+ if (rinfo->rx) {
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->pdata.lkup_tx_rx.src =
+ cpu_to_le16(hw->port_info->lport);
+ } else {
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+ s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
+ }
+
+ s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
+ s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+
+ status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
+ pkt_len, pkt_offsets);
+ if (status)
+ goto err_ice_add_adv_rule;
+
+ status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
+ rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
+ NULL);
+ if (status)
+ goto err_ice_add_adv_rule;
+ adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(struct ice_adv_fltr_mgmt_list_entry),
+ GFP_KERNEL);
+ if (!adv_fltr) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_adv_rule;
+ }
+
+ adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
+ lkups_cnt * sizeof(*lkups), GFP_KERNEL);
+ if (!adv_fltr->lkups) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_adv_rule;
+ }
+
+ adv_fltr->lkups_cnt = lkups_cnt;
+ adv_fltr->rule_info = *rinfo;
+ adv_fltr->rule_info.fltr_rule_id =
+ le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+ sw = hw->switch_info;
+ sw->recp_list[rid].adv_rule = true;
+ rule_head = &sw->recp_list[rid].filt_rules;
+
+ if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
+ adv_fltr->vsi_count = 1;
+
+ /* Add rule entry to bookkeeping list */
+ list_add(&adv_fltr->list_entry, rule_head);
+ if (added_entry) {
+ added_entry->rid = rid;
+ added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
+ added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
+ }
+err_ice_add_adv_rule:
+ if (status && adv_fltr) {
+ devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
+ devm_kfree(ice_hw_to_dev(hw), adv_fltr);
+ }
+
+ kfree(s_rule);
+
+ return status;
+}
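+
+/* For illustration, how the 32-bit action word is packed for ICE_FWD_TO_Q
+ * above. The shift/mask values here are assumptions for the example; the
+ * real ones come from the ICE_SINGLE_ACT_* defines:
+ *
+ *	#define ACT_TO_Q	0x1		// assumed "to queue" bit
+ *	#define ACT_Q_INDEX_S	4		// assumed queue index shift
+ *	#define ACT_Q_INDEX_M	(0x7FF << 4)	// assumed queue index mask
+ *
+ *	u32 act = 0;
+ *	u16 q_id = 10;
+ *
+ *	act |= ACT_TO_Q;
+ *	act |= ((u32)q_id << ACT_Q_INDEX_S) & ACT_Q_INDEX_M;
+ *	// act == 0xA1: queue 10 in the index field plus the action bit
+ */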
+
/**
* ice_replay_vsi_fltr - Replay filters for requested VSI
* @hw: pointer to the hardware structure
@@ -2831,6 +5070,229 @@ end:
}
/**
+ * ice_adv_rem_update_vsi_list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle of the VSI to remove
+ * @fm_list: filter management entry for which the VSI list management needs to
+ * be done
+ */
+static enum ice_status
+ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_adv_fltr_mgmt_list_entry *fm_list)
+{
+ struct ice_vsi_list_map_info *vsi_list_info;
+ enum ice_sw_lkup_type lkup_type;
+ enum ice_status status;
+ u16 vsi_list_id;
+
+ if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
+ fm_list->vsi_count == 0)
+ return ICE_ERR_PARAM;
+
+ /* A rule with the VSI being removed does not exist */
+ if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ lkup_type = ICE_SW_LKUP_LAST;
+ vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ fm_list->vsi_count--;
+ clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
+ vsi_list_info = fm_list->vsi_list_info;
+ if (fm_list->vsi_count == 1) {
+ struct ice_fltr_info tmp_fltr;
+ u16 rem_vsi_handle;
+
+ rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+ if (!ice_is_vsi_valid(hw, rem_vsi_handle))
+ return ICE_ERR_OUT_OF_RANGE;
+
+ /* Make sure VSI list is empty before removing it below */
+ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
+ vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ memset(&tmp_fltr, 0, sizeof(tmp_fltr));
+ tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
+ tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
+ fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
+ tmp_fltr.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, rem_vsi_handle);
+ fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, rem_vsi_handle);
+ fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
+
+ /* Update the previous switch rule from "fwd to VSI list" to
+ * "fwd to VSI", since only one VSI remains subscribed
+ */
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+ tmp_fltr.fwd_id.hw_vsi_id, status);
+ return status;
+ }
+ fm_list->vsi_list_info->ref_cnt--;
+
+ /* Remove the VSI list since it is no longer used */
+ status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
+ vsi_list_id, status);
+ return status;
+ }
+
+ list_del(&vsi_list_info->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
+ fm_list->vsi_list_info = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * ice_rem_adv_rule - removes existing advanced switch rule
+ * @hw: pointer to the hardware structure
+ * @lkups: information on the words that need to be looked up. All words
+ * together make one recipe
+ * @lkups_cnt: num of entries in the lkups array
+ * @rinfo: pointer to the rule information for this rule
+ *
+ * This function can be used to remove 1 rule at a time. The lkups is
+ * used to describe all the words that form the "lookup" portion of the
+ * rule. These words can span multiple protocols. Callers to this function
+ * need to pass in a list of protocol headers with lookup information along
+ * with a mask that determines which words are valid in the given protocol
+ * header. rinfo describes other information related to this rule such as
+ * forwarding IDs, priority of this rule, etc.
+ */
+static enum ice_status
+ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
+{
+ struct ice_adv_fltr_mgmt_list_entry *list_elem;
+ struct ice_prot_lkup_ext lkup_exts;
+ enum ice_status status = 0;
+ bool remove_rule = false;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ u16 i, rid, vsi_handle;
+
+ memset(&lkup_exts, 0, sizeof(lkup_exts));
+ for (i = 0; i < lkups_cnt; i++) {
+ u16 count;
+
+ if (lkups[i].type >= ICE_PROTOCOL_LAST)
+ return ICE_ERR_CFG;
+
+ count = ice_fill_valid_words(&lkups[i], &lkup_exts);
+ if (!count)
+ return ICE_ERR_CFG;
+ }
+
+ rid = ice_find_recp(hw, &lkup_exts);
+ /* If we did not find a recipe that matches the existing criteria */
+ if (rid == ICE_MAX_NUM_RECIPES)
+ return ICE_ERR_PARAM;
+
+ rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
+ list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
+ /* the rule is already removed */
+ if (!list_elem)
+ return 0;
+ mutex_lock(rule_lock);
+ if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
+ remove_rule = true;
+ } else if (list_elem->vsi_count > 1) {
+ remove_rule = false;
+ vsi_handle = rinfo->sw_act.vsi_handle;
+ status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
+ } else {
+ vsi_handle = rinfo->sw_act.vsi_handle;
+ status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
+ if (status) {
+ mutex_unlock(rule_lock);
+ return status;
+ }
+ if (list_elem->vsi_count == 0)
+ remove_rule = true;
+ }
+ mutex_unlock(rule_lock);
+ if (remove_rule) {
+ struct ice_aqc_sw_rules_elem *s_rule;
+ u16 rule_buf_sz;
+
+ rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+ s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+ s_rule->pdata.lkup_tx_rx.act = 0;
+ s_rule->pdata.lkup_tx_rx.index =
+ cpu_to_le16(list_elem->rule_info.fltr_rule_id);
+ s_rule->pdata.lkup_tx_rx.hdr_len = 0;
+ status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
+ rule_buf_sz, 1,
+ ice_aqc_opc_remove_sw_rules, NULL);
+ if (!status || status == ICE_ERR_DOES_NOT_EXIST) {
+ struct ice_switch_info *sw = hw->switch_info;
+
+ mutex_lock(rule_lock);
+ list_del(&list_elem->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
+ devm_kfree(ice_hw_to_dev(hw), list_elem);
+ mutex_unlock(rule_lock);
+ if (list_empty(&sw->recp_list[rid].filt_rules))
+ sw->recp_list[rid].adv_rule = false;
+ }
+ kfree(s_rule);
+ }
+ return status;
+}
+
+/**
+ * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
+ * @hw: pointer to the hardware structure
+ * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
+ *
+ * This function is used to remove 1 rule at a time. The removal is based on
+ * the remove_entry parameter. This function will remove the rule for a given
+ * vsi_handle with a given rule_id, both passed in via remove_entry
+ */
+enum ice_status
+ice_rem_adv_rule_by_id(struct ice_hw *hw,
+ struct ice_rule_query_data *remove_entry)
+{
+ struct ice_adv_fltr_mgmt_list_entry *list_itr;
+ struct list_head *list_head;
+ struct ice_adv_rule_info rinfo;
+ struct ice_switch_info *sw;
+
+ sw = hw->switch_info;
+ if (!sw->recp_list[remove_entry->rid].recp_created)
+ return ICE_ERR_PARAM;
+ list_head = &sw->recp_list[remove_entry->rid].filt_rules;
+ list_for_each_entry(list_itr, list_head, list_entry) {
+ if (list_itr->rule_info.fltr_rule_id ==
+ remove_entry->rule_id) {
+ rinfo = list_itr->rule_info;
+ rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
+ return ice_rem_adv_rule(hw, list_itr->lkups,
+ list_itr->lkups_cnt, &rinfo);
+ }
+ }
+ /* either list is empty or unable to find rule */
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
* ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
* @hw: pointer to the hardware structure
* @vsi_handle: driver VSI handle
@@ -2868,12 +5330,15 @@ void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
if (!sw)
return;
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
struct list_head *l_head;
l_head = &sw->recp_list[i].filt_replay_rules;
- ice_rem_sw_rule_info(hw, l_head);
+ if (!sw->recp_list[i].adv_rule)
+ ice_rem_sw_rule_info(hw, l_head);
+ else
+ ice_rem_adv_rule_info(hw, l_head);
}
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index c5db8d56133f..c4dd2062c469 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -14,6 +14,9 @@
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
+#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
+
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
u16 vsi_num;
@@ -122,30 +125,121 @@ struct ice_fltr_info {
u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
};
+struct ice_adv_lkup_elem {
+ enum ice_protocol_type type;
+ union ice_prot_hdr h_u; /* Header values */
+ union ice_prot_hdr m_u; /* Mask of header values to match */
+};
+
+struct ice_sw_act_ctrl {
+ /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
+ u16 src;
+ u16 flag;
+ enum ice_sw_fwd_act_type fltr_act;
+ /* Depending on filter action */
+ union {
+ /* This is a queue ID in case of ICE_FWD_TO_Q and starting
+ * queue ID in case of ICE_FWD_TO_QGRP.
+ */
+ u16 q_id:11;
+ u16 vsi_id:10;
+ u16 hw_vsi_id:10;
+ u16 vsi_list_id:10;
+ } fwd_id;
+ /* software VSI handle */
+ u16 vsi_handle;
+ u8 qgrp_size;
+};
+
+struct ice_rule_query_data {
+ /* Recipe ID for which the requested rule was added */
+ u16 rid;
+ /* Rule ID that was added or is supposed to be removed */
+ u16 rule_id;
+ /* vsi_handle for which Rule was added or is supposed to be removed */
+ u16 vsi_handle;
+};
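+
+/* Typical round trip (sketch): the data returned by ice_add_adv_rule in
+ * added_entry is exactly what ice_rem_adv_rule_by_id needs later:
+ *
+ *	struct ice_rule_query_data entry = {};
+ *	enum ice_status status;
+ *
+ *	status = ice_add_adv_rule(hw, lkups, lkups_cnt, &rinfo, &entry);
+ *	if (!status)
+ *		// later, remove the same rule by ID
+ *		status = ice_rem_adv_rule_by_id(hw, &entry);
+ */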
+
+/* This structure allows passing info about the lb_en and lan_en
+ * flags to ice_add_adv_rule. Values in act are used
+ * only if act_valid is set to true; otherwise default
+ * values are used.
+ */
+struct ice_adv_rule_flags_info {
+ u32 act;
+ u8 act_valid; /* indicate if flags in act are valid */
+};
+
+struct ice_adv_rule_info {
+ struct ice_sw_act_ctrl sw_act;
+ u32 priority;
+ u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
+ u16 fltr_rule_id;
+ struct ice_adv_rule_flags_info flags_info;
+};
+
+/* A collection of one or more four-word recipes */
struct ice_sw_recipe {
- struct list_head l_entry;
+ /* For a chained recipe the root recipe is what should be used for
+ * programming rules
+ */
+ u8 is_root;
+ u8 root_rid;
+ u8 recp_created;
+
+ /* Number of extraction words */
+ u8 n_ext_words;
+ /* Protocol ID and Offset pair (extraction word) to describe the
+ * recipe
+ */
+ struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS];
+ u16 word_masks[ICE_MAX_CHAIN_WORDS];
+
+ /* if this recipe is a collection of other recipes */
+ u8 big_recp;
- /* To protect modification of filt_rule list
- * defined below
+ /* if this recipe is part of another bigger recipe, this is the chain
+ * index corresponding to this recipe
*/
- struct mutex filt_rule_lock;
+ u8 chain_idx;
+
+ /* if this recipe is a collection of other recipes, the count of those
+ * recipes and their recipe IDs
+ */
+ u8 n_grp_count;
+
+ /* Bitmap specifying the IDs associated with this group of recipes */
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
- /* List of type ice_fltr_mgmt_list_entry */
+ /* List of type ice_fltr_mgmt_list_entry or adv_rule */
+ u8 adv_rule;
struct list_head filt_rules;
struct list_head filt_replay_rules;
- /* linked list of type recipe_list_entry */
- struct list_head rg_list;
- /* linked list of type ice_sw_fv_list_entry*/
+ struct mutex filt_rule_lock; /* protect filter rule structure */
+
+ /* Profiles this recipe should be associated with */
struct list_head fv_list;
- struct ice_aqc_recipe_data_elem *r_buf;
- u8 recp_count;
- u8 root_rid;
- u8 num_profs;
- u8 *prof_ids;
- /* recipe bitmap: what all recipes makes this recipe */
- DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ /* Profiles this recipe is associated with */
+ u8 num_profs, *prof_ids;
+
+ /* Bit map for possible result indexes */
+ DECLARE_BITMAP(res_idxs, ICE_MAX_FV_WORDS);
+
+ /* This allows the user to specify the recipe priority.
+ * For now, this becomes 'fwd_priority' when the recipe
+ * is created; recipes can usually have 'fwd' or 'join'
+ * priority.
+ */
+ u8 priority;
+
+ struct list_head rg_list;
+
+ /* AQ buffer associated with this recipe */
+ struct ice_aqc_recipe_data_elem *root_buf;
+ /* This struct saves the fv_words for a given lookup */
+ struct ice_prot_lkup_ext lkup_exts;
};
/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */
@@ -183,6 +277,16 @@ struct ice_fltr_mgmt_list_entry {
u8 counter_index;
};
+struct ice_adv_fltr_mgmt_list_entry {
+ struct list_head list_entry;
+
+ struct ice_adv_lkup_elem *lkups;
+ struct ice_adv_rule_info rule_info;
+ u16 lkups_cnt;
+ struct ice_vsi_list_map_info *vsi_list_info;
+ u16 vsi_count;
+};
+
enum ice_promisc_flags {
ICE_PROMISC_UCAST_RX = 0x1,
ICE_PROMISC_UCAST_TX = 0x2,
@@ -218,6 +322,10 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id);
/* Switch/bridge related commands */
+enum ice_status
+ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
+ struct ice_rule_query_data *added_entry);
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
@@ -227,6 +335,8 @@ enum ice_status
ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
+bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle);
+bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
@@ -245,10 +355,19 @@ enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc);
+enum ice_status
+ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_rem_adv_rule_by_id(struct ice_hw *hw,
+ struct ice_rule_query_data *remove_entry);
+
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
+enum ice_status
+ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
+ u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
new file mode 100644
index 000000000000..725caa160b13
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_tc_lib.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+
+/**
+ * ice_tc_count_lkups - determine lookup count for switch filter
+ * @flags: TC-flower flags
+ * @headers: Pointer to TC flower filter header structure
+ * @fltr: Pointer to outer TC filter structure
+ *
+ * Determine lookup count based on TC flower input for switch filter.
+ */
+static int
+ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
+ struct ice_tc_flower_fltr *fltr)
+{
+ int lkups_cnt = 0;
+
+ if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
+ lkups_cnt++;
+
+ /* are MAC fields specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
+ lkups_cnt++;
+
+ /* is VLAN specified? */
+ if (flags & ICE_TC_FLWR_FIELD_VLAN)
+ lkups_cnt++;
+
+ /* are IPv[4|6] fields specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4))
+ lkups_cnt++;
+ else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
+ ICE_TC_FLWR_FIELD_SRC_IPV6))
+ lkups_cnt++;
+
+ /* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
+ ICE_TC_FLWR_FIELD_SRC_L4_PORT))
+ lkups_cnt++;
+
+ return lkups_cnt;
+}
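+
+/* For illustration: a filter matching dst MAC, VLAN and dst IPv4 sets
+ * three of the flag groups above, so three lookup elements are needed:
+ *
+ *	u32 flags = ICE_TC_FLWR_FIELD_DST_MAC |
+ *		    ICE_TC_FLWR_FIELD_VLAN |
+ *		    ICE_TC_FLWR_FIELD_DEST_IPV4;
+ *
+ *	// ice_tc_count_lkups(flags, headers, fltr) returns 3
+ */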
+
+/**
+ * ice_tc_fill_rules - fill filter rules based on TC fltr
+ * @hw: pointer to HW structure
+ * @flags: tc flower field flags
+ * @tc_fltr: pointer to TC flower filter
+ * @list: list of advanced rule elements
+ * @rule_info: pointer to information about rule
+ * @l4_proto: pointer to information such as L4 proto type
+ *
+ * Fill the ice_adv_lkup_elem list based on TC flower flags and
+ * TC flower headers. This list should be used to add an
+ * advanced filter in hardware.
+ */
+static int
+ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
+ struct ice_tc_flower_fltr *tc_fltr,
+ struct ice_adv_lkup_elem *list,
+ struct ice_adv_rule_info *rule_info,
+ u16 *l4_proto)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
+ int i = 0;
+
+ if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
+ list[i].type = ICE_ETYPE_OL;
+ list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
+ list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
+ i++;
+ }
+
+ if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_SRC_MAC)) {
+ struct ice_tc_l2_hdr *l2_key, *l2_mask;
+
+ l2_key = &headers->l2_key;
+ l2_mask = &headers->l2_mask;
+
+ list[i].type = ICE_MAC_OFOS;
+ if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
+ ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
+ l2_key->dst_mac);
+ ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
+ l2_mask->dst_mac);
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
+ ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
+ l2_key->src_mac);
+ ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
+ l2_mask->src_mac);
+ }
+ i++;
+ }
+
+ /* copy VLAN info */
+ if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ list[i].type = ICE_VLAN_OFOS;
+ list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+ i++;
+ }
+
+ /* copy L3 (IPv[4|6]: src, dest) address */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
+ ICE_TC_FLWR_FIELD_SRC_IPV4)) {
+ struct ice_tc_l3_hdr *l3_key, *l3_mask;
+
+ list[i].type = ICE_IPV4_OFOS;
+ l3_key = &headers->l3_key;
+ l3_mask = &headers->l3_mask;
+ if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
+ list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
+ list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
+ list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
+ list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
+ }
+ i++;
+ } else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
+ ICE_TC_FLWR_FIELD_SRC_IPV6)) {
+ struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
+ struct ice_tc_l3_hdr *l3_key, *l3_mask;
+
+ list[i].type = ICE_IPV6_OFOS;
+ ipv6_hdr = &list[i].h_u.ipv6_hdr;
+ ipv6_mask = &list[i].m_u.ipv6_hdr;
+ l3_key = &headers->l3_key;
+ l3_mask = &headers->l3_mask;
+
+ if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
+ memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
+ sizeof(l3_key->dst_ipv6_addr));
+ memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
+ sizeof(l3_mask->dst_ipv6_addr));
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
+ memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
+ sizeof(l3_key->src_ipv6_addr));
+ memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
+ sizeof(l3_mask->src_ipv6_addr));
+ }
+ i++;
+ }
+
+ /* copy L4 (src, dest) port */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
+ ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
+ struct ice_tc_l4_hdr *l4_key, *l4_mask;
+
+ l4_key = &headers->l4_key;
+ l4_mask = &headers->l4_mask;
+ if (headers->l3_key.ip_proto == IPPROTO_TCP) {
+ list[i].type = ICE_TCP_IL;
+ /* detected L4 proto is TCP */
+ if (l4_proto)
+ *l4_proto = IPPROTO_TCP;
+ } else if (headers->l3_key.ip_proto == IPPROTO_UDP) {
+ list[i].type = ICE_UDP_ILOS;
+ /* detected L4 proto is UDP */
+ if (l4_proto)
+ *l4_proto = IPPROTO_UDP;
+ }
+ if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
+ list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
+ list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
+ list[i].h_u.l4_hdr.src_port = l4_key->src_port;
+ list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
+ }
+ i++;
+ }
+
+ return i;
+}
+
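+/* A minimal sketch of how the two helpers above are meant to pair up
+ * (illustrative, not driver code); the callers below perform the same
+ * check before programming hardware:
+ *
+ *	lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
+ *	i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
+ *	if (i != lkups_cnt)
+ *		return -EINVAL;
+ */
+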
+static int
+ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
+ struct flow_action_entry *act)
+{
+ struct ice_repr *repr;
+
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+ fltr->action.fltr_act = ICE_DROP_PACKET;
+ break;
+
+ case FLOW_ACTION_REDIRECT:
+ fltr->action.fltr_act = ICE_FWD_TO_VSI;
+
+ if (ice_is_port_repr_netdev(act->dev)) {
+ repr = ice_netdev_to_repr(act->dev);
+
+ fltr->dest_vsi = repr->src_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+ } else if (netif_is_ice(act->dev)) {
+ struct ice_netdev_priv *np = netdev_priv(act->dev);
+
+ fltr->dest_vsi = np->vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
+ return -EINVAL;
+ }
+
+ break;
+
+ default:
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
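+/* For reference (assumed userspace usage, not part of this patch): a
+ * switchdev rule such as
+ *
+ *	tc filter add dev $VF_REP ingress protocol ip flower skip_sw \
+ *		action mirred egress redirect dev $UPLINK
+ *
+ * reaches this parser as FLOW_ACTION_REDIRECT, while "action drop"
+ * arrives as FLOW_ACTION_DROP and maps to ICE_DROP_PACKET above.
+ */
+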
+static int
+ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+ struct ice_adv_rule_info rule_info = { 0 };
+ struct ice_rule_query_data rule_added;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_adv_lkup_elem *list;
+ u32 flags = fltr->flags;
+ enum ice_status status;
+ int lkups_cnt;
+ int ret = 0;
+ int i;
+
+ if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
+ return -EOPNOTSUPP;
+ }
+
+ lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ return -ENOMEM;
+
+ i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
+ if (i != lkups_cnt) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ rule_info.sw_act.fltr_act = fltr->action.fltr_act;
+ if (fltr->action.fltr_act != ICE_DROP_PACKET)
+ rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
+	/* For now, make the priority the highest; it also becomes the
+	 * priority of the recipe that gets created as a result of the new
+	 * extraction sequence based on the input set.
+	 * Priority 7 is the max value for a switch recipe; the higher the
+	 * number, the earlier the rule is evaluated.
+	 */
+ rule_info.priority = 7;
+
+ if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
+ rule_info.sw_act.flag |= ICE_FLTR_RX;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.rx = true;
+ } else {
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ rule_info.rx = false;
+ rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+ rule_info.flags_info.act_valid = true;
+ }
+
+ /* specify the cookie as filter_rule_id */
+ rule_info.fltr_rule_id = fltr->cookie;
+
+ status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
+ if (status == ICE_ERR_ALREADY_EXISTS) {
+		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
+ ret = -EINVAL;
+ goto exit;
+ } else if (status) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
+ ret = -EIO;
+ goto exit;
+ }
+
+ /* store the output params, which are needed later for removing
+ * advanced switch filter
+ */
+ fltr->rid = rule_added.rid;
+ fltr->rule_id = rule_added.rule_id;
+
+exit:
+ kfree(list);
+ return ret;
+}
+
+/**
+ * ice_add_tc_flower_adv_fltr - add appropriate filter rules
+ * @vsi: Pointer to VSI
+ * @tc_fltr: Pointer to TC flower filter structure
+ *
+ * Add appropriate filter rules based on filter parameters, using
+ * advanced recipes supported by the OS package.
+ */
+static int
+ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
+ struct ice_tc_flower_fltr *tc_fltr)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
+ struct ice_adv_rule_info rule_info = {0};
+ struct ice_rule_query_data rule_added;
+ struct ice_adv_lkup_elem *list;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 flags = tc_fltr->flags;
+ struct ice_vsi *ch_vsi;
+ struct device *dev;
+ u16 lkups_cnt = 0;
+ u16 l4_proto = 0;
+ int ret = 0;
+ u16 i = 0;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_is_safe_mode(pf)) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
+ return -EOPNOTSUPP;
+ }
+
+ /* get the channel (aka ADQ VSI) */
+ if (tc_fltr->dest_vsi)
+ ch_vsi = tc_fltr->dest_vsi;
+ else
+ ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];
+
+ lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ return -ENOMEM;
+
+ i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
+ if (i != lkups_cnt) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
+ if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
+ if (!ch_vsi) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
+ rule_info.sw_act.vsi_handle = ch_vsi->idx;
+ rule_info.priority = 7;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.rx = true;
+ dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
+ tc_fltr->action.tc_class,
+ rule_info.sw_act.vsi_handle, lkups_cnt);
+ } else {
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ rule_info.rx = false;
+ }
+
+ /* specify the cookie as filter_rule_id */
+ rule_info.fltr_rule_id = tc_fltr->cookie;
+
+ ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
+ if (ret == -EEXIST) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+ "Unable to add filter because it already exist");
+ ret = -EINVAL;
+ goto exit;
+ } else if (ret) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+ "Unable to add filter due to error");
+ ret = -EIO;
+ goto exit;
+ }
+
+ /* store the output params, which are needed later for removing
+ * advanced switch filter
+ */
+ tc_fltr->rid = rule_added.rid;
+ tc_fltr->rule_id = rule_added.rule_id;
+ if (tc_fltr->action.tc_class > 0 && ch_vsi) {
+		/* For PF ADQ, the VSI type is set as ICE_VSI_CHNL, and for a
+		 * PF ADQ filter dest_vsi is not yet set in tc_fltr, hence
+		 * store the dest_vsi pointer in tc_fltr here
+		 */
+ if (ch_vsi->type == ICE_VSI_CHNL)
+ tc_fltr->dest_vsi = ch_vsi;
+ /* keep track of advanced switch filter for
+ * destination VSI (channel VSI)
+ */
+ ch_vsi->num_chnl_fltr++;
+ /* in this case, dest_id is VSI handle (sw handle) */
+ tc_fltr->dest_id = rule_added.vsi_handle;
+
+ /* keeps track of channel filters for PF VSI */
+ if (vsi->type == ICE_VSI_PF &&
+ (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
+ pf->num_dmac_chnl_fltrs++;
+ }
+ dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
+ lkups_cnt, flags,
+ tc_fltr->action.tc_class, rule_added.rid,
+ rule_added.rule_id, rule_added.vsi_handle);
+exit:
+ kfree(list);
+ return ret;
+}
+
+/**
+ * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ */
+static int
+ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers)
+{
+ if (match->key->dst) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
+ headers->l3_key.dst_ipv4 = match->key->dst;
+ headers->l3_mask.dst_ipv4 = match->mask->dst;
+ }
+ if (match->key->src) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
+ headers->l3_key.src_ipv4 = match->key->src;
+ headers->l3_mask.src_ipv4 = match->mask->src;
+ }
+ return 0;
+}
+
+/**
+ * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ */
+static int
+ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers)
+{
+ struct ice_tc_l3_hdr *l3_key, *l3_mask;
+
+	/* src and dest IPv6 addresses should not be LOOPBACK
+	 * (0:0:0:0:0:0:0:1), which can be represented as ::1
+	 */
+ if (ipv6_addr_loopback(&match->key->dst) ||
+ ipv6_addr_loopback(&match->key->src)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
+ return -EINVAL;
+ }
+	/* if both src and dest IPv6 addresses are unspecified (any), error */
+ if (ipv6_addr_any(&match->mask->dst) &&
+ ipv6_addr_any(&match->mask->src)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
+ return -EINVAL;
+ }
+ if (!ipv6_addr_any(&match->mask->dst))
+ fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
+ if (!ipv6_addr_any(&match->mask->src))
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
+
+ l3_key = &headers->l3_key;
+ l3_mask = &headers->l3_mask;
+
+ if (fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
+ memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
+ sizeof(match->key->src.s6_addr));
+ memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
+ sizeof(match->mask->src.s6_addr));
+ }
+ if (fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
+ memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
+ sizeof(match->key->dst.s6_addr));
+ memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
+ sizeof(match->mask->dst.s6_addr));
+ }
+
+ return 0;
+}
+
+/**
+ * ice_tc_set_port - Parse ports from TC flower filter
+ * @match: Flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ */
+static int
+ice_tc_set_port(struct flow_match_ports match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers)
+{
+ if (match.key->dst) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
+ headers->l4_key.dst_port = match.key->dst;
+ headers->l4_mask.dst_port = match.mask->dst;
+ }
+ if (match.key->src) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
+ headers->l4_key.src_port = match.key->src;
+ headers->l4_mask.src_port = match.mask->src;
+ }
+ return 0;
+}
+
+/**
+ * ice_parse_cls_flower - Parse TC flower filters provided by kernel
+ * @filter_dev: Pointer to device on which filter is being added
+ * @vsi: Pointer to the VSI
+ * @f: Pointer to struct flow_cls_offload
+ * @fltr: Pointer to filter structure
+ */
+static int
+ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
+ struct flow_cls_offload *f,
+ struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
+ struct flow_dissector *dissector;
+
+ dissector = rule->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+
+ n_proto_key = ntohs(match.key->n_proto);
+ n_proto_mask = ntohs(match.mask->n_proto);
+
+ if (n_proto_key == ETH_P_ALL || n_proto_key == 0) {
+ n_proto_key = 0;
+ n_proto_mask = 0;
+ } else {
+ fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
+ }
+
+ headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
+ headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
+ headers->l3_key.ip_proto = match.key->ip_proto;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.key->dst)) {
+ ether_addr_copy(headers->l2_key.dst_mac,
+ match.key->dst);
+ ether_addr_copy(headers->l2_mask.dst_mac,
+ match.mask->dst);
+ fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
+ }
+
+ if (!is_zero_ether_addr(match.key->src)) {
+ ether_addr_copy(headers->l2_key.src_mac,
+ match.key->src);
+ ether_addr_copy(headers->l2_mask.src_mac,
+ match.mask->src);
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
+ is_vlan_dev(filter_dev)) {
+ struct flow_dissector_key_vlan mask;
+ struct flow_dissector_key_vlan key;
+ struct flow_match_vlan match;
+
+ if (is_vlan_dev(filter_dev)) {
+ match.key = &key;
+ match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
+ match.key->vlan_priority = 0;
+ match.mask = &mask;
+ memset(match.mask, 0xff, sizeof(*match.mask));
+ match.mask->vlan_priority = 0;
+ } else {
+ flow_rule_match_vlan(rule, &match);
+ }
+
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id == VLAN_VID_MASK) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
+ return -EINVAL;
+ }
+ }
+
+ headers->vlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
+ if (match.mask->vlan_priority)
+ headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+
+ addr_type = match.key->addr_type;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if (ice_tc_set_ipv4(&match, fltr, headers))
+ return -EINVAL;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+ if (ice_tc_set_ipv6(&match, fltr, headers))
+ return -EINVAL;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ if (ice_tc_set_port(match, fltr, headers))
+ return -EINVAL;
+ switch (headers->l3_key.ip_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ice_add_switch_fltr - Add TC flower filters
+ * @vsi: Pointer to VSI
+ * @fltr: Pointer to struct ice_tc_flower_fltr
+ *
+ * Add filter in HW switch block
+ */
+static int
+ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+{
+ if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
+ return -EOPNOTSUPP;
+
+ if (ice_is_eswitch_mode_switchdev(vsi->back))
+ return ice_eswitch_add_tc_fltr(vsi, fltr);
+
+ return ice_add_tc_flower_adv_fltr(vsi, fltr);
+}
+
+/**
+ * ice_handle_tclass_action - Support directing to a traffic class
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to TC flower offload structure
+ * @fltr: Pointer to TC flower filter structure
+ *
+ * Support directing traffic to a traffic class
+ */
+static int
+ice_handle_tclass_action(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower,
+ struct ice_tc_flower_fltr *fltr)
+{
+ int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
+ struct ice_vsi *main_vsi;
+
+ if (tc < 0) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
+ return -EINVAL;
+ }
+ if (!tc) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
+ return -EINVAL;
+ }
+
+ if (!(vsi->all_enatc & BIT(tc))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existence destination");
+ return -EINVAL;
+ }
+
+ /* Redirect to a TC class or Queue Group */
+ main_vsi = ice_get_main_vsi(vsi->back);
+ if (!main_vsi || !main_vsi->netdev) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because of invalid netdevice");
+ return -EINVAL;
+ }
+
+ if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
+ (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_SRC_MAC))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination");
+ return -EOPNOTSUPP;
+ }
+
+	/* For ADQ, the filter must include a dest MAC address, otherwise
+	 * unwanted packets with unrelated MAC addresses get delivered to ADQ
+	 * VSIs as long as the remaining filter criteria are satisfied, such as
+	 * dest IP address and dest/src L4 port. The following code handles:
+	 * 1. For non-tunnel, if the user specifies MAC addresses, use them
+	 *    (meaning this code does nothing)
+	 * 2. For non-tunnel, if the user didn't specify a MAC address, add an
+	 *    implicit dest MAC equal to the lower netdev's active unicast MAC
+	 *    address
+	 */
+ if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) {
+ ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
+ main_vsi->netdev->dev_addr);
+ eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
+ fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
+ }
+
+	/* validate the specified dest MAC address; make sure it either belongs
+	 * to the lower netdev or to one of its MACVLANs. MACVLAN MAC addresses
+	 * are added as unicast MAC filters destined to the main VSI.
+	 */
+ if (!ice_mac_fltr_exist(&main_vsi->back->hw,
+ fltr->outer_headers.l2_key.dst_mac,
+ main_vsi->idx)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
+ return -EINVAL;
+ }
+
+ /* Make sure VLAN is already added to main VSI, before allowing ADQ to
+ * add a VLAN based filter such as MAC + VLAN + L4 port.
+ */
+ if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
+ u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);
+
+ if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
+ main_vsi->idx)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
+ return -EINVAL;
+ }
+ }
+ fltr->action.fltr_act = ICE_FWD_TO_VSI;
+ fltr->action.tc_class = tc;
+
+ return 0;
+}
+
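+/* Example (hypothetical interface and addresses): a flow can be steered
+ * to ADQ traffic class 1 with a command like
+ *
+ *	tc filter add dev eth0 protocol ip ingress flower dst_ip 192.168.0.1 \
+ *		ip_proto tcp dst_port 80 skip_sw hw_tc 1
+ *
+ * The hw_tc option sets cls_flower->classid, which tc_classid_to_hwtc()
+ * above resolves to tc == 1.
+ */
+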
+/**
+ * ice_parse_tc_flower_actions - Parse the actions for a TC filter
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to TC flower offload structure
+ * @fltr: Pointer to TC flower filter structure
+ *
+ * Parse the actions for a TC filter
+ */
+static int
+ice_parse_tc_flower_actions(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower,
+ struct ice_tc_flower_fltr *fltr)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
+ struct flow_action *flow_action = &rule->action;
+ struct flow_action_entry *act;
+ int i;
+
+ if (cls_flower->classid)
+ return ice_handle_tclass_action(vsi, cls_flower, fltr);
+
+ if (!flow_action_has_entries(flow_action))
+ return -EINVAL;
+
+ flow_action_for_each(i, act, flow_action) {
+ if (ice_is_eswitch_mode_switchdev(vsi->back)) {
+ int err = ice_eswitch_tc_parse_action(fltr, act);
+
+ if (err)
+ return err;
+ continue;
+ }
+ /* Allow only one rule per filter */
+
+ /* Drop action */
+ if (act->id == FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
+ return -EINVAL;
+ }
+ fltr->action.fltr_act = ICE_FWD_TO_VSI;
+ }
+ return 0;
+}
+
+/**
+ * ice_del_tc_fltr - deletes a filter from HW table
+ * @vsi: Pointer to VSI
+ * @fltr: Pointer to struct ice_tc_flower_fltr
+ *
+ * This function deletes a filter from the HW table and manages book-keeping
+ */
+static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_rule_query_data rule_rem;
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ rule_rem.rid = fltr->rid;
+ rule_rem.rule_id = fltr->rule_id;
+ rule_rem.vsi_handle = fltr->dest_id;
+ err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
+ if (err) {
+ if (err == ICE_ERR_DOES_NOT_EXIST) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
+ return -ENOENT;
+ }
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
+ return -EIO;
+ }
+
+ /* update advanced switch filter count for destination
+ * VSI if filter destination was VSI
+ */
+ if (fltr->dest_vsi) {
+ if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
+ fltr->dest_vsi->num_chnl_fltr--;
+
+ /* keeps track of channel filters for PF VSI */
+ if (vsi->type == ICE_VSI_PF &&
+ (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
+ pf->num_dmac_chnl_fltrs--;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ice_add_tc_fltr - adds a TC flower filter
+ * @netdev: Pointer to netdev
+ * @vsi: Pointer to VSI
+ * @f: Pointer to flower offload structure
+ * @__fltr: Pointer to struct ice_tc_flower_fltr
+ *
+ * This function parses TC-flower input fields, parses action,
+ * and adds a filter.
+ */
+static int
+ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *f,
+ struct ice_tc_flower_fltr **__fltr)
+{
+ struct ice_tc_flower_fltr *fltr;
+ int err;
+
+ /* by default, set output to be INVALID */
+ *__fltr = NULL;
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr)
+ return -ENOMEM;
+
+ fltr->cookie = f->cookie;
+ fltr->extack = f->common.extack;
+ fltr->src_vsi = vsi;
+ INIT_HLIST_NODE(&fltr->tc_flower_node);
+
+ err = ice_parse_cls_flower(netdev, vsi, f, fltr);
+ if (err < 0)
+ goto err;
+
+ err = ice_parse_tc_flower_actions(vsi, f, fltr);
+ if (err < 0)
+ goto err;
+
+ err = ice_add_switch_fltr(vsi, fltr);
+ if (err < 0)
+ goto err;
+
+ /* return the newly created filter */
+ *__fltr = fltr;
+
+ return 0;
+err:
+ kfree(fltr);
+ return err;
+}
+
+/**
+ * ice_find_tc_flower_fltr - Find the TC flower filter in the list
+ * @pf: Pointer to PF
+ * @cookie: filter specific cookie
+ */
+static struct ice_tc_flower_fltr *
+ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
+{
+ struct ice_tc_flower_fltr *fltr;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
+ if (cookie == fltr->cookie)
+ return fltr;
+
+ return NULL;
+}
+
+/**
+ * ice_add_cls_flower - add TC flower filters
+ * @netdev: Pointer to filter device
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to flower offload structure
+ */
+int
+ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower)
+{
+ struct netlink_ext_ack *extack = cls_flower->common.extack;
+ struct net_device *vsi_netdev = vsi->netdev;
+ struct ice_tc_flower_fltr *fltr;
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (ice_is_reset_in_progress(pf->state))
+ return -EBUSY;
+ if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+ return -EINVAL;
+
+ if (ice_is_port_repr_netdev(netdev))
+ vsi_netdev = netdev;
+
+ if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
+ !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
+		/* Based on TC indirect notifications from the kernel, all ice
+		 * devices get an instance of the rule from the higher-level
+		 * device. Avoid triggering an explicit error in this case.
+		 */
+ if (netdev == vsi_netdev)
+ NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
+ return -EINVAL;
+ }
+
+	/* avoid duplicate entries; if one already exists, return an error */
+ fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
+ if (fltr) {
+ NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
+ return -EEXIST;
+ }
+
+ /* prep and add TC-flower filter in HW */
+ err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
+ if (err)
+ return err;
+
+ /* add filter into an ordered list */
+ hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
+ return 0;
+}
+
+/**
+ * ice_del_cls_flower - delete TC flower filters
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to struct flow_cls_offload
+ */
+int
+ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ /* find filter */
+ fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
+ if (!fltr) {
+ if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
+ hlist_empty(&pf->tc_flower_fltr_list))
+ return 0;
+
+		NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because it cannot be found");
+ return -EINVAL;
+ }
+
+ fltr->extack = cls_flower->common.extack;
+ /* delete filter from HW */
+ err = ice_del_tc_fltr(vsi, fltr);
+ if (err)
+ return err;
+
+ /* delete filter from an ordered list */
+ hlist_del(&fltr->tc_flower_node);
+
+ /* free the filter node */
+ kfree(fltr);
+
+ return 0;
+}
+
+/**
+ * ice_replay_tc_fltrs - replay TC filters
+ * @pf: pointer to PF struct
+ */
+void ice_replay_tc_fltrs(struct ice_pf *pf)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(fltr, node,
+ &pf->tc_flower_fltr_list,
+ tc_flower_node) {
+ fltr->extack = NULL;
+ ice_add_switch_fltr(fltr->src_vsi, fltr);
+ }
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
new file mode 100644
index 000000000000..ee9b284fcc02
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_TC_LIB_H_
+#define _ICE_TC_LIB_H_
+
+#define ICE_TC_FLWR_FIELD_DST_MAC BIT(0)
+#define ICE_TC_FLWR_FIELD_SRC_MAC BIT(1)
+#define ICE_TC_FLWR_FIELD_VLAN BIT(2)
+#define ICE_TC_FLWR_FIELD_DEST_IPV4 BIT(3)
+#define ICE_TC_FLWR_FIELD_SRC_IPV4 BIT(4)
+#define ICE_TC_FLWR_FIELD_DEST_IPV6 BIT(5)
+#define ICE_TC_FLWR_FIELD_SRC_IPV6 BIT(6)
+#define ICE_TC_FLWR_FIELD_DEST_L4_PORT BIT(7)
+#define ICE_TC_FLWR_FIELD_SRC_L4_PORT BIT(8)
+#define ICE_TC_FLWR_FIELD_TENANT_ID BIT(9)
+#define ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 BIT(10)
+#define ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 BIT(11)
+#define ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 BIT(12)
+#define ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 BIT(13)
+#define ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT BIT(14)
+#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15)
+#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
+#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
+
+struct ice_tc_flower_action {
+ u32 tc_class;
+ enum ice_sw_fwd_act_type fltr_act;
+};
+
+struct ice_tc_vlan_hdr {
+ __be16 vlan_id; /* Only last 12 bits valid */
+ u16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
+};
+
+struct ice_tc_l2_hdr {
+ u8 dst_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ __be16 n_proto; /* Ethernet Protocol */
+};
+
+struct ice_tc_l3_hdr {
+ u8 ip_proto; /* IPPROTO value */
+ union {
+ struct {
+ struct in_addr dst_ip;
+ struct in_addr src_ip;
+ } v4;
+ struct {
+ struct in6_addr dst_ip6;
+ struct in6_addr src_ip6;
+ } v6;
+ } ip;
+#define dst_ipv6 ip.v6.dst_ip6.s6_addr32
+#define dst_ipv6_addr ip.v6.dst_ip6.s6_addr
+#define src_ipv6 ip.v6.src_ip6.s6_addr32
+#define src_ipv6_addr ip.v6.src_ip6.s6_addr
+#define dst_ipv4 ip.v4.dst_ip.s_addr
+#define src_ipv4 ip.v4.src_ip.s_addr
+
+ u8 tos;
+ u8 ttl;
+};
+
+struct ice_tc_l4_hdr {
+ __be16 dst_port;
+ __be16 src_port;
+};
+
+struct ice_tc_flower_lyr_2_4_hdrs {
+ /* L2 layer fields with their mask */
+ struct ice_tc_l2_hdr l2_key;
+ struct ice_tc_l2_hdr l2_mask;
+ struct ice_tc_vlan_hdr vlan_hdr;
+ /* L3 (IPv4[6]) layer fields with their mask */
+ struct ice_tc_l3_hdr l3_key;
+ struct ice_tc_l3_hdr l3_mask;
+
+ /* L4 layer fields with their mask */
+ struct ice_tc_l4_hdr l4_key;
+ struct ice_tc_l4_hdr l4_mask;
+};
+
+enum ice_eswitch_fltr_direction {
+ ICE_ESWITCH_FLTR_INGRESS,
+ ICE_ESWITCH_FLTR_EGRESS,
+};
+
+struct ice_tc_flower_fltr {
+ struct hlist_node tc_flower_node;
+
+ /* cookie becomes filter_rule_id if rule is added successfully */
+ unsigned long cookie;
+
+	/* add_adv_rule returns information like the recipe ID and rule_id.
+	 * Store those values since they are needed to remove the advanced rule
+ */
+ u16 rid;
+ u16 rule_id;
+ /* this could be queue/vsi_idx (sw handle)/queue_group, depending upon
+ * destination type
+ */
+ u16 dest_id;
+ /* if dest_id is vsi_idx, then need to store destination VSI ptr */
+ struct ice_vsi *dest_vsi;
+ /* direction of fltr for eswitch use case */
+ enum ice_eswitch_fltr_direction direction;
+
+ /* Parsed TC flower configuration params */
+ struct ice_tc_flower_lyr_2_4_hdrs outer_headers;
+ struct ice_tc_flower_lyr_2_4_hdrs inner_headers;
+ struct ice_vsi *src_vsi;
+ __be32 tenant_id;
+ u32 flags;
+ struct ice_tc_flower_action action;
+
+ /* cache ptr which is used wherever needed to communicate netlink
+ * messages
+ */
+ struct netlink_ext_ack *extack;
+};
+
+/**
+ * ice_is_chnl_fltr - is this a valid channel filter
+ * @f: Pointer to tc-flower filter
+ *
+ * Whether a given filter is a valid channel filter is determined by its
+ * "destination": if the destination is a hw_tc (aka tc_class) and it is
+ * non-zero, then it is a valid channel (aka ADQ) filter
+ */
+static inline bool ice_is_chnl_fltr(struct ice_tc_flower_fltr *f)
+{
+ return !!f->action.tc_class;
+}
+
+/**
+ * ice_chnl_dmac_fltr_cnt - DMAC based CHNL filter count
+ * @pf: Pointer to PF
+ */
+static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf)
+{
+ return pf->num_dmac_chnl_fltrs;
+}
+
+int
+ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower);
+int
+ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
+void ice_replay_tc_fltrs(struct ice_pf *pf);
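+
+/* Minimal usage sketch (assumed caller context, not part of this header):
+ * the ndo_setup_tc() flower callbacks are expected to route into this API
+ * roughly as follows:
+ *
+ *	switch (cls_flower->command) {
+ *	case FLOW_CLS_REPLACE:
+ *		return ice_add_cls_flower(netdev, vsi, cls_flower);
+ *	case FLOW_CLS_DESTROY:
+ *		return ice_del_cls_flower(vsi, cls_flower);
+ *	default:
+ *		return -EOPNOTSUPP;
+ *	}
+ */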
+
+#endif /* _ICE_TC_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index 9bc0b8fdfc77..cf685247c07a 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -64,15 +64,15 @@ DECLARE_EVENT_CLASS(ice_rx_dim_template,
TP_ARGS(q_vector, dim),
TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
__field(struct dim *, dim)
- __string(devname, q_vector->rx.ring->netdev->name)),
+ __string(devname, q_vector->rx.rx_ring->netdev->name)),
TP_fast_assign(__entry->q_vector = q_vector;
__entry->dim = dim;
- __assign_str(devname, q_vector->rx.ring->netdev->name);),
+ __assign_str(devname, q_vector->rx.rx_ring->netdev->name);),
TP_printk("netdev: %s Rx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
__get_str(devname),
- __entry->q_vector->rx.ring->q_index,
+ __entry->q_vector->rx.rx_ring->q_index,
__entry->dim->state,
__entry->dim->profile_ix,
__entry->dim->tune_state,
@@ -91,15 +91,15 @@ DECLARE_EVENT_CLASS(ice_tx_dim_template,
TP_ARGS(q_vector, dim),
TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
__field(struct dim *, dim)
- __string(devname, q_vector->tx.ring->netdev->name)),
+ __string(devname, q_vector->tx.tx_ring->netdev->name)),
TP_fast_assign(__entry->q_vector = q_vector;
__entry->dim = dim;
- __assign_str(devname, q_vector->tx.ring->netdev->name);),
+ __assign_str(devname, q_vector->tx.tx_ring->netdev->name);),
TP_printk("netdev: %s Tx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
__get_str(devname),
- __entry->q_vector->tx.ring->q_index,
+ __entry->q_vector->tx.tx_ring->q_index,
__entry->dim->state,
__entry->dim->profile_ix,
__entry->dim->tune_state,
@@ -115,7 +115,7 @@ DEFINE_EVENT(ice_tx_dim_template, ice_tx_dim_work,
/* Events related to a vsi & ring */
DECLARE_EVENT_CLASS(ice_tx_template,
- TP_PROTO(struct ice_ring *ring, struct ice_tx_desc *desc,
+ TP_PROTO(struct ice_tx_ring *ring, struct ice_tx_desc *desc,
struct ice_tx_buf *buf),
TP_ARGS(ring, desc, buf),
@@ -135,7 +135,7 @@ DECLARE_EVENT_CLASS(ice_tx_template,
#define DEFINE_TX_TEMPLATE_OP_EVENT(name) \
DEFINE_EVENT(ice_tx_template, name, \
- TP_PROTO(struct ice_ring *ring, \
+ TP_PROTO(struct ice_tx_ring *ring, \
struct ice_tx_desc *desc, \
struct ice_tx_buf *buf), \
TP_ARGS(ring, desc, buf))
@@ -145,7 +145,7 @@ DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap);
DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap_eop);
DECLARE_EVENT_CLASS(ice_rx_template,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc),
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc),
TP_ARGS(ring, desc),
@@ -161,12 +161,12 @@ DECLARE_EVENT_CLASS(ice_rx_template,
__entry->ring, __entry->desc)
);
DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc),
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc),
TP_ARGS(ring, desc)
);
DECLARE_EVENT_CLASS(ice_rx_indicate_template,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc,
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -186,13 +186,13 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template,
);
DEFINE_EVENT(ice_rx_indicate_template, ice_clean_rx_irq_indicate,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc,
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb)
);
DECLARE_EVENT_CLASS(ice_xmit_template,
- TP_PROTO(struct ice_ring *ring, struct sk_buff *skb),
+ TP_PROTO(struct ice_tx_ring *ring, struct sk_buff *skb),
TP_ARGS(ring, skb),
@@ -210,7 +210,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template,
#define DEFINE_XMIT_TEMPLATE_OP_EVENT(name) \
DEFINE_EVENT(ice_xmit_template, name, \
- TP_PROTO(struct ice_ring *ring, struct sk_buff *skb), \
+ TP_PROTO(struct ice_tx_ring *ring, struct sk_buff *skb), \
TP_ARGS(ring, skb))
DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 6ee8e0032d52..bc3ba19dc88f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -6,6 +6,7 @@
#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
+#include <net/dsfield.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
@@ -13,6 +14,7 @@
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
+#include "ice_eswitch.h"
#define ICE_RX_HDR_SIZE 256
@@ -32,7 +34,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
struct ice_tx_buf *tx_buf, *first;
struct ice_fltr_desc *f_desc;
struct ice_tx_desc *tx_desc;
- struct ice_ring *tx_ring;
+ struct ice_tx_ring *tx_ring;
struct device *dev;
dma_addr_t dma;
u32 td_cmd;
@@ -106,7 +108,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
* @tx_buf: the buffer to free
*/
static void
-ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
+ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
if (tx_buf->skb) {
if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
@@ -133,7 +135,7 @@ ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
/* tx_buf must be completely set up in the transmit path */
}
-static struct netdev_queue *txring_txq(const struct ice_ring *ring)
+static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
return netdev_get_tx_queue(ring->netdev, ring->q_index);
}
@@ -142,8 +144,9 @@ static struct netdev_queue *txring_txq(const struct ice_ring *ring)
* ice_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned
*/
-void ice_clean_tx_ring(struct ice_ring *tx_ring)
+void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
+ u32 size;
u16 i;
if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
@@ -162,8 +165,10 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
tx_skip_free:
memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
+ size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ PAGE_SIZE);
/* Zero out the descriptor ring */
- memset(tx_ring->desc, 0, tx_ring->size);
+ memset(tx_ring->desc, 0, size);
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -181,14 +186,18 @@ tx_skip_free:
*
* Free all transmit software resources
*/
-void ice_free_tx_ring(struct ice_ring *tx_ring)
+void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
+ u32 size;
+
ice_clean_tx_ring(tx_ring);
devm_kfree(tx_ring->dev, tx_ring->tx_buf);
tx_ring->tx_buf = NULL;
if (tx_ring->desc) {
- dmam_free_coherent(tx_ring->dev, tx_ring->size,
+ size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(tx_ring->dev, size,
tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -201,7 +210,7 @@ void ice_free_tx_ring(struct ice_ring *tx_ring)
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
-static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
+static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
unsigned int total_bytes = 0, total_pkts = 0;
unsigned int budget = ICE_DFLT_IRQ_WORK;
@@ -238,11 +247,8 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
total_bytes += tx_buf->bytecount;
total_pkts += tx_buf->gso_segs;
- if (ice_ring_is_xdp(tx_ring))
- page_frag_free(tx_buf->raw_buf);
- else
- /* free the skb */
- napi_consume_skb(tx_buf->skb, napi_budget);
+ /* free the skb */
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -298,9 +304,6 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
- if (ice_ring_is_xdp(tx_ring))
- return !!budget;
-
netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
total_bytes);
@@ -329,9 +332,10 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
*
* Return 0 on success, negative on error
*/
-int ice_setup_tx_ring(struct ice_ring *tx_ring)
+int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
+ u32 size;
if (!dev)
return -ENOMEM;
@@ -339,19 +343,19 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(tx_ring->tx_buf);
tx_ring->tx_buf =
- devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
+ devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
GFP_KERNEL);
if (!tx_ring->tx_buf)
return -ENOMEM;
/* round up to nearest page */
- tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
- PAGE_SIZE);
- tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
+ size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ PAGE_SIZE);
+ tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
GFP_KERNEL);
if (!tx_ring->desc) {
dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
- tx_ring->size);
+ size);
goto err;
}
@@ -370,9 +374,10 @@ err:
* ice_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
*/
-void ice_clean_rx_ring(struct ice_ring *rx_ring)
+void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ u32 size;
u16 i;
/* ring already cleared, nothing to do */
@@ -417,7 +422,9 @@ rx_skip_free:
memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
/* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
+ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
+ memset(rx_ring->desc, 0, size);
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
@@ -430,8 +437,10 @@ rx_skip_free:
*
* Free all receive software resources
*/
-void ice_free_rx_ring(struct ice_ring *rx_ring)
+void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
+ u32 size;
+
ice_clean_rx_ring(rx_ring);
if (rx_ring->vsi->type == ICE_VSI_PF)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
@@ -441,7 +450,9 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
rx_ring->rx_buf = NULL;
if (rx_ring->desc) {
- dmam_free_coherent(rx_ring->dev, rx_ring->size,
+ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(rx_ring->dev, size,
rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -453,9 +464,10 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
*
* Return 0 on success, negative on error
*/
-int ice_setup_rx_ring(struct ice_ring *rx_ring)
+int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ u32 size;
if (!dev)
return -ENOMEM;
@@ -463,19 +475,19 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_buf);
rx_ring->rx_buf =
- devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
+ devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count,
GFP_KERNEL);
if (!rx_ring->rx_buf)
return -ENOMEM;
/* round up to nearest page */
- rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
- PAGE_SIZE);
- rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
+ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
+ rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
GFP_KERNEL);
if (!rx_ring->desc) {
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
- rx_ring->size);
+ size);
goto err;
}
@@ -499,7 +511,7 @@ err:
}
static unsigned int
-ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
+ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
{
unsigned int truesize;
@@ -519,15 +531,15 @@ ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
+ * @xdp_ring: ring to be used for XDP_TX action
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static int
-ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
- struct ice_ring *xdp_ring;
- int err, result;
+ int err;
u32 act;
act = bpf_prog_run_xdp(xdp_prog, xdp);
@@ -535,11 +547,14 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
case XDP_PASS:
return ICE_XDP_PASS;
case XDP_TX:
- xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
- result = ice_xmit_xdp_buff(xdp, xdp_ring);
- if (result == ICE_XDP_CONSUMED)
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_lock(&xdp_ring->tx_lock);
+ err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_unlock(&xdp_ring->tx_lock);
+ if (err == ICE_XDP_CONSUMED)
goto out_failure;
- return result;
+ return err;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
if (err)
@@ -576,7 +591,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct ice_netdev_priv *np = netdev_priv(dev);
unsigned int queue_index = smp_processor_id();
struct ice_vsi *vsi = np->vsi;
- struct ice_ring *xdp_ring;
+ struct ice_tx_ring *xdp_ring;
int nxmit = 0, i;
if (test_bit(ICE_VSI_DOWN, vsi->state))
@@ -588,7 +603,14 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
- xdp_ring = vsi->xdp_rings[queue_index];
+ if (static_branch_unlikely(&ice_xdp_locking_key)) {
+ queue_index %= vsi->num_xdp_txq;
+ xdp_ring = vsi->xdp_rings[queue_index];
+ spin_lock(&xdp_ring->tx_lock);
+ } else {
+ xdp_ring = vsi->xdp_rings[queue_index];
+ }
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
@@ -602,6 +624,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (unlikely(flags & XDP_XMIT_FLUSH))
ice_xdp_ring_update_tail(xdp_ring);
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_unlock(&xdp_ring->tx_lock);
+
return nxmit;
}
@@ -614,7 +639,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
* reused.
*/
static bool
-ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
+ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
struct page *page = bi->page;
dma_addr_t dma;
@@ -665,7 +690,7 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
* buffers. Then bump tail at most one time. Grouping like this lets us avoid
* multiple tail writes per call.
*/
-bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
@@ -794,7 +819,7 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
* The function will then update the page offset.
*/
static void
-ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
@@ -820,7 +845,7 @@ ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* Synchronizes page for reuse by the adapter
*/
static void
-ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
+ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
{
u16 nta = rx_ring->next_to_alloc;
struct ice_rx_buf *new_buf;
@@ -851,7 +876,7 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
* for use by the CPU.
*/
static struct ice_rx_buf *
-ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
+ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
int *rx_buf_pgcnt)
{
struct ice_rx_buf *rx_buf;
@@ -888,7 +913,7 @@ ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *
-ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
u8 metasize = xdp->data - xdp->data_meta;
@@ -940,7 +965,7 @@ ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* skb correctly.
*/
static struct sk_buff *
-ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
unsigned int size = xdp->data_end - xdp->data;
@@ -1000,7 +1025,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* the associated resources.
*/
static void
-ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
int rx_buf_pgcnt)
{
u16 ntc = rx_ring->next_to_clean + 1;
@@ -1036,7 +1061,7 @@ ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* otherwise return true indicating that this is in fact a non-EOP buffer.
*/
static bool
-ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
+ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
@@ -1060,11 +1085,12 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
*
* Returns amount of work completed
*/
-int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
+int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
unsigned int offset = rx_ring->rx_offset;
+ struct ice_tx_ring *xdp_ring = NULL;
unsigned int xdp_res, xdp_xmit = 0;
struct sk_buff *skb = rx_ring->skb;
struct bpf_prog *xdp_prog = NULL;
@@ -1077,6 +1103,10 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
#endif
xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ if (xdp_prog)
+ xdp_ring = rx_ring->xdp_ring;
+
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
@@ -1140,11 +1170,10 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (!xdp_prog)
goto construct_skb;
- xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
+ xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
if (!xdp_res)
goto construct_skb;
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
@@ -1221,7 +1250,7 @@ construct_skb:
failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
if (xdp_prog)
- ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
rx_ring->skb = skb;
ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
@@ -1230,6 +1259,41 @@ construct_skb:
return failure ? budget : (int)total_rx_pkts;
}
+static void __ice_update_sample(struct ice_q_vector *q_vector,
+ struct ice_ring_container *rc,
+ struct dim_sample *sample,
+ bool is_tx)
+{
+ u64 packets = 0, bytes = 0;
+
+ if (is_tx) {
+ struct ice_tx_ring *tx_ring;
+
+ ice_for_each_tx_ring(tx_ring, *rc) {
+ packets += tx_ring->stats.pkts;
+ bytes += tx_ring->stats.bytes;
+ }
+ } else {
+ struct ice_rx_ring *rx_ring;
+
+ ice_for_each_rx_ring(rx_ring, *rc) {
+ packets += rx_ring->stats.pkts;
+ bytes += rx_ring->stats.bytes;
+ }
+ }
+
+ dim_update_sample(q_vector->total_events, packets, bytes, sample);
+ sample->comp_ctr = 0;
+
+ /* if dim settings get stale, like when not updated for 1
+ * second or longer, force it to start again. This addresses the
+ * frequent case of an idle queue being switched to by the
+ * scheduler. The 1,000 here means 1,000 milliseconds.
+ */
+ if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
+ rc->dim.state = DIM_START_MEASURE;
+}
+
/**
* ice_net_dim - Update net DIM algorithm
* @q_vector: the vector associated with the interrupt
@@ -1245,34 +1309,16 @@ static void ice_net_dim(struct ice_q_vector *q_vector)
struct ice_ring_container *rx = &q_vector->rx;
if (ITR_IS_DYNAMIC(tx)) {
- struct dim_sample dim_sample = {};
- u64 packets = 0, bytes = 0;
- struct ice_ring *ring;
-
- ice_for_each_ring(ring, q_vector->tx) {
- packets += ring->stats.pkts;
- bytes += ring->stats.bytes;
- }
-
- dim_update_sample(q_vector->total_events, packets, bytes,
- &dim_sample);
+ struct dim_sample dim_sample;
+ __ice_update_sample(q_vector, tx, &dim_sample, true);
net_dim(&tx->dim, dim_sample);
}
if (ITR_IS_DYNAMIC(rx)) {
- struct dim_sample dim_sample = {};
- u64 packets = 0, bytes = 0;
- struct ice_ring *ring;
-
- ice_for_each_ring(ring, q_vector->rx) {
- packets += ring->stats.pkts;
- bytes += ring->stats.bytes;
- }
-
- dim_update_sample(q_vector->total_events, packets, bytes,
- &dim_sample);
+ struct dim_sample dim_sample;
+ __ice_update_sample(q_vector, rx, &dim_sample, false);
net_dim(&rx->dim, dim_sample);
}
}
@@ -1299,15 +1345,14 @@ static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
}
/**
- * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt
+ * ice_enable_interrupt - re-enable MSI-X interrupt
* @q_vector: the vector associated with the interrupt to enable
*
- * Update the net_dim() algorithm and re-enable the interrupt associated with
- * this vector.
- *
- * If the VSI is down, the interrupt will not be re-enabled.
+ * If the VSI is down, the interrupt will not be re-enabled. Also,
+ * when enabling the interrupt, always reset wb_on_itr to false
+ * and trigger a software interrupt to clean out internal state.
*/
-static void ice_update_ena_itr(struct ice_q_vector *q_vector)
+static void ice_enable_interrupt(struct ice_q_vector *q_vector)
{
struct ice_vsi *vsi = q_vector->vsi;
bool wb_en = q_vector->wb_on_itr;
@@ -1316,25 +1361,25 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector)
if (test_bit(ICE_DOWN, vsi->state))
return;
- /* When exiting WB_ON_ITR, let ITR resume its normal
- * interrupts-enabled path.
+ /* trigger an ITR delayed software interrupt when exiting busy poll, to
+ * make sure to catch any pending cleanups that might have been missed
+ * due to interrupt state transition. If busy poll or poll isn't
+ * enabled, then don't update ITR, and just enable the interrupt.
*/
- if (wb_en)
+ if (!wb_en) {
+ itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
+ } else {
q_vector->wb_on_itr = false;
- /* This will do nothing if dynamic updates are not enabled. */
- ice_net_dim(q_vector);
-
- /* net_dim() updates ITR out-of-band using a work item */
- itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
- /* trigger an immediate software interrupt when exiting
- * busy poll, to make sure to catch any pending cleanups
- * that might have been missed due to interrupt state
- * transition.
- */
- if (wb_en) {
+ /* do two things here with a single write. Set up the third ITR
+ * index to be used for software interrupt moderation, and then
+ * trigger a software interrupt with a rate limit of 20K on
+		 * software interrupts; this helps avoid high interrupt loads
+		 * due to frequently entering and exiting polling.
+ */
+ itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
- GLINT_DYN_CTL_SW_ITR_INDX_M |
+ ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
}
wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
@@ -1387,18 +1432,24 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
{
struct ice_q_vector *q_vector =
container_of(napi, struct ice_q_vector, napi);
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
bool clean_complete = true;
- struct ice_ring *ring;
int budget_per_ring;
int work_done = 0;
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- ice_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_pool ?
- ice_clean_tx_irq_zc(ring, budget) :
- ice_clean_tx_irq(ring, budget);
+ ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+ bool wd;
+
+ if (tx_ring->xsk_pool)
+ wd = ice_clean_tx_irq_zc(tx_ring, budget);
+ else if (ice_ring_is_xdp(tx_ring))
+ wd = true;
+ else
+ wd = ice_clean_tx_irq(tx_ring, budget);
if (!wd)
clean_complete = false;
@@ -1419,16 +1470,16 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Max of 1 Rx ring in this q_vector so give it the budget */
budget_per_ring = budget;
- ice_for_each_ring(ring, q_vector->rx) {
+ ice_for_each_rx_ring(rx_ring, q_vector->rx) {
int cleaned;
/* A dedicated path for zero-copy allows making a single
* comparison in the irq context instead of many inside the
* ice_clean_rx_irq function and makes the codebase cleaner.
*/
- cleaned = ring->xsk_pool ?
- ice_clean_rx_irq_zc(ring, budget_per_ring) :
- ice_clean_rx_irq(ring, budget_per_ring);
+ cleaned = rx_ring->xsk_pool ?
+ ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
+ ice_clean_rx_irq(rx_ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
if (cleaned >= budget_per_ring)
@@ -1447,10 +1498,12 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
*/
- if (likely(napi_complete_done(napi, work_done)))
- ice_update_ena_itr(q_vector);
- else
+ if (likely(napi_complete_done(napi, work_done))) {
+ ice_net_dim(q_vector);
+ ice_enable_interrupt(q_vector);
+ } else {
ice_set_wb_on_itr(q_vector);
+ }
return min_t(int, work_done, budget - 1);
}
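/* [Editor's sketch, not part of the patch] The generic NAPI contract the
 * rewritten poll exit above follows; do_rx_work() and enable_device_irq()
 * are hypothetical stand-ins for driver-specific work.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = do_rx_work(napi, budget);

	if (work_done >= budget)
		return budget;		/* stay scheduled, keep polling */

	if (napi_complete_done(napi, work_done))
		enable_device_irq(napi); /* back to interrupt-driven mode */
	/* else: the stack is busy-polling; leave interrupts masked */

	return work_done;
}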
@@ -1462,7 +1515,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
*
* Returns -EBUSY if a stop is needed, else 0
*/
-static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
+static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
/* Memory barrier before checking head and tail */
@@ -1485,7 +1538,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
*
* Returns 0 if stop is not needed
*/
-static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
+static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
return 0;
@@ -1504,7 +1557,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
* it and the length into the transmit descriptor.
*/
static void
-ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
+ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
struct ice_tx_offload_params *off)
{
u64 td_offset, td_tag, td_cmd;
@@ -1840,7 +1893,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
* related to VLAN tagging for the HW, such as VLAN, DCB, etc.
*/
static void
-ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
+ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
@@ -2146,7 +2199,7 @@ static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
* @off: Tx offload parameters
*/
static void
-ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb,
+ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
s8 idx;
@@ -2181,7 +2234,7 @@ ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb,
* Returns NETDEV_TX_OK if sent, else an error code
*/
static netdev_tx_t
-ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
+ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
{
struct ice_tx_offload_params offload = { 0 };
struct ice_vsi *vsi = tx_ring->vsi;
@@ -2245,6 +2298,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
+ if (ice_is_switchdev_running(vsi->back))
+ ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
@@ -2282,7 +2337,7 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- struct ice_ring *tx_ring;
+ struct ice_tx_ring *tx_ring;
tx_ring = vsi->tx_rings[skb->queue_mapping];
@@ -2296,10 +2351,43 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
}
/**
+ * ice_get_dscp_up - return the UP/TC value for a SKB
+ * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
+ * @skb: SKB to query for info to determine UP/TC
+ *
+ * This function should only be called when the PF is in L3 DSCP PFC mode
+ */
+static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
+{
+ u8 dscp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+
+ return dcbcfg->dscp_map[dscp];
+}
+
+u16
+ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *dcbcfg;
+
+ dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
+ skb->priority = ice_get_dscp_up(dcbcfg, skb);
+
+ return netdev_pick_tx(netdev, skb, sb_dev);
+}
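/* [Editor's worked example, not part of the patch] Why the ">> 2" above
 * yields the DSCP: the 8-bit DS field carries DSCP in its top six bits
 * and ECN in the low two. Traced with the EF code point:
 */
static u8 example_dscp_from_dsfield(void)
{
	u8 dsfield = 0xb8;	/* EF PHB (DSCP 46) with ECN bits clear */

	return dsfield >> 2;	/* 0xb8 >> 2 == 0x2e == 46 */
}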
+
+/**
* ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
* @tx_ring: tx_ring to clean
*/
-void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
+void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
{
struct ice_vsi *vsi = tx_ring->vsi;
s16 i = tx_ring->next_to_clean;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 1e46e80f3d6f..c56dd1749903 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -13,6 +13,7 @@
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
+#define ICE_TX_THRESH 32
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to
@@ -154,7 +155,7 @@ struct ice_tx_buf {
struct ice_tx_offload_params {
u64 cd_qw1;
- struct ice_ring *tx_ring;
+ struct ice_tx_ring *tx_ring;
u32 td_cmd;
u32 td_offset;
u32 td_l2tag1;
@@ -164,17 +165,10 @@ struct ice_tx_offload_params {
};
struct ice_rx_buf {
- union {
- struct {
- dma_addr_t dma;
- struct page *page;
- unsigned int page_offset;
- u16 pagecnt_bias;
- };
- struct {
- struct xdp_buff *xdp;
- };
- };
+ dma_addr_t dma;
+ struct page *page;
+ unsigned int page_offset;
+ u16 pagecnt_bias;
};
struct ice_q_stats {
@@ -258,9 +252,9 @@ enum ice_dynamic_itr {
#define ICE_TX_LEGACY 1
/* descriptor ring, associated with a VSI */
-struct ice_ring {
+struct ice_rx_ring {
/* CL1 - 1st cacheline starts here */
- struct ice_ring *next; /* pointer to next ring in q_vector */
+ struct ice_rx_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
@@ -268,14 +262,13 @@ struct ice_ring {
struct ice_q_vector *q_vector; /* Backreference to associated vector */
u8 __iomem *tail;
union {
- struct ice_tx_buf *tx_buf;
struct ice_rx_buf *rx_buf;
+ struct xdp_buff **xdp_buf;
};
/* CL2 - 2nd cacheline starts here */
+ struct xdp_rxq_info xdp_rxq;
+ /* CL3 - 3rd cacheline starts here */
u16 q_index; /* Queue number of ring */
- u16 q_handle; /* Queue handle per TC */
-
- u8 ring_active:1; /* is ring online or not */
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -284,63 +277,104 @@ struct ice_ring {
u16 next_to_use;
u16 next_to_clean;
u16 next_to_alloc;
+ u16 rx_offset;
+ u16 rx_buf_len;
/* stats structs */
+ struct ice_rxq_stats rx_stats;
struct ice_q_stats stats;
struct u64_stats_sync syncp;
- union {
- struct ice_txq_stats tx_stats;
- struct ice_rxq_stats rx_stats;
- };
struct rcu_head rcu; /* to avoid race on free */
- DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
+ /* CL4 - 4th cacheline starts here */
+ struct ice_channel *ch;
struct bpf_prog *xdp_prog;
+ struct ice_tx_ring *xdp_ring;
struct xsk_buff_pool *xsk_pool;
- u16 rx_offset;
- /* CL3 - 3rd cacheline starts here */
- struct xdp_rxq_info xdp_rxq;
struct sk_buff *skb;
- /* CLX - the below items are only accessed infrequently and should be
- * in their own cache line if possible
- */
-#define ICE_TX_FLAGS_RING_XDP BIT(0)
+ dma_addr_t dma; /* physical address of ring */
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+ u64 cached_phctime;
+ u8 dcb_tc; /* Traffic class of ring */
+ u8 ptp_rx;
u8 flags;
+} ____cacheline_internodealigned_in_smp;
+
+struct ice_tx_ring {
+ /* CL1 - 1st cacheline starts here */
+ struct ice_tx_ring *next; /* pointer to next ring in q_vector */
+ void *desc; /* Descriptor ring memory */
+ struct device *dev; /* Used for DMA mapping */
+ u8 __iomem *tail;
+ struct ice_tx_buf *tx_buf;
+ struct ice_q_vector *q_vector; /* Backreference to associated vector */
+ struct net_device *netdev; /* netdev ring maps to */
+ struct ice_vsi *vsi; /* Backreference to associated VSI */
+ /* CL2 - 2nd cacheline starts here */
dma_addr_t dma; /* physical address of ring */
- unsigned int size; /* length of descriptor ring in bytes */
+ struct xsk_buff_pool *xsk_pool;
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_rs;
+ u16 next_dd;
+ u16 q_handle; /* Queue handle per TC */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 count; /* Number of descriptors */
+ u16 q_index; /* Queue number of ring */
+ /* stats structs */
+ struct ice_q_stats stats;
+ struct u64_stats_sync syncp;
+ struct ice_txq_stats tx_stats;
+
+ /* CL3 - 3rd cacheline starts here */
+ struct rcu_head rcu; /* to avoid race on free */
+ DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
+ struct ice_channel *ch;
+ struct ice_ptp_tx *tx_tstamps;
+ spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
- u16 rx_buf_len;
+#define ICE_TX_FLAGS_RING_XDP BIT(0)
+ u8 flags;
u8 dcb_tc; /* Traffic class of ring */
- struct ice_ptp_tx *tx_tstamps;
- u64 cached_phctime;
- u8 ptp_rx:1;
- u8 ptp_tx:1;
+ u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;
-static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
+static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{
return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}
-static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring)
+static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{
ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}
-static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring)
+static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{
ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}
-static inline bool ice_ring_is_xdp(struct ice_ring *ring)
+static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
+{
+ return !!ring->ch;
+}
+
+static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}
+enum ice_container_type {
+ ICE_RX_CONTAINER,
+ ICE_TX_CONTAINER,
+};
+
struct ice_ring_container {
/* head of linked-list of rings */
- struct ice_ring *ring;
+ union {
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
+ };
struct dim dim; /* data for net_dim algorithm */
u16 itr_idx; /* index in the interrupt vector */
/* this matches the maximum number of ITR bits, but in usec
@@ -349,6 +383,7 @@ struct ice_ring_container {
u16 itr_setting:13;
u16 itr_reserved:2;
u16 itr_mode:1;
+ enum ice_container_type type;
};
struct ice_coalesce_stored {
@@ -360,10 +395,13 @@ struct ice_coalesce_stored {
};
/* iterator for handling rings in ring container */
-#define ice_for_each_ring(pos, head) \
- for (pos = (head).ring; pos; pos = pos->next)
+#define ice_for_each_rx_ring(pos, head) \
+ for (pos = (head).rx_ring; pos; pos = pos->next)
+
+#define ice_for_each_tx_ring(pos, head) \
+ for (pos = (head).tx_ring; pos; pos = pos->next)
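/* [Editor's sketch, not part of the patch] With the ring split, each
 * container holds a single ring type, so callers pick the matching
 * iterator; a minimal usage shape using the names from this header:
 */
static void example_walk_vector(struct ice_q_vector *q_vector)
{
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		/* per-Tx-ring work */;
	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		/* per-Rx-ring work */;
}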
-static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
+static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (ring->rx_buf_len > (PAGE_SIZE / 2))
@@ -376,18 +414,21 @@ static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
union ice_32b_rx_flex_desc;
-bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-void ice_clean_tx_ring(struct ice_ring *tx_ring);
-void ice_clean_rx_ring(struct ice_ring *rx_ring);
-int ice_setup_tx_ring(struct ice_ring *tx_ring);
-int ice_setup_rx_ring(struct ice_ring *rx_ring);
-void ice_free_tx_ring(struct ice_ring *tx_ring);
-void ice_free_rx_ring(struct ice_ring *rx_ring);
+u16
+ice_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
+void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
+int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
+int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
+void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
+void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
u8 *raw_packet);
-int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget);
-void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring);
+int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
+void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 171397dcf00a..1dd7e84f41f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -2,13 +2,15 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_txrx_lib.h"
+#include "ice_eswitch.h"
+#include "ice_lib.h"
/**
* ice_release_rx_desc - Store the new tail and head values
* @rx_ring: ring to bump
* @val: new head index
*/
-void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
+void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
{
u16 prev_ntu = rx_ring->next_to_use & ~0x7;
@@ -66,7 +68,7 @@ static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
* @rx_ptype: the ptype value from the descriptor
*/
static void
-ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
+ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 rx_ptype)
{
struct ice_32b_rx_flex_desc_nic *nic_mdid;
@@ -93,7 +95,7 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
* skb->protocol must be set before this function is called
*/
static void
-ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
+ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
struct ice_rx_ptype_decoded decoded;
@@ -178,14 +180,15 @@ checksum_fail:
* other fields within the skb.
*/
void
-ice_process_skb_fields(struct ice_ring *rx_ring,
+ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 ptype)
{
ice_rx_hash(rx_ring, rx_desc, skb, ptype);
/* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ skb->protocol = eth_type_trans(skb, ice_eswitch_get_target_netdev
+ (rx_ring, rx_desc));
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
@@ -203,7 +206,7 @@ ice_process_skb_fields(struct ice_ring *rx_ring,
* gro receive functions (with/without VLAN tag)
*/
void
-ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
@@ -212,18 +215,67 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
}
/**
+ * ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring
+ * @xdp_ring: XDP ring to clean
+ */
+static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
+{
+ unsigned int total_bytes = 0, total_pkts = 0;
+ u16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *next_dd_desc;
+ u16 next_dd = xdp_ring->next_dd;
+ struct ice_tx_buf *tx_buf;
+ int i;
+
+ next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
+ if (!(next_dd_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
+ return;
+
+ for (i = 0; i < ICE_TX_THRESH; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
+
+ total_bytes += tx_buf->bytecount;
+ /* tx_buf->gso_segs would normally be used here, but at this
+ * point it is always 1 for XDP frames
+ */
+ total_pkts++;
+
+ page_frag_free(tx_buf->raw_buf);
+ dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ tx_buf->raw_buf = NULL;
+
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
+
+ next_dd_desc->cmd_type_offset_bsz = 0;
+ xdp_ring->next_dd = xdp_ring->next_dd + ICE_TX_THRESH;
+ if (xdp_ring->next_dd > xdp_ring->count)
+ xdp_ring->next_dd = ICE_TX_THRESH - 1;
+ xdp_ring->next_to_clean = ntc;
+ ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
+}
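/* [Editor's sketch, not part of the patch] The DD-threshold cleaning
 * model above, traced for a hypothetical 256-entry ring with
 * ICE_TX_THRESH == 32. RS is requested on every 32nd descriptor, so a
 * DD bit at next_dd proves the whole preceding batch completed:
 *
 *   next_dd = 31  -> reclaim [0..31],    next_dd becomes 63
 *   next_dd = 63  -> reclaim [32..63],   next_dd becomes 95
 *   ...
 *   next_dd = 255 -> reclaim [224..255], next_dd wraps back to 31
 */
static u16 example_advance_next_dd(u16 next_dd, u16 ring_count)
{
	next_dd += ICE_TX_THRESH;
	if (next_dd > ring_count)
		next_dd = ICE_TX_THRESH - 1;
	return next_dd;
}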
+
+/**
* ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
* @data: packet data pointer
* @size: packet data size
* @xdp_ring: XDP ring for transmission
*/
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
{
u16 i = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
dma_addr_t dma;
+ if (ICE_DESC_UNUSED(xdp_ring) < ICE_TX_THRESH)
+ ice_clean_xdp_irq(xdp_ring);
+
if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
xdp_ring->tx_stats.tx_busy++;
return ICE_XDP_CONSUMED;
@@ -244,21 +296,26 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
tx_desc = ICE_TX_DESC(xdp_ring, i);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
size, 0);
- /* Make certain all of the status bits have been updated
- * before next_to_watch is written.
- */
- smp_wmb();
-
i++;
- if (i == xdp_ring->count)
+ if (i == xdp_ring->count) {
i = 0;
-
- tx_buf->next_to_watch = tx_desc;
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+ xdp_ring->next_rs = ICE_TX_THRESH - 1;
+ }
xdp_ring->next_to_use = i;
+ if (i > xdp_ring->next_rs) {
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+ xdp_ring->next_rs += ICE_TX_THRESH;
+ }
+
return ICE_XDP_TX;
}
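/* [Editor's sketch, not part of the patch] Typical caller shape for the
 * XDP_TX path feeding ice_xmit_xdp_ring(), assuming xdp->data and
 * xdp->data_end delimit the frame as in the XDP core:
 */
static int example_xdp_tx(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
{
	u16 size = xdp->data_end - xdp->data;

	return ice_xmit_xdp_ring(xdp->data, size, xdp_ring);
}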
@@ -269,7 +326,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
*
* Returns negative on failure, 0 on success.
*/
-int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
@@ -281,22 +338,23 @@ int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
/**
* ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
- * @rx_ring: Rx ring
+ * @xdp_ring: XDP ring
* @xdp_res: Result of the receive batch
*
* This function bumps the XDP Tx tail and/or flushes the redirect map, and
* should be called when a batch of packets has been processed in the
* napi loop.
*/
-void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res)
{
if (xdp_res & ICE_XDP_REDIR)
xdp_do_flush_map();
if (xdp_res & ICE_XDP_TX) {
- struct ice_ring *xdp_ring =
- rx_ring->vsi->xdp_rings[rx_ring->q_index];
-
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_lock(&xdp_ring->tx_lock);
ice_xdp_ring_update_tail(xdp_ring);
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_unlock(&xdp_ring->tx_lock);
}
}
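/* [Editor's sketch, not part of the patch] The static-branch pattern
 * used above keeps the spinlock entirely off the fast path when XDP
 * rings are not shared between CPUs. The real key is defined elsewhere
 * in the driver; an assumed shape for defining and arming it:
 */
DEFINE_STATIC_KEY_FALSE(example_xdp_locking_key);

static void example_arm_xdp_locking(unsigned int nr_cpus,
				    unsigned int nr_xdp_rings)
{
	/* rings will be shared between CPUs; make the lock branch live */
	if (nr_cpus > nr_xdp_rings)
		static_branch_inc(&example_xdp_locking_key);
}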
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 05ac30752902..11b6c1601986 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -37,7 +37,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
*
* This function updates the XDP Tx ring tail register.
*/
-static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
+static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
{
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch.
@@ -46,14 +46,14 @@ static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}
-void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res);
-int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring);
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring);
-void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val);
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res);
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring);
+void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
-ice_process_skb_fields(struct ice_ring *rx_ring,
+ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 ptype);
void
-ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
#endif /* !_ICE_TXRX_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index d33d1906103c..9e0c2923c62e 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -138,7 +138,9 @@ enum ice_vsi_type {
ICE_VSI_PF = 0,
ICE_VSI_VF = 1,
ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
+ ICE_VSI_CHNL = 4,
ICE_VSI_LB = 6,
+ ICE_VSI_SWITCHDEV_CTRL = 7,
};
struct ice_link_status {
@@ -569,6 +571,8 @@ struct ice_sched_vsi_info {
struct list_head list_entry;
u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
u16 max_rdmaq[ICE_MAX_TRAFFIC_CLASS];
+ /* bw_t_info saves VSI BW information */
+ struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
};
/* driver defines the policy */
@@ -604,7 +608,8 @@ struct ice_dcb_app_priority_table {
};
#define ICE_MAX_USER_PRIORITY 8
-#define ICE_DCBX_MAX_APPS 32
+#define ICE_DCBX_MAX_APPS 64
+#define ICE_DSCP_NUM_VAL 64
#define ICE_LLDPDU_SIZE 1500
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
@@ -622,7 +627,14 @@ struct ice_dcbx_cfg {
struct ice_dcb_ets_cfg etscfg;
struct ice_dcb_ets_cfg etsrec;
struct ice_dcb_pfc_cfg pfc;
+#define ICE_QOS_MODE_VLAN 0x0
+#define ICE_QOS_MODE_DSCP 0x1
+ u8 pfc_mode;
struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
+ /* when DSCP mapping defined by user set its bit to 1 */
+ DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL);
+ /* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */
+ u8 dscp_map[ICE_DSCP_NUM_VAL];
u8 dcbx_mode;
#define ICE_DCBX_MODE_CEE 0x1
#define ICE_DCBX_MODE_IEEE 0x2
@@ -668,6 +680,10 @@ struct ice_port_info {
struct ice_switch_info {
struct list_head vsi_list_map_head;
struct ice_sw_recipe *recp_list;
+ u16 prof_res_bm_init;
+ u16 max_used_prof_index;
+
+ DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
/* FW logging configuration */
@@ -903,6 +919,7 @@ struct ice_hw {
struct mutex rss_locks; /* protect RSS configuration */
struct list_head rss_list_head;
struct ice_mbx_snapshot mbx_snapshot;
+ u16 io_expander_handle;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index e93430ab37f1..a42eaf6f942e 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -5,7 +5,9 @@
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
+#include "ice_dcb_lib.h"
#include "ice_flow.h"
+#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#define FIELD_SELECTOR(proto_hdr_field) \
@@ -251,7 +253,7 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = {
* ice_get_vf_vsi - get VF's VSI based on the stored index
* @vf: VF used to get VSI
*/
-static struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
{
return vf->pf->vsi[vf->lan_vsi_idx];
}
@@ -412,7 +414,7 @@ static bool ice_is_vf_link_up(struct ice_vf *vf)
*
* send a link status message to a single VF
*/
-static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
+void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
struct virtchnl_pf_event pfe = { 0 };
struct ice_hw *hw = &vf->pf->hw;
@@ -620,6 +622,8 @@ void ice_free_vfs(struct ice_pf *pf)
if (!pf->vf)
return;
+ ice_eswitch_release(pf);
+
while (test_and_set_bit(ICE_VF_DIS, pf->state))
usleep_range(1000, 2000);
@@ -828,7 +832,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
@@ -855,7 +859,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id);
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id, NULL);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
ice_vf_ctrl_invalidate_vsi(vf);
@@ -882,6 +886,40 @@ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
}
/**
+ * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
+ * @vf: VF to re-apply the configuration for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * needs to re-apply the host-configured Tx rate limiting configuration.
+ */
+static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ int err;
+
+ if (vf->min_tx_rate) {
+ err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->min_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ if (vf->max_tx_rate) {
+ err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->max_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
* @vf: VF to add MAC filters for
*
@@ -932,6 +970,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
enum ice_status status;
u8 broadcast[ETH_ALEN];
+ if (ice_is_eswitch_mode_switchdev(vf->pf))
+ return 0;
+
eth_broadcast_addr(broadcast);
status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
if (status) {
@@ -1414,6 +1455,11 @@ static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
if (ice_vf_rebuild_host_vlan_cfg(vf))
dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
vf->vf_id);
+
+ if (ice_vf_rebuild_host_tx_rate_cfg(vf))
+ dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
+ vf->vf_id);
+
/* rebuild aggregator node config for main VF VSI */
ice_vf_rebuild_aggregator_node_cfg(vsi);
}
@@ -1581,6 +1627,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_vf_post_vsi_rebuild(vf);
}
+ if (ice_is_eswitch_mode_switchdev(pf))
+ if (ice_eswitch_rebuild(pf))
+ dev_warn(dev, "eswitch rebuild failed\n");
+
ice_flush(hw);
clear_bit(ICE_VF_DIS, pf->state);
@@ -1593,7 +1643,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
*
* Returns true if the PF or VF is disabled, false otherwise.
*/
-static bool ice_is_vf_disabled(struct ice_vf *vf)
+bool ice_is_vf_disabled(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
@@ -1711,6 +1761,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
}
ice_vf_post_vsi_rebuild(vf);
+ vsi = ice_get_vf_vsi(vf);
+ ice_eswitch_update_repr(vsi);
/* if the VF has been reset allow it to come up again */
if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
@@ -1894,6 +1946,8 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
*/
ice_vf_ctrl_invalidate_vsi(vf);
ice_vf_fdir_init(vf);
+
+ ice_vc_set_dflt_vf_ops(&vf->vc_ops);
}
}
@@ -1960,6 +2014,11 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
}
clear_bit(ICE_VF_DIS, pf->state);
+
+ ret = ice_eswitch_configure(pf);
+ if (ret)
+ goto err_unroll_sriov;
+
return 0;
err_unroll_sriov:
@@ -2823,7 +2882,7 @@ static void ice_wait_on_vf_reset(struct ice_vf *vf)
* disabled, and initialized so it can be configured and/or queried by a host
* administrator.
*/
-static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
{
struct ice_pf *pf;
@@ -3329,7 +3388,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- struct ice_ring *ring = vsi->tx_rings[vf_q_id];
+ struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
struct ice_txq_meta txq_meta = { 0 };
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
@@ -3802,6 +3861,26 @@ static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
}
/**
+ * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to check
+ *
+ * Only update the cached hardware MAC for legacy VF drivers on delete,
+ * because we cannot guarantee the order/type of MAC messages from the VF driver
+ */
+static void
+ice_update_legacy_cached_mac(struct ice_vf *vf,
+ struct virtchnl_ether_addr *vc_ether_addr)
+{
+ if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
+ ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
+ return;
+
+ ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
+ ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
+}
+
+/**
* ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
* @vf: VF to update
* @vc_ether_addr: structure from VIRTCHNL with MAC to delete
@@ -3822,16 +3901,7 @@ ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
*/
eth_zero_addr(vf->dev_lan_addr.addr);
- /* only update cached hardware MAC for legacy VF drivers on delete
- * because we cannot guarantee order/type of MAC from the VF driver
- */
- if (ice_is_vc_addr_legacy(vc_ether_addr) &&
- !ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) {
- ether_addr_copy(vf->dev_lan_addr.addr,
- vf->legacy_last_added_umac.addr);
- ether_addr_copy(vf->hw_lan_addr.addr,
- vf->legacy_last_added_umac.addr);
- }
+ ice_update_legacy_cached_mac(vf, vc_ether_addr);
}
/**
@@ -4400,6 +4470,133 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
return ice_vsi_manage_vlan_stripping(vsi, false);
}
+static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = {
+ .get_ver_msg = ice_vc_get_ver_msg,
+ .get_vf_res_msg = ice_vc_get_vf_res_msg,
+ .reset_vf = ice_vc_reset_vf_msg,
+ .add_mac_addr_msg = ice_vc_add_mac_addr_msg,
+ .del_mac_addr_msg = ice_vc_del_mac_addr_msg,
+ .cfg_qs_msg = ice_vc_cfg_qs_msg,
+ .ena_qs_msg = ice_vc_ena_qs_msg,
+ .dis_qs_msg = ice_vc_dis_qs_msg,
+ .request_qs_msg = ice_vc_request_qs_msg,
+ .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
+ .config_rss_key = ice_vc_config_rss_key,
+ .config_rss_lut = ice_vc_config_rss_lut,
+ .get_stats_msg = ice_vc_get_stats_msg,
+ .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
+ .add_vlan_msg = ice_vc_add_vlan_msg,
+ .remove_vlan_msg = ice_vc_remove_vlan_msg,
+ .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
+ .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
+ .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
+ .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
+ .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
+};
+
+void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops)
+{
+ *ops = ice_vc_vf_dflt_ops;
+}
+
+static int
+ice_vc_repr_no_action_msg(struct ice_vf __always_unused *vf,
+ u8 __always_unused *msg)
+{
+ return 0;
+}
+
+/**
+ * ice_vc_repr_add_mac - handle a MAC add request for a port representor's VF
+ * @vf: pointer to VF
+ * @msg: virtchannel message
+ *
+ * When port representors are created, we do not add a MAC rule to
+ * firmware; instead, the address is cached so that the PF can report
+ * the same MAC as the VF.
+ */
+static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+ struct ice_vsi *vsi;
+ struct ice_pf *pf;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ pf = vf->pf;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ for (i = 0; i < al->num_elements; i++) {
+ u8 *mac_addr = al->list[i].addr;
+
+ if (!is_unicast_ether_addr(mac_addr) ||
+ ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
+ continue;
+
+ if (vf->pf_set_mac) {
+ dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto handle_mac_exit;
+ }
+
+ ice_vfhw_mac_add(vf, &al->list[i]);
+ vf->num_mac++;
+ break;
+ }
+
+handle_mac_exit:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_repr_del_mac - response with success for deleting MAC
+ * @vf: pointer to VF
+ * @msg: virtchannel message
+ *
+ * Respond with success so the normal VF flow is not broken.
+ * For legacy VF drivers, try to update the cached MAC address.
+ */
+static int
+ice_vc_repr_del_mac(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+
+ ice_update_legacy_cached_mac(vf, &al->list[0]);
+
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ice_vc_repr_no_action(struct ice_vf __always_unused *vf)
+{
+ return 0;
+}
+
+void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops)
+{
+ ops->add_mac_addr_msg = ice_vc_repr_add_mac;
+ ops->del_mac_addr_msg = ice_vc_repr_del_mac;
+ ops->add_vlan_msg = ice_vc_repr_no_action_msg;
+ ops->remove_vlan_msg = ice_vc_repr_no_action_msg;
+ ops->ena_vlan_stripping = ice_vc_repr_no_action;
+ ops->dis_vlan_stripping = ice_vc_repr_no_action;
+ ops->cfg_promiscuous_mode_msg = ice_vc_repr_no_action_msg;
+}
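/* [Editor's sketch, not part of the patch] How the two ops helpers above
 * are presumably paired at VF setup time; the actual call sites live
 * elsewhere in this series.
 */
static void example_setup_vf_ops(struct ice_vf *vf, bool switchdev)
{
	ice_vc_set_dflt_vf_ops(&vf->vc_ops);
	if (switchdev)
		ice_vc_change_ops_to_repr(&vf->vc_ops);
}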
+
/**
* ice_vc_process_vf_msg - Process request from VF
* @pf: pointer to the PF structure
@@ -4413,6 +4610,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
s16 vf_id = le16_to_cpu(event->desc.retval);
u16 msglen = event->msg_len;
+ struct ice_vc_vf_ops *ops;
u8 *msg = event->msg_buf;
struct ice_vf *vf = NULL;
struct device *dev;
@@ -4436,6 +4634,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
goto error_handler;
}
+ ops = &vf->vc_ops;
+
/* Perform basic checks on the msg */
err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
if (err) {
@@ -4463,75 +4663,75 @@ error_handler:
switch (v_opcode) {
case VIRTCHNL_OP_VERSION:
- err = ice_vc_get_ver_msg(vf, msg);
+ err = ops->get_ver_msg(vf, msg);
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
- err = ice_vc_get_vf_res_msg(vf, msg);
+ err = ops->get_vf_res_msg(vf, msg);
if (ice_vf_init_vlan_stripping(vf))
dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
vf->vf_id);
ice_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
- ice_vc_reset_vf_msg(vf);
+ ops->reset_vf(vf);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
- err = ice_vc_add_mac_addr_msg(vf, msg);
+ err = ops->add_mac_addr_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_ETH_ADDR:
- err = ice_vc_del_mac_addr_msg(vf, msg);
+ err = ops->del_mac_addr_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- err = ice_vc_cfg_qs_msg(vf, msg);
+ err = ops->cfg_qs_msg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
- err = ice_vc_ena_qs_msg(vf, msg);
+ err = ops->ena_qs_msg(vf, msg);
ice_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_DISABLE_QUEUES:
- err = ice_vc_dis_qs_msg(vf, msg);
+ err = ops->dis_qs_msg(vf, msg);
break;
case VIRTCHNL_OP_REQUEST_QUEUES:
- err = ice_vc_request_qs_msg(vf, msg);
+ err = ops->request_qs_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
- err = ice_vc_cfg_irq_map_msg(vf, msg);
+ err = ops->cfg_irq_map_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
- err = ice_vc_config_rss_key(vf, msg);
+ err = ops->config_rss_key(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
- err = ice_vc_config_rss_lut(vf, msg);
+ err = ops->config_rss_lut(vf, msg);
break;
case VIRTCHNL_OP_GET_STATS:
- err = ice_vc_get_stats_msg(vf, msg);
+ err = ops->get_stats_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
+ err = ops->cfg_promiscuous_mode_msg(vf, msg);
break;
case VIRTCHNL_OP_ADD_VLAN:
- err = ice_vc_add_vlan_msg(vf, msg);
+ err = ops->add_vlan_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_VLAN:
- err = ice_vc_remove_vlan_msg(vf, msg);
+ err = ops->remove_vlan_msg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
- err = ice_vc_ena_vlan_stripping(vf);
+ err = ops->ena_vlan_stripping(vf);
break;
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
- err = ice_vc_dis_vlan_stripping(vf);
+ err = ops->dis_vlan_stripping(vf);
break;
case VIRTCHNL_OP_ADD_FDIR_FILTER:
- err = ice_vc_add_fdir_fltr(vf, msg);
+ err = ops->add_fdir_fltr_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_FDIR_FILTER:
- err = ice_vc_del_fdir_fltr(vf, msg);
+ err = ops->del_fdir_fltr_msg(vf, msg);
break;
case VIRTCHNL_OP_ADD_RSS_CFG:
- err = ice_vc_handle_rss_cfg(vf, msg, true);
+ err = ops->handle_rss_cfg_msg(vf, msg, true);
break;
case VIRTCHNL_OP_DEL_RSS_CFG:
- err = ice_vc_handle_rss_cfg(vf, msg, false);
+ err = ops->handle_rss_cfg_msg(vf, msg, false);
break;
case VIRTCHNL_OP_UNKNOWN:
default:
@@ -4588,8 +4788,8 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
else
ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
- ivi->max_tx_rate = vf->tx_rate;
- ivi->min_tx_rate = 0;
+ ivi->max_tx_rate = vf->max_tx_rate;
+ ivi->min_tx_rate = vf->min_tx_rate;
return 0;
}
@@ -4699,6 +4899,11 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
struct ice_vf *vf;
int ret;
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
+ return -EOPNOTSUPP;
+ }
+
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
@@ -4763,6 +4968,122 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
}
/**
+ * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
+ * @pf: PF associated with VFs
+ */
+static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
+{
+ int rate = 0, i;
+
+ ice_for_each_vf(pf, i)
+ rate += pf->vf[i].min_tx_rate;
+
+ return rate;
+}
+
+/**
+ * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
+ * @vf: VF trying to configure min_tx_rate
+ * @min_tx_rate: min Tx rate in Mbps
+ *
+ * Check if the min_tx_rate being passed in will cause oversubscription of total
+ * min_tx_rate based on the current link speed and all other VFs'
+ * configured min_tx_rate
+ *
+ * Return true if the passed min_tx_rate would cause oversubscription, else
+ * return false
+ */
+static bool
+ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
+{
+ int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
+ int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
+
+ /* this VF's previous rate is being overwritten */
+ all_vfs_min_tx_rate -= vf->min_tx_rate;
+
+ if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
+ dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
+ min_tx_rate, vf->vf_id,
+ all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
+ link_speed_mbps);
+ return true;
+ }
+
+ return false;
+}
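/* [Editor's worked example, not part of the patch] On a 10000 Mbps link
 * with two other VFs already guaranteed 4000 Mbps each, a request for
 * min_tx_rate = 3000 on this VF (previously 0) is rejected:
 *
 *   all_vfs_min_tx_rate = 4000 + 4000 - 0 = 8000
 *   8000 + 3000 = 11000 > 10000 -> oversubscribed by 1000 Mbps
 */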
+
+/**
+ * ice_set_vf_bw - set min/max VF bandwidth
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @min_tx_rate: Minimum Tx rate in Mbps
+ * @max_tx_rate: Maximum Tx rate in Mbps
+ */
+int
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_validate_vf_id(pf, vf_id))
+ return -EINVAL;
+
+ vf = &pf->vf[vf_id];
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ return ret;
+
+ vsi = ice_get_vf_vsi(vf);
+
+ /* when max_tx_rate is zero that means no max Tx rate limiting, so only
+ * check if max_tx_rate is non-zero
+ */
+ if (max_tx_rate && min_tx_rate > max_tx_rate) {
+ dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
+ min_tx_rate, max_tx_rate);
+ return -EINVAL;
+ }
+
+ if (min_tx_rate && ice_is_dcb_active(pf)) {
+ dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate))
+ return -EINVAL;
+
+ if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
+ ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
+ vf->vf_id);
+ return ret;
+ }
+
+ vf->min_tx_rate = min_tx_rate;
+ }
+
+ if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
+ ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
+ vf->vf_id);
+ return ret;
+ }
+
+ vf->max_tx_rate = max_tx_rate;
+ }
+
+ return 0;
+}
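/* [Editor's note, not part of the patch] This implements the standard
 * ndo_set_vf_rate callback, so the limits are driven from userspace via
 * iproute2 (values illustrative):
 *
 *   ip link set dev <pf-netdev> vf 0 min_tx_rate 100 max_tx_rate 500
 */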
+
+/**
* ice_get_vf_stats - populate some stats for the VF
* @netdev: the netdev of the PF
* @vf_id: the host OS identifier (0-255)
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 842cb077df86..5ff93a08f54c 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -70,6 +70,32 @@ struct ice_mdd_vf_events {
u16 last_printed;
};
+struct ice_vf;
+
+struct ice_vc_vf_ops {
+ int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
+ void (*reset_vf)(struct ice_vf *vf);
+ int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*request_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
+ int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
+ int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_stripping)(struct ice_vf *vf);
+ int (*dis_vlan_stripping)(struct ice_vf *vf);
+ int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
+ int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+};
+
/* VF information structure */
struct ice_vf {
struct ice_pf *pf;
@@ -99,7 +125,8 @@ struct ice_vf {
* the main LAN VSI for the PF.
*/
u16 lan_vsi_num; /* ID as used by firmware */
- unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
+ unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
+ unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
u64 num_inval_msgs; /* number of continuous invalid msgs */
@@ -111,9 +138,17 @@ struct ice_vf {
struct ice_mdd_vf_events mdd_rx_events;
struct ice_mdd_vf_events mdd_tx_events;
DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
+
+ struct ice_repr *repr;
+
+ struct ice_vc_vf_ops vc_ops;
+
+ /* devlink port data */
+ struct devlink_port devlink_port;
};
#ifdef CONFIG_PCI_IOV
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
void ice_process_vflr_event(struct ice_pf *pf);
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
@@ -124,6 +159,9 @@ void ice_free_vfs(struct ice_pf *pf);
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
+void ice_vc_notify_vf_link_state(struct ice_vf *vf);
+void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops);
+void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
@@ -135,10 +173,18 @@ int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
__be16 vlan_proto);
+int
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
+
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+
+bool ice_is_vf_disabled(struct ice_vf *vf);
+
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
@@ -164,6 +210,9 @@ static inline
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
+static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
+static inline void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) { }
+static inline void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) { }
static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) { }
static inline
void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
@@ -171,6 +220,21 @@ static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
+static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ return true;
+}
+
+static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+ return NULL;
+}
+
static inline bool
ice_is_malicious_vf(struct ice_pf __always_unused *pf,
struct ice_rq_event_info __always_unused *event,
@@ -245,6 +309,14 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev,
}
static inline int
+ice_set_vf_bw(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused min_tx_rate,
+ int __always_unused max_tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
struct ice_q_vector __always_unused *q_vector)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 5a9f61deeb38..d9dfcfc2c6f9 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -67,7 +67,7 @@ ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
* @q_vector: queue vector
*/
static void
-ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
+ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
struct ice_q_vector *q_vector)
{
struct ice_pf *pf = vsi->back;
@@ -104,16 +104,17 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
u16 reg_idx = q_vector->reg_idx;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- struct ice_ring *ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
ice_cfg_itr(hw, q_vector);
- ice_for_each_ring(ring, q_vector->tx)
- ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
q_vector->tx.itr_idx);
- ice_for_each_ring(ring, q_vector->rx)
- ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
q_vector->rx.itr_idx);
ice_flush(hw);
@@ -144,8 +145,9 @@ static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
struct ice_txq_meta txq_meta = { };
- struct ice_ring *tx_ring, *rx_ring;
struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
int timeout = 50;
int err;
@@ -171,7 +173,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
if (ice_is_xdp_ena_vsi(vsi)) {
- struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
memset(&txq_meta, 0, sizeof(txq_meta));
ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
@@ -201,8 +203,9 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
- struct ice_ring *tx_ring, *rx_ring;
struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
u16 size;
int err;
@@ -225,7 +228,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
goto free_buf;
if (ice_is_xdp_ena_vsi(vsi)) {
- struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
memset(qg_buf, 0, size);
qg_buf->num_txqs = 1;
@@ -233,7 +236,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
+ xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
}
err = ice_vsi_cfg_rxq(rx_ring);
@@ -360,56 +363,50 @@ xsk_pool_if_up:
*
* Returns true if all allocations were successful, false if any fail.
*/
-bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
- struct ice_rx_buf *rx_buf;
- bool ok = true;
+ struct xdp_buff **xdp;
+ u32 nb_buffs, i;
dma_addr_t dma;
- if (!count)
- return true;
-
rx_desc = ICE_RX_DESC(rx_ring, ntu);
- rx_buf = &rx_ring->rx_buf[ntu];
+ xdp = &rx_ring->xdp_buf[ntu];
- do {
- rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
- if (!rx_buf->xdp) {
- ok = false;
- break;
- }
+ nb_buffs = min_t(u16, count, rx_ring->count - ntu);
+ nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
+ if (!nb_buffs)
+ return false;
- dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
+ i = nb_buffs;
+ while (i--) {
+ dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
- rx_desc->wb.status_error0 = 0;
rx_desc++;
- rx_buf++;
- ntu++;
-
- if (unlikely(ntu == rx_ring->count)) {
- rx_desc = ICE_RX_DESC(rx_ring, 0);
- rx_buf = rx_ring->rx_buf;
- ntu = 0;
- }
- } while (--count);
+ xdp++;
+ }
- if (rx_ring->next_to_use != ntu) {
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.status_error0 = 0;
- ice_release_rx_desc(rx_ring, ntu);
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count) {
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ xdp = rx_ring->xdp_buf;
+ ntu = 0;
}
- return ok;
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.status_error0 = 0;
+ ice_release_rx_desc(rx_ring, ntu);
+
+ return count == nb_buffs;
}
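/* [Editor's sketch, not part of the patch] The batched allocator above
 * never wraps mid-batch: it fills at most to the end of the ring and
 * reports a short fill through the false return. Illustrative trace on
 * a hypothetical 512-entry ring:
 *
 *   ntu = 500, count = 64 -> nb_buffs = min(64, 512 - 500) = 12
 *   12 descriptors filled, ntu wraps to 0, tail released;
 *   return (64 == 12) -> false, caller retries on a later pass
 */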
/**
* ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
* @rx_ring: Rx ring
*/
-static void ice_bump_ntc(struct ice_ring *rx_ring)
+static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
{
int ntc = rx_ring->next_to_clean + 1;
@@ -421,19 +418,19 @@ static void ice_bump_ntc(struct ice_ring *rx_ring)
/**
* ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
* @rx_ring: Rx ring
- * @rx_buf: zero-copy Rx buffer
+ * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
*
* This function allocates a new skb from a zero-copy Rx buffer.
*
* Returns the skb on success, NULL on failure.
*/
static struct sk_buff *
-ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
{
- unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
- unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
- unsigned int datasize_hard = rx_buf->xdp->data_end -
- rx_buf->xdp->data_hard_start;
+ struct xdp_buff *xdp = *xdp_arr;
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
struct sk_buff *skb;
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -441,13 +438,13 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
if (metasize)
skb_metadata_set(skb, metasize);
- xsk_buff_free(rx_buf->xdp);
- rx_buf->xdp = NULL;
+ xsk_buff_free(xdp);
+ *xdp_arr = NULL;
return skb;
}
@@ -455,22 +452,18 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
* ice_run_xdp_zc - Executes an XDP program in zero-copy path
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
+ * @xdp_prog: XDP program to run
+ * @xdp_ring: ring to be used for XDP_TX action
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static int
-ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
+ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
int err, result = ICE_XDP_PASS;
- struct bpf_prog *xdp_prog;
- struct ice_ring *xdp_ring;
u32 act;
- /* ZC patch is enabled only when XDP program is set,
- * so here it can not be NULL
- */
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
-
act = bpf_prog_run_xdp(xdp_prog, xdp);
if (likely(act == XDP_REDIRECT)) {
@@ -484,7 +477,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
case XDP_PASS:
break;
case XDP_TX:
- xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
result = ice_xmit_xdp_buff(xdp, xdp_ring);
if (result == ICE_XDP_CONSUMED)
goto out_failure;
@@ -511,17 +503,25 @@ out_failure:
*
* Returns number of processed packets on success, remaining budget on failure.
*/
-int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
bool failure = false;
+ /* The ZC path is enabled only when an XDP program is set,
+ * so xdp_prog cannot be NULL here
+ */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ xdp_ring = rx_ring->xdp_ring;
+
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0;
- struct ice_rx_buf *rx_buf;
+ struct xdp_buff **xdp;
struct sk_buff *skb;
u16 stat_err_bits;
u16 vlan_tag = 0;
@@ -544,18 +544,18 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (!size)
break;
- rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
- rx_buf->xdp->data_end = rx_buf->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);
+ xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
+ xsk_buff_set_size(*xdp, size);
+ xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
- xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
+ xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring);
if (xdp_res) {
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
xdp_xmit |= xdp_res;
else
- xsk_buff_free(rx_buf->xdp);
+ xsk_buff_free(*xdp);
- rx_buf->xdp = NULL;
+ *xdp = NULL;
total_rx_bytes += size;
total_rx_packets++;
cleaned_count++;
@@ -565,7 +565,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
}
/* XDP_PASS path */
- skb = ice_construct_skb_zc(rx_ring, rx_buf);
+ skb = ice_construct_skb_zc(rx_ring, xdp);
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
break;
@@ -596,7 +596,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (cleaned_count >= ICE_RX_BUF_WRITE)
failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
- ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
@@ -618,7 +618,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
*
* Returns true if cleanup/transmission is done.
*/
-static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
+static bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, int budget)
{
struct ice_tx_desc *tx_desc = NULL;
bool work_done = true;
@@ -669,7 +669,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
* @tx_buf: Tx buffer to clean
*/
static void
-ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
@@ -684,7 +684,7 @@ ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
*
 * Returns true if cleanup/transmission is done.
*/
-bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
+bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget)
{
int total_packets = 0, total_bytes = 0;
s16 ntc = xdp_ring->next_to_clean;
@@ -757,7 +757,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_q_vector *q_vector;
struct ice_vsi *vsi = np->vsi;
- struct ice_ring *ring;
+ struct ice_tx_ring *ring;
if (test_bit(ICE_DOWN, vsi->state))
return -ENETDOWN;
@@ -808,17 +808,17 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
* ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
* @rx_ring: ring to be cleaned
*/
-void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
+void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
u16 i;
for (i = 0; i < rx_ring->count; i++) {
- struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
+ struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
- if (!rx_buf->xdp)
+ if (!*xdp)
continue;
- rx_buf->xdp = NULL;
+ *xdp = NULL;
}
}
@@ -826,7 +826,7 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
* ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
* @xdp_ring: XDP_Tx ring
*/
-void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
+void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
{
u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
u32 xsk_frames = 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index ea208808623a..4c7bd8e9dfc4 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -11,13 +11,13 @@ struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
-int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
-bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
+bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
-bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
-void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
-void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
+void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
+void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
#else
static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
@@ -28,21 +28,21 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
}
static inline int
-ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
+ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
int __always_unused budget)
{
return 0;
}
static inline bool
-ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
+ice_clean_tx_irq_zc(struct ice_tx_ring __always_unused *xdp_ring,
int __always_unused budget)
{
return false;
}
static inline bool
-ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring,
+ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
u16 __always_unused count)
{
return false;
@@ -60,7 +60,7 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
-static inline void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) { }
-static inline void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring) { }
+static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
+static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 751de06019a0..e67a71c3f141 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3356,7 +3356,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "NVM Read Error\n");
}
- memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
@@ -4988,7 +4988,7 @@ static int igb_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
/* set the correct pool for the new PF MAC address in entry 0 */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index d32e72d953c8..74ccd622251a 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1527,8 +1527,7 @@ static void igbvf_reset(struct igbvf_adapter *adapter)
spin_unlock_bh(&hw->mbx_lock);
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
- memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
memcpy(netdev->perm_addr, adapter->hw.mac.addr,
netdev->addr_len);
}
@@ -1813,7 +1812,7 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
return 0;
}
@@ -2816,8 +2815,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (is_zero_ether_addr(adapter->hw.mac.addr))
dev_info(&pdev->dev,
"MAC address not assigned by administrator.\n");
- memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
}
spin_unlock_bh(&hw->mbx_lock);
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 4461f8b9a864..4e0203336c6b 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -22,8 +22,8 @@
#define IGC_DEV_ID_I220_V 0x15F7
#define IGC_DEV_ID_I225_K 0x3100
#define IGC_DEV_ID_I225_K2 0x3101
+#define IGC_DEV_ID_I226_K 0x3102
#define IGC_DEV_ID_I225_LMVP 0x5502
-#define IGC_DEV_ID_I226_K 0x5504
#define IGC_DEV_ID_I225_IT 0x0D9F
#define IGC_DEV_ID_I226_LM 0x125B
#define IGC_DEV_ID_I226_V 0x125C
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 0e19b4d02e62..7ffb1045f00c 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -949,7 +949,7 @@ static int igc_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
/* set the correct pool for the new PF MAC address in entry 0 */
@@ -6377,7 +6377,7 @@ static int igc_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "NVM Read Error\n");
}
- memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 0f021909b430..30568e3544cd 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -773,7 +773,7 @@ static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
{
-#if IS_ENABLED(CONFIG_X86_TSC)
+#if IS_ENABLED(CONFIG_X86_TSC) && !defined(CONFIG_UML)
return convert_art_ns_to_tsc(tstamp);
#else
return (struct system_counterval_t) { };
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index a430871d1c27..c8d1e815ec6b 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -549,7 +549,7 @@ ixgb_mta_set(struct ixgb_hw *hw,
*****************************************************************************/
void
ixgb_rar_set(struct ixgb_hw *hw,
- u8 *addr,
+ const u8 *addr,
u32 index)
{
u32 rar_low, rar_high;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
index 6064583095da..70bcff5fb3db 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
@@ -740,7 +740,7 @@ bool ixgb_adapter_start(struct ixgb_hw *hw);
void ixgb_check_for_link(struct ixgb_hw *hw);
bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
-void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index);
+void ixgb_rar_set(struct ixgb_hw *hw, const u8 *addr, u32 index);
/* Filters (multicast, vlan, receive) */
void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list,
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 1588376d4c67..99d481904ce6 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -362,6 +362,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgb_adapter *adapter;
static int cards_found = 0;
int pci_using_dac;
+ u8 addr[ETH_ALEN];
int i;
int err;
@@ -461,7 +462,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
- ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+ ixgb_get_ee_mac_addr(&adapter->hw, addr);
+ eth_hw_addr_set(netdev, addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
@@ -1030,7 +1032,7 @@ ixgb_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
@@ -2227,6 +2229,7 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgb_adapter *adapter = netdev_priv(netdev);
+ u8 addr[ETH_ALEN];
if (pci_enable_device(pdev)) {
netif_err(adapter, probe, adapter->netdev,
@@ -2250,7 +2253,8 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
"After reset, the EEPROM checksum is not valid\n");
return PCI_ERS_RESULT_DISCONNECT;
}
- ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+ ixgb_get_ee_mac_addr(&adapter->hw, addr);
+ eth_hw_addr_set(netdev, addr);
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a604552fa634..4a69823e6abd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -351,6 +351,7 @@ struct ixgbe_ring {
};
u16 rx_offset;
struct xdp_rxq_info xdp_rxq;
+ spinlock_t tx_lock; /* used in XDP mode */
struct xsk_buff_pool *xsk_pool;
u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
u16 rx_buf_len;
@@ -375,11 +376,13 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_FCOE_INDICES 8
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
-#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define IXGBE_MAX_XDP_QS (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES 4
#define IXGBE_BAD_L2A_QUEUE 3
#define IXGBE_MAX_MACVLANS 63
+DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
+
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
@@ -629,7 +632,7 @@ struct ixgbe_adapter {
/* XDP */
int num_xdp_queues;
- struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES];
+ struct ixgbe_ring *xdp_ring[IXGBE_MAX_XDP_QS];
unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */
/* TX */
@@ -772,6 +775,22 @@ struct ixgbe_adapter {
#endif /* CONFIG_IXGBE_IPSEC */
};
+static inline int ixgbe_determine_xdp_q_idx(int cpu)
+{
+ if (static_key_enabled(&ixgbe_xdp_locking_key))
+ return cpu % IXGBE_MAX_XDP_QS;
+ else
+ return cpu;
+}
+
+static inline
+struct ixgbe_ring *ixgbe_determine_xdp_ring(struct ixgbe_adapter *adapter)
+{
+ int index = ixgbe_determine_xdp_q_idx(smp_processor_id());
+
+ return adapter->xdp_ring[index];
+}
+
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
{
switch (adapter->hw.mac.type) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index fc26e4ddeb0d..beda8e0ef7d4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3208,7 +3208,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
max_combined = ixgbe_max_rss_indices(adapter);
}
- return max_combined;
+ return min_t(int, max_combined, num_online_cpus());
}
static void ixgbe_get_channels(struct net_device *dev,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 0218f6c9b925..86b11164655e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -299,7 +299,10 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
- return adapter->xdp_prog ? nr_cpu_ids : 0;
+ int queues;
+
+ queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids);
+ return adapter->xdp_prog ? queues : 0;
}
#define IXGBE_RSS_64Q_MASK 0x3F
@@ -947,6 +950,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ring->count = adapter->tx_ring_count;
ring->queue_index = xdp_idx;
set_ring_xdp(ring);
+ spin_lock_init(&ring->tx_lock);
/* assign ring to adapter */
WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
@@ -1032,6 +1036,9 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
adapter->q_vector[v_idx] = NULL;
__netif_napi_del(&q_vector->napi);
+ if (static_key_enabled(&ixgbe_xdp_locking_key))
+ static_branch_dec(&ixgbe_xdp_locking_key);
+
/*
* after a call to __netif_napi_del() napi may still be used and
* ixgbe_get_stats64() might access the rings on this vector,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 24e06ba6f5e9..0f9f022260d7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -165,6 +165,9 @@ MODULE_AUTHOR("Intel Corporation, <[email protected]>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");
+DEFINE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
+EXPORT_SYMBOL(ixgbe_xdp_locking_key);
+
static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
@@ -2197,6 +2200,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
+ struct ixgbe_ring *ring;
struct xdp_frame *xdpf;
u32 act;
@@ -2215,7 +2219,12 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
goto out_failure;
- result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ ring = ixgbe_determine_xdp_ring(adapter);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+ result = ixgbe_xmit_xdp_ring(ring, xdpf);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
if (result == IXGBE_XDP_CONSUMED)
goto out_failure;
break;
@@ -2422,13 +2431,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
xdp_do_flush_map();
if (xdp_xmit & IXGBE_XDP_TX) {
- struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+ struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
+ ixgbe_xdp_ring_update_tail_locked(ring);
}
u64_stats_update_begin(&rx_ring->syncp);
@@ -6320,7 +6325,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
if (ixgbe_init_rss_key(adapter))
return -ENOMEM;
- adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
+ adapter->af_xdp_zc_qps = bitmap_zalloc(IXGBE_MAX_XDP_QS, GFP_KERNEL);
if (!adapter->af_xdp_zc_qps)
return -ENOMEM;
@@ -8536,10 +8541,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
}
#endif
-int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
struct xdp_frame *xdpf)
{
- struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
u32 len, cmd_type;
@@ -8788,7 +8792,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
ixgbe_mac_set_default_filter(adapter);
@@ -10112,6 +10116,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct bpf_prog *old_prog;
bool need_reset;
+ int num_queues;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
return -EINVAL;
@@ -10130,8 +10135,13 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
return -EINVAL;
}
- if (nr_cpu_ids > MAX_XDP_QUEUES)
+ /* If the number of CPUs exceeds twice the maximum number of XDP
+ * queues, refuse the program and return -ENOMEM, as before.
+ */
+ if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2)
return -ENOMEM;
+ else if (nr_cpu_ids > IXGBE_MAX_XDP_QS)
+ static_branch_inc(&ixgbe_xdp_locking_key);
old_prog = xchg(&adapter->xdp_prog, prog);
need_reset = (!!prog != !!old_prog);
@@ -10161,11 +10171,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
/* Kick start the NAPI context if there is an AF_XDP socket open
* on that queue id. This so that receiving will start.
*/
- if (need_reset && prog)
- for (i = 0; i < adapter->num_rx_queues; i++)
+ if (need_reset && prog) {
+ num_queues = min_t(int, adapter->num_rx_queues,
+ adapter->num_xdp_queues);
+ for (i = 0; i < num_queues; i++)
if (adapter->xdp_ring[i]->xsk_pool)
(void)ixgbe_xsk_wakeup(adapter->netdev, i,
XDP_WAKEUP_RX);
+ }
return 0;
}
@@ -10195,6 +10208,15 @@ void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
writel(ring->next_to_use, ring->tail);
}
+void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring)
+{
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+ ixgbe_xdp_ring_update_tail(ring);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
+}
+
static int ixgbe_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags)
{
@@ -10212,18 +10234,21 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
 /* During program transitions it's possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
- ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
+ ring = adapter->xdp_prog ? ixgbe_determine_xdp_ring(adapter) : NULL;
if (unlikely(!ring))
return -ENXIO;
if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
return -ENXIO;
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
- err = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ err = ixgbe_xmit_xdp_ring(ring, xdpf);
if (err != IXGBE_XDP_TX)
break;
nxmit++;
@@ -10232,6 +10257,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
if (unlikely(flags & XDP_XMIT_FLUSH))
ixgbe_xdp_ring_update_tail(ring);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
+
return nxmit;
}
@@ -10899,7 +10927,7 @@ skip_sriov:
eth_platform_get_mac_address(&adapter->pdev->dev,
adapter->hw.mac.perm_addr);
- memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac.perm_addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
e_dev_err("invalid MAC address\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index 2aeec78029bc..a82533f21d36 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -12,7 +12,7 @@
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
IXGBE_TXD_CMD_RS)
-int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
struct xdp_frame *xdpf);
bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
@@ -23,6 +23,7 @@ void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb);
void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring);
+void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring);
void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask);
void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index b1d22e4d5ec9..db2bc58dfcfd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -100,6 +100,7 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
+ struct ixgbe_ring *ring;
struct xdp_frame *xdpf;
u32 act;
@@ -120,7 +121,12 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
goto out_failure;
- result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ ring = ixgbe_determine_xdp_ring(adapter);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+ result = ixgbe_xmit_xdp_ring(ring, xdpf);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
if (result == IXGBE_XDP_CONSUMED)
goto out_failure;
break;
@@ -334,13 +340,9 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
xdp_do_flush_map();
if (xdp_xmit & IXGBE_XDP_TX) {
- struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+ struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
+ ixgbe_xdp_ring_update_tail_locked(ring);
}
u64_stats_update_begin(&rx_ring->syncp);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index c714e1ecd308..d81811ab4ec4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2540,7 +2540,7 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
}
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
@@ -3054,7 +3054,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
else if (is_zero_ether_addr(adapter->hw.mac.addr))
dev_info(&pdev->dev,
"MAC address not assigned by administrator.\n");
- ether_addr_copy(netdev->dev_addr, hw->mac.addr);
+ eth_hw_addr_set(netdev, hw->mac.addr);
}
if (!is_valid_ether_addr(netdev->dev_addr)) {
@@ -4231,7 +4231,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
ether_addr_copy(hw->mac.addr, addr->sa_data);
ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
return 0;
}
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 1bdc4f23e1e5..439674fc9765 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -313,7 +313,7 @@ jme_load_macaddr(struct net_device *netdev)
val = jread32(jme, JME_RXUMA_HI);
macaddr[4] = (val >> 0) & 0xFF;
macaddr[5] = (val >> 8) & 0xFF;
- memcpy(netdev->dev_addr, macaddr, ETH_ALEN);
+ eth_hw_addr_set(netdev, macaddr);
spin_unlock_bh(&jme->macaddr_lock);
}
@@ -2254,7 +2254,7 @@ jme_set_macaddr(struct net_device *netdev, void *p)
return -EBUSY;
spin_lock_bh(&jme->macaddr_lock);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
jme_set_unicastaddr(netdev);
spin_unlock_bh(&jme->macaddr_lock);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 3e9f324f1061..df9a8eefa007 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1297,8 +1297,8 @@ static int korina_probe(struct platform_device *pdev)
lp = netdev_priv(dev);
if (mac_addr)
- ether_addr_copy(dev->dev_addr, mac_addr);
- else if (of_get_mac_address(pdev->dev.of_node, dev->dev_addr) < 0)
+ eth_hw_addr_set(dev, mac_addr);
+ else if (of_get_ethdev_address(pdev->dev.of_node, dev) < 0)
eth_hw_addr_random(dev);
clk = devm_clk_get_optional(&pdev->dev, "mdioclk");
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 9b7307eba97c..ecf1e11d9b91 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -527,7 +527,7 @@ static int xrx200_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
- err = of_get_mac_address(np, net_dev->dev_addr);
+ err = of_get_ethdev_address(np, net_dev);
if (err)
eth_hw_addr_random(net_dev);
diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig
index 63bf01d28f0c..f99adbf26ab4 100644
--- a/drivers/net/ethernet/litex/Kconfig
+++ b/drivers/net/ethernet/litex/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_LITEX
config LITEX_LITEETH
tristate "LiteX Ethernet support"
- depends on OF_NET
+ depends on OF
help
If you wish to compile a kernel for hardware with a LiteX LiteEth
device then you should answer Y to this.
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index a9bdbf0dcfe1..3d9385a4989b 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -266,7 +266,7 @@ static int liteeth_probe(struct platform_device *pdev)
priv->tx_base = buf_base + priv->num_rx_slots * priv->slot_size;
priv->tx_slot = 0;
- err = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr);
+ err = of_get_ethdev_address(pdev->dev.of_node, netdev);
if (err)
eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 28d5ad296646..bb14fa2241a3 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1770,7 +1770,7 @@ static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
addr[5] = mac_l & 0xff;
}
-static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
+static void uc_addr_set(struct mv643xx_eth_private *mp, const u8 *addr)
{
wrlp(mp, MAC_ADDR_HIGH,
(addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
@@ -1919,7 +1919,7 @@ static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
netif_addr_lock_bh(dev);
mv643xx_eth_program_unicast_filter(dev);
@@ -2925,10 +2925,14 @@ static void set_params(struct mv643xx_eth_private *mp,
struct net_device *dev = mp->dev;
unsigned int tx_ring_size;
- if (is_valid_ether_addr(pd->mac_addr))
- memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
- else
- uc_addr_get(mp, dev->dev_addr);
+ if (is_valid_ether_addr(pd->mac_addr)) {
+ eth_hw_addr_set(dev, pd->mac_addr);
+ } else {
+ u8 addr[ETH_ALEN];
+
+ uc_addr_get(mp, addr);
+ eth_hw_addr_set(dev, addr);
+ }
mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
if (pd->rx_queue_size)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 9d460a270601..98f276c617fb 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1623,8 +1623,8 @@ static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
}
/* Set mac address */
-static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
- int queue)
+static void mvneta_mac_addr_set(struct mvneta_port *pp,
+ const unsigned char *addr, int queue)
{
unsigned int mac_h;
unsigned int mac_l;
@@ -1914,7 +1914,7 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
}
/* Handle tx checksum */
-static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
+static u32 mvneta_skb_tx_csum(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
@@ -2595,8 +2595,7 @@ err_drop_frame:
}
static inline void
-mvneta_tso_put_hdr(struct sk_buff *skb,
- struct mvneta_port *pp, struct mvneta_tx_queue *txq)
+mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq)
{
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
@@ -2604,7 +2603,7 @@ mvneta_tso_put_hdr(struct sk_buff *skb,
tx_desc = mvneta_txq_next_desc_get(txq);
tx_desc->data_size = hdr_len;
- tx_desc->command = mvneta_skb_tx_csum(pp, skb);
+ tx_desc->command = mvneta_skb_tx_csum(skb);
tx_desc->command |= MVNETA_TXD_F_DESC;
tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
txq->txq_put_index * TSO_HEADER_SIZE;
@@ -2681,7 +2680,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
- mvneta_tso_put_hdr(skb, pp, txq);
+ mvneta_tso_put_hdr(skb, txq);
while (data_left > 0) {
int size;
@@ -2799,7 +2798,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
/* Get a descriptor for the first part of the packet */
tx_desc = mvneta_txq_next_desc_get(txq);
- tx_cmd = mvneta_skb_tx_csum(pp, skb);
+ tx_cmd = mvneta_skb_tx_csum(skb);
tx_desc->data_size = skb_headlen(skb);
@@ -5242,14 +5241,14 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_free_ports;
}
- err = of_get_mac_address(dn, dev->dev_addr);
+ err = of_get_ethdev_address(dn, dev);
if (!err) {
mac_from = "device tree";
} else {
mvneta_get_mac_addr(pp, hw_mac_addr);
if (is_valid_ether_addr(hw_mac_addr)) {
mac_from = "hardware";
- memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, hw_mac_addr);
} else {
mac_from = "random";
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index d5c92e43f89e..ad3be55cce68 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6081,9 +6081,9 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
char hw_mac_addr[ETH_ALEN] = {0};
char fw_mac_addr[ETH_ALEN];
- if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
+ if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) {
*mac_from = "firmware node";
- ether_addr_copy(dev->dev_addr, fw_mac_addr);
+ eth_hw_addr_set(dev, fw_mac_addr);
return;
}
@@ -6091,7 +6091,7 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
mvpp21_get_mac_address(port, hw_mac_addr);
if (is_valid_ether_addr(hw_mac_addr)) {
*mac_from = "hardware";
- ether_addr_copy(dev->dev_addr, hw_mac_addr);
+ eth_hw_addr_set(dev, hw_mac_addr);
return;
}
}
@@ -6301,12 +6301,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_NA:
if (mvpp2_port_supports_xlg(port)) {
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
+ phylink_set_10g_modes(mask);
phylink_set(mask, 10000baseKR_Full);
}
if (state->interface != PHY_INTERFACE_MODE_NA)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 93575800ca92..75ba57bd1d46 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -2347,7 +2347,7 @@ int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
return err;
/* Set addr in the device */
- ether_addr_copy(dev->dev_addr, da);
+ eth_hw_addr_set(dev, da);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 34a089b71e55..186d00a9ab35 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -838,9 +838,6 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
if (!cgx)
return;
- if (is_dev_rpm(cgx))
- return;
-
if (enable) {
/* Enable inbound PTP timestamping */
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
@@ -1522,7 +1519,6 @@ static int cgx_lmac_exit(struct cgx *cgx)
int i;
if (cgx->cgx_cmd_workq) {
- flush_workqueue(cgx->cgx_cmd_workq);
destroy_workqueue(cgx->cgx_cmd_workq);
cgx->cgx_cmd_workq = NULL;
}
@@ -1545,9 +1541,11 @@ static int cgx_lmac_exit(struct cgx *cgx)
static void cgx_populate_features(struct cgx *cgx)
{
if (is_dev_rpm(cgx))
- cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC);
+ cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
+ RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
else
- cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
+ cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 |
+ RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
}
static struct mac_ops cgx_mac_ops = {
@@ -1571,6 +1569,7 @@ static struct mac_ops cgx_mac_ops = {
.mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
.mac_pause_frm_config = cgx_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = cgx_lmac_ptp_config,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index c38306b3384a..fc6e7423cbd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -102,6 +102,11 @@ struct mac_ops {
void (*mac_pause_frm_config)(void *cgxd,
int lmac_id,
bool enable);
+
+ /* Enable/Disable Inbound PTP */
+ void (*mac_enadis_ptp_config)(void *cgxd,
+ int lmac_id,
+ bool enable);
};
struct cgx {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 250eefa5e853..4e79e918a161 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -84,7 +84,7 @@ struct mbox_msghdr {
#define OTX2_MBOX_REQ_SIG (0xdead)
#define OTX2_MBOX_RSP_SIG (0xbeef)
u16 sig; /* Signature, for validating corrupted msgs */
-#define OTX2_MBOX_VERSION (0x0009)
+#define OTX2_MBOX_VERSION (0x000a)
u16 ver; /* Version of msg's structure for this ID */
u16 next_msgoff; /* Offset of next msg within mailbox region */
int rc; /* Msg process'ed response code */
@@ -154,23 +154,23 @@ M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
cgx_pause_frm_cfg) \
-M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
-M(CGX_FEC_STATS, 0x211, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
-M(CGX_GET_PHY_FEC_STATS, 0x212, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
-M(CGX_FW_DATA_GET, 0x213, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
-M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\
- cgx_set_link_mode_rsp) \
-M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \
- cgx_features_info_msg) \
-M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \
-M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \
- cgx_mac_addr_add_rsp) \
-M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
+M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
+M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req, \
msg_rsp) \
-M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req, \
cgx_max_dmac_entries_get_rsp) \
-M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \
-M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \
+M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
+M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
+ cgx_set_link_mode_rsp) \
+M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
+ cgx_features_info_msg) \
+M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \
+M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \
msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
@@ -191,6 +191,7 @@ M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \
M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
msg_rsp) \
+M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
@@ -231,6 +232,8 @@ M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
npc_mcam_read_entry_req, \
npc_mcam_read_entry_rsp) \
+M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \
+ npc_set_pkind, msg_rsp) \
M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
msg_req, npc_mcam_read_base_rule_rsp) \
M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
@@ -290,10 +293,14 @@ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
#define MBOX_UP_CGX_MESSAGES \
M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
+#define MBOX_UP_CPT_MESSAGES \
+M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
+
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
+MBOX_UP_CPT_MESSAGES
#undef M
};
@@ -581,10 +588,13 @@ struct cgx_mac_addr_update_req {
};
#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
-#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */
-#define RVU_MAC_VERSION BIT_ULL(2)
-#define RVU_MAC_CGX BIT_ULL(3)
-#define RVU_MAC_RPM BIT_ULL(4)
+#define RVU_LMAC_FEAT_HIGIG2 BIT_ULL(1)
+ /* flow control from physical link higig2 messages */
+#define RVU_LMAC_FEAT_PTP BIT_ULL(2) /* precision time protocol */
+#define RVU_LMAC_FEAT_DMACF BIT_ULL(3) /* DMAC FILTER */
+#define RVU_MAC_VERSION BIT_ULL(4)
+#define RVU_MAC_CGX BIT_ULL(5)
+#define RVU_MAC_RPM BIT_ULL(6)
struct cgx_features_info_msg {
struct mbox_msghdr hdr;
@@ -599,6 +609,22 @@ struct rpm_stats_rsp {
u64 tx_stats[RPM_TX_STATS_COUNT];
};
+struct npc_set_pkind {
+ struct mbox_msghdr hdr;
+#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
+#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63)
+ u64 mode;
+#define PKIND_TX BIT_ULL(0)
+#define PKIND_RX BIT_ULL(1)
+ u8 dir;
+ u8 pkind; /* valid only when the custom flag is set */
+ u8 var_len_off; /* Offset of custom header length field.
+ * Valid only for pkind NPC_RX_CUSTOM_PRE_L2_PKIND
+ */
+ u8 var_len_off_mask; /* Mask for the length field within the offset */
+ u8 shift_dir; /* shift direction to get length of the header at var_len_off */
+};
+
/* NPA mbox message formats */
/* NPA mailbox error codes
@@ -1399,12 +1425,15 @@ struct npc_mcam_get_stats_rsp {
enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
+ PTP_OP_GET_TSTMP = 2,
+ PTP_OP_SET_THRESH = 3,
};
struct ptp_req {
struct mbox_msghdr hdr;
u8 op;
s64 scaled_ppm;
+ u64 thresh;
};
struct ptp_rsp {
@@ -1538,6 +1567,13 @@ struct cpt_rxc_time_cfg_req {
u16 active_limit;
};
+/* Mailbox message format to request a CPT_INST_S LMTST. */
+struct cpt_inst_lmtst_req {
+ struct mbox_msghdr hdr;
+ u64 inst[8];
+ u64 rsvd;
+};
+
struct sdp_node_info {
 /* Node to which this PF belongs */
u8 node_id;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 3a819b24accc..6e1192f52608 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -31,9 +31,9 @@ enum npc_kpu_la_ltype {
NPC_LT_LA_HIGIG2_ETHER,
NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_LT_LA_CH_LEN_90B_ETHER,
NPC_LT_LA_CPT_HDR,
NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
NPC_LT_LA_CUSTOM0 = 0xE,
NPC_LT_LA_CUSTOM1 = 0xF,
};
@@ -148,10 +148,11 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
-#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND
enum npc_pkind_type {
NPC_RX_LBK_PKIND = 0ULL,
+ NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL,
NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
NPC_RX_CHLEN24B_PKIND = 57ULL,
NPC_RX_CPT_HDR_PKIND,
@@ -162,6 +163,10 @@ enum npc_pkind_type {
NPC_TX_DEF_PKIND, /* NIX-TX PKIND */
};
+enum npc_interface_type {
+ NPC_INTF_MODE_DEF,
+};
+
/* list of known and supported fields in packet header and
* fields present in key structure.
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 588822a0cf21..1a8c5376297c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -176,9 +176,8 @@ enum npc_kpu_parser_state {
NPC_S_KPU1_EXDSA,
NPC_S_KPU1_HIGIG2,
NPC_S_KPU1_IH_NIX_HIGIG2,
- NPC_S_KPU1_CUSTOM_L2_90B,
+ NPC_S_KPU1_CUSTOM_PRE_L2,
NPC_S_KPU1_CPT_HDR,
- NPC_S_KPU1_CUSTOM_L2_24B,
NPC_S_KPU1_VLAN_EXDSA,
NPC_S_KPU2_CTAG,
NPC_S_KPU2_CTAG2,
@@ -188,6 +187,8 @@ enum npc_kpu_parser_state {
NPC_S_KPU2_PREHEADER,
NPC_S_KPU2_EXDSA,
NPC_S_KPU2_NGIO,
+ NPC_S_KPU2_CPT_CTAG,
+ NPC_S_KPU2_CPT_QINQ,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
@@ -979,8 +980,8 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
0,
0, 0, 0, 0,
@@ -996,27 +997,27 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 36, 40, 44, 0, 0,
- NPC_S_KPU1_CUSTOM_L2_24B, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 24, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 40, 54, 58, 0, 0,
- NPC_S_KPU1_CPT_HDR, 0, 0,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 40, 0,
NPC_LID_LA, NPC_LT_NA,
0,
- 0, 0, 0, 0,
+ 7, 7, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 102, 106, 110, 0, 0,
- NPC_S_KPU1_CUSTOM_L2_90B, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 90, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
0,
0, 0, 0, 0,
@@ -1711,7 +1712,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_IP,
0xffff,
0x0000,
@@ -1720,7 +1721,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_IP6,
0xffff,
0x0000,
@@ -1729,7 +1730,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_ARP,
0xffff,
0x0000,
@@ -1738,7 +1739,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_RARP,
0xffff,
0x0000,
@@ -1747,7 +1748,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_PTP,
0xffff,
0x0000,
@@ -1756,7 +1757,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_FCOE,
0xffff,
0x0000,
@@ -1765,7 +1766,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_CTAG,
0xffff,
NPC_ETYPE_CTAG,
@@ -1774,7 +1775,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_CTAG,
0xffff,
0x0000,
@@ -1783,7 +1784,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_SBTAG,
0xffff,
0x0000,
@@ -1792,7 +1793,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_QINQ,
0xffff,
0x0000,
@@ -1801,7 +1802,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_ETAG,
0xffff,
0x0000,
@@ -1810,7 +1811,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
0x0000,
@@ -1819,7 +1820,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
0x0000,
@@ -1828,7 +1829,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_NSH,
0xffff,
0x0000,
@@ -1837,7 +1838,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
0x0000,
0x0000,
0x0000,
@@ -1847,168 +1848,33 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
NPC_ETYPE_IP,
0xffff,
0x0000,
0x0000,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
0x0000,
0x0000,
},
{
NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- NPC_ETYPE_QINQ,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- 0x0000,
- 0x0000,
- NPC_ETYPE_IP,
- 0xffff,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- 0x0000,
- 0x0000,
NPC_ETYPE_IP6,
0xffff,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
0x0000,
- 0xffff,
0x0000,
0x0000,
- NPC_ETYPE_CTAG,
- 0xffff,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- 0x0000,
0x0000,
- NPC_ETYPE_QINQ,
- 0xffff,
},
{
NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_PTP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_FCOE,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
NPC_ETYPE_CTAG,
0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
0x0000,
0x0000,
0x0000,
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_S_KPU1_CPT_HDR, 0xff,
NPC_ETYPE_QINQ,
0xffff,
0x0000,
@@ -2017,51 +1883,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_ETAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_MPLSU,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_MPLSM,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_NSH,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU1_VLAN_EXDSA, 0xff,
NPC_ETYPE_CTAG,
0xffff,
@@ -3066,6 +2887,42 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -7496,15 +7353,6 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
NPC_S_KPU9_GTPU, 0xff,
0x0000,
0x0000,
- NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
- NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU9_GTPU, 0xff,
- 0x0000,
- 0x0000,
NPC_GTP_PT_GTP | NPC_GTP_VER1,
NPC_GTP_PT_MASK | NPC_GTP_VER_MASK,
0x0000,
@@ -9192,159 +9040,127 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
- NPC_S_KPU5_IP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_IP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
6, 0, 0, 3, 0,
- NPC_S_KPU5_IP6, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_IP6, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_ARP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_ARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_RARP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_RARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_PTP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_PTP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_FCOE, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_FCOE, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 0, 0, 0,
- NPC_S_KPU2_CTAG2, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_CTAG2, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_CTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 22, 0, 0,
- NPC_S_KPU2_SBTAG, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_SBTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_QINQ, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 26, 0, 0,
- NPC_S_KPU2_ETAG, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ NPC_S_KPU2_ETAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 0, 0, 2, 0,
- NPC_S_KPU4_NSH, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_WITH_NSH,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_UNK_ETYPE,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 3, 0,
- NPC_S_KPU5_CPT_IP, 56, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_S_KPU4_NSH, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
- NPC_S_KPU5_CPT_IP6, 56, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 54, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 54, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
- NPC_S_KPU5_CPT_IP, 60, 1,
+ NPC_S_KPU5_CPT_IP, 14, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
0,
0, 0, 0, 0,
@@ -9352,7 +9168,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
6, 0, 0, 3, 0,
- NPC_S_KPU5_CPT_IP6, 60, 1,
+ NPC_S_KPU5_CPT_IP6, 14, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
0,
0, 0, 0, 0,
@@ -9360,7 +9176,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 58, 1,
+ NPC_S_KPU2_CPT_CTAG, 12, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
0, 0, 0, 0,
@@ -9368,141 +9184,13 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 58, 1,
+ NPC_S_KPU2_CPT_QINQ, 12, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
- NPC_F_LA_L_UNK_ETYPE,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 3, 0,
- NPC_S_KPU5_IP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
- NPC_S_KPU5_IP6, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_ARP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_RARP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_PTP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_FCOE, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 12, 0, 0, 0,
- NPC_S_KPU2_CTAG2, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 22, 0, 0,
- NPC_S_KPU2_SBTAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 12, 26, 0, 0,
- NPC_S_KPU2_ETAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 0, 0, 2, 0,
- NPC_S_KPU4_NSH, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_WITH_NSH,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_UNK_ETYPE,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 0, 0, 1, 0,
NPC_S_KPU3_VLAN_EXDSA, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
@@ -10395,6 +10083,38 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -14335,16 +14055,8 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU12_TU_IP, 8, 1,
- NPC_LID_LE, NPC_LT_LE_GTPU,
- NPC_F_LE_L_GTPU_G_PDU,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU12_TU_IP, 8, 1,
+ 8, 0, 6, 2, 1,
+ NPC_S_NA, 0, 1,
NPC_LID_LE, NPC_LT_LE_GTPU,
0,
0, 0, 0, 0,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index 9b8e59f4c206..d6321de3cc17 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -27,54 +27,29 @@
#define PCI_DEVID_CN10K_PTP 0xA09E
#define PCI_PTP_BAR_NO 0
-#define PCI_RST_BAR_NO 0
#define PTP_CLOCK_CFG 0xF00ULL
#define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0)
+#define PTP_CLOCK_CFG_EXT_CLK_EN BIT_ULL(1)
+#define PTP_CLOCK_CFG_EXT_CLK_IN_MASK GENMASK_ULL(7, 2)
+#define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9)
+#define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8)
+#define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10)
+#define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30)
+#define PTP_CLOCK_CFG_PPS_INV BIT_ULL(31)
+
+#define PTP_PPS_HI_INCR 0xF60ULL
+#define PTP_PPS_LO_INCR 0xF68ULL
+#define PTP_PPS_THRESH_HI 0xF58ULL
+
#define PTP_CLOCK_LO 0xF08ULL
#define PTP_CLOCK_HI 0xF10ULL
#define PTP_CLOCK_COMP 0xF18ULL
-
-#define RST_BOOT 0x1600ULL
-#define RST_MUL_BITS GENMASK_ULL(38, 33)
-#define CLOCK_BASE_RATE 50000000ULL
+#define PTP_TIMESTAMP 0xF20ULL
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
-static u64 get_clock_rate(void)
-{
- u64 cfg, ret = CLOCK_BASE_RATE * 16;
- struct pci_dev *pdev;
- void __iomem *base;
-
- /* To get the input clock frequency with which PTP co-processor
- * block is running the base frequency(50 MHz) needs to be multiplied
- * with multiplier bits present in RST_BOOT register of RESET block.
- * Hence below code gets the multiplier bits from the RESET PCI
- * device present in the system.
- */
- pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
- PCI_DEVID_OCTEONTX2_RST, NULL);
- if (!pdev)
- goto error;
-
- base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
- if (!base)
- goto error_put_pdev;
-
- cfg = readq(base + RST_BOOT);
- ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg);
-
- iounmap(base);
-
-error_put_pdev:
- pci_dev_put(pdev);
-
-error:
- return ret;
-}
-
struct ptp *ptp_get(void)
{
struct ptp *ptp = first_ptp_block;
@@ -145,13 +120,74 @@ static int ptp_get_clock(struct ptp *ptp, u64 *clk)
return 0;
}
+void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
+{
+ struct pci_dev *pdev;
+ u64 clock_comp;
+ u64 clock_cfg;
+
+ if (!ptp)
+ return;
+
+ pdev = ptp->pdev;
+
+ if (!sclk) {
+ dev_err(&pdev->dev, "PTP input clock cannot be zero\n");
+ return;
+ }
+
+ /* sclk is in MHz */
+ ptp->clock_rate = sclk * 1000000;
+
+ /* Enable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+
+ if (ext_clk_freq) {
+ ptp->clock_rate = ext_clk_freq;
+ /* Set GPIO as PTP clock source */
+ clock_cfg &= ~PTP_CLOCK_CFG_EXT_CLK_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_EXT_CLK_EN;
+ }
+
+ if (extts) {
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EDGE;
+ /* Set GPIO as timestamping source */
+ clock_cfg &= ~PTP_CLOCK_CFG_TSTMP_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EN;
+ }
+
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ /* Set 50% duty cycle for 1Hz output */
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
+
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ /* Initial compensation value to start the nanosecs counter */
+ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
+}
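The PPS increment writes and the PTP_CLOCK_COMP value computed in ptp_start() above all use the same 32.32 fixed-point nanosecond format; that interpretation is inferred from the arithmetic in the code, not from the CN10K hardware manual. A standalone sketch reproducing the constants, with an example sclk of 1000 MHz (not a hardware default):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t clock_rate = 1000ULL * 1000000;	/* sclk in MHz -> Hz */

		/* ns advanced per input tick, as written to PTP_CLOCK_COMP */
		uint64_t clock_comp = (1000000000ULL << 32) / clock_rate;

		/* 500000000 << 32 == 0x1dcd650000000000: 500 ms high plus
		 * 500 ms low gives the 50% duty cycle 1 Hz PPS output.
		 */
		uint64_t pps_half = 500000000ULL << 32;

		printf("clock_comp = %#llx\n", (unsigned long long)clock_comp);
		printf("pps_half   = %#llx\n", (unsigned long long)pps_half);
		return 0;
	}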
+
+static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
+{
+ *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+
+ return 0;
+}
+
+static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
+{
+ writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+
+ return 0;
+}
+
static int ptp_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ptp *ptp;
- u64 clock_comp;
- u64 clock_cfg;
int err;
ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
@@ -172,17 +208,6 @@ static int ptp_probe(struct pci_dev *pdev,
ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
- ptp->clock_rate = get_clock_rate();
-
- /* Enable PTP clock */
- clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
- clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
- writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
-
- clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
- /* Initial compensation value to start the nanosecs counter */
- writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
-
pci_set_drvdata(pdev, ptp);
if (!first_ptp_block)
first_ptp_block = ptp;
@@ -272,6 +297,12 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_GET_CLOCK:
err = ptp_get_clock(rvu->ptp, &rsp->clk);
break;
+ case PTP_OP_GET_TSTMP:
+ err = ptp_get_tstmp(rvu->ptp, &rsp->clk);
+ break;
+ case PTP_OP_SET_THRESH:
+ err = ptp_set_thresh(rvu->ptp, req->thresh);
+ break;
default:
err = -EINVAL;
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index 76d404b24552..1b81a0493cd3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -20,6 +20,7 @@ struct ptp {
struct ptp *ptp_get(void);
void ptp_put(struct ptp *ptp);
+void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts);
extern struct pci_driver ptp_driver;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 07b0eafccad8..e695fa0e82a9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -29,6 +29,7 @@ static struct mac_ops rpm_mac_ops = {
.mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
.mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
.mac_pause_frm_config = rpm_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = rpm_lmac_ptp_config,
};
struct mac_ops *rpm_get_mac_ops(void)
@@ -270,3 +271,19 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
return 0;
}
+
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG);
+ if (enable)
+ cfg |= RPMX_RX_TS_PREPEND;
+ else
+ cfg &= ~RPMX_RX_TS_PREPEND;
+ rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index f0b069442dcc..57c8a687b488 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -14,6 +14,8 @@
#define PCI_DEVID_CN10K_RPM 0xA060
/* Registers */
+#define RPMX_CMRX_CFG 0x00
+#define RPMX_RX_TS_PREPEND BIT_ULL(22)
#define RPMX_CMRX_SW_INT 0x180
#define RPMX_CMRX_SW_INT_W1S 0x188
#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
@@ -54,4 +56,5 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
u8 rx_pause);
int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 5909173ff788..cb56e171ddd4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -854,6 +854,7 @@ static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
block->lfcfg_reg = NIX_PRIV_LFX_CFG;
block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
block->lfreset_reg = NIX_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "NIX%d", blkid);
rvu->nix_blkaddr[blkid] = blkaddr;
return rvu_alloc_bitmap(&block->lf);
@@ -883,6 +884,7 @@ static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
block->lfcfg_reg = CPT_PRIV_LFX_CFG;
block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
block->lfreset_reg = CPT_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "CPT%d", blkid);
return rvu_alloc_bitmap(&block->lf);
}
@@ -940,6 +942,7 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
block->lfcfg_reg = NPA_PRIV_LFX_CFG;
block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
block->lfreset_reg = NPA_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "NPA");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -979,6 +982,7 @@ nix:
block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
+ block->rvu = rvu;
sprintf(block->name, "SSO GROUP");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -1003,6 +1007,7 @@ ssow:
block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
block->lfreset_reg = SSOW_AF_LF_HWS_RST;
+ block->rvu = rvu;
sprintf(block->name, "SSOWS");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -1028,6 +1033,7 @@ tim:
block->lfcfg_reg = TIM_PRIV_LFX_CFG;
block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
block->lfreset_reg = TIM_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "TIM");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -2399,7 +2405,6 @@ static void rvu_mbox_destroy(struct mbox_wq_info *mw)
int devid;
if (mw->mbox_wq) {
- flush_workqueue(mw->mbox_wq);
destroy_workqueue(mw->mbox_wq);
mw->mbox_wq = NULL;
}
@@ -2527,7 +2532,8 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
rvu_npa_lf_teardown(rvu, pcifunc, lf);
else if ((block->addr == BLKADDR_CPT0) ||
(block->addr == BLKADDR_CPT1))
- rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot);
+ rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
+ slot);
err = rvu_lf_reset(rvu, block, lf);
if (err) {
@@ -2725,6 +2731,8 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
{
int irq;
+ rvu_cpt_unregister_interrupts(rvu);
+
/* Disable the Mbox interrupt */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
@@ -2934,6 +2942,11 @@ static int rvu_register_interrupts(struct rvu *rvu)
goto fail;
}
rvu->irq_allocated[offset] = true;
+
+ ret = rvu_cpt_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
return 0;
fail:
@@ -2944,7 +2957,6 @@ fail:
static void rvu_flr_wq_destroy(struct rvu *rvu)
{
if (rvu->flr_wq) {
- flush_workqueue(rvu->flr_wq);
destroy_workqueue(rvu->flr_wq);
rvu->flr_wq = NULL;
}
@@ -3240,6 +3252,10 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&rvu->rswitch.switch_lock);
+ if (rvu->fwdata)
+ ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
+ rvu->fwdata->ptp_ext_tstamp);
+
return 0;
err_dl:
rvu_unregister_dl(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index f59169cdc0db..66e45d733824 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -101,6 +101,7 @@ struct rvu_block {
u64 msixcfg_reg;
u64 lfreset_reg;
unsigned char name[NAME_SIZE];
+ struct rvu *rvu;
};
struct nix_mcast {
@@ -220,6 +221,7 @@ struct rvu_pfvf {
u16 maxlen;
u16 minlen;
+ bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
@@ -237,6 +239,7 @@ struct rvu_pfvf {
bool cgx_in_use; /* this PF/VF using CGX? */
int cgx_users; /* number of cgx users - used only by PFs */
+ int intf_mode;
u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
@@ -394,7 +397,9 @@ struct rvu_fwdata {
u64 mcam_addr;
u64 mcam_sz;
u64 msixtr_base;
-#define FWDATA_RESERVED_MEM 1023
+ u32 ptp_ext_clk_rate;
+ u32 ptp_ext_tstamp;
+#define FWDATA_RESERVED_MEM 1022
u64 reserved[FWDATA_RESERVED_MEM];
#define CGX_MAX 5
#define CGX_LMACS_MAX 4
@@ -796,6 +801,7 @@ void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, struct mcam_entry *entry,
u8 *intf, u8 *ena);
+bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc);
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
@@ -807,7 +813,11 @@ bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
int index);
/* CPT APIs */
-int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
+int rvu_cpt_register_interrupts(struct rvu *rvu);
+void rvu_cpt_unregister_interrupts(struct rvu *rvu);
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
+ int slot);
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
/* CN10K RVU */
int rvu_set_channels_base(struct rvu *rvu);
@@ -829,4 +839,7 @@ void rvu_switch_enable(struct rvu *rvu);
void rvu_switch_disable(struct rvu *rvu);
void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir);
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 81e8ea9ee30e..2ca182a4ce82 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -324,7 +324,6 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
if (rvu->cgx_evh_wq) {
- flush_workqueue(rvu->cgx_evh_wq);
destroy_workqueue(rvu->cgx_evh_wq);
rvu->cgx_evh_wq = NULL;
}
@@ -411,7 +410,7 @@ int rvu_cgx_exit(struct rvu *rvu)
* VF's of mapped PF and other PFs are not allowed. This fn() checks
* whether a PFFUNC is permitted to do the config or not.
*/
-static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
+bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
@@ -694,7 +693,9 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -711,13 +712,16 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
- cgx_lmac_ptp_config(cgxd, lmac_id, enable);
+ mac_ops = get_mac_ops(cgxd);
+	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
/* If PTP is enabled then inform NPC that packets to be
* parsed by this PF will have their data shifted by 8 bytes
* and if PTP is disabled then no shift is required
*/
if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
return -EINVAL;
+ /* This flag is required to clean up CGX conf if app gets killed */
+ pfvf->hw_rx_tstamp_en = enable;
return 0;
}
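With RPMX_RX_TS_PREPEND set, the MAC prepends an 8-byte timestamp to every received frame, which is why npc_config_ts_kpuaction() shifts parsing by 8 bytes. A hypothetical view of the resulting layout (the struct is illustrative only, not a driver type):

	#include <linux/if_ether.h>
	#include <linux/types.h>

	struct otx2_rx_tstamped_frame {
		__be64 hw_timestamp;	/* 8 bytes prepended by CGX/RPM */
		struct ethhdr eth;	/* packet proper starts here, so the
					 * KPU parser must skip 8 bytes
					 */
	};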
@@ -725,6 +729,9 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
+ return -EPERM;
+
return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 267d092b8e97..45357deecabb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -37,6 +37,236 @@
(_rsp)->free_sts_##etype = free_sts; \
})
+static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg0, reg1, reg2;
+
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+ if (!is_rvu_otx2(rvu)) {
+ reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2));
+ dev_err_ratelimited(rvu->dev,
+ "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx",
+ reg0, reg1, reg2);
+ } else {
+ dev_err_ratelimited(rvu->dev,
+ "Received CPTAF FLT irq : 0x%llx, 0x%llx",
+ reg0, reg1);
+ }
+
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
+ if (!is_rvu_otx2(rvu))
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2);
+
+ return IRQ_HANDLED;
+}
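All three handlers below follow the same acknowledge idiom as this one: read the interrupt register, log it, then write the captured value back. A minimal sketch of that write-1-to-clear pattern, with an illustrative offset parameter rather than a real register:

	static void cpt_ack_w1c(struct rvu *rvu, int blkaddr, u64 offset)
	{
		u64 pending = rvu_read64(rvu, blkaddr, offset);

		if (pending)
			dev_err_ratelimited(rvu->dev, "fault: 0x%llx", pending);
		/* W1C: clears only the bits read, so an event latched
		 * between the read and the write is not lost.
		 */
		rvu_write64(rvu, blkaddr, offset, pending);
	}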
+
+static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RVU irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_ras_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RAS irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static int rvu_cpt_do_register_interrupt(struct rvu_block *block, int irq_offs,
+ irq_handler_t handler,
+ const char *name)
+{
+ struct rvu *rvu = block->rvu;
+ int ret;
+
+ ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
+ name, block);
+ if (ret) {
+ dev_err(rvu->dev, "RVUAF: %s irq registration failed", name);
+ return ret;
+ }
+
+ WARN_ON(rvu->irq_allocated[irq_offs]);
+ rvu->irq_allocated[irq_offs] = true;
+ return 0;
+}
+
+static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ int i;
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[off + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, off + i), block);
+ rvu->irq_allocated[off + i] = false;
+ }
+}
+
+static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int i, offs;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return;
+ }
+ block = &hw->block[blkaddr];
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_unregister_interrupts(block, offs);
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), block);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+void rvu_cpt_unregister_interrupts(struct rvu *rvu)
+{
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT0);
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT1);
+}
+
+static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ char irq_name[16];
+ int i, ret;
+
+ for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
+ snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ ret = rvu_cpt_do_register_interrupt(block, off + i,
+ rvu_cpt_af_flt_intr_handler,
+ irq_name);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int i, offs, ret = 0;
+ char irq_name[16];
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return 0;
+
+ block = &hw->block[blkaddr];
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_register_interrupts(block, offs);
+
+ for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
+ snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ ret = rvu_cpt_do_register_interrupt(block, offs + i,
+ rvu_cpt_af_flt_intr_handler,
+ irq_name);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+int rvu_cpt_register_interrupts(struct rvu *rvu)
+{
+ int ret;
+
+ ret = cpt_register_interrupts(rvu, BLKADDR_CPT0);
+ if (ret)
+ return ret;
+
+ return cpt_register_interrupts(rvu, BLKADDR_CPT1);
+}
+
static int get_cpt_pf_num(struct rvu *rvu)
{
int i, domain_nr, cpt_pf_num = -1;
@@ -147,9 +377,13 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
- /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC */
- val = (u64)req->nix_pf_func << 48 |
- (u64)req->sso_pf_func << 32;
+ /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC. EXE_LDWB is set
+ * on reset.
+ */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(47, 32));
+ val |= ((u64)req->nix_pf_func << 48 |
+ (u64)req->sso_pf_func << 32);
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
}
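The clear-then-set above can also be written with named masks and FIELD_PREP() from <linux/bitfield.h>; the mask names below are assumptions, only the bit ranges come from the code:

	#define CPT_LF_CTL2_NIX_PF_FUNC	GENMASK_ULL(63, 48)
	#define CPT_LF_CTL2_SSO_PF_FUNC	GENMASK_ULL(47, 32)

	val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
	val &= ~(CPT_LF_CTL2_NIX_PF_FUNC | CPT_LF_CTL2_SSO_PF_FUNC);
	val |= FIELD_PREP(CPT_LF_CTL2_NIX_PF_FUNC, req->nix_pf_func) |
	       FIELD_PREP(CPT_LF_CTL2_SSO_PF_FUNC, req->sso_pf_func);
	rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);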
@@ -159,7 +393,7 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
{
u16 pcifunc = req->hdr.pcifunc;
- int num_lfs, cptlf, slot;
+ int num_lfs, cptlf, slot, err;
struct rvu_block *block;
block = &rvu->hw->block[blkaddr];
@@ -173,10 +407,15 @@ static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
if (cptlf < 0)
return CPT_AF_ERR_LF_INVALID;
- /* Reset CPT LF group and priority */
- rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), 0x0);
- /* Reset CPT LF NIX_PF_FUNC and SSO_PF_FUNC */
- rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), 0x0);
+ /* Perform teardown */
+ rvu_cpt_lf_teardown(rvu, pcifunc, blkaddr, cptlf, slot);
+
+ /* Reset LF */
+ err = rvu_lf_reset(rvu, block, cptlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, cptlf);
+ }
}
return 0;
@@ -556,6 +795,58 @@ int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
+}
+
+static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
+{
+ struct cpt_rxc_time_cfg_req req;
+ int timeout = 2000;
+ u64 reg;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ /* Set time limit to minimum values, so that rxc entries will be
+ * flushed out quickly.
+ */
+ req.step = 1;
+ req.zombie_thres = 1;
+ req.zombie_limit = 1;
+ req.active_thres = 1;
+ req.active_limit = 1;
+
+ cpt_rxc_time_cfg(rvu, &req, blkaddr);
+
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ACTIVE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n");
+
+ timeout = 2000;
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ZOMBIE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
+}
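Each of the two drain loops above could equally be expressed with the generic helper from <linux/iopoll.h>; a sketch for the active-count loop, keeping the same 1 us step and ~2 ms budget (not part of this patch):

	err = read_poll_timeout(rvu_read64, reg,
				!FIELD_GET(RXC_ACTIVE_COUNT, reg),
				1, 2000, false,
				rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
	if (err)
		dev_warn(rvu->dev, "RXC active entries did not drain\n");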
+
#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
@@ -620,14 +911,12 @@ static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
}
-int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
{
- int blkaddr;
u64 reg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, pcifunc);
- if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
- return -EINVAL;
+ if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
+ cpt_rxc_teardown(rvu, blkaddr);
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
@@ -644,3 +933,154 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
return 0;
}
+
+#define CPT_RES_LEN 16
+#define CPT_SE_IE_EGRP 1ULL
+
+static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
+ int nix_blkaddr)
+{
+ int cpt_pf_num = get_cpt_pf_num(rvu);
+ struct cpt_inst_lmtst_req *req;
+ dma_addr_t res_daddr;
+ int timeout = 3000;
+ u8 cpt_idx;
+ u64 *inst;
+ u16 *res;
+ int rc;
+
+ res = kzalloc(CPT_RES_LEN, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res_daddr = dma_map_single(rvu->dev, res, CPT_RES_LEN,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(rvu->dev, res_daddr)) {
+ dev_err(rvu->dev, "DMA mapping failed for CPT result\n");
+ rc = -EFAULT;
+ goto res_free;
+ }
+ *res = 0xFFFF;
+
+ /* Send mbox message to CPT PF */
+ req = (struct cpt_inst_lmtst_req *)
+ otx2_mbox_alloc_msg_rsp(&rvu->afpf_wq_info.mbox_up,
+ cpt_pf_num, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req) {
+ rc = -ENOMEM;
+ goto res_daddr_unmap;
+ }
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.id = MBOX_MSG_CPT_INST_LMTST;
+
+ inst = req->inst;
+ /* Prepare CPT_INST_S */
+ inst[0] = 0;
+ inst[1] = res_daddr;
+ /* AF PF FUNC */
+ inst[2] = 0;
+ /* Set QORD */
+ inst[3] = 1;
+ inst[4] = 0;
+ inst[5] = 0;
+ inst[6] = 0;
+ /* Set EGRP */
+ inst[7] = CPT_SE_IE_EGRP << 61;
+
+ /* Subtract 1 from the NIX-CPT credit count to preserve
+ * credit counts.
+ */
+ cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
+ rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ BIT_ULL(22) - 1);
+
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ if (rc)
+ dev_warn(rvu->dev, "notification to pf %d failed\n",
+ cpt_pf_num);
+ /* Wait for CPT instruction to be completed */
+ do {
+ mdelay(1);
+ if (*res == 0xFFFF)
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for result hits hard loop counter\n");
+
+res_daddr_unmap:
+ dma_unmap_single(rvu->dev, res_daddr, CPT_RES_LEN, DMA_BIDIRECTIONAL);
+res_free:
+ kfree(res);
+
+ return 0;
+}
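Word 7 of the instruction above carries the engine group in its top bits. The same packing with a named mask; the 63:61 range follows from the shift in the code, while the mask name itself is assumed:

	#define CPT_INST_W7_EGRP	GENMASK_ULL(63, 61)

	inst[7] = FIELD_PREP(CPT_INST_W7_EGRP, CPT_SE_IE_EGRP);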
+
+#define CTX_CAM_PF_FUNC GENMASK_ULL(61, 46)
+#define CTX_CAM_CPTR GENMASK_ULL(45, 0)
+
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
+{
+ int nix_blkaddr, blkaddr;
+ u16 max_ctx_entries, i;
+ int slot = 0, num_lfs;
+ u64 reg, cam_data;
+ int rc;
+
+ nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (nix_blkaddr < 0)
+ return -EINVAL;
+
+ if (is_rvu_otx2(rvu))
+ return 0;
+
+ blkaddr = (nix_blkaddr == BLKADDR_NIX1) ? BLKADDR_CPT1 : BLKADDR_CPT0;
+
+ /* Submit CPT_INST_S to track when all packets have been
+ * flushed through for the NIX PF FUNC in inline inbound case.
+ */
+ rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr);
+ if (rc)
+ return rc;
+
+ /* Wait for rxc entries to be flushed out */
+ cpt_rxc_teardown(rvu, blkaddr);
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
+ max_ctx_entries = (reg >> 48) & 0xFFF;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ blkaddr);
+ if (num_lfs == 0) {
+ dev_warn(rvu->dev, "CPT LF is not configured\n");
+ goto unlock;
+ }
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+
+ for (i = 0; i < max_ctx_entries; i++) {
+ cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));
+
+ if ((FIELD_GET(CTX_CAM_PF_FUNC, cam_data) == pcifunc) &&
+ FIELD_GET(CTX_CAM_CPTR, cam_data)) {
+ reg = BIT_ULL(46) | FIELD_GET(CTX_CAM_CPTR, cam_data);
+ rvu_write64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTX_FLUSH),
+ reg);
+ }
+ }
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+
+unlock:
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index de9562acd04b..70bacd38a6d9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1510,7 +1510,6 @@ int rvu_register_dl(struct rvu *rvu)
return -ENOMEM;
}
- devlink_register(dl);
rvu_dl = devlink_priv(dl);
rvu_dl->dl = dl;
rvu_dl->rvu = rvu;
@@ -1531,13 +1530,11 @@ int rvu_register_dl(struct rvu *rvu)
goto err_dl_health;
}
- devlink_params_publish(dl);
-
+ devlink_register(dl);
return 0;
err_dl_health:
rvu_health_reporters_destroy(rvu);
- devlink_unregister(dl);
devlink_free(dl);
return err;
}
@@ -1547,12 +1544,9 @@ void rvu_unregister_dl(struct rvu *rvu)
struct rvu_devlink *rvu_dl = rvu->rvu_dl;
struct devlink *dl = rvu_dl->dl;
- if (!dl)
- return;
-
+ devlink_unregister(dl);
devlink_params_unregister(dl, rvu_af_dl_params,
ARRAY_SIZE(rvu_af_dl_params));
rvu_health_reporters_destroy(rvu);
- devlink_unregister(dl);
devlink_free(dl);
}
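This hunk (and the matching otx2_devlink.c change further down) moves devlink_register() to the very end of setup, which is what lets the devlink_params_publish() call go away. A condensed sketch of the resulting pattern, with error handling elided:

	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink), dev);
	if (!dl)
		return -ENOMEM;

	/* ... set up params and health reporters on the private data ... */

	devlink_register(dl);	/* publish only the fully built instance */
	return 0;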
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index e7bffebc3385..7761dcf17b91 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4512,10 +4512,17 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
+#define RX_SA_BASE GENMASK_ULL(52, 7)
+
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct hwctx_disable_req ctx_req;
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ u64 sa_base;
+ void *cgxd;
int err;
ctx_req.hdr.pcifunc = pcifunc;
@@ -4552,9 +4559,33 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
dev_err(rvu->dev, "CQ ctx disable failed\n");
}
+ /* reset HW config done for Switch headers */
+ rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
+ (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
+
+ /* Disabling CGX and NPC config done for PTP */
+ if (pfvf->hw_rx_tstamp_en) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+ mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
+ /* Undo NPC config done for PTP */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
+ dev_err(rvu->dev, "NPC config for PTP failed\n");
+ pfvf->hw_rx_tstamp_en = false;
+ }
+
nix_ctx_free(rvu, pfvf);
nix_free_all_bandprof(rvu, pcifunc);
+
+ sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
+ if (FIELD_GET(RX_SA_BASE, sa_base)) {
+ err = rvu_cpt_ctx_flush(rvu, pcifunc);
+ if (err)
+ dev_err(rvu->dev,
+ "CPT ctx flush failed with error: %d\n", err);
+ }
}
#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 5efb4174e82d..bb6b42bbefa4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -3167,6 +3167,102 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
return 0;
}
+static int
+npc_set_var_len_offset_pkind(struct rvu *rvu, u16 pcifunc, u64 pkind,
+ u8 var_len_off, u8 var_len_off_mask, u8 shift_dir)
+{
+ struct npc_kpu_action0 *act0;
+ u8 shift_count = 0;
+ int blkaddr;
+ u64 val;
+
+ if (!var_len_off_mask)
+ return -EINVAL;
+
+ if (var_len_off_mask != 0xff) {
+ if (shift_dir)
+ shift_count = __ffs(var_len_off_mask);
+ else
+ shift_count = (8 - __fls(var_len_off_mask));
+ }
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+ val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
+ act0 = (struct npc_kpu_action0 *)&val;
+ act0->var_len_shift = shift_count;
+ act0->var_len_right = shift_dir;
+ act0->var_len_mask = var_len_off_mask;
+ act0->var_len_offset = var_len_off;
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
+ return 0;
+}
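A standalone check of the shift arithmetic above for an assumed mask of 0x3c (__ffs() returns the lowest set bit index, __fls() the highest, both 0-based):

	#include <stdio.h>

	int main(void)
	{
		unsigned int mask = 0x3c;	/* bits 5..2 set */
		unsigned int lo = __builtin_ctz(mask);		/* __ffs -> 2 */
		unsigned int hi = 31 - __builtin_clz(mask);	/* __fls -> 5 */

		printf("right shift: %u\n", lo);	/* shift_dir != 0 */
		printf("left shift:  %u\n", 8 - hi);	/* shift_dir == 0 */
		return 0;
	}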
+
+int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr, nixlf, rc, intf_mode;
+ int pf = rvu_get_pf(pcifunc);
+ u64 rxpkind, txpkind;
+ u8 cgx_id, lmac_id;
+
+ /* use default pkind to disable edsa/higig */
+ rxpkind = rvu_npc_get_pkind(rvu, pf);
+ txpkind = NPC_TX_DEF_PKIND;
+ intf_mode = NPC_INTF_MODE_DEF;
+
+ if (mode & OTX2_PRIV_FLAGS_CUSTOM) {
+ if (pkind == NPC_RX_CUSTOM_PRE_L2_PKIND) {
+ rc = npc_set_var_len_offset_pkind(rvu, pcifunc, pkind,
+ var_len_off,
+ var_len_off_mask,
+ shift_dir);
+ if (rc)
+ return rc;
+ }
+ rxpkind = pkind;
+ txpkind = pkind;
+ }
+
+ if (dir & PKIND_RX) {
+ /* rx pkind set req valid only for cgx mapped PFs */
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return 0;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ rxpkind);
+ if (rc)
+ return rc;
+ }
+
+ if (dir & PKIND_TX) {
+ /* Tx pkind set request valid if PCIFUNC has NIXLF attached */
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (rc)
+ return rc;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf),
+ txpkind);
+ }
+
+ pfvf->intf_mode = intf_mode;
+ return 0;
+}
+
+int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu, struct npc_set_pkind *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode,
+ req->dir, req->pkind, req->var_len_off,
+ req->var_len_off_mask, req->shift_dir);
+}
+
int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
struct msg_req *req,
struct npc_mcam_read_base_rule_rsp *rsp)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index dbaeb10de7c2..22cd751613cd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -527,6 +527,7 @@
#define CPT_AF_CTX_WBACK_LATENCY_PC (0x49448ull)
#define CPT_AF_CTX_PSH_PC (0x49450ull)
#define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull)
+#define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3)
#define CPT_AF_RXC_TIME (0x50010ull)
#define CPT_AF_RXC_TIME_CFG (0x50018ull)
#define CPT_AF_RXC_DFRG (0x50020ull)
@@ -544,6 +545,7 @@
#define CPT_LF_CTL 0x10
#define CPT_LF_INPROG 0x40
#define CPT_LF_Q_GRP_PTR 0x120
+#define CPT_LF_CTX_FLUSH 0x510
#define NPC_AF_BLK_RST (0x00040)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 77ac96693f04..edc9367b1b95 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -62,6 +62,24 @@ enum rvu_af_int_vec_e {
RVU_AF_INT_VEC_CNT = 0x5,
};
+/* CPT Admin function Interrupt Vector Enumeration */
+enum cpt_af_int_vec_e {
+ CPT_AF_INT_VEC_FLT0 = 0x0,
+ CPT_AF_INT_VEC_FLT1 = 0x1,
+ CPT_AF_INT_VEC_RVU = 0x2,
+ CPT_AF_INT_VEC_RAS = 0x3,
+ CPT_AF_INT_VEC_CNT = 0x4,
+};
+
+enum cpt_10k_af_int_vec_e {
+ CPT_10K_AF_INT_VEC_FLT0 = 0x0,
+ CPT_10K_AF_INT_VEC_FLT1 = 0x1,
+ CPT_10K_AF_INT_VEC_FLT2 = 0x2,
+ CPT_10K_AF_INT_VEC_RVU = 0x3,
+ CPT_10K_AF_INT_VEC_RAS = 0x4,
+ CPT_10K_AF_INT_VEC_CNT = 0x5,
+};
+
/* NPA Admin function Interrupt Vector Enumeration */
enum npa_af_int_vec_e {
NPA_AF_INT_VEC_RVU = 0x0,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index b92c267628b8..0048b5946712 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -3,11 +3,11 @@
# Makefile for Marvell's RVU Ethernet device drivers
#
-obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
-obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
+obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o otx2_ptp.o
+obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
+ otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
otx2_devlink.o
rvu_nicvf-y := otx2_vf.o otx2_devlink.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 95f21dfdba48..fd4f083c699e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -88,7 +88,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
/* Only one SMQ is allocated, map all SQ's to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
+ aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 78df173e6df2..66da31f30d3e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -188,7 +188,7 @@ static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
return PTR_ERR(msghdr);
}
rsp = (struct nix_get_mac_addr_rsp *)msghdr;
- ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
+ eth_hw_addr_set(netdev, rsp->mac_addr);
mutex_unlock(&pfvf->mbox.lock);
return 0;
@@ -203,7 +203,7 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
/* update dmac field in vlan offload rule */
if (netif_running(netdev) &&
pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
@@ -231,7 +231,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return -ENOMEM;
}
- req->maxlen = pfvf->max_frs;
+ req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
@@ -590,7 +590,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
u64 schq, parent;
u64 dwrr_val;
- dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
+ dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
if (!req)
@@ -603,9 +603,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
- req->regval[0] = ((pfvf->netdev->max_mtu + OTX2_ETH_HLEN) << 8)
- | OTX2_MIN_MTU;
-
+ req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
(0x2ULL << 36);
req->num_regs++;
@@ -718,7 +716,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
int timeout = 1000;
ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
incr = (u64)qidx << 32;
while (timeout) {
val = otx2_atomic64_add(incr, ptr);
@@ -800,7 +798,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
/* Only one SMQ is allocated, map all SQ's to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
+ aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
@@ -835,17 +833,19 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;
- err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
- TSO_HEADER_SIZE);
- if (err)
- return err;
+ if (qidx < pfvf->hw.tx_queues) {
+ err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
+ TSO_HEADER_SIZE);
+ if (err)
+ return err;
+ }
sq->sqe_base = sq->sqe->base;
sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
if (!sq->sg)
return -ENOMEM;
- if (pfvf->ptp) {
+ if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
sizeof(*sq->timestamps));
if (err)
@@ -871,20 +871,27 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
struct otx2_qset *qset = &pfvf->qset;
+ int err, pool_id, non_xdp_queues;
struct nix_aq_enq_req *aq;
struct otx2_cq_queue *cq;
- int err, pool_id;
cq = &qset->cq[qidx];
cq->cq_idx = qidx;
+ non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues;
if (qidx < pfvf->hw.rx_queues) {
cq->cq_type = CQ_RX;
cq->cint_idx = qidx;
cq->cqe_cnt = qset->rqe_cnt;
- } else {
+ if (pfvf->xdp_prog)
+ xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
+ } else if (qidx < non_xdp_queues) {
cq->cq_type = CQ_TX;
cq->cint_idx = qidx - pfvf->hw.rx_queues;
cq->cqe_cnt = qset->sqe_cnt;
+ } else {
+ cq->cq_type = CQ_XDP;
+ cq->cint_idx = qidx - non_xdp_queues;
+ cq->cqe_cnt = qset->sqe_cnt;
}
cq->cqe_size = pfvf->qset.xqe_size;
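With XDP send queues appended after the regular ones, the flat CQ index now decodes into three ranges. A worked example with assumed counts of 8 RX, 8 TX and 4 XDP queues:

	#include <stdio.h>

	int main(void)
	{
		int rx = 8, tx = 8, xdp = 4;	/* assumed queue counts */
		int non_xdp = rx + tx;
		int qidx;

		for (qidx = 0; qidx < rx + tx + xdp; qidx++) {
			if (qidx < rx)
				printf("qidx %2d -> CQ_RX  cint %d\n", qidx, qidx);
			else if (qidx < non_xdp)
				printf("qidx %2d -> CQ_TX  cint %d\n", qidx, qidx - rx);
			else
				printf("qidx %2d -> CQ_XDP cint %d\n", qidx, qidx - non_xdp);
		}
		return 0;
	}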
@@ -991,7 +998,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
}
/* Initialize TX queues */
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
err = otx2_sq_init(pfvf, qidx, sqb_aura);
@@ -1006,6 +1013,9 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
return err;
}
+ pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf,
+ NIX_LF_CQ_OP_STATUS);
+
/* Initialize work queue for receive buffer refill */
pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
sizeof(struct refill_work), GFP_KERNEL);
@@ -1035,7 +1045,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
/* Set RQ/SQ/CQ counts */
nixlf->rq_cnt = pfvf->hw.rx_queues;
- nixlf->sq_cnt = pfvf->hw.tx_queues;
+ nixlf->sq_cnt = pfvf->hw.tot_tx_queues;
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
nixlf->rss_grps = MAX_RSS_GROUPS;
@@ -1073,7 +1083,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
int sqb, qidx;
u64 iova, pa;
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
sq = &qset->sq[qidx];
if (!sq->sqb_ptrs)
continue;
@@ -1285,7 +1295,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
stack_pages =
(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
/* Initialize aura context */
err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
@@ -1305,7 +1315,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
goto fail;
/* Allocate pointers and free them to aura/pool */
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
pool = &pfvf->qset.pool[pool_id];
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 8e51a1db7e29..61e52812983f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -171,6 +171,8 @@ struct otx2_hw {
struct otx2_rss_info rss_info;
u16 rx_queues;
u16 tx_queues;
+ u16 xdp_queues;
+ u16 tot_tx_queues;
u16 max_queues;
u16 pool_cnt;
u16 rqpool_cnt;
@@ -264,6 +266,12 @@ struct otx2_ptp {
struct cyclecounter cycle_counter;
struct timecounter time_counter;
+
+ struct delayed_work extts_work;
+ u64 last_extts;
+ u64 thresh;
+
+ struct ptp_pin_desc extts_config;
};
#define OTX2_HW_TIMESTAMP_LEN 8
@@ -318,7 +326,7 @@ struct otx2_nic {
struct net_device *netdev;
struct dev_hw_ops *hw_ops;
void *iommu_domain;
- u16 max_frs;
+ u16 tx_max_pktlen;
u16 rbsize; /* Receive buffer size */
#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
@@ -337,7 +345,9 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
u64 flags;
+ u64 *cq_op_addr;
+ struct bpf_prog *xdp_prog;
struct otx2_qset qset;
struct otx2_hw hw;
struct pci_dev *pdev;
@@ -827,6 +837,9 @@ int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
+int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
+int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);
+
/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
@@ -847,6 +860,7 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 3de18f9433ae..777a27047c8e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -108,7 +108,6 @@ int otx2_register_dl(struct otx2_nic *pfvf)
return -ENOMEM;
}
- devlink_register(dl);
otx2_dl = devlink_priv(dl);
otx2_dl->dl = dl;
otx2_dl->pfvf = pfvf;
@@ -122,12 +121,10 @@ int otx2_register_dl(struct otx2_nic *pfvf)
goto err_dl;
}
- devlink_params_publish(dl);
-
+ devlink_register(dl);
return 0;
err_dl:
- devlink_unregister(dl);
devlink_free(dl);
return err;
}
@@ -135,16 +132,10 @@ err_dl:
void otx2_unregister_dl(struct otx2_nic *pfvf)
{
struct otx2_devlink *otx2_dl = pfvf->dl;
- struct devlink *dl;
-
- if (!otx2_dl || !otx2_dl->dl)
- return;
-
- dl = otx2_dl->dl;
+ struct devlink *dl = otx2_dl->dl;
+ devlink_unregister(dl);
devlink_params_unregister(dl, otx2_dl_params,
ARRAY_SIZE(otx2_dl_params));
-
- devlink_unregister(dl);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 38e5924ca8e9..b0f57bda7e27 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -1347,6 +1347,7 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
.get_link_ksettings = otx2vf_get_link_ksettings,
+ .get_ts_info = otx2_get_ts_info,
};
void otx2vf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 53df7fff92c4..1e0d0c9c1dac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -13,6 +13,8 @@
#include <linux/if_vlan.h>
#include <linux/iommu.h>
#include <net/ip.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -48,9 +50,15 @@ static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
+ struct otx2_nic *pf = netdev_priv(netdev);
bool if_up = netif_running(netdev);
int err = 0;
+ if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
+ netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+ netdev->mtu);
+ return -EINVAL;
+ }
if (if_up)
otx2_stop(netdev);
@@ -1180,7 +1188,7 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
}
/* SQ */
- for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
val = otx2_atomic64_add((qidx << 44), ptr);
otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
@@ -1283,7 +1291,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
/* Free SQB pointers */
otx2_sq_free_sqbs(pf);
- for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
sq = &qset->sq[qidx];
qmem_free(pf->dev, sq->sqe);
qmem_free(pf->dev, sq->tso_hdrs);
@@ -1304,16 +1312,14 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
* NIX transfers entire data using 6 segments/buffers and writes
* a CQE_RX descriptor with those segment addresses. First segment
* has additional data prepended to packet. Also software omits a
- * headroom of 128 bytes and sizeof(struct skb_shared_info) in
- * each segment. Hence the total size of memory needed
- * to receive a packet with 'mtu' is:
+ * headroom of 128 bytes in each segment. Hence the total size of
+ * memory needed to receive a packet with 'mtu' is:
* frame size = mtu + additional data;
- * memory = frame_size + (headroom + struct skb_shared_info size) * 6;
+ * memory = frame_size + headroom * 6;
* each receive buffer size = memory / 6;
*/
frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
- total_size = frame_size + (OTX2_HEAD_ROOM +
- OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) * 6;
+ total_size = frame_size + OTX2_HEAD_ROOM * 6;
rbuf_size = total_size / 6;
return ALIGN(rbuf_size, 2048);
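Worked numbers for the sizing above, taking mtu = 1500; the 128-byte headroom matches the comment and OTX2_HW_TIMESTAMP_LEN is 8 per otx2_common.h, while OTX2_ETH_HLEN = 22 is an assumed value:

	#include <stdio.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

	int main(void)
	{
		int frame_size = 1500 + 22 + 8;		/* mtu + hdr + tstamp = 1530 */
		int total_size = frame_size + 128 * 6;	/* + headroom        = 2298 */
		int rbuf_size  = total_size / 6;	/*                   =  383 */

		printf("rbuf = %d\n", ALIGN_UP(rbuf_size, 2048));	/* 2048 */
		return 0;
	}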
@@ -1332,10 +1338,11 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
* so, aura count = pool count.
*/
hw->rqpool_cnt = hw->rx_queues;
- hw->sqpool_cnt = hw->tx_queues;
+ hw->sqpool_cnt = hw->tot_tx_queues;
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
- pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ /* Maximum hardware supported transmit length */
+ pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
@@ -1493,6 +1500,44 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
mutex_unlock(&mbox->lock);
}
+static void otx2_do_set_rx_mode(struct otx2_nic *pf)
+{
+ struct net_device *netdev = pf->netdev;
+ struct nix_rx_mode *req;
+ bool promisc = false;
+
+ if (!(netdev->flags & IFF_UP))
+ return;
+
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
+ promisc = true;
+ }
+
+ /* Write unicast address to mcam entries or del from mcam */
+ if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
+ __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return;
+ }
+
+ req->mode = NIX_RX_MODE_UCAST;
+
+ if (promisc)
+ req->mode |= NIX_RX_MODE_PROMISC;
+ if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
+ otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+}
+
int otx2_open(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
@@ -1503,7 +1548,7 @@ int otx2_open(struct net_device *netdev)
netif_carrier_off(netdev);
- pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
+ pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
/* RQ and SQs are mapped to different CQs,
* so find out max CQ IRQs (i.e CINTs) needed.
*/
@@ -1523,7 +1568,7 @@ int otx2_open(struct net_device *netdev)
if (!qset->cq)
goto err_free_mem;
- qset->sq = kcalloc(pf->hw.tx_queues,
+ qset->sq = kcalloc(pf->hw.tot_tx_queues,
sizeof(struct otx2_snd_queue), GFP_KERNEL);
if (!qset->sq)
goto err_free_mem;
@@ -1544,11 +1589,20 @@ int otx2_open(struct net_device *netdev)
/* RQ0 & SQ0 are mapped to CINT0 and so on..
* 'cq_ids[0]' points to RQ's CQ and
* 'cq_ids[1]' points to SQ's CQ and
+	 * 'cq_ids[2]' points to XDP's CQ.
*/
cq_poll->cq_ids[CQ_RX] =
(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
+ if (pf->xdp_prog)
+ cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
+ (qidx + pf->hw.rx_queues +
+ pf->hw.tx_queues) :
+ CINT_INVALID_CQ;
+ else
+ cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
+
cq_poll->dev = (void *)pf;
netif_napi_add(netdev, &cq_poll->napi,
otx2_napi_handler, NAPI_POLL_WEIGHT);
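To make the CQ index layout concrete, a hypothetical configuration with rx_queues = tx_queues = 4 and an XDP program loaded (so xdp_queues = 4) maps as follows:

/* CQ indices 0..3  -> RX CQs
 * CQ indices 4..7  -> TX CQs
 * CQ indices 8..11 -> XDP TX CQs
 * so the poll context for qidx == 2 holds:
 *   cq_ids[CQ_RX]  = 2
 *   cq_ids[CQ_TX]  = 2 + 4     = 6
 *   cq_ids[CQ_XDP] = 2 + 4 + 4 = 10
 */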
@@ -1646,6 +1700,8 @@ int otx2_open(struct net_device *netdev)
if (err)
goto err_tx_stop_queues;
+ otx2_do_set_rx_mode(pf);
+
return 0;
err_tx_stop_queues:
@@ -1750,7 +1806,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Check for minimum and maximum packet length */
if (skb->len <= ETH_HLEN ||
- (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1791,43 +1847,11 @@ static void otx2_set_rx_mode(struct net_device *netdev)
queue_work(pf->otx2_wq, &pf->rx_mode_work);
}
-static void otx2_do_set_rx_mode(struct work_struct *work)
+static void otx2_rx_mode_wrk_handler(struct work_struct *work)
{
struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
- struct net_device *netdev = pf->netdev;
- struct nix_rx_mode *req;
- bool promisc = false;
-
- if (!(netdev->flags & IFF_UP))
- return;
-
- if ((netdev->flags & IFF_PROMISC) ||
- (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
- promisc = true;
- }
-
- /* Write unicast address to mcam entries or del from mcam */
- if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
- __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
-
- mutex_lock(&pf->mbox.lock);
- req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
- if (!req) {
- mutex_unlock(&pf->mbox.lock);
- return;
- }
-
- req->mode = NIX_RX_MODE_UCAST;
-
- if (promisc)
- req->mode |= NIX_RX_MODE_PROMISC;
- if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
- req->mode |= NIX_RX_MODE_ALLMULTI;
- req->mode |= NIX_RX_MODE_USE_MCE;
-
- otx2_sync_mbox_msg(&pf->mbox);
- mutex_unlock(&pf->mbox.lock);
+ otx2_do_set_rx_mode(pf);
}
static int otx2_set_features(struct net_device *netdev,
@@ -1967,7 +1991,7 @@ static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
return 0;
}
-static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct hwtstamp_config config;
@@ -2023,8 +2047,9 @@ static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
}
+EXPORT_SYMBOL(otx2_config_hwtstamp);
-static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct hwtstamp_config *cfg = &pfvf->tstamp;
@@ -2039,6 +2064,7 @@ static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL(otx2_ioctl);
static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
{
@@ -2281,6 +2307,111 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
return 0;
}
+static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
+ int qidx)
+{
+ struct page *page;
+ u64 dma_addr;
+ int err = 0;
+
+ dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
+ offset_in_page(xdpf->data), xdpf->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pf->dev, dma_addr))
+ return -ENOMEM;
+
+ err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+ if (!err) {
+ otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
+ page = virt_to_page(xdpf->data);
+ put_page(page);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int otx2_xdp_xmit(struct net_device *netdev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int qidx = smp_processor_id();
+ struct otx2_snd_queue *sq;
+ int drops = 0, i;
+
+ if (!netif_running(netdev))
+ return -ENETDOWN;
+
+ qidx += pf->hw.tx_queues;
+ sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;
+
+ /* Abort xmit if the XDP queue is not initialized */
+ if (unlikely(!sq))
+ return -ENXIO;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
+ if (err)
+ drops++;
+ }
+ return n - drops;
+}
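Queue selection in the handler above is per-CPU: the XDP send queues sit directly after the regular ones, and the driver allocates one XDP queue per RX queue. A sketch of the arithmetic, assuming the CPU id stays below hw.xdp_queues:

/* With tx_queues == 8, ndo_xdp_xmit() running on CPU 3 appends to
 * send queue index 8 + 3 == 11, i.e. the fourth XDP queue. Frames
 * that fail to DMA-map or to fit in the SQ count as drops, and the
 * function returns the number actually queued (n - drops).
 */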
+
+static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
+{
+ struct net_device *dev = pf->netdev;
+ bool if_up = netif_running(pf->netdev);
+ struct bpf_prog *old_prog;
+
+ if (prog && dev->mtu > MAX_XDP_MTU) {
+ netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (if_up)
+ otx2_stop(pf->netdev);
+
+ old_prog = xchg(&pf->xdp_prog, prog);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (pf->xdp_prog)
+ bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);
+
+ /* Network stack and XDP share the same rx queues.
+ * Use separate tx queues for XDP and network stack.
+ */
+ if (pf->xdp_prog)
+ pf->hw.xdp_queues = pf->hw.rx_queues;
+ else
+ pf->hw.xdp_queues = 0;
+
+ pf->hw.tot_tx_queues += pf->hw.xdp_queues;
+
+ if (if_up)
+ otx2_open(pf->netdev);
+
+ return 0;
+}
+
+static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return otx2_xdp_setup(pf, xdp->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
int req_perm)
{
@@ -2348,6 +2479,8 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_mac = otx2_set_vf_mac,
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
+ .ndo_bpf = otx2_xdp,
+ .ndo_xdp_xmit = otx2_xdp_xmit,
.ndo_setup_tc = otx2_setup_tc,
.ndo_set_vf_trust = otx2_ndo_set_vf_trust,
};
@@ -2358,7 +2491,7 @@ static int otx2_wq_init(struct otx2_nic *pf)
if (!pf->otx2_wq)
return -ENOMEM;
- INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
+ INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler);
INIT_WORK(&pf->reset_task, otx2_reset_task);
return 0;
}
@@ -2489,6 +2622,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->pdev = pdev;
hw->rx_queues = qcount;
hw->tx_queues = qcount;
+ hw->tot_tx_queues = qcount;
hw->max_queues = qcount;
num_vec = pci_msix_vec_count(pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index ec9e49985c2c..0ef68fdd1f26 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -27,6 +27,23 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
+static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
+{
+ struct ptp_req *req;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->op = PTP_OP_SET_THRESH;
+ req->thresh = thresh;
+
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
+}
+
static u64 ptp_cc_read(const struct cyclecounter *cc)
{
struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
@@ -55,6 +72,33 @@ static u64 ptp_cc_read(const struct cyclecounter *cc)
return rsp->clk;
}
+static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
+{
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_TSTMP;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -102,9 +146,73 @@ static int otx2_ptp_settime(struct ptp_clock_info *ptp_info,
return 0;
}
+static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ break;
+ case PTP_PF_PEROUT:
+ case PTP_PF_PHYSYNC:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void otx2_ptp_extts_check(struct work_struct *work)
+{
+ struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
+ extts_work.work);
+ struct ptp_clock_event event;
+ u64 tstmp, new_thresh;
+
+ mutex_lock(&ptp->nic->mbox.lock);
+ tstmp = ptp_tstmp_read(ptp);
+ mutex_unlock(&ptp->nic->mbox.lock);
+
+ if (tstmp != ptp->last_extts) {
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp);
+ ptp_clock_event(ptp->ptp_clock, &event);
+ ptp->last_extts = tstmp;
+
+ new_thresh = tstmp % 500000000;
+ if (ptp->thresh != new_thresh) {
+ mutex_lock(&ptp->nic->mbox.lock);
+ ptp_set_thresh(ptp, new_thresh);
+ mutex_unlock(&ptp->nic->mbox.lock);
+ ptp->thresh = new_thresh;
+ }
+ }
+ schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
+}
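A worked example of the threshold update above (pure arithmetic, values hypothetical):

/* tstmp = 12345678900 ns (12.3456789 s since timecounter init):
 *   new_thresh = 12345678900 % 500000000 = 345678900 ns
 * i.e. the offset of the latest external timestamp within its
 * half-second window, which is programmed via PTP_OP_SET_THRESH
 * before the work re-arms itself 200 ms later.
 */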
+
static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
struct ptp_clock_request *rq, int on)
{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ int pin;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ if (on)
+ schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
+ else
+ cancel_delayed_work_sync(&ptp->extts_work);
+ return 0;
+ default:
+ break;
+ }
return -EOPNOTSUPP;
}
@@ -115,6 +223,11 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
struct ptp_req *req;
int err;
+ if (is_otx2_lbkvf(pfvf->pdev)) {
+ pfvf->ptp = NULL;
+ return 0;
+ }
+
mutex_lock(&pfvf->mbox.lock);
/* check if PTP block is available */
req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
@@ -149,20 +262,28 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
ktime_to_ns(ktime_get_real()));
+ snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP");
+ ptp_ptr->extts_config.index = 0;
+ ptp_ptr->extts_config.func = PTP_PF_NONE;
+
ptp_ptr->ptp_info = (struct ptp_clock_info) {
.owner = THIS_MODULE,
.name = "OcteonTX2 PTP",
.max_adj = 1000000000ull,
- .n_ext_ts = 0,
- .n_pins = 0,
+ .n_ext_ts = 1,
+ .n_pins = 1,
.pps = 0,
+ .pin_config = &ptp_ptr->extts_config,
.adjfine = otx2_ptp_adjfine,
.adjtime = otx2_ptp_adjtime,
.gettime64 = otx2_ptp_gettime,
.settime64 = otx2_ptp_settime,
.enable = otx2_ptp_enable,
+ .verify = otx2_ptp_verify_pin,
};
+ INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check);
+
ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) {
err = ptp_ptr->ptp_clock ?
@@ -176,6 +297,7 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
error:
return err;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_init);
void otx2_ptp_destroy(struct otx2_nic *pfvf)
{
@@ -188,6 +310,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
kfree(ptp);
pfvf->ptp = NULL;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_destroy);
int otx2_ptp_clock_index(struct otx2_nic *pfvf)
{
@@ -196,6 +319,7 @@ int otx2_ptp_clock_index(struct otx2_nic *pfvf)
return ptp_clock_index(pfvf->ptp->ptp_clock);
}
+EXPORT_SYMBOL_GPL(otx2_ptp_clock_index);
int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
{
@@ -206,3 +330,8 @@ int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
return 0;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_tstamp2time);
+
+MODULE_AUTHOR("Sunil Goutham <[email protected]>");
+MODULE_DESCRIPTION("Marvell RVU NIC PTP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index f42b1d4e0c67..0cc6353254bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -8,6 +8,8 @@
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -17,6 +19,35 @@
#include "cn10k.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
+static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct bpf_prog *prog,
+ struct nix_cqe_rx_s *cqe,
+ struct otx2_cq_queue *cq);
+
+static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq)
+{
+ u64 incr = (u64)(cq->cq_idx) << 32;
+ u64 status;
+
+ status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);
+
+ if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
+ status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
+ dev_err(pfvf->dev, "CQ stopped due to error\n");
+ return -EINVAL;
+ }
+
+ cq->cq_tail = status & 0xFFFFF;
+ cq->cq_head = (status >> 20) & 0xFFFFF;
+ if (cq->cq_tail < cq->cq_head)
+ cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
+ cq->cq_tail;
+ else
+ cq->pend_cqe = cq->cq_tail - cq->cq_head;
+
+ return 0;
+}
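The wrap-around arithmetic above is worth a standalone check — a minimal userspace sketch of the same computation (head and tail as extracted from the 20-bit fields of the CQ_OP status word):

#include <stdio.h>

/* Pending CQEs on a ring of cqe_cnt entries, given reported head/tail. */
static unsigned int pend_cqe(unsigned int cqe_cnt, unsigned int head,
			     unsigned int tail)
{
	return (tail < head) ? (cqe_cnt - head) + tail : tail - head;
}

int main(void)
{
	printf("%u\n", pend_cqe(256, 250, 10));	/* wrapped: 16 */
	printf("%u\n", pend_cqe(256, 10, 250));	/* linear: 240 */
	return 0;
}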
static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
@@ -73,6 +104,24 @@ static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
sg->num_segs = 0;
}
+static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
+ struct otx2_snd_queue *sq,
+ struct nix_cqe_tx_s *cqe)
+{
+ struct nix_send_comp_s *snd_comp = &cqe->comp;
+ struct sg_list *sg;
+ struct page *page;
+ u64 pa;
+
+ sg = &sq->sg[snd_comp->sqe_id];
+
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
+ sg->size[0], DMA_TO_DEVICE);
+ page = virt_to_page(phys_to_virt(pa));
+ put_page(page);
+}
+
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq,
struct otx2_snd_queue *sq,
@@ -132,8 +181,9 @@ static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}
-static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
- u64 iova, int len, struct nix_rx_parse_s *parse)
+static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
+ u64 iova, int len, struct nix_rx_parse_s *parse,
+ int qidx)
{
struct page *page;
int off = 0;
@@ -154,11 +204,22 @@ static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
}
page = virt_to_page(va);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- va - page_address(page) + off, len - off, pfvf->rbsize);
+ if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ va - page_address(page) + off,
+ len - off, pfvf->rbsize);
+
+ otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
+ pfvf->rbsize, DMA_FROM_DEVICE);
+ return true;
+ }
- otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
- pfvf->rbsize, DMA_FROM_DEVICE);
+ /* If more than MAX_SKB_FRAGS fragments are received then
+ * give back those buffer pointers to hardware for reuse.
+ */
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);
+
+ return false;
}
static void otx2_set_rxhash(struct otx2_nic *pfvf,
@@ -285,6 +346,10 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
return;
}
+ if (pfvf->xdp_prog)
+ if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
+ return;
+
skb = napi_get_frags(napi);
if (unlikely(!skb))
return;
@@ -296,9 +361,9 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
seg_addr = &sg->seg_addr;
seg_size = (void *)sg;
for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
- otx2_skb_add_frag(pfvf, skb, *seg_addr, seg_size[seg],
- parse);
- cq->pool_ptrs++;
+ if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
+ seg_size[seg], parse, cq->cq_idx))
+ cq->pool_ptrs++;
}
start += sizeof(*sg);
}
@@ -318,7 +383,14 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
struct nix_cqe_rx_s *cqe;
int processed_cqe = 0;
- while (likely(processed_cqe < budget)) {
+ if (cq->pend_cqe >= budget)
+ goto process_cqe;
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return 0;
+
+process_cqe:
+ while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
!cqe->sg.seg_addr) {
@@ -334,17 +406,13 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
cqe->sg.seg_addr = 0x00;
processed_cqe++;
+ cq->pend_cqe--;
}
/* Free CQEs to HW */
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
- if (unlikely(!cq->pool_ptrs))
- return 0;
- /* Refill pool with new buffers */
- pfvf->hw_ops->refill_pool_ptrs(pfvf, cq);
-
return processed_cqe;
}
@@ -364,22 +432,36 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq, int budget)
{
- int tx_pkts = 0, tx_bytes = 0;
+ int tx_pkts = 0, tx_bytes = 0, qidx;
struct nix_cqe_tx_s *cqe;
int processed_cqe = 0;
- while (likely(processed_cqe < budget)) {
+ if (cq->pend_cqe >= budget)
+ goto process_cqe;
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return 0;
+
+process_cqe:
+ while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
if (unlikely(!cqe)) {
if (!processed_cqe)
return 0;
break;
}
- otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx],
- cqe, budget, &tx_pkts, &tx_bytes);
-
+ if (cq->cq_type == CQ_XDP) {
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx],
+ cqe);
+ } else {
+ otx2_snd_pkt_handler(pfvf, cq,
+ &pfvf->qset.sq[cq->cint_idx],
+ cqe, budget, &tx_pkts, &tx_bytes);
+ }
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
processed_cqe++;
+ cq->pend_cqe--;
}
/* Free CQEs to HW */
@@ -402,6 +484,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
+ struct otx2_cq_queue *rx_cq = NULL;
struct otx2_cq_poll *cq_poll;
int workdone = 0, cq_idx, i;
struct otx2_cq_queue *cq;
@@ -412,17 +495,13 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
pfvf = (struct otx2_nic *)cq_poll->dev;
qset = &pfvf->qset;
- for (i = CQS_PER_CINT - 1; i >= 0; i--) {
+ for (i = 0; i < CQS_PER_CINT; i++) {
cq_idx = cq_poll->cq_ids[i];
if (unlikely(cq_idx == CINT_INVALID_CQ))
continue;
cq = &qset->cq[cq_idx];
if (cq->cq_type == CQ_RX) {
- /* If the RQ refill WQ task is running, skip napi
- * scheduler for this queue.
- */
- if (cq->refill_task_sched)
- continue;
+ rx_cq = cq;
workdone += otx2_rx_napi_handler(pfvf, napi,
cq, budget);
} else {
@@ -430,6 +509,8 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
}
}
+ if (rx_cq && rx_cq->pool_ptrs)
+ pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -936,10 +1017,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
int processed_cqe = 0;
u64 iova, pa;
- while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
- if (!cqe->sg.subdc)
- continue;
+ if (pfvf->xdp_prog)
+ xdp_rxq_info_unreg(&cq->xdp_rxq);
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return;
+
+ while (cq->pend_cqe) {
+ cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
processed_cqe++;
+ cq->pend_cqe--;
+
+ if (!cqe)
+ continue;
if (cqe->sg.segs > 1) {
otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
continue;
@@ -965,7 +1055,16 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
sq = &pfvf->qset.sq[cq->cint_idx];
- while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) {
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return;
+
+ while (cq->pend_cqe) {
+ cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
+ processed_cqe++;
+ cq->pend_cqe--;
+
+ if (!cqe)
+ continue;
sg = &sq->sg[cqe->comp.sqe_id];
skb = (struct sk_buff *)sg->skb;
if (skb) {
@@ -973,7 +1072,6 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
dev_kfree_skb_any(skb);
sg->skb = (u64)NULL;
}
- processed_cqe++;
}
/* Free CQEs to HW */
@@ -1001,3 +1099,116 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+
+static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
+ int len, int *offset)
+{
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 *iova = NULL;
+
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 1;
+ sg->seg1_size = len;
+ iova = (void *)sg + sizeof(*sg);
+ *iova = dma_addr;
+ *offset += sizeof(*sg) + sizeof(u64);
+
+ sq->sg[sq->head].dma_addr[0] = dma_addr;
+ sq->sg[sq->head].size[0] = len;
+ sq->sg[sq->head].num_segs = 1;
+}
+
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+{
+ struct nix_sqe_hdr_s *sqe_hdr;
+ struct otx2_snd_queue *sq;
+ int offset, free_sqe;
+
+ sq = &pfvf->qset.sq[qidx];
+ free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ if (free_sqe < sq->sqe_thresh)
+ return false;
+
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+
+ if (!sqe_hdr->total) {
+ sqe_hdr->aura = sq->aura_id;
+ sqe_hdr->df = 1;
+ sqe_hdr->sq = qidx;
+ sqe_hdr->pnc = 1;
+ }
+ sqe_hdr->total = len;
+ sqe_hdr->sqe_id = sq->head;
+
+ offset = sizeof(*sqe_hdr);
+
+ otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
+
+ return true;
+}
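A worked size example for the SQE built above, assuming a 16-byte struct nix_sqe_hdr_s and an 8-byte SG subdescriptor followed by one 8-byte IOVA:

/* offset = 16 after the header
 * offset = 16 + 8 + 8 = 32 after otx2_xdp_sqe_add_sg()
 * sizem1 = (32 / 16) - 1 = 1, i.e. the descriptor spans two
 * 16-byte words, which is what sqe_flush() pushes to hardware.
 */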
+
+static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct bpf_prog *prog,
+ struct nix_cqe_rx_s *cqe,
+ struct otx2_cq_queue *cq)
+{
+ unsigned char *hard_start, *data;
+ int qidx = cq->cq_idx;
+ struct xdp_buff xdp;
+ struct page *page;
+ u64 iova, pa;
+ u32 act;
+ int err;
+
+ iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ page = virt_to_page(phys_to_virt(pa));
+
+ xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
+
+ data = (unsigned char *)phys_to_virt(pa);
+ hard_start = page_address(page);
+ xdp_prepare_buff(&xdp, hard_start, data - hard_start,
+ cqe->sg.seg_size, false);
+
+ act = bpf_prog_run_xdp(prog, &xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ qidx += pfvf->hw.tx_queues;
+ cq->pool_ptrs++;
+ return otx2_xdp_sq_append_pkt(pfvf, iova,
+ cqe->sg.seg_size, qidx);
+ case XDP_REDIRECT:
+ cq->pool_ptrs++;
+ err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
+
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ if (!err)
+ return true;
+ put_page(page);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ break;
+ case XDP_ABORTED:
+ trace_xdp_exception(pfvf->netdev, prog, act);
+ break;
+ case XDP_DROP:
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ cq->pool_ptrs++;
+ return true;
+ }
+ return false;
+}
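Summarizing the return contract of the handler above for its caller, otx2_rcv_pkt_handler():

/* XDP_PASS     -> false: frame continues into the normal skb path
 * XDP_TX       -> true once queued via otx2_xdp_sq_append_pkt()
 * XDP_REDIRECT -> true when xdp_do_redirect() succeeds; on failure
 *                 the page ref is dropped and false is returned
 * XDP_DROP     -> true: buffer unmapped, page released, pool_ptrs
 *                 bumped so the buffer slot is refilled
 * XDP_ABORTED  -> traced via trace_xdp_exception(), then false
 */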
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 3ff1ad79c001..f1a04cf9210c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -11,6 +11,7 @@
#include <linux/etherdevice.h>
#include <linux/iommu.h>
#include <linux/if_vlan.h>
+#include <net/xdp.h>
#define LBK_CHAN_BASE 0x000
#define SDP_CHAN_BASE 0x700
@@ -25,6 +26,8 @@
#define OTX2_MAX_GSO_SEGS 255
#define OTX2_MAX_FRAGS_IN_SQE 9
+#define MAX_XDP_MTU (1530 - OTX2_ETH_HLEN)
+
/* Rx buffer size should be in multiples of 128bytes */
#define RCV_FRAG_LEN1(x) \
((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \
@@ -36,9 +39,7 @@
#define RCV_FRAG_LEN(x) \
((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x))
-#define DMA_BUFFER_LEN(x) \
- ((x) - OTX2_HEAD_ROOM - \
- OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define DMA_BUFFER_LEN(x) ((x) - OTX2_HEAD_ROOM)
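A quick check of the revised macro, assuming OTX2_HEAD_ROOM == 128:

/* DMA_BUFFER_LEN(2048) = 2048 - 128 = 1920 bytes of packet data
 * per 2048-byte receive buffer; the skb_shared_info reservation
 * is no longer subtracted here, matching the rbuf sizing change
 * earlier in this patch.
 */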
/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
* is equal to this value.
@@ -56,6 +57,9 @@
*/
#define CQ_QCOUNT_DEFAULT 1
+#define CQ_OP_STAT_OP_ERR 63
+#define CQ_OP_STAT_CQ_ERR 46
+
struct queue_stats {
u64 bytes;
u64 pkts;
@@ -96,7 +100,8 @@ struct otx2_snd_queue {
enum cq_type {
CQ_RX,
CQ_TX,
- CQS_PER_CINT = 2, /* RQ + SQ */
+ CQ_XDP,
+ CQS_PER_CINT = 3, /* RQ + SQ + XDP */
};
struct otx2_cq_poll {
@@ -122,9 +127,12 @@ struct otx2_cq_queue {
u16 pool_ptrs;
u32 cqe_cnt;
u32 cq_head;
+ u32 cq_tail;
+ u32 pend_cqe;
void *cqe_base;
struct qmem *cqe;
struct otx2_pool *rbpool;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
struct otx2_qset {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 03b4ec630432..e6cb8cd0787d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -8,9 +8,11 @@
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "otx2_common.h"
#include "otx2_reg.h"
+#include "otx2_ptp.h"
#include "cn10k.h"
#define DRV_NAME "rvu_nicvf"
@@ -277,7 +279,6 @@ static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
struct mbox *mbox = &vf->mbox;
if (vf->mbox_wq) {
- flush_workqueue(vf->mbox_wq);
destroy_workqueue(vf->mbox_wq);
vf->mbox_wq = NULL;
}
@@ -500,6 +501,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_set_features = otx2vf_set_features,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
+ .ndo_do_ioctl = otx2_ioctl,
};
static int otx2_wq_init(struct otx2_nic *vf)
@@ -583,6 +585,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->rx_queues = qcount;
hw->tx_queues = qcount;
hw->max_queues = qcount;
+ hw->tot_tx_queues = qcount;
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
@@ -640,6 +643,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
+ /* Don't check for error. Proceed without ptp */
+ otx2_ptp_init(vf);
+
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
index 5cca007a3e17..06279cd6da67 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -345,8 +345,6 @@ static struct prestera_trap prestera_trap_items_arr[] = {
},
};
-static void prestera_devlink_traps_fini(struct prestera_switch *sw);
-
static int prestera_drop_counter_get(struct devlink *devlink,
const struct devlink_trap *trap,
u64 *p_drops);
@@ -381,8 +379,6 @@ static int prestera_trap_action_set(struct devlink *devlink,
enum devlink_trap_action action,
struct netlink_ext_ack *extack);
-static int prestera_devlink_traps_register(struct prestera_switch *sw);
-
static const struct devlink_ops prestera_dl_ops = {
.info_get = prestera_dl_info_get,
.trap_init = prestera_trap_init,
@@ -407,34 +403,18 @@ void prestera_devlink_free(struct prestera_switch *sw)
devlink_free(dl);
}
-int prestera_devlink_register(struct prestera_switch *sw)
+void prestera_devlink_register(struct prestera_switch *sw)
{
struct devlink *dl = priv_to_devlink(sw);
- int err;
devlink_register(dl);
-
- err = prestera_devlink_traps_register(sw);
- if (err) {
- devlink_unregister(dl);
- dev_err(sw->dev->dev, "devlink_traps_register failed: %d\n",
- err);
- return err;
- }
-
- return 0;
}
void prestera_devlink_unregister(struct prestera_switch *sw)
{
- struct prestera_trap_data *trap_data = sw->trap_data;
struct devlink *dl = priv_to_devlink(sw);
- prestera_devlink_traps_fini(sw);
devlink_unregister(dl);
-
- kfree(trap_data->trap_items_arr);
- kfree(trap_data);
}
int prestera_devlink_port_register(struct prestera_port *port)
@@ -482,7 +462,7 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev)
return &port->dl_port;
}
-static int prestera_devlink_traps_register(struct prestera_switch *sw)
+int prestera_devlink_traps_register(struct prestera_switch *sw)
{
const u32 groups_count = ARRAY_SIZE(prestera_trap_groups_arr);
const u32 traps_count = ARRAY_SIZE(prestera_trap_items_arr);
@@ -621,8 +601,9 @@ static int prestera_drop_counter_get(struct devlink *devlink,
cpu_code_type, p_drops);
}
-static void prestera_devlink_traps_fini(struct prestera_switch *sw)
+void prestera_devlink_traps_unregister(struct prestera_switch *sw)
{
+ struct prestera_trap_data *trap_data = sw->trap_data;
struct devlink *dl = priv_to_devlink(sw);
const struct devlink_trap *trap;
int i;
@@ -634,4 +615,6 @@ static void prestera_devlink_traps_fini(struct prestera_switch *sw)
devlink_trap_groups_unregister(dl, prestera_trap_groups_arr,
ARRAY_SIZE(prestera_trap_groups_arr));
+ kfree(trap_data->trap_items_arr);
+ kfree(trap_data);
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
index cc34c3db13a2..b322295bad3a 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
@@ -9,7 +9,7 @@
struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev);
void prestera_devlink_free(struct prestera_switch *sw);
-int prestera_devlink_register(struct prestera_switch *sw);
+void prestera_devlink_register(struct prestera_switch *sw);
void prestera_devlink_unregister(struct prestera_switch *sw);
int prestera_devlink_port_register(struct prestera_port *port);
@@ -22,5 +22,7 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev);
void prestera_devlink_trap_report(struct prestera_port *port,
struct sk_buff *skb, u8 cpu_code);
+int prestera_devlink_traps_register(struct prestera_switch *sw);
+void prestera_devlink_traps_unregister(struct prestera_switch *sw);
#endif /* _PRESTERA_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 44c670807fb3..d0d5a229d19d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -137,7 +137,7 @@ static int prestera_port_set_mac_address(struct net_device *dev, void *p)
if (err)
return err;
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -338,11 +338,14 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
goto err_port_init;
}
+ eth_hw_addr_gen(dev, sw->base_mac, port->fp_id);
/* firmware requires that port's MAC address consist of the first
* 5 bytes of the base MAC address
*/
- memcpy(dev->dev_addr, sw->base_mac, dev->addr_len - 1);
- dev->dev_addr[dev->addr_len - 1] = port->fp_id;
+ if (memcmp(dev->dev_addr, sw->base_mac, ETH_ALEN - 1)) {
+ dev_warn(prestera_dev(sw), "Port MAC address wraps for port(%u)\n", id);
+ dev_addr_mod(dev, 0, sw->base_mac, ETH_ALEN - 1);
+ }
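A worked example of the wrap check above (all values hypothetical):

/* base_mac = 00:11:22:33:44:f0, fp_id = 0x20
 *   eth_hw_addr_gen() -> 00:11:22:33:45:10 (carry into byte 4)
 *   memcmp() over the first 5 bytes differs -> warn, then
 *   dev_addr_mod() restores them: 00:11:22:33:44:10
 */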
err = prestera_hw_port_mac_set(port, dev->dev_addr);
if (err) {
@@ -851,7 +854,7 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
goto err_span_init;
- err = prestera_devlink_register(sw);
+ err = prestera_devlink_traps_register(sw);
if (err)
goto err_dl_register;
@@ -863,12 +866,13 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
goto err_ports_create;
+ prestera_devlink_register(sw);
return 0;
err_ports_create:
prestera_lag_fini(sw);
err_lag_init:
- prestera_devlink_unregister(sw);
+ prestera_devlink_traps_unregister(sw);
err_dl_register:
prestera_span_fini(sw);
err_span_init:
@@ -888,9 +892,10 @@ err_swdev_register:
static void prestera_switch_fini(struct prestera_switch *sw)
{
+ prestera_devlink_unregister(sw);
prestera_destroy_ports(sw);
prestera_lag_fini(sw);
- prestera_devlink_unregister(sw);
+ prestera_devlink_traps_unregister(sw);
prestera_span_fini(sw);
prestera_acl_fini(sw);
prestera_event_handlers_unregister(sw);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index fab53c9b8380..6c02e1740609 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -389,7 +389,7 @@ static void inverse_every_nibble(unsigned char *mac_addr)
* Outputs
* return the calculated entry.
*/
-static u32 hash_function(unsigned char *mac_addr_orig)
+static u32 hash_function(const unsigned char *mac_addr_orig)
{
u32 hash_result;
u32 addr0;
@@ -434,7 +434,7 @@ static u32 hash_function(unsigned char *mac_addr_orig)
* -ENOSPC if table full
*/
static int add_del_hash_entry(struct pxa168_eth_private *pep,
- unsigned char *mac_addr,
+ const unsigned char *mac_addr,
u32 rd, u32 skip, int del)
{
struct addr_table_entry *entry, *start;
@@ -521,7 +521,7 @@ static int add_del_hash_entry(struct pxa168_eth_private *pep,
*/
static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
unsigned char *oaddr,
- unsigned char *addr)
+ const unsigned char *addr)
{
/* Delete old entry */
if (oaddr)
@@ -607,7 +607,7 @@ static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
memcpy(oldMac, dev->dev_addr, ETH_ALEN);
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
mac_h = dev->dev_addr[0] << 24;
mac_h |= dev->dev_addr[1] << 16;
@@ -1434,11 +1434,15 @@ static int pxa168_eth_probe(struct platform_device *pdev)
INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
- err = of_get_mac_address(pdev->dev.of_node, dev->dev_addr);
+ err = of_get_ethdev_address(pdev->dev.of_node, dev);
if (err) {
+ u8 addr[ETH_ALEN];
+
/* try reading the mac address, if set by the bootloader */
- pxa168_eth_get_mac_address(dev, dev->dev_addr);
- if (!is_valid_ether_addr(dev->dev_addr)) {
+ pxa168_eth_get_mac_address(dev, addr);
+ if (is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(dev, addr);
+ } else {
dev_info(&pdev->dev, "Using random mac address\n");
eth_hw_addr_random(dev);
}
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 051dd3fb5b03..0c864e5bf0a6 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3459,7 +3459,7 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
if (!netif_running(dev)) {
memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
@@ -3810,6 +3810,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
{
struct skge_port *skge;
struct net_device *dev = alloc_etherdev(sizeof(*skge));
+ u8 addr[ETH_ALEN];
if (!dev)
return NULL;
@@ -3862,7 +3863,8 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
}
/* read the mac address */
- memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
+ memcpy_fromio(addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
return dev;
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 3cb9c1271328..5abb55191e8e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -3817,7 +3817,7 @@ static int sky2_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
dev->dev_addr, ETH_ALEN);
memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
@@ -4720,10 +4720,13 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
* 1) from device tree data
* 2) from internal registers set by bootloader
*/
- ret = of_get_mac_address(hw->pdev->dev.of_node, dev->dev_addr);
- if (ret)
- memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
- ETH_ALEN);
+ ret = of_get_ethdev_address(hw->pdev->dev.of_node, dev);
+ if (ret) {
+ u8 addr[ETH_ALEN];
+
+ memcpy_fromio(addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
+ }
/* if the address is invalid, use a random value */
if (!is_valid_ether_addr(dev->dev_addr)) {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 398c23cec815..75d67d1b5f6b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2588,7 +2588,7 @@ static int __init mtk_init(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
int ret;
- ret = of_get_mac_address(mac->of_node, dev->dev_addr);
+ ret = of_get_ethdev_address(mac->of_node, dev);
if (ret) {
/* If the mac address is invalid, use random mac address */
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index b5f68f66d42a..7bb1f20002b5 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -186,6 +186,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
int hash;
int i;
+ if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
+ return -EEXIST;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
struct flow_match_meta match;
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 1d5dd2015453..89ca7960b225 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -523,7 +523,7 @@ static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
static void mtk_star_set_mac_addr(struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
- u8 *mac_addr = ndev->dev_addr;
+ const u8 *mac_addr = ndev->dev_addr;
unsigned int high, low;
high = mac_addr[0] << 8 | mac_addr[1] << 0;
@@ -1544,7 +1544,7 @@ static int mtk_star_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = eth_platform_get_mac_address(dev, ndev->dev_addr);
+ ret = platform_get_ethdev_address(dev, ndev);
if (ret || !is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 8d751383530b..e10b7b04b894 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2480,7 +2480,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
return 0;
err_thread:
- flush_workqueue(priv->mfunc.master.comm_wq);
destroy_workqueue(priv->mfunc.master.comm_wq);
err_slaves:
while (i--) {
@@ -2587,7 +2586,6 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
int i, port;
if (mlx4_is_master(dev)) {
- flush_workqueue(priv->mfunc.master.comm_wq);
destroy_workqueue(priv->mfunc.master.comm_wq);
for (i = 0; i < dev->num_slaves; i++) {
for (port = 1; port <= MLX4_MAX_PORTS; port++)
@@ -3009,7 +3007,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac)
return -EPERM;
}
- s_info->mac = mlx4_mac_to_u64(mac);
+ s_info->mac = ether_addr_to_u64(mac);
mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
vf, port, s_info->mac);
return 0;
@@ -3195,7 +3193,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
- mlx4_u64_to_mac(mac, s_info->mac);
+ u64_to_ether_addr(s_info->mac, mac);
if (setting && !is_valid_ether_addr(mac)) {
mlx4_info(dev, "Illegal MAC with spoofchk\n");
return -EPERM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index f7053a74e6a8..4d4f9cf9facb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -314,7 +314,8 @@ static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
buf += PAGE_SIZE;
}
} else {
- err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ?
+ err = copy_to_user((void __user *)buf, init_ents,
+ array_size(entries, cqe_size)) ?
-EFAULT : 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ef518b1040f7..66c8ae29bc7a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -197,6 +197,8 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
/* xdp statistics */
"rx_xdp_drop",
+ "rx_xdp_redirect",
+ "rx_xdp_redirect_fail",
"rx_xdp_tx",
"rx_xdp_tx_full",
@@ -428,6 +430,8 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
data[index++] = priv->rx_ring[i]->bytes;
data[index++] = priv->rx_ring[i]->dropped;
data[index++] = priv->rx_ring[i]->xdp_drop;
+ data[index++] = priv->rx_ring[i]->xdp_redirect;
+ data[index++] = priv->rx_ring[i]->xdp_redirect_fail;
data[index++] = priv->rx_ring[i]->xdp_tx;
data[index++] = priv->rx_ring[i]->xdp_tx_full;
}
@@ -520,6 +524,10 @@ static void mlx4_en_get_strings(struct net_device *dev,
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_drop", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_xdp_redirect", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_xdp_redirect_fail", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_tx", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_tx_full", i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 109472d6b61f..f1259bdb1a29 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -237,7 +237,6 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);
- flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
(void) mlx4_mr_free(dev, &mdev->mr);
iounmap(mdev->uar_map);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index a2f61a87cef8..3f6d5c384637 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -372,6 +372,9 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
int nhoff = skb_network_offset(skb);
int ret = 0;
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
if (skb->protocol != htons(ETH_P_IP))
return -EPROTONOSUPPORT;
@@ -524,18 +527,17 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
return err;
}
-static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
+static void mlx4_en_u64_to_mac(struct net_device *dev, u64 src_mac)
{
- int i;
- for (i = ETH_ALEN - 1; i >= 0; --i) {
- dst_mac[i] = src_mac & 0xff;
- src_mac >>= 8;
- }
- memset(&dst_mac[ETH_ALEN], 0, 2);
+ u8 addr[ETH_ALEN];
+
+ u64_to_ether_addr(src_mac, addr);
+ eth_hw_addr_set(dev, addr);
}
-static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
+static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv,
+ const unsigned char *addr,
int qpn, u64 *reg_id)
{
int err;
@@ -556,7 +558,7 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
- unsigned char *mac, int *qpn, u64 *reg_id)
+ const unsigned char *mac, int *qpn, u64 *reg_id)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
@@ -608,7 +610,8 @@ static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
}
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
- unsigned char *mac, int qpn, u64 reg_id)
+ const unsigned char *mac,
+ int qpn, u64 reg_id)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
@@ -641,7 +644,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
int index = 0;
int err = 0;
int *qpn = &priv->base_qpn;
- u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
+ u64 mac = ether_addr_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
priv->dev->dev_addr);
@@ -680,7 +683,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
int qpn = priv->base_qpn;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
- u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
+ u64 mac = ether_addr_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
priv->dev->dev_addr);
mlx4_unregister_mac(dev, priv->port, mac);
@@ -698,14 +701,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int err = 0;
- u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);
+ u64 new_mac_u64 = ether_addr_to_u64(new_mac);
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
struct hlist_head *bucket;
unsigned int mac_hash;
struct mlx4_mac_entry *entry;
struct hlist_node *tmp;
- u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
+ u64 prev_mac_u64 = ether_addr_to_u64(prev_mac);
bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
@@ -794,7 +797,7 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
if (err)
goto out;
- memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, saddr->sa_data);
mlx4_en_update_user_mac(priv, new_mac);
out:
mutex_unlock(&mdev->state_lock);
@@ -1073,7 +1076,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
mlx4_en_cache_mclist(dev);
netif_addr_unlock_bh(dev);
list_for_each_entry(mclist, &priv->mc_list, list) {
- mcast_addr = mlx4_mac_to_u64(mclist->addr);
+ mcast_addr = ether_addr_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -1166,7 +1169,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
found = true;
if (!found) {
- mac = mlx4_mac_to_u64(entry->mac);
+ mac = ether_addr_to_u64(entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
priv->base_qpn,
entry->reg_id);
@@ -1209,7 +1212,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
break;
}
- mac = mlx4_mac_to_u64(ha->addr);
+ mac = ether_addr_to_u64(ha->addr);
memcpy(entry->mac, ha->addr, ETH_ALEN);
err = mlx4_register_mac(mdev->dev, priv->port, mac);
if (err < 0) {
@@ -1269,7 +1272,6 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
if (!netif_carrier_ok(dev)) {
if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
if (priv->port_state.link_state) {
- priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
netif_carrier_on(dev);
en_dbg(LINK, priv, "Link Up\n");
}
@@ -1346,7 +1348,7 @@ static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
bucket = &priv->mac_hash[i];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
- mac = mlx4_mac_to_u64(entry->mac);
+ mac = ether_addr_to_u64(entry->mac);
en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
@@ -1557,26 +1559,36 @@ static void mlx4_en_service_task(struct work_struct *work)
mutex_unlock(&mdev->state_lock);
}
-static void mlx4_en_linkstate(struct work_struct *work)
+static void mlx4_en_linkstate(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_port_state *port_state = &priv->port_state;
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+ bool up;
+
+ if (mlx4_en_QUERY_PORT(mdev, priv->port))
+ port_state->link_state = MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN;
+
+ up = port_state->link_state == MLX4_PORT_STATE_DEV_EVENT_PORT_UP;
+ if (up == netif_carrier_ok(dev))
+ netif_carrier_event(dev);
+ if (!up) {
+ en_info(priv, "Link Down\n");
+ netif_carrier_off(dev);
+ } else {
+ en_info(priv, "Link Up\n");
+ netif_carrier_on(dev);
+ }
+}
+
+static void mlx4_en_linkstate_work(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
linkstate_task);
struct mlx4_en_dev *mdev = priv->mdev;
- int linkstate = priv->link_state;
mutex_lock(&mdev->state_lock);
- /* If observable port state changed set carrier state and
- * report to system log */
- if (priv->last_link_state != linkstate) {
- if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
- en_info(priv, "Link Down\n");
- netif_carrier_off(priv->dev);
- } else {
- en_info(priv, "Link Up\n");
- netif_carrier_on(priv->dev);
- }
- }
- priv->last_link_state = linkstate;
+ mlx4_en_linkstate(priv);
mutex_unlock(&mdev->state_lock);
}
@@ -2079,9 +2091,11 @@ static int mlx4_en_open(struct net_device *dev)
mlx4_en_clear_stats(dev);
err = mlx4_en_start_port(dev);
- if (err)
+ if (err) {
en_err(priv, "Failed starting port:%d\n", priv->port);
-
+ goto out;
+ }
+ mlx4_en_linkstate(priv);
out:
mutex_unlock(&mdev->state_lock);
return err;
@@ -3168,7 +3182,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->restart_task, mlx4_en_restart);
- INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate_work);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_RFS_ACCEL
@@ -3253,7 +3267,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/* Set default MAC */
dev->addr_len = ETH_ALEN;
- mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
+ mlx4_en_u64_to_mac(dev, mdev->dev->caps.def_mac[priv->port]);
if (!is_valid_ether_addr(dev->dev_addr)) {
en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
priv->port, dev->dev_addr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 0158b88bea5b..532997eba698 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -244,6 +244,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_complete = 0;
priv->port_stats.rx_alloc_pages = 0;
priv->xdp_stats.rx_xdp_drop = 0;
+ priv->xdp_stats.rx_xdp_redirect = 0;
+ priv->xdp_stats.rx_xdp_redirect_fail = 0;
priv->xdp_stats.rx_xdp_tx = 0;
priv->xdp_stats.rx_xdp_tx_full = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
@@ -255,6 +257,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);
priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages);
priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);
+ priv->xdp_stats.rx_xdp_redirect += READ_ONCE(ring->xdp_redirect);
+ priv->xdp_stats.rx_xdp_redirect_fail += READ_ONCE(ring->xdp_redirect_fail);
priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx);
priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7f6d3b82c29b..650e6a1844ae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -669,6 +669,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct bpf_prog *xdp_prog;
int cq_ring = cq->ring;
bool doorbell_pending;
+ bool xdp_redir_flush;
struct mlx4_cqe *cqe;
struct xdp_buff xdp;
int polled = 0;
@@ -682,6 +683,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
xdp_prog = rcu_dereference_bh(ring->xdp_prog);
xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
doorbell_pending = false;
+ xdp_redir_flush = false;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
* descriptor offset can be deduced from the CQE index instead of
@@ -790,6 +792,16 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
switch (act) {
case XDP_PASS:
break;
+ case XDP_REDIRECT:
+ if (likely(!xdp_do_redirect(dev, &xdp, xdp_prog))) {
+ ring->xdp_redirect++;
+ xdp_redir_flush = true;
+ frags[0].page = NULL;
+ goto next;
+ }
+ ring->xdp_redirect_fail++;
+ trace_xdp_exception(dev, xdp_prog, act);
+ goto xdp_drop_no_cnt;
case XDP_TX:
if (likely(!mlx4_en_xmit_frame(ring, frags, priv,
length, cq_ring,
@@ -897,6 +909,9 @@ next:
break;
}
+ if (xdp_redir_flush)
+ xdp_do_flush();
+
if (likely(polled)) {
if (doorbell_pending) {
priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c56b9dba4c71..817f4154b86d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -130,6 +130,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->bf_enabled = !!(priv->pflags &
MLX4_EN_PRIV_FLAGS_BLUEFLAME);
}
+ ring->doorbell_address = ring->bf.uar->map + MLX4_SEND_DOORBELL;
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
ring->queue_index = queue_index;
@@ -753,8 +754,7 @@ void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring)
#else
iowrite32be(
#endif
- (__force u32)ring->doorbell_qpn,
- ring->bf.uar->map + MLX4_SEND_DOORBELL);
+ (__force u32)ring->doorbell_qpn, ring->doorbell_address);
}
static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring,
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index dc4ac1a2b6b6..42c96c9d7fb1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -3105,7 +3105,7 @@ void mlx4_replace_zero_macs(struct mlx4_dev *dev)
dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
eth_random_addr(mac_addr);
dev->port_random_macs |= 1 << i;
- dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
+ dev->caps.def_mac[i] = ether_addr_to_u64(mac_addr);
}
}
EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 27ed4694fbea..b187c210d4d6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4015,7 +4015,6 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&dev->persist->interface_state_mutex);
mutex_init(&dev->persist->pci_status_mutex);
- devlink_register(devlink);
ret = devlink_params_register(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
if (ret)
@@ -4025,16 +4024,15 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_params_unregister;
- devlink_params_publish(devlink);
- devlink_reload_enable(devlink);
pci_save_state(pdev);
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
return 0;
err_params_unregister:
devlink_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
err_devlink_unregister:
- devlink_unregister(devlink);
kfree(dev->persist);
err_devlink_free:
devlink_free(devlink);
@@ -4137,7 +4135,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(priv);
int active_vfs = 0;
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
@@ -4173,7 +4171,6 @@ static void mlx4_remove_one(struct pci_dev *pdev)
mlx4_pci_disable_device(dev);
devlink_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
- devlink_unregister(devlink);
kfree(dev->persist);
devlink_free(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f1b4ad9c66d2..f1716a83a4d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1046,7 +1046,7 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
-int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr,
int port, int qpn, u16 prio, u64 *reg_id)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f3d1a20201ef..e132ff4c82f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -283,6 +283,7 @@ struct mlx4_en_tx_ring {
struct mlx4_bf bf;
/* Following part should be mostly read */
+ void __iomem *doorbell_address;
__be32 doorbell_qpn;
__be32 mr_key;
u32 size; /* number of TXBBs */
@@ -340,6 +341,8 @@ struct mlx4_en_rx_ring {
unsigned long csum_complete;
unsigned long rx_alloc_pages;
unsigned long xdp_drop;
+ unsigned long xdp_redirect;
+ unsigned long xdp_redirect_fail;
unsigned long xdp_tx;
unsigned long xdp_tx_full;
unsigned long dropped;
@@ -552,7 +555,6 @@ struct mlx4_en_priv {
struct mlx4_hwq_resources res;
int link_state;
- int last_link_state;
bool port_up;
int port;
int registered;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 7b51ae8cf759..e9cd4bb6f83d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -42,9 +42,11 @@ struct mlx4_en_port_stats {
struct mlx4_en_xdp_stats {
unsigned long rx_xdp_drop;
+ unsigned long rx_xdp_redirect;
+ unsigned long rx_xdp_redirect_fail;
unsigned long rx_xdp_tx;
unsigned long rx_xdp_tx_full;
-#define NUM_XDP_STATS 3
+#define NUM_XDP_STATS 5
};
struct mlx4_en_phy_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 63032cd6efb1..bdb271b604d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,10 +14,10 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
- fs_counters.o fs_ft_pool.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
+ fs_counters.o fs_ft_pool.o rl.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
- fw_reset.o qos.o
+ fw_reset.o qos.o lib/tout.o
#
# Netdev basic
@@ -37,7 +37,7 @@ mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag/mp.o lag/port_sel.o lib/geneve.o lib/port_tun.o \
en_rep.o en/rep/bond.o en/mod_hdr.o \
en/mapping.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index db5dfff585c9..f71ec4d9d68e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -45,6 +45,7 @@
#include "mlx5_core.h"
#include "lib/eq.h"
+#include "lib/tout.h"
enum {
CMD_IF_REV = 5,
@@ -225,9 +226,13 @@ static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
- unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
+ struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd);
+ u64 cmd_to_ms = mlx5_tout_ms(dev, CMD);
+ unsigned long poll_end;
u8 own;
+ poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000);
+
do {
own = READ_ONCE(ent->lay->status_own);
if (!(own & CMD_OWNER_HW)) {
@@ -925,15 +930,18 @@ static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
struct mlx5_cmd *cmd = ent->cmd;
- struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
- unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
+ bool poll_cmd = ent->polling;
struct mlx5_cmd_layout *lay;
+ struct mlx5_core_dev *dev;
+ unsigned long cb_timeout;
struct semaphore *sem;
unsigned long flags;
- bool poll_cmd = ent->polling;
int alloc_ret;
int cmd_mode;
+ dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
+
complete(&ent->handling);
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
@@ -1073,7 +1081,7 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
- unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
+ unsigned long timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
struct mlx5_cmd *cmd = &dev->cmd;
int err;
@@ -2058,7 +2066,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
return -EINVAL;
}
- cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL);
+ cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
if (!cmd->stats)
return -ENOMEM;
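[Editor's note] The fixed MLX5_CMD_TIMEOUT_MSEC constant is replaced throughout by mlx5_tout_ms(dev, CMD), a per-device lookup into the new lib/tout.c timeout table (which firmware may override; the exact override mechanism is not shown in this hunk), and the open-coded `n * size` allocation becomes kvcalloc(). The polling structure itself is unchanged; a minimal sketch of the jiffies-based loop, with condition_met() as a placeholder for the hardware ownership check:

        /* Poll with a one-second grace period on top of the configured
         * timeout (sketch). */
        unsigned long poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000);

        do {
                if (condition_met())            /* placeholder */
                        return 0;
                cond_resched();
        } while (time_before(jiffies, poll_end));
        return -ETIMEDOUT;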
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index cf97985628ab..02e77ffe5c3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -155,6 +155,8 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
int err;
+ mlx5_debug_cq_remove(dev, cq);
+
mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
mlx5_eq_del_cq(&cq->eq->core, cq);
@@ -162,16 +164,13 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
MLX5_SET(destroy_cq_in, in, uid, cq->uid);
err = mlx5_cmd_exec_in(dev, destroy_cq, in);
- if (err)
- return err;
synchronize_irq(cq->irqn);
- mlx5_debug_cq_remove(dev, cq);
mlx5_cq_put(cq);
wait_for_completion(&cq->free);
- return 0;
+ return err;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);
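[Editor's note] The destroy path is reordered: the debugfs entry goes away before the firmware command is issued, and the IRQ synchronization, reference drop and completion wait now run even when the command fails, with the command status only returned at the end. The teardown relies on the usual refcount-plus-completion idiom; a self-contained sketch, assuming a hypothetical struct obj:

        #include <linux/completion.h>
        #include <linux/refcount.h>
        #include <linux/slab.h>

        struct obj {
                refcount_t refcount;
                struct completion free;
        };

        static void obj_put(struct obj *o)
        {
                if (refcount_dec_and_test(&o->refcount))
                        complete(&o->free);     /* last user signals destroy */
        }

        static void obj_destroy(struct obj *o)
        {
                obj_put(o);                     /* drop the init reference */
                wait_for_completion(&o->free);  /* wait out concurrent users */
                kfree(o);
        }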
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index e8093c4e09d4..a8b84d53dfb0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -33,6 +33,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
+#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
/* intf dev list mutex */
@@ -537,6 +538,16 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
return add_drivers(dev);
}
+static bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
+{
+ u64 fsystem_guid, psystem_guid;
+
+ fsystem_guid = mlx5_query_nic_system_image_guid(dev);
+ psystem_guid = mlx5_query_nic_system_image_guid(peer_dev);
+
+ return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
+}
+
static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
{
return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
@@ -556,7 +567,8 @@ static int next_phys_dev(struct device *dev, const void *data)
if (mdev == curr)
return 0;
- if (mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
+ if (!mlx5_same_hw_devs(mdev, (struct mlx5_core_dev *)curr) &&
+ mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
return 0;
return 1;
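[Editor's note] mlx5_same_hw_devs() lets functions of one adapter pair up by the NIC system image GUID even when their PCI topology differs, with the PCI-id comparison kept as a fallback. A predicate with this `int (*)(struct device *, const void *)` signature is typically consumed by a driver-core iterator; an illustrative call, assuming iteration over the PCI bus (curr_mdev is a placeholder for the current device):

        /* Illustrative only: find the first other device accepted by the
         * next_phys_dev() predicate. */
        struct device *next;

        next = bus_find_device(&pci_bus_type, NULL, curr_mdev, next_phys_dev);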
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index b36f721625e4..1c98652b244a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -136,6 +136,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct pci_dev *pdev = dev->pdev;
bool sf_dev_allocated;
sf_dev_allocated = mlx5_sf_dev_allocated(dev);
@@ -153,6 +154,10 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}
+ if (pci_num_vf(pdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
+ }
+
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
mlx5_unload_one(dev);
@@ -449,7 +454,8 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
struct mlx5_core_dev *dev = devlink_priv(devlink);
bool new_state = val.vbool;
- if (new_state && !MLX5_CAP_GEN(dev, roce)) {
+ if (new_state && !MLX5_CAP_GEN(dev, roce) &&
+ !MLX5_CAP_GEN(dev, roce_rw_supported)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
return -EOPNOTSUPP;
}
@@ -791,13 +797,14 @@ static void mlx5_devlink_traps_unregister(struct devlink *devlink)
int mlx5_devlink_register(struct devlink *devlink)
{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
int err;
- devlink_register(devlink);
err = devlink_params_register(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
if (err)
- goto params_reg_err;
+ return err;
+
mlx5_devlink_set_params_init_values(devlink);
err = mlx5_devlink_auxdev_params_register(devlink);
@@ -808,7 +815,9 @@ int mlx5_devlink_register(struct devlink *devlink)
if (err)
goto traps_reg_err;
- devlink_params_publish(devlink);
+ if (!mlx5_core_is_mp_slave(dev))
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+
return 0;
traps_reg_err:
@@ -816,17 +825,13 @@ traps_reg_err:
auxdev_reg_err:
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
-params_reg_err:
- devlink_unregister(devlink);
return err;
}
void mlx5_devlink_unregister(struct devlink *devlink)
{
- devlink_params_unpublish(devlink);
mlx5_devlink_traps_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
- devlink_unregister(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 87d65f6b5310..7841ef6c193c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -235,6 +235,9 @@ const char *parse_fs_dst(struct trace_seq *p,
const char *ret = trace_seq_buffer_ptr(p);
switch (dst->type) {
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ trace_seq_printf(p, "uplink\n");
+ break;
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
trace_seq_printf(p, "vport=%u\n", dst->vport.num);
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index f9cf9fb31547..da1bec04efff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -1069,7 +1069,6 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
mlx5_fw_tracer_clean_saved_traces_array(tracer);
mlx5_fw_tracer_free_strings_db(tracer);
mlx5_fw_tracer_destroy_log_buf(tracer);
- flush_workqueue(tracer->work_queue);
destroy_workqueue(tracer->work_queue);
kvfree(tracer);
}
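[Editor's note] The removed call is redundant: destroy_workqueue() already drains all pending and in-flight work before freeing the queue, so no separate flush is needed. In short:

        /* flush_workqueue(wq);  -- unnecessary immediately before: */
        destroy_workqueue(wq);  /* drains the queue, then frees it */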
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 7b8c8187543a..a3a4fece0cac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -220,8 +220,6 @@ struct mlx5e_umr_wqe {
struct mlx5_mtt inline_mtts[0];
};
-extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
-
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER,
MLX5E_PFLAG_TX_CQE_BASED_MODER,
@@ -252,6 +250,10 @@ struct mlx5e_params {
struct {
u16 mode;
u8 num_tc;
+ struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+ struct {
+ struct mlx5e_mqprio_rl *rl;
+ } channel;
} mqprio;
bool rx_cqe_compress_def;
bool tunneled_offload_en;
@@ -845,6 +847,7 @@ struct mlx5e_priv {
struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
struct mlx5e_channel_stats trap_stats;
struct mlx5e_ptp_stats ptp_stats;
+ u16 stats_nch;
u16 max_nch;
u8 max_opened_tc;
bool tx_ptp_opened;
@@ -877,6 +880,7 @@ struct mlx5e_priv {
#endif
struct mlx5e_scratchpad scratchpad;
struct mlx5e_htb htb;
+ struct mlx5e_mqprio_rl *mqprio_rl;
};
struct mlx5e_rx_handlers {
@@ -916,6 +920,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
+int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);
@@ -1001,7 +1006,8 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p);
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
struct mlx5e_params *params, struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid);
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
+ struct mlx5e_sq_stats *sq_stats);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
@@ -1100,12 +1106,6 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
struct ethtool_pauseparam *pauseparam);
/* mlx5e generic netdev management API */
-static inline unsigned int
-mlx5e_calc_max_nch(struct mlx5e_priv *priv, const struct mlx5e_profile *profile)
-{
- return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
-}
-
static inline bool
mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
{
@@ -1114,11 +1114,13 @@ mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
}
int mlx5e_priv_init(struct mlx5e_priv *priv,
+ const struct mlx5e_profile *profile,
struct net_device *netdev,
struct mlx5_core_dev *mdev);
void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs);
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
+ unsigned int txqs, unsigned int rxqs);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index 86e079310ac3..ae52e7f38306 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -24,7 +24,7 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
if (mlx5_core_is_pf(priv->mdev)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = PCI_FUNC(priv->mdev->pdev->devfn);
+ attrs.phys.port_number = mlx5_get_dev_index(priv->mdev);
if (MLX5_ESWITCH_MANAGER(priv->mdev)) {
mlx5e_devlink_get_port_parent_id(priv->mdev, &ppid);
memcpy(attrs.switch_id.id, ppid.id, ppid.id_len);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 41684a6c44e9..678ffbb48a25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -125,15 +125,15 @@ struct mlx5e_ethtool_steering {
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
-int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
-int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
+int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs);
#else
static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
-static inline int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{ return -EOPNOTSUPP; }
-static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{ return -EOPNOTSUPP; }
#endif /* CONFIG_MLX5_EN_RXNFC */
@@ -199,6 +199,9 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+int mlx5e_fs_init(struct mlx5e_priv *priv);
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
+
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index 018262d0164b..d5b7110a4265 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -32,7 +32,6 @@ void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
#define MLX5E_REPORTER_PER_Q_MAX_LEN 256
-#define MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC 2000
struct mlx5e_err_ctx {
int (*recover)(void *ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
index ac44bbe95c5c..d290d7276b8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
@@ -35,7 +35,7 @@ static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, void *data,
{
int ch, i = 0;
- for (ch = 0; ch < priv->max_nch; ch++) {
+ for (ch = 0; ch < priv->stats_nch; ch++) {
void *buf = data + i;
if (WARN_ON_ONCE(buf +
@@ -51,7 +51,7 @@ static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, void *data,
static int mlx5e_hv_vhca_stats_buf_size(struct mlx5e_priv *priv)
{
return (sizeof(struct mlx5e_hv_vhca_per_ring_stats) *
- priv->max_nch);
+ priv->stats_nch);
}
static void mlx5e_hv_vhca_stats_work(struct work_struct *work)
@@ -100,7 +100,7 @@ static void mlx5e_hv_vhca_stats_control(struct mlx5_hv_vhca_agent *agent,
sagent = &priv->stats_agent;
block->version = MLX5_HV_VHCA_STATS_VERSION;
- block->rings = priv->max_nch;
+ block->rings = priv->stats_nch;
if (!block->command) {
cancel_delayed_work_sync(&priv->stats_agent.work);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index ee688dec67a9..3a86f66d1295 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -13,8 +13,6 @@ struct mlx5e_ptp_fs {
bool valid;
};
-#define MLX5E_PTP_CHANNEL_IX 0
-
struct mlx5e_ptp_params {
struct mlx5e_params params;
struct mlx5e_sq_param txq_sq_param;
@@ -509,6 +507,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->stats = &c->priv->ptp_stats.rq;
+ rq->ix = MLX5E_PTP_CHANNEL_IX;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
err = mlx5e_rq_set_handlers(rq, params, false);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index c96668bd701c..a71a32e00ebb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -8,6 +8,8 @@
#include "en_stats.h"
#include <linux/ptp_classify.h>
+#define MLX5E_PTP_CHANNEL_IX 0
+
struct mlx5e_ptpsq {
struct mlx5e_txqsq txqsq;
struct mlx5e_cq ts_cq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index e8a8d78e3e4d..50977f01a050 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -7,6 +7,21 @@
#define BYTES_IN_MBIT 125000
+int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
+{
+ if (nbytes < BYTES_IN_MBIT) {
+ qos_warn(mdev, "Input rate (%llu Bytes/sec) below minimum supported (%u Bytes/sec)\n",
+ nbytes, BYTES_IN_MBIT);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u32 mlx5e_qos_bytes2mbits(struct mlx5_core_dev *mdev, u64 nbytes)
+{
+ return div_u64(nbytes, BYTES_IN_MBIT);
+}
+
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
{
return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
@@ -238,7 +253,8 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
if (err)
goto err_free_sq;
err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
- &param_sq, sq, 0, node->hw_id, node->qid);
+ &param_sq, sq, 0, node->hw_id,
+ priv->htb.qos_sq_stats[node->qid]);
if (err)
goto err_close_cq;
@@ -979,3 +995,87 @@ int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ce
return err;
}
+
+struct mlx5e_mqprio_rl {
+ struct mlx5_core_dev *mdev;
+ u32 root_id;
+ u32 *leaves_id;
+ u8 num_tc;
+};
+
+struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void)
+{
+ return kvzalloc(sizeof(struct mlx5e_mqprio_rl), GFP_KERNEL);
+}
+
+void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl)
+{
+ kvfree(rl);
+}
+
+int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
+ u64 max_rate[])
+{
+ int err;
+ int tc;
+
+ if (!mlx5_qos_is_supported(mdev)) {
+ qos_warn(mdev, "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
+ return -EOPNOTSUPP;
+ }
+ if (num_tc > mlx5e_qos_max_leaf_nodes(mdev))
+ return -EINVAL;
+
+ rl->mdev = mdev;
+ rl->num_tc = num_tc;
+ rl->leaves_id = kvcalloc(num_tc, sizeof(*rl->leaves_id), GFP_KERNEL);
+ if (!rl->leaves_id)
+ return -ENOMEM;
+
+ err = mlx5_qos_create_root_node(mdev, &rl->root_id);
+ if (err)
+ goto err_free_leaves;
+
+ qos_dbg(mdev, "Root created, id %#x\n", rl->root_id);
+
+ for (tc = 0; tc < num_tc; tc++) {
+ u32 max_average_bw;
+
+ max_average_bw = mlx5e_qos_bytes2mbits(mdev, max_rate[tc]);
+ err = mlx5_qos_create_leaf_node(mdev, rl->root_id, 0, max_average_bw,
+ &rl->leaves_id[tc]);
+ if (err)
+ goto err_destroy_leaves;
+
+ qos_dbg(mdev, "Leaf[%d] created, id %#x, max average bw %u Mbits/sec\n",
+ tc, rl->leaves_id[tc], max_average_bw);
+ }
+ return 0;
+
+err_destroy_leaves:
+ while (--tc >= 0)
+ mlx5_qos_destroy_node(mdev, rl->leaves_id[tc]);
+ mlx5_qos_destroy_node(mdev, rl->root_id);
+err_free_leaves:
+ kvfree(rl->leaves_id);
+ return err;
+}
+
+void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl)
+{
+ int tc;
+
+ for (tc = 0; tc < rl->num_tc; tc++)
+ mlx5_qos_destroy_node(rl->mdev, rl->leaves_id[tc]);
+ mlx5_qos_destroy_node(rl->mdev, rl->root_id);
+ kvfree(rl->leaves_id);
+}
+
+int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id)
+{
+ if (tc >= rl->num_tc)
+ return -EINVAL;
+
+ *hw_id = rl->leaves_id[tc];
+ return 0;
+}
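[Editor's note] mlx5e_mqprio_rl_init() builds one scheduling root plus a leaf node per TC, each capped at max_rate converted to Mbit/s. Note the partial-unwind idiom on failure: `tc` must be a signed int so that `while (--tc >= 0)` walks back over only the leaves already created. The shape of that idiom in isolation, with create_leaf()/destroy_leaf() as placeholders:

        static int init_leaves(int num_tc)
        {
                int tc, err;

                for (tc = 0; tc < num_tc; tc++) {
                        err = create_leaf(tc);          /* placeholder */
                        if (err)
                                goto err_unwind;
                }
                return 0;

        err_unwind:
                while (--tc >= 0)                       /* undo leaves 0..tc-1 only */
                        destroy_leaf(tc);               /* placeholder */
                return err;
        }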
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
index 757682b7c0e0..b7558907ba20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
@@ -12,6 +12,7 @@ struct mlx5e_priv;
struct mlx5e_channels;
struct mlx5e_channel;
+int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes);
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
@@ -41,4 +42,12 @@ int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
struct netlink_ext_ack *extack);
+/* MQPRIO TX rate limit */
+struct mlx5e_mqprio_rl;
+struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void);
+void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl);
+int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
+ u64 max_rate[]);
+void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl);
+int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index b5ddaa82755f..c6d2f8c78db7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -475,9 +475,6 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
goto err_alloc_wq;
}
- INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
- queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
- msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
err = register_switchdev_notifier(&br_offloads->nb);
@@ -500,6 +497,9 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
err);
goto err_register_netdev;
}
+ INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
+ queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+ msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
return;
err_register_netdev:
@@ -523,10 +523,10 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
if (!br_offloads)
return;
+ cancel_delayed_work_sync(&br_offloads->update_work);
unregister_netdevice_notifier(&br_offloads->netdev_nb);
unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
unregister_switchdev_notifier(&br_offloads->nb);
- cancel_delayed_work(&br_offloads->update_work);
destroy_workqueue(br_offloads->wq);
rtnl_lock();
mlx5_esw_bridge_cleanup(esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index de03684528bb..398c6761eeb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -3,6 +3,7 @@
#include <net/dst_metadata.h>
#include <linux/netdevice.h>
+#include <linux/if_macvlan.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
@@ -409,6 +410,13 @@ static void mlx5e_rep_indr_block_unbind(void *cb_priv)
static LIST_HEAD(mlx5e_block_cb_list);
+static bool mlx5e_rep_macvlan_mode_supported(const struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->mode == MACVLAN_MODE_PASSTHRU;
+}
+
static int
mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
struct mlx5e_rep_priv *rpriv,
@@ -422,8 +430,14 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
struct flow_block_cb *block_cb;
if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
- !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
- return -EOPNOTSUPP;
+ !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev)) {
+ if (!(netif_is_macvlan(netdev) && macvlan_dev_real_dev(netdev) == rpriv->netdev))
+ return -EOPNOTSUPP;
+ if (!mlx5e_rep_macvlan_mode_supported(netdev)) {
+ netdev_warn(netdev, "Offloading ingress filter is supported only with macvlan passthru mode");
+ return -EOPNOTSUPP;
+ }
+ }
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
@@ -647,9 +661,7 @@ static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *sk
"Failed to restore tunnel info for sampled packet\n");
return;
}
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
mlx5e_tc_sample_skb(skb, mapped_obj);
-#endif /* CONFIG_MLX5_TC_SAMPLE */
mlx5_rep_tc_post_napi_receive(tc_priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 0eb125316fe2..74086eb556ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -6,6 +6,7 @@
#include "txrx.h"
#include "devlink.h"
#include "ptp.h"
+#include "lib/tout.h"
static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
{
@@ -32,8 +33,10 @@ out:
static int mlx5e_wait_for_icosq_flush(struct mlx5e_icosq *icosq)
{
- unsigned long exp_time = jiffies +
- msecs_to_jiffies(MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC);
+ struct mlx5_core_dev *dev = icosq->channel->mdev;
+ unsigned long exp_time;
+
+ exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR));
while (time_before(jiffies, exp_time)) {
if (icosq->cc == icosq->pc)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index bb682fd751c9..4f4bc8726ec4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -4,11 +4,14 @@
#include "health.h"
#include "en/ptp.h"
#include "en/devlink.h"
+#include "lib/tout.h"
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{
- unsigned long exp_time = jiffies +
- msecs_to_jiffies(MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC);
+ struct mlx5_core_dev *dev = sq->mdev;
+ unsigned long exp_time;
+
+ exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR));
while (time_before(jiffies, exp_time)) {
if (sq->cc == sq->pc)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index 625cd49ef96c..b8b481b335cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -391,7 +391,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
return 0;
}
-static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
{
int err;
@@ -399,6 +399,7 @@ static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_r
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n",
mlx5e_rqt_get_rqtn(&rss->rqt), err);
+ return err;
}
void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
@@ -490,6 +491,14 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
{
bool changed_indir = false;
bool changed_hash = false;
+ struct mlx5e_rss *old_rss;
+ int err = 0;
+
+ old_rss = mlx5e_rss_alloc();
+ if (!old_rss)
+ return -ENOMEM;
+
+ *old_rss = *rss;
if (hfunc && *hfunc != rss->hash.hfunc) {
switch (*hfunc) {
@@ -497,7 +506,8 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
case ETH_RSS_HASH_TOP:
break;
default:
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
changed_hash = true;
changed_indir = true;
@@ -520,13 +530,20 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
rss->indir.table[i] = indir[i];
}
- if (changed_indir && rss->enabled)
- mlx5e_rss_apply(rss, rqns, num_rqns);
+ if (changed_indir && rss->enabled) {
+ err = mlx5e_rss_apply(rss, rqns, num_rqns);
+ if (err) {
+ *rss = *old_rss;
+ goto out;
+ }
+ }
if (changed_hash)
mlx5e_rss_update_tirs(rss);
- return 0;
+out:
+ mlx5e_rss_free(old_rss);
+ return err;
}
struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss)
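[Editor's note] mlx5e_rss_set_rxfh() previously mutated the RSS state and ignored the result of reprogramming the RQT; it now snapshots the whole state object up front and restores it when mlx5e_rss_apply() fails, so software state never diverges from hardware. The pattern in isolation, with struct state and apply_to_hw() as placeholders:

        /* Snapshot, mutate, roll back on failure (sketch). */
        struct state *old = kmemdup(cur, sizeof(*cur), GFP_KERNEL);
        int err;

        if (!old)
                return -ENOMEM;
        /* ... mutate *cur ... */
        err = apply_to_hw(cur);                 /* placeholder */
        if (err)
                *cur = *old;                    /* hardware kept the old state */
        kfree(old);
        return err;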
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
index 6552ecee3f9b..d1d7e4b9f7ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -602,7 +602,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
}
sample_flow->pre_attr = pre_attr;
- return sample_flow->post_rule;
+ return sample_flow->pre_rule;
err_pre_offload_rule:
kfree(pre_attr);
@@ -613,7 +613,7 @@ err_sample_restore:
err_obj_id:
sampler_put(tc_psample, sample_flow->sampler);
err_sampler:
- if (!post_act_handle)
+ if (sample_flow->post_rule)
del_post_rule(esw, sample_flow, attr);
err_post_rule:
if (post_act_handle)
@@ -628,9 +628,7 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
- struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5e_sample_flow *sample_flow;
- struct mlx5_vport_tbl_attr tbl_attr;
struct mlx5_eswitch *esw;
if (IS_ERR_OR_NULL(tc_psample))
@@ -650,23 +648,14 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
*/
sample_flow = attr->sample_attr->sample_flow;
mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr);
- if (!sample_flow->post_act_handle)
- mlx5_eswitch_del_offloaded_rule(esw, sample_flow->post_rule,
- sample_flow->post_attr);
sample_restore_put(tc_psample, sample_flow->restore);
mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr->restore_obj_id);
sampler_put(tc_psample, sample_flow->sampler);
- if (sample_flow->post_act_handle) {
+ if (sample_flow->post_act_handle)
mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle);
- } else {
- tbl_attr.chain = attr->chain;
- tbl_attr.prio = attr->prio;
- tbl_attr.vport = esw_attr->in_rep->vport;
- tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
- mlx5_esw_vporttbl_put(esw, &tbl_attr);
- kfree(sample_flow->post_attr);
- }
+ else
+ del_post_rule(esw, sample_flow, attr);
kfree(sample_flow->pre_attr);
kfree(sample_flow);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
index db0146df9b30..9ef8a49d7801 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
@@ -19,6 +19,8 @@ struct mlx5e_sample_attr {
struct mlx5e_sample_flow *sample_flow;
};
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+
void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj);
struct mlx5_flow_handle *
@@ -38,4 +40,29 @@ mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act);
void
mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample);
+#else /* CONFIG_MLX5_TC_SAMPLE */
+
+static inline struct mlx5_flow_handle *
+mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr,
+ u32 tunnel_id)
+{ return ERR_PTR(-EOPNOTSUPP); }
+
+static inline void
+mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr) {}
+
+static inline struct mlx5e_tc_psample *
+mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act)
+{ return ERR_PTR(-EOPNOTSUPP); }
+
+static inline void
+mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample) {}
+
+static inline void
+mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj) {}
+
+#endif /* CONFIG_MLX5_TC_SAMPLE */
#endif /* __MLX5_EN_TC_SAMPLE_H__ */
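[Editor's note] Providing static inline stubs under #else is what lets the caller in rep/tc.c drop its own #ifdef around mlx5e_tc_sample_skb(). The generic shape of the pattern:

        #if IS_ENABLED(CONFIG_FOO)              /* covers =y and =m */
        void foo_handle(struct sk_buff *skb);
        #else
        static inline void foo_handle(struct sk_buff *skb) {}
        #endif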
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 6c949abcd2e1..740cd6f088b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -2127,12 +2127,21 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
ct_priv->post_act = post_act;
mutex_init(&ct_priv->control_lock);
- rhashtable_init(&ct_priv->zone_ht, &zone_params);
- rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
- rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params);
+ if (rhashtable_init(&ct_priv->zone_ht, &zone_params))
+ goto err_ct_zone_ht;
+ if (rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params))
+ goto err_ct_tuples_ht;
+ if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
+ goto err_ct_tuples_nat_ht;
return ct_priv;
+err_ct_tuples_nat_ht:
+ rhashtable_destroy(&ct_priv->ct_tuples_ht);
+err_ct_tuples_ht:
+ rhashtable_destroy(&ct_priv->zone_ht);
+err_ct_zone_ht:
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
err_ct_nat_tbl:
mlx5_chains_destroy_global_table(chains, ct_priv->ct);
err_ct_tbl:
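[Editor's note] rhashtable_init() allocates and can fail, so its return value is now checked, with already-initialized tables destroyed in reverse order before falling through to the pre-existing error labels. A minimal sketch of the unwind, assuming a hypothetical struct tabs:

        static int init_tables(struct tabs *t)
        {
                int err;

                err = rhashtable_init(&t->a, &a_params);
                if (err)
                        return err;
                err = rhashtable_init(&t->b, &b_params);
                if (err)
                        rhashtable_destroy(&t->a);      /* reverse-order unwind */
                return err;
        }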
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index b4e986818794..d7e613d0139a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -10,6 +10,8 @@
#include "en_tc.h"
#include "rep/tc.h"
#include "rep/neigh.h"
+#include "lag/lag.h"
+#include "lag/mp.h"
struct mlx5e_tc_tun_route_attr {
struct net_device *out_dev;
@@ -118,6 +120,11 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
attr->fl.fl4.flowi4_oif = uplink_dev->ifindex;
+ } else {
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
+
+ if (tunnel && tunnel->get_remote_ifindex)
+ attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(mirred_dev);
}
rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4);
@@ -435,12 +442,15 @@ static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_tc_tun_route_attr *attr)
{
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
struct net_device *route_dev;
struct net_device *out_dev;
struct dst_entry *dst;
struct neighbour *n;
int ret;
+ if (tunnel && tunnel->get_remote_ifindex)
+ attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(mirred_dev);
dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, &attr->fl.fl6,
NULL);
if (IS_ERR(dst))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 9350ca05ce65..aa092eaeaec3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -51,6 +51,7 @@ struct mlx5e_tc_tunnel {
void *headers_v);
bool (*encap_info_equal)(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b);
+ int (*get_remote_ifindex)(struct net_device *mirred_dev);
};
extern struct mlx5e_tc_tunnel vxlan_tunnel;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index 4267f3a1059e..fd07c4cbfd1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -141,6 +141,14 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
return 0;
}
+static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev)
+{
+ const struct vxlan_dev *vxlan = netdev_priv(mirred_dev);
+ const struct vxlan_rdst *dst = &vxlan->default_dst;
+
+ return dst->remote_ifindex;
+}
+
struct mlx5e_tc_tunnel vxlan_tunnel = {
.tunnel_type = MLX5E_TC_TUNNEL_TYPE_VXLAN,
.match_level = MLX5_MATCH_L4,
@@ -151,4 +159,5 @@ struct mlx5e_tc_tunnel vxlan_tunnel = {
.parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan,
.parse_tunnel = mlx5e_tc_tun_parse_vxlan,
.encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
+ .get_remote_ifindex = mlx5e_tc_tun_get_remote_ifindex,
};
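[Editor's note] .get_remote_ifindex is a new optional hook on struct mlx5e_tc_tunnel, implemented only by VXLAN here, where it reports the lower device the VXLAN netdev is bound to. Callers in tc_tun.c therefore NULL-check it before steering the route lookup, the usual optional-hook call-site shape:

        /* Optional ops-table hook: check before calling (sketch). */
        if (tunnel && tunnel->get_remote_ifindex)
                attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(mirred_dev);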
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 33de8f0092a6..fb5397324aa4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -141,8 +141,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
* Pkt: MAC IP ESP IP L4
*
* Transport Mode:
- * SWP: OutL3 InL4
- * InL3
+ * SWP: OutL3 OutL4
* Pkt: MAC IP ESP L4
*
* Tunnel(VXLAN TCP/UDP) over Transport Mode
@@ -171,31 +170,35 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
return;
if (!xo->inner_ipproto) {
- eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
- eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- if (xo->proto == IPPROTO_UDP)
+ switch (xo->proto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ /* IP | ESP | TCP */
+ eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
+ switch (xo->inner_ipproto) {
+ case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
- return;
- }
-
- /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
- switch (xo->inner_ipproto) {
- case IPPROTO_UDP:
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
- fallthrough;
- case IPPROTO_TCP:
- eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
- eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- break;
- default:
- break;
+ fallthrough;
+ case IPPROTO_TCP:
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ eseg->swp_inner_l4_offset =
+ (skb->csum_start + skb->head - skb->data) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ break;
+ default:
+ break;
+ }
}
- return;
}
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
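[Editor's note] The corrected comment matches the fix: transport-mode ESP carries no inner packet, so the L4 header following ESP is described with the *outer* L4 software-parser fields rather than the inner ones. The offsets are divided by two because, judging from the convention used throughout this file, the hardware expects them in 2-byte units; the transport-mode `IP | ESP | TCP/UDP` case then reduces to:

        /* Offsets assumed to be in 2-byte words, hence the / 2. */
        eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
        if (xo->proto == IPPROTO_UDP)
                eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;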
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 306fb5d6a36d..25926e581d18 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -267,9 +267,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
break;
case ETH_SS_TEST:
- for (i = 0; i < mlx5e_self_test_num(priv); i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- mlx5e_self_tests[i]);
+ mlx5e_self_test_fill_strings(priv, data);
break;
case ETH_SS_STATS:
@@ -2036,6 +2034,17 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
}
new_params = priv->channels.params;
+ /* Don't allow enabling TX-port-TS if MQPRIO mode channel offload is
+ * active, since it defines explicitly which TC accepts the packet.
+ * This conflicts with TX-port-TS hijacking the PTP traffic to a specific
+ * HW TX-queue.
+ */
+ if (enable && new_params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
+ netdev_err(priv->netdev,
+ "%s: MQPRIO mode channel offload is active, cannot set the TX-port-TS\n",
+ __func__);
+ return -EINVAL;
+ }
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_PORT_TS, enable);
/* No need to verify SQ stop room as
* ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both
@@ -2128,12 +2137,14 @@ int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return 0;
}
- return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs);
+ return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
- return mlx5e_ethtool_set_rxnfc(dev, cmd);
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_set_rxnfc(priv, cmd);
}
static int query_port_status_opcode(struct mlx5_core_dev *mdev, u32 *status_opcode)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index c06b4b938ae7..aeff1d972a46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -71,12 +71,12 @@ struct mlx5e_l2_hash_node {
bool mpfs;
};
-static inline int mlx5e_hash_l2(u8 *addr)
+static inline int mlx5e_hash_l2(const u8 *addr)
{
return addr[5];
}
-static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
+static void mlx5e_add_l2_to_hash(struct hlist_head *hash, const u8 *addr)
{
struct mlx5e_l2_hash_node *hn;
int ix = mlx5e_hash_l2(addr);
@@ -1186,10 +1186,6 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
struct mlx5e_flow_table *ft;
int err;
- priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
- if (!priv->fs.vlan)
- return -ENOMEM;
-
ft = &priv->fs.vlan->ft;
ft->num_groups = 0;
@@ -1198,10 +1194,8 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
- if (IS_ERR(ft->t)) {
- err = PTR_ERR(ft->t);
- goto err_free_t;
- }
+ if (IS_ERR(ft->t))
+ return PTR_ERR(ft->t);
ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
if (!ft->g) {
@@ -1221,9 +1215,6 @@ err_free_g:
kfree(ft->g);
err_destroy_vlan_table:
mlx5_destroy_flow_table(ft->t);
-err_free_t:
- kvfree(priv->fs.vlan);
- priv->fs.vlan = NULL;
return err;
}
@@ -1232,7 +1223,6 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
mlx5e_del_vlan_rules(priv);
mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
- kvfree(priv->fs.vlan);
}
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
@@ -1351,3 +1341,17 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
mlx5e_arfs_destroy_tables(priv);
mlx5e_ethtool_cleanup_steering(priv);
}
+
+int mlx5e_fs_init(struct mlx5e_priv *priv)
+{
+ priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
+ if (!priv->fs.vlan)
+ return -ENOMEM;
+ return 0;
+}
+
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
+{
+ kvfree(priv->fs.vlan);
+ priv->fs.vlan = NULL;
+}
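[Editor's note] The priv->fs.vlan allocation moves out of table creation into a dedicated mlx5e_fs_init()/mlx5e_fs_cleanup() pair, so the structure lives as long as the priv itself rather than only while the flow tables exist; VLAN state touched while the tables are down no longer hits freed or NULL memory. The general lifetime split, sketched with a hypothetical struct foo:

        /* Long-lived SW state: allocate in init, free only in cleanup. */
        static int foo_init(struct foo *f)
        {
                f->state = kvzalloc(sizeof(*f->state), GFP_KERNEL);
                return f->state ? 0 : -ENOMEM;
        }

        /* HW tables keep their own create/destroy, called on each up/down. */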
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 03693fa74a70..81ebf281cdb4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -937,9 +937,8 @@ static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
return 0;
}
-int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int err = 0;
switch (cmd->cmd) {
@@ -960,10 +959,9 @@ int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return err;
}
-int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int err = 0;
switch (info->cmd) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 3fd515e7bf30..0ff36c83714b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -930,9 +930,10 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+ size_t size;
- xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
- GFP_KERNEL, numa);
+ size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq);
+ xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
if (!xdpi_fifo->xi)
return -ENOMEM;
@@ -946,10 +947,11 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ size_t size;
int err;
- sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
- GFP_KERNEL, numa);
+ size = array_size(sizeof(*sq->db.wqe_info), wq_sz);
+ sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
if (!sq->db.wqe_info)
return -ENOMEM;
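[Editor's note] Open-coded `sizeof(x) * n` allocation sizes are converted to array_size() from <linux/overflow.h>, which saturates at SIZE_MAX on multiplication overflow so the allocation fails cleanly instead of silently under-allocating:

        #include <linux/overflow.h>

        size_t size = array_size(sizeof(*fifo->xi), dsegs_per_wq);

        fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
        if (!fifo->xi)
                return -ENOMEM; /* also taken when array_size() saturated */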
@@ -1298,7 +1300,8 @@ static int mlx5e_set_sq_maxrate(struct net_device *dev,
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
struct mlx5e_params *params, struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid)
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
+ struct mlx5e_sq_stats *sq_stats)
{
struct mlx5e_create_sq_param csp = {};
u32 tx_rate;
@@ -1308,10 +1311,7 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
if (err)
return err;
- if (qos_queue_group_id)
- sq->stats = c->priv->htb.qos_sq_stats[qos_qid];
- else
- sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
+ sq->stats = sq_stats;
csp.tisn = tisn;
csp.tis_lst_sz = 1;
@@ -1705,6 +1705,36 @@ static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
mlx5e_close_cq(&c->sq[tc].cq);
}
+static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq)
+{
+ int tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+ if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count)
+ return tc;
+
+ WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq);
+ return -ENOENT;
+}
+
+static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
+ u32 *hw_id)
+{
+ int tc;
+
+ if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL ||
+ !params->mqprio.channel.rl) {
+ *hw_id = 0;
+ return 0;
+ }
+
+ tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix);
+ if (tc < 0)
+ return tc;
+
+ return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id);
+}
+
static int mlx5e_open_sqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
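[Editor's note] mlx5e_mqprio_txq_to_tc() above inverts the tc_to_txq table: a txq belongs to tc when `txq - offset < count`, and because the subtraction is unsigned, a txq below the offset wraps to a huge value and fails the same comparison, so one test covers both bounds. A standalone sketch with a worked example:

        #include <linux/netdevice.h>

        /* Example map: tc0 = {.offset = 0, .count = 4},
         *              tc1 = {.offset = 4, .count = 2}.
         * txq 5: 5 - 0 = 5 >= 4 (not tc0); 5 - 4 = 1 < 2 -> tc1. */
        static int txq_to_tc(const struct netdev_tc_txq *map, int ntc,
                             unsigned int txq)
        {
                int tc;

                for (tc = 0; tc < ntc; tc++)
                        if (txq - map[tc].offset < map[tc].count)
                                return tc;
                return -ENOENT;
        }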
@@ -1713,9 +1743,16 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
int txq_ix = c->ix + tc * params->num_channels;
+ u32 qos_queue_group_id;
+
+ err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
+ if (err)
+ goto err_close_sqs;
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
- params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0);
+ params, &cparam->txq_sq, &c->sq[tc], tc,
+ qos_queue_group_id,
+ &c->priv->channel_stats[c->ix].sq[tc]);
if (err)
goto err_close_sqs;
}
@@ -2264,7 +2301,7 @@ void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
}
static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
- struct tc_mqprio_qopt_offload *mqprio)
+ struct netdev_tc_txq *tc_to_txq)
{
int tc, err;
@@ -2282,11 +2319,8 @@ static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
for (tc = 0; tc < ntc; tc++) {
u16 count, offset;
- /* For DCB mode, map netdev TCs to offset 0
- * We have our own UP to TXQ mapping for QoS
- */
- count = mqprio ? mqprio->qopt.count[tc] : nch;
- offset = mqprio ? mqprio->qopt.offset[tc] : 0;
+ count = tc_to_txq[tc].count;
+ offset = tc_to_txq[tc].offset;
netdev_set_tc_queue(netdev, tc, count, offset);
}
@@ -2315,19 +2349,24 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
{
+ struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
struct net_device *netdev = priv->netdev;
int old_num_txqs, old_ntc;
int num_rxqs, nch, ntc;
int err;
+ int i;
old_num_txqs = netdev->real_num_tx_queues;
old_ntc = netdev->num_tc ? : 1;
+ for (i = 0; i < ARRAY_SIZE(old_tc_to_txq); i++)
+ old_tc_to_txq[i] = netdev->tc_to_txq[i];
nch = priv->channels.params.num_channels;
- ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
+ ntc = priv->channels.params.mqprio.num_tc;
num_rxqs = nch * priv->profile->rq_groups;
+ tc_to_txq = priv->channels.params.mqprio.tc_to_txq;
- err = mlx5e_netdev_set_tcs(netdev, nch, ntc, NULL);
+ err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq);
if (err)
goto err_out;
err = mlx5e_update_tx_netdev_queues(priv);
@@ -2338,6 +2377,13 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
goto err_txqs;
}
+ if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) {
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+ priv->mqprio_rl = priv->channels.params.mqprio.channel.rl;
+ }
return 0;
@@ -2350,11 +2396,14 @@ err_txqs:
WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
err_tcs:
- mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc, NULL);
+ WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
+ old_tc_to_txq));
err_out:
return err;
}
+static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
+
static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
struct mlx5e_params *params)
{
@@ -2861,6 +2910,61 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
return 0;
}
+static void mlx5e_mqprio_build_default_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
+ int ntc, int nch)
+{
+ int tc;
+
+ memset(tc_to_txq, 0, sizeof(*tc_to_txq) * TC_MAX_QUEUE);
+
+ /* Map netdev TCs to offset 0.
+ * We have our own UP to TXQ mapping for DCB mode of QoS
+ */
+ for (tc = 0; tc < ntc; tc++) {
+ tc_to_txq[tc] = (struct netdev_tc_txq) {
+ .count = nch,
+ .offset = 0,
+ };
+ }
+}
+
+static void mlx5e_mqprio_build_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
+ struct tc_mqprio_qopt *qopt)
+{
+ int tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+ tc_to_txq[tc] = (struct netdev_tc_txq) {
+ .count = qopt->count[tc],
+ .offset = qopt->offset[tc],
+ };
+ }
+}
+
+static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
+{
+ params->mqprio.mode = TC_MQPRIO_MODE_DCB;
+ params->mqprio.num_tc = num_tc;
+ params->mqprio.channel.rl = NULL;
+ mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
+ params->num_channels);
+}
+
+static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
+ struct tc_mqprio_qopt *qopt,
+ struct mlx5e_mqprio_rl *rl)
+{
+ params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
+ params->mqprio.num_tc = qopt->num_tc;
+ params->mqprio.channel.rl = rl;
+ mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
+}
+
+static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
+{
+ mlx5e_params_mqprio_dcb_set(params, 1);
+}
+
static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
struct tc_mqprio_qopt *mqprio)
{
@@ -2874,8 +2978,7 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
return -EINVAL;
new_params = priv->channels.params;
- new_params.mqprio.mode = TC_MQPRIO_MODE_DCB;
- new_params.mqprio.num_tc = tc ? tc : 1;
+ mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1);
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_num_channels_changed_ctx, NULL, true);
@@ -2889,9 +2992,17 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
struct net_device *netdev = priv->netdev;
+ struct mlx5e_ptp *ptp_channel;
int agg_count = 0;
int i;
+ ptp_channel = priv->channels.ptp;
+ if (ptp_channel && test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state)) {
+ netdev_err(netdev,
+ "Cannot activate MQPRIO mode channel since it conflicts with TX port TS\n");
+ return -EINVAL;
+ }
+
if (mqprio->qopt.offset[0] != 0 || mqprio->qopt.num_tc < 1 ||
mqprio->qopt.num_tc > MLX5E_MAX_NUM_MQPRIO_CH_TC)
return -EINVAL;
@@ -2905,9 +3016,13 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
netdev_err(netdev, "Min tx rate is not supported\n");
return -EINVAL;
}
+
if (mqprio->max_rate[i]) {
- netdev_err(netdev, "Max tx rate is not supported\n");
- return -EINVAL;
+ int err;
+
+ err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]);
+ if (err)
+ return err;
}
if (mqprio->qopt.offset[i] != agg_count) {
@@ -2917,8 +3032,8 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
agg_count += mqprio->qopt.count[i];
}
- if (priv->channels.params.num_channels < agg_count) {
- netdev_err(netdev, "Num of queues (%d) exceeds available (%d)\n",
+ if (priv->channels.params.num_channels != agg_count) {
+ netdev_err(netdev, "Num of queues (%d) does not match available (%d)\n",
agg_count, priv->channels.params.num_channels);
return -EINVAL;
}
@@ -2926,36 +3041,53 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
return 0;
}
-static int mlx5e_mqprio_channel_set_tcs_ctx(struct mlx5e_priv *priv, void *ctx)
+static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio)
{
- struct tc_mqprio_qopt_offload *mqprio = (struct tc_mqprio_qopt_offload *)ctx;
- struct net_device *netdev = priv->netdev;
- u8 num_tc;
-
- if (priv->channels.params.mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
- return -EINVAL;
-
- num_tc = priv->channels.params.mqprio.num_tc;
- mlx5e_netdev_set_tcs(netdev, 0, num_tc, mqprio);
+ int tc;
- return 0;
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
+ if (mqprio->max_rate[tc])
+ return true;
+ return false;
}
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
+ mlx5e_fp_preactivate preactivate;
struct mlx5e_params new_params;
+ struct mlx5e_mqprio_rl *rl;
+ bool nch_changed;
int err;
err = mlx5e_mqprio_channel_validate(priv, mqprio);
if (err)
return err;
+ rl = NULL;
+ if (mlx5e_mqprio_rate_limit(mqprio)) {
+ rl = mlx5e_mqprio_rl_alloc();
+ if (!rl)
+ return -ENOMEM;
+ err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc,
+ mqprio->max_rate);
+ if (err) {
+ mlx5e_mqprio_rl_free(rl);
+ return err;
+ }
+ }
+
new_params = priv->channels.params;
- new_params.mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
- new_params.mqprio.num_tc = mqprio->qopt.num_tc;
- err = mlx5e_safe_switch_params(priv, &new_params,
- mlx5e_mqprio_channel_set_tcs_ctx, mqprio, true);
+ mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
+
+ nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
+ preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
+ mlx5e_update_netdev_queues_ctx;
+ err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+ if (err && rl) {
+ mlx5e_mqprio_rl_cleanup(rl);
+ mlx5e_mqprio_rl_free(rl);
+ }
return err;
}
@@ -3065,7 +3197,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
{
int i;
- for (i = 0; i < priv->max_nch; i++) {
+ for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
@@ -3175,7 +3307,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
return -EADDRNOTAVAIL;
netif_addr_lock_bh(netdev);
- ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+ eth_hw_addr_set(netdev, saddr->sa_data);
netif_addr_unlock_bh(netdev);
mlx5e_nic_set_rx_mode(priv);
@@ -3274,20 +3406,67 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable)
return mlx5_set_port_fcs(mdev, !enable);
}
+static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
+{
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
+ bool supported, curr_state;
+ int err;
+
+ if (!MLX5_CAP_GEN(mdev, ports_check))
+ return 0;
+
+ err = mlx5_query_ports_check(mdev, in, sizeof(in));
+ if (err)
+ return err;
+
+ supported = MLX5_GET(pcmr_reg, in, rx_ts_over_crc_cap);
+ curr_state = MLX5_GET(pcmr_reg, in, rx_ts_over_crc);
+
+ if (!supported || enable == curr_state)
+ return 0;
+
+ MLX5_SET(pcmr_reg, in, local_port, 1);
+ MLX5_SET(pcmr_reg, in, rx_ts_over_crc, enable);
+
+ return mlx5_set_ports_check(mdev, in, sizeof(in));
+}
+
static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_channels *chs = &priv->channels;
+ struct mlx5_core_dev *mdev = priv->mdev;
int err;
mutex_lock(&priv->state_lock);
- priv->channels.params.scatter_fcs_en = enable;
- err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
- if (err)
- priv->channels.params.scatter_fcs_en = !enable;
+ if (enable) {
+ err = mlx5e_set_rx_port_ts(mdev, false);
+ if (err)
+ goto out;
- mutex_unlock(&priv->state_lock);
+ chs->params.scatter_fcs_en = true;
+ err = mlx5e_modify_channels_scatter_fcs(chs, true);
+ if (err) {
+ chs->params.scatter_fcs_en = false;
+ mlx5e_set_rx_port_ts(mdev, true);
+ }
+ } else {
+ chs->params.scatter_fcs_en = false;
+ err = mlx5e_modify_channels_scatter_fcs(chs, false);
+ if (err) {
+ chs->params.scatter_fcs_en = true;
+ goto out;
+ }
+ err = mlx5e_set_rx_port_ts(mdev, true);
+ if (err) {
+ mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err);
+ err = 0;
+ }
+ }
+out:
+ mutex_unlock(&priv->state_lock);
return err;
}
@@ -4186,13 +4365,11 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
struct mlx5_core_dev *mdev = priv->mdev;
u8 rx_cq_period_mode;
- priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
-
params->sw_mtu = mtu;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
priv->max_nch);
- params->mqprio.num_tc = 1;
+ mlx5e_params_mqprio_reset(params);
/* Set an initial non-zero value, so that mlx5e_select_queue won't
* divide by zero if called before first activating channels.
@@ -4482,6 +4659,12 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5e_timestamp_init(priv);
+ err = mlx5e_fs_init(priv);
+ if (err) {
+ mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
+ return err;
+ }
+
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
@@ -4499,6 +4682,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_health_destroy_reporters(priv);
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
+ mlx5e_fs_cleanup(priv);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -4682,8 +4866,35 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.rx_ptp_support = true,
};
+static unsigned int
+mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
+ const struct mlx5e_profile *profile)
+{
+ unsigned int max_nch, tmp;
+
+ /* core resources */
+ max_nch = mlx5e_get_max_num_channels(mdev);
+
+ /* netdev rx queues */
+ tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
+ max_nch = min_t(unsigned int, max_nch, tmp);
+
+ /* netdev tx queues */
+ tmp = netdev->num_tx_queues;
+ if (mlx5_qos_is_supported(mdev))
+ tmp -= mlx5e_qos_max_leaf_nodes(mdev);
+ if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
+ tmp -= profile->max_tc;
+ tmp = tmp / profile->max_tc;
+ max_nch = min_t(unsigned int, max_nch, tmp);
+
+ return max_nch;
+}
+
/* mlx5e generic netdev management API (move to en_common.c) */
int mlx5e_priv_init(struct mlx5e_priv *priv,
+ const struct mlx5e_profile *profile,
struct net_device *netdev,
struct mlx5_core_dev *mdev)
{
@@ -4691,6 +4902,8 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
priv->mdev = mdev;
priv->netdev = netdev;
priv->msglevel = MLX5E_MSG_LEVEL;
+ priv->max_nch = mlx5e_calc_max_nch(mdev, netdev, profile);
+ priv->stats_nch = priv->max_nch;
priv->max_opened_tc = 1;
if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
@@ -4730,11 +4943,17 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kfree(priv->htb.qos_sq_stats[i]);
kvfree(priv->htb.qos_sq_stats);
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+
memset(priv, 0, sizeof(*priv));
}
struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs)
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
+ unsigned int txqs, unsigned int rxqs)
{
struct net_device *netdev;
int err;
@@ -4745,7 +4964,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int
return NULL;
}
- err = mlx5e_priv_init(netdev_priv(netdev), netdev, mdev);
+ err = mlx5e_priv_init(netdev_priv(netdev), profile, netdev, mdev);
if (err) {
mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
goto err_free_netdev;
@@ -4787,7 +5006,7 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
/* max number of channels may have changed */
- max_nch = mlx5e_get_max_num_channels(priv->mdev);
+ max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
if (priv->channels.params.num_channels > max_nch) {
mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
/* Reducing the number of channels - RXFH has to be reset, and
@@ -4795,7 +5014,18 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
*/
priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
priv->channels.params.num_channels = max_nch;
+ if (priv->channels.params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
+ mlx5_core_warn(priv->mdev, "MLX5E: Disabling MQPRIO channel mode\n");
+ mlx5e_params_mqprio_reset(&priv->channels.params);
+ }
+ }
+ if (max_nch != priv->max_nch) {
+ mlx5_core_warn(priv->mdev,
+ "MLX5E: Updating max number of channels from %u to %u\n",
+ priv->max_nch, max_nch);
+ priv->max_nch = max_nch;
}
+
/* 1. Set the real number of queues in the kernel the first time.
* 2. Set our default XPS cpumask.
* 3. Build the RQT.
@@ -4860,7 +5090,7 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
- err = mlx5e_priv_init(priv, netdev, mdev);
+ err = mlx5e_priv_init(priv, new_profile, netdev, mdev);
if (err) {
mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
return err;
@@ -4886,20 +5116,12 @@ priv_cleanup:
int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
const struct mlx5e_profile *new_profile, void *new_ppriv)
{
- unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile);
const struct mlx5e_profile *orig_profile = priv->profile;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
void *orig_ppriv = priv->ppriv;
int err, rollback_err;
- /* sanity */
- if (new_max_nch != priv->max_nch) {
- netdev_warn(netdev, "%s: Replacing profile with different max channels\n",
- __func__);
- return -EINVAL;
- }
-
/* cleanup old profile */
mlx5e_detach_netdev(priv);
priv->profile->cleanup(priv);
@@ -4995,7 +5217,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
nch = mlx5e_get_max_num_channels(mdev);
txqs = nch * profile->max_tc + ptp_txqs + qos_sqs;
rxqs = nch * profile->rq_groups;
- netdev = mlx5e_create_netdev(mdev, txqs, rxqs);
+ netdev = mlx5e_create_netdev(mdev, profile, txqs, rxqs);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
return -ENOMEM;
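
Note on the en_main.c hunks above: mlx5e_setup_tc_mqprio_channel() follows a strict validate / allocate / switch / rollback-on-failure order -- the optional rate limiter is created only when some TC carries a max_rate, and it is torn down only if the parameter switch fails. A minimal userspace sketch of that ordering; setup_channel_mode and struct rl are invented stand-ins, not the driver code itself:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct rl { unsigned long rates[8]; };     /* stand-in for mlx5e_mqprio_rl */

    static bool wants_rate_limit(const unsigned long *max_rate, int num_tc)
    {
        for (int tc = 0; tc < num_tc; tc++)
            if (max_rate[tc])
                return true;
        return false;
    }

    static int setup_channel_mode(const unsigned long *max_rate, int num_tc,
                                  int (*switch_params)(struct rl *))
    {
        struct rl *rl = NULL;
        int err;

        if (wants_rate_limit(max_rate, num_tc)) {
            rl = calloc(1, sizeof(*rl));       /* mlx5e_mqprio_rl_alloc() analogue */
            if (!rl)
                return -ENOMEM;
        }

        err = switch_params(rl);               /* mlx5e_safe_switch_params() analogue */
        if (err)
            free(rl);   /* undo only on failure; on success the new params own rl */
        return err;
    }
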
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index ae71a17fdb27..0684ac6699b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -596,7 +596,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
params = &priv->channels.params;
params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
@@ -619,6 +618,11 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->mqprio.num_tc = 1;
params->tunneled_offload_en = false;
+ /* Set an initial non-zero value, so that mlx5e_select_queue won't
+ * divide by zero if called before first activating channels.
+ */
+ priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
+
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
@@ -644,7 +648,6 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->hw_features |= NETIF_F_RXCSUM;
netdev->features |= netdev->hw_features;
- netdev->features |= NETIF_F_VLAN_CHALLENGED;
netdev->features |= NETIF_F_NETNS_LOCAL;
}
@@ -1169,7 +1172,7 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
nch = mlx5e_get_max_num_channels(dev);
txqs = nch * profile->max_tc;
rxqs = nch * profile->rq_groups;
- netdev = mlx5e_create_netdev(dev, txqs, rxqs);
+ netdev = mlx5e_create_netdev(dev, profile, txqs, rxqs);
if (!netdev) {
mlx5_core_warn(dev,
"Failed to create representor netdev for vport %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3c65fd0bcf31..29a6586ef28d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1001,14 +1001,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
goto csum_unnecessary;
if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
- u8 ipproto = get_ip_proto(skb, network_depth, proto);
-
- if (unlikely(ipproto == IPPROTO_SCTP))
+ if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
goto csum_unnecessary;
- if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
- goto csum_none;
-
stats->csum_complete++;
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
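
Note on the en_rx.c hunk above: CHECKSUM_COMPLETE carries a ones'-complement sum of the packet, which is why SCTP -- whose checksum is CRC32c, not a ones'-complement sum -- still bails out to csum_unnecessary. The stack's fold/unfold helpers move between a 32-bit accumulator and the 16-bit form stored in skb->csum; a small csum_fold-style sketch of the folding direction (illustrative, not the kernel helper):

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit ones'-complement accumulator to 16 bits. */
    static uint16_t csum_fold32(uint32_t sum)
    {
        sum = (sum & 0xffff) + (sum >> 16);    /* add carries back in ... */
        sum = (sum & 0xffff) + (sum >> 16);    /* ... at most twice */
        return (uint16_t)sum;
    }

    int main(void)
    {
        printf("0x%04x\n", csum_fold32(0x0001fffeu));   /* -> 0xffff */
        return 0;
    }
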
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index ce8ab1f01876..8c9163d2c646 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -35,30 +35,7 @@
#include <net/udp.h>
#include "en.h"
#include "en/port.h"
-
-enum {
- MLX5E_ST_LINK_STATE,
- MLX5E_ST_LINK_SPEED,
- MLX5E_ST_HEALTH_INFO,
-#ifdef CONFIG_INET
- MLX5E_ST_LOOPBACK,
-#endif
- MLX5E_ST_NUM,
-};
-
-const char mlx5e_self_tests[MLX5E_ST_NUM][ETH_GSTRING_LEN] = {
- "Link Test",
- "Speed Test",
- "Health Test",
-#ifdef CONFIG_INET
- "Loopback Test",
-#endif
-};
-
-int mlx5e_self_test_num(struct mlx5e_priv *priv)
-{
- return ARRAY_SIZE(mlx5e_self_tests);
-}
+#include "eswitch.h"
static int mlx5e_test_health_info(struct mlx5e_priv *priv)
{
@@ -265,6 +242,14 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
mlx5e_refresh_tirs(priv, false, false);
}
+static int mlx5e_cond_loopback(struct mlx5e_priv *priv)
+{
+ if (is_mdev_switchdev_mode(priv->mdev))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
#define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200))
static int mlx5e_test_loopback(struct mlx5e_priv *priv)
{
@@ -313,37 +298,47 @@ out:
}
#endif
-static int (*mlx5e_st_func[MLX5E_ST_NUM])(struct mlx5e_priv *) = {
- mlx5e_test_link_state,
- mlx5e_test_link_speed,
- mlx5e_test_health_info,
+typedef int (*mlx5e_st_func)(struct mlx5e_priv *);
+
+struct mlx5e_st {
+ char name[ETH_GSTRING_LEN];
+ mlx5e_st_func st_func;
+ mlx5e_st_func cond_func;
+};
+
+static struct mlx5e_st mlx5e_sts[] = {
+ { "Link Test", mlx5e_test_link_state },
+ { "Speed Test", mlx5e_test_link_speed },
+ { "Health Test", mlx5e_test_health_info },
#ifdef CONFIG_INET
- mlx5e_test_loopback,
+ { "Loopback Test", mlx5e_test_loopback, mlx5e_cond_loopback },
#endif
};
+#define MLX5E_ST_NUM ARRAY_SIZE(mlx5e_sts)
+
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf)
{
struct mlx5e_priv *priv = netdev_priv(ndev);
- int i;
-
- memset(buf, 0, sizeof(u64) * MLX5E_ST_NUM);
+ int i, count = 0;
mutex_lock(&priv->state_lock);
netdev_info(ndev, "Self test begin..\n");
for (i = 0; i < MLX5E_ST_NUM; i++) {
- netdev_info(ndev, "\t[%d] %s start..\n",
- i, mlx5e_self_tests[i]);
- buf[i] = mlx5e_st_func[i](priv);
- netdev_info(ndev, "\t[%d] %s end: result(%lld)\n",
- i, mlx5e_self_tests[i], buf[i]);
+ struct mlx5e_st st = mlx5e_sts[i];
+
+ if (st.cond_func && st.cond_func(priv))
+ continue;
+ netdev_info(ndev, "\t[%d] %s start..\n", i, st.name);
+ buf[count] = st.st_func(priv);
+ netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]);
}
mutex_unlock(&priv->state_lock);
- for (i = 0; i < MLX5E_ST_NUM; i++) {
+ for (i = 0; i < count; i++) {
if (buf[i]) {
etest->flags |= ETH_TEST_FL_FAILED;
break;
@@ -352,3 +347,24 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
netdev_info(ndev, "Self test out: status flags(0x%x)\n",
etest->flags);
}
+
+int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data)
+{
+ int i, count = 0;
+
+ for (i = 0; i < MLX5E_ST_NUM; i++) {
+ struct mlx5e_st st = mlx5e_sts[i];
+
+ if (st.cond_func && st.cond_func(priv))
+ continue;
+ if (data)
+ strcpy(data + count * ETH_GSTRING_LEN, st.name);
+ count++;
+ }
+ return count;
+}
+
+int mlx5e_self_test_num(struct mlx5e_priv *priv)
+{
+ return mlx5e_self_test_fill_strings(priv, NULL);
+}
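
Note on the en_selftest.c refactor above: it rests on one invariant -- the same cond_func filter runs in mlx5e_self_test_fill_strings() and in mlx5e_self_test(), so slot `count` names the same test in the ethtool string table and in the result buffer. A compact userspace model of that invariant (test entries invented):

    #include <stdio.h>

    struct st {
        const char *name;
        int (*fn)(void);
        int (*cond)(void);    /* nonzero return means "skip this test" */
    };

    static int ok(void) { return 0; }
    static int not_supported(void) { return -95; /* -EOPNOTSUPP */ }

    static struct st sts[] = {
        { "Link Test", ok, NULL },
        { "Loopback Test", ok, not_supported },    /* filtered out */
    };

    int main(void)
    {
        long long buf[2];
        int count = 0;

        for (int i = 0; i < 2; i++) {
            if (sts[i].cond && sts[i].cond())
                continue;    /* identical filter for strings and results */
            buf[count] = sts[i].fn();
            printf("[%d] %s -> %lld\n", i, sts[i].name, buf[count]);
            count++;
        }
        return 0;    /* count == 1: only "Link Test" ran and is reported */
    }
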
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index e4f5b6395148..e1dd17019030 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -34,6 +34,7 @@
#include "en.h"
#include "en_accel/tls.h"
#include "en_accel/en_accel.h"
+#include "en/ptp.h"
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
@@ -450,7 +451,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->max_nch; i++) {
+ for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats =
&priv->channel_stats[i];
int j;
@@ -2076,7 +2077,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
if (priv->rx_ptp_opened) {
for (i = 0; i < NUM_PTP_RQ_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_rq_stats_desc[i].format);
+ ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
}
return idx;
}
@@ -2119,7 +2120,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
- int max_nch = priv->max_nch;
+ int max_nch = priv->stats_nch;
return (NUM_RQ_STATS * max_nch) +
(NUM_CH_STATS * max_nch) +
@@ -2133,7 +2134,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
bool is_xsk = priv->xsk.ever_used;
- int max_nch = priv->max_nch;
+ int max_nch = priv->stats_nch;
int i, j, tc;
for (i = 0; i < max_nch; i++)
@@ -2175,7 +2176,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
bool is_xsk = priv->xsk.ever_used;
- int max_nch = priv->max_nch;
+ int max_nch = priv->stats_nch;
int i, j, tc;
for (i = 0; i < max_nch; i++)
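
Note on the en_stats.c hunks above: every stats walk moves from max_nch to the new stats_nch, so counters accumulated on channels that were active earlier remain visible after the live channel count shrinks. A minimal model of the folding loop:

    #include <stdio.h>

    struct ch_stats { unsigned long rx_packets; };

    static unsigned long fold_rx(const struct ch_stats *stats, int stats_nch)
    {
        unsigned long total = 0;

        for (int i = 0; i < stats_nch; i++)    /* bound by stats_nch, not live channels */
            total += stats[i].rx_packets;
        return total;
    }

    int main(void)
    {
        struct ch_stats s[4] = { {1}, {2}, {3}, {4} };

        printf("%lu\n", fold_rx(s, 4));        /* 10, incl. channels no longer active */
        return 0;
    }
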
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ba8164792016..3af3da214a5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -39,6 +39,7 @@
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
+#include <linux/if_macvlan.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/psample.h>
@@ -67,6 +68,8 @@
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
+#include "lag/lag.h"
+#include "lag/mp.h"
#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
@@ -246,7 +249,6 @@ get_ct_priv(struct mlx5e_priv *priv)
return priv->fs.tc.ct;
}
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
@@ -263,7 +265,6 @@ get_sample_priv(struct mlx5e_priv *priv)
return NULL;
}
-#endif
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
@@ -1146,11 +1147,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
flow, spec, attr,
mod_hdr_acts);
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
} else if (flow_flag_test(flow, SAMPLE)) {
rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr,
mlx5e_tc_get_flow_tun_id(flow));
-#endif
} else {
rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
@@ -1186,12 +1185,10 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
return;
}
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
if (flow_flag_test(flow, SAMPLE)) {
mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
return;
}
-#endif
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
@@ -1688,8 +1685,8 @@ enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
if (opt->opt_class != htons(U16_MAX) ||
opt->type != U8_MAX) {
- NL_SET_ERR_MSG(extack,
- "Partial match of tunnel options in chain > 0 isn't supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Partial match of tunnel options in chain > 0 isn't supported");
netdev_warn(priv->netdev,
"Partial match of tunnel options in chain > 0 isn't supported");
return -EOPNOTSUPP;
@@ -1896,8 +1893,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
bool needs_mapping, sets_mapping;
int err;
- if (!mlx5e_is_eswitch_flow(flow))
+ if (!mlx5e_is_eswitch_flow(flow)) {
+ NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
return -EOPNOTSUPP;
+ }
needs_mapping = !!flow->attr->chain;
sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
@@ -1905,8 +1904,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
if ((needs_mapping || sets_mapping) &&
!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
- NL_SET_ERR_MSG(extack,
- "Chains on tunnel devices isn't supported without register loopback support");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Chains on tunnel devices isn't supported without register loopback support");
netdev_warn(priv->netdev,
"Chains on tunnel devices isn't supported without register loopback support");
return -EOPNOTSUPP;
@@ -2269,8 +2268,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
addr_type = match.key->addr_type;
/* the HW doesn't support frag first/later */
- if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
return -EOPNOTSUPP;
+ }
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
@@ -2437,8 +2438,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
switch (ip_proto) {
case IPPROTO_ICMP:
if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_ICMP))
+ MLX5_FLEX_PROTO_ICMP)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Match on Flex protocols for ICMP is not supported");
return -EOPNOTSUPP;
+ }
MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
match.mask->type);
MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
@@ -2450,8 +2454,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
break;
case IPPROTO_ICMPV6:
if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_ICMPV6))
+ MLX5_FLEX_PROTO_ICMPV6)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Match on Flex protocols for ICMPV6 is not supported");
return -EOPNOTSUPP;
+ }
MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
match.mask->type);
MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
@@ -2557,15 +2564,19 @@ static int pedit_header_offsets[] = {
#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
- struct pedit_headers_action *hdrs)
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
{
u32 *curr_pmask, *curr_pval;
curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
- if (*curr_pmask & mask) /* disallow acting twice on the same location */
+ if (*curr_pmask & mask) { /* disallow acting twice on the same location */
+ NL_SET_ERR_MSG_MOD(extack,
+ "curr_pmask and new mask same. Acting twice on same location");
goto out_err;
+ }
*curr_pmask |= mask;
*curr_pval |= (val & mask);
@@ -2898,7 +2909,7 @@ parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
val = act->mangle.val;
offset = act->mangle.offset;
- err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
+ err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack);
if (err)
goto out_err;
@@ -2910,16 +2921,17 @@ out_err:
}
static int
-parse_pedit_to_reformat(struct mlx5e_priv *priv,
- const struct flow_action_entry *act,
+parse_pedit_to_reformat(const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
u32 mask, val, offset;
u32 *p;
- if (act->id != FLOW_ACTION_MANGLE)
+ if (act->id != FLOW_ACTION_MANGLE) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
return -EOPNOTSUPP;
+ }
if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
@@ -2943,7 +2955,7 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
- return parse_pedit_to_reformat(priv, act, parse_attr, extack);
+ return parse_pedit_to_reformat(act, parse_attr, extack);
return parse_pedit_to_modify_hdr(priv, act, namespace,
parse_attr, hdrs, extack);
@@ -3025,10 +3037,10 @@ struct ipv6_hoplimit_word {
__u8 hop_limit;
};
-static int is_action_keys_supported(const struct flow_action_entry *act,
- bool ct_flow, bool *modify_ip_header,
- bool *modify_tuple,
- struct netlink_ext_ack *extack)
+static bool
+is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow,
+ bool *modify_ip_header, bool *modify_tuple,
+ struct netlink_ext_ack *extack)
{
u32 mask, offset;
u8 htype;
@@ -3056,7 +3068,7 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
if (ct_flow && *modify_tuple) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of ipv4 address with action ct");
- return -EOPNOTSUPP;
+ return false;
}
} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
struct ipv6_hoplimit_word *hoplimit_word =
@@ -3074,7 +3086,7 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
if (ct_flow && *modify_tuple) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of ipv6 address with action ct");
- return -EOPNOTSUPP;
+ return false;
}
} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
@@ -3082,11 +3094,11 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
if (ct_flow) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of transport header ports with action ct");
- return -EOPNOTSUPP;
+ return false;
}
}
- return 0;
+ return true;
}
static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
@@ -3133,7 +3145,7 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
void *headers_v;
u16 ethertype;
u8 ip_proto;
- int i, err;
+ int i;
headers_c = get_match_headers_criteria(actions, spec);
headers_v = get_match_headers_value(actions, spec);
@@ -3151,11 +3163,10 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
act->id != FLOW_ACTION_ADD)
continue;
- err = is_action_keys_supported(act, ct_flow,
- &modify_ip_header,
- &modify_tuple, extack);
- if (err)
- return err;
+ if (!is_action_keys_supported(act, ct_flow,
+ &modify_ip_header,
+ &modify_tuple, extack))
+ return false;
}
if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
@@ -3176,37 +3187,65 @@ out_ok:
return true;
}
-static bool actions_match_supported(struct mlx5e_priv *priv,
- struct flow_action *flow_action,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+static bool
+actions_match_supported_fdb(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
- bool ct_flow = false, ct_clear = false;
- u32 actions;
+ struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
+ bool ct_flow, ct_clear;
- ct_clear = flow->attr->ct_attr.ct_action &
- TCA_CT_ACT_CLEAR;
+ ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
ct_flow = flow_flag_test(flow, CT) && !ct_clear;
- actions = flow->attr->action;
- if (mlx5e_is_eswitch_flow(flow)) {
- if (flow->attr->esw_attr->split_count && ct_flow &&
- !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) {
- /* All registers used by ct are cleared when using
- * split rules.
- */
- NL_SET_ERR_MSG_MOD(extack,
- "Can't offload mirroring with action ct");
- return false;
- }
+ if (esw_attr->split_count && ct_flow &&
+ !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) {
+ /* All registers used by ct are cleared when using
+ * split rules.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct");
+ return false;
}
- if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- return modify_header_match_supported(priv, &parse_attr->spec,
- flow_action, actions,
- ct_flow, ct_clear,
- extack);
+ if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "current firmware doesn't support split rule for port mirroring");
+ netdev_warn_once(priv->netdev,
+ "current firmware doesn't support split rule for port mirroring\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+actions_match_supported(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ u32 actions = flow->attr->action;
+ bool ct_flow, ct_clear;
+
+ ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
+ ct_flow = flow_flag_test(flow, CT) && !ct_clear;
+
+ if (!(actions &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
+ return false;
+ }
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ !modify_header_match_supported(priv, &parse_attr->spec, flow_action,
+ actions, ct_flow, ct_clear, extack))
+ return false;
+
+ if (mlx5e_is_eswitch_flow(flow) &&
+ !actions_match_supported_fdb(priv, parse_attr, flow, extack))
+ return false;
return true;
}
@@ -3355,11 +3394,51 @@ static int validate_goto_chain(struct mlx5e_priv *priv,
return 0;
}
-static int parse_tc_nic_actions(struct mlx5e_priv *priv,
- struct flow_action *flow_action,
+static int
+actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ enum mlx5_flow_namespace_type ns_type;
+ int err;
+
+ if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits &&
+ !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
+ return 0;
+
+ ns_type = get_flow_name_space(flow);
+
+ err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs,
+ &attr->action, extack);
+ if (err)
+ return err;
+
+ /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
+ if (parse_attr->mod_hdr_acts.num_actions > 0)
+ return 0;
+
+ attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+
+ if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
+ return 0;
+
+ if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
+ (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
+ attr->esw_attr->split_count = 0;
+
+ return 0;
+}
+
+static int
+parse_tc_nic_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct pedit_headers_action hdrs[2] = {};
@@ -3368,12 +3447,16 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
u32 action = 0;
int err, i;
- if (!flow_action_has_entries(flow_action))
+ if (!flow_action_has_entries(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
return -EINVAL;
+ }
if (!flow_action_hw_stats_check(flow_action, extack,
- FLOW_ACTION_HW_STATS_DELAYED_BIT))
+ FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
return -EOPNOTSUPP;
+ }
nic_attr = attr->nic_attr;
nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
@@ -3451,7 +3534,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
if (err)
return err;
- action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
@@ -3462,38 +3546,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
flow_flag_set(flow, CT);
break;
default:
- NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "The offload action is not supported in NIC action");
return -EOPNOTSUPP;
}
}
- if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
- hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
- err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr, hdrs, &action, extack);
- if (err)
- return err;
- /* in case all pedit actions are skipped, remove the MOD_HDR
- * flag.
- */
- if (parse_attr->mod_hdr_acts.num_actions == 0) {
- action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
- }
- }
-
attr->action = action;
- if (attr->dest_chain) {
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
- return -EOPNOTSUPP;
- }
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (attr->dest_chain && parse_attr->mirred_ifindex[0]) {
+ NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
+ return -EOPNOTSUPP;
}
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+ if (err)
+ return err;
if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
@@ -3517,19 +3585,25 @@ static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
- u32 *action)
+ u32 *action,
+ struct netlink_ext_ack *extack)
{
u8 vlan_idx = attr->total_vlan;
- if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
+ if (vlan_idx >= MLX5_FS_VLAN_DEPTH) {
+ NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported");
return -EOPNOTSUPP;
+ }
switch (act->id) {
case FLOW_ACTION_VLAN_POP:
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH))
+ MLX5_FS_VLAN_DEPTH)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vlan pop action is not supported");
return -EOPNOTSUPP;
+ }
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
} else {
@@ -3545,20 +3619,27 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH))
+ MLX5_FS_VLAN_DEPTH)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vlan push action is not supported for vlan depth > 1");
return -EOPNOTSUPP;
+ }
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
} else {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
(act->vlan.proto != htons(ETH_P_8021Q) ||
- act->vlan.prio))
+ act->vlan.prio)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vlan push action is not supported");
return -EOPNOTSUPP;
+ }
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
}
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
return -EINVAL;
}
@@ -3592,7 +3673,8 @@ static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
static int add_vlan_push_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct net_device **out_dev,
- u32 *action)
+ u32 *action,
+ struct netlink_ext_ack *extack)
{
struct net_device *vlan_dev = *out_dev;
struct flow_action_entry vlan_act = {
@@ -3603,7 +3685,7 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
};
int err;
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
if (err)
return err;
@@ -3614,14 +3696,15 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
return -ENODEV;
if (is_vlan_dev(*out_dev))
- err = add_vlan_push_action(priv, attr, out_dev, action);
+ err = add_vlan_push_action(priv, attr, out_dev, action, extack);
return err;
}
static int add_vlan_pop_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
- u32 *action)
+ u32 *action,
+ struct netlink_ext_ack *extack)
{
struct flow_action_entry vlan_act = {
.id = FLOW_ACTION_VLAN_POP,
@@ -3631,7 +3714,7 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
nest_level = attr->parse_attr->filter_dev->lower_level -
priv->netdev->lower_level;
while (nest_level--) {
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
if (err)
return err;
}
@@ -3753,18 +3836,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
int err, i, if_count = 0;
bool mpls_push = false;
- if (!flow_action_has_entries(flow_action))
+ if (!flow_action_has_entries(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
return -EINVAL;
+ }
if (!flow_action_hw_stats_check(flow_action, extack,
- FLOW_ACTION_HW_STATS_DELAYED_BIT))
+ FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
return -EOPNOTSUPP;
+ }
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
+ break;
case FLOW_ACTION_DROP:
action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
@@ -3902,18 +3994,21 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (is_vlan_dev(out_dev)) {
err = add_vlan_push_action(priv, attr,
&out_dev,
- &action);
+ &action, extack);
if (err)
return err;
}
if (is_vlan_dev(parse_attr->filter_dev)) {
err = add_vlan_pop_action(priv, attr,
- &action);
+ &action, extack);
if (err)
return err;
}
+ if (netif_is_macvlan(out_dev))
+ out_dev = macvlan_dev_real_dev(out_dev);
+
err = verify_uplink_forwarding(priv, flow, out_dev, extack);
if (err)
return err;
@@ -3955,10 +4050,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
break;
case FLOW_ACTION_TUNNEL_ENCAP:
info = act->tunnel;
- if (info)
+ if (info) {
encap = true;
- else
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Zero tunnel attributes is not supported");
return -EOPNOTSUPP;
+ }
break;
case FLOW_ACTION_VLAN_PUSH:
@@ -3972,7 +4070,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
act, parse_attr, hdrs,
&action, extack);
} else {
- err = parse_tc_vlan_action(priv, act, esw_attr, &action);
+ err = parse_tc_vlan_action(priv, act, esw_attr, &action, extack);
}
if (err)
return err;
@@ -3998,7 +4096,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (err)
return err;
- action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
@@ -4025,7 +4124,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
flow_flag_set(flow, SAMPLE);
break;
default:
- NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "The offload action is not supported in FDB action");
return -EOPNOTSUPP;
}
}
@@ -4045,60 +4145,26 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return err;
}
- if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
- hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
- err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr, hdrs, &action, extack);
- if (err)
- return err;
- /* in case all pedit actions are skipped, remove the MOD_HDR
- * flag. we might have set split_count either by pedit or
- * pop/push. if there is no pop/push either, reset it too.
- */
- if (parse_attr->mod_hdr_acts.num_actions == 0) {
- action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
- if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
- (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
- esw_attr->split_count = 0;
- }
- }
-
attr->action = action;
- if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
- return -EOPNOTSUPP;
-
- if (attr->dest_chain) {
- if (decap) {
- /* It can be supported if we'll create a mapping for
- * the tunnel device only (without tunnel), and set
- * this tunnel id with this decap flow.
- *
- * On restore (miss), we'll just set this saved tunnel
- * device.
- */
- NL_SET_ERR_MSG(extack,
- "Decap with goto isn't supported");
- netdev_warn(priv->netdev,
- "Decap with goto isn't supported");
- return -EOPNOTSUPP;
- }
-
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- }
+ err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+ if (err)
+ return err;
- if (!(attr->action &
- (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
- NL_SET_ERR_MSG_MOD(extack,
- "Rule must have at least one forward/drop action");
+ if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
- }
- if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "current firmware doesn't support split rule for port mirroring");
- netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
+ if (attr->dest_chain && decap) {
+ /* It can be supported if we'll create a mapping for
+ * the tunnel device only (without tunnel), and set
+ * this tunnel id with this decap flow.
+ *
+ * On restore (miss), we'll just set this saved tunnel
+ * device.
+ */
+
+ NL_SET_ERR_MSG(extack, "Decap with goto isn't supported");
+ netdev_warn(priv->netdev, "Decap with goto isn't supported");
return -EOPNOTSUPP;
}
@@ -4733,8 +4799,10 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (!flow_action_basic_hw_stats_check(flow_action, extack))
+ if (!flow_action_basic_hw_stats_check(flow_action, extack)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
return -EOPNOTSUPP;
+ }
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
@@ -5006,9 +5074,7 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
MLX5_FLOW_NAMESPACE_FDB,
uplink_priv->post_act);
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);
-#endif
mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
@@ -5022,9 +5088,11 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
}
uplink_priv->tunnel_mapping = mapping;
- /* 0xFFF is reserved for stack devices slow path table mark */
+ /* Two last values are reserved for stack devices slow path table mark
+ * and bridge ingress push mark.
+ */
mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
- sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true);
+ sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
if (IS_ERR(mapping)) {
err = PTR_ERR(mapping);
goto err_enc_opts_mapping;
@@ -5052,9 +5120,7 @@ err_ht_init:
err_enc_opts_mapping:
mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
-#endif
mlx5_tc_ct_clean(uplink_priv->ct_priv);
netdev_warn(priv->netdev,
"Failed to initialize tc (eswitch), err: %d", err);
@@ -5074,9 +5140,7 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
mapping_destroy(uplink_priv->tunnel_mapping);
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
-#endif
mlx5_tc_ct_clean(uplink_priv->ct_priv);
mlx5e_tc_post_act_destroy(uplink_priv->post_act);
}
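
Note on the en_tc.c hunks above: the recurring theme is that every unsupported-configuration early return now pairs its errno with an extack string, so `tc` can print the reason instead of a bare EOPNOTSUPP. A userspace stand-in for the pattern; the struct and macro below imitate, and are not, the kernel's netlink_ext_ack / NL_SET_ERR_MSG_MOD:

    #include <stdio.h>

    struct ext_ack { const char *msg; };

    /* Stand-in for NL_SET_ERR_MSG_MOD; not the kernel macro. */
    #define SET_ERR_MSG(ea, m) ((ea)->msg = (m))

    static int parse_match(int match_frag_first, struct ext_ack *ea)
    {
        if (match_frag_first) {
            SET_ERR_MSG(ea, "Match on frag first/later is not supported");
            return -95;    /* -EOPNOTSUPP */
        }
        return 0;
    }

    int main(void)
    {
        struct ext_ack ea = { 0 };

        if (parse_match(1, &ea))
            fprintf(stderr, "rejected: %s\n", ea.msg);
        return 0;
    }
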
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index c63d78eda606..188994d091c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -213,19 +213,18 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
-/* If packet is not IP's CHECKSUM_PARTIAL (e.g. icmd packet),
- * need to set L3 checksum flag for IPsec
- */
static void
ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
+ struct xfrm_offload *xo = xfrm_offload(skb);
+
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
- if (skb->encapsulation) {
- eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ if (xo->inner_ipproto) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
+ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial_inner++;
- } else {
- sq->stats->csum_partial++;
}
}
@@ -234,6 +233,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg)
{
+ if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
+ ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
+ return;
+ }
+
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
if (skb->encapsulation) {
@@ -249,8 +253,6 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
#endif
- } else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
- ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
} else
sq->stats->csum_none++;
}
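
Note on the en_tx.c hunk above: the IPsec metadata test moves to the top of mlx5e_txwqe_build_eseg_csum(), so IPsec packets take the dedicated path before the generic CHECKSUM_PARTIAL branch can claim them. A userspace model of the resulting decision order (flag values invented):

    #include <stdio.h>

    enum { L3_CSUM = 1, L4_CSUM = 2, L3_INNER = 4, L4_INNER = 8 };

    static int csum_flags(int is_ipsec, int inner_ipproto, int csum_partial)
    {
        int flags = 0;

        if (is_ipsec) {                        /* checked first, as in the patch */
            flags = L3_CSUM;
            if (inner_ipproto)
                flags |= L4_INNER | L3_INNER;
            else if (csum_partial)
                flags |= L4_CSUM;
            return flags;
        }
        if (csum_partial)
            flags = L3_CSUM | L4_CSUM;         /* generic partial-csum path */
        return flags;                          /* 0 means csum_none */
    }

    int main(void)
    {
        printf("ipsec+inner -> 0x%x\n", csum_flags(1, 1, 0));
        return 0;
    }
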
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 605c8ecc3610..792e0d6aa861 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -632,6 +632,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
mlx5_eq_notifier_register(dev, &table->cq_err_nb);
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_NUM_CMD_EQE,
.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
};
@@ -644,6 +645,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_NUM_ASYNC_EQE,
};
@@ -653,6 +655,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
goto err2;
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = /* TODO: sriov max_vf + */ 1,
.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
};
@@ -806,8 +809,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
ncomp_eqs = table->num_comp_eqs;
nent = MLX5_COMP_EQ_SIZE;
for (i = 0; i < ncomp_eqs; i++) {
- int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
struct mlx5_eq_param param = {};
+ int vecidx = i;
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
if (!eq) {
@@ -953,9 +956,7 @@ static int set_rmap(struct mlx5_core_dev *mdev)
goto err_out;
}
- vecidx = MLX5_IRQ_VEC_COMP_BASE;
- for (; vecidx < eq_table->num_comp_eqs + MLX5_IRQ_VEC_COMP_BASE;
- vecidx++) {
+ for (vecidx = 0; vecidx < eq_table->num_comp_eqs; vecidx++) {
err = irq_cpu_rmap_add(eq_table->rmap,
pci_irq_vector(mdev->pdev, vecidx));
if (err) {
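
Note on the eq.c hunks above, as read from the diff: completion EQ i now requests IRQ vector i directly (the MLX5_IRQ_VEC_COMP_BASE offset is gone), while the command, async, and pages EQs all ask for the shared control index MLX5_IRQ_EQ_CTRL. An illustrative mapping only, not driver code:

    #include <stdio.h>

    #define IRQ_EQ_CTRL (-1)    /* stand-in: "the shared control IRQ slot" */

    int main(void)
    {
        int num_comp_eqs = 4;

        for (int i = 0; i < num_comp_eqs; i++)
            printf("comp eq %d -> irq vector %d\n", i, i);   /* no base offset */
        printf("cmd/async/pages eqs -> ctrl irq slot %d\n", IRQ_EQ_CTRL);
        return 0;
    }
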
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 0399a396d166..60a73990017c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -79,12 +79,16 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
int dest_num = 0;
int err = 0;
- if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
+ if (vport->egress.legacy.drop_counter) {
+ drop_counter = vport->egress.legacy.drop_counter;
+ } else if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
drop_counter = mlx5_fc_create(esw->dev, false);
- if (IS_ERR(drop_counter))
+ if (IS_ERR(drop_counter)) {
esw_warn(esw->dev,
"vport[%d] configure egress drop rule counter err(%ld)\n",
vport->vport, PTR_ERR(drop_counter));
+ drop_counter = NULL;
+ }
vport->egress.legacy.drop_counter = drop_counter;
}
@@ -123,7 +127,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach egress drop flow counter */
- if (!IS_ERR_OR_NULL(drop_counter)) {
+ if (drop_counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter);
@@ -162,7 +166,7 @@ void esw_acl_egress_lgcy_cleanup(struct mlx5_eswitch *esw,
esw_acl_egress_table_destroy(vport);
clean_drop_counter:
- if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) {
+ if (vport->egress.legacy.drop_counter) {
mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
vport->egress.legacy.drop_counter = NULL;
}
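
Note on the ACL hunks (egress above, ingress below): both apply the same rule -- reuse a drop counter left over from a previous setup, otherwise try to create one, and treat creation failure as "run without a counter" rather than failing the whole ACL setup. A sketch with invented types:

    #include <stdio.h>
    #include <stdlib.h>

    struct counter { int id; };

    /* Reuse-or-create, never fail: mirrors the drop-counter logic above. */
    static struct counter *get_drop_counter(struct counter *existing, int has_cap)
    {
        if (existing)
            return existing;    /* idempotent across repeated setups */
        if (!has_cap)
            return NULL;
        return calloc(1, sizeof(struct counter));   /* NULL on failure: degrade */
    }

    int main(void)
    {
        struct counter *c = get_drop_counter(NULL, 1);

        printf("counter %s\n", c ? "attached" : "skipped");
        free(c);
        return 0;
    }
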
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index f75b86abaf1c..b1a5199260f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -160,7 +160,9 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
esw_acl_ingress_lgcy_rules_destroy(vport);
- if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
+ if (vport->ingress.legacy.drop_counter) {
+ counter = vport->ingress.legacy.drop_counter;
+ } else if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
counter = mlx5_fc_create(esw->dev, false);
if (IS_ERR(counter)) {
esw_warn(esw->dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 7e221038df8d..588622ba38c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -28,7 +28,10 @@
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 2)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
@@ -61,6 +64,9 @@ struct mlx5_esw_bridge {
struct mlx5_flow_table *egress_ft;
struct mlx5_flow_group *egress_vlan_fg;
struct mlx5_flow_group *egress_mac_fg;
+ struct mlx5_flow_group *egress_miss_fg;
+ struct mlx5_pkt_reformat *egress_miss_pkt_reformat;
+ struct mlx5_flow_handle *egress_miss_handle;
unsigned long ageing_time;
u32 flags;
};
@@ -86,6 +92,26 @@ mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry)
SWITCHDEV_FDB_DEL_TO_BRIDGE);
}
+static bool mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(struct mlx5_eswitch *esw)
+{
+ return BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) &&
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) >= sizeof(struct vlan_hdr) &&
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) >=
+ offsetof(struct vlan_ethhdr, h_vlan_proto);
+}
+
+static struct mlx5_pkt_reformat *
+mlx5_esw_bridge_pkt_reformat_vlan_pop_create(struct mlx5_eswitch *esw)
+{
+ struct mlx5_pkt_reformat_params reformat_params = {};
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
+ reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
+ reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ reformat_params.size = sizeof(struct vlan_hdr);
+ return mlx5_packet_reformat_alloc(esw->dev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB);
+}
+
static struct mlx5_flow_table *
mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
{
@@ -287,43 +313,74 @@ mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_
return fg;
}
+static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(egress_ft, in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create bridge egress table miss flow group (err=%ld)\n",
+ PTR_ERR(fg));
+ kvfree(in);
+ return fg;
+}
+
static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
struct mlx5_flow_group *mac_fg, *filter_fg, *vlan_fg;
struct mlx5_flow_table *ingress_ft, *skip_ft;
+ struct mlx5_eswitch *esw = br_offloads->esw;
int err;
- if (!mlx5_eswitch_vport_match_metadata_enabled(br_offloads->esw))
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return -EOPNOTSUPP;
ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
- br_offloads->esw);
+ esw);
if (IS_ERR(ingress_ft))
return PTR_ERR(ingress_ft);
skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE,
MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
- br_offloads->esw);
+ esw);
if (IS_ERR(skip_ft)) {
err = PTR_ERR(skip_ft);
goto err_skip_tbl;
}
- vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(br_offloads->esw, ingress_ft);
+ vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(esw, ingress_ft);
if (IS_ERR(vlan_fg)) {
err = PTR_ERR(vlan_fg);
goto err_vlan_fg;
}
- filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(br_offloads->esw, ingress_ft);
+ filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(esw, ingress_ft);
if (IS_ERR(filter_fg)) {
err = PTR_ERR(filter_fg);
goto err_filter_fg;
}
- mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(br_offloads->esw, ingress_ft);
+ mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft);
if (IS_ERR(mac_fg)) {
err = PTR_ERR(mac_fg);
goto err_mac_fg;
@@ -362,35 +419,82 @@ mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloa
br_offloads->ingress_ft = NULL;
}
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
+ struct mlx5_flow_table *skip_ft,
+ struct mlx5_pkt_reformat *pkt_reformat);
+
static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
struct mlx5_esw_bridge *bridge)
{
- struct mlx5_flow_group *mac_fg, *vlan_fg;
+ struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg;
+ struct mlx5_pkt_reformat *miss_pkt_reformat = NULL;
+ struct mlx5_flow_handle *miss_handle = NULL;
+ struct mlx5_eswitch *esw = br_offloads->esw;
struct mlx5_flow_table *egress_ft;
int err;
egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
- br_offloads->esw);
+ esw);
if (IS_ERR(egress_ft))
return PTR_ERR(egress_ft);
- vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(br_offloads->esw, egress_ft);
+ vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(esw, egress_ft);
if (IS_ERR(vlan_fg)) {
err = PTR_ERR(vlan_fg);
goto err_vlan_fg;
}
- mac_fg = mlx5_esw_bridge_egress_mac_fg_create(br_offloads->esw, egress_ft);
+ mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft);
if (IS_ERR(mac_fg)) {
err = PTR_ERR(mac_fg);
goto err_mac_fg;
}
+ if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
+ miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft);
+ if (IS_ERR(miss_fg)) {
+ esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n",
+ PTR_ERR(miss_fg));
+ miss_fg = NULL;
+ goto skip_miss_flow;
+ }
+
+ miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
+ if (IS_ERR(miss_pkt_reformat)) {
+ esw_warn(esw->dev,
+ "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
+ PTR_ERR(miss_pkt_reformat));
+ miss_pkt_reformat = NULL;
+ mlx5_destroy_flow_group(miss_fg);
+ miss_fg = NULL;
+ goto skip_miss_flow;
+ }
+
+ miss_handle = mlx5_esw_bridge_egress_miss_flow_create(egress_ft,
+ br_offloads->skip_ft,
+ miss_pkt_reformat);
+ if (IS_ERR(miss_handle)) {
+ esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n",
+ PTR_ERR(miss_handle));
+ miss_handle = NULL;
+ mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat);
+ miss_pkt_reformat = NULL;
+ mlx5_destroy_flow_group(miss_fg);
+ miss_fg = NULL;
+ goto skip_miss_flow;
+ }
+ }
+skip_miss_flow:
+
bridge->egress_ft = egress_ft;
bridge->egress_vlan_fg = vlan_fg;
bridge->egress_mac_fg = mac_fg;
+ bridge->egress_miss_fg = miss_fg;
+ bridge->egress_miss_pkt_reformat = miss_pkt_reformat;
+ bridge->egress_miss_handle = miss_handle;
return 0;
err_mac_fg:
@@ -403,6 +507,13 @@ err_vlan_fg:
static void
mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
{
+ if (bridge->egress_miss_handle)
+ mlx5_del_flow_rules(bridge->egress_miss_handle);
+ if (bridge->egress_miss_pkt_reformat)
+ mlx5_packet_reformat_dealloc(bridge->br_offloads->esw->dev,
+ bridge->egress_miss_pkt_reformat);
+ if (bridge->egress_miss_fg)
+ mlx5_destroy_flow_group(bridge->egress_miss_fg);
mlx5_destroy_flow_group(bridge->egress_mac_fg);
mlx5_destroy_flow_group(bridge->egress_vlan_fg);
mlx5_destroy_flow_table(bridge->egress_ft);
@@ -443,8 +554,10 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
if (vlan && vlan->pkt_reformat_push) {
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
flow_act.pkt_reformat = vlan->pkt_reformat_push;
+ flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
} else if (vlan) {
MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
outer_headers.cvlan_tag);
@@ -564,6 +677,10 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const u
if (!rule_spec)
return ERR_PTR(-ENOMEM);
+ if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
+ vport_num == MLX5_VPORT_UPLINK)
+ rule_spec->flow_context.flow_source =
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
@@ -599,6 +716,41 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const u
return handle;
}
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
+ struct mlx5_flow_table *skip_ft,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_flow_destination dest = {
+ .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ .ft = skip_ft,
+ };
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT,
+ .flags = FLOW_ACT_NO_APPEND,
+ .pkt_reformat = pkt_reformat,
+ };
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ MLX5_SET(fte_match_param, rule_spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+ MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_1,
+ ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK);
+
+ handle = mlx5_add_flow_rules(egress_ft, rule_spec, &flow_act, &dest, 1);
+
+ kvfree(rule_spec);
+ return handle;
+}
+
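/* Editor's note (annotation, not part of the patch): this miss rule is the
 * egress half of the push-VLAN round trip. Ingress rules that push a VLAN
 * also set a mark in metadata register C1 (see
 * mlx5_esw_bridge_vlan_push_mark_create below); packets that then miss the
 * egress MAC/VLAN groups are caught here by matching that mark, the pushed
 * VLAN is removed with the REMOVE_HEADER reformat, and the packet is
 * forwarded to the skip table. Note that in the table-setup code above the
 * miss objects are best-effort: any failure only warns and jumps to
 * skip_miss_flow, leaving the bridge functional without the miss path. */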
static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
struct mlx5_esw_bridge_offloads *br_offloads)
{
@@ -798,24 +950,14 @@ mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5
static int
mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
- struct mlx5_pkt_reformat_params reformat_params = {};
struct mlx5_pkt_reformat *pkt_reformat;
- if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) ||
- MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) < sizeof(struct vlan_hdr) ||
- MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) <
- offsetof(struct vlan_ethhdr, h_vlan_proto)) {
+ if (!mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n");
return -EOPNOTSUPP;
}
- reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
- reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
- reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
- reformat_params.size = sizeof(struct vlan_hdr);
- pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
- &reformat_params,
- MLX5_FLOW_NAMESPACE_FDB);
+ pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
if (IS_ERR(pkt_reformat)) {
esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
PTR_ERR(pkt_reformat));
@@ -833,6 +975,33 @@ mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_
vlan->pkt_reformat_pop = NULL;
}
+static int
+mlx5_esw_bridge_vlan_push_mark_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_modify_hdr *pkt_mod_hdr;
+
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
+ MLX5_SET(set_action_in, action, offset, 8);
+ MLX5_SET(set_action_in, action, length, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS);
+ MLX5_SET(set_action_in, action, data, ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN);
+
+ pkt_mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, 1, action);
+ if (IS_ERR(pkt_mod_hdr))
+ return PTR_ERR(pkt_mod_hdr);
+
+ vlan->pkt_mod_hdr_push_mark = pkt_mod_hdr;
+ return 0;
+}
+
+static void
+mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ mlx5_modify_header_dealloc(esw->dev, vlan->pkt_mod_hdr_push_mark);
+ vlan->pkt_mod_hdr_push_mark = NULL;
+}
+
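/* Editor's sketch (annotation, not part of the patch): the SET action above
 * writes ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN into metadata register C1 at bit
 * offset 8 with width ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS; the egress miss
 * rule later matches that mark under ESW_TUN_MASK. A stand-alone
 * illustration of such an offset/length register write (assumption: plain
 * C for illustration, not the firmware's modify-header implementation): */
#include <stdint.h>

static inline uint32_t set_bits(uint32_t reg, uint32_t val, unsigned int off,
				unsigned int len)
{
	/* build a mask of `len` bits starting at `off`, avoiding the
	 * undefined 1u << 32 for full-width writes */
	uint32_t mask = (len >= 32 ? ~0u : ((1u << len) - 1)) << off;

	return (reg & ~mask) | ((val << off) & mask);
}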
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
struct mlx5_eswitch *esw)
@@ -852,6 +1021,10 @@ mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *por
err = mlx5_esw_bridge_vlan_push_create(vlan, esw);
if (err)
goto err_vlan_push;
+
+ err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
+ if (err)
+ goto err_vlan_push_mark;
}
if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
@@ -870,6 +1043,9 @@ err_xa_insert:
if (vlan->pkt_reformat_pop)
mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
err_vlan_pop:
+ if (vlan->pkt_mod_hdr_push_mark)
+ mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
+err_vlan_push_mark:
if (vlan->pkt_reformat_push)
mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
err_vlan_push:
@@ -886,6 +1062,7 @@ static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
struct mlx5_esw_bridge *bridge)
{
+ struct mlx5_eswitch *esw = bridge->br_offloads->esw;
struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list) {
@@ -894,9 +1071,11 @@ static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
}
if (vlan->pkt_reformat_pop)
- mlx5_esw_bridge_vlan_pop_cleanup(vlan, bridge->br_offloads->esw);
+ mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
+ if (vlan->pkt_mod_hdr_push_mark)
+ mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
if (vlan->pkt_reformat_push)
- mlx5_esw_bridge_vlan_push_cleanup(vlan, bridge->br_offloads->esw);
+ mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
}
static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
index 52964a82d6a6..878311fe950a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
@@ -49,6 +49,7 @@ struct mlx5_esw_bridge_vlan {
struct list_head fdb_list;
struct mlx5_pkt_reformat *pkt_reformat_push;
struct mlx5_pkt_reformat *pkt_reformat_pop;
+ struct mlx5_modify_hdr *pkt_mod_hdr_push_mark;
};
struct mlx5_esw_bridge_port {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 20af557ae30c..7f9b96d9537e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -36,7 +36,7 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16
return NULL;
mlx5_esw_get_port_parent_id(dev, &ppid);
- pfnum = PCI_FUNC(dev->pdev->devfn);
+ pfnum = mlx5_get_dev_index(dev);
external = mlx5_core_is_ecpf_esw_manager(dev);
if (external)
controller_num = dev->priv.eswitch->offloads.host_number + 1;
@@ -149,7 +149,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
if (IS_ERR(vport))
return PTR_ERR(vport);
- pfnum = PCI_FUNC(dev->pdev->devfn);
+ pfnum = mlx5_get_dev_index(dev);
mlx5_esw_get_port_parent_id(dev, &ppid);
memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 985e305179d1..c6cc67cb4f6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -473,10 +473,9 @@ esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
err_min_rate:
list_del(&group->list);
- err = mlx5_destroy_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- group->tsar_ix);
- if (err)
+ if (mlx5_destroy_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ group->tsar_ix))
NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
err_sched_elem:
kfree(group);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 2c7444101bb9..28467f11f04b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -433,7 +433,7 @@ enum mlx5_flow_match_level {
};
/* current maximum for flow based vport multicasting */
-#define MLX5_MAX_FLOW_FWD_VPORTS 2
+#define MLX5_MAX_FLOW_FWD_VPORTS 32
enum {
MLX5_ESW_DEST_ENCAP = BIT(0),
@@ -447,8 +447,16 @@ enum {
MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3),
MLX5_ESW_ATTR_FLAG_SAMPLE = BIT(4),
+ MLX5_ESW_ATTR_FLAG_ACCEPT = BIT(5),
};
+/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
+static inline bool
+mlx5_esw_attr_flags_skip(u32 attr_flags)
+{
+ return attr_flags & (MLX5_ESW_ATTR_FLAG_SLOW_PATH | MLX5_ESW_ATTR_FLAG_ACCEPT);
+}
+
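/* Editor's note (annotation, not part of the patch): SLOW_PATH and the new
 * ACCEPT flag are grouped behind this one helper so every call site that
 * used to test SLOW_PATH alone (destination setup/cleanup, rule deletion,
 * the termination-table check below) treats ACCEPT rules the same way:
 * they are steered to the slow-path/default destination and skip the
 * per-destination chain and termination-table handling. */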
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_core_dev *in_mdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 0d461e38add3..0ef126fd6a8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -440,7 +440,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
} else if (attr->dest_ft) {
esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
(*i)++;
- } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ } else if (mlx5_esw_attr_flags_skip(attr->flags)) {
esw_setup_slow_path_dest(dest, flow_act, chains, *i);
(*i)++;
} else if (attr->dest_chain) {
@@ -467,7 +467,7 @@ esw_cleanup_dests(struct mlx5_eswitch *esw,
if (attr->dest_ft) {
esw_cleanup_decap_indir(esw, attr);
- } else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
+ } else if (!mlx5_esw_attr_flags_skip(attr->flags)) {
if (attr->dest_chain)
esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
else if (esw_is_indir_table(esw, attr))
@@ -482,12 +482,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
- struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
bool split = !!(esw_attr->split_count);
struct mlx5_vport_tbl_attr fwd_attr;
+ struct mlx5_flow_destination *dest;
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
int i = 0;
@@ -495,6 +495,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
+ dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
+ if (!dest)
+ return ERR_PTR(-ENOMEM);
+
flow_act.action = attr->action;
/* if per flow vlan pop/push is emulated, don't set that into the firmware */
if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
@@ -574,6 +578,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
else
atomic64_inc(&esw->offloads.num_flows);
+ kfree(dest);
return rule;
err_add_rule:
@@ -584,6 +589,7 @@ err_add_rule:
err_esw_get:
esw_cleanup_dests(esw, attr);
err_create_goto_table:
+ kfree(dest);
return rule;
}
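/* Editor's note (annotation, not part of the patch): with
 * MLX5_MAX_FLOW_FWD_VPORTS raised from 2 to 32 above, the destination
 * array (MLX5_MAX_FLOW_FWD_VPORTS + 1 entries of struct
 * mlx5_flow_destination) becomes too large to keep on the kernel stack,
 * so both add_offloaded_rule here and add_fwd_rule below switch to
 * kcalloc()/kfree() around the rule setup. */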
@@ -592,16 +598,20 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
- struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
struct mlx5_vport_tbl_attr fwd_attr;
+ struct mlx5_flow_destination *dest;
struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule;
int i, err = 0;
+ dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
+ if (!dest)
+ return ERR_PTR(-ENOMEM);
+
fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) {
rule = ERR_CAST(fast_fdb);
@@ -654,6 +664,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
atomic64_inc(&esw->offloads.num_flows);
+ kfree(dest);
return rule;
err_chain_src_rewrite:
esw_put_dest_tables_loop(esw, attr, 0, i);
@@ -661,6 +672,7 @@ err_chain_src_rewrite:
err_get_fwd:
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
+ kfree(dest);
return rule;
}
@@ -678,7 +690,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
mlx5_del_flow_rules(rule);
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
+ if (!mlx5_esw_attr_flags_skip(attr->flags)) {
/* unref the term table */
for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
if (esw_attr->dests[i].termtbl)
@@ -1009,7 +1021,7 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
u16 vport_num;
num_vfs = esw->esw_funcs.num_vfs;
- flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
+ flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL);
if (!flows)
return -ENOMEM;
@@ -1188,7 +1200,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
peer_miss_rules_setup(esw, peer_dev, spec, &dest);
- flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
+ flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
if (!flows) {
err = -ENOMEM;
goto alloc_flows_err;
@@ -2798,7 +2810,7 @@ u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
int id;
/* Only 4 bits of pf_num */
- pf_num = PCI_FUNC(esw->dev->pdev->devfn);
+ pf_num = mlx5_get_dev_index(esw->dev);
if (pf_num > max_pf_num)
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index b45954905845..879d78e46e47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -219,7 +219,7 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
- attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
+ mlx5_esw_attr_flags_skip(attr->flags) ||
!mlx5_eswitch_offload_is_uplink_port(esw, spec))
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 7db8df64a60e..750b21124a1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -185,6 +185,20 @@ static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
}
+static int
+mlx5_cmd_stub_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ return 0;
+}
+
+static int
+mlx5_cmd_stub_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ return 0;
+}
+
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
@@ -563,8 +577,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = dst->dest_attr.ft->id;
break;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
- id = dst->dest_attr.vport.num;
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid,
!!(dst->dest_attr.vport.flags &
@@ -572,6 +586,12 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id,
dst->dest_attr.vport.vhca_id);
+ if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
+ /* destination_id is reserved */
+ id = 0;
+ break;
+ }
+ id = dst->dest_attr.vport.num;
if (extended_dest &&
dst->dest_attr.vport.pkt_reformat) {
MLX5_SET(dest_format_struct, in_dests,
@@ -909,6 +929,45 @@ static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}
+static int mlx5_cmd_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_OBJ_TYPE_MATCH_DEFINER);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
+
+ return mlx5_cmd_exec(ns->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
+ struct mlx5_core_dev *dev = ns->dev;
+ void *ptr;
+ int err;
+
+ MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.obj_type,
+ MLX5_OBJ_TYPE_MATCH_DEFINER);
+
+ ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
+ MLX5_SET(match_definer, ptr, format_id, format_id);
+
+ ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
+ memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));
+
+ err = mlx5_cmd_exec_inout(dev, create_match_definer, in, out);
+ return err ? err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+}
+
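/* Editor's note (annotation, not part of the patch): create_match_definer
 * overloads its int return value: a negative value is an errno from
 * mlx5_cmd_exec_inout(), a non-negative value is the firmware object id of
 * the new definer. mlx5_create_match_definer() in fs_core.c relies on this
 * convention with its "if (id < 0)" check. */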
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.create_flow_table = mlx5_cmd_create_flow_table,
.destroy_flow_table = mlx5_cmd_destroy_flow_table,
@@ -923,6 +982,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_destroy_match_definer,
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
@@ -942,6 +1003,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_stub_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_stub_destroy_match_definer,
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
@@ -969,6 +1032,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_NIC_TX:
case FS_FT_RDMA_RX:
case FS_FT_RDMA_TX:
+ case FS_FT_PORT_SEL:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 5ecd33cdc087..220ec632d35a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -97,6 +97,10 @@ struct mlx5_flow_cmds {
int (*create_ns)(struct mlx5_flow_root_namespace *ns);
int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
+ int (*create_match_definer)(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask);
+ int (*destroy_match_definer)(struct mlx5_flow_root_namespace *ns,
+ int definer_id);
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index fe501ba88bea..873efde0d458 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2191,6 +2191,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
if (steering->fdb_root_ns)
return &steering->fdb_root_ns->ns;
return NULL;
+ case MLX5_FLOW_NAMESPACE_PORT_SEL:
+ if (steering->port_sel_root_ns)
+ return &steering->port_sel_root_ns->ns;
+ return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
if (steering->sniffer_rx_root_ns)
return &steering->sniffer_rx_root_ns->ns;
@@ -2596,6 +2600,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
steering->fdb_root_ns = NULL;
kfree(steering->fdb_sub_ns);
steering->fdb_sub_ns = NULL;
+ cleanup_root_ns(steering->port_sel_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
cleanup_root_ns(steering->rdma_rx_root_ns);
@@ -2634,6 +2639,21 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
return PTR_ERR_OR_ZERO(prio);
}
+#define PORT_SEL_NUM_LEVELS 3
+static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *prio;
+
+ steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
+ if (!steering->port_sel_root_ns)
+ return -ENOMEM;
+
+ /* Create single prio */
+ prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
+ PORT_SEL_NUM_LEVELS);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
int err;
@@ -3020,6 +3040,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
+ err = init_port_sel_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
err = init_rdma_rx_root_ns(steering);
@@ -3224,6 +3250,52 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
+int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
+{
+ return definer->id;
+}
+
+struct mlx5_flow_definer *
+mlx5_create_match_definer(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type, u16 format_id,
+ u32 *match_mask)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_definer *definer;
+ int id;
+
+ root = get_root_namespace(dev, ns_type);
+ if (!root)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ definer = kzalloc(sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return ERR_PTR(-ENOMEM);
+
+ definer->ns_type = ns_type;
+ id = root->cmds->create_match_definer(root, format_id, match_mask);
+ if (id < 0) {
+ mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
+ kfree(definer);
+ return ERR_PTR(id);
+ }
+ definer->id = id;
+ return definer;
+}
+
+void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
+ struct mlx5_flow_definer *definer)
+{
+ struct mlx5_flow_root_namespace *root;
+
+ root = get_root_namespace(dev, definer->ns_type);
+ if (WARN_ON(!root))
+ return;
+
+ root->cmds->destroy_match_definer(root, definer->id);
+ kfree(definer);
+}
+
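/* Editor's sketch (annotation, not part of the patch): a stand-alone,
 * user-space illustration of the ERR_PTR()/IS_ERR()/PTR_ERR() convention
 * used by mlx5_create_match_definer() above -- one pointer return carries
 * either a valid object or a small negative errno encoded at the top of
 * the address space. Assumption: simplified re-implementation for
 * illustration, not the kernel's <linux/err.h>. */
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* errors occupy the last MAX_ERRNO values of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *obj = ERR_PTR(-12);	/* -ENOMEM */

	if (IS_ERR(obj))
		printf("allocation failed: errno %ld\n", -PTR_ERR(obj));
	return 0;
}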
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 98240badc342..7711db245c63 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -49,6 +49,11 @@
#define FDB_TC_MAX_PRIO 16
#define FDB_TC_LEVELS_PER_PRIO 2
+struct mlx5_flow_definer {
+ enum mlx5_flow_namespace_type ns_type;
+ u32 id;
+};
+
struct mlx5_modify_hdr {
enum mlx5_flow_namespace_type ns_type;
union {
@@ -97,7 +102,8 @@ enum fs_flow_table_type {
FS_FT_SNIFFER_TX = 0X6,
FS_FT_RDMA_RX = 0X7,
FS_FT_RDMA_TX = 0X8,
- FS_FT_MAX_TYPE = FS_FT_RDMA_TX,
+ FS_FT_PORT_SEL = 0X9,
+ FS_FT_MAX_TYPE = FS_FT_PORT_SEL,
};
enum fs_flow_table_op_mod {
@@ -129,6 +135,7 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_tx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
+ struct mlx5_flow_root_namespace *port_sel_root_ns;
int esw_egress_acl_vports;
int esw_ingress_acl_vports;
};
@@ -341,7 +348,8 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
(type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
(type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
(type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_RDMA_TX != FS_FT_MAX_TYPE))\
+ (type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_PORT_SEL != FS_FT_MAX_TYPE))\
)
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 18e5aec14641..f542a36be62c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -497,8 +497,7 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
- bulk = kvzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
- GFP_KERNEL);
+ bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
if (!bulk)
goto err_alloc_bulk;
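/* Editor's sketch (annotation, not part of the patch): the kvzalloc()
 * change above swaps an open-coded "sizeof(hdr) + n * sizeof(elem)" for
 * struct_size(), which saturates instead of wrapping on multiply overflow.
 * A stand-alone, user-space equivalent with an explicit overflow check
 * (assumption: illustration only, not the kernel's <linux/overflow.h>): */
#include <stdint.h>
#include <stdlib.h>

struct bulk {
	int base_id;
	int fcs[];			/* trailing flexible array */
};

static struct bulk *bulk_alloc(size_t n)
{
	struct bulk *b;

	/* refuse element counts whose size computation would wrap */
	if (n > (SIZE_MAX - sizeof(*b)) / sizeof(b->fcs[0]))
		return NULL;
	b = calloc(1, sizeof(*b) + n * sizeof(b->fcs[0]));
	return b;
}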
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 016d26f809a5..1037e3629e7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -35,6 +35,7 @@
#include <linux/module.h>
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
+#include "lib/tout.h"
#include "accel/tls.h"
enum {
@@ -148,6 +149,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (err)
return err;
+ if (MLX5_CAP_GEN(dev, port_selection_cap)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION);
+ if (err)
+ return err;
+ }
+
if (MLX5_CAP_GEN(dev, hca_cap_2)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
if (err)
@@ -317,10 +324,9 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
return 0;
}
-#define MLX5_FAST_TEARDOWN_WAIT_MS 3000
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
- unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
+ unsigned long end, delay_ms = mlx5_tout_ms(dev, TEARDOWN);
u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {};
u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
int state;
@@ -618,17 +624,18 @@ static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
fwhandle, 0);
}
-#define MLX5_FSM_REACTIVATE_TOUT 5000 /* msecs */
static int mlx5_fsm_reactivate(struct mlxfw_dev *mlxfw_dev, u8 *status)
{
- unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5_FSM_REACTIVATE_TOUT);
struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
u32 out[MLX5_ST_SZ_DW(mirc_reg)];
u32 in[MLX5_ST_SZ_DW(mirc_reg)];
+ unsigned long exp_time;
int err;
+ exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FSM_REACTIVATE));
+
if (!MLX5_CAP_MCAM_REG2(dev, mirc))
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 106b50e42b46..0b0234f9d694 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -3,6 +3,7 @@
#include "fw_reset.h"
#include "diag/fw_tracer.h"
+#include "lib/tout.h"
enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
@@ -228,8 +229,6 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n");
}
-#define MLX5_PCI_LINK_UP_TIMEOUT 2000
-
static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
@@ -286,7 +285,7 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
goto restore;
}
- timeout = jiffies + msecs_to_jiffies(MLX5_PCI_LINK_UP_TIMEOUT);
+ timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, PCI_TOGGLE));
do {
err = pci_read_config_word(bridge, cap + PCI_EXP_LNKSTA, &reg16);
if (err)
@@ -299,8 +298,8 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
if (reg16 & PCI_EXP_LNKSTA_DLLLA) {
mlx5_core_info(dev, "PCI Link up\n");
} else {
- mlx5_core_err(dev, "PCI link not ready (0x%04x) after %d ms\n",
- reg16, MLX5_PCI_LINK_UP_TIMEOUT);
+ mlx5_core_err(dev, "PCI link not ready (0x%04x) after %llu ms\n",
+ reg16, mlx5_tout_ms(dev, PCI_TOGGLE));
err = -ETIMEDOUT;
}
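/* Editor's note (annotation, not part of the patch): the timeout changes in
 * this file all follow the same wrap-safe pattern -- compute the deadline
 * once as "jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, ...))" and poll
 * with time_after(), which compares via signed subtraction and so stays
 * correct across a jiffies wraparound. The device-provided timeouts from
 * lib/tout.h replace the old hard-coded #defines removed above. */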
@@ -395,16 +394,16 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
return NOTIFY_OK;
}
-#define MLX5_FW_RESET_TIMEOUT_MSEC 5000
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
{
- unsigned long timeout = msecs_to_jiffies(MLX5_FW_RESET_TIMEOUT_MSEC);
+ unsigned long pci_sync_update_timeout = mlx5_tout_ms(dev, PCI_SYNC_UPDATE);
+ unsigned long timeout = msecs_to_jiffies(pci_sync_update_timeout);
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
int err;
if (!wait_for_completion_timeout(&fw_reset->done, timeout)) {
- mlx5_core_warn(dev, "FW sync reset timeout after %d seconds\n",
- MLX5_FW_RESET_TIMEOUT_MSEC / 1000);
+ mlx5_core_warn(dev, "FW sync reset timeout after %lu seconds\n",
+ pci_sync_update_timeout / 1000);
err = -ETIMEDOUT;
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 037e18dd4be0..6a4dd7f78958 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -40,10 +40,10 @@
#include "lib/eq.h"
#include "lib/mlx5.h"
#include "lib/pci_vsc.h"
+#include "lib/tout.h"
#include "diag/fw_tracer.h"
enum {
- MLX5_HEALTH_POLL_INTERVAL = 2 * HZ,
MAX_MISSES = 3,
};
@@ -219,11 +219,9 @@ unlock:
mutex_unlock(&dev->intf_state_mutex);
}
-#define MLX5_CRDUMP_WAIT_MS 60000
-#define MLX5_FW_RESET_WAIT_MS 1000
void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
{
- unsigned long end, delay_ms = MLX5_FW_RESET_WAIT_MS;
+ unsigned long end, delay_ms = mlx5_tout_ms(dev, PCI_TOGGLE);
int lock = -EBUSY;
mutex_lock(&dev->intf_state_mutex);
@@ -237,7 +235,7 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
lock = lock_sem_sw_reset(dev, true);
if (lock == -EBUSY) {
- delay_ms = MLX5_CRDUMP_WAIT_MS;
+ delay_ms = mlx5_tout_ms(dev, FULL_CRDUMP);
goto recover_from_sw_reset;
}
/* Execute SW reset */
@@ -307,13 +305,11 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
mlx5_disable_device(dev);
}
-/* How much time to wait until health resetting the driver (in msecs) */
-#define MLX5_RECOVERY_WAIT_MSECS 60000
int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
{
unsigned long end;
- end = jiffies + msecs_to_jiffies(MLX5_RECOVERY_WAIT_MSECS);
+ end = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FW_RESET));
while (sensor_pci_not_working(dev)) {
if (time_after(jiffies, end))
return -ETIMEDOUT;
@@ -674,13 +670,13 @@ static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev)
devlink_health_reporter_destroy(health->fw_fatal_reporter);
}
-static unsigned long get_next_poll_jiffies(void)
+static unsigned long get_next_poll_jiffies(struct mlx5_core_dev *dev)
{
unsigned long next;
get_random_bytes(&next, sizeof(next));
next %= HZ;
- next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
+ next += jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, HEALTH_POLL_INTERVAL));
return next;
}
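/* Editor's note (annotation, not part of the patch): the poll interval now
 * comes from the device timeout table instead of a fixed 2 * HZ, and the
 * "next %= HZ" jitter spreads the health-timer expirations of multiple
 * devices across a one-second window so they do not all fire on the same
 * tick. */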
@@ -740,11 +736,12 @@ static void poll_health(struct timer_list *t)
queue_work(health->wq, &health->report_work);
out:
- mod_timer(&health->timer, get_next_poll_jiffies());
+ mod_timer(&health->timer, get_next_poll_jiffies(dev));
}
void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
+ u64 poll_interval_ms = mlx5_tout_ms(dev, HEALTH_POLL_INTERVAL);
struct mlx5_core_health *health = &dev->priv.health;
timer_setup(&health->timer, poll_health, 0);
@@ -753,7 +750,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
- health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
+ health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
add_timer(&health->timer);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 0c8594c7df21..ee0eb4a4b819 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -33,6 +33,11 @@
#include "en.h"
#include "ipoib.h"
+static u32 mlx5i_flow_type_mask(u32 flow_type)
+{
+ return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+}
+
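/* Editor's note (annotation, not part of the patch): ethtool encodes the
 * FLOW_EXT/FLOW_MAC_EXT/FLOW_RSS modifiers as high bits ORed into
 * flow_type, so they must be masked off before comparing against base
 * types; mlx5i_set_rxnfc() below uses this helper to reject ETHER_FLOW
 * rules, which do not apply on an IPoIB (non-Ethernet) link. */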
static void mlx5i_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
@@ -217,6 +222,27 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
return 0;
}
+#ifdef CONFIG_MLX5_EN_RXNFC
+static int mlx5i_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+
+ if (mlx5i_flow_type_mask(fs->flow_type) == ETHER_FLOW)
+ return -EINVAL;
+
+ return mlx5e_ethtool_set_rxnfc(priv, cmd);
+}
+
+static int mlx5i_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rule_locs)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
+}
+#endif
+
const struct ethtool_ops mlx5i_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -233,6 +259,10 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_coalesce = mlx5i_get_coalesce,
.set_coalesce = mlx5i_set_coalesce,
.get_ts_info = mlx5i_get_ts_info,
+#ifdef CONFIG_MLX5_EN_RXNFC
+ .get_rxnfc = mlx5i_get_rxnfc,
+ .set_rxnfc = mlx5i_set_rxnfc,
+#endif
.get_link_ksettings = mlx5i_get_link_ksettings,
.get_link = ethtool_op_get_link,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 67571e5040d6..3b8d8ada1a01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -113,7 +113,7 @@ static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
struct mlx5e_sw_stats s = { 0 };
int i, j;
- for (i = 0; i < priv->max_nch; i++) {
+ for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats;
struct mlx5e_rq_stats *rq_stats;
@@ -219,7 +219,7 @@ void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
{
- unsigned char *dev_addr = priv->netdev->dev_addr;
+ const unsigned char *dev_addr = priv->netdev->dev_addr;
u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
struct mlx5i_priv *ipriv = priv->ppriv;
@@ -336,6 +336,8 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
goto err_destroy_arfs_tables;
}
+ mlx5e_ethtool_init_steering(priv);
+
return 0;
err_destroy_arfs_tables:
@@ -348,6 +350,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_ttc_table(priv);
mlx5e_arfs_destroy_tables(priv);
+ mlx5e_ethtool_cleanup_steering(priv);
}
static int mlx5i_init_rx(struct mlx5e_priv *priv)
@@ -711,7 +714,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
goto destroy_ht;
}
- err = mlx5e_priv_init(epriv, netdev, mdev);
+ err = mlx5e_priv_init(epriv, prof, netdev, mdev);
if (err)
goto destroy_mdev_resources;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index ca5690b0a7ab..48d2ea690d7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -38,7 +38,7 @@
#include "mlx5_core.h"
#include "eswitch.h"
#include "lag.h"
-#include "lag_mp.h"
+#include "mp.h"
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
@@ -47,16 +47,21 @@
static DEFINE_SPINLOCK(lag_lock);
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
- u8 remap_port2, bool shared_fdb)
+ u8 remap_port2, bool shared_fdb, u8 flags)
{
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb);
+ if (!(flags & MLX5_LAG_FLAG_HASH_BASED)) {
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
+ } else {
+ MLX5_SET(lagc, lag_ctx, port_select_mode,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT);
+ }
return mlx5_cmd_exec_in(dev, create_lag, in);
}
@@ -199,6 +204,15 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
*port1 = 2;
}
+static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+
+ if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED)
+ return mlx5_lag_port_sel_modify(ldev, v2p_port1, v2p_port2);
+ return mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
+}
+
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
@@ -211,39 +225,56 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] ||
v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) {
+ err = _mlx5_modify_lag(ldev, v2p_port1, v2p_port2);
+ if (err) {
+ mlx5_core_err(dev0,
+ "Failed to modify LAG (%d)\n",
+ err);
+ return;
+ }
ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;
-
mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
ldev->v2p_map[MLX5_LAG_P1],
ldev->v2p_map[MLX5_LAG_P2]);
-
- err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
- if (err)
- mlx5_core_err(dev0,
- "Failed to modify LAG (%d)\n",
- err);
}
}
+static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker, u8 *flags)
+{
+ bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE);
+ struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
+
+ if (roce_lag ||
+ !MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) ||
+ tracker->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return;
+ *flags |= MLX5_LAG_FLAG_HASH_BASED;
+}
+
+static char *get_str_port_sel_mode(u8 flags)
+{
+ if (flags & MLX5_LAG_FLAG_HASH_BASED)
+ return "hash";
+ return "queue_affinity";
+}
+
static int mlx5_create_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
- bool shared_fdb)
+ bool shared_fdb, u8 flags)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
int err;
- mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
- &ldev->v2p_map[MLX5_LAG_P2]);
-
- mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d",
+ mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d mode:%s",
ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2],
- shared_fdb);
+ shared_fdb, get_str_port_sel_mode(flags));
err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
- ldev->v2p_map[MLX5_LAG_P2], shared_fdb);
+ ldev->v2p_map[MLX5_LAG_P2], shared_fdb, flags);
if (err) {
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -279,16 +310,32 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
int err;
- err = mlx5_create_lag(ldev, tracker, shared_fdb);
+ mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
+ &ldev->v2p_map[MLX5_LAG_P2]);
+ mlx5_lag_set_port_sel_mode(ldev, tracker, &flags);
+ if (flags & MLX5_LAG_FLAG_HASH_BASED) {
+ err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
+ ldev->v2p_map[MLX5_LAG_P1],
+ ldev->v2p_map[MLX5_LAG_P2]);
+ if (err) {
+ mlx5_core_err(dev0,
+					      "Failed to create LAG port selection (%d)\n",

+ err);
+ return err;
+ }
+ }
+
+ err = mlx5_create_lag(ldev, tracker, shared_fdb, flags);
if (err) {
- if (roce_lag) {
+ if (flags & MLX5_LAG_FLAG_HASH_BASED)
+ mlx5_lag_port_sel_destroy(ldev);
+ if (roce_lag)
mlx5_core_err(dev0,
"Failed to activate RoCE LAG\n");
- } else {
+ else
mlx5_core_err(dev0,
"Failed to activate VF LAG\n"
"Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
- }
return err;
}
@@ -302,6 +349,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
+ u8 flags = ldev->flags;
int err;
ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
@@ -324,6 +372,8 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
"Failed to deactivate VF LAG; driver restart required\n"
"Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
}
+ } else if (flags & MLX5_LAG_FLAG_HASH_BASED) {
+ mlx5_lag_port_sel_destroy(ldev);
}
return err;
@@ -442,6 +492,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (!mlx5_lag_is_ready(ldev)) {
do_bond = false;
} else {
+ /* VF LAG is in multipath mode, ignore bond change requests */
+ if (mlx5_lag_is_multipath(dev0))
+ return;
+
tracker = ldev->tracker;
do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
@@ -588,8 +642,10 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
if (!(bond_status & 0x3))
return 0;
- if (lag_upper_info)
+ if (lag_upper_info) {
tracker->tx_type = lag_upper_info->tx_type;
+ tracker->hash_type = lag_upper_info->hash_type;
+ }
/* Determine bonding status:
* A device is considered bonded if both its physical ports are slaves
@@ -688,7 +744,7 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev,
struct net_device *netdev)
{
- unsigned int fn = PCI_FUNC(dev->pdev->devfn);
+ unsigned int fn = mlx5_get_dev_index(dev);
if (fn >= MLX5_MAX_PORTS)
return;
@@ -718,7 +774,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev)
{
- unsigned int fn = PCI_FUNC(dev->pdev->devfn);
+ unsigned int fn = mlx5_get_dev_index(dev);
if (fn >= MLX5_MAX_PORTS)
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index d4bae528954e..e5d231c31b54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -5,7 +5,8 @@
#define __MLX5_LAG_H__
#include "mlx5_core.h"
-#include "lag_mp.h"
+#include "mp.h"
+#include "port_sel.h"
enum {
MLX5_LAG_P1,
@@ -17,10 +18,12 @@ enum {
MLX5_LAG_FLAG_SRIOV = 1 << 1,
MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
MLX5_LAG_FLAG_READY = 1 << 3,
+ MLX5_LAG_FLAG_HASH_BASED = 1 << 4,
};
#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\
- MLX5_LAG_FLAG_MULTIPATH)
+ MLX5_LAG_FLAG_MULTIPATH | \
+ MLX5_LAG_FLAG_HASH_BASED)
struct lag_func {
struct mlx5_core_dev *dev;
@@ -32,6 +35,7 @@ struct lag_tracker {
enum netdev_lag_tx_type tx_type;
struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
unsigned int is_bonded:1;
+ enum netdev_lag_hash hash_type;
};
/* LAG data of a ConnectX card.
@@ -49,6 +53,7 @@ struct mlx5_lag {
struct delayed_work bond_work;
struct notifier_block nb;
struct lag_mp lag_mp;
+ struct mlx5_lag_port_sel port_sel;
};
static inline struct mlx5_lag *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index f239b352a58a..bf4d3cbefa63 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -3,26 +3,29 @@
#include <linux/netdevice.h>
#include <net/nexthop.h>
-#include "lag.h"
-#include "lag_mp.h"
+#include "lag/lag.h"
+#include "lag/mp.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "lib/mlx5.h"
+static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
+{
+ return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
+}
+
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
if (!mlx5_lag_is_ready(ldev))
return false;
+ if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
+ return false;
+
return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
ldev->pf[MLX5_LAG_P2].dev);
}
-static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
-{
- return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
-}
-
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
index 729c839397a8..dea199e79bed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
@@ -24,12 +24,14 @@ struct lag_mp {
void mlx5_lag_mp_reset(struct mlx5_lag *ldev);
int mlx5_lag_mp_init(struct mlx5_lag *ldev);
void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev);
+bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {};
static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; }
static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {}
+static inline bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) { return false; }
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_LAG_MP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
new file mode 100644
index 000000000000..adc836b3d857
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include <linux/netdevice.h>
+#include "lag.h"
+
+enum {
+ MLX5_LAG_FT_LEVEL_TTC,
+ MLX5_LAG_FT_LEVEL_INNER_TTC,
+ MLX5_LAG_FT_LEVEL_DEFINER,
+};
+
+static struct mlx5_flow_group *
+mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
+ struct mlx5_flow_definer *definer)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_definer_id,
+ mlx5_get_match_definer_id(definer));
+ MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_MAX_PORTS - 1);
+ MLX5_SET(create_flow_group_in, in, group_type,
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);
+
+ fg = mlx5_create_flow_group(ft, in);
+ kvfree(in);
+ return fg;
+}
+
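/* Editor's note (annotation, not part of the patch): a HASH_SPLIT flow
 * group has no per-rule match value; the match definer selects which
 * packet fields feed the hash, and the hardware spreads packets across the
 * group's flow entries -- here MLX5_MAX_PORTS entries, one forwarding to
 * each uplink -- according to that hash. */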
+static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
+ struct mlx5_lag_definer *lag_definer,
+ u8 port1, u8 port2)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_namespace *ns;
+ int err, i;
+
+ ft_attr.max_fte = MLX5_MAX_PORTS;
+ ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
+ if (!ns) {
+ mlx5_core_warn(dev, "Failed to get port selection namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ lag_definer->ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(lag_definer->ft)) {
+ mlx5_core_warn(dev, "Failed to create port selection table\n");
+ return PTR_ERR(lag_definer->ft);
+ }
+
+ lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft,
+ lag_definer->definer);
+ if (IS_ERR(lag_definer->fg)) {
+ err = PTR_ERR(lag_definer->fg);
+ goto destroy_ft;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+ dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ u8 affinity = i == 0 ? port1 : port2;
+
+ dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
+ vhca_id);
+ lag_definer->rules[i] = mlx5_add_flow_rules(lag_definer->ft,
+ NULL, &flow_act,
+ &dest, 1);
+ if (IS_ERR(lag_definer->rules[i])) {
+ err = PTR_ERR(lag_definer->rules[i]);
+ while (i--)
+ mlx5_del_flow_rules(lag_definer->rules[i]);
+ goto destroy_fg;
+ }
+ }
+
+ return 0;
+
+destroy_fg:
+ mlx5_destroy_flow_group(lag_definer->fg);
+destroy_ft:
+ mlx5_destroy_flow_table(lag_definer->ft);
+ return err;
+}
+
+static int mlx5_lag_set_definer_inner(u32 *match_definer_mask,
+ enum mlx5_traffic_types tt)
+{
+ int format_id;
+ u8 *ipv6;
+
+ switch (tt) {
+ case MLX5_TT_IPV4_UDP:
+ case MLX5_TT_IPV4_TCP:
+ format_id = 23;
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l4_dport);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV4:
+ format_id = 23;
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l3_type);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV6_TCP:
+ case MLX5_TT_IPV6_UDP:
+ format_id = 31;
+ MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
+ inner_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
+ inner_l4_dport);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
+ inner_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
+ inner_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ break;
+ case MLX5_TT_IPV6:
+ format_id = 32;
+ ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
+ inner_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
+ inner_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_smac_15_0);
+ break;
+ default:
+ format_id = 23;
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l3_type);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_15_0);
+ break;
+ }
+
+ return format_id;
+}
+
+static int mlx5_lag_set_definer(u32 *match_definer_mask,
+ enum mlx5_traffic_types tt, bool tunnel,
+ enum netdev_lag_hash hash)
+{
+ int format_id;
+ u8 *ipv6;
+
+ if (tunnel)
+ return mlx5_lag_set_definer_inner(match_definer_mask, tt);
+
+ switch (tt) {
+ case MLX5_TT_IPV4_UDP:
+ case MLX5_TT_IPV4_TCP:
+ format_id = 22;
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_l4_dport);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV4:
+ format_id = 22;
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_l3_type);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_smac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV6_TCP:
+ case MLX5_TT_IPV6_UDP:
+ format_id = 29;
+ MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
+ outer_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
+ outer_l4_dport);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
+ outer_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
+ outer_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ break;
+ case MLX5_TT_IPV6:
+ format_id = 30;
+ ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
+ outer_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
+ outer_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_smac_15_0);
+ break;
+ default:
+ format_id = 0;
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_smac_15_0);
+
+ if (hash == NETDEV_LAG_HASH_VLAN_SRCMAC) {
+ MLX5_SET_TO_ONES(match_definer_format_0,
+ match_definer_mask,
+ outer_first_vlan_vid);
+ break;
+ }
+
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_ethertype);
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_dmac_15_0);
+ break;
+ }
+
+ return format_id;
+}
+
+static struct mlx5_lag_definer *
+mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
+ enum mlx5_traffic_types tt, bool tunnel, u8 port1,
+ u8 port2)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_definer *lag_definer;
+ u32 *match_definer_mask;
+ int format_id, err;
+
+ lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
+ if (!lag_definer)
+		return ERR_PTR(-ENOMEM);
+
+ match_definer_mask = kvzalloc(MLX5_FLD_SZ_BYTES(match_definer,
+ match_mask),
+ GFP_KERNEL);
+ if (!match_definer_mask) {
+ err = -ENOMEM;
+ goto free_lag_definer;
+ }
+
+ format_id = mlx5_lag_set_definer(match_definer_mask, tt, tunnel, hash);
+ lag_definer->definer =
+ mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
+ format_id, match_definer_mask);
+ if (IS_ERR(lag_definer->definer)) {
+ err = PTR_ERR(lag_definer->definer);
+ goto free_mask;
+ }
+
+ err = mlx5_lag_create_port_sel_table(ldev, lag_definer, port1, port2);
+ if (err)
+ goto destroy_match_definer;
+
+ kvfree(match_definer_mask);
+
+ return lag_definer;
+
+destroy_match_definer:
+ mlx5_destroy_match_definer(dev, lag_definer->definer);
+free_mask:
+ kvfree(match_definer_mask);
+free_lag_definer:
+ kfree(lag_definer);
+ return ERR_PTR(err);
+}
+
+static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
+ struct mlx5_lag_definer *lag_definer)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int i;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ mlx5_del_flow_rules(lag_definer->rules[i]);
+ mlx5_destroy_flow_group(lag_definer->fg);
+ mlx5_destroy_flow_table(lag_definer->ft);
+ mlx5_destroy_match_definer(dev, lag_definer->definer);
+ kfree(lag_definer);
+}
+
+static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ int tt;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ if (port_sel->outer.definers[tt])
+ mlx5_lag_destroy_definer(ldev,
+ port_sel->outer.definers[tt]);
+ if (port_sel->inner.definers[tt])
+ mlx5_lag_destroy_definer(ldev,
+ port_sel->inner.definers[tt]);
+ }
+}
+
+static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type,
+ u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_lag_definer *lag_definer;
+ int tt, err;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt,
+ false, port1, port2);
+ if (IS_ERR(lag_definer)) {
+ err = PTR_ERR(lag_definer);
+ goto destroy_definers;
+ }
+ port_sel->outer.definers[tt] = lag_definer;
+
+ if (!port_sel->tunnel)
+ continue;
+
+ lag_definer =
+ mlx5_lag_create_definer(ldev, hash_type, tt,
+ true, port1, port2);
+ if (IS_ERR(lag_definer)) {
+ err = PTR_ERR(lag_definer);
+ goto destroy_definers;
+ }
+ port_sel->inner.definers[tt] = lag_definer;
+ }
+
+ return 0;
+
+destroy_definers:
+ mlx5_lag_destroy_definers(ldev);
+ return err;
+}
+
+static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
+ enum netdev_lag_hash hash)
+{
+ port_sel->tunnel = false;
+
+ switch (hash) {
+ case NETDEV_LAG_HASH_E34:
+ port_sel->tunnel = true;
+ fallthrough;
+ case NETDEV_LAG_HASH_L34:
+ set_bit(MLX5_TT_IPV4_TCP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV4_UDP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6_TCP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6_UDP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV4, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6, port_sel->tt_map);
+ set_bit(MLX5_TT_ANY, port_sel->tt_map);
+ break;
+ case NETDEV_LAG_HASH_E23:
+ port_sel->tunnel = true;
+ fallthrough;
+ case NETDEV_LAG_HASH_L23:
+ set_bit(MLX5_TT_IPV4, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6, port_sel->tt_map);
+ set_bit(MLX5_TT_ANY, port_sel->tt_map);
+ break;
+ default:
+ set_bit(MLX5_TT_ANY, port_sel->tt_map);
+ break;
+ }
+}
+
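+/* Traffic types that got no definer are marked as ignored so that
+ * mlx5_create_ttc_table() skips rule creation for them.
+ */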
+#define SET_IGNORE_DESTS_BITS(tt_map, dests) \
+ do { \
+ int idx; \
+ \
+ for_each_clear_bit(idx, tt_map, MLX5_NUM_TT) \
+ set_bit(idx, dests); \
+ } while (0)
+
+static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
+ struct ttc_params *ttc_params)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_flow_table_attr *ft_attr;
+ int tt;
+
+ ttc_params->ns = mlx5_get_flow_namespace(dev,
+ MLX5_FLOW_NAMESPACE_PORT_SEL);
+ ft_attr = &ttc_params->ft_attr;
+ ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ ttc_params->dests[tt].type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ttc_params->dests[tt].ft = port_sel->inner.definers[tt]->ft;
+ }
+ SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);
+}
+
+static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
+ struct ttc_params *ttc_params)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_flow_table_attr *ft_attr;
+ int tt;
+
+ ttc_params->ns = mlx5_get_flow_namespace(dev,
+ MLX5_FLOW_NAMESPACE_PORT_SEL);
+ ft_attr = &ttc_params->ft_attr;
+ ft_attr->level = MLX5_LAG_FT_LEVEL_TTC;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ ttc_params->dests[tt].type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ttc_params->dests[tt].ft = port_sel->outer.definers[tt]->ft;
+ }
+ SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);
+
+ ttc_params->inner_ttc = port_sel->tunnel;
+ if (!port_sel->tunnel)
+ return;
+
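+ /* Steer encapsulated traffic of every tunnel type to the inner TTC
+ * table for a second classification pass.
+ */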
+ for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
+ ttc_params->tunnel_dests[tt].type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ttc_params->tunnel_dests[tt].ft =
+ mlx5_get_ttc_flow_table(port_sel->inner.ttc);
+ }
+}
+
+static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct ttc_params ttc_params = {};
+
+ mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
+ port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
+ if (IS_ERR(port_sel->outer.ttc))
+ return PTR_ERR(port_sel->outer.ttc);
+
+ return 0;
+}
+
+static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct ttc_params ttc_params = {};
+
+ mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
+ port_sel->inner.ttc = mlx5_create_ttc_table(dev, &ttc_params);
+ if (IS_ERR(port_sel->inner.ttc))
+ return PTR_ERR(port_sel->inner.ttc);
+
+ return 0;
+}
+
+int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type, u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ int err;
+
+ set_tt_map(port_sel, hash_type);
+ err = mlx5_lag_create_definers(ldev, hash_type, port1, port2);
+ if (err)
+ return err;
+
+ if (port_sel->tunnel) {
+ err = mlx5_lag_create_inner_ttc_table(ldev);
+ if (err)
+ goto destroy_definers;
+ }
+
+ err = mlx5_lag_create_ttc_table(ldev);
+ if (err)
+ goto destroy_inner;
+
+ return 0;
+
+destroy_inner:
+ if (port_sel->tunnel)
+ mlx5_destroy_ttc_table(port_sel->inner.ttc);
+destroy_definers:
+ mlx5_lag_destroy_definers(ldev);
+ return err;
+}
+
+static int
+mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
+ struct mlx5_lag_definer **definers,
+ u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_flow_destination dest = {};
+ int err;
+ int tt;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+ dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ struct mlx5_flow_handle **rules = definers[tt]->rules;
+
+ if (ldev->v2p_map[MLX5_LAG_P1] != port1) {
+ dest.vport.vhca_id =
+ MLX5_CAP_GEN(ldev->pf[port1 - 1].dev, vhca_id);
+ err = mlx5_modify_rule_destination(rules[MLX5_LAG_P1],
+ &dest, NULL);
+ if (err)
+ return err;
+ }
+
+ if (ldev->v2p_map[MLX5_LAG_P2] != port2) {
+ dest.vport.vhca_id =
+ MLX5_CAP_GEN(ldev->pf[port2 - 1].dev, vhca_id);
+ err = mlx5_modify_rule_destination(rules[MLX5_LAG_P2],
+ &dest, NULL);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ int err;
+
+ err = mlx5_lag_modify_definers_destinations(ldev,
+ port_sel->outer.definers,
+ port1, port2);
+ if (err)
+ return err;
+
+ if (!port_sel->tunnel)
+ return 0;
+
+ return mlx5_lag_modify_definers_destinations(ldev,
+ port_sel->inner.definers,
+ port1, port2);
+}
+
+void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+
+ mlx5_destroy_ttc_table(port_sel->outer.ttc);
+ if (port_sel->tunnel)
+ mlx5_destroy_ttc_table(port_sel->inner.ttc);
+ mlx5_lag_destroy_definers(ldev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h
new file mode 100644
index 000000000000..6d15b28a42fc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_LAG_FS_H__
+#define __MLX5_LAG_FS_H__
+
+#include "lib/fs_ttc.h"
+
+struct mlx5_lag_definer {
+ struct mlx5_flow_definer *definer;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *rules[MLX5_MAX_PORTS];
+};
+
+struct mlx5_lag_ttc {
+ struct mlx5_ttc_table *ttc;
+ struct mlx5_lag_definer *definers[MLX5_NUM_TT];
+};
+
+struct mlx5_lag_port_sel {
+ DECLARE_BITMAP(tt_map, MLX5_NUM_TT);
+ bool tunnel;
+ struct mlx5_lag_ttc outer;
+ struct mlx5_lag_ttc inner;
+};
+
+#ifdef CONFIG_MLX5_ESWITCH
+
+int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2);
+void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev);
+int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type, u8 port1,
+ u8 port2);
+
+#else /* CONFIG_MLX5_ESWITCH */
+static inline int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type,
+ u8 port1, u8 port2)
+{
+ return 0;
+}
+
+static inline int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1,
+ u8 port2)
+{
+ return 0;
+}
+
+static inline void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev) {}
+#endif /* CONFIG_MLX5_ESWITCH */
+#endif /* __MLX5_LAG_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index ffac8a0e7a23..91e806c1aa21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -448,22 +448,20 @@ static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
return cycles_now + cycles_delta;
}
-static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev,
- s64 sec, u32 nsec)
+static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
{
- struct timespec64 ts;
+ struct timespec64 ts = {};
s64 target_ns;
ts.tv_sec = sec;
- ts.tv_nsec = nsec;
target_ns = timespec64_to_ns(&ts);
return find_target_cycles(mdev, target_ns);
}
-static u64 perout_conf_real_time(s64 sec, u32 nsec)
+static u64 perout_conf_real_time(s64 sec)
{
- return (u64)nsec | (u64)sec << 32;
+ return (u64)sec << 32;
}
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
@@ -474,6 +472,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev =
container_of(clock, struct mlx5_core_dev, clock);
+ bool rt_mode = mlx5_real_time_mode(mdev);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
struct timespec64 ts;
u32 field_select = 0;
@@ -501,8 +500,10 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (on) {
bool rt_mode = mlx5_real_time_mode(mdev);
- u32 nsec;
- s64 sec;
+ s64 sec = rq->perout.start.sec;
+
+ if (rq->perout.start.nsec)
+ return -EINVAL;
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
@@ -513,14 +514,11 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if ((ns >> 1) != 500000000LL)
return -EINVAL;
- nsec = rq->perout.start.nsec;
- sec = rq->perout.start.sec;
-
if (rt_mode && sec > U32_MAX)
return -EINVAL;
- time_stamp = rt_mode ? perout_conf_real_time(sec, nsec) :
- perout_conf_internal_timer(mdev, sec, nsec);
+ time_stamp = rt_mode ? perout_conf_real_time(sec) :
+ perout_conf_internal_timer(mdev, sec);
field_select |= MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
@@ -538,6 +536,9 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (err)
return err;
+ if (rt_mode)
+ return 0;
+
return mlx5_set_mtppse(mdev, pin, 0,
MLX5_EVENT_MODE_REPETETIVE & on);
}
@@ -705,20 +706,14 @@ static void ts_next_sec(struct timespec64 *ts)
static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
struct mlx5_clock *clock)
{
- bool rt_mode = mlx5_real_time_mode(mdev);
struct timespec64 ts;
s64 target_ns;
- if (rt_mode)
- ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
- else
- mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
-
+ mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
ts_next_sec(&ts);
target_ns = timespec64_to_ns(&ts);
- return rt_mode ? perout_conf_real_time(ts.tv_sec, ts.tv_nsec) :
- find_target_cycles(mdev, target_ns);
+ return find_target_cycles(mdev, target_ns);
}
static int mlx5_pps_event(struct notifier_block *nb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index 749d17c0057d..b63dec24747a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -247,6 +247,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
+ if (test_bit(tt, params->ignore_dests))
+ continue;
rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
ttc_rules[tt].etype,
ttc_rules[tt].proto);
@@ -266,6 +268,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
if (!mlx5_tunnel_proto_supported_rx(dev,
ttc_tunnel_rules[tt].proto))
continue;
+ if (test_bit(tt, params->ignore_tunnel_dests))
+ continue;
trules[tt] = mlx5_generate_ttc_rule(dev, ft,
&params->tunnel_dests[tt],
ttc_tunnel_rules[tt].etype,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
index ce95be8f8382..85fef0cd1c07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
@@ -43,7 +43,9 @@ struct ttc_params {
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table_attr ft_attr;
struct mlx5_flow_destination dests[MLX5_NUM_TT];
+ DECLARE_BITMAP(ignore_dests, MLX5_NUM_TT);
bool inner_ttc;
+ DECLARE_BITMAP(ignore_tunnel_dests, MLX5_NUM_TUNNEL_TT);
struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT];
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
new file mode 100644
index 000000000000..0dd96a6b140d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/mlx5/driver.h>
+#include "lib/tout.h"
+
+struct mlx5_timeouts {
+ u64 to[MAX_TIMEOUT_TYPES];
+};
+
+static const u32 tout_def_sw_val[MAX_TIMEOUT_TYPES] = {
+ [MLX5_TO_FW_PRE_INIT_TIMEOUT_MS] = 120000,
+ [MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS] = 20000,
+ [MLX5_TO_FW_PRE_INIT_WAIT_MS] = 2,
+ [MLX5_TO_FW_INIT_MS] = 2000,
+ [MLX5_TO_CMD_MS] = 60000,
+ [MLX5_TO_PCI_TOGGLE_MS] = 2000,
+ [MLX5_TO_HEALTH_POLL_INTERVAL_MS] = 2000,
+ [MLX5_TO_FULL_CRDUMP_MS] = 60000,
+ [MLX5_TO_FW_RESET_MS] = 60000,
+ [MLX5_TO_FLUSH_ON_ERROR_MS] = 2000,
+ [MLX5_TO_PCI_SYNC_UPDATE_MS] = 5000,
+ [MLX5_TO_TEARDOWN_MS] = 3000,
+ [MLX5_TO_FSM_REACTIVATE_MS] = 5000,
+ [MLX5_TO_RECLAIM_PAGES_MS] = 5000,
+ [MLX5_TO_RECLAIM_VFS_PAGES_MS] = 120000
+};
+
+static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_types type)
+{
+ dev->timeouts->to[type] = val;
+}
+
+static void tout_set_def_val(struct mlx5_core_dev *dev)
+{
+ int i;
+
+ for (i = MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++)
+ tout_set(dev, tout_def_sw_val[i], i);
+}
+
+int mlx5_tout_init(struct mlx5_core_dev *dev)
+{
+ dev->timeouts = kmalloc(sizeof(*dev->timeouts), GFP_KERNEL);
+ if (!dev->timeouts)
+ return -ENOMEM;
+
+ tout_set_def_val(dev);
+ return 0;
+}
+
+void mlx5_tout_cleanup(struct mlx5_core_dev *dev)
+{
+ kfree(dev->timeouts);
+}
+
+/* The time register consists of two fields, to_multiplier (timeout
+ * multiplier) and to_value (timeout value). to_value is the quantity of
+ * time units and to_multiplier selects the unit, one of these four values:
+ * 0x0: milliseconds
+ * 0x1: seconds
+ * 0x2: minutes
+ * 0x3: hours
+ * This function converts the time stored in the two register fields into
+ * milliseconds.
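+ * For example, to_multiplier = 0x2 (minutes) and to_value = 3 convert
+ * to 3 * 1000 * int_pow(60, 1) = 180000 ms.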
+ */
+static u64 tout_convert_reg_field_to_ms(u32 to_mul, u32 to_val)
+{
+ u64 msec = to_val;
+
+ to_mul &= 0x3;
+ /* convert hours/minutes/seconds to milliseconds */
+ if (to_mul)
+ msec *= 1000 * int_pow(60, to_mul - 1);
+
+ return msec;
+}
+
+static u64 tout_convert_iseg_to_ms(u32 iseg_to)
+{
+ return tout_convert_reg_field_to_ms(iseg_to >> 29, iseg_to & 0xfffff);
+}
+
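+/* A nonzero cmd_q_init_to field in the init segment indicates that the
+ * FW exposes its default timeouts.
+ */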
+static bool tout_is_supported(struct mlx5_core_dev *dev)
+{
+ return !!ioread32be(&dev->iseg->cmd_q_init_to);
+}
+
+void mlx5_tout_query_iseg(struct mlx5_core_dev *dev)
+{
+ u32 to;
+
+ if (!tout_is_supported(dev))
+ return;
+
+ to = ioread32be(&dev->iseg->cmd_q_init_to);
+ tout_set(dev, tout_convert_iseg_to_ms(to), MLX5_TO_FW_INIT_MS);
+
+ to = ioread32be(&dev->iseg->cmd_exec_to);
+ tout_set(dev, tout_convert_iseg_to_ms(to), MLX5_TO_CMD_MS);
+}
+
+u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type)
+{
+ return dev->timeouts->to[type];
+}
+
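+/* Read one default_timeout field from the DTOR register output and
+ * convert it to milliseconds.
+ */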
+#define MLX5_TIMEOUT_QUERY(fld, reg_out) \
+ ({ \
+ struct mlx5_ifc_default_timeout_bits *time_field; \
+ u32 to_multi, to_value; \
+ u64 to_val_ms; \
+ \
+ time_field = MLX5_ADDR_OF(dtor_reg, reg_out, fld); \
+ to_multi = MLX5_GET(default_timeout, time_field, to_multiplier); \
+ to_value = MLX5_GET(default_timeout, time_field, to_value); \
+ to_val_ms = tout_convert_reg_field_to_ms(to_multi, to_value); \
+ to_val_ms; \
+ })
+
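+/* Store the timeout queried from FW (plus an optional extra margin) and
+ * evaluate to the raw queried value for the caller.
+ */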
+#define MLX5_TIMEOUT_FILL(fld, reg_out, dev, to_type, to_extra) \
+ ({ \
+ u64 fw_to = MLX5_TIMEOUT_QUERY(fld, reg_out); \
+ tout_set(dev, fw_to + (to_extra), to_type); \
+ fw_to; \
+ })
+
+static int tout_query_dtor(struct mlx5_core_dev *dev)
+{
+ u64 pcie_toggle_to_val, tear_down_to_val;
+ u32 out[MLX5_ST_SZ_DW(dtor_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(dtor_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_DTOR, 0, 0);
+ if (err)
+ return err;
+
+ pcie_toggle_to_val = MLX5_TIMEOUT_FILL(pcie_toggle_to, out, dev, MLX5_TO_PCI_TOGGLE_MS, 0);
+ MLX5_TIMEOUT_FILL(fw_reset_to, out, dev, MLX5_TO_FW_RESET_MS, pcie_toggle_to_val);
+
+ tear_down_to_val = MLX5_TIMEOUT_FILL(tear_down_to, out, dev, MLX5_TO_TEARDOWN_MS, 0);
+ MLX5_TIMEOUT_FILL(pci_sync_update_to, out, dev, MLX5_TO_PCI_SYNC_UPDATE_MS,
+ tear_down_to_val);
+
+ MLX5_TIMEOUT_FILL(health_poll_to, out, dev, MLX5_TO_HEALTH_POLL_INTERVAL_MS, 0);
+ MLX5_TIMEOUT_FILL(full_crdump_to, out, dev, MLX5_TO_FULL_CRDUMP_MS, 0);
+ MLX5_TIMEOUT_FILL(flush_on_err_to, out, dev, MLX5_TO_FLUSH_ON_ERROR_MS, 0);
+ MLX5_TIMEOUT_FILL(fsm_reactivate_to, out, dev, MLX5_TO_FSM_REACTIVATE_MS, 0);
+ MLX5_TIMEOUT_FILL(reclaim_pages_to, out, dev, MLX5_TO_RECLAIM_PAGES_MS, 0);
+ MLX5_TIMEOUT_FILL(reclaim_vfs_pages_to, out, dev, MLX5_TO_RECLAIM_VFS_PAGES_MS, 0);
+
+ return 0;
+}
+
+int mlx5_tout_query_dtor(struct mlx5_core_dev *dev)
+{
+ if (tout_is_supported(dev))
+ return tout_query_dtor(dev);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
new file mode 100644
index 000000000000..31faa5c17aa9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef MLX5_TIMEOUTS_H
+#define MLX5_TIMEOUTS_H
+
+enum mlx5_timeouts_types {
+ /* pre init timeouts (not read from FW) */
+ MLX5_TO_FW_PRE_INIT_TIMEOUT_MS,
+ MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS,
+ MLX5_TO_FW_PRE_INIT_WAIT_MS,
+
+ /* init segment timeouts */
+ MLX5_TO_FW_INIT_MS,
+ MLX5_TO_CMD_MS,
+
+ /* DTOR timeouts */
+ MLX5_TO_PCI_TOGGLE_MS,
+ MLX5_TO_HEALTH_POLL_INTERVAL_MS,
+ MLX5_TO_FULL_CRDUMP_MS,
+ MLX5_TO_FW_RESET_MS,
+ MLX5_TO_FLUSH_ON_ERROR_MS,
+ MLX5_TO_PCI_SYNC_UPDATE_MS,
+ MLX5_TO_TEARDOWN_MS,
+ MLX5_TO_FSM_REACTIVATE_MS,
+ MLX5_TO_RECLAIM_PAGES_MS,
+ MLX5_TO_RECLAIM_VFS_PAGES_MS,
+
+ MAX_TIMEOUT_TYPES
+};
+
+struct mlx5_core_dev;
+int mlx5_tout_init(struct mlx5_core_dev *dev);
+void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
+void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
+int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
+u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
+
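+/* Convenience accessor; e.g. mlx5_tout_ms(dev, FW_INIT) expands to
+ * _mlx5_tout_ms(dev, MLX5_TO_FW_INIT_MS).
+ */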
+#define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
+
+#endif /* MLX5_TIMEOUTS_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 79482824c64f..f8446395163a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -60,6 +60,7 @@
#include "devlink.h"
#include "fw_reset.h"
#include "lib/mlx5.h"
+#include "lib/tout.h"
#include "fpga/core.h"
#include "fpga/ipsec.h"
#include "accel/ipsec.h"
@@ -176,11 +177,6 @@ static struct mlx5_profile profile[] = {
},
};
-#define FW_INIT_TIMEOUT_MILI 2000
-#define FW_INIT_WAIT_MS 2
-#define FW_PRE_INIT_TIMEOUT_MILI 120000
-#define FW_INIT_WARN_MESSAGE_INTERVAL 20000
-
static int fw_initializing(struct mlx5_core_dev *dev)
{
return ioread32be(&dev->iseg->initializing) >> 31;
@@ -193,8 +189,6 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
int err = 0;
- BUILD_BUG_ON(FW_PRE_INIT_TIMEOUT_MILI < FW_INIT_WARN_MESSAGE_INTERVAL);
-
while (fw_initializing(dev)) {
if (time_after(jiffies, end)) {
err = -EBUSY;
@@ -205,7 +199,7 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
jiffies_to_msecs(end - warn) / 1000);
warn = jiffies + msecs_to_jiffies(warn_time_mili);
}
- msleep(FW_INIT_WAIT_MS);
+ msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
}
return err;
@@ -564,15 +558,38 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
+ if (MLX5_CAP_GEN(dev, roce_rw_supported))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
+/* The cached MLX5_CAP_GEN(dev, roce) can be out of sync at this early
+ * stage of the boot process.
+ * If the RoCE cap is writable in FW and user/devlink requested a change,
+ * the final state of the cap has not been queried yet.
+ * Hence the need for this function.
+ *
+ * Returns true if either:
+ * 1) the RoCE cap is read-only in FW and already disabled, or
+ * 2) the RoCE cap is writable in FW and user/devlink requested it off.
+ *
+ * In any other case, returns false.
+ */
+static bool is_roce_fw_disabled(struct mlx5_core_dev *dev)
+{
+ return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) ||
+ (!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));
+}
+
static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
{
void *set_hca_cap;
int err;
- if (!MLX5_CAP_GEN(dev, roce))
+ if (is_roce_fw_disabled(dev))
return 0;
err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
@@ -975,25 +992,34 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
if (mlx5_core_is_pf(dev))
pcie_print_link_status(dev->pdev);
+ err = mlx5_tout_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
+ return err;
+ }
+
/* wait for firmware to accept initialization segments configurations
*/
- err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI, FW_INIT_WARN_MESSAGE_INTERVAL);
+ err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT),
+ mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL));
if (err) {
- mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
- FW_PRE_INIT_TIMEOUT_MILI);
- return err;
+ mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
+ mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
+ goto err_tout_cleanup;
}
err = mlx5_cmd_init(dev);
if (err) {
mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
- return err;
+ goto err_tout_cleanup;
}
- err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
+ mlx5_tout_query_iseg(dev);
+
+ err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0);
if (err) {
- mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
- FW_INIT_TIMEOUT_MILI);
+ mlx5_core_err(dev, "Firmware over %llu MS in initializing state, aborting\n",
+ mlx5_tout_ms(dev, FW_INIT));
goto err_cmd_cleanup;
}
@@ -1017,6 +1043,12 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
goto err_disable_hca;
}
+ err = mlx5_tout_query_dtor(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to read dtor\n");
+ goto reclaim_boot_pages;
+ }
+
err = set_hca_ctrl(dev);
if (err) {
mlx5_core_err(dev, "set_hca_ctrl failed\n");
@@ -1062,6 +1094,8 @@ err_disable_hca:
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
+err_tout_cleanup:
+ mlx5_tout_cleanup(dev);
return err;
}
@@ -1080,6 +1114,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
mlx5_core_disable_hca(dev, 0);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
+ mlx5_tout_cleanup(dev);
return 0;
}
@@ -1112,8 +1147,9 @@ static int mlx5_load(struct mlx5_core_dev *dev)
err = mlx5_fw_tracer_init(dev->tracer);
if (err) {
- mlx5_core_err(dev, "Failed to init FW tracer\n");
- goto err_fw_tracer;
+ mlx5_core_err(dev, "Failed to init FW tracer %d\n", err);
+ mlx5_fw_tracer_destroy(dev->tracer);
+ dev->tracer = NULL;
}
mlx5_fw_reset_events_start(dev);
@@ -1121,8 +1157,9 @@ static int mlx5_load(struct mlx5_core_dev *dev)
err = mlx5_rsc_dump_init(dev);
if (err) {
- mlx5_core_err(dev, "Failed to init Resource dump\n");
- goto err_rsc_dump;
+ mlx5_core_err(dev, "Failed to init Resource dump %d\n", err);
+ mlx5_rsc_dump_destroy(dev);
+ dev->rsc_dump = NULL;
}
err = mlx5_fpga_device_start(dev);
@@ -1192,11 +1229,9 @@ err_tls_start:
mlx5_fpga_device_stop(dev);
err_fpga_start:
mlx5_rsc_dump_cleanup(dev);
-err_rsc_dump:
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
-err_fw_tracer:
mlx5_eq_table_destroy(dev);
err_eq_table:
mlx5_irq_table_destroy(dev);
@@ -1381,6 +1416,7 @@ static const int types[] = {
MLX5_CAP_TLS,
MLX5_CAP_VDPA_EMULATION,
MLX5_CAP_IPSEC,
+ MLX5_CAP_PORT_SELECTION,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
@@ -1537,8 +1573,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
pci_save_state(pdev);
- if (!mlx5_core_is_mp_slave(dev))
- devlink_reload_enable(devlink);
+ devlink_register(devlink);
return 0;
err_init_one:
@@ -1558,7 +1593,7 @@ static void remove_one(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(dev);
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
mlx5_crdump_disable(dev);
mlx5_drain_health_wq(dev);
mlx5_uninit_one(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index abd024173c42..8116815663a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -8,8 +8,6 @@
#define MLX5_COMP_EQS_PER_SF 8
-#define MLX5_IRQ_EQ_CTRL (0)
-
struct mlx5_irq;
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 110c0837f95b..f6b5451328fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -38,6 +38,7 @@
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"
+#include "lib/tout.h"
enum {
MLX5_PAGES_CANT_GIVE = 0,
@@ -65,11 +66,6 @@ struct fw_page {
};
enum {
- MAX_RECLAIM_TIME_MSECS = 5000,
- MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
-};
-
-enum {
MLX5_MAX_RECLAIM_TIME_MILI = 5000,
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
@@ -641,7 +637,8 @@ static int optimal_reclaimed_pages(void)
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
struct rb_root *root, u16 func_id)
{
- unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+ u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
+ unsigned long end = jiffies + recl_pages_to_jiffies;
while (!RB_EMPTY_ROOT(root)) {
int nclaimed;
@@ -656,7 +653,7 @@ static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
}
if (nclaimed)
- end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+ end = jiffies + recl_pages_to_jiffies;
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
@@ -727,7 +724,8 @@ void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
- unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
+ u64 recl_vf_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_VFS_PAGES));
+ unsigned long end = jiffies + recl_vf_pages_to_jiffies;
int prev_pages = *pages;
/* In case of internal error we will free the pages manually later */
@@ -743,7 +741,7 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
return -ETIMEDOUT;
}
if (*pages < prev_pages) {
- end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
+ end = jiffies + recl_vf_pages_to_jiffies;
prev_pages = *pages;
}
msleep(50);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index c79a10b3454d..830444f927d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -13,8 +13,8 @@
#endif
#define MLX5_MAX_IRQ_NAME (32)
-/* max irq_index is 255. three chars */
-#define MLX5_MAX_IRQ_IDX_CHARS (3)
+/* max irq_index is 2047, so four chars */
+#define MLX5_MAX_IRQ_IDX_CHARS (4)
#define MLX5_SFS_PER_CTRL_IRQ 64
#define MLX5_IRQ_CTRL_SF_MAX 8
@@ -194,15 +194,25 @@ static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
}
-static void irq_set_name(char *name, int vecidx)
+static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
{
- if (vecidx == 0) {
+ if (!pool->xa_num_irqs.max) {
+ /* in case we only have a single irq for the device */
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_combined%d", vecidx);
+ return;
+ }
+
+ if (vecidx == pool->xa_num_irqs.max) {
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
return;
}
- snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
- vecidx - MLX5_IRQ_VEC_COMP_BASE);
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
+}
+
+static bool irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
+{
+ return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
}
static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
@@ -216,8 +226,8 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
if (!irq)
return ERR_PTR(-ENOMEM);
irq->irqn = pci_irq_vector(dev->pdev, i);
- if (!pool->name[0])
- irq_set_name(name, i);
+ if (!irq_pool_is_sf_pool(pool))
+ irq_set_name(pool, name, i);
else
irq_sf_set_name(pool, name, i);
ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
@@ -386,6 +396,9 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
if (IS_ERR(irq) || !affinity)
goto unlock;
cpumask_copy(irq->mask, affinity);
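+ /* With a single combined IRQ the requested affinity may be empty;
+ * fall back to CPU 0 so a valid affinity hint is set.
+ */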
+ if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max &&
+ cpumask_empty(irq->mask))
+ cpumask_set_cpu(0, irq->mask);
irq_set_affinity_hint(irq->irqn, irq->mask);
unlock:
mutex_unlock(&pool->lock);
@@ -440,6 +453,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
}
pf_irq:
pool = irq_table->pf_pool;
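+ /* MLX5_IRQ_EQ_CTRL maps to the last vector in the PF pool */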
+ vecidx = (vecidx == MLX5_IRQ_EQ_CTRL) ? pool->xa_num_irqs.max : vecidx;
irq = irq_pool_request_vector(pool, vecidx, affinity);
out:
if (IS_ERR(irq))
@@ -577,6 +591,8 @@ void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
{
+ if (!table->pf_pool->xa_num_irqs.max)
+ return 1;
return table->pf_pool->xa_num_irqs.max - table->pf_pool->xa_num_irqs.min;
}
@@ -592,19 +608,15 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
if (mlx5_core_is_sf(dev))
return 0;
- pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
- MLX5_IRQ_VEC_COMP_BASE;
+ pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1;
pf_vec = min_t(int, pf_vec, num_eqs);
- if (pf_vec <= MLX5_IRQ_VEC_COMP_BASE)
- return -ENOMEM;
total_vec = pf_vec;
if (mlx5_sf_max_functions(dev))
total_vec += MLX5_IRQ_CTRL_SF_MAX +
MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);
- total_vec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
- total_vec, PCI_IRQ_MSIX);
+ total_vec = pci_alloc_irq_vectors(dev->pdev, 1, total_vec, PCI_IRQ_MSIX);
if (total_vec < 0)
return total_vec;
pf_vec = min(pf_vec, total_vec);
@@ -633,8 +645,9 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
{
if (table->sf_comp_pool)
- return table->sf_comp_pool->xa_num_irqs.max -
- table->sf_comp_pool->xa_num_irqs.min + 1;
+ return min_t(int, num_online_cpus(),
+ table->sf_comp_pool->xa_num_irqs.max -
+ table->sf_comp_pool->xa_num_irqs.min + 1);
else
return mlx5_irq_table_get_num_comp(table);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 052f48068dc1..7b4783ce213e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -46,7 +46,7 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err);
goto init_one_err;
}
- devlink_reload_enable(devlink);
+ devlink_register(devlink);
return 0;
init_one_err:
@@ -61,10 +61,9 @@ mdev_err:
static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
- struct devlink *devlink;
+ struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
- devlink = priv_to_devlink(sf_dev->mdev);
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
mlx5_uninit_one(sf_dev->mdev);
iounmap(sf_dev->mdev->iseg);
mlx5_mdev_uninit(sf_dev->mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 13891fdc607e..e1bb3acf45e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -323,7 +323,7 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
return -EOPNOTSUPP;
}
- if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
+ if (new_attr->pfnum != mlx5_get_dev_index(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index a5b9f65db23c..07936841ce99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -39,6 +39,7 @@ static const char * const action_type_to_str[] = {
[DR_ACTION_TYP_VPORT] = "DR_ACTION_TYP_VPORT",
[DR_ACTION_TYP_POP_VLAN] = "DR_ACTION_TYP_POP_VLAN",
[DR_ACTION_TYP_PUSH_VLAN] = "DR_ACTION_TYP_PUSH_VLAN",
+ [DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER",
[DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR",
[DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR",
[DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN",
@@ -513,9 +514,9 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
/* If destination is vport we will get the FW flow table
* that recalculates the CS and forwards to the vport.
*/
- ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport->dmn,
- dest_action->vport->caps->num,
- final_icm_addr);
+ ret = mlx5dr_domain_get_recalc_cs_ft_addr(dest_action->vport->dmn,
+ dest_action->vport->caps->num,
+ final_icm_addr);
if (ret) {
mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");
return ret;
@@ -632,7 +633,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
return -EOPNOTSUPP;
case DR_ACTION_TYP_CTR:
attr.ctr_id = action->ctr->ctr_id +
- action->ctr->offeset;
+ action->ctr->offset;
break;
case DR_ACTION_TYP_TAG:
attr.flow_tag = action->flow_tag->flow_tag;
@@ -669,7 +670,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
dest_action = action;
if (rx_rule) {
- if (action->vport->caps->num == WIRE_PORT) {
+ if (action->vport->caps->num == MLX5_VPORT_UPLINK) {
mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n");
return -EOPNOTSUPP;
}
@@ -853,6 +854,7 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action;
bool reformat_req = false;
u32 num_of_ref = 0;
+ u32 ref_act_cnt;
int ret;
int i;
@@ -861,11 +863,14 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
return NULL;
}
- hw_dests = kzalloc(sizeof(*hw_dests) * num_of_dests, GFP_KERNEL);
+ hw_dests = kcalloc(num_of_dests, sizeof(*hw_dests), GFP_KERNEL);
if (!hw_dests)
return NULL;
- ref_actions = kzalloc(sizeof(*ref_actions) * num_of_dests * 2, GFP_KERNEL);
+ if (unlikely(check_mul_overflow(num_of_dests, 2u, &ref_act_cnt)))
+ goto free_hw_dests;
+
+ ref_actions = kcalloc(ref_act_cnt, sizeof(*ref_actions), GFP_KERNEL);
if (!ref_actions)
goto free_hw_dests;
@@ -1747,7 +1752,7 @@ dec_ref:
struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
- u32 vport, u8 vhca_id_valid,
+ u16 vport, u8 vhca_id_valid,
u16 vhca_id)
{
struct mlx5dr_cmd_vport_cap *vport_cap;
@@ -1767,9 +1772,11 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
return NULL;
}
- vport_cap = mlx5dr_get_vport_cap(&vport_dmn->info.caps, vport);
+ vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, vport);
if (!vport_cap) {
- mlx5dr_dbg(dmn, "Failed to get vport %d caps\n", vport);
+ mlx5dr_err(dmn,
+ "Failed to get vport 0x%x caps - vport is disabled or invalid\n",
+ vport);
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 56307283bf9b..1d8febed0d76 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -195,6 +195,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
+ caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
+
return 0;
}
@@ -272,7 +274,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
u32 table_id,
u32 group_id,
u32 modify_header_id,
- u32 vport_id)
+ u16 vport)
{
u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
void *in_flow_context;
@@ -303,7 +305,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
MLX5_SET(dest_format_struct, in_dests, destination_type,
MLX5_FLOW_DESTINATION_TYPE_VPORT);
- MLX5_SET(dest_format_struct, in_dests, destination_id, vport_id);
+ MLX5_SET(dest_format_struct, in_dests, destination_id, vport);
err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
kvfree(in);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 0fe159809ba1..49089cbe897c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -9,48 +9,45 @@
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
(dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
-static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
+static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
/* Per vport cached FW FT for checksum recalculation, this
- * recalculation is needed due to a HW bug.
+ * recalculation is needed due to a HW bug in STEv0.
*/
- dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,
- sizeof(dmn->cache.recalc_cs_ft[0]),
- GFP_KERNEL);
- if (!dmn->cache.recalc_cs_ft)
- return -ENOMEM;
-
- return 0;
+ xa_init(&dmn->csum_fts_xa);
}
-static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)
+static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
- int i;
-
- for (i = 0; i < dmn->info.caps.num_vports; i++) {
- if (!dmn->cache.recalc_cs_ft[i])
- continue;
+ struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+ unsigned long i;
- mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]);
+ xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) {
+ if (recalc_cs_ft)
+ mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
}
- kfree(dmn->cache.recalc_cs_ft);
+ xa_destroy(&dmn->csum_fts_xa);
}
-int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
- u32 vport_num,
- u64 *rx_icm_addr)
+int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+ u16 vport_num,
+ u64 *rx_icm_addr)
{
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+ int ret;
- recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num];
+ recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);
if (!recalc_cs_ft) {
- /* Table not in cache, need to allocate a new one */
+ /* Table hasn't been created yet */
recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
if (!recalc_cs_ft)
return -EINVAL;
- dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft;
+ ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num,
+ recalc_cs_ft, GFP_KERNEL));
+ if (ret)
+ return ret;
}
*rx_icm_addr = recalc_cs_ft->rx_icm_addr;
@@ -124,18 +121,39 @@ static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}
+static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
+ struct mlx5dr_cmd_vport_cap *uplink_vport)
+{
+ struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
+
+ uplink_vport->num = MLX5_VPORT_UPLINK;
+ uplink_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
+ uplink_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
+ uplink_vport->vport_gvmi = 0;
+ uplink_vport->vhca_gvmi = dmn->info.caps.gvmi;
+}
+
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
- bool other_vport,
- u16 vport_number)
+ u16 vport_number,
+ struct mlx5dr_cmd_vport_cap *vport_caps)
{
- struct mlx5dr_cmd_vport_cap *vport_caps;
+ u16 cmd_vport = vport_number;
+ bool other_vport = true;
int ret;
- vport_caps = &dmn->info.caps.vports_caps[vport_number];
+ if (vport_number == MLX5_VPORT_UPLINK) {
+ dr_domain_fill_uplink_caps(dmn, vport_caps);
+ return 0;
+ }
+
+ if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) {
+ other_vport = false;
+ cmd_vport = 0;
+ }
ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
other_vport,
- vport_number,
+ cmd_vport,
&vport_caps->icm_address_rx,
&vport_caps->icm_address_tx);
if (ret)
@@ -143,7 +161,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
other_vport,
- vport_number,
+ cmd_vport,
&vport_caps->vport_gvmi);
if (ret)
return ret;
@@ -154,27 +172,82 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
return 0;
}
-static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
+static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
{
- struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
- struct mlx5dr_cmd_vport_cap *wire_vport;
- int vport;
+ return dr_domain_query_vport(dmn,
+ dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
+ &dmn->info.caps.vports.esw_manager_caps);
+}
+
+static struct mlx5dr_cmd_vport_cap *
+dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
+{
+ struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+ struct mlx5dr_cmd_vport_cap *vport_caps;
int ret;
- /* Query vports (except wire vport) */
- for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
- ret = dr_domain_query_vport(dmn, !!vport, vport);
- if (ret)
- return ret;
+ vport_caps = kvzalloc(sizeof(*vport_caps), GFP_KERNEL);
+ if (!vport_caps)
+ return NULL;
+
+ ret = dr_domain_query_vport(dmn, vport, vport_caps);
+ if (ret) {
+ kvfree(vport_caps);
+ return NULL;
}
- /* Last vport is the wire port */
- wire_vport = &dmn->info.caps.vports_caps[vport];
- wire_vport->num = WIRE_PORT;
- wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
- wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
- wire_vport->vport_gvmi = 0;
- wire_vport->vhca_gvmi = dmn->info.caps.gvmi;
+ ret = xa_insert(&caps->vports.vports_caps_xa, vport,
+ vport_caps, GFP_KERNEL);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
+ kvfree(vport_caps);
+ return ERR_PTR(ret);
+ }
+
+ return vport_caps;
+}
+
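+/* Vport caps other than the eswitch manager's are queried lazily on
+ * first use and cached in an xarray; a concurrent insert is resolved
+ * by reloading the entry on -EBUSY.
+ */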
+struct mlx5dr_cmd_vport_cap *
+mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
+{
+ struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+
+ if ((caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
+ (!caps->is_ecpf && vport == 0))
+ return &caps->vports.esw_manager_caps;
+
+vport_load:
+ vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
+ if (vport_caps)
+ return vport_caps;
+
+ vport_caps = dr_domain_add_vport_cap(dmn, vport);
+ if (PTR_ERR(vport_caps) == -EBUSY)
+ /* caps were already stored by another thread */
+ goto vport_load;
+
+ return vport_caps;
+}
+
+static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+ unsigned long i;
+
+ xa_for_each(&dmn->info.caps.vports.vports_caps_xa, i, vport_caps) {
+ vport_caps = xa_erase(&dmn->info.caps.vports.vports_caps_xa, i);
+ kvfree(vport_caps);
+ }
+}
+
+static int dr_domain_query_uplink(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+
+ vport_caps = mlx5dr_domain_get_vport_cap(dmn, MLX5_VPORT_UPLINK);
+ if (!vport_caps)
+ return -EINVAL;
return 0;
}
@@ -196,25 +269,29 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
- dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
- sizeof(dmn->info.caps.vports_caps[0]),
- GFP_KERNEL);
- if (!dmn->info.caps.vports_caps)
- return -ENOMEM;
+ xa_init(&dmn->info.caps.vports.vports_caps_xa);
- ret = dr_domain_query_vports(dmn);
+ /* Query eswitch manager and uplink vports only. Rest of the
+ * vports (vport 0, VFs and SFs) will be queried dynamically.
+ */
+
+ ret = dr_domain_query_esw_mngr(dmn);
if (ret) {
- mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret);
- goto free_vports_caps;
+ mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret);
+ goto free_vports_caps_xa;
}
- dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;
+ ret = dr_domain_query_uplink(dmn);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed to query uplink vport caps (err: %d)", ret);
+ goto free_vports_caps_xa;
+ }
return 0;
-free_vports_caps:
- kfree(dmn->info.caps.vports_caps);
- dmn->info.caps.vports_caps = NULL;
+free_vports_caps_xa:
+ xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
+
return ret;
}
@@ -229,8 +306,6 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
return -EOPNOTSUPP;
}
- dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);
-
ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
if (ret)
return ret;
@@ -267,11 +342,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
- vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
- if (!vport_cap) {
- mlx5dr_err(dmn, "Failed to get esw manager vport\n");
- return -ENOENT;
- }
+ vport_cap = &dmn->info.caps.vports.esw_manager_caps;
dmn->info.supp_sw_steering = true;
dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
@@ -290,7 +361,8 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
- kfree(dmn->info.caps.vports_caps);
+ dr_domain_clear_vports(dmn);
+ xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
}
struct mlx5dr_domain *
@@ -333,16 +405,10 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
goto uninit_caps;
}
- ret = dr_domain_init_cache(dmn);
- if (ret) {
- mlx5dr_err(dmn, "Failed initialize domain cache\n");
- goto uninit_resourses;
- }
+ dr_domain_init_csum_recalc_fts(dmn);
return dmn;
-uninit_resourses:
- dr_domain_uninit_resources(dmn);
uninit_caps:
dr_domain_caps_uninit(dmn);
free_domain:
@@ -381,7 +447,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
/* make sure resources are not used by the hardware */
mlx5dr_cmd_sync_steering(dmn->mdev);
- dr_domain_uninit_cache(dmn);
+ dr_domain_uninit_csum_recalc_fts(dmn);
dr_domain_uninit_resources(dmn);
dr_domain_caps_uninit(dmn);
mutex_destroy(&dmn->info.tx.mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 0d6f86eb248b..68a4c32d5f34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -5,7 +5,7 @@
#include "dr_types.h"
struct mlx5dr_fw_recalc_cs_ft *
-mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num)
{
struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index aca80efc28fa..323ea138ad99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -1042,10 +1042,10 @@ static bool dr_rule_skip(enum mlx5dr_domain_type domain,
return false;
if (mask->misc.source_port) {
- if (rx && value->misc.source_port != WIRE_PORT)
+ if (rx && value->misc.source_port != MLX5_VPORT_UPLINK)
return true;
- if (!rx && value->misc.source_port == WIRE_PORT)
+ if (!rx && value->misc.source_port == MLX5_VPORT_UPLINK)
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index 9c704bce3c12..b0649c2877dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -1645,7 +1645,7 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
struct mlx5dr_match_misc *misc = &value->misc;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
- struct mlx5dr_cmd_caps *caps;
+ struct mlx5dr_domain *vport_dmn;
u8 *bit_mask = sb->bit_mask;
bool source_gvmi_set;
@@ -1654,23 +1654,24 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */
if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
dmn->peer_dmn->info.caps.gvmi))
- caps = &dmn->peer_dmn->info.caps;
+ vport_dmn = dmn->peer_dmn;
else
return -EINVAL;
misc->source_eswitch_owner_vhca_id = 0;
} else {
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
}
source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
if (source_gvmi_set) {
- vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+ vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn,
+ misc->source_port);
if (!vport_cap) {
- mlx5dr_err(dmn, "Vport 0x%x is invalid\n",
+ mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
misc->source_port);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index b2481c99da79..cb9cf67b0a02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -586,9 +586,11 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
u8 *d_action;
- dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
- action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
- action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
d_action = action + DR_STE_ACTION_SINGLE_SZ;
dr_ste_v1_set_encap_l3(last_ste,
@@ -1776,7 +1778,7 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
struct mlx5dr_match_misc *misc = &value->misc;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
- struct mlx5dr_cmd_caps *caps;
+ struct mlx5dr_domain *vport_dmn;
u8 *bit_mask = sb->bit_mask;
DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);
@@ -1784,22 +1786,22 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */
if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
dmn->peer_dmn->info.caps.gvmi))
- caps = &dmn->peer_dmn->info.caps;
+ vport_dmn = dmn->peer_dmn;
else
return -EINVAL;
- misc->source_eswitch_owner_vhca_id = 0;
+ misc->source_eswitch_owner_vhca_id = 0;
} else {
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
}
if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
return 0;
- vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+ vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port);
if (!vport_cap) {
mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
misc->source_port);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index b20e8aabb861..73fed94af09a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -4,7 +4,7 @@
#ifndef _DR_TYPES_
#define _DR_TYPES_
-#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
#include <linux/refcount.h>
#include "fs_core.h"
#include "wq.h"
@@ -14,7 +14,6 @@
#define DR_RULE_MAX_STES 18
#define DR_ACTION_MAX_STES 5
-#define WIRE_PORT 0xFFFF
#define DR_STE_SVLAN 0x1
#define DR_STE_CVLAN 0x2
#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
@@ -752,9 +751,9 @@ struct mlx5dr_esw_caps {
struct mlx5dr_cmd_vport_cap {
u16 vport_gvmi;
u16 vhca_gvmi;
+ u16 num;
u64 icm_address_rx;
u64 icm_address_tx;
- u32 num;
};
struct mlx5dr_roce_cap {
@@ -763,6 +762,11 @@ struct mlx5dr_roce_cap {
u8 fl_rc_qp_when_roce_enabled:1;
};
+struct mlx5dr_vports {
+ struct mlx5dr_cmd_vport_cap esw_manager_caps;
+ struct xarray vports_caps_xa;
+};
+
struct mlx5dr_cmd_caps {
u16 gvmi;
u64 nic_rx_drop_address;
@@ -786,7 +790,6 @@ struct mlx5dr_cmd_caps {
u8 flex_parser_id_gtpu_first_ext_dw_0;
u8 max_ft_level;
u16 roce_min_src_udp;
- u8 num_esw_ports;
u8 sw_format_ver;
bool eswitch_manager;
bool rx_sw_owner;
@@ -795,11 +798,11 @@ struct mlx5dr_cmd_caps {
u8 rx_sw_owner_v2:1;
u8 tx_sw_owner_v2:1;
u8 fdb_sw_owner_v2:1;
- u32 num_vports;
struct mlx5dr_esw_caps esw_caps;
- struct mlx5dr_cmd_vport_cap *vports_caps;
+ struct mlx5dr_vports vports;
bool prio_tag_required;
struct mlx5dr_roce_cap roce_caps;
+ u8 is_ecpf:1;
u8 isolate_vl_tc:1;
};
@@ -826,10 +829,6 @@ struct mlx5dr_domain_info {
struct mlx5dr_cmd_caps caps;
};
-struct mlx5dr_domain_cache {
- struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft;
-};
-
struct mlx5dr_domain {
struct mlx5dr_domain *peer_dmn;
struct mlx5_core_dev *mdev;
@@ -841,7 +840,7 @@ struct mlx5dr_domain {
struct mlx5dr_icm_pool *action_icm_pool;
struct mlx5dr_send_ring *send_ring;
struct mlx5dr_domain_info info;
- struct mlx5dr_domain_cache cache;
+ struct xarray csum_fts_xa;
struct mlx5dr_ste_ctx *ste_ctx;
};
@@ -942,7 +941,7 @@ struct mlx5dr_action_dest_tbl {
struct mlx5dr_action_ctr {
u32 ctr_id;
- u32 offeset;
+ u32 offset;
};
struct mlx5dr_action_vport {
@@ -1102,18 +1101,8 @@ mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
return true;
}
-static inline struct mlx5dr_cmd_vport_cap *
-mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
-{
- if (!caps->vports_caps ||
- (vport >= caps->num_vports && vport != WIRE_PORT))
- return NULL;
-
- if (vport == WIRE_PORT)
- vport = caps->num_vports;
-
- return &caps->vports_caps[vport];
-}
+struct mlx5dr_cmd_vport_cap *
+mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport);
struct mlx5dr_cmd_query_flow_table_details {
u8 status;
@@ -1154,7 +1143,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
u32 table_id,
u32 group_id,
u32 modify_header_id,
- u32 vport_id);
+ u16 vport_id);
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
u32 table_type,
u32 table_id);
@@ -1372,12 +1361,12 @@ struct mlx5dr_fw_recalc_cs_ft {
};
struct mlx5dr_fw_recalc_cs_ft *
-mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num);
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num);
void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
-int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
- u32 vport_num,
- u64 *rx_icm_addr);
+int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+ u16 vport_num,
+ u64 *rx_icm_addr);
int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_cmd_flow_destination_hw_info *dest,
int num_dest,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 7e58f4e594b7..2632d5ae9bc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -222,7 +222,7 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
-#define MLX5_FLOW_CONTEXT_ACTION_MAX 20
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 32
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
@@ -625,6 +625,19 @@ static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *n
mlx5dr_action_destroy(modify_hdr->action.dr_action);
}
+static int
+mlx5_cmd_dr_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_cmd_dr_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ return -EOPNOTSUPP;
+}
+
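Software steering has no match definer support, so the two callbacks above are stubbed to return -EOPNOTSUPP and wired into mlx5_flow_cmds_dr below; the core flow-steering code then gets a well-defined error instead of hitting a missing op.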
static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
@@ -727,6 +740,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
.packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_dr_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_dr_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_dr_destroy_match_definer,
.set_peer = mlx5_cmd_dr_set_peer,
.create_ns = mlx5_cmd_dr_create_ns,
.destroy_ns = mlx5_cmd_dr_destroy_ns,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index c5a8b1601999..c7c93131b762 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -89,7 +89,7 @@ mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
- u32 vport, u8 vhca_id_valid,
+ u16 vport, u8 vhca_id_valid,
u16 vhca_id);
struct mlx5dr_action *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 4c1440a95ad7..8846d30a380a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -421,19 +421,21 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
+ if (err)
+ goto out;
*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.system_image_guid);
-
+out:
kvfree(out);
-
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
@@ -1133,19 +1135,20 @@ EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
{
int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
- u64 tmp = 0;
+ u64 tmp;
+ int err;
if (mdev->sys_image_guid)
return mdev->sys_image_guid;
if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH)
- mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
+ err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
else
- mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
+ err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
- mdev->sys_image_guid = tmp;
+ mdev->sys_image_guid = err ? 0 : tmp;
- return tmp;
+ return mdev->sys_image_guid;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
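The rework above means the system image GUID is cached only after a successful query; on failure the cached value stays 0, so the next caller retries the firmware query instead of consuming a stale zero forever. A stripped-down sketch of the pattern, with hypothetical names:

	struct dev_ctx {
		u64 cached_guid;
	};

	int query_guid(struct dev_ctx *ctx, u64 *guid); /* assumed: 0 on success */

	u64 guid_get(struct dev_ctx *ctx)
	{
		u64 tmp;

		if (ctx->cached_guid)
			return ctx->cached_guid;

		/* Cache only valid values; 0 keeps the slot "unset". */
		ctx->cached_guid = query_guid(ctx, &tmp) ? 0 : tmp;
		return ctx->cached_guid;
	}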
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 6704f5c1aa32..b990782c1eb1 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -75,7 +75,7 @@ static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv)
u64_to_ether_addr(local_mac, mac);
if (is_valid_ether_addr(mac)) {
- ether_addr_copy(priv->netdev->dev_addr, mac);
+ eth_hw_addr_set(priv->netdev, mac);
} else {
/* Provide a random MAC if for some reason the device has
* not been configured with a valid MAC address already.
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
index 7654841a05c2..e6475ea77cd1 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
@@ -19,7 +19,7 @@ struct mlxfw_dev {
static inline
struct device *mlxfw_dev_dev(struct mlxfw_dev *mlxfw_dev)
{
- return mlxfw_dev->devlink->dev;
+ return devlink_to_dev(mlxfw_dev->devlink);
}
#define MLXFW_PRFX "mlxfw: "
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 9a570fa167b6..3fd3812b8f31 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1973,9 +1973,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_emad_init;
- if (!reload)
- devlink_register(devlink);
-
if (!reload) {
err = mlxsw_core_params_register(mlxsw_core);
if (err)
@@ -2010,11 +2007,10 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_driver_init;
}
- devlink_params_publish(devlink);
-
- if (!reload)
- devlink_reload_enable(devlink);
-
+ if (!reload) {
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
+ }
return 0;
err_driver_init:
@@ -2030,8 +2026,6 @@ err_fw_rev_validate:
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
err_register_params:
- if (!reload)
- devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
kfree(mlxsw_core->lag.mapping);
@@ -2081,7 +2075,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
struct devlink *devlink = priv_to_devlink(mlxsw_core);
if (!reload)
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
+
if (devlink_is_reload_failed(devlink)) {
if (!reload)
/* Only the parts that were not de-initialized in the
@@ -2092,7 +2087,6 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
}
- devlink_params_unpublish(devlink);
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_env_fini(mlxsw_core->env);
@@ -2101,8 +2095,6 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
mlxsw_core_health_fini(mlxsw_core);
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
- if (!reload)
- devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core, reload);
@@ -2116,7 +2108,6 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
reload_fail_deinit:
mlxsw_core_params_unregister(mlxsw_core);
- devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
devlink_free(devlink);
}
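Taken together, these core.c hunks move mlxsw to the newer devlink life cycle: devlink_register() becomes the very last step of initialization and devlink_unregister() the very first step of teardown, reload support is advertised up front with devlink_set_features() instead of the removed devlink_reload_enable()/disable() pair, and the separate devlink_params_publish()/unpublish() calls go away. A condensed, illustrative sketch of the resulting ordering (helper names are hypothetical):

	static int example_probe(struct devlink *devlink)
	{
		int err;

		err = example_core_init(); /* params, resources, driver init */
		if (err)
			return err;

		/* Registration comes last: it opens the device to userspace,
		 * so everything must already be consistent by this point.
		 */
		devlink_set_features(devlink, DEVLINK_F_RELOAD);
		devlink_register(devlink);
		return 0;
	}

	static void example_remove(struct devlink *devlink)
	{
		/* Mirror image: cut off userspace access first. */
		devlink_unregister(devlink);
		example_core_fini();
	}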
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 9e367174743d..6dd4ae2f45f4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -17,6 +17,7 @@ struct mlxsw_env_module_info {
bool is_overheat;
int num_ports_mapped;
int num_ports_up;
+ enum ethtool_module_power_mode_policy power_mode_policy;
};
struct mlxsw_env {
@@ -445,6 +446,152 @@ out:
}
EXPORT_SYMBOL(mlxsw_env_reset_module);
+int
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ char mcion_pl[MLXSW_REG_MCION_LEN];
+ u32 status_bits;
+ int err;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ params->policy = mlxsw_env->module_info[module].power_mode_policy;
+
+ mlxsw_reg_mcion_pack(mcion_pl, module);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to retrieve module's power mode");
+ goto out;
+ }
+
+ status_bits = mlxsw_reg_mcion_module_status_bits_get(mcion_pl);
+ if (!(status_bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK))
+ goto out;
+
+ if (status_bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK)
+ params->mode = ETHTOOL_MODULE_POWER_MODE_LOW;
+ else
+ params->mode = ETHTOOL_MODULE_POWER_MODE_HIGH;
+
+out:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_get_module_power_mode);
+
+static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core,
+ u8 module, bool enable)
+{
+ enum mlxsw_reg_pmaos_admin_status admin_status;
+ char pmaos_pl[MLXSW_REG_PMAOS_LEN];
+
+ mlxsw_reg_pmaos_pack(pmaos_pl, module);
+ admin_status = enable ? MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED :
+ MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED;
+ mlxsw_reg_pmaos_admin_status_set(pmaos_pl, admin_status);
+ mlxsw_reg_pmaos_ase_set(pmaos_pl, true);
+
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
+}
+
+static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core,
+ u8 module, bool low_power)
+{
+ u16 eeprom_override_mask, eeprom_override;
+ char pmmp_pl[MLXSW_REG_PMMP_LEN];
+
+ mlxsw_reg_pmmp_pack(pmmp_pl, module);
+ mlxsw_reg_pmmp_sticky_set(pmmp_pl, true);
+ /* Mask all the bits except low power mode. */
+ eeprom_override_mask = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK;
+ mlxsw_reg_pmmp_eeprom_override_mask_set(pmmp_pl, eeprom_override_mask);
+ eeprom_override = low_power ? MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK :
+ 0;
+ mlxsw_reg_pmmp_eeprom_override_set(pmmp_pl, eeprom_override);
+
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmmp), pmmp_pl);
+}
+
+static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core,
+ u8 module, bool low_power,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlxsw_env_module_enable_set(mlxsw_core, module, false);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to disable module");
+ return err;
+ }
+
+ err = mlxsw_env_module_low_power_set(mlxsw_core, module, low_power);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set module's power mode");
+ goto err_module_low_power_set;
+ }
+
+ err = mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to enable module");
+ goto err_module_enable_set;
+ }
+
+ return 0;
+
+err_module_enable_set:
+ mlxsw_env_module_low_power_set(mlxsw_core, module, !low_power);
+err_module_low_power_set:
+ mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ return err;
+}
+
+int
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ bool low_power;
+ int err = 0;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH &&
+ policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ if (mlxsw_env->module_info[module].power_mode_policy == policy)
+ goto out;
+
+ /* If any ports are up, we are already in high power mode. */
+ if (mlxsw_env->module_info[module].num_ports_up)
+ goto out_set_policy;
+
+ low_power = policy == ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO;
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, low_power,
+ extack);
+ if (err)
+ goto out;
+
+out_set_policy:
+ mlxsw_env->module_info[module].power_mode_policy = policy;
+out:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_set_module_power_mode);
+
static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
u8 module,
bool *p_has_temp_sensor)
@@ -794,15 +941,33 @@ EXPORT_SYMBOL(mlxsw_env_module_port_unmap);
int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int err = 0;
if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
return -EINVAL;
mutex_lock(&mlxsw_env->module_info_lock);
+
+ if (mlxsw_env->module_info[module].power_mode_policy !=
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
+ goto out_inc;
+
+ if (mlxsw_env->module_info[module].num_ports_up != 0)
+ goto out_inc;
+
+ /* Transition to high power mode following the first port using the module
+ * being put administratively up.
+ */
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, false,
+ NULL);
+ if (err)
+ goto out_unlock;
+
+out_inc:
mlxsw_env->module_info[module].num_ports_up++;
+out_unlock:
mutex_unlock(&mlxsw_env->module_info_lock);
-
- return 0;
+ return err;
}
EXPORT_SYMBOL(mlxsw_env_module_port_up);
@@ -814,7 +979,22 @@ void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module)
return;
mutex_lock(&mlxsw_env->module_info_lock);
+
mlxsw_env->module_info[module].num_ports_up--;
+
+ if (mlxsw_env->module_info[module].power_mode_policy !=
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
+ goto out_unlock;
+
+ if (mlxsw_env->module_info[module].num_ports_up != 0)
+ goto out_unlock;
+
+ /* Transition to low power mode following the last port using the module
+ * being put administratively down.
+ */
+ __mlxsw_env_set_module_power_mode(mlxsw_core, module, true, NULL);
+
+out_unlock:
mutex_unlock(&mlxsw_env->module_info_lock);
}
EXPORT_SYMBOL(mlxsw_env_module_port_down);
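Together with mlxsw_env_set_module_power_mode() above, these two hunks implement the "auto" policy as a reference count of ports using the module: the first port going administratively up forces high power, the last port going down drops back to low power, and the default "high" policy leaves power management entirely to firmware. A compact sketch of that state machine, with hypothetical types standing in for the driver's module_info bookkeeping:

	enum policy { POLICY_HIGH, POLICY_AUTO };

	struct module_state {
		enum policy policy;
		int ports_up;
	};

	void set_low_power(struct module_state *m, bool low); /* assumed */

	static void module_port_event(struct module_state *m, bool up)
	{
		if (up && m->policy == POLICY_AUTO && m->ports_up == 0)
			set_low_power(m, false);	/* first user: go high */

		m->ports_up += up ? 1 : -1;

		if (!up && m->policy == POLICY_AUTO && m->ports_up == 0)
			set_low_power(m, true);		/* last user: go low */
	}

From userspace this is driven through the new ethtool module interface, e.g. "ethtool --show-module swp1" and "ethtool --set-module swp1 power-mode-policy auto" with a recent ethtool.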
@@ -824,7 +1004,7 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_env *env;
u8 module_count;
- int err;
+ int i, err;
mlxsw_reg_mgpir_pack(mgpir_pl);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
@@ -837,6 +1017,13 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
if (!env)
return -ENOMEM;
+ /* Firmware defaults to the high power mode policy, where modules are
+ * transitioned to high power mode following plug-in.
+ */
+ for (i = 0; i < module_count; i++)
+ env->module_info[i].power_mode_policy =
+ ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH;
+
mutex_init(&env->module_info_lock);
env->core = mlxsw_core;
env->module_count = module_count;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index c486397f5dfe..da121b1a84b4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -29,6 +29,16 @@ int mlxsw_env_reset_module(struct net_device *netdev,
u32 *flags);
int
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+
+int
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack);
+
+int
mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
u64 *p_counter);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 0998dcc9cac0..b29824448aa8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -24,16 +24,8 @@
#define MLXSW_THERMAL_ZONE_MAX_NAME 16
#define MLXSW_THERMAL_TEMP_SCORE_MAX GENMASK(31, 0)
#define MLXSW_THERMAL_MAX_STATE 10
+#define MLXSW_THERMAL_MIN_STATE 2
#define MLXSW_THERMAL_MAX_DUTY 255
-/* Minimum and maximum fan allowed speed in percent: from 20% to 100%. Values
- * MLXSW_THERMAL_MAX_STATE + x, where x is between 2 and 10 are used for
- * setting fan speed dynamic minimum. For example, if value is set to 14 (40%)
- * cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10 to
- * introduce PWM speed in percent: 40, 40, 40, 40, 40, 50, 60. 70, 80, 90, 100.
- */
-#define MLXSW_THERMAL_SPEED_MIN (MLXSW_THERMAL_MAX_STATE + 2)
-#define MLXSW_THERMAL_SPEED_MAX (MLXSW_THERMAL_MAX_STATE * 2)
-#define MLXSW_THERMAL_SPEED_MIN_LEVEL 2 /* 20% */
/* External cooling devices, allowed for binding to mlxsw thermal zones. */
static char * const mlxsw_thermal_external_allowed_cdev[] = {
@@ -646,49 +638,16 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
struct mlxsw_thermal *thermal = cdev->devdata;
struct device *dev = thermal->bus_info->dev;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
- unsigned long cur_state, i;
int idx;
- u8 duty;
int err;
+ if (state > MLXSW_THERMAL_MAX_STATE)
+ return -EINVAL;
+
idx = mlxsw_get_cooling_device_idx(thermal, cdev);
if (idx < 0)
return idx;
- /* Verify if this request is for changing allowed fan dynamical
- * minimum. If it is - update cooling levels accordingly and update
- * state, if current state is below the newly requested minimum state.
- * For example, if current state is 5, and minimal state is to be
- * changed from 4 to 6, thermal->cooling_levels[0 to 5] will be changed
- * all from 4 to 6. And state 5 (thermal->cooling_levels[4]) should be
- * overwritten.
- */
- if (state >= MLXSW_THERMAL_SPEED_MIN &&
- state <= MLXSW_THERMAL_SPEED_MAX) {
- state -= MLXSW_THERMAL_MAX_STATE;
- for (i = 0; i <= MLXSW_THERMAL_MAX_STATE; i++)
- thermal->cooling_levels[i] = max(state, i);
-
- mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0);
- err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
- if (err)
- return err;
-
- duty = mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl);
- cur_state = mlxsw_duty_to_state(duty);
-
- /* If current fan state is lower than requested dynamical
- * minimum, increase fan speed up to dynamical minimum.
- */
- if (state < cur_state)
- return 0;
-
- state = cur_state;
- }
-
- if (state > MLXSW_THERMAL_MAX_STATE)
- return -EINVAL;
-
/* Normalize the state to the valid speed range. */
state = thermal->cooling_levels[state];
mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
@@ -998,8 +957,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
/* Initialize cooling levels per PWM state. */
for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++)
- thermal->cooling_levels[i] = max(MLXSW_THERMAL_SPEED_MIN_LEVEL,
- i);
+ thermal->cooling_levels[i] = max(MLXSW_THERMAL_MIN_STATE, i);
thermal->polling_delay = bus_info->low_frequency ?
MLXSW_THERMAL_SLOW_POLL_INT :
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index e92cadc98128..ab70a873a01a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -270,11 +270,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u8 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u8 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val) \
{ \
__mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
@@ -290,13 +292,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u8 \
+static inline u8 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u8 val) \
{ \
@@ -311,11 +313,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u16 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val) \
{ \
__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
@@ -331,13 +335,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u16 \
+static inline u16 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u16 val) \
{ \
@@ -352,11 +356,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u32 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val) \
{ \
__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
@@ -372,13 +378,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u32 \
+static inline u32 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u32 val) \
{ \
@@ -393,11 +399,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u64 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val) \
{ \
__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
@@ -413,13 +421,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u64 \
+static inline u64 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u64 val) \
{ \
@@ -433,19 +441,19 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bytes = _sizebytes,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, char *dst) \
{ \
__mlxsw_item_memcpy_from(buf, dst, \
&__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \
{ \
__mlxsw_item_memcpy_to(buf, src, \
&__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline char * \
+static inline char * __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_data(char *buf) \
{ \
return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
@@ -460,7 +468,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bytes = _sizebytes,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \
unsigned short index, \
char *dst) \
@@ -468,7 +476,7 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \
__mlxsw_item_memcpy_from(buf, dst, \
&__ITEM_NAME(_type, _cname, _iname), index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \
unsigned short index, \
const char *src) \
@@ -476,7 +484,7 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \
__mlxsw_item_memcpy_to(buf, src, \
&__ITEM_NAME(_type, _cname, _iname), index); \
} \
-static inline char * \
+static inline char * __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index) \
{ \
return __mlxsw_item_data(buf, \
@@ -491,14 +499,14 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bytes = _sizebytes,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u8 \
+static inline u8 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, u16 index) \
{ \
return __mlxsw_item_bit_array_get(buf, \
&__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \
{ \
return __mlxsw_item_bit_array_set(buf, \
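Every item.h change is the same mechanical annotation: the macro-generated getters and setters gain __maybe_unused (the kernel's wrapper for the unused attribute) so that a translation unit which instantiates an item but never calls one of its accessors does not trip unused-function warnings in stricter builds. An illustrative reduction of the pattern, not taken from the driver:

	#define DEFINE_GETTER(name)				\
	static inline int __maybe_unused			\
	get_##name(const int *buf)				\
	{							\
		return buf[0];					\
	}

	DEFINE_GETTER(example)
	/* OK even if get_example() is never referenced in this file. */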
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 9644e9c486b8..5d4dfa5ddbb5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -145,12 +145,38 @@ static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
flags);
}
+static int
+mlxsw_m_get_module_power_mode(struct net_device *netdev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->module,
+ params, extack);
+}
+
+static int
+mlxsw_m_set_module_power_mode(struct net_device *netdev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->module,
+ params->policy, extack);
+}
+
static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
.get_drvinfo = mlxsw_m_module_get_drvinfo,
.get_module_info = mlxsw_m_get_module_info,
.get_module_eeprom = mlxsw_m_get_module_eeprom,
.get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page,
.reset = mlxsw_m_reset,
+ .get_module_power_mode = mlxsw_m_get_module_power_mode,
+ .set_module_power_mode = mlxsw_m_set_module_power_mode,
};
static int
@@ -174,20 +200,16 @@ static int
mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port)
{
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- struct net_device *dev = mlxsw_m_port->dev;
char ppad_pl[MLXSW_REG_PPAD_LEN];
+ u8 addr[ETH_ALEN];
int err;
mlxsw_reg_ppad_pack(ppad_pl, false, 0);
err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(ppad), ppad_pl);
if (err)
return err;
- mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
- /* The last byte value in base mac address is guaranteed
- * to be such it does not overflow when adding local_port
- * value.
- */
- dev->dev_addr[ETH_ALEN - 1] += mlxsw_m_port->module + 1;
+ mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, addr);
+ eth_hw_addr_gen(mlxsw_m_port->dev, addr, mlxsw_m_port->module + 1);
return 0;
}
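Rather than copying the base MAC into dev->dev_addr and bumping its last byte in place (a pattern being phased out as netdev->dev_addr becomes const), the address is now assembled in a local buffer and published via eth_hw_addr_gen(), which derives "base MAC plus offset" for the port. A simplified, hypothetical equivalent of what that helper does (the real one also carries across address bytes):

	#include <linux/etherdevice.h>

	static void example_port_addr_set(struct net_device *netdev,
					  const u8 *base_mac,
					  unsigned int port_id)
	{
		u8 addr[ETH_ALEN];

		ether_addr_copy(addr, base_mac);
		addr[ETH_ALEN - 1] += port_id;	/* simplified: no carry */
		eth_hw_addr_set(netdev, addr);
	}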
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6c96e124e4c8..48b817ba6d4e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4951,7 +4951,7 @@ enum mlxsw_reg_ppcnt_grp {
MLXSW_REG_PPCNT_DISCARD_CNT = 0x6,
MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
MLXSW_REG_PPCNT_TC_CNT = 0x11,
- MLXSW_REG_PPCNT_TC_CONG_TC = 0x13,
+ MLXSW_REG_PPCNT_TC_CONG_CNT = 0x13,
};
/* reg_ppcnt_grp
@@ -5371,7 +5371,7 @@ MLXSW_ITEM64(reg, ppcnt, tx_pause_duration,
MLXSW_ITEM64(reg, ppcnt, tx_pause_transition,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
-/* Ethernet Per Traffic Group Counters */
+/* Ethernet Per Traffic Class Counters */
/* reg_ppcnt_tc_transmit_queue
* Contains the transmit queue depth in cells of traffic class
@@ -5398,6 +5398,12 @@ MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc,
MLXSW_ITEM64(reg, ppcnt, wred_discard,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
+/* reg_ppcnt_ecn_marked_tc
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ecn_marked_tc,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
enum mlxsw_reg_ppcnt_grp grp,
u8 prio_tc)
@@ -5946,6 +5952,55 @@ static inline void mlxsw_reg_pddr_pack(char *payload, u8 local_port,
mlxsw_reg_pddr_page_select_set(payload, page_select);
}
+/* PMMP - Port Module Memory Map Properties Register
+ * -------------------------------------------------
+ * The PMMP register allows overriding the module memory map advertisement.
+ * The register can only be set while the module is disabled via the PMAOS register.
+ */
+#define MLXSW_REG_PMMP_ID 0x5044
+#define MLXSW_REG_PMMP_LEN 0x2C
+
+MLXSW_REG_DEFINE(pmmp, MLXSW_REG_PMMP_ID, MLXSW_REG_PMMP_LEN);
+
+/* reg_pmmp_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmmp, module, 0x00, 16, 8);
+
+/* reg_pmmp_sticky
+ * When set, the eeprom_override values are kept after a plug-out event.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, pmmp, sticky, 0x00, 0, 1);
+
+/* reg_pmmp_eeprom_override_mask
+ * Write mask bit (negative polarity).
+ * 0 - Allow write
+ * 1 - Ignore write
+ * On write, indicates which of the bits from eeprom_override field are
+ * updated.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmmp, eeprom_override_mask, 0x04, 16, 16);
+
+enum {
+ /* Set module to low power mode */
+ MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK = BIT(8),
+};
+
+/* reg_pmmp_eeprom_override
+ * Override / ignore EEPROM advertisement properties bitmask
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmmp, eeprom_override, 0x04, 0, 16);
+
+static inline void mlxsw_reg_pmmp_pack(char *payload, u8 module)
+{
+ MLXSW_REG_ZERO(pmmp, payload);
+ mlxsw_reg_pmmp_module_set(payload, module);
+}
+
/* PLLP - Port Local port to Label Port mapping Register
* -----------------------------------------------------
* The PLLP register returns the mapping from Local Port into Label Port.
@@ -6734,6 +6789,23 @@ mlxsw_reg_ritr_loopback_ipip4_pack(char *payload,
mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip);
}
+static inline void
+mlxsw_reg_ritr_loopback_ipip6_pack(char *payload,
+ enum mlxsw_reg_ritr_loopback_ipip_type ipip_type,
+ enum mlxsw_reg_ritr_loopback_ipip_options options,
+ u16 uvr_id, u16 underlay_rif,
+ const struct in6_addr *usip, u32 gre_key)
+{
+ enum mlxsw_reg_ritr_loopback_protocol protocol =
+ MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV6;
+
+ mlxsw_reg_ritr_loopback_protocol_set(payload, protocol);
+ mlxsw_reg_ritr_loopback_ipip_common_pack(payload, ipip_type, options,
+ uvr_id, underlay_rif, gre_key);
+ mlxsw_reg_ritr_loopback_ipip_usip6_memcpy_to(payload,
+ (const char *)usip);
+}
+
/* RTAR - Router TCAM Allocation Register
* --------------------------------------
* This register is used for allocation of regions in the TCAM table.
@@ -7002,6 +7074,12 @@ static inline void mlxsw_reg_ratr_ipip4_entry_pack(char *payload, u32 ipv4_udip)
mlxsw_reg_ratr_ipip_ipv4_udip_set(payload, ipv4_udip);
}
+static inline void mlxsw_reg_ratr_ipip6_entry_pack(char *payload, u32 ipv6_ptr)
+{
+ mlxsw_reg_ratr_ipip_type_set(payload, MLXSW_REG_RATR_IPIP_TYPE_IPV6);
+ mlxsw_reg_ratr_ipip_ipv6_ptr_set(payload, ipv6_ptr);
+}
+
static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index,
bool counter_enable)
{
@@ -8187,19 +8265,71 @@ static inline void mlxsw_reg_rtdp_pack(char *payload,
}
static inline void
-mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif,
- enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
- unsigned int type_check, bool gre_key_check,
- u32 ipv4_usip, u32 expected_gre_key)
+mlxsw_reg_rtdp_ipip_pack(char *payload, u16 irif,
+ enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
+ unsigned int type_check, bool gre_key_check,
+ u32 expected_gre_key)
{
mlxsw_reg_rtdp_ipip_irif_set(payload, irif);
mlxsw_reg_rtdp_ipip_sip_check_set(payload, sip_check);
mlxsw_reg_rtdp_ipip_type_check_set(payload, type_check);
mlxsw_reg_rtdp_ipip_gre_key_check_set(payload, gre_key_check);
- mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip);
mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key);
}
+static inline void
+mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif,
+ enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
+ unsigned int type_check, bool gre_key_check,
+ u32 ipv4_usip, u32 expected_gre_key)
+{
+ mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check,
+ gre_key_check, expected_gre_key);
+ mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip);
+}
+
+static inline void
+mlxsw_reg_rtdp_ipip6_pack(char *payload, u16 irif,
+ enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
+ unsigned int type_check, bool gre_key_check,
+ u32 ipv6_usip_ptr, u32 expected_gre_key)
+{
+ mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check,
+ gre_key_check, expected_gre_key);
+ mlxsw_reg_rtdp_ipip_ipv6_usip_ptr_set(payload, ipv6_usip_ptr);
+}
+
+/* RIPS - Router IP version Six Register
+ * -------------------------------------
+ * The RIPS register is used to store IPv6 addresses for use by the NVE and
+ * IPinIP tunnels.
+ */
+#define MLXSW_REG_RIPS_ID 0x8021
+#define MLXSW_REG_RIPS_LEN 0x14
+
+MLXSW_REG_DEFINE(rips, MLXSW_REG_RIPS_ID, MLXSW_REG_RIPS_LEN);
+
+/* reg_rips_index
+ * Index to IPv6 address.
+ * For Spectrum, the index is to the KVD linear.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rips, index, 0x00, 0, 24);
+
+/* reg_rips_ipv6
+ * IPv6 address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rips, ipv6, 0x04, 16);
+
+static inline void mlxsw_reg_rips_pack(char *payload, u32 index,
+ const struct in6_addr *ipv6)
+{
+ MLXSW_REG_ZERO(rips, payload);
+ mlxsw_reg_rips_index_set(payload, index);
+ mlxsw_reg_rips_ipv6_memcpy_to(payload, (const char *)ipv6);
+}
+
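RIPS exists because a 128-bit IPv6 address is too wide to embed directly in registers such as RTDP and RATR; the address is written once into the KVD linear and other registers then refer to it by index. A hedged fragment showing the pairing, reusing helpers from this patch inside an otherwise hypothetical caller (mlxsw_sp, addr6 and the ratr_pl buffer are assumed to be set up):

	char rips_pl[MLXSW_REG_RIPS_LEN];
	u32 kvdl_index;
	int err;

	/* 1. Store the IPv6 address in the KVD linear via RIPS. */
	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  &kvdl_index);
	if (err)
		return err;

	mlxsw_reg_rips_pack(rips_pl, kvdl_index, &addr6);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);

	/* 2. Reference the stored address by its index elsewhere, e.g. in
	 * an IPinIP nexthop entry.
	 */
	mlxsw_reg_ratr_ipip6_entry_pack(ratr_pl, kvdl_index);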
/* RATRAD - Router Adjacency Table Activity Dump Register
* ------------------------------------------------------
* The RATRAD register is used to dump and optionally clear activity bits of
@@ -10278,6 +10408,39 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
MLXSW_REG_MLCR_DURATION_MAX : 0);
}
+/* MCION - Management Cable IO and Notifications Register
+ * ------------------------------------------------------
+ * The MCION register is used to query transceiver modules' IO pins and other
+ * notifications.
+ */
+#define MLXSW_REG_MCION_ID 0x9052
+#define MLXSW_REG_MCION_LEN 0x18
+
+MLXSW_REG_DEFINE(mcion, MLXSW_REG_MCION_ID, MLXSW_REG_MCION_LEN);
+
+/* reg_mcion_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcion, module, 0x00, 16, 8);
+
+enum {
+ MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK = BIT(0),
+ MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK = BIT(8),
+};
+
+/* reg_mcion_module_status_bits
+ * Module IO status as defined by SFF.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcion, module_status_bits, 0x04, 0, 16);
+
+static inline void mlxsw_reg_mcion_pack(char *payload, u8 module)
+{
+ MLXSW_REG_ZERO(mcion, payload);
+ mlxsw_reg_mcion_module_set(payload, module);
+}
+
/* MTPPS - Management Pulse Per Second Register
* --------------------------------------------
* This register provides the device PPS capabilities, configure the PPS in and
@@ -12273,6 +12436,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pmtdb),
MLXSW_REG(pmpe),
MLXSW_REG(pddr),
+ MLXSW_REG(pmmp),
MLXSW_REG(pllp),
MLXSW_REG(htgt),
MLXSW_REG(hpkt),
@@ -12281,6 +12445,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rtar),
MLXSW_REG(ratr),
MLXSW_REG(rtdp),
+ MLXSW_REG(rips),
MLXSW_REG(ratrad),
MLXSW_REG(rdpm),
MLXSW_REG(ricnt),
@@ -12320,6 +12485,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mgir),
MLXSW_REG(mrsr),
MLXSW_REG(mlcr),
+ MLXSW_REG(mcion),
MLXSW_REG(mtpps),
MLXSW_REG(mtutc),
MLXSW_REG(mpsc),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 0e81ae723bc8..66c346a86ec5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -316,11 +316,11 @@ static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
- ether_addr_copy(addr, mlxsw_sp->base_mac);
- addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
- return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
+ eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
+ mlxsw_sp_port->local_port);
+ return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
+ mlxsw_sp_port->dev->dev_addr);
}
static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
@@ -674,7 +674,7 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
if (err)
return err;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -824,12 +824,16 @@ mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
for (i = 0; i < TC_MAX_QUEUE; i++) {
err = mlxsw_sp_port_get_stats_raw(dev,
- MLXSW_REG_PPCNT_TC_CONG_TC,
+ MLXSW_REG_PPCNT_TC_CONG_CNT,
i, ppcnt_pl);
- if (!err)
- xstats->wred_drop[i] =
- mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
+ if (err)
+ goto tc_cnt;
+
+ xstats->wred_drop[i] =
+ mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
+ xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);
+tc_cnt:
err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
i, ppcnt_pl);
if (err)
@@ -1035,6 +1039,8 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
+ case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
+ return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 83ab1ea92d31..3ab57e98cad2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -285,6 +285,7 @@ struct mlxsw_sp_port_vlan {
/* No need for an internal lock; at worst we miss a single periodic iteration */
struct mlxsw_sp_port_xstats {
u64 ecn;
+ u64 tc_ecn[TC_MAX_QUEUE];
u64 wred_drop[TC_MAX_QUEUE];
u64 tail_drop[TC_MAX_QUEUE];
u64 backlog[TC_MAX_QUEUE];
@@ -747,6 +748,7 @@ enum mlxsw_sp_kvdl_entry_type {
MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS,
MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT,
};
@@ -758,6 +760,7 @@ mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET:
case MLXSW_SP_KVDL_ENTRY_TYPE_PBS:
case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS:
case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT:
default:
return 1;
@@ -1193,6 +1196,8 @@ int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_fifo_qopt_offload *p);
int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f);
+int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f);
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
index 3a73d654017f..10ae1115de6c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -35,6 +35,7 @@ static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = {
MAX_KVD_ACTION_SETS),
MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE),
MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE),
+ MLXSW_SP2_KVDL_PART_INFO(IPV6_ADDRESS, 0x28, KVD_SIZE, KVD_SIZE),
MLXSW_SP2_KVDL_PART_INFO(TNUMT, 0x29, KVD_SIZE, KVD_SIZE),
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 9de160e740b2..d78cf5a7220a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -1583,7 +1583,7 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
- unsigned long cb_priv;
+ unsigned long cb_priv = 0;
LIST_HEAD(bulk_list);
char *sbsr_pl;
u8 masked_count;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 06f1645561c6..84d4460f3dcd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -96,6 +96,9 @@ mlxsw_sp_link_ext_state_opcode_map[] = {
{1032, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0},
{1030, ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0},
+
+ {1042, ETHTOOL_LINK_EXT_STATE_MODULE,
+ ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY},
};
static void
@@ -124,6 +127,10 @@ mlxsw_sp_port_set_link_ext_state(struct mlxsw_sp_ethtool_link_ext_state_opcode_m
link_ext_state_info->cable_issue =
link_ext_state_mapping.link_ext_substate;
break;
+ case ETHTOOL_LINK_EXT_STATE_MODULE:
+ link_ext_state_info->module =
+ link_ext_state_mapping.link_ext_substate;
+ break;
default:
break;
}
@@ -1206,6 +1213,32 @@ static int mlxsw_sp_reset(struct net_device *dev, u32 *flags)
return mlxsw_env_reset_module(dev, mlxsw_sp->core, module, flags);
}
+static int
+mlxsw_sp_get_module_power_mode(struct net_device *dev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_get_module_power_mode(mlxsw_sp->core, module, params,
+ extack);
+}
+
+static int
+mlxsw_sp_set_module_power_mode(struct net_device *dev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_set_module_power_mode(mlxsw_sp->core, module,
+ params->policy, extack);
+}
+
const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.cap_link_lanes_supported = true,
.get_drvinfo = mlxsw_sp_port_get_drvinfo,
@@ -1228,6 +1261,8 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_eth_ctrl_stats = mlxsw_sp_get_eth_ctrl_stats,
.get_rmon_stats = mlxsw_sp_get_rmon_stats,
.reset = mlxsw_sp_reset,
+ .get_module_power_mode = mlxsw_sp_get_module_power_mode,
+ .set_module_power_mode = mlxsw_sp_set_module_power_mode,
};
struct mlxsw_sp1_port_link_mode {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 5facabd86882..ad3926de88f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -24,50 +24,72 @@ mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev)
return tun->parms;
}
-static bool mlxsw_sp_ipip_parms4_has_ikey(struct ip_tunnel_parm parms)
+static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms)
{
- return !!(parms.i_flags & TUNNEL_KEY);
+ return !!(parms->i_flags & TUNNEL_KEY);
}
-static bool mlxsw_sp_ipip_parms4_has_okey(struct ip_tunnel_parm parms)
+static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms)
{
- return !!(parms.o_flags & TUNNEL_KEY);
+ return !!(parms->i_flags & TUNNEL_KEY);
}
-static u32 mlxsw_sp_ipip_parms4_ikey(struct ip_tunnel_parm parms)
+static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms)
+{
+ return !!(parms->o_flags & TUNNEL_KEY);
+}
+
+static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms)
+{
+ return !!(parms->o_flags & TUNNEL_KEY);
+}
+
+static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms)
{
return mlxsw_sp_ipip_parms4_has_ikey(parms) ?
- be32_to_cpu(parms.i_key) : 0;
+ be32_to_cpu(parms->i_key) : 0;
}
-static u32 mlxsw_sp_ipip_parms4_okey(struct ip_tunnel_parm parms)
+static u32 mlxsw_sp_ipip_parms6_ikey(const struct __ip6_tnl_parm *parms)
+{
+ return mlxsw_sp_ipip_parms6_has_ikey(parms) ?
+ be32_to_cpu(parms->i_key) : 0;
+}
+
+static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms)
{
return mlxsw_sp_ipip_parms4_has_okey(parms) ?
- be32_to_cpu(parms.o_key) : 0;
+ be32_to_cpu(parms->o_key) : 0;
+}
+
+static u32 mlxsw_sp_ipip_parms6_okey(const struct __ip6_tnl_parm *parms)
+{
+ return mlxsw_sp_ipip_parms6_has_okey(parms) ?
+ be32_to_cpu(parms->o_key) : 0;
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms4_saddr(struct ip_tunnel_parm parms)
+mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.saddr };
+ return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms6_saddr(struct __ip6_tnl_parm parms)
+mlxsw_sp_ipip_parms6_saddr(const struct __ip6_tnl_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr6 = parms.laddr };
+ return (union mlxsw_sp_l3addr) { .addr6 = parms->laddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms4_daddr(struct ip_tunnel_parm parms)
+mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.daddr };
+ return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms6_daddr(struct __ip6_tnl_parm parms)
+mlxsw_sp_ipip_parms6_daddr(const struct __ip6_tnl_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr6 = parms.raddr };
+ return (union mlxsw_sp_l3addr) { .addr6 = parms->raddr };
}
union mlxsw_sp_l3addr
@@ -80,10 +102,10 @@ mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
switch (proto) {
case MLXSW_SP_L3_PROTO_IPV4:
parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- return mlxsw_sp_ipip_parms4_saddr(parms4);
+ return mlxsw_sp_ipip_parms4_saddr(&parms4);
case MLXSW_SP_L3_PROTO_IPV6:
parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
- return mlxsw_sp_ipip_parms6_saddr(parms6);
+ return mlxsw_sp_ipip_parms6_saddr(&parms6);
}
WARN_ON(1);
@@ -95,7 +117,7 @@ static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- return mlxsw_sp_ipip_parms4_daddr(parms4).addr4;
+ return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4;
}
static union mlxsw_sp_l3addr
@@ -108,10 +130,10 @@ mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
switch (proto) {
case MLXSW_SP_L3_PROTO_IPV4:
parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- return mlxsw_sp_ipip_parms4_daddr(parms4);
+ return mlxsw_sp_ipip_parms4_daddr(&parms4);
case MLXSW_SP_L3_PROTO_IPV6:
parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
- return mlxsw_sp_ipip_parms6_daddr(parms6);
+ return mlxsw_sp_ipip_parms6_daddr(&parms6);
}
WARN_ON(1);
@@ -125,6 +147,21 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr)
return !memcmp(&addr, &naddr, sizeof(naddr));
}
+static struct mlxsw_sp_ipip_parms
+mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev)
+{
+ struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+
+ return (struct mlxsw_sp_ipip_parms) {
+ .proto = MLXSW_SP_L3_PROTO_IPV4,
+ .saddr = mlxsw_sp_ipip_parms4_saddr(&parms),
+ .daddr = mlxsw_sp_ipip_parms4_daddr(&parms),
+ .link = parms.link,
+ .ikey = mlxsw_sp_ipip_parms4_ikey(&parms),
+ .okey = mlxsw_sp_ipip_parms4_okey(&parms),
+ };
+}
+
static int
mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -158,8 +195,8 @@ mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp,
u32 ikey;
parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
- has_ikey = mlxsw_sp_ipip_parms4_has_ikey(parms);
- ikey = mlxsw_sp_ipip_parms4_ikey(parms);
+ has_ikey = mlxsw_sp_ipip_parms4_has_ikey(&parms);
+ ikey = mlxsw_sp_ipip_parms4_ikey(&parms);
mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id);
@@ -218,12 +255,12 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
- lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(parms) ?
+ lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ?
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP :
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP;
return (struct mlxsw_sp_rif_ipip_lb_config){
.lb_ipipt = lb_ipipt,
- .okey = mlxsw_sp_ipip_parms4_okey(parms),
+ .okey = mlxsw_sp_ipip_parms4_okey(&parms),
.ul_protocol = MLXSW_SP_L3_PROTO_IPV4,
.saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4,
ol_dev),
@@ -231,48 +268,39 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
}
static int
-mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_ipip_entry *ipip_entry,
- struct netlink_ext_ack *extack)
+mlxsw_sp_ipip_ol_netdev_change_gre(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ const struct mlxsw_sp_ipip_parms *new_parms,
+ struct netlink_ext_ack *extack)
{
- union mlxsw_sp_l3addr old_saddr, new_saddr;
- union mlxsw_sp_l3addr old_daddr, new_daddr;
- struct ip_tunnel_parm new_parms;
+ const struct mlxsw_sp_ipip_parms *old_parms = &ipip_entry->parms;
bool update_tunnel = false;
bool update_decap = false;
bool update_nhs = false;
int err = 0;
- new_parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
-
- new_saddr = mlxsw_sp_ipip_parms4_saddr(new_parms);
- old_saddr = mlxsw_sp_ipip_parms4_saddr(ipip_entry->parms4);
- new_daddr = mlxsw_sp_ipip_parms4_daddr(new_parms);
- old_daddr = mlxsw_sp_ipip_parms4_daddr(ipip_entry->parms4);
-
- if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) {
+ if (!mlxsw_sp_l3addr_eq(&new_parms->saddr, &old_parms->saddr)) {
u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
/* Since the local address has changed, if there is another
* tunnel with a matching saddr, both need to be demoted.
*/
if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp,
- MLXSW_SP_L3_PROTO_IPV4,
- new_saddr, ul_tb_id,
+ new_parms->proto,
+ new_parms->saddr,
+ ul_tb_id,
ipip_entry)) {
mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
return 0;
}
update_tunnel = true;
- } else if ((mlxsw_sp_ipip_parms4_okey(ipip_entry->parms4) !=
- mlxsw_sp_ipip_parms4_okey(new_parms)) ||
- ipip_entry->parms4.link != new_parms.link) {
+ } else if (old_parms->okey != new_parms->okey ||
+ old_parms->link != new_parms->link) {
update_tunnel = true;
- } else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) {
+ } else if (!mlxsw_sp_l3addr_eq(&new_parms->daddr, &old_parms->daddr)) {
update_nhs = true;
- } else if (mlxsw_sp_ipip_parms4_ikey(ipip_entry->parms4) !=
- mlxsw_sp_ipip_parms4_ikey(new_parms)) {
+ } else if (old_parms->ikey != new_parms->ikey) {
update_decap = true;
}
@@ -288,23 +316,308 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
false, false, false,
extack);
+ if (err)
+ return err;
- ipip_entry->parms4 = new_parms;
- return err;
+ ipip_entry->parms = *new_parms;
+ return 0;
+}
+
+static int
+mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_ipip_parms new_parms;
+
+ new_parms = mlxsw_sp_ipip_netdev_parms_init_gre4(ipip_entry->ol_dev);
+ return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
+ &new_parms, extack);
+}
+
+static int
+mlxsw_sp_ipip_rem_addr_set_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp_ipip_rem_addr_unset_gre4(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+{
}
static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
.dev_type = ARPHRD_IPGRE,
.ul_proto = MLXSW_SP_L3_PROTO_IPV4,
+ .inc_parsing_depth = false,
+ .parms_init = mlxsw_sp_ipip_netdev_parms_init_gre4,
.nexthop_update = mlxsw_sp_ipip_nexthop_update_gre4,
.decap_config = mlxsw_sp_ipip_decap_config_gre4,
.can_offload = mlxsw_sp_ipip_can_offload_gre4,
.ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4,
.ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre4,
+ .rem_ip_addr_set = mlxsw_sp_ipip_rem_addr_set_gre4,
+ .rem_ip_addr_unset = mlxsw_sp_ipip_rem_addr_unset_gre4,
};
-const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[] = {
+static struct mlxsw_sp_ipip_parms
+mlxsw_sp1_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_parms parms = {0};
+
+ WARN_ON_ONCE(1);
+ return parms;
+}
+
+static int
+mlxsw_sp1_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp1_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ u32 tunnel_index)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static bool mlxsw_sp1_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ return false;
+}
+
+static struct mlxsw_sp_rif_ipip_lb_config
+mlxsw_sp1_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct mlxsw_sp_rif_ipip_lb_config config = {0};
+
+ WARN_ON_ONCE(1);
+ return config;
+}
+
+static int
+mlxsw_sp1_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp1_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static void
+mlxsw_sp1_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ WARN_ON_ONCE(1);
+}
+
+static const struct mlxsw_sp_ipip_ops mlxsw_sp1_ipip_gre6_ops = {
+ .dev_type = ARPHRD_IP6GRE,
+ .ul_proto = MLXSW_SP_L3_PROTO_IPV6,
+ .inc_parsing_depth = true,
+ .parms_init = mlxsw_sp1_ipip_netdev_parms_init_gre6,
+ .nexthop_update = mlxsw_sp1_ipip_nexthop_update_gre6,
+ .decap_config = mlxsw_sp1_ipip_decap_config_gre6,
+ .can_offload = mlxsw_sp1_ipip_can_offload_gre6,
+ .ol_loopback_config = mlxsw_sp1_ipip_ol_loopback_config_gre6,
+ .ol_netdev_change = mlxsw_sp1_ipip_ol_netdev_change_gre6,
+ .rem_ip_addr_set = mlxsw_sp1_ipip_rem_addr_set_gre6,
+ .rem_ip_addr_unset = mlxsw_sp1_ipip_rem_addr_unset_gre6,
+};
+
+const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[] = {
[MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops,
+ [MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp1_ipip_gre6_ops,
+};
+
+static struct mlxsw_sp_ipip_parms
+mlxsw_sp2_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev)
+{
+ struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+
+ return (struct mlxsw_sp_ipip_parms) {
+ .proto = MLXSW_SP_L3_PROTO_IPV6,
+ .saddr = mlxsw_sp_ipip_parms6_saddr(&parms),
+ .daddr = mlxsw_sp_ipip_parms6_daddr(&parms),
+ .link = parms.link,
+ .ikey = mlxsw_sp_ipip_parms6_ikey(&parms),
+ .okey = mlxsw_sp_ipip_parms6_okey(&parms),
+ };
+}
+
+static int
+mlxsw_sp2_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl)
+{
+ u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
+ enum mlxsw_reg_ratr_op op;
+
+ op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
+ mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_IPIP,
+ adj_index, rif_index);
+ mlxsw_reg_ratr_ipip6_entry_pack(ratr_pl,
+ ipip_entry->dip_kvdl_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+}
+
+static int
+mlxsw_sp2_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ u32 tunnel_index)
+{
+ u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
+ u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
+ char rtdp_pl[MLXSW_REG_RTDP_LEN];
+ struct __ip6_tnl_parm parms;
+ unsigned int type_check;
+ bool has_ikey;
+ u32 ikey;
+
+ parms = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
+ has_ikey = mlxsw_sp_ipip_parms6_has_ikey(&parms);
+ ikey = mlxsw_sp_ipip_parms6_ikey(&parms);
+
+ mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
+ mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id);
+
+ type_check = has_ikey ?
+ MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY :
+ MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE;
+
+ /* Linux demuxes tunnels based on packet SIP (which must match tunnel
+ * remote IP). Thus configure decap so that it filters out packets that
+ * are not IPv6 or have the wrong SIP. IPIP_DECAP_ERROR trap is
+ * generated for packets that fail these checks. Linux then handles
+ * such packets in the slow path and generates ICMP destination unreachable.
+ */
+ mlxsw_reg_rtdp_ipip6_pack(rtdp_pl, rif_index,
+ MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV6,
+ type_check, has_ikey,
+ ipip_entry->dip_kvdl_index, ikey);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
+}
+
+static bool mlxsw_sp2_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
+ bool inherit_ttl = tparm.hop_limit == 0;
+ __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
+
+ return (tparm.i_flags & ~okflags) == 0 &&
+ (tparm.o_flags & ~okflags) == 0 &&
+ inherit_ttl && inherit_tos &&
+ mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV6, ol_dev);
+}
+
+static struct mlxsw_sp_rif_ipip_lb_config
+mlxsw_sp2_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
+
+ lb_ipipt = mlxsw_sp_ipip_parms6_has_okey(&parms) ?
+ MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP :
+ MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP;
+ return (struct mlxsw_sp_rif_ipip_lb_config){
+ .lb_ipipt = lb_ipipt,
+ .okey = mlxsw_sp_ipip_parms6_okey(&parms),
+ .ul_protocol = MLXSW_SP_L3_PROTO_IPV6,
+ .saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV6,
+ ol_dev),
+ };
+}
+
+static int
+mlxsw_sp2_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_ipip_parms new_parms;
+
+ new_parms = mlxsw_sp2_ipip_netdev_parms_init_gre6(ipip_entry->ol_dev);
+ return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
+ &new_parms, extack);
+}
+
+static int
+mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ char rips_pl[MLXSW_REG_RIPS_LEN];
+ struct __ip6_tnl_parm parms6;
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ &ipip_entry->dip_kvdl_index);
+ if (err)
+ return err;
+
+ parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
+ mlxsw_reg_rips_pack(rips_pl, ipip_entry->dip_kvdl_index,
+ &parms6.raddr);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
+ if (err)
+ goto err_rips_write;
+
+ return 0;
+
+err_rips_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ ipip_entry->dip_kvdl_index);
+ return err;
+}
+
+static void
+mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ ipip_entry->dip_kvdl_index);
+}
+
+static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {
+ .dev_type = ARPHRD_IP6GRE,
+ .ul_proto = MLXSW_SP_L3_PROTO_IPV6,
+ .inc_parsing_depth = true,
+ .parms_init = mlxsw_sp2_ipip_netdev_parms_init_gre6,
+ .nexthop_update = mlxsw_sp2_ipip_nexthop_update_gre6,
+ .decap_config = mlxsw_sp2_ipip_decap_config_gre6,
+ .can_offload = mlxsw_sp2_ipip_can_offload_gre6,
+ .ol_loopback_config = mlxsw_sp2_ipip_ol_loopback_config_gre6,
+ .ol_netdev_change = mlxsw_sp2_ipip_ol_netdev_change_gre6,
+ .rem_ip_addr_set = mlxsw_sp2_ipip_rem_addr_set_gre6,
+ .rem_ip_addr_unset = mlxsw_sp2_ipip_rem_addr_unset_gre6,
+};
+
+const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[] = {
+ [MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops,
+ [MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp2_ipip_gre6_ops,
};
static int mlxsw_sp_ipip_ecn_encap_init_one(struct mlxsw_sp *mlxsw_sp,
@@ -363,3 +676,22 @@ int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+
+struct net_device *
+mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
+{
+ struct net *net = dev_net(ol_dev);
+ struct ip_tunnel *tun4;
+ struct ip6_tnl *tun6;
+
+ switch (ol_dev->type) {
+ case ARPHRD_IPGRE:
+ tun4 = netdev_priv(ol_dev);
+ return dev_get_by_index_rcu(net, tun4->parms.link);
+ case ARPHRD_IP6GRE:
+ tun6 = netdev_priv(ol_dev);
+ return dev_get_by_index_rcu(net, tun6->parms.link);
+ default:
+ return NULL;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index f0837b42d1d6..8cc259dcc8d0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -7,6 +7,7 @@
#include "spectrum_router.h"
#include <net/ip_fib.h>
#include <linux/if_tunnel.h>
+#include <net/ip6_tunnel.h>
struct ip_tunnel_parm
mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev);
@@ -21,23 +22,36 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr);
enum mlxsw_sp_ipip_type {
MLXSW_SP_IPIP_TYPE_GRE4,
+ MLXSW_SP_IPIP_TYPE_GRE6,
MLXSW_SP_IPIP_TYPE_MAX,
};
+struct mlxsw_sp_ipip_parms {
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr saddr;
+ union mlxsw_sp_l3addr daddr;
+ int link;
+ u32 ikey;
+ u32 okey;
+};
+
struct mlxsw_sp_ipip_entry {
enum mlxsw_sp_ipip_type ipipt;
struct net_device *ol_dev; /* Overlay. */
struct mlxsw_sp_rif_ipip_lb *ol_lb;
struct mlxsw_sp_fib_entry *decap_fib_entry;
struct list_head ipip_list_node;
- union {
- struct ip_tunnel_parm parms4;
- };
+ struct mlxsw_sp_ipip_parms parms;
+ u32 dip_kvdl_index;
};
struct mlxsw_sp_ipip_ops {
int dev_type;
enum mlxsw_sp_l3proto ul_proto; /* Underlay. */
+ bool inc_parsing_depth;
+
+ struct mlxsw_sp_ipip_parms
+ (*parms_init)(const struct net_device *ol_dev);
int (*nexthop_update)(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -58,8 +72,13 @@ struct mlxsw_sp_ipip_ops {
int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
struct netlink_ext_ack *extack);
+ int (*rem_ip_addr_set)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry);
+ void (*rem_ip_addr_unset)(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry);
};
-extern const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[];
+extern const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[];
+extern const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[];
#endif /* _MLXSW_IPIP_H_*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 9958d503bf0e..ddb5ad88b350 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -50,12 +50,24 @@ struct mlxsw_sp_qdisc_ops {
struct mlxsw_sp_qdisc *(*find_class)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
u32 parent);
unsigned int num_classes;
+
+ u8 (*get_prio_bitmap)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child);
+ int (*get_tclass_num)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child);
+};
+
+struct mlxsw_sp_qdisc_ets_band {
+ u8 prio_bitmap;
+ int tclass_num;
+};
+
+struct mlxsw_sp_qdisc_ets_data {
+ struct mlxsw_sp_qdisc_ets_band bands[IEEE_8021QAZ_MAX_TCS];
};
struct mlxsw_sp_qdisc {
u32 handle;
- int tclass_num;
- u8 prio_bitmap;
union {
struct red_stats red;
} xstats_base;
@@ -67,6 +79,10 @@ struct mlxsw_sp_qdisc {
u64 backlog;
} stats_base;
+ union {
+ struct mlxsw_sp_qdisc_ets_data *ets_data;
+ };
+
struct mlxsw_sp_qdisc_ops *ops;
struct mlxsw_sp_qdisc *parent;
struct mlxsw_sp_qdisc *qdiscs;
@@ -141,8 +157,7 @@ mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc *qdisc, void *data)
}
static struct mlxsw_sp_qdisc *
-mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
- bool root_only)
+mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent)
{
struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
@@ -150,8 +165,6 @@ mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
return NULL;
if (parent == TC_H_ROOT)
return &qdisc_state->root_qdisc;
- if (root_only)
- return NULL;
return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
mlxsw_sp_qdisc_walk_cb_find, &parent);
}
@@ -187,6 +200,32 @@ mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog;
}
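+/* The priority bitmap and traffic class of a qdisc are resolved by
+ * asking its parent: ETS/PRIO parents implement the getters and answer
+ * per band, other parents forward the question up the tree, and the
+ * root covers all eight priorities and the default traffic class.
+ */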
+static u8 mlxsw_sp_qdisc_get_prio_bitmap(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
+
+ if (!parent)
+ return 0xff;
+ if (!parent->ops->get_prio_bitmap)
+ return mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port, parent);
+ return parent->ops->get_prio_bitmap(parent, mlxsw_sp_qdisc);
+}
+
+#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
+
+static int mlxsw_sp_qdisc_get_tclass_num(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
+
+ if (!parent)
+ return MLXSW_SP_PORT_DEFAULT_TCLASS;
+ if (!parent->ops->get_tclass_num)
+ return mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, parent);
+ return parent->ops->get_tclass_num(parent, mlxsw_sp_qdisc);
+}
+
static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
@@ -194,6 +233,7 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
int err_hdroom = 0;
int err = 0;
+ int i;
if (!mlxsw_sp_qdisc)
return 0;
@@ -211,6 +251,9 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
if (!mlxsw_sp_qdisc->ops)
return 0;
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++)
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
+ &mlxsw_sp_qdisc->qdiscs[i]);
mlxsw_sp_qdisc_reduce_parent_backlog(mlxsw_sp_qdisc);
if (mlxsw_sp_qdisc->ops->destroy)
err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
@@ -226,6 +269,78 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
return err_hdroom ?: err;
}
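+/* Validate the qdisc tree against offload limitations: along any
+ * root-to-leaf path there may be at most one RED and at most one TBF,
+ * and ETS/PRIO may never appear below RED, TBF or another ETS/PRIO.
+ */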
+struct mlxsw_sp_qdisc_tree_validate {
+ bool forbid_ets;
+ bool forbid_tbf;
+ bool forbid_red;
+};
+
+static int
+__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_tree_validate validate);
+
+static int
+mlxsw_sp_qdisc_tree_validate_children(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_tree_validate validate)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
+ err = __mlxsw_sp_qdisc_tree_validate(&mlxsw_sp_qdisc->qdiscs[i],
+ validate);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_tree_validate validate)
+{
+ if (!mlxsw_sp_qdisc->ops)
+ return 0;
+
+ switch (mlxsw_sp_qdisc->ops->type) {
+ case MLXSW_SP_QDISC_FIFO:
+ break;
+ case MLXSW_SP_QDISC_RED:
+ if (validate.forbid_red)
+ return -EINVAL;
+ validate.forbid_red = true;
+ validate.forbid_ets = true;
+ break;
+ case MLXSW_SP_QDISC_TBF:
+ if (validate.forbid_tbf)
+ return -EINVAL;
+ validate.forbid_tbf = true;
+ validate.forbid_ets = true;
+ break;
+ case MLXSW_SP_QDISC_PRIO:
+ case MLXSW_SP_QDISC_ETS:
+ if (validate.forbid_ets)
+ return -EINVAL;
+ validate.forbid_ets = true;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return mlxsw_sp_qdisc_tree_validate_children(mlxsw_sp_qdisc, validate);
+}
+
+static int mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_qdisc_tree_validate validate = {};
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ mlxsw_sp_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
+ return __mlxsw_sp_qdisc_tree_validate(mlxsw_sp_qdisc, validate);
+}
+
static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
@@ -268,6 +383,10 @@ static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_qdisc->num_classes = ops->num_classes;
mlxsw_sp_qdisc->ops = ops;
mlxsw_sp_qdisc->handle = handle;
+ err = mlxsw_sp_qdisc_tree_validate(mlxsw_sp_port);
+ if (err)
+ goto err_replace;
+
err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
if (err)
goto err_replace;
@@ -406,13 +525,17 @@ mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *p_tx_bytes, u64 *p_tx_packets,
u64 *p_drops, u64 *p_backlog)
{
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
u64 tx_bytes, tx_packets;
+ u8 prio_bitmap;
+ int tclass_num;
+ prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
- mlxsw_sp_qdisc->prio_bitmap,
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
&tx_packets, &tx_bytes);
*p_tx_packets += tx_packets;
@@ -506,19 +629,24 @@ static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct red_stats *red_base;
+ u8 prio_bitmap;
+ int tclass_num;
+ prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats_base = &mlxsw_sp_qdisc->stats_base;
red_base = &mlxsw_sp_qdisc->xstats_base.red;
- mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
- mlxsw_sp_qdisc->prio_bitmap,
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
&stats_base->tx_packets,
&stats_base->tx_bytes);
+ red_base->prob_mark = xstats->tc_ecn[tclass_num];
red_base->prob_drop = xstats->wred_drop[tclass_num];
red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
@@ -532,8 +660,10 @@ static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
- mlxsw_sp_qdisc->tclass_num);
+ int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+ return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
}
static int
@@ -564,15 +694,33 @@ mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
+mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle, unsigned int band,
+ struct mlxsw_sp_qdisc *child_qdisc);
+static void
+mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle);
+
+static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct tc_red_qopt_offload_params *p = params;
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
+ int tclass_num;
u32 min, max;
u64 prob;
+ int err;
+
+ err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
+ &mlxsw_sp_qdisc->qdiscs[0]);
+ if (err)
+ return err;
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
+
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
/* calculate probability in percentage */
prob = p->probability;
@@ -615,22 +763,27 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
void *xstats_ptr)
{
struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
struct red_stats *res = xstats_ptr;
- int early_drops, pdrops;
+ int early_drops, marks, pdrops;
+ int tclass_num;
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
+ marks = xstats->tc_ecn[tclass_num] - xstats_base->prob_mark;
pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
xstats_base->pdrop;
res->pdrop += pdrops;
res->prob_drop += early_drops;
+ res->prob_mark += marks;
xstats_base->pdrop += pdrops;
xstats_base->prob_drop += early_drops;
+ xstats_base->prob_mark += marks;
return 0;
}
@@ -639,16 +792,19 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
u64 overlimits;
+ int tclass_num;
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats_base = &mlxsw_sp_qdisc->stats_base;
mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
- overlimits = xstats->wred_drop[tclass_num] - stats_base->overlimits;
+ overlimits = xstats->wred_drop[tclass_num] +
+ xstats->tc_ecn[tclass_num] - stats_base->overlimits;
stats_ptr->qstats->overlimits += overlimits;
stats_base->overlimits += overlimits;
@@ -660,11 +816,12 @@ static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_leaf_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
u32 parent)
{
- return NULL;
+ /* RED and TBF are formally classful qdiscs, but all class references,
+ * including X:0, just refer to one and the same class.
+ */
+ return &mlxsw_sp_qdisc->qdiscs[0];
}
-#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
-
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
.type = MLXSW_SP_QDISC_RED,
.check_params = mlxsw_sp_qdisc_red_check_params,
@@ -675,14 +832,19 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
.find_class = mlxsw_sp_qdisc_leaf_find_class,
+ .num_classes = 1,
};
+static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u8 band, u32 child_handle);
+
static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -704,6 +866,9 @@ static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
case TC_RED_STATS:
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
+ case TC_RED_GRAFT:
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
+ p->child_handle);
default:
return -EOPNOTSUPP;
}
@@ -744,9 +909,12 @@ static int
mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
+ int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
- mlxsw_sp_qdisc->tclass_num, 0,
+ tclass_num, 0,
MLXSW_REG_QEEC_MAS_DIS, 0);
}
@@ -830,9 +998,19 @@ mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
{
struct tc_tbf_qopt_offload_replace_params *p = params;
u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
+ int tclass_num;
u8 burst_size;
int err;
+ err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
+ &mlxsw_sp_qdisc->qdiscs[0]);
+ if (err)
+ return err;
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
+
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
if (WARN_ON_ONCE(err))
/* check_params above was supposed to reject this value. */
@@ -848,7 +1026,7 @@ mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
*/
return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
- mlxsw_sp_qdisc->tclass_num, 0,
+ tclass_num, 0,
rate_kbps, burst_size);
}
@@ -881,6 +1059,7 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
.find_class = mlxsw_sp_qdisc_leaf_find_class,
+ .num_classes = 1,
};
static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -888,7 +1067,7 @@ static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -907,6 +1086,9 @@ static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
case TC_TBF_STATS:
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
+ case TC_TBF_GRAFT:
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
+ p->child_handle);
default:
return -EOPNOTSUPP;
}
@@ -957,6 +1139,32 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};
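+/* A FIFO may be offloaded before its parent qdisc exists. Such a
+ * notification is remembered in future_fifos under future_handle and
+ * replayed by mlxsw_sp_qdisc_future_fifo_replace() once the parent is
+ * created.
+ */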
+static int
+mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle, unsigned int band,
+ struct mlxsw_sp_qdisc *child_qdisc)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+
+ if (handle == qdisc_state->future_handle &&
+ qdisc_state->future_fifos[band])
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
+ child_qdisc,
+ &mlxsw_sp_qdisc_ops_fifo,
+ NULL);
+ return 0;
+}
+
+static void
+mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+
+ qdisc_state->future_handle = handle;
+ memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
+}
+
static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_fifo_qopt_offload *p)
{
@@ -965,16 +1173,15 @@ static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned int band;
u32 parent_handle;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
parent_handle = TC_H_MAJ(p->parent);
if (parent_handle != qdisc_state->future_handle) {
/* This notification is for a different Qdisc than
* previously. Wipe the future cache.
*/
- memset(qdisc_state->future_fifos, 0,
- sizeof(qdisc_state->future_fifos));
- qdisc_state->future_handle = parent_handle;
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port,
+ parent_handle);
}
band = TC_H_MIN(p->parent) - 1;
@@ -1033,11 +1240,10 @@ static int __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
i, 0, false, 0);
- mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- &mlxsw_sp_qdisc->qdiscs[i]);
- mlxsw_sp_qdisc->qdiscs[i].prio_bitmap = 0;
}
+ kfree(mlxsw_sp_qdisc->ets_data);
+ mlxsw_sp_qdisc->ets_data = NULL;
return 0;
}
@@ -1066,6 +1272,31 @@ mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
return __mlxsw_sp_qdisc_ets_check_params(p->bands);
}
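+/* Reset the statistics of every offloaded qdisc in a subtree while
+ * keeping its backlog baseline, so that backlog accounting survives a
+ * re-prioritization of the band above it.
+ */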
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_walk_cb_clean_stats(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *mlxsw_sp_port)
+{
+ u64 backlog;
+
+ if (mlxsw_sp_qdisc->ops) {
+ backlog = mlxsw_sp_qdisc->stats_base.backlog;
+ if (mlxsw_sp_qdisc->ops->clean_stats)
+ mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ mlxsw_sp_qdisc->stats_base.backlog = backlog;
+ }
+
+ return NULL;
+}
+
+static void
+mlxsw_sp_qdisc_tree_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ mlxsw_sp_qdisc_walk(mlxsw_sp_qdisc, mlxsw_sp_qdisc_walk_cb_clean_stats,
+ mlxsw_sp_port);
+}
+
static int
__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
@@ -1074,69 +1305,80 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
const unsigned int *weights,
const u8 *priomap)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc_ets_data *ets_data = mlxsw_sp_qdisc->ets_data;
+ struct mlxsw_sp_qdisc_ets_band *ets_band;
struct mlxsw_sp_qdisc *child_qdisc;
- int tclass, i, band, backlog;
- u8 old_priomap;
+ u8 old_priomap, new_priomap;
+ int i, band;
int err;
+ if (!ets_data) {
+ ets_data = kzalloc(sizeof(*ets_data), GFP_KERNEL);
+ if (!ets_data)
+ return -ENOMEM;
+ mlxsw_sp_qdisc->ets_data = ets_data;
+
+ for (band = 0; band < mlxsw_sp_qdisc->num_classes; band++) {
+ int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+
+ ets_band = &ets_data->bands[band];
+ ets_band->tclass_num = tclass_num;
+ }
+ }
+
for (band = 0; band < nbands; band++) {
- tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+ int tclass_num;
+
child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
- old_priomap = child_qdisc->prio_bitmap;
- child_qdisc->prio_bitmap = 0;
+ ets_band = &ets_data->bands[band];
+
+ tclass_num = ets_band->tclass_num;
+ old_priomap = ets_band->prio_bitmap;
+ new_priomap = 0;
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
- tclass, 0, !!quanta[band],
+ tclass_num, 0, !!quanta[band],
weights[band]);
if (err)
return err;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (priomap[i] == band) {
- child_qdisc->prio_bitmap |= BIT(i);
+ new_priomap |= BIT(i);
if (BIT(i) & old_priomap)
continue;
err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
- i, tclass);
+ i, tclass_num);
if (err)
return err;
}
}
- child_qdisc->tclass_num = tclass;
+ ets_band->prio_bitmap = new_priomap;
- if (old_priomap != child_qdisc->prio_bitmap &&
- child_qdisc->ops && child_qdisc->ops->clean_stats) {
- backlog = child_qdisc->stats_base.backlog;
- child_qdisc->ops->clean_stats(mlxsw_sp_port,
- child_qdisc);
- child_qdisc->stats_base.backlog = backlog;
- }
+ if (old_priomap != new_priomap)
+ mlxsw_sp_qdisc_tree_clean_stats(mlxsw_sp_port,
+ child_qdisc);
- if (handle == qdisc_state->future_handle &&
- qdisc_state->future_fifos[band]) {
- err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
- child_qdisc,
- &mlxsw_sp_qdisc_ops_fifo,
- NULL);
- if (err)
- return err;
- }
+ err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle,
+ band, child_qdisc);
+ if (err)
+ return err;
}
for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
- tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+ ets_band = &ets_data->bands[band];
+ ets_band->prio_bitmap = 0;
+
child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
- child_qdisc->prio_bitmap = 0;
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
+
mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
- tclass, 0, false, 0);
+ ets_band->tclass_num, 0, false, 0);
}
- qdisc_state->future_handle = TC_H_UNSPEC;
- memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
return 0;
}
@@ -1238,6 +1480,31 @@ mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
return &mlxsw_sp_qdisc->qdiscs[band];
}
+static struct mlxsw_sp_qdisc_ets_band *
+mlxsw_sp_qdisc_ets_get_band(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child)
+{
+ unsigned int band = child - mlxsw_sp_qdisc->qdiscs;
+
+ if (WARN_ON(band >= IEEE_8021QAZ_MAX_TCS))
+ band = 0;
+ return &mlxsw_sp_qdisc->ets_data->bands[band];
+}
+
+static u8
+mlxsw_sp_qdisc_ets_get_prio_bitmap(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child)
+{
+ return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->prio_bitmap;
+}
+
+static int
+mlxsw_sp_qdisc_ets_get_tclass_num(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child)
+{
+ return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->tclass_num;
+}
+
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.type = MLXSW_SP_QDISC_PRIO,
.check_params = mlxsw_sp_qdisc_prio_check_params,
@@ -1248,6 +1515,8 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
.find_class = mlxsw_sp_qdisc_prio_find_class,
.num_classes = IEEE_8021QAZ_MAX_TCS,
+ .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
+ .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
};
static int
@@ -1299,6 +1568,8 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
.find_class = mlxsw_sp_qdisc_prio_find_class,
.num_classes = IEEE_8021QAZ_MAX_TCS,
+ .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
+ .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
};
/* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
@@ -1326,10 +1597,9 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
* grafted corresponds to the parent handle. If the two don't match, we
* unoffload the child.
*/
-static int
-__mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- u8 band, u32 child_handle)
+static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u8 band, u32 child_handle)
{
struct mlxsw_sp_qdisc *old_qdisc;
u32 parent;
@@ -1362,21 +1632,12 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
-static int
-mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- struct tc_prio_qopt_offload_graft_params *p)
-{
- return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
- p->band, p->child_handle);
-}
-
static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -1396,8 +1657,9 @@ static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
case TC_PRIO_GRAFT:
- return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
- &p->graft_params);
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->graft_params.band,
+ p->graft_params.child_handle);
default:
return -EOPNOTSUPP;
}
@@ -1420,7 +1682,7 @@ static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -1440,9 +1702,9 @@ static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
case TC_ETS_GRAFT:
- return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
- p->graft_params.band,
- p->graft_params.child_handle);
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->graft_params.band,
+ p->graft_params.child_handle);
default:
return -EOPNOTSUPP;
}
@@ -1472,6 +1734,7 @@ struct mlxsw_sp_qevent_binding {
u32 handle;
int tclass_num;
enum mlxsw_sp_span_trigger span_trigger;
+ unsigned int action_mask;
};
static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
@@ -1482,8 +1745,10 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_span_agent_parms *agent_parms,
int *p_span_id)
{
+ enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
struct mlxsw_sp_span_trigger_parms trigger_parms = {};
+ bool ingress;
int span_id;
int err;
@@ -1491,18 +1756,19 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true);
+ ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
if (err)
goto err_analyzed_port_get;
trigger_parms.span_id = span_id;
trigger_parms.probability_rate = 1;
- err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+ err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
&trigger_parms);
if (err)
goto err_agent_bind;
- err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, qevent_binding->span_trigger,
+ err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, span_trigger,
qevent_binding->tclass_num);
if (err)
goto err_trigger_enable;
@@ -1511,10 +1777,10 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
return 0;
err_trigger_enable:
- mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
&trigger_parms);
err_agent_bind:
- mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
err_analyzed_port_get:
mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
return err;
@@ -1524,16 +1790,20 @@ static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_qevent_binding *qevent_binding,
int span_id)
{
+ enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
struct mlxsw_sp_span_trigger_parms trigger_parms = {
.span_id = span_id,
};
+ bool ingress;
- mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger,
+ ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
+
+ mlxsw_sp_span_trigger_disable(mlxsw_sp_port, span_trigger,
qevent_binding->tclass_num);
- mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
&trigger_parms);
- mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
}
@@ -1583,10 +1853,17 @@ static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
}
-static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mall_entry *mall_entry,
- struct mlxsw_sp_qevent_binding *qevent_binding)
+static int
+mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ struct netlink_ext_ack *extack)
{
+ if (!(BIT(mall_entry->type) & qevent_binding->action_mask)) {
+ NL_SET_ERR_MSG(extack, "Action not supported at this qevent");
+ return -EOPNOTSUPP;
+ }
+
switch (mall_entry->type) {
case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
@@ -1614,15 +1891,17 @@ static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
}
}
-static int mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
- struct mlxsw_sp_qevent_binding *qevent_binding)
+static int
+mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_mall_entry *mall_entry;
int err;
list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
- qevent_binding);
+ qevent_binding, extack);
if (err)
goto err_entry_configure;
}
@@ -1646,13 +1925,17 @@ static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qe
qevent_binding);
}
-static int mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block)
+static int
+mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_qevent_binding *qevent_binding;
int err;
list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
- err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
+ err = mlxsw_sp_qevent_binding_configure(qevent_block,
+ qevent_binding,
+ extack);
if (err)
goto err_binding_configure;
}
@@ -1737,7 +2020,7 @@ static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);
- err = mlxsw_sp_qevent_block_configure(qevent_block);
+ err = mlxsw_sp_qevent_block_configure(qevent_block, f->common.extack);
if (err)
goto err_block_configure;
@@ -1825,7 +2108,8 @@ static void mlxsw_sp_qevent_block_release(void *cb_priv)
static struct mlxsw_sp_qevent_binding *
mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
- enum mlxsw_sp_span_trigger span_trigger)
+ enum mlxsw_sp_span_trigger span_trigger,
+ unsigned int action_mask)
{
struct mlxsw_sp_qevent_binding *binding;
@@ -1837,6 +2121,7 @@ mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
binding->handle = handle;
binding->tclass_num = tclass_num;
binding->span_trigger = span_trigger;
+ binding->action_mask = action_mask;
return binding;
}
@@ -1862,9 +2147,11 @@ mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
return NULL;
}
-static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f,
- enum mlxsw_sp_span_trigger span_trigger)
+static int
+mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ enum mlxsw_sp_span_trigger span_trigger,
+ unsigned int action_mask)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_qevent_binding *qevent_binding;
@@ -1872,6 +2159,7 @@ static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_po
struct flow_block_cb *block_cb;
struct mlxsw_sp_qdisc *qdisc;
bool register_block = false;
+ int tclass_num;
int err;
block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
@@ -1904,14 +2192,19 @@ static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_po
goto err_binding_exists;
}
- qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, f->sch->handle,
- qdisc->tclass_num, span_trigger);
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, qdisc);
+ qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port,
+ f->sch->handle,
+ tclass_num,
+ span_trigger,
+ action_mask);
if (IS_ERR(qevent_binding)) {
err = PTR_ERR(qevent_binding);
goto err_binding_create;
}
- err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
+ err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding,
+ f->extack);
if (err)
goto err_binding_configure;
@@ -1963,15 +2256,19 @@ static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp
}
}
-static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f,
- enum mlxsw_sp_span_trigger span_trigger)
+static int
+mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ enum mlxsw_sp_span_trigger span_trigger,
+ unsigned int action_mask)
{
f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
- return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, span_trigger);
+ return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f,
+ span_trigger,
+ action_mask);
case FLOW_BLOCK_UNBIND:
mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
return 0;
@@ -1983,7 +2280,22 @@ static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f)
{
- return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, MLXSW_SP_SPAN_TRIGGER_EARLY_DROP);
+ unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR) |
+ BIT(MLXSW_SP_MALL_ACTION_TYPE_TRAP);
+
+ return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
+ MLXSW_SP_SPAN_TRIGGER_EARLY_DROP,
+ action_mask);
+}
+
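+/* Unlike early_drop, the mark qevent hangs off the ECN trigger, which
+ * is an egress trigger (see mlxsw_sp_span_trigger_is_ingress()), and
+ * only the mirror action is advertised for it.
+ */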
+int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f)
+{
+ unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR);
+
+ return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
+ MLXSW_SP_SPAN_TRIGGER_ECN,
+ action_mask);
}
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -1995,8 +2307,6 @@ int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
return -ENOMEM;
mutex_init(&qdisc_state->lock);
- qdisc_state->root_qdisc.prio_bitmap = 0xff;
- qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
mlxsw_sp_port->qdisc = qdisc_state;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 331d26c1181e..1e141b5944cd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -115,6 +115,7 @@ struct mlxsw_sp_rif_ops {
struct mlxsw_sp_router_ops {
int (*init)(struct mlxsw_sp *mlxsw_sp);
+ int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
};
static struct mlxsw_sp_rif *
@@ -1055,22 +1056,13 @@ static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp->router->vrs);
}
-static struct net_device *
-__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
-{
- struct ip_tunnel *tun = netdev_priv(ol_dev);
- struct net *net = dev_net(ol_dev);
-
- return dev_get_by_index_rcu(net, tun->parms.link);
-}
-
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
struct net_device *d;
u32 tb_id;
rcu_read_lock();
- d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
if (d)
tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
else
@@ -1116,6 +1108,7 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_ops *ipip_ops;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_ipip_entry *ret = NULL;
+ int err;
ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
@@ -1131,26 +1124,30 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
ipip_entry->ipipt = ipipt;
ipip_entry->ol_dev = ol_dev;
+ ipip_entry->parms = ipip_ops->parms_init(ol_dev);
- switch (ipip_ops->ul_proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- WARN_ON(1);
- break;
+ err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto err_rem_ip_addr_set;
}
return ipip_entry;
+err_rem_ip_addr_set:
+ mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
err_ol_ipip_lb_create:
kfree(ipip_entry);
return ret;
}
-static void
-mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
+static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
{
+ const struct mlxsw_sp_ipip_ops *ipip_ops =
+ mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+
+ ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
kfree(ipip_entry);
}
@@ -1174,6 +1171,32 @@ mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}
+static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_ipip_type ipipt)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
+
+ /* Not all tunnels need to increase the default parsing depth
+ * (96 bytes).
+ */
+ if (ipip_ops->inc_parsing_depth)
+ return mlxsw_sp_parsing_depth_inc(mlxsw_sp);
+
+ return 0;
+}
+
+static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_ipip_type ipipt)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops =
+ mlxsw_sp->router->ipip_ops_arr[ipipt];
+
+ if (ipip_ops->inc_parsing_depth)
+ mlxsw_sp_parsing_depth_dec(mlxsw_sp);
+}
+
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
@@ -1187,18 +1210,32 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
+ err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
+ ipip_entry->ipipt);
+ if (err)
+ goto err_parsing_depth_inc;
+
ipip_entry->decap_fib_entry = fib_entry;
fib_entry->decap.ipip_entry = ipip_entry;
fib_entry->decap.tunnel_index = tunnel_index;
+
return 0;
+
+err_parsing_depth_inc:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ fib_entry->decap.tunnel_index);
+ return err;
}
static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
+ enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;
+
/* Unlink this node from the IPIP entry that it's the decap entry of. */
fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
fib_entry->decap.ipip_entry = NULL;
+ mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1, fib_entry->decap.tunnel_index);
}
@@ -1309,6 +1346,11 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
saddr_len = 4;
saddr_prefix_len = 32;
break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ saddrp = &saddr.addr6;
+ saddr_len = 16;
+ saddr_prefix_len = 128;
+ break;
default:
WARN_ON(1);
return NULL;
@@ -1345,7 +1387,7 @@ mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
list_del(&ipip_entry->ipip_list_node);
- mlxsw_sp_ipip_entry_dealloc(ipip_entry);
+ mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
}
static bool
@@ -1450,7 +1492,7 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
struct net_device *ipip_ul_dev;
rcu_read_lock();
- ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
rcu_read_unlock();
if (ipip_ul_dev == ul_dev)
@@ -1536,23 +1578,34 @@ mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
u16 ul_rif_id, bool enable)
{
struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
+ enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
struct mlxsw_sp_rif *rif = &lb_rif->common;
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
char ritr_pl[MLXSW_REG_RITR_LEN];
+ struct in6_addr *saddr6;
u32 saddr4;
+ ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
switch (lb_cf.ul_protocol) {
case MLXSW_SP_L3_PROTO_IPV4:
saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
- MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
- ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
+ ipip_options, ul_vr_id,
+ ul_rif_id, saddr4,
+ lb_cf.okey);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- return -EAFNOSUPPORT;
+ saddr6 = &lb_cf.saddr.addr6;
+ mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
+ rif->rif_index, rif->vr_id, rif->dev->mtu);
+ mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
+ ipip_options, ul_vr_id,
+ ul_rif_id, saddr6,
+ lb_cf.okey);
+ break;
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
@@ -1827,7 +1880,7 @@ static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
struct net_device *ipip_ul_dev;
rcu_read_lock();
- ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
rcu_read_unlock();
if (ipip_ul_dev == ul_dev)
mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
@@ -4152,7 +4205,7 @@ static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
bool is_up;
rcu_read_lock();
- ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
rcu_read_unlock();
@@ -6069,8 +6122,8 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
}
static void
-mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
@@ -6081,6 +6134,13 @@ mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
}
}
+static void
+mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib4_entry *fib4_entry)
+{
+ mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
+}
+
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node,
@@ -6141,7 +6201,7 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
fib_info_put(fib4_entry->fi);
- mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
+ mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
fib_node->fib);
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
@@ -6927,11 +6987,38 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
}
-static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry,
- const struct fib6_info *rt)
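+/* Local IPv6 routes normally trap to the CPU. But when the route's
+ * destination matches the local underlay address of an offloaded
+ * ip6gre device that is up, program an IP-in-IP decap entry instead.
+ */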
+static int
+mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ const struct fib6_info *rt)
{
- if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
+ struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
+ union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
+ int ifindex = nhgi->nexthops[0].ifindex;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
+ MLXSW_SP_L3_PROTO_IPV6,
+ dip);
+
+ if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
+ return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
+ ipip_entry);
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ const struct fib6_info *rt)
+{
+ if (rt->fib6_flags & RTF_LOCAL)
+ return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
+ rt);
+ if (rt->fib6_flags & RTF_ANYCAST)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
else if (rt->fib6_type == RTN_BLACKHOLE)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
@@ -6941,6 +7028,8 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
else
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+
+ return 0;
}
static void
@@ -6998,12 +7087,16 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_nexthop_group_vr_link;
- mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
+ err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
+ if (err)
+ goto err_fib6_entry_type_set;
fib_entry->fib_node = fib_node;
return fib6_entry;
+err_fib6_entry_type_set:
+ mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
err_nexthop6_group_get:
@@ -7022,11 +7115,19 @@ err_fib_entry_priv_create:
return ERR_PTR(err);
}
+static void
+mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
+}
+
static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib6_entry *fib6_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
+ mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
fib_node->fib);
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
@@ -9476,7 +9577,6 @@ static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
- mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
@@ -9489,6 +9589,18 @@ static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}
+static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
+ return mlxsw_sp_ipips_init(mlxsw_sp);
+}
+
+static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
+ return mlxsw_sp_ipips_init(mlxsw_sp);
+}
+
static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
@@ -9903,6 +10015,7 @@ static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
.init = mlxsw_sp1_router_init,
+ .ipips_init = mlxsw_sp1_ipips_init,
};
static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
@@ -9918,6 +10031,7 @@ static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
.init = mlxsw_sp2_router_init,
+ .ipips_init = mlxsw_sp2_ipips_init,
};
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
@@ -9963,7 +10077,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rifs_init;
- err = mlxsw_sp_ipips_init(mlxsw_sp);
+ err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
if (err)
goto err_ipips_init;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index cc32d25c3bb2..1d0d28f8ff05 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -226,6 +226,8 @@ static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp);
+struct net_device *
+mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev);
extern const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 3398cc01e5ec..f5f819aa9a65 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -1650,6 +1650,22 @@ void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
}
+bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger)
+{
+ switch (trigger) {
+ case MLXSW_SP_SPAN_TRIGGER_INGRESS:
+ case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
+ case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
+ return true;
+ case MLXSW_SP_SPAN_TRIGGER_EGRESS:
+ case MLXSW_SP_SPAN_TRIGGER_ECN:
+ return false;
+ }
+
+ WARN_ON_ONCE(1);
+ return false;
+}
+
static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
{
size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index efaefd1ae863..82e711afb02b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -120,6 +120,7 @@ int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_sp_span_trigger trigger, u8 tc);
void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_sp_span_trigger trigger, u8 tc);
+bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger);
extern const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops;
extern const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops;
diff --git a/drivers/net/ethernet/micrel/Makefile b/drivers/net/ethernet/micrel/Makefile
index 5cc00d22c708..6ecc4eb30e74 100644
--- a/drivers/net/ethernet/micrel/Makefile
+++ b/drivers/net/ethernet/micrel/Makefile
@@ -4,8 +4,6 @@
#
obj-$(CONFIG_KS8842) += ks8842.o
-obj-$(CONFIG_KS8851) += ks8851.o
-ks8851-objs = ks8851_common.o ks8851_spi.o
-obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
-ks8851_mll-objs = ks8851_common.o ks8851_par.o
+obj-$(CONFIG_KS8851) += ks8851_common.o ks8851_spi.o
+obj-$(CONFIG_KS8851_MLL) += ks8851_common.o ks8851_par.o
obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index b27713906d3a..c11b118dc415 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -348,13 +348,15 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
}
-static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
+static void ks8842_init_mac_addr(struct ks8842_adapter *adapter)
{
+ u8 addr[ETH_ALEN];
int i;
u16 mac;
for (i = 0; i < ETH_ALEN; i++)
- dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
+ addr[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
+ eth_hw_addr_set(adapter->netdev, addr);
if (adapter->conf_flags & MICREL_KS884X) {
/*
@@ -380,7 +382,7 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
}
}
-static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
+static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, const u8 *mac)
{
unsigned long flags;
unsigned i;
@@ -1064,7 +1066,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, mac, netdev->addr_len);
+ eth_hw_addr_set(netdev, mac);
ks8842_write_mac_addr(adapter, mac);
return 0;
@@ -1191,12 +1193,11 @@ static int ks8842_probe(struct platform_device *pdev)
if (i < netdev->addr_len)
/* an address was passed, use it */
- memcpy(netdev->dev_addr, pdata->macaddr,
- netdev->addr_len);
+ eth_hw_addr_set(netdev, pdata->macaddr);
}
if (i == netdev->addr_len) {
- ks8842_read_mac_addr(adapter, netdev->dev_addr);
+ ks8842_init_mac_addr(adapter);
if (!is_valid_ether_addr(netdev->dev_addr))
eth_hw_addr_random(netdev);
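
The ks8842 hunks follow the pattern used throughout this series: since netdev->dev_addr is on its way to becoming const, drivers assemble the MAC in a local buffer and publish it once through eth_hw_addr_set(). A hedged sketch of that shape (example_init_mac and its rom source are illustrative, not driver code):

#include <linux/etherdevice.h>

static void example_init_mac(struct net_device *dev, const u8 *rom)
{
	u8 addr[ETH_ALEN];

	memcpy(addr, rom, ETH_ALEN);		/* gather from hardware/ROM */
	if (is_valid_ether_addr(addr))
		eth_hw_addr_set(dev, addr);	/* single publish point */
	else
		eth_hw_addr_random(dev);
}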
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index e2eb0caeac82..6f34a61739b6 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -427,7 +427,7 @@ struct ks8851_net {
int ks8851_probe_common(struct net_device *netdev, struct device *dev,
int msg_en);
-int ks8851_remove_common(struct device *dev);
+void ks8851_remove_common(struct device *dev);
int ks8851_suspend(struct device *dev);
int ks8851_resume(struct device *dev);
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 3f69bb59ba49..691206f19ea7 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -165,6 +165,7 @@ static void ks8851_read_mac_addr(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
unsigned long flags;
+ u8 addr[ETH_ALEN];
u16 reg;
int i;
@@ -172,9 +173,10 @@ static void ks8851_read_mac_addr(struct net_device *dev)
for (i = 0; i < ETH_ALEN; i += 2) {
reg = ks8851_rdreg16(ks, KS_MAR(i));
- dev->dev_addr[i] = reg >> 8;
- dev->dev_addr[i + 1] = reg & 0xff;
+ addr[i] = reg >> 8;
+ addr[i + 1] = reg & 0xff;
}
+ eth_hw_addr_set(dev, addr);
ks8851_unlock(ks, &flags);
}
@@ -195,7 +197,7 @@ static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np)
struct net_device *dev = ks->netdev;
int ret;
- ret = of_get_mac_address(np, dev->dev_addr);
+ ret = of_get_ethdev_address(np, dev);
if (!ret) {
ks8851_write_mac_addr(dev);
return;
@@ -672,7 +674,7 @@ static int ks8851_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
return ks8851_write_mac_addr(dev);
}
@@ -1057,6 +1059,7 @@ int ks8851_suspend(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(ks8851_suspend);
int ks8851_resume(struct device *dev)
{
@@ -1070,6 +1073,7 @@ int ks8851_resume(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(ks8851_resume);
#endif
static int ks8851_register_mdiobus(struct ks8851_net *ks, struct device *dev)
@@ -1243,8 +1247,9 @@ err_reg:
err_reg_io:
return ret;
}
+EXPORT_SYMBOL_GPL(ks8851_probe_common);
-int ks8851_remove_common(struct device *dev)
+void ks8851_remove_common(struct device *dev)
{
struct ks8851_net *priv = dev_get_drvdata(dev);
@@ -1258,6 +1263,9 @@ int ks8851_remove_common(struct device *dev)
gpio_set_value(priv->gpio, 0);
regulator_disable(priv->vdd_reg);
regulator_disable(priv->vdd_io);
-
- return 0;
}
+EXPORT_SYMBOL_GPL(ks8851_remove_common);
+
+MODULE_DESCRIPTION("KS8851 Network driver");
+MODULE_AUTHOR("Ben Dooks <[email protected]>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 2e8fcce50f9d..2e25798c610e 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -327,7 +327,9 @@ static int ks8851_probe_par(struct platform_device *pdev)
static int ks8851_remove_par(struct platform_device *pdev)
{
- return ks8851_remove_common(&pdev->dev);
+ ks8851_remove_common(&pdev->dev);
+
+ return 0;
}
static const struct of_device_id ks8851_match_table[] = {
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 479406ecbaa3..0303e727e99f 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -454,7 +454,9 @@ static int ks8851_probe_spi(struct spi_device *spi)
static int ks8851_remove_spi(struct spi_device *spi)
{
- return ks8851_remove_common(&spi->dev);
+ ks8851_remove_common(&spi->dev);
+
+ return 0;
}
static const struct of_device_id ks8851_match_table[] = {
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index a0ee155f9f51..99c0c1491af2 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4033,7 +4033,7 @@ static void hw_set_add_addr(struct ksz_hw *hw)
}
}
-static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
+static int hw_add_addr(struct ksz_hw *hw, const u8 *mac_addr)
{
int i;
int j = ADDITIONAL_ENTRIES;
@@ -4054,7 +4054,7 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
return -1;
}
-static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
+static int hw_del_addr(struct ksz_hw *hw, const u8 *mac_addr)
{
int i;
@@ -5581,7 +5581,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
}
- memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, mac->sa_data);
interrupt = hw_block_intr(hw);
@@ -7005,12 +7005,14 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
dev->mem_end = dev->mem_start + reg_len - 1;
dev->irq = pdev->irq;
if (MAIN_PORT == i)
- memcpy(dev->dev_addr, hw_priv->hw.override_addr,
- ETH_ALEN);
+ eth_hw_addr_set(dev, hw_priv->hw.override_addr);
else {
- memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
+ u8 addr[ETH_ALEN];
+
+ ether_addr_copy(addr, sw->other_addr);
if (ether_addr_equal(sw->other_addr, hw->override_addr))
- dev->dev_addr[5] += port->first_port;
+ addr[5] += port->first_port;
+ eth_hw_addr_set(dev, addr);
}
dev->netdev_ops = &netdev_ops;
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 09cdc2f2e7ff..634ac7649c43 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -517,7 +517,7 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- ether_addr_copy(dev->dev_addr, address->sa_data);
+ eth_hw_addr_set(dev, address->sa_data);
return enc28j60_set_hw_macaddr(dev);
}
@@ -1539,7 +1539,6 @@ static const struct net_device_ops enc28j60_netdev_ops = {
static int enc28j60_probe(struct spi_device *spi)
{
- unsigned char macaddr[ETH_ALEN];
struct net_device *dev;
struct enc28j60_net *priv;
int ret = 0;
@@ -1572,9 +1571,7 @@ static int enc28j60_probe(struct spi_device *spi)
goto error_irq;
}
- if (device_get_mac_address(&spi->dev, macaddr, sizeof(macaddr)))
- ether_addr_copy(dev->dev_addr, macaddr);
- else
+ if (device_get_ethdev_address(&spi->dev, dev))
eth_hw_addr_random(dev);
enc28j60_set_hw_macaddr(dev);
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 796e46a53926..81a8ccca7e5e 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -497,13 +497,19 @@ static struct regmap_bus phymap_encx24j600 = {
.reg_read = regmap_encx24j600_phy_reg_read,
};
-void devm_regmap_init_encx24j600(struct device *dev,
- struct encx24j600_context *ctx)
+int devm_regmap_init_encx24j600(struct device *dev,
+ struct encx24j600_context *ctx)
{
mutex_init(&ctx->mutex);
regcfg.lock_arg = ctx;
ctx->regmap = devm_regmap_init(dev, &regmap_encx24j600, ctx, &regcfg);
+ if (IS_ERR(ctx->regmap))
+ return PTR_ERR(ctx->regmap);
ctx->phymap = devm_regmap_init(dev, &phymap_encx24j600, ctx, &phycfg);
+ if (IS_ERR(ctx->phymap))
+ return PTR_ERR(ctx->phymap);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600);
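
devm_regmap_init() signals failure through ERR_PTR() and never returns NULL, which is why the helper can now report a real errno instead of returning void. The standard idiom, as a sketch (bus-less regmap assumed, as encx24j600 uses reg_read/reg_write callbacks):

#include <linux/err.h>
#include <linux/regmap.h>

static int example_regmap_setup(struct device *dev,
				const struct regmap_config *cfg,
				struct regmap **out)
{
	/* NULL bus: the config supplies reg_read/reg_write directly */
	struct regmap *map = devm_regmap_init(dev, NULL, dev, cfg);

	if (IS_ERR(map))
		return PTR_ERR(map);
	*out = map;
	return 0;
}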
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index c548e6372352..b90efc80fb59 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -761,7 +761,7 @@ static int encx24j600_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, address->sa_data);
return encx24j600_set_hw_macaddr(dev);
}
@@ -1001,6 +1001,7 @@ static int encx24j600_spi_probe(struct spi_device *spi)
struct net_device *ndev;
struct encx24j600_priv *priv;
u16 eidled;
+ u8 addr[ETH_ALEN];
ndev = alloc_etherdev(sizeof(struct encx24j600_priv));
@@ -1023,10 +1024,13 @@ static int encx24j600_spi_probe(struct spi_device *spi)
priv->speed = SPEED_100;
priv->ctx.spi = spi;
- devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
ndev->irq = spi->irq;
ndev->netdev_ops = &encx24j600_netdev_ops;
+ ret = devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
+ if (ret)
+ goto out_free;
+
mutex_init(&priv->lock);
/* Reset device and check if it is connected */
@@ -1053,7 +1057,8 @@ static int encx24j600_spi_probe(struct spi_device *spi)
}
/* Get the MAC address from the chip */
- encx24j600_hw_get_macaddr(priv, ndev->dev_addr);
+ encx24j600_hw_get_macaddr(priv, addr);
+ eth_hw_addr_set(ndev, addr);
ndev->ethtool_ops = &encx24j600_ethtool_ops;
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
index fac61a8fbd02..34c5a289898c 100644
--- a/drivers/net/ethernet/microchip/encx24j600_hw.h
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -15,8 +15,8 @@ struct encx24j600_context {
int bank;
};
-void devm_regmap_init_encx24j600(struct device *dev,
- struct encx24j600_context *ctx);
+int devm_regmap_init_encx24j600(struct device *dev,
+ struct encx24j600_context *ctx);
/* Single-byte instructions */
#define BANK_SELECT(bank) (0xC0 | ((bank & (BANK_MASK >> BANK_SHIFT)) << 1))
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 9e8561cdc32a..03d02403c19e 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -816,7 +816,7 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
eth_random_addr(adapter->mac_address);
}
lan743x_mac_set_address(adapter, adapter->mac_address);
- ether_addr_copy(netdev->dev_addr, adapter->mac_address);
+ eth_hw_addr_set(netdev, adapter->mac_address);
return 0;
}
@@ -2645,7 +2645,7 @@ static int lan743x_netdev_set_mac_address(struct net_device *netdev,
ret = eth_prepare_mac_addr_change(netdev, sock_addr);
if (ret)
return ret;
- ether_addr_copy(netdev->dev_addr, sock_addr->sa_data);
+ eth_hw_addr_set(netdev, sock_addr->sa_data);
lan743x_mac_set_address(adapter, sock_addr->sa_data);
lan743x_rfe_update_mac_address(adapter);
return 0;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 6080028c1df2..34c22eea0124 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -279,6 +279,7 @@
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (3)
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (4)
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (5)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_ (6)
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \
(((value) & 0x7) << (1 + ((channel) << 2)))
#define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) << 2))
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index ab6d719d40f0..9380e396f648 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -491,9 +491,10 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
int perout_pin = 0;
unsigned int index = perout_request->index;
struct lan743x_ptp_perout *perout = &ptp->perout[index];
+ int ret = 0;
/* Reject requests with unsupported flags */
- if (perout_request->flags)
+ if (perout_request->flags & ~PTP_PEROUT_DUTY_CYCLE)
return -EOPNOTSUPP;
if (on) {
@@ -518,6 +519,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
netif_warn(adapter, drv, adapter->netdev,
"Failed to reserve event channel %d for PEROUT\n",
index);
+ ret = -EBUSY;
goto failed;
}
@@ -529,6 +531,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
netif_warn(adapter, drv, adapter->netdev,
"Failed to reserve gpio %d for PEROUT\n",
perout_pin);
+ ret = -EBUSY;
goto failed;
}
@@ -540,27 +543,93 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
period_sec += perout_request->period.nsec / 1000000000;
period_nsec = perout_request->period.nsec % 1000000000;
- if (period_sec == 0) {
- if (period_nsec >= 400000000) {
+ if (perout_request->flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on, ts_period;
+ s64 wf_high, period64, half;
+ s32 remainder;
+
+ ts_on.tv_sec = perout_request->on.sec;
+ ts_on.tv_nsec = perout_request->on.nsec;
+ wf_high = timespec64_to_ns(&ts_on);
+ ts_period.tv_sec = perout_request->period.sec;
+ ts_period.tv_nsec = perout_request->period.nsec;
+ period64 = timespec64_to_ns(&ts_period);
+
+ if (period64 < 200) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "perout period too small, minimum is 200nS\n");
+ ret = -EOPNOTSUPP;
+ goto failed;
+ }
+ if (wf_high >= period64) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "pulse width must be smaller than period\n");
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ /* Check if we can do a 50% toggle on an even period value.
+ * Otherwise, check if the requested pulse width matches one of
+ * the pre-defined width values, and fail if neither applies.
+ */
+ half = div_s64_rem(period64, 2, &remainder);
+ if (!remainder) {
+ if (half == wf_high) {
+ /* It's 50% match. Use the toggle option */
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_;
+ /* In this case, divide the period value by 2 */
+ ts_period = ns_to_timespec64(div_s64(period64, 2));
+ period_sec = ts_period.tv_sec;
+ period_nsec = ts_period.tv_nsec;
+
+ goto program;
+ }
+ }
+ /* If we can't do toggle, the width option needs to be an exact match */
+ if (wf_high == 200000000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
- } else if (period_nsec >= 20000000) {
+ } else if (wf_high == 10000000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
- } else if (period_nsec >= 2000000) {
+ } else if (wf_high == 1000000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
- } else if (period_nsec >= 200000) {
+ } else if (wf_high == 100000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
- } else if (period_nsec >= 20000) {
+ } else if (wf_high == 10000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
- } else if (period_nsec >= 200) {
+ } else if (wf_high == 100) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
} else {
netif_warn(adapter, drv, adapter->netdev,
- "perout period too small, minimum is 200nS\n");
+ "duty cycle specified is not supported\n");
+ ret = -EOPNOTSUPP;
goto failed;
}
} else {
- pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ if (period_sec == 0) {
+ if (period_nsec >= 400000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ } else if (period_nsec >= 20000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
+ } else if (period_nsec >= 2000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
+ } else if (period_nsec >= 200000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
+ } else if (period_nsec >= 20000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
+ } else if (period_nsec >= 200) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "perout period too small, minimum is 200nS\n");
+ ret = -EOPNOTSUPP;
+ goto failed;
+ }
+ } else {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ }
}
+program:
/* turn off by setting target far in future */
lan743x_csr_write(adapter,
@@ -599,7 +668,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
failed:
lan743x_ptp_perout_off(adapter, index);
- return -ENODEV;
+ return ret;
}
static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
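
Worked example for the duty-cycle path above: period = 1 s with on = 500 ms has an even period whose half equals the pulse width, so the TOGGLE event is programmed with half the period; period = 1 s with on = 200 ms falls through to the fixed-width table instead. A userspace model of just that decision, assuming the same semantics as the hunk:

#include <stdbool.h>
#include <stdint.h>

static bool can_use_toggle(int64_t period_ns, int64_t on_ns)
{
	/* 50% duty cycle on an even period: program half the period
	 * with the TOGGLE pulse width */
	return period_ns % 2 == 0 && period_ns / 2 == on_ns;
}

/* can_use_toggle(1000000000, 500000000) -> true (toggle every 500 ms)
 * can_use_toggle(1000000000, 200000000) -> false; on_ns must then match
 * a fixed width: 200 ms, 10 ms, 1 ms, 100 us, 10 us or 100 ns */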
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index cbece6e9bff2..4625d4fb4cde 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -234,8 +234,7 @@ static int sparx5_create_targets(struct sparx5 *sparx5)
}
iomem[idx] = devm_ioremap(sparx5->dev,
iores[idx]->start,
- iores[idx]->end - iores[idx]->start
- + 1);
+ resource_size(iores[idx]));
if (!iomem[idx]) {
dev_err(sparx5->dev, "Unable to get switch registers: %s\n",
iores[idx]->name);
@@ -758,6 +757,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
"port %u: missing serdes\n",
portno);
+ of_node_put(portnp);
goto cleanup_config;
}
config->portno = portno;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index cb68eaaac881..e042f117dc7a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -162,7 +162,7 @@ static int sparx5_set_mac_address(struct net_device *dev, void *p)
sparx5_mact_learn(sparx5, PGID_CPU, addr->sa_data, port->pvid);
/* Record the address */
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -200,7 +200,6 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
{
struct sparx5_port *spx5_port;
struct net_device *ndev;
- u64 val;
ndev = devm_alloc_etherdev(sparx5->dev, sizeof(struct sparx5_port));
if (!ndev)
@@ -216,8 +215,7 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
ndev->netdev_ops = &sparx5_port_netdev_ops;
ndev->ethtool_ops = &sparx5_ethtool_ops;
- val = ether_addr_to_u64(sparx5->base_mac) + portno + 1;
- u64_to_ether_addr(val, ndev->dev_addr);
+ eth_hw_addr_gen(ndev, sparx5->base_mac, portno + 1);
return ndev;
}
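
eth_hw_addr_gen() encapsulates the u64 round trip the removed lines spelled out. Roughly what the helper does, as a comparison sketch rather than its actual source:

#include <linux/etherdevice.h>

static void example_addr_gen(struct net_device *ndev,
			     const u8 *base, unsigned int id)
{
	u64 val = ether_addr_to_u64(base) + id;
	u8 addr[ETH_ALEN];

	u64_to_ether_addr(val, addr);
	eth_hw_addr_set(ndev, addr);	/* const-safe store */
}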
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index d5c485a6d284..7c7a5fb91f79 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -363,7 +363,7 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
}
hwc_cq->gdma_cq = cq;
- comp_buf = kcalloc(q_depth, sizeof(struct gdma_comp), GFP_KERNEL);
+ comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
if (!comp_buf) {
err = -ENOMEM;
goto out;
@@ -580,7 +580,7 @@ static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
return err;
}
- ctx = kzalloc(q_depth * sizeof(struct hwc_caller_ctx), GFP_KERNEL);
+ ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 1b21030308e5..d65697c239c8 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1477,8 +1477,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
if (err)
goto out;
- if (cq->gdma_id >= gc->max_num_cqs)
+ if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
+ err = -EINVAL;
goto out;
+ }
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
@@ -1608,7 +1610,7 @@ static int mana_init_port(struct net_device *ndev)
if (apc->num_queues > apc->max_queues)
apc->num_queues = apc->max_queues;
- ether_addr_copy(ndev->dev_addr, apc->mac_addr);
+ eth_hw_addr_set(ndev, apc->mac_addr);
return 0;
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 49def6934cad..15179b9529e1 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -65,7 +65,7 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, address->sa_data);
moxart_update_mac_address(ndev);
return 0;
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
index b6a73d151dec..8dd8c7f425d2 100644
--- a/drivers/net/ethernet/mscc/Kconfig
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -28,7 +28,7 @@ config MSCC_OCELOT_SWITCH
depends on BRIDGE || BRIDGE=n
depends on NET_SWITCHDEV
depends on HAS_IOMEM
- depends on OF_NET
+ depends on OF
select MSCC_OCELOT_SWITCH_LIB
select GENERIC_PHY
help
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index c581b955efb3..4e5ae687d2e2 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -162,48 +162,117 @@ static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
return ocelot_vlant_wait_for_completion(ocelot);
}
-static void ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
- struct ocelot_vlan native_vlan)
+static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- u32 val = 0;
+ struct ocelot_bridge_vlan *vlan;
+ int num_untagged = 0;
- ocelot_port->native_vlan = native_vlan;
+ list_for_each_entry(vlan, &ocelot->vlans, list) {
+ if (!(vlan->portmask & BIT(port)))
+ continue;
- ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(native_vlan.vid),
- REW_PORT_VLAN_CFG_PORT_VID_M,
- REW_PORT_VLAN_CFG, port);
+ if (vlan->untagged & BIT(port))
+ num_untagged++;
+ }
+
+ return num_untagged;
+}
+
+static int ocelot_port_num_tagged_vlans(struct ocelot *ocelot, int port)
+{
+ struct ocelot_bridge_vlan *vlan;
+ int num_tagged = 0;
+
+ list_for_each_entry(vlan, &ocelot->vlans, list) {
+ if (!(vlan->portmask & BIT(port)))
+ continue;
+
+ if (!(vlan->untagged & BIT(port)))
+ num_tagged++;
+ }
+
+ return num_tagged;
+}
+
+/* We use native VLAN when we have to mix egress-tagged VLANs with exactly
+ * _one_ egress-untagged VLAN (_the_ native VLAN)
+ */
+static bool ocelot_port_uses_native_vlan(struct ocelot *ocelot, int port)
+{
+ return ocelot_port_num_tagged_vlans(ocelot, port) &&
+ ocelot_port_num_untagged_vlans(ocelot, port) == 1;
+}
+
+static struct ocelot_bridge_vlan *
+ocelot_port_find_native_vlan(struct ocelot *ocelot, int port)
+{
+ struct ocelot_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &ocelot->vlans, list)
+ if (vlan->portmask & BIT(port) && vlan->untagged & BIT(port))
+ return vlan;
+
+ return NULL;
+}
+
+/* Keep in sync REW_TAG_CFG_TAG_CFG and, if applicable,
+ * REW_PORT_VLAN_CFG_PORT_VID, with the bridge VLAN table and VLAN awareness
+ * state of the port.
+ */
+static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ enum ocelot_port_tag_config tag_cfg;
+ bool uses_native_vlan = false;
if (ocelot_port->vlan_aware) {
- if (native_vlan.valid)
- /* Tag all frames except when VID == DEFAULT_VLAN */
- val = REW_TAG_CFG_TAG_CFG(1);
+ uses_native_vlan = ocelot_port_uses_native_vlan(ocelot, port);
+
+ if (uses_native_vlan)
+ tag_cfg = OCELOT_PORT_TAG_NATIVE;
+ else if (ocelot_port_num_untagged_vlans(ocelot, port))
+ tag_cfg = OCELOT_PORT_TAG_DISABLED;
else
- /* Tag all frames */
- val = REW_TAG_CFG_TAG_CFG(3);
+ tag_cfg = OCELOT_PORT_TAG_TRUNK;
} else {
- /* Port tagging disabled. */
- val = REW_TAG_CFG_TAG_CFG(0);
+ tag_cfg = OCELOT_PORT_TAG_DISABLED;
}
- ocelot_rmw_gix(ocelot, val,
+
+ ocelot_rmw_gix(ocelot, REW_TAG_CFG_TAG_CFG(tag_cfg),
REW_TAG_CFG_TAG_CFG_M,
REW_TAG_CFG, port);
+
+ if (uses_native_vlan) {
+ struct ocelot_bridge_vlan *native_vlan;
+
+ /* Not having a native VLAN is impossible, because
+ * ocelot_port_num_untagged_vlans has returned 1.
+ * So there is no use in checking for NULL here.
+ */
+ native_vlan = ocelot_port_find_native_vlan(ocelot, port);
+
+ ocelot_rmw_gix(ocelot,
+ REW_PORT_VLAN_CFG_PORT_VID(native_vlan->vid),
+ REW_PORT_VLAN_CFG_PORT_VID_M,
+ REW_PORT_VLAN_CFG, port);
+ }
}
 /* Default vlan to classify for untagged frames (may be zero) */
static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
- struct ocelot_vlan pvid_vlan)
+ const struct ocelot_bridge_vlan *pvid_vlan)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u16 pvid = OCELOT_VLAN_UNAWARE_PVID;
u32 val = 0;
ocelot_port->pvid_vlan = pvid_vlan;
- if (!ocelot_port->vlan_aware)
- pvid_vlan.vid = 0;
+ if (ocelot_port->vlan_aware && pvid_vlan)
+ pvid = pvid_vlan->vid;
ocelot_rmw_gix(ocelot,
- ANA_PORT_VLAN_CFG_VLAN_VID(pvid_vlan.vid),
+ ANA_PORT_VLAN_CFG_VLAN_VID(pvid),
ANA_PORT_VLAN_CFG_VLAN_VID_M,
ANA_PORT_VLAN_CFG, port);
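
ocelot_port_manage_port_tag() now derives the rewriter setting from the bridge VLAN list instead of a cached native VLAN. Its decision, condensed into a sketch with an illustrative enum but the same logic:

#include <stdbool.h>

enum tag_cfg { TAG_DISABLED, TAG_NATIVE, TAG_TRUNK };

static enum tag_cfg pick_tag_cfg(bool vlan_aware, int num_tagged,
				 int num_untagged)
{
	if (!vlan_aware)
		return TAG_DISABLED;	/* no egress tagging at all */
	if (num_tagged && num_untagged == 1)
		return TAG_NATIVE;	/* untag only the native VID */
	if (num_untagged)
		return TAG_DISABLED;	/* every VLAN egresses untagged */
	return TAG_TRUNK;		/* tag everything */
}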
@@ -212,7 +281,7 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
* classified to VLAN 0, but that is always in our RX filter, so it
* would get accepted were it not for this setting.
*/
- if (!pvid_vlan.valid && ocelot_port->vlan_aware)
+ if (!pvid_vlan && ocelot_port->vlan_aware)
val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
@@ -222,31 +291,90 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
ANA_PORT_DROP_CFG, port);
}
-static int ocelot_vlan_member_set(struct ocelot *ocelot, u32 vlan_mask, u16 vid)
+static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot,
+ u16 vid)
{
+ struct ocelot_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &ocelot->vlans, list)
+ if (vlan->vid == vid)
+ return vlan;
+
+ return NULL;
+}
+
+static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid,
+ bool untagged)
+{
+ struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid);
+ unsigned long portmask;
int err;
- err = ocelot_vlant_set_mask(ocelot, vid, vlan_mask);
- if (err)
+ if (vlan) {
+ portmask = vlan->portmask | BIT(port);
+
+ err = ocelot_vlant_set_mask(ocelot, vid, portmask);
+ if (err)
+ return err;
+
+ vlan->portmask = portmask;
+ /* Bridge VLANs can be overwritten with a different
+ * egress-tagging setting, so make sure to override an untagged
+ * VID with a tagged one when that happens.
+ */
+ if (untagged)
+ vlan->untagged |= BIT(port);
+ else
+ vlan->untagged &= ~BIT(port);
+
+ return 0;
+ }
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return -ENOMEM;
+
+ portmask = BIT(port);
+
+ err = ocelot_vlant_set_mask(ocelot, vid, portmask);
+ if (err) {
+ kfree(vlan);
return err;
+ }
- ocelot->vlan_mask[vid] = vlan_mask;
+ vlan->vid = vid;
+ vlan->portmask = portmask;
+ if (untagged)
+ vlan->untagged = BIT(port);
+ INIT_LIST_HEAD(&vlan->list);
+ list_add_tail(&vlan->list, &ocelot->vlans);
return 0;
}
-static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid)
-{
- return ocelot_vlan_member_set(ocelot,
- ocelot->vlan_mask[vid] | BIT(port),
- vid);
-}
-
static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid)
{
- return ocelot_vlan_member_set(ocelot,
- ocelot->vlan_mask[vid] & ~BIT(port),
- vid);
+ struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid);
+ unsigned long portmask;
+ int err;
+
+ if (!vlan)
+ return 0;
+
+ portmask = vlan->portmask & ~BIT(port);
+
+ err = ocelot_vlant_set_mask(ocelot, vid, portmask);
+ if (err)
+ return err;
+
+ vlan->portmask = portmask;
+ if (vlan->portmask)
+ return 0;
+
+ list_del(&vlan->list);
+ kfree(vlan);
+
+ return 0;
}
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
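
ocelot_vlan_member_add()/_del() keep one ocelot_bridge_vlan object per VID, with the port mask doubling as the reference count: allocated when the first port joins, freed when the mask empties. The deletion half, reduced to a sketch with an illustrative struct:

#include <linux/bits.h>
#include <linux/list.h>
#include <linux/slab.h>

struct example_vlan {
	struct list_head list;
	unsigned long portmask;
};

static void example_member_del(struct example_vlan *vlan, int port)
{
	vlan->portmask &= ~BIT(port);
	if (!vlan->portmask) {		/* last member left */
		list_del(&vlan->list);
		kfree(vlan);
	}
}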
@@ -279,7 +407,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
ANA_PORT_VLAN_CFG, port);
ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
- ocelot_port_set_native_vlan(ocelot, port, ocelot_port->native_vlan);
+ ocelot_port_manage_port_tag(ocelot, port);
return 0;
}
@@ -288,14 +416,20 @@ EXPORT_SYMBOL(ocelot_port_vlan_filtering);
int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged, struct netlink_ext_ack *extack)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
-
- /* Deny changing the native VLAN, but always permit deleting it */
- if (untagged && ocelot_port->native_vlan.vid != vid &&
- ocelot_port->native_vlan.valid) {
- NL_SET_ERR_MSG_MOD(extack,
- "Port already has a native VLAN");
- return -EBUSY;
+ if (untagged) {
+ /* We are adding an egress-untagged VLAN */
+ if (ocelot_port_uses_native_vlan(ocelot, port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port with egress-tagged VLANs cannot have more than one egress-untagged (native) VLAN");
+ return -EBUSY;
+ }
+ } else {
+ /* We are adding an egress-tagged VLAN */
+ if (ocelot_port_num_untagged_vlans(ocelot, port) > 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port with more than one egress-untagged VLAN cannot have egress-tagged VLANs");
+ return -EBUSY;
+ }
}
return 0;
@@ -307,27 +441,17 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
{
int err;
- err = ocelot_vlan_member_add(ocelot, port, vid);
+ err = ocelot_vlan_member_add(ocelot, port, vid, untagged);
if (err)
return err;
/* Default ingress vlan classification */
- if (pvid) {
- struct ocelot_vlan pvid_vlan;
-
- pvid_vlan.vid = vid;
- pvid_vlan.valid = true;
- ocelot_port_set_pvid(ocelot, port, pvid_vlan);
- }
+ if (pvid)
+ ocelot_port_set_pvid(ocelot, port,
+ ocelot_bridge_vlan_find(ocelot, vid));
 /* Untagged egress vlan classification */
- if (untagged) {
- struct ocelot_vlan native_vlan;
-
- native_vlan.vid = vid;
- native_vlan.valid = true;
- ocelot_port_set_native_vlan(ocelot, port, native_vlan);
- }
+ ocelot_port_manage_port_tag(ocelot, port);
return 0;
}
@@ -343,18 +467,11 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
return err;
/* Ingress */
- if (ocelot_port->pvid_vlan.vid == vid) {
- struct ocelot_vlan pvid_vlan = {0};
-
- ocelot_port_set_pvid(ocelot, port, pvid_vlan);
- }
+ if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ ocelot_port_set_pvid(ocelot, port, NULL);
/* Egress */
- if (ocelot_port->native_vlan.vid == vid) {
- struct ocelot_vlan native_vlan = {0};
-
- ocelot_port_set_native_vlan(ocelot, port, native_vlan);
- }
+ ocelot_port_manage_port_tag(ocelot, port);
return 0;
}
@@ -372,13 +489,13 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
/* Configure the port VLAN memberships */
for (vid = 1; vid < VLAN_N_VID; vid++)
- ocelot_vlan_member_set(ocelot, 0, vid);
+ ocelot_vlant_set_mask(ocelot, vid, 0);
/* Because VLAN filtering is enabled, we need VID 0 to get untagged
* traffic. It is added automatically if 8021q module is loaded, but
* we can't rely on it since module may be not loaded.
*/
- ocelot_vlan_member_set(ocelot, all_ports, 0);
+ ocelot_vlant_set_mask(ocelot, OCELOT_VLAN_UNAWARE_PVID, all_ports);
/* Set vlan ingress filter mask to all ports but the CPU port by
* default.
@@ -472,9 +589,9 @@ void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
!(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP))
ocelot_port_rmwl(ocelot_port,
DEV_CLOCK_CFG_MAC_TX_RST |
- DEV_CLOCK_CFG_MAC_TX_RST,
+ DEV_CLOCK_CFG_MAC_RX_RST,
DEV_CLOCK_CFG_MAC_TX_RST |
- DEV_CLOCK_CFG_MAC_TX_RST,
+ DEV_CLOCK_CFG_MAC_RX_RST,
DEV_CLOCK_CFG);
}
EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down);
@@ -563,65 +680,50 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
- /* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of
- * reset
- */
- ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed),
- DEV_CLOCK_CFG);
-
- /* No PFC */
- ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed),
- ANA_PFC_PFC_CFG, port);
-
/* Core: Enable port for frame transfer */
ocelot_fields_write(ocelot, port,
QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
}
EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up);
-static void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
- struct sk_buff *clone)
+static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ unsigned long flags;
- spin_lock(&ocelot_port->ts_id_lock);
+ spin_lock_irqsave(&ocelot->ts_id_lock, flags);
+
+ if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
+ ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
+ spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+ return -EBUSY;
+ }
skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
/* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
- ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4;
- skb_queue_tail(&ocelot_port->tx_skbs, clone);
- spin_unlock(&ocelot_port->ts_id_lock);
-}
+ ocelot_port->ts_id++;
+ if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
+ ocelot_port->ts_id = 0;
-u32 ocelot_ptp_rew_op(struct sk_buff *skb)
-{
- struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
- u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
- u32 rew_op = 0;
+ ocelot_port->ptp_skbs_in_flight++;
+ ocelot->ptp_skbs_in_flight++;
- if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
- rew_op = ptp_cmd;
- rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
- } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
- rew_op = ptp_cmd;
- }
+ skb_queue_tail(&ocelot_port->tx_skbs, clone);
+
+ spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
- return rew_op;
+ return 0;
}
-EXPORT_SYMBOL(ocelot_ptp_rew_op);
-static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb)
+static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb,
+ unsigned int ptp_class)
{
struct ptp_header *hdr;
- unsigned int ptp_class;
u8 msgtype, twostep;
- ptp_class = ptp_classify_raw(skb);
- if (ptp_class == PTP_CLASS_NONE)
- return false;
-
hdr = ptp_parse_header(skb, ptp_class);
if (!hdr)
return false;
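
Two-step TX timestamping now refuses a new request once either bound is reached: OCELOT_MAX_PTP_ID outstanding clones on the port, or the timestamp FIFO depth switch-wide. The lock became irqsave because the completion path can run from IRQ context. A sketch of the bounded ID allocation (struct and limit are illustrative):

#include <linux/errno.h>
#include <linux/spinlock.h>

#define EXAMPLE_MAX_IDS	4	/* stand-in for the hardware limit */

struct example_ts_state {
	spinlock_t lock;
	unsigned int next_id;
	unsigned int in_flight;
};

static int example_alloc_ts_id(struct example_ts_state *s)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&s->lock, flags);
	if (s->in_flight == EXAMPLE_MAX_IDS) {
		spin_unlock_irqrestore(&s->lock, flags);
		return -EBUSY;	/* caller drops the clone */
	}
	id = s->next_id;
	s->next_id = (s->next_id + 1) % EXAMPLE_MAX_IDS;
	s->in_flight++;
	spin_unlock_irqrestore(&s->lock, flags);
	return id;
}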
@@ -641,10 +743,20 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u8 ptp_cmd = ocelot_port->ptp_cmd;
+ unsigned int ptp_class;
+ int err;
+
+ /* Don't do anything if PTP timestamping not enabled */
+ if (!ptp_cmd)
+ return 0;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return -EINVAL;
/* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
- if (ocelot_ptp_is_onestep_sync(skb)) {
+ if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
return 0;
}
@@ -658,8 +770,12 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
if (!(*clone))
return -ENOMEM;
- ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
+ err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
+ if (err)
+ return err;
+
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+ OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
}
return 0;
@@ -693,6 +809,17 @@ static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
}
+static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
+{
+ struct ptp_header *hdr;
+
+ hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
+ if (WARN_ON(!hdr))
+ return false;
+
+ return seqid == ntohs(hdr->sequence_id);
+}
+
void ocelot_get_txtstamp(struct ocelot *ocelot)
{
int budget = OCELOT_PTP_QUEUE_SZ;
@@ -700,10 +827,10 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
while (budget--) {
struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
struct skb_shared_hwtstamps shhwtstamps;
+ u32 val, id, seqid, txport;
struct ocelot_port *port;
struct timespec64 ts;
unsigned long flags;
- u32 val, id, txport;
val = ocelot_read(ocelot, SYS_PTP_STATUS);
@@ -716,10 +843,17 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
/* Retrieve the ts ID and Tx port */
id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+ seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
- /* Retrieve its associated skb */
port = ocelot->ports[txport];
+ spin_lock(&ocelot->ts_id_lock);
+ port->ptp_skbs_in_flight--;
+ ocelot->ptp_skbs_in_flight--;
+ spin_unlock(&ocelot->ts_id_lock);
+
+ /* Retrieve its associated skb */
+try_again:
spin_lock_irqsave(&port->tx_skbs.lock, flags);
skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
@@ -732,12 +866,20 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+ if (WARN_ON(!skb_match))
+ continue;
+
+ if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
+ dev_err_ratelimited(ocelot->dev,
+ "port %d received stale TX timestamp for seqid %d, discarding\n",
+ txport, seqid);
+ dev_kfree_skb_any(skb);
+ goto try_again;
+ }
+
/* Get the h/w timestamp */
ocelot_get_hwtimestamp(ocelot, &ts);
- if (unlikely(!skb_match))
- continue;
-
/* Set the timestamp into the skb */
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
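
ocelot_get_txtstamp() now cross-checks the sequence id reported by the timestamp FIFO against the queued clone and discards stale entries instead of attaching a wrong timestamp. The comparison uses the stock PTP header parser; only the wrapper name below is illustrative:

#include <linux/ptp_classify.h>

static bool example_seqid_matches(struct sk_buff *clone,
				  unsigned int ptp_class, u16 seqid)
{
	struct ptp_header *hdr = ptp_parse_header(clone, ptp_class);

	return hdr && ntohs(hdr->sequence_id) == seqid;
}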
@@ -926,7 +1068,7 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
ocelot_ifh_set_bypass(ifh, 1);
ocelot_ifh_set_dest(ifh, BIT_ULL(port));
ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
- ocelot_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
+ ocelot_ifh_set_vlan_tci(ifh, skb_vlan_tag_get(skb));
ocelot_ifh_set_rew_op(ifh, rew_op);
for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
@@ -1303,14 +1445,19 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
return mask;
}
-static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot,
+static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port,
struct net_device *bridge)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[src_port];
u32 mask = 0;
int port;
+ if (!ocelot_port || ocelot_port->bridge != bridge ||
+ ocelot_port->stp_state != BR_STATE_FORWARDING)
+ return 0;
+
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct ocelot_port *ocelot_port = ocelot->ports[port];
+ ocelot_port = ocelot->ports[port];
if (!ocelot_port)
continue;
@@ -1376,7 +1523,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
struct net_device *bridge = ocelot_port->bridge;
struct net_device *bond = ocelot_port->bond;
- mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
+ mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge);
mask |= cpu_fwd_mask;
mask &= ~BIT(port);
if (bond) {
@@ -1650,12 +1797,11 @@ void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
struct net_device *bridge)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct ocelot_vlan pvid = {0}, native_vlan = {0};
ocelot_port->bridge = NULL;
- ocelot_port_set_pvid(ocelot, port, pvid);
- ocelot_port_set_native_vlan(ocelot, port, native_vlan);
+ ocelot_port_set_pvid(ocelot, port, NULL);
+ ocelot_port_manage_port_tag(ocelot, port);
ocelot_apply_bridge_fwd_mask(ocelot);
}
EXPORT_SYMBOL(ocelot_port_bridge_leave);
@@ -1953,7 +2099,6 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
struct ocelot_port *ocelot_port = ocelot->ports[port];
skb_queue_head_init(&ocelot_port->tx_skbs);
- spin_lock_init(&ocelot_port->ts_id_lock);
/* Basic L2 initialization */
@@ -2042,9 +2187,10 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot)
OCELOT_TAG_PREFIX_NONE);
/* Configure the CPU port to be VLAN aware */
- ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
- ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
- ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
+ ocelot_write_gix(ocelot,
+ ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_VLAN_UNAWARE_PVID) |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
ANA_PORT_VLAN_CFG, cpu);
}
@@ -2086,6 +2232,7 @@ int ocelot_init(struct ocelot *ocelot)
mutex_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
+ spin_lock_init(&ocelot->ts_id_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(ocelot->dev));
ocelot->stats_queue = create_singlethread_workqueue(queue_name);
@@ -2100,6 +2247,7 @@ int ocelot_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
+ INIT_LIST_HEAD(&ocelot->vlans);
ocelot_detect_features(ocelot);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 1952d6a1b98a..e43da09b8f91 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -25,6 +25,7 @@
#include "ocelot_rew.h"
#include "ocelot_qs.h"
+#define OCELOT_VLAN_UNAWARE_PVID 0
#define OCELOT_BUFFER_CELL_SZ 60
#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
diff --git a/drivers/net/ethernet/mscc/ocelot_devlink.c b/drivers/net/ethernet/mscc/ocelot_devlink.c
index edafbd37d12c..b8737efd2a85 100644
--- a/drivers/net/ethernet/mscc/ocelot_devlink.c
+++ b/drivers/net/ethernet/mscc/ocelot_devlink.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/* Copyright 2020-2021 NXP Semiconductors
+/* Copyright 2020-2021 NXP
*/
#include <net/devlink.h>
#include "ocelot.h"
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 8b843d3c9189..769a8159373e 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -142,17 +142,77 @@ ocelot_find_vcap_filter_that_points_at(struct ocelot *ocelot, int chain)
return NULL;
}
+static int
+ocelot_flower_parse_ingress_vlan_modify(struct ocelot *ocelot, int port,
+ struct ocelot_vcap_filter *filter,
+ const struct flow_action_entry *a,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ocelot_port->vlan_aware) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only modify VLAN under VLAN aware bridge");
+ return -EOPNOTSUPP;
+ }
+
+ filter->action.vid_replace_ena = true;
+ filter->action.pcp_dei_ena = true;
+ filter->action.vid = a->vlan.vid;
+ filter->action.pcp = a->vlan.prio;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+
+ return 0;
+}
+
+static int
+ocelot_flower_parse_egress_vlan_modify(struct ocelot_vcap_filter *filter,
+ const struct flow_action_entry *a,
+ struct netlink_ext_ack *extack)
+{
+ enum ocelot_tag_tpid_sel tpid;
+
+ switch (ntohs(a->vlan.proto)) {
+ case ETH_P_8021Q:
+ tpid = OCELOT_TAG_TPID_SEL_8021Q;
+ break;
+ case ETH_P_8021AD:
+ tpid = OCELOT_TAG_TPID_SEL_8021AD;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot modify custom TPID");
+ return -EOPNOTSUPP;
+ }
+
+ filter->action.tag_a_tpid_sel = tpid;
+ filter->action.push_outer_tag = OCELOT_ES0_TAG;
+ filter->action.tag_a_vid_sel = OCELOT_ES0_VID_PLUS_CLASSIFIED_VID;
+ filter->action.vid_a_val = a->vlan.vid;
+ filter->action.pcp_a_val = a->vlan.prio;
+ filter->action.tag_a_pcp_sel = OCELOT_ES0_PCP;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+
+ return 0;
+}
+
static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
bool ingress, struct flow_cls_offload *f,
struct ocelot_vcap_filter *filter)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
struct netlink_ext_ack *extack = f->common.extack;
bool allow_missing_goto_target = false;
const struct flow_action_entry *a;
enum ocelot_tag_tpid_sel tpid;
int i, chain, egress_port;
u64 rate;
+ int err;
if (!flow_action_basic_hw_stats_check(&f->rule->action,
f->common.extack))
@@ -273,26 +333,20 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_VLAN_MANGLE:
- if (filter->block_id != VCAP_IS1) {
- NL_SET_ERR_MSG_MOD(extack,
- "VLAN modify action can only be offloaded to VCAP IS1");
- return -EOPNOTSUPP;
- }
- if (filter->goto_target != -1) {
+ if (filter->block_id == VCAP_IS1) {
+ err = ocelot_flower_parse_ingress_vlan_modify(ocelot, port,
+ filter, a,
+ extack);
+ } else if (filter->block_id == VCAP_ES0) {
+ err = ocelot_flower_parse_egress_vlan_modify(filter, a,
+ extack);
+ } else {
NL_SET_ERR_MSG_MOD(extack,
- "Last action must be GOTO");
- return -EOPNOTSUPP;
+ "VLAN modify action can only be offloaded to VCAP IS1 or ES0");
+ err = -EOPNOTSUPP;
}
- if (!ocelot_port->vlan_aware) {
- NL_SET_ERR_MSG_MOD(extack,
- "Can only modify VLAN under VLAN aware bridge");
- return -EOPNOTSUPP;
- }
- filter->action.vid_replace_ena = true;
- filter->action.pcp_dei_ena = true;
- filter->action.vid = a->vlan.vid;
- filter->action.pcp = a->vlan.prio;
- filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ if (err)
+ return err;
break;
case FLOW_ACTION_PRIORITY:
if (filter->block_id != VCAP_IS1) {
@@ -340,7 +394,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
}
filter->action.tag_a_tpid_sel = tpid;
filter->action.push_outer_tag = OCELOT_ES0_TAG;
- filter->action.tag_a_vid_sel = 1;
+ filter->action.tag_a_vid_sel = OCELOT_ES0_VID;
filter->action.vid_a_val = a->vlan.vid;
filter->action.pcp_a_val = a->vlan.prio;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -678,6 +732,31 @@ static int ocelot_vcap_dummy_filter_del(struct ocelot *ocelot,
return 0;
}
+/* If we have an egress VLAN modification rule, we need to actually write the
+ * delta between the input VLAN (from the key) and the output VLAN (from the
+ * action), but the action was parsed first. So we need to patch the delta into
+ * the action here.
+ */
+static int
+ocelot_flower_patch_es0_vlan_modify(struct ocelot_vcap_filter *filter,
+ struct netlink_ext_ack *extack)
+{
+ if (filter->block_id != VCAP_ES0 ||
+ filter->action.tag_a_vid_sel != OCELOT_ES0_VID_PLUS_CLASSIFIED_VID)
+ return 0;
+
+ if (filter->vlan.vid.mask != VLAN_VID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 VLAN rewriting needs a full VLAN in the key");
+ return -EOPNOTSUPP;
+ }
+
+ filter->action.vid_a_val -= filter->vlan.vid.value;
+ filter->action.vid_a_val &= VLAN_VID_MASK;
+
+ return 0;
+}
+
int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress)
{
@@ -701,6 +780,12 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
return ret;
}
+ ret = ocelot_flower_patch_es0_vlan_modify(filter, extack);
+ if (ret) {
+ kfree(filter);
+ return ret;
+ }
+
/* The non-optional GOTOs for the TCAM skeleton don't need
* to be actually offloaded.
*/
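
Worked example for the delta patching above: with OCELOT_ES0_VID_PLUS_CLASSIFIED_VID the hardware adds the action value to the classified VID, so rewriting key VID 100 into VID 200 stores 100, and the subtraction wraps modulo 4096:

#include <linux/if_vlan.h>	/* VLAN_VID_MASK */

static u16 example_es0_vid_delta(u16 key_vid, u16 target_vid)
{
	return (target_vid - key_vid) & VLAN_VID_MASK;
}

/* example_es0_vid_delta(100, 200) == 100
 * example_es0_vid_delta(200, 100) == 3996 (mod-4096 wrap) */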
diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c
index 08b481a93460..1fa58546abdc 100644
--- a/drivers/net/ethernet/mscc/ocelot_mrp.c
+++ b/drivers/net/ethernet/mscc/ocelot_mrp.c
@@ -2,7 +2,7 @@
/* Microsemi Ocelot Switch driver
*
* Copyright (c) 2017, 2019 Microsemi Corporation
- * Copyright 2020-2021 NXP Semiconductors
+ * Copyright 2020-2021 NXP
*/
#include <linux/if_bridge.h>
@@ -116,16 +116,16 @@ static void ocelot_mrp_save_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_test_dmac,
- port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_control_dmac,
- port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
}
static void ocelot_mrp_del_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
- ocelot_mact_forget(ocelot, mrp_test_dmac, port->pvid_vlan.vid);
- ocelot_mact_forget(ocelot, mrp_control_dmac, port->pvid_vlan.vid);
+ ocelot_mact_forget(ocelot, mrp_test_dmac, OCELOT_VLAN_UNAWARE_PVID);
+ ocelot_mact_forget(ocelot, mrp_control_dmac, OCELOT_VLAN_UNAWARE_PVID);
}
int ocelot_mrp_add(struct ocelot *ocelot, int port,
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index c0c465a4a981..e3fc4548f642 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -5,9 +5,10 @@
* mscc_ocelot_switch_lib.
*
* Copyright (c) 2017, 2019 Microsemi Corporation
- * Copyright 2020-2021 NXP Semiconductors
+ * Copyright 2020-2021 NXP
*/
+#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
@@ -417,7 +418,7 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
* with VLAN filtering feature. We need to keep it to receive
* untagged traffic.
*/
- if (vid == 0)
+ if (vid == OCELOT_VLAN_UNAWARE_PVID)
return 0;
ret = ocelot_vlan_del(ocelot, port, vid);
@@ -552,7 +553,7 @@ static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.forget.addr, addr);
- w.forget.vid = ocelot_port->pvid_vlan.vid;
+ w.forget.vid = OCELOT_VLAN_UNAWARE_PVID;
w.type = OCELOT_MACT_FORGET;
return ocelot_enqueue_mact_action(ocelot, &w);
@@ -566,7 +567,7 @@ static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.learn.addr, addr);
- w.learn.vid = ocelot_port->pvid_vlan.vid;
+ w.learn.vid = OCELOT_VLAN_UNAWARE_PVID;
w.learn.pgid = PGID_CPU;
w.learn.entry_type = ENTRYTYPE_LOCKED;
w.type = OCELOT_MACT_LEARN;
@@ -601,11 +602,11 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
/* Learn the new net device MAC address in the mac table. */
ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data,
- ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
/* Then forget the previous one. */
- ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid_vlan.vid);
+ ocelot_mact_forget(ocelot, dev->dev_addr, OCELOT_VLAN_UNAWARE_PVID);
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -1625,7 +1626,7 @@ static int ocelot_port_phylink_create(struct ocelot *ocelot, int port,
if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
ocelot_port_rmwl(ocelot_port, 0,
DEV_CLOCK_CFG_MAC_TX_RST |
- DEV_CLOCK_CFG_MAC_TX_RST,
+ DEV_CLOCK_CFG_MAC_RX_RST,
DEV_CLOCK_CFG);
ocelot_port->phy_mode = phy_mode;
@@ -1704,10 +1705,9 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
NETIF_F_HW_TC;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
- memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
- dev->dev_addr[ETH_ALEN - 1] += port;
+ eth_hw_addr_gen(dev, ocelot->base_mac, port);
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr,
- ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
ocelot_init_port(ocelot, port);
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index 7945393a0655..99d7376a70a7 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -998,8 +998,8 @@ ocelot_vcap_block_find_filter_by_index(struct ocelot_vcap_block *block,
}
struct ocelot_vcap_filter *
-ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int cookie,
- bool tc_offload)
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block,
+ unsigned long cookie, bool tc_offload)
{
struct ocelot_vcap_filter *filter;
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 2b8ea48d2fc4..38103b0255b0 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -969,6 +969,7 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
target = ocelot_regmap_init(ocelot, res);
if (IS_ERR(target)) {
err = PTR_ERR(target);
+ of_node_put(portnp);
goto out_teardown;
}
@@ -1134,7 +1135,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (err)
goto out_put_ports;
- devlink_register(devlink);
err = mscc_ocelot_init_ports(pdev, ports);
if (err)
goto out_ocelot_devlink_unregister;
@@ -1157,6 +1157,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
of_node_put(ports);
+ devlink_register(devlink);
dev_info(&pdev->dev, "Ocelot switch probed\n");
@@ -1166,7 +1167,6 @@ out_ocelot_release_ports:
mscc_ocelot_release_ports(ocelot);
mscc_ocelot_teardown_devlink_ports(ocelot);
out_ocelot_devlink_unregister:
- devlink_unregister(devlink);
ocelot_deinit(ocelot);
out_put_ports:
of_node_put(ports);
@@ -1179,11 +1179,11 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
+ devlink_unregister(ocelot->devlink);
ocelot_deinit_timestamp(ocelot);
ocelot_devlink_sb_unregister(ocelot);
mscc_ocelot_release_ports(ocelot);
mscc_ocelot_teardown_devlink_ports(ocelot);
- devlink_unregister(ocelot->devlink);
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_switchdev_notifier(&ocelot_switchdev_nb);
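
The reordering follows the devlink convention that an instance is registered only once fully initialized and unregistered before any teardown, so userspace never observes a half-constructed device. The resulting probe/remove shape, as a sketch with stubbed-out setup and teardown:

#include <net/devlink.h>

static int example_setup(struct devlink *dl) { return 0; }
static void example_teardown(struct devlink *dl) { }

static int example_probe(struct devlink *dl)
{
	int err = example_setup(dl);	/* ports, switchdev, ... */

	if (err)
		return err;
	devlink_register(dl);	/* visible to userspace: last step */
	return 0;
}

static void example_remove(struct devlink *dl)
{
	devlink_unregister(dl);	/* hide from userspace first */
	example_teardown(dl);
}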
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index c1a75b08ced7..5736fcdafd7a 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -796,7 +796,8 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
return status;
}
-static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
+static int myri10ge_update_mac_address(struct myri10ge_priv *mgp,
+ const u8 * addr)
{
struct myri10ge_cmd cmd;
int status;
@@ -3022,7 +3023,7 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
}
/* change the dev structure */
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
return 0;
}
@@ -3738,7 +3739,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct myri10ge_priv *mgp;
struct device *dev = &pdev->dev;
- int i;
int status = -ENXIO;
int dac_enabled;
unsigned hdr_offset, ss_offset;
@@ -3828,8 +3828,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (status)
goto abort_with_ioremap;
- for (i = 0; i < ETH_ALEN; i++)
- netdev->dev_addr[i] = mgp->mac_addr[i];
+ eth_hw_addr_set(netdev, mgp->mac_addr);
myri10ge_select_firmware(mgp);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 3f982033944b..82a22711ce45 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -809,6 +809,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
unsigned long iosize;
void __iomem *ioaddr;
const int pcibar = 1; /* PCI base address register */
+ u8 addr[ETH_ALEN];
int prev_eedata;
u32 tmp;
@@ -859,10 +860,11 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
prev_eedata = eeprom_read(ioaddr, 6);
for (i = 0; i < 3; i++) {
int eedata = eeprom_read(ioaddr, i + 7);
- dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
- dev->dev_addr[i*2+1] = eedata >> 7;
+ addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
+ addr[i*2+1] = eedata >> 7;
prev_eedata = eedata;
}
+ eth_hw_addr_set(dev, addr);
np = netdev_priv(dev);
np->ioaddr = ioaddr;
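
The natsemi hunk also shows why a scratch buffer helps: the MAC is reassembled bit-by-bit across 16-bit EEPROM word boundaries before being published once. A userspace model of that reassembly (function name illustrative, arithmetic as in the hunk):

#include <stdint.h>

static void mac_from_eeprom(const uint16_t ee[16], uint8_t mac[6])
{
	int prev = ee[6];
	int i;

	for (i = 0; i < 3; i++) {
		int data = ee[i + 7];

		/* each byte straddles two EEPROM words */
		mac[i * 2] = (uint8_t)((data << 1) + (prev >> 15));
		mac[i * 2 + 1] = (uint8_t)(data >> 7);
		prev = data;
	}
}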
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 72794d158871..49ea130c9067 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1649,9 +1649,11 @@ failed:
return ret;
}
-static void ns83820_getmac(struct ns83820 *dev, u8 *mac)
+static void ns83820_getmac(struct ns83820 *dev, struct net_device *ndev)
{
+ u8 mac[ETH_ALEN];
unsigned i;
+
for (i=0; i<3; i++) {
u32 data;
@@ -1661,9 +1663,10 @@ static void ns83820_getmac(struct ns83820 *dev, u8 *mac)
writel(i*2, dev->base + RFCR);
data = readl(dev->base + RFDR);
- *mac++ = data;
- *mac++ = data >> 8;
+ mac[i * 2] = data;
+ mac[i * 2 + 1] = data >> 8;
}
+ eth_hw_addr_set(ndev, mac);
}
static void ns83820_set_multicast(struct net_device *ndev)
@@ -2136,7 +2139,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
/* Disable Wake On Lan */
writel(0, dev->base + WCSR);
- ns83820_getmac(dev, ndev->dev_addr);
+ ns83820_getmac(dev, ndev);
/* Yes, we support dumb IP checksum on transmit */
ndev->features |= NETIF_F_SG;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 09c0e839cca5..d1c32c65db05 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5202,7 +5202,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
/* store the MAC address in CAM */
return do_s2io_prog_unicast(dev, dev->dev_addr);
@@ -5217,7 +5217,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p)
* as defined in errno.h file on failure.
*/
-static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
+static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr)
{
struct s2io_nic *sp = netdev_priv(dev);
register u64 mac_addr = 0, perm_addr = 0;
@@ -7954,7 +7954,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Set the factory defined MAC address initially */
dev->addr_len = ETH_ALEN;
- memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, sp->def_mac_addr[0].mac_addr);
/* initialize number of multicast & unicast MAC entries variables */
if (sp->device_type == XFRAME_I_DEVICE) {
@@ -8566,7 +8566,7 @@ static void s2io_io_resume(struct pci_dev *pdev)
return;
}
- if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
+ if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
s2io_card_down(sp);
pr_err("Can't restore mac addr after reset.\n");
return;
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index 5a6032212c19..a4266d1544ab 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1073,7 +1073,7 @@ static void s2io_reset(struct s2io_nic * sp);
static int s2io_poll_msix(struct napi_struct *napi, int budget);
static int s2io_poll_inta(struct napi_struct *napi, int budget);
static void s2io_init_pci(struct s2io_nic * sp);
-static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr);
+static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr);
static void s2io_alarm_handle(struct timer_list *t);
static irqreturn_t
s2io_msix_ring_handle(int irq, void *dev_id);
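Constifying the addr parameter here is what lets callers keep passing netdev->dev_addr once that pointer becomes const, and it documents that programming the unicast filter only reads the MAC. The calling convention, sketched with a hypothetical register writer:

#include <linux/etherdevice.h>

static int example_prog_unicast(struct net_device *dev, const u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		mac = (mac << 8) | addr[i];	/* read-only use of the MAC */

	return example_write_mac_reg(dev, mac);	/* hypothetical */
}

With this signature, example_prog_unicast(dev, dev->dev_addr) still compiles after dev_addr is turned into a const u8 *.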
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index df4a3f3da83a..1969009a91e7 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1328,7 +1328,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
}
if (unlikely(!is_vxge_card_up(vdev))) {
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return VXGE_HW_OK;
}
@@ -1341,7 +1341,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
return -EINVAL;
}
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return status;
}
@@ -4663,7 +4663,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Store the fw version for ethttool option */
strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
- memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
+ eth_hw_addr_set(vdev->ndev, (u8 *)vdev->vpaths[0].macaddr);
/* Copy the station mac address to the list */
for (i = 0; i < vdev->no_of_vpath; i++) {
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index 605a1617b195..5d3df28c648f 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -305,7 +305,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
return;
}
- ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(nn->dp.netdev, mac_addr);
ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}
diff --git a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
index 2473fb5f75e5..2a5cc64227e9 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
@@ -458,7 +458,7 @@ nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
static void
nfp_abm_stats_calculate(struct nfp_alink_stats *new,
struct nfp_alink_stats *old,
- struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_sync *bstats,
struct gnet_stats_queue *qstats)
{
_bstats_update(bstats, new->tx_bytes - old->tx_bytes,
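gnet_stats_basic_packed was replaced in this cycle by gnet_stats_basic_sync, which embeds a u64_stats_sync so 64-bit counters can be updated tear-free on 32-bit hosts; offload callers such as this one only swap the type, because _bstats_update() keeps its (bstats, bytes, packets) shape. A sketch, assuming the counters were set up elsewhere with gnet_stats_basic_sync_init():

#include <net/gen_stats.h>

static void example_fold_stats(struct gnet_stats_basic_sync *bstats,
			       u64 bytes, u32 packets)
{
	_bstats_update(bstats, bytes, packets);	/* syncs via the embedded u64_stats */
}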
diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c
index 36491835ac65..db297ee4d7ad 100644
--- a/drivers/net/ethernet/netronome/nfp/devlink_param.c
+++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c
@@ -233,13 +233,8 @@ int nfp_devlink_params_register(struct nfp_pf *pf)
if (err <= 0)
return err;
- err = devlink_params_register(devlink, nfp_devlink_params,
- ARRAY_SIZE(nfp_devlink_params));
- if (err)
- return err;
-
- devlink_params_publish(devlink);
- return 0;
+ return devlink_params_register(devlink, nfp_devlink_params,
+ ARRAY_SIZE(nfp_devlink_params));
}
void nfp_devlink_params_unregister(struct nfp_pf *pf)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index c029950a81e2..ac1dcfa1d179 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -830,10 +830,6 @@ static int nfp_flower_init(struct nfp_app *app)
if (err)
goto err_cleanup;
- err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
- if (err)
- goto err_cleanup;
-
if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
nfp_flower_qos_init(app);
@@ -942,7 +938,20 @@ static int nfp_flower_start(struct nfp_app *app)
return err;
}
- return nfp_tunnel_config_start(app);
+ err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
+ if (err)
+ return err;
+
+ err = nfp_tunnel_config_start(app);
+ if (err)
+ goto err_tunnel_config;
+
+ return 0;
+
+err_tunnel_config:
+ flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
+ nfp_flower_setup_indr_tc_release);
+ return err;
}
static void nfp_flower_stop(struct nfp_app *app)
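Registering the indirect TC callback in nfp_flower_start() instead of nfp_flower_init() means the callback can only fire once the app is actually running, and the new error label unwinds it if tunnel configuration fails afterwards. The shape of that unwind, sketched with hypothetical helpers:

static int example_start(struct example_app *app)
{
	int err;

	err = example_register_external_hooks(app);	/* hypothetical */
	if (err)
		return err;

	err = example_tunnel_config_start(app);	/* hypothetical */
	if (err)
		goto err_unregister_hooks;

	return 0;

err_unregister_hooks:
	example_unregister_external_hooks(app);
	return err;
}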
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index ab70179728f6..dfb4468fe287 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -837,7 +837,7 @@ nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
}
static int
-__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
+__nfp_tunnel_offload_mac(struct nfp_app *app, const u8 *mac, u16 idx, bool del)
{
struct nfp_tun_mac_addr_offload payload;
@@ -886,7 +886,7 @@ static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
}
static struct nfp_tun_offloaded_mac *
-nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
+nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, const u8 *mac)
{
struct nfp_flower_priv *priv = app->priv;
@@ -1005,7 +1005,7 @@ err_free_ida:
static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
- u8 *mac, bool mod)
+ const u8 *mac, bool mod)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_flower_repr_priv *repr_priv;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index 2643ea5948f4..154399c5453f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -196,7 +196,7 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
}
reg->dst_lmextn = swreg_lmextn(dst);
- reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+ reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
return 0;
}
@@ -277,7 +277,7 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
}
reg->dst_lmextn = swreg_lmextn(dst);
- reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+ reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
return 0;
}
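Both nfp_asm hunks turn a bitwise OR into a logical OR when folding two flag tests into src_lmextn. If swreg_lmextn() already yields a strict 0/1 the result is identical, but the logical form states the intent and survives the truncation that occurs when a wider flag value is assigned to a one-bit field. A standalone demonstration of that hazard:

#include <stdio.h>

struct reg { unsigned char src_lmextn : 1; };	/* one-bit destination */

int main(void)
{
	struct reg r;
	unsigned int lflag = 0x2, rflag = 0x0;	/* hypothetical raw flags */

	r.src_lmextn = lflag | rflag;	/* 0x2 truncated to 1 bit: flag lost */
	printf("bitwise : %u\n", r.src_lmextn);

	r.src_lmextn = lflag || rflag;	/* normalized to 0 or 1 */
	printf("logical : %u\n", r.src_lmextn);
	return 0;
}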
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 616872928ada..751f76cd4f79 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -55,7 +55,7 @@ nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
return;
}
- ether_addr_copy(netdev->dev_addr, eth_port->mac_addr);
+ eth_hw_addr_set(netdev, eth_port->mac_addr);
ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
}
@@ -701,7 +701,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_unmap;
- devlink_register(devlink);
err = nfp_shared_buf_register(pf);
if (err)
goto err_devlink_unreg;
@@ -731,6 +730,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
goto err_stop_app;
mutex_unlock(&pf->lock);
+ devlink_register(devlink);
return 0;
@@ -748,7 +748,6 @@ err_shared_buf_unreg:
nfp_shared_buf_unregister(pf);
err_devlink_unreg:
cancel_work_sync(&pf->port_refresh_work);
- devlink_unregister(devlink);
nfp_net_pf_app_clean(pf);
err_unmap:
nfp_net_pci_unmap_mem(pf);
@@ -759,6 +758,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
{
struct nfp_net *nn, *next;
+ devlink_unregister(priv_to_devlink(pf));
mutex_lock(&pf->lock);
list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
if (!nfp_net_is_data_vnic(nn))
@@ -775,7 +775,6 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
nfp_devlink_params_unregister(pf);
nfp_shared_buf_unregister(pf);
- devlink_unregister(priv_to_devlink(pf));
nfp_net_pf_free_irqs(pf);
nfp_net_pf_app_clean(pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index c0e2f4394aef..87f2268b16d6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -58,7 +58,7 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
return;
}
- ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(nn->dp.netdev, mac_addr);
ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 346145d3180e..cfeb7620ae20 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1283,7 +1283,7 @@ static int nixge_probe(struct platform_device *pdev)
mac_addr = nixge_get_nvmem_address(&pdev->dev);
if (mac_addr && is_valid_ether_addr(mac_addr)) {
- ether_addr_copy(ndev->dev_addr, mac_addr);
+ eth_hw_addr_set(ndev, mac_addr);
kfree(mac_addr);
} else {
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index ef3fb4cc90af..9b530d7509a4 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3175,7 +3175,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
return -EADDRNOTAVAIL;
/* synchronized against open : rtnl_lock() held by caller */
- memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, macaddr->sa_data);
if (netif_running(dev)) {
netif_tx_lock_bh(dev);
@@ -5711,6 +5711,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
u32 phystate_orig = 0, phystate;
int phyinitialized = 0;
static int printed_version;
+ u8 mac[ETH_ALEN];
if (!printed_version++)
pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
@@ -5884,50 +5885,52 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
txreg = readl(base + NvRegTransmitPoll);
if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
/* mac address is already in correct order */
- dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
- dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
- dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
- dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
- dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
- dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+ mac[0] = (np->orig_mac[0] >> 0) & 0xff;
+ mac[1] = (np->orig_mac[0] >> 8) & 0xff;
+ mac[2] = (np->orig_mac[0] >> 16) & 0xff;
+ mac[3] = (np->orig_mac[0] >> 24) & 0xff;
+ mac[4] = (np->orig_mac[1] >> 0) & 0xff;
+ mac[5] = (np->orig_mac[1] >> 8) & 0xff;
} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
/* mac address is already in correct order */
- dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
- dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
- dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
- dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
- dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
- dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+ mac[0] = (np->orig_mac[0] >> 0) & 0xff;
+ mac[1] = (np->orig_mac[0] >> 8) & 0xff;
+ mac[2] = (np->orig_mac[0] >> 16) & 0xff;
+ mac[3] = (np->orig_mac[0] >> 24) & 0xff;
+ mac[4] = (np->orig_mac[1] >> 0) & 0xff;
+ mac[5] = (np->orig_mac[1] >> 8) & 0xff;
/*
* Set orig mac address back to the reversed version.
* This flag will be cleared during low power transition.
* Therefore, we should always put back the reversed address.
*/
- np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
- (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
- np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
+ np->orig_mac[0] = (mac[5] << 0) + (mac[4] << 8) +
+ (mac[3] << 16) + (mac[2] << 24);
+ np->orig_mac[1] = (mac[1] << 0) + (mac[0] << 8);
} else {
/* need to reverse mac address to correct order */
- dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
- dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
- dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
- dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
- dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
- dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ mac[0] = (np->orig_mac[1] >> 8) & 0xff;
+ mac[1] = (np->orig_mac[1] >> 0) & 0xff;
+ mac[2] = (np->orig_mac[0] >> 24) & 0xff;
+ mac[3] = (np->orig_mac[0] >> 16) & 0xff;
+ mac[4] = (np->orig_mac[0] >> 8) & 0xff;
+ mac[5] = (np->orig_mac[0] >> 0) & 0xff;
writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
dev_dbg(&pci_dev->dev,
"%s: set workaround bit for reversed mac addr\n",
__func__);
}
- if (!is_valid_ether_addr(dev->dev_addr)) {
+ if (is_valid_ether_addr(mac)) {
+ eth_hw_addr_set(dev, mac);
+ } else {
/*
* Bad mac address. At least one bios sets the mac address
* to 01:23:45:67:89:ab
*/
dev_err(&pci_dev->dev,
"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
- dev->dev_addr);
+ mac);
eth_hw_addr_random(dev);
dev_err(&pci_dev->dev,
"Using random MAC address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index d29fe562b3de..a63cc295b979 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -419,7 +419,7 @@ struct netdata_local {
/*
* MAC support functions
*/
-static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
+static void __lpc_set_mac(struct netdata_local *pldat, const u8 *mac)
{
u32 tmp;
@@ -1093,7 +1093,7 @@ static int lpc_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, addr->sa_data);
spin_lock_irqsave(&pldat->lock, flags);
@@ -1232,6 +1232,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
struct net_device *ndev;
dma_addr_t dma_handle;
struct resource *res;
+ u8 addr[ETH_ALEN];
int irq, ret;
/* Setup network interface for RMII or MII mode */
@@ -1347,10 +1348,11 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* Get MAC address from current HW setting (POR state is all zeros) */
- __lpc_get_mac(pldat, ndev->dev_addr);
+ __lpc_get_mac(pldat, addr);
+ eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
- of_get_mac_address(np, ndev->dev_addr);
+ of_get_ethdev_address(np, ndev);
}
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
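lpc_eth now reads the power-on MAC into a stack buffer, installs it, and only then falls back to the device tree (via the new of_get_ethdev_address() netdev variant) and finally to a random address. The resulting priority chain, sketched with a hypothetical hardware read:

#include <linux/etherdevice.h>
#include <linux/of_net.h>

static void example_pick_mac(struct net_device *ndev, struct device_node *np)
{
	u8 addr[ETH_ALEN];

	example_read_mac_from_hw(addr);	/* hypothetical: POR register value */
	eth_hw_addr_set(ndev, addr);

	if (!is_valid_ether_addr(ndev->dev_addr))
		of_get_ethdev_address(np, ndev);	/* try the device tree */

	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);	/* last resort */
}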
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index ec3e558f890e..71d234291fc5 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2137,7 +2137,7 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
if (!is_valid_ether_addr(skaddr->sa_data)) {
ret_val = -EADDRNOTAVAIL;
} else {
- memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, skaddr->sa_data);
memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
ret_val = 0;
@@ -2555,7 +2555,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
goto err_free_adapter;
}
- memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
/*
* If the MAC is invalid (or just missing), display a warning
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 1a6336a56d3d..9c408328be0d 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -592,6 +592,7 @@ static int hamachi_init_one(struct pci_dev *pdev,
void *ring_space;
dma_addr_t ring_dma;
int ret = -ENOMEM;
+ u8 addr[ETH_ALEN];
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
@@ -628,8 +629,8 @@ static int hamachi_init_one(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = 1 ? read_eeprom(ioaddr, 4 + i)
- : readb(ioaddr + StationAddr + i);
+ addr[i] = read_eeprom(ioaddr, 4 + i);
+ eth_hw_addr_set(dev, addr);
#if ! defined(final_version)
if (hamachi_debug > 4)
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index f5cd8f51be7c..12105f62cbdd 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -384,6 +384,7 @@ static int yellowfin_init_one(struct pci_dev *pdev,
#else
int bar = 1;
#endif
+ u8 addr[ETH_ALEN];
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
@@ -416,12 +417,13 @@ static int yellowfin_init_one(struct pci_dev *pdev,
if (drv_flags & DontUseEeprom)
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
+ addr[i] = ioread8(ioaddr + StnAddr + i);
else {
int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
+ addr[i] = read_eeprom(ioaddr, ee_offset + i);
}
+ eth_hw_addr_set(dev, addr);
/* Reset the chip. */
iowrite32(0x80000000, ioaddr + DMACtrl);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 7e096b2888b9..f0ace3a0e85c 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -221,7 +221,7 @@ static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
adr0 = dev->dev_addr[2] << 24 |
dev->dev_addr[3] << 16 |
@@ -1722,7 +1722,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENODEV;
goto out;
}
- memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
+ eth_hw_addr_set(dev, mac->mac_addr);
ret = mac_to_intf(mac);
if (ret < 0) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 66204106f83e..5e25411ff02f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -19,6 +19,7 @@ struct ionic_lif;
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
#define DEVCMD_TIMEOUT 10
+#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100)
#define IONIC_PHC_UPDATE_NS 10000000000 /* 10s in nanoseconds */
#define NORMAL_PPB 1000000000 /* one billion parts per billion */
@@ -69,8 +70,13 @@ struct ionic_admin_ctx {
};
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
-int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err);
+int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
+ const int err, const bool do_msg);
int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
+int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
+void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
+ u8 status, int err);
+
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
int ionic_set_dma_mask(struct ionic *ionic);
int ionic_setup(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index 39f59849720d..c58217027564 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -143,8 +143,6 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
debugfs_create_u32("qid", 0400, q_dentry, &q->hw_index);
debugfs_create_u32("qtype", 0400, q_dentry, &q->hw_type);
debugfs_create_u64("drop", 0400, q_dentry, &q->drop);
- debugfs_create_u64("stop", 0400, q_dentry, &q->stop);
- debugfs_create_u64("wake", 0400, q_dentry, &q->wake);
debugfs_create_file("tail", 0400, q_dentry, q, &q_tail_fops);
debugfs_create_file("head", 0400, q_dentry, q, &q_head_fops);
@@ -228,6 +226,50 @@ static int netdev_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(netdev);
+static int lif_filters_show(struct seq_file *seq, void *v)
+{
+ struct ionic_lif *lif = seq->private;
+ struct ionic_rx_filter *f;
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ unsigned int i;
+
+ seq_puts(seq, "id flow state type filter\n");
+ spin_lock_bh(&lif->rx_filters.lock);
+ for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
+ head = &lif->rx_filters.by_id[i];
+ hlist_for_each_entry_safe(f, tmp, head, by_id) {
+ switch (le16_to_cpu(f->cmd.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x vlan 0x%04x\n",
+ f->filter_id, f->flow_id, f->state,
+ le16_to_cpu(f->cmd.vlan.vlan));
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x mac %pM\n",
+ f->filter_id, f->flow_id, f->state,
+ f->cmd.mac.addr);
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC_VLAN:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x macvl 0x%04x %pM\n",
+ f->filter_id, f->flow_id, f->state,
+ le16_to_cpu(f->cmd.vlan.vlan),
+ f->cmd.mac.addr);
+ break;
+ case IONIC_RX_FILTER_STEER_PKTCLASS:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x rxstr 0x%llx\n",
+ f->filter_id, f->flow_id, f->state,
+ le64_to_cpu(f->cmd.pkt_class));
+ break;
+ }
+ }
+ }
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(lif_filters);
+
void ionic_debugfs_add_lif(struct ionic_lif *lif)
{
struct dentry *lif_dentry;
@@ -239,6 +281,8 @@ void ionic_debugfs_add_lif(struct ionic_lif *lif)
debugfs_create_file("netdev", 0400, lif->dentry,
lif->netdev, &netdev_fops);
+ debugfs_create_file("filters", 0400, lif->dentry,
+ lif, &lif_filters_fops);
}
void ionic_debugfs_del_lif(struct ionic_lif *lif)
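The new "filters" node uses the stock read-only debugfs recipe: a *_show() routine walks the state under the owning lock, and DEFINE_SHOW_ATTRIBUTE() generates the matching file_operations. The same plumbing in miniature, names hypothetical:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct example_priv { u32 count; };

static int example_show(struct seq_file *seq, void *v)
{
	struct example_priv *priv = seq->private;

	seq_printf(seq, "count %u\n", priv->count);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example);	/* emits example_fops */

static void example_debugfs_add(struct example_priv *priv, struct dentry *dir)
{
	debugfs_create_file("example", 0400, dir, priv, &example_fops);
}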
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 0d6858ab511c..d57e80d44c9d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -581,7 +581,6 @@ unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
cq->done_color = !cq->done_color;
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
cq_info = &cq->info[cq->tail_idx];
- DEBUG_STATS_CQE_CNT(cq);
if (++work_done >= work_to_do)
break;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 8311086fb1f4..e5acf3bd62b2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -220,9 +220,6 @@ struct ionic_queue {
unsigned int num_descs;
unsigned int max_sg_elems;
u64 features;
- u64 dbell_count;
- u64 stop;
- u64 wake;
u64 drop;
struct ionic_dev *idev;
unsigned int type;
@@ -269,7 +266,6 @@ struct ionic_cq {
bool done_color;
unsigned int num_descs;
unsigned int desc_size;
- u64 compl_count;
void *base;
dma_addr_t base_pa;
} ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index 93282394d332..4297ed9024c0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -82,17 +82,16 @@ int ionic_devlink_register(struct ionic *ionic)
struct devlink_port_attrs attrs = {};
int err;
- devlink_register(dl);
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
devlink_port_attrs_set(&ionic->dl_port, &attrs);
err = devlink_port_register(dl, &ionic->dl_port, 0);
if (err) {
dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
- devlink_unregister(dl);
return err;
}
devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev);
+ devlink_register(dl);
return 0;
}
@@ -100,6 +99,6 @@ void ionic_devlink_unregister(struct ionic *ionic)
{
struct devlink *dl = priv_to_devlink(ionic);
- devlink_port_unregister(&ionic->dl_port);
devlink_unregister(dl);
+ devlink_port_unregister(&ionic->dl_port);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 3de1a03839e2..6b45cae39a20 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -11,13 +11,6 @@
#include "ionic_ethtool.h"
#include "ionic_stats.h"
-static const char ionic_priv_flags_strings[][ETH_GSTRING_LEN] = {
-#define IONIC_PRIV_F_SW_DBG_STATS BIT(0)
- "sw-dbg-stats",
-};
-
-#define IONIC_PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings)
-
static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf)
{
u32 i;
@@ -59,9 +52,6 @@ static int ionic_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_STATS:
count = ionic_get_stats_count(lif);
break;
- case ETH_SS_PRIV_FLAGS:
- count = IONIC_PRIV_FLAGS_COUNT;
- break;
}
return count;
}
@@ -75,10 +65,6 @@ static void ionic_get_strings(struct net_device *netdev,
case ETH_SS_STATS:
ionic_get_stats_strings(lif, buf);
break;
- case ETH_SS_PRIV_FLAGS:
- memcpy(buf, ionic_priv_flags_strings,
- IONIC_PRIV_FLAGS_COUNT * ETH_GSTRING_LEN);
- break;
}
}
@@ -691,28 +677,6 @@ static int ionic_set_channels(struct net_device *netdev,
return err;
}
-static u32 ionic_get_priv_flags(struct net_device *netdev)
-{
- struct ionic_lif *lif = netdev_priv(netdev);
- u32 priv_flags = 0;
-
- if (test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- priv_flags |= IONIC_PRIV_F_SW_DBG_STATS;
-
- return priv_flags;
-}
-
-static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags)
-{
- struct ionic_lif *lif = netdev_priv(netdev);
-
- clear_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
- if (priv_flags & IONIC_PRIV_F_SW_DBG_STATS)
- set_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
-
- return 0;
-}
-
static int ionic_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *info, u32 *rules)
{
@@ -1013,8 +977,6 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.get_strings = ionic_get_strings,
.get_ethtool_stats = ionic_get_stats,
.get_sset_count = ionic_get_sset_count,
- .get_priv_flags = ionic_get_priv_flags,
- .set_priv_flags = ionic_set_priv_flags,
.get_rxnfc = ionic_get_rxnfc,
.get_rxfh_indir_size = ionic_get_rxfh_indir_size,
.get_rxfh_key_size = ionic_get_rxfh_key_size,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 381966e8f557..63f8a8163b5f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -287,11 +287,9 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
return ionic_adminq_post_wait(lif, &ctx);
}
-static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
+static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
{
struct ionic_queue *q;
- struct ionic_lif *lif;
- int err = 0;
struct ionic_admin_ctx ctx = {
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
@@ -301,11 +299,12 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
},
};
- if (!qcq)
+ if (!qcq) {
+ netdev_err(lif->netdev, "%s: bad qcq\n", __func__);
return -ENXIO;
+ }
q = &qcq->q;
- lif = q->lif;
if (qcq->flags & IONIC_QCQ_F_INTR) {
struct ionic_dev *idev = &lif->ionic->idev;
@@ -318,17 +317,19 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
napi_disable(&qcq->napi);
}
- if (send_to_hw) {
- ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
- ctx.cmd.q_control.type = q->type;
- ctx.cmd.q_control.index = cpu_to_le32(q->index);
- dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
- ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+ /* If there was a previous fw communication error, don't bother with
+ * sending the adminq command and just return the same error value.
+ */
+ if (fw_err == -ETIMEDOUT || fw_err == -ENXIO)
+ return fw_err;
- err = ionic_adminq_post_wait(lif, &ctx);
- }
+ ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
+ ctx.cmd.q_control.type = q->type;
+ ctx.cmd.q_control.index = cpu_to_le32(q->index);
+ dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
+ ctx.cmd.q_control.index, ctx.cmd.q_control.type);
- return err;
+ return ionic_adminq_post_wait(lif, &ctx);
}
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
@@ -1241,135 +1242,6 @@ void ionic_get_stats64(struct net_device *netdev,
ns->tx_errors = ns->tx_aborted_errors;
}
-int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
-{
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_add = {
- .opcode = IONIC_CMD_RX_FILTER_ADD,
- .lif_index = cpu_to_le16(lif->index),
- .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
- },
- };
- int nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
- bool mc = is_multicast_ether_addr(addr);
- struct ionic_rx_filter *f;
- int err = 0;
-
- memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
-
- spin_lock_bh(&lif->rx_filters.lock);
- f = ionic_rx_filter_by_addr(lif, addr);
- if (f) {
- /* don't bother if we already have it and it is sync'd */
- if (f->state == IONIC_FILTER_STATE_SYNCED) {
- spin_unlock_bh(&lif->rx_filters.lock);
- return 0;
- }
-
- /* mark preemptively as sync'd to block any parallel attempts */
- f->state = IONIC_FILTER_STATE_SYNCED;
- } else {
- /* save as SYNCED to catch any DEL requests while processing */
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_SYNCED);
- }
- spin_unlock_bh(&lif->rx_filters.lock);
- if (err)
- return err;
-
- netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);
-
- /* Don't bother with the write to FW if we know there's no room,
- * we can try again on the next sync attempt.
- */
- if ((lif->nucast + lif->nmcast) >= nfilters)
- err = -ENOSPC;
- else
- err = ionic_adminq_post_wait(lif, &ctx);
-
- spin_lock_bh(&lif->rx_filters.lock);
- if (err && err != -EEXIST) {
- /* set the state back to NEW so we can try again later */
- f = ionic_rx_filter_by_addr(lif, addr);
- if (f && f->state == IONIC_FILTER_STATE_SYNCED)
- f->state = IONIC_FILTER_STATE_NEW;
-
- spin_unlock_bh(&lif->rx_filters.lock);
-
- if (err == -ENOSPC)
- return 0;
- else
- return err;
- }
-
- if (mc)
- lif->nmcast++;
- else
- lif->nucast++;
-
- f = ionic_rx_filter_by_addr(lif, addr);
- if (f && f->state == IONIC_FILTER_STATE_OLD) {
- /* Someone requested a delete while we were adding
- * so update the filter info with the results from the add
- * and the data will be there for the delete on the next
- * sync cycle.
- */
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_OLD);
- } else {
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_SYNCED);
- }
-
- spin_unlock_bh(&lif->rx_filters.lock);
-
- return err;
-}
-
-int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
-{
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_del = {
- .opcode = IONIC_CMD_RX_FILTER_DEL,
- .lif_index = cpu_to_le16(lif->index),
- },
- };
- struct ionic_rx_filter *f;
- int state;
- int err;
-
- spin_lock_bh(&lif->rx_filters.lock);
- f = ionic_rx_filter_by_addr(lif, addr);
- if (!f) {
- spin_unlock_bh(&lif->rx_filters.lock);
- return -ENOENT;
- }
-
- netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
- addr, f->filter_id);
-
- state = f->state;
- ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
- ionic_rx_filter_free(lif, f);
-
- if (is_multicast_ether_addr(addr) && lif->nmcast)
- lif->nmcast--;
- else if (!is_multicast_ether_addr(addr) && lif->nucast)
- lif->nucast--;
-
- spin_unlock_bh(&lif->rx_filters.lock);
-
- if (state != IONIC_FILTER_STATE_NEW) {
- err = ionic_adminq_post_wait(lif, &ctx);
- if (err && err != -EEXIST)
- return err;
- }
-
- return 0;
-}
-
static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
@@ -1377,6 +1249,10 @@ static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
+ /* Don't delete our own address from the uc list */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
}
@@ -1401,7 +1277,7 @@ void ionic_lif_rx_mode(struct ionic_lif *lif)
rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
- /* sync the mac filters */
+ /* sync the filters */
ionic_rx_filter_sync(lif);
/* check for overflow state
@@ -1411,14 +1287,12 @@ void ionic_lif_rx_mode(struct ionic_lif *lif)
* to see if we can disable NIC PROMISC
*/
nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
- if ((lif->nucast + lif->nmcast) >= nfilters) {
+
+ if (((lif->nucast + lif->nmcast) >= nfilters) ||
+ (lif->max_vlans && lif->nvlans >= lif->max_vlans)) {
rx_mode |= IONIC_RX_MODE_F_PROMISC;
rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
- lif->uc_overflow = true;
- lif->mc_overflow = true;
- } else if (lif->uc_overflow) {
- lif->uc_overflow = false;
- lif->mc_overflow = false;
+ } else {
if (!(nd_flags & IFF_PROMISC))
rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
if (!(nd_flags & IFF_ALLMULTI))
@@ -1803,59 +1677,30 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
u16 vid)
{
struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_add = {
- .opcode = IONIC_CMD_RX_FILTER_ADD,
- .lif_index = cpu_to_le16(lif->index),
- .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
- .vlan.vlan = cpu_to_le16(vid),
- },
- };
int err;
- netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
- err = ionic_adminq_post_wait(lif, &ctx);
+ err = ionic_lif_vlan_add(lif, vid);
if (err)
return err;
- spin_lock_bh(&lif->rx_filters.lock);
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_SYNCED);
- spin_unlock_bh(&lif->rx_filters.lock);
+ ionic_lif_rx_mode(lif);
- return err;
+ return 0;
}
static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
u16 vid)
{
struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_del = {
- .opcode = IONIC_CMD_RX_FILTER_DEL,
- .lif_index = cpu_to_le16(lif->index),
- },
- };
- struct ionic_rx_filter *f;
-
- spin_lock_bh(&lif->rx_filters.lock);
-
- f = ionic_rx_filter_by_vlan(lif, vid);
- if (!f) {
- spin_unlock_bh(&lif->rx_filters.lock);
- return -ENOENT;
- }
+ int err;
- netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
- vid, f->filter_id);
+ err = ionic_lif_vlan_del(lif, vid);
+ if (err)
+ return err;
- ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
- ionic_rx_filter_free(lif, f);
- spin_unlock_bh(&lif->rx_filters.lock);
+ ionic_lif_rx_mode(lif);
- return ionic_adminq_post_wait(lif, &ctx);
+ return 0;
}
int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
@@ -1947,19 +1792,19 @@ static void ionic_txrx_disable(struct ionic_lif *lif)
if (lif->txqcqs) {
for (i = 0; i < lif->nxqs; i++)
- err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->txqcqs[i], err);
}
if (lif->hwstamp_txq)
- err = ionic_qcq_disable(lif->hwstamp_txq, (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->hwstamp_txq, err);
if (lif->rxqcqs) {
for (i = 0; i < lif->nxqs; i++)
- err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
}
if (lif->hwstamp_rxq)
- err = ionic_qcq_disable(lif->hwstamp_rxq, (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err);
ionic_lif_quiesce(lif);
}
@@ -2159,7 +2004,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err = ionic_qcq_enable(lif->txqcqs[i]);
if (err) {
- derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
+ derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
goto err_out;
}
}
@@ -2181,13 +2026,13 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err_out_hwstamp_tx:
if (lif->hwstamp_rxq)
- derr = ionic_qcq_disable(lif->hwstamp_rxq, (derr != -ETIMEDOUT));
+ derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr);
err_out_hwstamp_rx:
i = lif->nxqs;
err_out:
while (i--) {
- derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
- derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
+ derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr);
+ derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
}
return err;
@@ -2890,6 +2735,9 @@ int ionic_lif_alloc(struct ionic *ionic)
snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
+ mutex_init(&lif->queue_lock);
+ mutex_init(&lif->config_lock);
+
spin_lock_init(&lif->adminq_lock);
spin_lock_init(&lif->deferred.lock);
@@ -2903,7 +2751,7 @@ int ionic_lif_alloc(struct ionic *ionic)
if (!lif->info) {
dev_err(dev, "Failed to allocate lif info, aborting\n");
err = -ENOMEM;
- goto err_out_free_netdev;
+ goto err_out_free_mutex;
}
ionic_debugfs_add_lif(lif);
@@ -2938,6 +2786,9 @@ err_out_free_lif_info:
dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
lif->info = NULL;
lif->info_pa = 0;
+err_out_free_mutex:
+ mutex_destroy(&lif->config_lock);
+ mutex_destroy(&lif->queue_lock);
err_out_free_netdev:
free_netdev(lif->netdev);
lif = NULL;
@@ -2968,11 +2819,10 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
netif_device_detach(lif->netdev);
+ mutex_lock(&lif->queue_lock);
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
- mutex_lock(&lif->queue_lock);
ionic_stop_queues(lif);
- mutex_unlock(&lif->queue_lock);
}
if (netif_running(lif->netdev)) {
@@ -2983,6 +2833,8 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
ionic_reset(ionic);
ionic_qcqs_free(lif);
+ mutex_unlock(&lif->queue_lock);
+
dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}
@@ -3006,9 +2858,12 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
err = ionic_port_init(ionic);
if (err)
goto err_out;
+
+ mutex_lock(&lif->queue_lock);
+
err = ionic_qcqs_alloc(lif);
if (err)
- goto err_out;
+ goto err_unlock;
err = ionic_lif_init(lif);
if (err)
@@ -3029,6 +2884,8 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
goto err_txrx_free;
}
+ mutex_unlock(&lif->queue_lock);
+
clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
ionic_link_status_check_request(lif, CAN_SLEEP);
netif_device_attach(lif->netdev);
@@ -3045,6 +2902,8 @@ err_lifs_deinit:
ionic_lif_deinit(lif);
err_qcqs_free:
ionic_qcqs_free(lif);
+err_unlock:
+ mutex_unlock(&lif->queue_lock);
err_out:
dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}
@@ -3078,6 +2937,9 @@ void ionic_lif_free(struct ionic_lif *lif)
kfree(lif->dbid_inuse);
lif->dbid_inuse = NULL;
+ mutex_destroy(&lif->config_lock);
+ mutex_destroy(&lif->queue_lock);
+
/* free netdev & lif */
ionic_debugfs_del_lif(lif);
free_netdev(lif->netdev);
@@ -3100,8 +2962,6 @@ void ionic_lif_deinit(struct ionic_lif *lif)
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
ionic_lif_qcq_deinit(lif, lif->adminqcq);
- mutex_destroy(&lif->config_lock);
- mutex_destroy(&lif->queue_lock);
ionic_lif_reset(lif);
}
@@ -3267,8 +3127,6 @@ int ionic_lif_init(struct ionic_lif *lif)
return err;
lif->hw_index = le16_to_cpu(comp.hw_index);
- mutex_init(&lif->queue_lock);
- mutex_init(&lif->config_lock);
/* now that we have the hw_index we can figure out our doorbell page */
lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
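Moving queue_lock and config_lock from lif_init/deinit to lif_alloc/free ties the locks to the lifetime of the object rather than to its initialized state, which is what lets the FW down/up handlers above hold queue_lock across qcq free and re-alloc even while the lif is torn down. The pairing, sketched:

#include <linux/mutex.h>

struct example_lif { struct mutex queue_lock; };	/* hypothetical */

static void example_lif_alloc(struct example_lif *lif)
{
	mutex_init(&lif->queue_lock);	/* valid for the object's whole life */
}

static void example_lif_free(struct example_lif *lif)
{
	mutex_destroy(&lif->queue_lock);	/* only once nothing can take it */
}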
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 4915184f3efb..9f7ab2f17f93 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -14,9 +14,6 @@
#define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */
#define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */
-#define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1)
-#define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1)
-
#define ADD_ADDR true
#define DEL_ADDR false
#define CAN_SLEEP true
@@ -37,7 +34,6 @@ struct ionic_tx_stats {
u64 clean;
u64 linearize;
u64 crc32_csum;
- u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR];
u64 dma_map_err;
u64 hwstamp_valid;
u64 hwstamp_invalid;
@@ -48,7 +44,6 @@ struct ionic_rx_stats {
u64 bytes;
u64 csum_none;
u64 csum_complete;
- u64 buffers_posted;
u64 dropped;
u64 vlan_stripped;
u64 csum_error;
@@ -65,11 +60,6 @@ struct ionic_rx_stats {
#define IONIC_QCQ_F_RX_STATS BIT(4)
#define IONIC_QCQ_F_NOTIFYQ BIT(5)
-struct ionic_napi_stats {
- u64 poll_count;
- u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR];
-};
-
struct ionic_qcq {
void *q_base;
dma_addr_t q_base_pa;
@@ -85,7 +75,6 @@ struct ionic_qcq {
struct ionic_cq cq;
struct ionic_intr_info intr;
struct napi_struct napi;
- struct ionic_napi_stats napi_stats;
unsigned int flags;
struct dentry *dentry;
};
@@ -142,7 +131,6 @@ struct ionic_lif_sw_stats {
enum ionic_lif_state_flags {
IONIC_LIF_F_INITED,
- IONIC_LIF_F_SW_DEBUG_STATS,
IONIC_LIF_F_UP,
IONIC_LIF_F_LINK_CHECK_REQUESTED,
IONIC_LIF_F_FILTER_SYNC_NEEDED,
@@ -201,11 +189,11 @@ struct ionic_lif {
u16 rx_mode;
u64 hw_features;
bool registered;
- bool mc_overflow;
- bool uc_overflow;
u16 lif_type;
unsigned int nmcast;
unsigned int nucast;
+ unsigned int nvlans;
+ unsigned int max_vlans;
char name[IONIC_LIF_NAME_MAX_SZ];
union ionic_lif_identity *identity;
@@ -350,37 +338,4 @@ int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
void ionic_lif_rx_mode(struct ionic_lif *lif);
int ionic_reconfigure_queues(struct ionic_lif *lif,
struct ionic_queue_params *qparam);
-
-static inline void debug_stats_txq_post(struct ionic_queue *q, bool dbell)
-{
- struct ionic_txq_desc *desc = &q->txq[q->head_idx];
- u8 num_sg_elems;
-
- q->dbell_count += dbell;
-
- num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
- & IONIC_TXQ_DESC_NSGE_MASK);
- if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1))
- num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1;
-
- q->lif->txqstats[q->index].sg_cntr[num_sg_elems]++;
-}
-
-static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
- unsigned int work_done)
-{
- qcq->napi_stats.poll_count++;
-
- if (work_done > (IONIC_MAX_NUM_NAPI_CNTR - 1))
- work_done = IONIC_MAX_NUM_NAPI_CNTR - 1;
-
- qcq->napi_stats.work_done_cntr[work_done]++;
-}
-
-#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++)
-#define DEBUG_STATS_RX_BUFF_CNT(q) ((q)->lif->rxqstats[q->index].buffers_posted++)
-#define DEBUG_STATS_TXQ_POST(q, dbell) debug_stats_txq_post(q, dbell)
-#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \
- debug_stats_napi_poll(qcq, work_done)
-
#endif /* _IONIC_LIF_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 6f07bf509efe..875f4ec42efe 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -7,6 +7,7 @@
#include <linux/netdevice.h>
#include <linux/utsname.h>
#include <generated/utsrelease.h>
+#include <linux/ctype.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -211,24 +212,28 @@ static void ionic_adminq_flush(struct ionic_lif *lif)
spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
}
+void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
+ u8 status, int err)
+{
+ netdev_err(lif->netdev, "%s (%d) failed: %s (%d)\n",
+ ionic_opcode_to_str(opcode), opcode,
+ ionic_error_to_str(status), err);
+}
+
static int ionic_adminq_check_err(struct ionic_lif *lif,
struct ionic_admin_ctx *ctx,
- bool timeout)
+ const bool timeout,
+ const bool do_msg)
{
- struct net_device *netdev = lif->netdev;
- const char *opcode_str;
- const char *status_str;
int err = 0;
if (ctx->comp.comp.status || timeout) {
- opcode_str = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
- status_str = ionic_error_to_str(ctx->comp.comp.status);
err = timeout ? -ETIMEDOUT :
ionic_error_to_errno(ctx->comp.comp.status);
- netdev_err(netdev, "%s (%d) failed: %s (%d)\n",
- opcode_str, ctx->cmd.cmd.opcode,
- timeout ? "TIMEOUT" : status_str, err);
+ if (do_msg)
+ ionic_adminq_netdev_err_print(lif, ctx->cmd.cmd.opcode,
+ ctx->comp.comp.status, err);
if (timeout)
ionic_adminq_flush(lif);
@@ -297,24 +302,52 @@ err_out:
return err;
}
-int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err)
+int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
+ const int err, const bool do_msg)
{
struct net_device *netdev = lif->netdev;
+ unsigned long time_limit;
+ unsigned long time_start;
+ unsigned long time_done;
unsigned long remaining;
const char *name;
+ name = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
+
if (err) {
- if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
- name = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
+ if (do_msg && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
netdev_err(netdev, "Posting of %s (%d) failed: %d\n",
name, ctx->cmd.cmd.opcode, err);
- }
return err;
}
- remaining = wait_for_completion_timeout(&ctx->work,
- HZ * (ulong)DEVCMD_TIMEOUT);
- return ionic_adminq_check_err(lif, ctx, (remaining == 0));
+ time_start = jiffies;
+ time_limit = time_start + HZ * (ulong)DEVCMD_TIMEOUT;
+ do {
+ remaining = wait_for_completion_timeout(&ctx->work,
+ IONIC_ADMINQ_TIME_SLICE);
+
+ /* check for done */
+ if (remaining)
+ break;
+
+ /* interrupt the wait if FW stopped */
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ if (do_msg)
+ netdev_err(netdev, "%s (%d) interrupted, FW in reset\n",
+ name, ctx->cmd.cmd.opcode);
+ return -ENXIO;
+ }
+
+ } while (time_before(jiffies, time_limit));
+ time_done = jiffies;
+
+ dev_dbg(lif->ionic->dev, "%s: elapsed %d msecs\n",
+ __func__, jiffies_to_msecs(time_done - time_start));
+
+ return ionic_adminq_check_err(lif, ctx,
+ time_after_eq(time_done, time_limit),
+ do_msg);
}
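Rather than sleeping for the full DEVCMD_TIMEOUT in one go, the wait is now sliced into IONIC_ADMINQ_TIME_SLICE chunks so a firmware reset can abort a pending command early instead of pinning the caller for the whole budget. The loop's skeleton, with the reset test abstracted into a hypothetical predicate:

#include <linux/completion.h>
#include <linux/jiffies.h>

static int example_wait(struct completion *done, bool (*fw_stopped)(void))
{
	unsigned long limit = jiffies + 10 * HZ;	/* overall budget */

	do {
		if (wait_for_completion_timeout(done, msecs_to_jiffies(100)))
			return 0;	/* completed within this slice */
		if (fw_stopped())
			return -ENXIO;	/* FW reset: give up immediately */
	} while (time_before(jiffies, limit));

	return -ETIMEDOUT;
}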
int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
@@ -323,7 +356,16 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
err = ionic_adminq_post(lif, ctx);
- return ionic_adminq_wait(lif, ctx, err);
+ return ionic_adminq_wait(lif, ctx, err, true);
+}
+
+int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
+{
+ int err;
+
+ err = ionic_adminq_post(lif, ctx);
+
+ return ionic_adminq_wait(lif, ctx, err, false);
}
static void ionic_dev_cmd_clean(struct ionic *ionic)
@@ -450,13 +492,23 @@ int ionic_identify(struct ionic *ionic)
}
mutex_unlock(&ionic->dev_cmd_lock);
- dev_info(ionic->dev, "FW: %s\n", idev->dev_info.fw_version);
-
if (err) {
- dev_err(ionic->dev, "Cannot identify ionic: %dn", err);
+ dev_err(ionic->dev, "Cannot identify ionic: %d\n", err);
goto err_out;
}
+ if (isprint(idev->dev_info.fw_version[0]) &&
+ isascii(idev->dev_info.fw_version[0]))
+ dev_info(ionic->dev, "FW: %.*s\n",
+ (int)(sizeof(idev->dev_info.fw_version) - 1),
+ idev->dev_info.fw_version);
+ else
+ dev_info(ionic->dev, "FW: (invalid string) 0x%02x 0x%02x 0x%02x 0x%02x ...\n",
+ (u8)idev->dev_info.fw_version[0],
+ (u8)idev->dev_info.fw_version[1],
+ (u8)idev->dev_info.fw_version[2],
+ (u8)idev->dev_info.fw_version[3]);
+
err = ionic_lif_identify(ionic, IONIC_LIF_TYPE_CLASSIC,
&ionic->ident.lif);
if (err) {
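Logging fw_version only after the identify succeeds, and only when its first byte is printable ASCII, keeps uninitialized or binary garbage out of the kernel log; the %.*s width also bounds the read if the string lacks a NUL. The guard as a standalone sketch:

#include <ctype.h>
#include <stdio.h>

static void print_fw_version(const char fw[32])
{
	unsigned char c = (unsigned char)fw[0];

	if (isascii(c) && isprint(c))
		printf("FW: %.*s\n", 31, fw);	/* never reads past the buffer */
	else
		printf("FW: (invalid string) 0x%02x ...\n", c);
}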
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index eed2db69d708..887046838b3b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -348,7 +348,7 @@ static int ionic_phc_adjfine(struct ptp_clock_info *info, long scaled_ppm)
spin_unlock_irqrestore(&phc->lock, irqflags);
- return ionic_adminq_wait(phc->lif, &ctx, err);
+ return ionic_adminq_wait(phc->lif, &ctx, err, true);
}
static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta)
@@ -373,7 +373,7 @@ static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta)
spin_unlock_irqrestore(&phc->lock, irqflags);
- return ionic_adminq_wait(phc->lif, &ctx, err);
+ return ionic_adminq_wait(phc->lif, &ctx, err, true);
}
static int ionic_phc_settime64(struct ptp_clock_info *info,
@@ -402,7 +402,7 @@ static int ionic_phc_settime64(struct ptp_clock_info *info,
spin_unlock_irqrestore(&phc->lock, irqflags);
- return ionic_adminq_wait(phc->lif, &ctx, err);
+ return ionic_adminq_wait(phc->lif, &ctx, err, true);
}
static int ionic_phc_gettimex64(struct ptp_clock_info *info,
@@ -459,7 +459,7 @@ static long ionic_phc_aux_work(struct ptp_clock_info *info)
spin_unlock_irqrestore(&phc->lock, irqflags);
- ionic_adminq_wait(phc->lif, &ctx, err);
+ ionic_adminq_wait(phc->lif, &ctx, err, true);
return phc->aux_work_delay;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index 25ecfcfa1281..f6e785f949f9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -239,6 +239,21 @@ struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif)
return NULL;
}
+static struct ionic_rx_filter *ionic_rx_filter_find(struct ionic_lif *lif,
+ struct ionic_rx_filter_add_cmd *ac)
+{
+ switch (le16_to_cpu(ac->match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ return ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
+ case IONIC_RX_FILTER_MATCH_MAC:
+ return ionic_rx_filter_by_addr(lif, ac->mac.addr);
+ default:
+ netdev_err(lif->netdev, "unsupported filter match %d\n",
+ le16_to_cpu(ac->match));
+ return NULL;
+ }
+}
+
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode)
{
struct ionic_rx_filter *f;
@@ -286,6 +301,228 @@ int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode)
return 0;
}
+static int ionic_lif_filter_add(struct ionic_lif *lif,
+ struct ionic_rx_filter_add_cmd *ac)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ };
+ struct ionic_rx_filter *f;
+ int nfilters;
+ int err = 0;
+
+ ctx.cmd.rx_filter_add = *ac;
+ ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD,
+ ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index),
+
+ spin_lock_bh(&lif->rx_filters.lock);
+ f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
+ if (f) {
+ /* don't bother if we already have it and it is sync'd */
+ if (f->state == IONIC_FILTER_STATE_SYNCED) {
+ spin_unlock_bh(&lif->rx_filters.lock);
+ return 0;
+ }
+
+ /* mark preemptively as sync'd to block any parallel attempts */
+ f->state = IONIC_FILTER_STATE_SYNCED;
+ } else {
+ /* save as SYNCED to catch any DEL requests while processing */
+ err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
+ IONIC_FILTER_STATE_SYNCED);
+ }
+ spin_unlock_bh(&lif->rx_filters.lock);
+ if (err)
+ return err;
+
+ /* Don't bother with the write to FW if we know there's no room;
+ * we can try again on the next sync attempt.
+ * Since the FW doesn't have a way to tell us the vlan limit,
+ * we start max_vlans at 0 until we hit the ENOSPC error.
+ */
+ switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ netdev_dbg(lif->netdev, "%s: rx_filter add VLAN %d\n",
+ __func__, ctx.cmd.rx_filter_add.vlan.vlan);
+ if (lif->max_vlans && lif->nvlans >= lif->max_vlans)
+ err = -ENOSPC;
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ netdev_dbg(lif->netdev, "%s: rx_filter add ADDR %pM\n",
+ __func__, ctx.cmd.rx_filter_add.mac.addr);
+ nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
+ if ((lif->nucast + lif->nmcast) >= nfilters)
+ err = -ENOSPC;
+ break;
+ }
+
+ if (err != -ENOSPC)
+ err = ionic_adminq_post_wait_nomsg(lif, &ctx);
+
+ spin_lock_bh(&lif->rx_filters.lock);
+
+ if (err && err != -EEXIST) {
+ /* set the state back to NEW so we can try again later */
+ f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
+ if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
+ f->state = IONIC_FILTER_STATE_NEW;
+
+ /* If -ENOSPC we won't waste time trying to sync again
+ * until there is a delete that might make room
+ */
+ if (err != -ENOSPC)
+ set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
+ }
+
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ if (err == -ENOSPC) {
+ if (le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN)
+ lif->max_vlans = lif->nvlans;
+ return 0;
+ }
+
+ ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode,
+ ctx.comp.comp.status, err);
+ switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ netdev_info(lif->netdev, "rx_filter add failed: VLAN %d\n",
+ ctx.cmd.rx_filter_add.vlan.vlan);
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ netdev_info(lif->netdev, "rx_filter add failed: ADDR %pM\n",
+ ctx.cmd.rx_filter_add.mac.addr);
+ break;
+ }
+
+ return err;
+ }
+
+ switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ lif->nvlans++;
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ if (is_multicast_ether_addr(ctx.cmd.rx_filter_add.mac.addr))
+ lif->nmcast++;
+ else
+ lif->nucast++;
+ break;
+ }
+
+ f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
+ if (f && f->state == IONIC_FILTER_STATE_OLD) {
+ /* Someone requested a delete while we were adding
+ * so update the filter info with the results from the add
+ * and the data will be there for the delete on the next
+ * sync cycle.
+ */
+ err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
+ IONIC_FILTER_STATE_OLD);
+ } else {
+ err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
+ IONIC_FILTER_STATE_SYNCED);
+ }
+
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ return err;
+}
+
+int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
+ };
+
+ memcpy(&ac.mac.addr, addr, ETH_ALEN);
+
+ return ionic_lif_filter_add(lif, &ac);
+}
+
+int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
+ .vlan.vlan = cpu_to_le16(vid),
+ };
+
+ return ionic_lif_filter_add(lif, &ac);
+}
+
+static int ionic_lif_filter_del(struct ionic_lif *lif,
+ struct ionic_rx_filter_add_cmd *ac)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.rx_filter_del = {
+ .opcode = IONIC_CMD_RX_FILTER_DEL,
+ .lif_index = cpu_to_le16(lif->index),
+ },
+ };
+ struct ionic_rx_filter *f;
+ int state;
+ int err;
+
+ spin_lock_bh(&lif->rx_filters.lock);
+ f = ionic_rx_filter_find(lif, ac);
+ if (!f) {
+ spin_unlock_bh(&lif->rx_filters.lock);
+ return -ENOENT;
+ }
+
+ switch (le16_to_cpu(ac->match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ netdev_dbg(lif->netdev, "%s: rx_filter del VLAN %d id %d\n",
+ __func__, ac->vlan.vlan, f->filter_id);
+ lif->nvlans--;
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ netdev_dbg(lif->netdev, "%s: rx_filter del ADDR %pM id %d\n",
+ __func__, ac->mac.addr, f->filter_id);
+ if (is_multicast_ether_addr(ac->mac.addr) && lif->nmcast)
+ lif->nmcast--;
+ else if (!is_multicast_ether_addr(ac->mac.addr) && lif->nucast)
+ lif->nucast--;
+ break;
+ }
+
+ state = f->state;
+ ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
+ ionic_rx_filter_free(lif, f);
+
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ if (state != IONIC_FILTER_STATE_NEW) {
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err && err != -EEXIST)
+ return err;
+ }
+
+ return 0;
+}
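
The state check at the end of ionic_lif_filter_del() encodes a simple rule: a filter still in IONIC_FILTER_STATE_NEW was never written to the firmware, so only the local bookkeeping has to go away, and -EEXIST from the device on a real delete is tolerated because the filter may already be gone. A condensed, hypothetical view of that decision (filter_del() and fw_del() are stand-ins, not driver functions):

#include <errno.h>

enum filter_state { FILTER_NEW, FILTER_SYNCED, FILTER_OLD };

static int filter_del(enum filter_state state, int (*fw_del)(void))
{
	int err;

	/* NEW filters never reached the device; nothing to delete there. */
	if (state == FILTER_NEW)
		return 0;

	err = fw_del();		/* posts IONIC_CMD_RX_FILTER_DEL in the driver */
	if (err && err != -EEXIST)	/* "already gone" is not an error */
		return err;
	return 0;
}
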
+
+int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
+ };
+
+ memcpy(&ac.mac.addr, addr, ETH_ALEN);
+
+ return ionic_lif_filter_del(lif, &ac);
+}
+
+int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
+ .vlan.vlan = cpu_to_le16(vid),
+ };
+
+ return ionic_lif_filter_del(lif, &ac);
+}
+
struct sync_item {
struct list_head list;
struct ionic_rx_filter f;
@@ -340,17 +577,14 @@ loop_out:
* they can clear room for some new filters
*/
list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) {
- (void)ionic_lif_addr_del(lif, sync_item->f.cmd.mac.addr);
+ (void)ionic_lif_filter_del(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
}
list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
- (void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr);
-
- if (sync_item->f.state != IONIC_FILTER_STATE_SYNCED)
- set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
+ (void)ionic_lif_filter_add(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
index a66e35f0833b..87b2666f248b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
@@ -44,5 +44,7 @@ struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif);
void ionic_rx_filter_sync(struct ionic_lif *lif);
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode);
int ionic_rx_filters_need_sync(struct ionic_lif *lif);
+int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid);
+int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid);
#endif /* _IONIC_RX_FILTER_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 58a854666c62..fd6806b4a1b9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -151,33 +151,11 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(vlan_stripped),
};
-static const struct ionic_stat_desc ionic_txq_stats_desc[] = {
- IONIC_TX_Q_STAT_DESC(stop),
- IONIC_TX_Q_STAT_DESC(wake),
- IONIC_TX_Q_STAT_DESC(drop),
- IONIC_TX_Q_STAT_DESC(dbell_count),
-};
-
-static const struct ionic_stat_desc ionic_dbg_cq_stats_desc[] = {
- IONIC_CQ_STAT_DESC(compl_count),
-};
-
-static const struct ionic_stat_desc ionic_dbg_intr_stats_desc[] = {
- IONIC_INTR_STAT_DESC(rearm_count),
-};
-
-static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
- IONIC_NAPI_STAT_DESC(poll_count),
-};
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
#define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc)
#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc)
#define IONIC_NUM_RX_STATS ARRAY_SIZE(ionic_rx_stats_desc)
-#define IONIC_NUM_TX_Q_STATS ARRAY_SIZE(ionic_txq_stats_desc)
-#define IONIC_NUM_DBG_CQ_STATS ARRAY_SIZE(ionic_dbg_cq_stats_desc)
-#define IONIC_NUM_DBG_INTR_STATS ARRAY_SIZE(ionic_dbg_intr_stats_desc)
-#define IONIC_NUM_DBG_NAPI_STATS ARRAY_SIZE(ionic_dbg_napi_stats_desc)
#define MAX_Q(lif) ((lif)->netdev->real_num_tx_queues)
@@ -253,21 +231,6 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
total += tx_queues * IONIC_NUM_TX_STATS;
total += rx_queues * IONIC_NUM_RX_STATS;
- if (test_bit(IONIC_LIF_F_UP, lif->state) &&
- test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
- /* tx debug stats */
- total += tx_queues * (IONIC_NUM_DBG_CQ_STATS +
- IONIC_NUM_TX_Q_STATS +
- IONIC_NUM_DBG_INTR_STATS +
- IONIC_MAX_NUM_SG_CNTR);
-
- /* rx debug stats */
- total += rx_queues * (IONIC_NUM_DBG_CQ_STATS +
- IONIC_NUM_DBG_INTR_STATS +
- IONIC_NUM_DBG_NAPI_STATS +
- IONIC_MAX_NUM_NAPI_CNTR);
- }
-
return total;
}
@@ -279,22 +242,6 @@ static void ionic_sw_stats_get_tx_strings(struct ionic_lif *lif, u8 **buf,
for (i = 0; i < IONIC_NUM_TX_STATS; i++)
ethtool_sprintf(buf, "tx_%d_%s", q_num,
ionic_tx_stats_desc[i].name);
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++)
- ethtool_sprintf(buf, "txq_%d_%s", q_num,
- ionic_txq_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++)
- ethtool_sprintf(buf, "txq_%d_cq_%s", q_num,
- ionic_dbg_cq_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++)
- ethtool_sprintf(buf, "txq_%d_intr_%s", q_num,
- ionic_dbg_intr_stats_desc[i].name);
- for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++)
- ethtool_sprintf(buf, "txq_%d_sg_cntr_%d", q_num, i);
}
static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf,
@@ -305,22 +252,6 @@ static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf,
for (i = 0; i < IONIC_NUM_RX_STATS; i++)
ethtool_sprintf(buf, "rx_%d_%s", q_num,
ionic_rx_stats_desc[i].name);
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++)
- ethtool_sprintf(buf, "rxq_%d_cq_%s", q_num,
- ionic_dbg_cq_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++)
- ethtool_sprintf(buf, "rxq_%d_intr_%s", q_num,
- ionic_dbg_intr_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++)
- ethtool_sprintf(buf, "rxq_%d_napi_%s", q_num,
- ionic_dbg_napi_stats_desc[i].name);
- for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++)
- ethtool_sprintf(buf, "rxq_%d_napi_work_done_%d", q_num, i);
}
static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
@@ -350,7 +281,6 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf,
int q_num)
{
struct ionic_tx_stats *txstats;
- struct ionic_qcq *txqcq;
int i;
txstats = &lif->txqstats[q_num];
@@ -359,47 +289,12 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf,
**buf = IONIC_READ_STAT64(txstats, &ionic_tx_stats_desc[i]);
(*buf)++;
}
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- txqcq = lif->txqcqs[q_num];
- for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->q,
- &ionic_txq_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->cq,
- &ionic_dbg_cq_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->intr,
- &ionic_dbg_intr_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->napi_stats,
- &ionic_dbg_napi_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
- **buf = txqcq->napi_stats.work_done_cntr[i];
- (*buf)++;
- }
- for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
- **buf = txstats->sg_cntr[i];
- (*buf)++;
- }
}
static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf,
int q_num)
{
struct ionic_rx_stats *rxstats;
- struct ionic_qcq *rxqcq;
int i;
rxstats = &lif->rxqstats[q_num];
@@ -408,31 +303,6 @@ static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf,
**buf = IONIC_READ_STAT64(rxstats, &ionic_rx_stats_desc[i]);
(*buf)++;
}
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- rxqcq = lif->rxqcqs[q_num];
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->cq,
- &ionic_dbg_cq_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->intr,
- &ionic_dbg_intr_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->napi_stats,
- &ionic_dbg_napi_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
- **buf = rxqcq->napi_stats.work_done_cntr[i];
- (*buf)++;
- }
}
static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 37c39581b659..94384f5d2a22 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -14,8 +14,6 @@
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
- DEBUG_STATS_TXQ_POST(q, ring_dbell);
-
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
@@ -23,8 +21,6 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
-
- DEBUG_STATS_RX_BUFF_CNT(q);
}
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
@@ -507,8 +503,6 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
- DEBUG_STATS_NAPI_POLL(qcq, work_done);
-
return work_done;
}
@@ -546,8 +540,6 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
- DEBUG_STATS_NAPI_POLL(qcq, work_done);
-
return work_done;
}
@@ -591,9 +583,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
tx_work_done + rx_work_done, flags);
}
- DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
- DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);
-
return rx_work_done;
}
@@ -735,7 +724,6 @@ static void ionic_tx_clean(struct ionic_queue *q,
} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
netif_wake_subqueue(q->lif->netdev, qi);
- q->wake++;
}
desc_info->bytes = skb->len;
@@ -1174,7 +1162,6 @@ static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
if (unlikely(!ionic_q_has_space(q, ndescs))) {
netif_stop_subqueue(q->lif->netdev, q->index);
- q->stop++;
stopped = 1;
/* Might race with ionic_tx_clean, check again */
@@ -1269,7 +1256,6 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
err_out_drop:
- q->stop++;
q->drop++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 344ea1143454..4cfab4434e80 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -463,6 +463,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
u64 mac_addr;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
+ u8 addr[ETH_ALEN];
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
@@ -474,7 +475,8 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
p = (unsigned char *)&mac_addr;
for (i = 0; i < 6; i++)
- netdev->dev_addr[i] = *(p + 5 - i);
+ addr[i] = *(p + 5 - i);
+ eth_hw_addr_set(netdev, addr);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
@@ -500,7 +502,7 @@ static int netxen_nic_set_mac(struct net_device *netdev, void *p)
}
memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
adapter->macaddr_set(adapter, addr->sa_data);
if (netif_running(netdev)) {
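
For context, netxen stores the MAC in a u64 whose low six bytes sit in reverse order, which is why the hunk above fills a local array back-to-front and only then hands it to eth_hw_addr_set() (required now that netdev->dev_addr is const). A standalone sketch of the byte reversal, assuming a little-endian host so the u64's in-memory bytes start at the low-order end; mac_u64_to_addr() is a hypothetical name:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Mirror the reversal loop in netxen_read_mac_addr() above. */
static void mac_u64_to_addr(uint64_t mac, uint8_t addr[ETH_ALEN])
{
	const uint8_t *p = (const uint8_t *)&mac;

	for (int i = 0; i < ETH_ALEN; i++)
		addr[i] = p[5 - i];
}

int main(void)
{
	uint8_t addr[ETH_ALEN];

	/* Prints 66:55:44:33:22:11 on a little-endian host. */
	mac_u64_to_addr(0x0000665544332211ULL, addr);
	for (int i = 0; i < ETH_ALEN; i++)
		printf("%02x%c", addr[i], i == 5 ? '\n' : ':');
	return 0;
}
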
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index d58e021614cd..d613095b78e0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -23,6 +23,8 @@
#include <linux/qed/qed_if.h>
#include "qed_debug.h"
#include "qed_hsi.h"
+#include "qed_dbg_hsi.h"
+#include "qed_mfw_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
@@ -89,14 +91,14 @@ static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
}
#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
- ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
+ ((sizeof(type_name) + (u32)(1 << ((p_hwfn)->cdev->cache_shift)) - 1) & \
~((1 << (p_hwfn->cdev->cache_shift)) - 1))
-#define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++)
+#define for_each_hwfn(cdev, i) for (i = 0; i < (cdev)->num_hwfns; i++)
#define D_TRINE(val, cond1, cond2, true1, true2, def) \
- (val == (cond1) ? true1 : \
- (val == (cond2) ? true2 : def))
+ ((val) == (cond1) ? true1 : \
+ ((val) == (cond2) ? true2 : def))
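
Several of the qed.h hunks here (ALIGNED_TYPE_SIZE, for_each_hwfn, D_TRINE, and the REG_ADDR/DOORBELL changes further down) only add parentheses around macro parameters. The motivation is the usual preprocessor precedence trap, sketched with a hypothetical macro:

#include <stdio.h>

#define SQUARE_BAD(x)	(x * x)
#define SQUARE_GOOD(x)	((x) * (x))

int main(void)
{
	/* SQUARE_BAD(1 + 2) expands to (1 + 2 * 1 + 2) == 5, not 9. */
	printf("bad:  %d\n", SQUARE_BAD(1 + 2));
	printf("good: %d\n", SQUARE_GOOD(1 + 2));
	return 0;
}
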
/* forward */
struct qed_ptt_pool;
@@ -510,7 +512,7 @@ enum qed_hsi_def_type {
struct qed_simd_fp_handler {
void *token;
- void (*func)(void *);
+ void (*func)(void *cookie);
};
enum qed_slowpath_wq_flag {
@@ -703,8 +705,6 @@ struct qed_dev {
#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev) QED_IS_AH(dev)
-#define QED_IS_E4(dev) (QED_IS_BB(dev) || QED_IS_AH(dev))
-#define QED_IS_E5(dev) ((dev)->type == QED_DEV_TYPE_E5)
u16 vendor_id;
@@ -875,14 +875,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
#define NUM_OF_BTB_BLOCKS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
-
/**
- * @brief qed_concrete_to_sw_fid - get the sw function id from
- * the concrete value.
+ * qed_concrete_to_sw_fid(): Get the sw function id from
+ * the concrete value.
*
- * @param concrete_fid
+ * @cdev: Qed dev pointer.
+ * @concrete_fid: Concrete fid.
*
- * @return inline u8
+ * Return: inline u8.
*/
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
u32 concrete_fid)
@@ -902,7 +902,6 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
}
#define PKT_LB_TC 9
-#define MAX_NUM_VOQS_E4 20
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
@@ -914,7 +913,7 @@ int qed_device_num_engines(struct qed_dev *cdev);
void qed_set_fw_mac_addr(__le16 *fw_msb,
__le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
-#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+#define QED_LEADING_HWFN(dev) (&(dev)->hwfns[0])
#define QED_IS_CMT(dev) ((dev)->num_hwfns > 1)
/* Macros for getting the engine-affinitized hwfn (FIR: fcoe,iscsi,roce) */
#define QED_FIR_AFFIN_HWFN(dev) (&(dev)->hwfns[dev->fir_affin])
@@ -935,7 +934,7 @@ void qed_set_fw_mac_addr(__le16 *fw_msb,
#define PQ_FLAGS_LLT (BIT(7))
#define PQ_FLAGS_MTC (BIT(8))
-/* physical queue index for cm context intialization */
+/* physical queue index for cm context initialization */
u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
@@ -947,12 +946,18 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
+#define GET_GTT_REG_ADDR(__base, __offset, __idx) \
+ ((__base) + __offset ## _GTT_OFFSET((__idx)))
+
+#define GET_GTT_BDQ_REG_ADDR(__base, __offset, __idx, __bdq_idx) \
+ ((__base) + __offset ## _GTT_OFFSET((__idx), (__bdq_idx)))
+
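
The GET_GTT_REG_ADDR() macros above rely on ## token pasting: the __offset argument is spliced with _GTT_OFFSET to name a per-index offset macro. A self-contained sketch with a made-up FOO register family (the macro shape matches the one added above; FOO_GTT_OFFSET is hypothetical):

#include <stdio.h>

#define FOO_GTT_OFFSET(idx)	((idx) * 0x10u)

#define GET_GTT_REG_ADDR(base, offset, idx) \
	((base) + offset ## _GTT_OFFSET(idx))

int main(void)
{
	/* Expands to (0x1000u + FOO_GTT_OFFSET(3)) == 0x1030. */
	printf("0x%x\n", GET_GTT_REG_ADDR(0x1000u, FOO, 3));
	return 0;
}
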
/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)
-#define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\
- (cdev->regview) + \
- (offset))
+#define REG_ADDR(cdev, offset) ((void __iomem *)((u8 __iomem *)\
+ ((cdev)->regview) + \
+ (offset)))
#define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset))
#define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset))
@@ -960,7 +965,7 @@ bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
#define DOORBELL(cdev, db_addr, val) \
writel((u32)val, (void __iomem *)((u8 __iomem *)\
- (cdev->doorbells) + (db_addr)))
+ ((cdev)->doorbells) + (db_addr)))
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
qed_device_num_ports((_p_hwfn)->cdev))
@@ -998,4 +1003,5 @@ int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port);
void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
void qed_llh_clear_all_filters(struct qed_dev *cdev);
+unsigned long qed_get_epoch_time(void);
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index cb0f2a3a1ac9..452494f8c298 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -54,22 +54,22 @@
/* connection context union */
union conn_context {
- struct e4_core_conn_context core_ctx;
- struct e4_eth_conn_context eth_ctx;
- struct e4_iscsi_conn_context iscsi_ctx;
- struct e4_fcoe_conn_context fcoe_ctx;
- struct e4_roce_conn_context roce_ctx;
+ struct core_conn_context core_ctx;
+ struct eth_conn_context eth_ctx;
+ struct iscsi_conn_context iscsi_ctx;
+ struct fcoe_conn_context fcoe_ctx;
+ struct roce_conn_context roce_ctx;
};
/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
- struct e4_iscsi_task_context iscsi_ctx;
- struct e4_fcoe_task_context fcoe_ctx;
+ struct iscsi_task_context iscsi_ctx;
+ struct fcoe_task_context fcoe_ctx;
};
/* TYPE-1 task context - ROCE */
union type1_task_context {
- struct e4_rdma_task_context roce_ctx;
+ struct rdma_task_context roce_ctx;
};
struct src_ent {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 8adb7ed0c12d..168ce2c50385 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -28,24 +28,23 @@ struct qed_tid_mem {
};
/**
- * @brief qedo_cid_get_cxt_info - Returns the context info for a specific cid
+ * qed_cxt_get_cid_info(): Returns the context info for a specific cid.
*
+ * @p_hwfn: HW device data.
+ * @p_info: In/out.
*
- * @param p_hwfn
- * @param p_info in/out
- *
- * @return int
+ * Return: Int.
*/
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
struct qed_cxt_info *p_info);
/**
- * @brief qed_cxt_get_tid_mem_info
+ * qed_cxt_get_tid_mem_info(): Returns the tid mem info.
*
- * @param p_hwfn
- * @param p_info
+ * @p_hwfn: HW device data.
+ * @p_info: in/out.
*
- * @return int
+ * Return: int.
*/
int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
struct qed_tid_mem *p_info);
@@ -64,142 +63,155 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *vf_cid);
/**
- * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
+ * qed_cxt_set_pf_params(): Set the PF params for cxt init.
+ *
+ * @p_hwfn: HW device data.
+ * @rdma_tasks: Requested maximum.
*
- * @param p_hwfn
- * @param rdma_tasks - requested maximum
- * @return int
+ * Return: int.
*/
int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks);
/**
- * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
+ * qed_cxt_cfg_ilt_compute(): Compute ILT init parameters.
*
- * @param p_hwfn
- * @param last_line
+ * @p_hwfn: HW device data.
+ * @last_line: (Out) last ILT line used.
*
- * @return int
+ * Return: Int.
*/
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line);
/**
- * @brief qed_cxt_cfg_ilt_compute_excess - how many lines can be decreased
+ * qed_cxt_cfg_ilt_compute_excess(): How many lines can be decreased.
+ *
+ * @p_hwfn: HW device data.
+ * @used_lines: Used lines.
*
- * @param p_hwfn
- * @param used_lines
+ * Return: Int.
*/
u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines);
/**
- * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
+ * qed_cxt_mngr_alloc(): Allocate and init the context manager struct.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_mngr_free
+ * qed_cxt_mngr_free() - Context manager free.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
+ * qed_cxt_tables_alloc(): Allocate ILT shadow, Searcher T2, acquired map.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_mngr_setup - Reset the acquired CIDs
+ * qed_cxt_mngr_setup(): Reset the acquired CIDs.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*/
void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_hw_init_common - Initailze ILT and DQ, common phase, per path.
- *
+ * qed_cxt_hw_init_common(): Initialize ILT and DQ, common phase, per path.
*
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path.
+ * qed_cxt_hw_init_pf(): Initialize ILT and DQ, PF phase, per path.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window used for writing the registers.
+ *
+ * Return: Void.
*/
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief qed_qm_init_pf - Initailze the QM PF phase, per path
+ * qed_qm_init_pf(): Initialize the QM PF phase, per path.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window used for writing the registers.
+ * @is_pf_loading: Indicates if the PF is currently loading.
*
- * @param p_hwfn
- * @param p_ptt
- * @param is_pf_loading
+ * Return: Void.
*/
void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool is_pf_loading);
/**
- * @brief Reconfigures QM pf on the fly
+ * qed_qm_reconf(): Reconfigures QM pf on the fly.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window used for writing the registers.
*
- * @return int
+ * Return: Int.
*/
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_CXT_PF_CID (0xff)
/**
- * @brief qed_cxt_release - Release a cid
+ * qed_cxt_release_cid(): Release a cid.
*
- * @param p_hwfn
- * @param cid
+ * @p_hwfn: HW device data.
+ * @cid: Cid.
+ *
+ * Return: Void.
*/
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid);
/**
- * @brief qed_cxt_release - Release a cid belonging to a vf-queue
+ * _qed_cxt_release_cid(): Release a cid belonging to a vf-queue.
+ *
+ * @p_hwfn: HW device data.
+ * @cid: Cid.
+ * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF.
*
- * @param p_hwfn
- * @param cid
- * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
+ * Return: Void.
*/
void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid);
/**
- * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
+ * qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type.
*
- * @param p_hwfn
- * @param type
- * @param p_cid
+ * @p_hwfn: HW device data.
+ * @type: Type.
+ * @p_cid: Pointer cid.
*
- * @return int
+ * Return: Int.
*/
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *p_cid);
/**
- * @brief _qed_cxt_acquire - Acquire a new cid of a specific protocol type
- * for a vf-queue
+ * _qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type
+ * for a vf-queue.
*
- * @param p_hwfn
- * @param type
- * @param p_cid
- * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
+ * @p_hwfn: HW device data.
+ * @type: Type.
+ * @p_cid: Pointer cid.
+ * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF.
*
- * @return int
+ * Return: Int.
*/
int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *p_cid, u8 vfid);
@@ -334,7 +346,10 @@ struct qed_cxt_mngr {
/* Maximal number of L2 steering filters */
u32 arfs_count;
- u8 task_type_id;
+ u16 iscsi_task_pages;
+ u16 fcoe_task_pages;
+ u16 roce_task_pages;
+ u16 eth_task_pages;
u16 task_ctx_size;
u16 conn_ctx_size;
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
new file mode 100644
index 000000000000..9d5a0c9e1ca0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
@@ -0,0 +1,1491 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+#ifndef _QED_DBG_HSI_H
+#define _QED_DBG_HSI_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+/****************************************/
+/* Debug Tools HSI constants and macros */
+/****************************************/
+
+enum block_id {
+ BLOCK_GRC,
+ BLOCK_MISCS,
+ BLOCK_MISC,
+ BLOCK_DBU,
+ BLOCK_PGLUE_B,
+ BLOCK_CNIG,
+ BLOCK_CPMU,
+ BLOCK_NCSI,
+ BLOCK_OPTE,
+ BLOCK_BMB,
+ BLOCK_PCIE,
+ BLOCK_MCP,
+ BLOCK_MCP2,
+ BLOCK_PSWHST,
+ BLOCK_PSWHST2,
+ BLOCK_PSWRD,
+ BLOCK_PSWRD2,
+ BLOCK_PSWWR,
+ BLOCK_PSWWR2,
+ BLOCK_PSWRQ,
+ BLOCK_PSWRQ2,
+ BLOCK_PGLCS,
+ BLOCK_DMAE,
+ BLOCK_PTU,
+ BLOCK_TCM,
+ BLOCK_MCM,
+ BLOCK_UCM,
+ BLOCK_XCM,
+ BLOCK_YCM,
+ BLOCK_PCM,
+ BLOCK_QM,
+ BLOCK_TM,
+ BLOCK_DORQ,
+ BLOCK_BRB,
+ BLOCK_SRC,
+ BLOCK_PRS,
+ BLOCK_TSDM,
+ BLOCK_MSDM,
+ BLOCK_USDM,
+ BLOCK_XSDM,
+ BLOCK_YSDM,
+ BLOCK_PSDM,
+ BLOCK_TSEM,
+ BLOCK_MSEM,
+ BLOCK_USEM,
+ BLOCK_XSEM,
+ BLOCK_YSEM,
+ BLOCK_PSEM,
+ BLOCK_RSS,
+ BLOCK_TMLD,
+ BLOCK_MULD,
+ BLOCK_YULD,
+ BLOCK_XYLD,
+ BLOCK_PRM,
+ BLOCK_PBF_PB1,
+ BLOCK_PBF_PB2,
+ BLOCK_RPB,
+ BLOCK_BTB,
+ BLOCK_PBF,
+ BLOCK_RDIF,
+ BLOCK_TDIF,
+ BLOCK_CDU,
+ BLOCK_CCFC,
+ BLOCK_TCFC,
+ BLOCK_IGU,
+ BLOCK_CAU,
+ BLOCK_UMAC,
+ BLOCK_XMAC,
+ BLOCK_MSTAT,
+ BLOCK_DBG,
+ BLOCK_NIG,
+ BLOCK_WOL,
+ BLOCK_BMBN,
+ BLOCK_IPC,
+ BLOCK_NWM,
+ BLOCK_NWS,
+ BLOCK_MS,
+ BLOCK_PHY_PCIE,
+ BLOCK_LED,
+ BLOCK_AVS_WRAP,
+ BLOCK_PXPREQBUS,
+ BLOCK_BAR0_MAP,
+ BLOCK_MCP_FIO,
+ BLOCK_LAST_INIT,
+ BLOCK_PRS_FC,
+ BLOCK_PBF_FC,
+ BLOCK_NIG_LB_FC,
+ BLOCK_NIG_LB_FC_PLLH,
+ BLOCK_NIG_TX_FC_PLLH,
+ BLOCK_NIG_TX_FC,
+ BLOCK_NIG_RX_FC_PLLH,
+ BLOCK_NIG_RX_FC,
+ MAX_BLOCK_ID
+};
+
+/* binary debug buffer types */
+enum bin_dbg_buffer_type {
+ BIN_BUF_DBG_MODE_TREE,
+ BIN_BUF_DBG_DUMP_REG,
+ BIN_BUF_DBG_DUMP_MEM,
+ BIN_BUF_DBG_IDLE_CHK_REGS,
+ BIN_BUF_DBG_IDLE_CHK_IMMS,
+ BIN_BUF_DBG_IDLE_CHK_RULES,
+ BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
+ BIN_BUF_DBG_ATTN_BLOCKS,
+ BIN_BUF_DBG_ATTN_REGS,
+ BIN_BUF_DBG_ATTN_INDEXES,
+ BIN_BUF_DBG_ATTN_NAME_OFFSETS,
+ BIN_BUF_DBG_BLOCKS,
+ BIN_BUF_DBG_BLOCKS_CHIP_DATA,
+ BIN_BUF_DBG_BUS_LINES,
+ BIN_BUF_DBG_BLOCKS_USER_DATA,
+ BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA,
+ BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
+ BIN_BUF_DBG_RESET_REGS,
+ BIN_BUF_DBG_PARSING_STRINGS,
+ MAX_BIN_DBG_BUFFER_TYPE
+};
+
+/* Attention bit mapping */
+struct dbg_attn_bit_mapping {
+ u16 data;
+#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
+#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
+};
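
The structs in this header pack bitfields into plain integers described by MASK/SHIFT macro pairs, as in dbg_attn_bit_mapping above; qed reads and writes such fields through its GET_FIELD()/SET_FIELD() helpers. A standalone sketch using simplified stand-ins of the same shape (not the kernel's exact macros):

#include <stdint.h>
#include <stdio.h>

#define DBG_ATTN_BIT_MAPPING_VAL_MASK			0x7FFF
#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT			0
#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK	0x1
#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT	15

#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, val)					\
	do {								\
		(value) &= ~((name##_MASK) << name##_SHIFT);		\
		(value) |= ((val) & name##_MASK) << name##_SHIFT;	\
	} while (0)

int main(void)
{
	uint16_t data = 0;

	SET_FIELD(data, DBG_ATTN_BIT_MAPPING_VAL, 42);
	SET_FIELD(data, DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT, 1);
	printf("val=%d unused=%d raw=0x%04x\n",
	       GET_FIELD(data, DBG_ATTN_BIT_MAPPING_VAL),
	       GET_FIELD(data, DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT),
	       data);
	return 0;
}
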
+
+/* Attention block per-type data */
+struct dbg_attn_block_type_data {
+ u16 names_offset;
+ u16 reserved1;
+ u8 num_regs;
+ u8 reserved2;
+ u16 regs_offset;
+};
+
+/* Block attentions */
+struct dbg_attn_block {
+ struct dbg_attn_block_type_data per_type_data[2];
+};
+
+/* Attention register result */
+struct dbg_attn_reg_result {
+ u32 data;
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+ u16 block_attn_offset;
+ u16 reserved;
+ u32 sts_val;
+ u32 mask_val;
+};
+
+/* Attention block result */
+struct dbg_attn_block_result {
+ u8 block_id;
+ u8 data;
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
+ u16 names_offset;
+ struct dbg_attn_reg_result reg_results[15];
+};
+
+/* Mode header */
+struct dbg_mode_hdr {
+ u16 data;
+#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
+#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
+};
+
+/* Attention register */
+struct dbg_attn_reg {
+ struct dbg_mode_hdr mode;
+ u16 block_attn_offset;
+ u32 data;
+#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
+ u32 sts_clr_address;
+ u32 mask_address;
+};
+
+/* Attention types */
+enum dbg_attn_type {
+ ATTN_TYPE_INTERRUPT,
+ ATTN_TYPE_PARITY,
+ MAX_DBG_ATTN_TYPE
+};
+
+/* Block debug data */
+struct dbg_block {
+ u8 name[15];
+ u8 associated_storm_letter;
+};
+
+/* Chip-specific block debug data */
+struct dbg_block_chip {
+ u8 flags;
+#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1
+#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4
+#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7
+#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5
+ u8 dbg_client_id;
+ u8 reset_reg_id;
+ u8 reset_reg_bit_offset;
+ struct dbg_mode_hdr dbg_bus_mode;
+ u16 reserved1;
+ u8 reserved2;
+ u8 num_of_dbg_bus_lines;
+ u16 dbg_bus_lines_offset;
+ u32 dbg_select_reg_addr;
+ u32 dbg_dword_enable_reg_addr;
+ u32 dbg_shift_reg_addr;
+ u32 dbg_force_valid_reg_addr;
+ u32 dbg_force_frame_reg_addr;
+};
+
+/* Chip-specific block user debug data */
+struct dbg_block_chip_user {
+ u8 num_of_dbg_bus_lines;
+ u8 has_latency_events;
+ u16 names_offset;
+};
+
+/* Block user debug data */
+struct dbg_block_user {
+ u8 name[16];
+};
+
+/* Block Debug line data */
+struct dbg_bus_line {
+ u8 data;
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
+ u8 group_sizes;
+};
+
+/* Condition header for registers dump */
+struct dbg_dump_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ u8 block_id; /* block ID */
+ u8 data_size; /* size in dwords of the data following this header */
+};
+
+/* Memory data for registers dump */
+struct dbg_dump_mem {
+ u32 dword0;
+#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+ u32 dword1;
+#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT 0
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
+};
+
+/* Register data for registers dump */
+struct dbg_dump_reg {
+ u32 data;
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
+};
+
+/* Split header for registers dump */
+struct dbg_dump_split_hdr {
+ u32 hdr;
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+};
+
+/* Condition header for idle check */
+struct dbg_idle_chk_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ u16 data_size; /* size in dwords of the data following this header */
+};
+
+/* Idle Check condition register */
+struct dbg_idle_chk_cond_reg {
+ u32 data;
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+ u16 num_entries;
+ u8 entry_size;
+ u8 start_entry;
+};
+
+/* Idle Check info register */
+struct dbg_idle_chk_info_reg {
+ u32 data;
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+ u16 size; /* register size in dwords */
+ struct dbg_mode_hdr mode; /* Mode header */
+};
+
+/* Idle Check register */
+union dbg_idle_chk_reg {
+ struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
+ struct dbg_idle_chk_info_reg info_reg; /* info register */
+};
+
+/* Idle Check result header */
+struct dbg_idle_chk_result_hdr {
+ u16 rule_id; /* Failing rule index */
+ u16 mem_entry_id; /* Failing memory entry index */
+ u8 num_dumped_cond_regs; /* number of dumped condition registers */
+ u8 num_dumped_info_regs; /* number of dumped info registers */
+ u8 severity; /* from dbg_idle_chk_severity_types enum */
+ u8 reserved;
+};
+
+/* Idle Check result register header */
+struct dbg_idle_chk_result_reg_hdr {
+ u8 data;
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
+ u8 start_entry; /* index of the first checked entry */
+ u16 size; /* register size in dwords */
+};
+
+/* Idle Check rule */
+struct dbg_idle_chk_rule {
+ u16 rule_id; /* Idle Check rule ID */
+ u8 severity; /* value from dbg_idle_chk_severity_types enum */
+ u8 cond_id; /* Condition ID */
+ u8 num_cond_regs; /* number of condition registers */
+ u8 num_info_regs; /* number of info registers */
+ u8 num_imms; /* number of immediates in the condition */
+ u8 reserved1;
+ u16 reg_offset; /* offset of this rule's registers in the idle check
+ * register array (in dbg_idle_chk_reg units).
+ */
+ u16 imm_offset; /* offset of this rule's immediate values in the
+ * immediate values array (in dwords).
+ */
+};
+
+/* Idle Check rule parsing data */
+struct dbg_idle_chk_rule_parsing_data {
+ u32 data;
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+};
+
+/* Idle check severity types */
+enum dbg_idle_chk_severity_types {
+ /* idle check failure should cause an error */
+ IDLE_CHK_SEVERITY_ERROR,
+ /* idle check failure should cause an error only if there's no traffic */
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+ /* idle check failure should cause a warning */
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_DBG_IDLE_CHK_SEVERITY_TYPES
+};
+
+/* Reset register */
+struct dbg_reset_reg {
+ u32 data;
+#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF
+#define DBG_RESET_REG_ADDR_SHIFT 0
+#define DBG_RESET_REG_IS_REMOVED_MASK 0x1
+#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
+#define DBG_RESET_REG_RESERVED_MASK 0x7F
+#define DBG_RESET_REG_RESERVED_SHIFT 25
+};
+
+/* Debug Bus block data */
+struct dbg_bus_block_data {
+ u8 enable_mask;
+ u8 right_shift;
+ u8 force_valid_mask;
+ u8 force_frame_mask;
+ u8 dword_mask;
+ u8 line_num;
+ u8 hw_id;
+ u8 flags;
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
+#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F
+#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1
+};
+
+enum dbg_bus_clients {
+ DBG_BUS_CLIENT_RBCN,
+ DBG_BUS_CLIENT_RBCP,
+ DBG_BUS_CLIENT_RBCR,
+ DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCF,
+ DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_OTHER_ENGINE,
+ DBG_BUS_CLIENT_TIMESTAMP,
+ DBG_BUS_CLIENT_CPU,
+ DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCQ,
+ DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCV,
+ MAX_DBG_BUS_CLIENTS
+};
+
+/* Debug Bus constraint operation types */
+enum dbg_bus_constraint_ops {
+ DBG_BUS_CONSTRAINT_OP_EQ,
+ DBG_BUS_CONSTRAINT_OP_NE,
+ DBG_BUS_CONSTRAINT_OP_LT,
+ DBG_BUS_CONSTRAINT_OP_LTC,
+ DBG_BUS_CONSTRAINT_OP_LE,
+ DBG_BUS_CONSTRAINT_OP_LEC,
+ DBG_BUS_CONSTRAINT_OP_GT,
+ DBG_BUS_CONSTRAINT_OP_GTC,
+ DBG_BUS_CONSTRAINT_OP_GE,
+ DBG_BUS_CONSTRAINT_OP_GEC,
+ MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
+/* Debug Bus trigger state data */
+struct dbg_bus_trigger_state_data {
+ u8 msg_len;
+ u8 constraint_dword_mask;
+ u8 storm_id;
+ u8 reserved;
+};
+
+/* Debug Bus memory address */
+struct dbg_bus_mem_addr {
+ u32 lo;
+ u32 hi;
+};
+
+/* Debug Bus PCI buffer data */
+struct dbg_bus_pci_buf_data {
+ struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
+ struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
+ u32 size; /* PCI buffer size in bytes */
+};
+
+/* Debug Bus Storm EID range filter params */
+struct dbg_bus_storm_eid_range_params {
+ u8 min; /* Minimal event ID to filter on */
+ u8 max; /* Maximal event ID to filter on */
+};
+
+/* Debug Bus Storm EID mask filter params */
+struct dbg_bus_storm_eid_mask_params {
+ u8 val; /* Event ID value */
+ u8 mask; /* Event ID mask. 1s in the mask = don't care bits. */
+};
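
Per the comment above, a set bit in mask marks a don't-care position, so an event ID passes the filter when it agrees with val on every cleared mask bit. A minimal sketch of that match (eid_matches() is a hypothetical helper, not a driver function):

#include <stdint.h>
#include <stdio.h>

static int eid_matches(uint8_t eid, uint8_t val, uint8_t mask)
{
	/* Compare only the bits the mask does NOT mark as don't-care. */
	return ((eid ^ val) & (uint8_t)~mask) == 0;
}

int main(void)
{
	/* Low nibble masked out: only the high nibble must match. */
	printf("%d %d\n",
	       eid_matches(0x5A, 0x5F, 0x0F),	/* 1: high nibbles equal */
	       eid_matches(0x6A, 0x5F, 0x0F));	/* 0: high nibbles differ */
	return 0;
}
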
+
+/* Debug Bus Storm EID filter params */
+union dbg_bus_storm_eid_params {
+ struct dbg_bus_storm_eid_range_params range;
+ struct dbg_bus_storm_eid_mask_params mask;
+};
+
+/* Debug Bus Storm data */
+struct dbg_bus_storm_data {
+ u8 enabled;
+ u8 mode;
+ u8 hw_id;
+ u8 eid_filter_en;
+ u8 eid_range_not_mask;
+ u8 cid_filter_en;
+ union dbg_bus_storm_eid_params eid_filter_params;
+ u32 cid;
+};
+
+/* Debug Bus data */
+struct dbg_bus_data {
+ u32 app_version;
+ u8 state;
+ u8 mode_256b_en;
+ u8 num_enabled_blocks;
+ u8 num_enabled_storms;
+ u8 target;
+ u8 one_shot_en;
+ u8 grc_input_en;
+ u8 timestamp_input_en;
+ u8 filter_en;
+ u8 adding_filter;
+ u8 filter_pre_trigger;
+ u8 filter_post_trigger;
+ u8 trigger_en;
+ u8 filter_constraint_dword_mask;
+ u8 next_trigger_state;
+ u8 next_constraint_id;
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 filter_msg_len;
+ u8 rcv_from_other_engine;
+ u8 blocks_dword_mask;
+ u8 blocks_dword_overlap;
+ u32 hw_id_mask;
+ struct dbg_bus_pci_buf_data pci_buf;
+ struct dbg_bus_block_data blocks[132];
+ struct dbg_bus_storm_data storms[6];
+};
+
+/* Debug bus states */
+enum dbg_bus_states {
+ DBG_BUS_STATE_IDLE,
+ DBG_BUS_STATE_READY,
+ DBG_BUS_STATE_RECORDING,
+ DBG_BUS_STATE_STOPPED,
+ MAX_DBG_BUS_STATES
+};
+
+/* Debug Bus Storm modes */
+enum dbg_bus_storm_modes {
+ DBG_BUS_STORM_MODE_PRINTF,
+ DBG_BUS_STORM_MODE_PRAM_ADDR,
+ DBG_BUS_STORM_MODE_DRA_RW,
+ DBG_BUS_STORM_MODE_DRA_W,
+ DBG_BUS_STORM_MODE_LD_ST_ADDR,
+ DBG_BUS_STORM_MODE_DRA_FSM,
+ DBG_BUS_STORM_MODE_FAST_DBGMUX,
+ DBG_BUS_STORM_MODE_RH,
+ DBG_BUS_STORM_MODE_RH_WITH_STORE,
+ DBG_BUS_STORM_MODE_FOC,
+ DBG_BUS_STORM_MODE_EXT_STORE,
+ MAX_DBG_BUS_STORM_MODES
+};
+
+/* Debug bus target IDs */
+enum dbg_bus_targets {
+ DBG_BUS_TARGET_ID_INT_BUF,
+ DBG_BUS_TARGET_ID_NIG,
+ DBG_BUS_TARGET_ID_PCI,
+ MAX_DBG_BUS_TARGETS
+};
+
+/* GRC Dump data */
+struct dbg_grc_data {
+ u8 params_initialized;
+ u8 reserved1;
+ u16 reserved2;
+ u32 param_val[48];
+};
+
+/* Debug GRC params */
+enum dbg_grc_params {
+ DBG_GRC_PARAM_DUMP_TSTORM,
+ DBG_GRC_PARAM_DUMP_MSTORM,
+ DBG_GRC_PARAM_DUMP_USTORM,
+ DBG_GRC_PARAM_DUMP_XSTORM,
+ DBG_GRC_PARAM_DUMP_YSTORM,
+ DBG_GRC_PARAM_DUMP_PSTORM,
+ DBG_GRC_PARAM_DUMP_REGS,
+ DBG_GRC_PARAM_DUMP_RAM,
+ DBG_GRC_PARAM_DUMP_PBUF,
+ DBG_GRC_PARAM_DUMP_IOR,
+ DBG_GRC_PARAM_DUMP_VFC,
+ DBG_GRC_PARAM_DUMP_CM_CTX,
+ DBG_GRC_PARAM_DUMP_PXP,
+ DBG_GRC_PARAM_DUMP_RSS,
+ DBG_GRC_PARAM_DUMP_CAU,
+ DBG_GRC_PARAM_DUMP_QM,
+ DBG_GRC_PARAM_DUMP_MCP,
+ DBG_GRC_PARAM_DUMP_DORQ,
+ DBG_GRC_PARAM_DUMP_CFC,
+ DBG_GRC_PARAM_DUMP_IGU,
+ DBG_GRC_PARAM_DUMP_BRB,
+ DBG_GRC_PARAM_DUMP_BTB,
+ DBG_GRC_PARAM_DUMP_BMB,
+ DBG_GRC_PARAM_RESERVD1,
+ DBG_GRC_PARAM_DUMP_MULD,
+ DBG_GRC_PARAM_DUMP_PRS,
+ DBG_GRC_PARAM_DUMP_DMAE,
+ DBG_GRC_PARAM_DUMP_TM,
+ DBG_GRC_PARAM_DUMP_SDM,
+ DBG_GRC_PARAM_DUMP_DIF,
+ DBG_GRC_PARAM_DUMP_STATIC,
+ DBG_GRC_PARAM_UNSTALL,
+ DBG_GRC_PARAM_RESERVED2,
+ DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
+ DBG_GRC_PARAM_EXCLUDE_ALL,
+ DBG_GRC_PARAM_CRASH,
+ DBG_GRC_PARAM_PARITY_SAFE,
+ DBG_GRC_PARAM_DUMP_CM,
+ DBG_GRC_PARAM_DUMP_PHY,
+ DBG_GRC_PARAM_NO_MCP,
+ DBG_GRC_PARAM_NO_FW_VER,
+ DBG_GRC_PARAM_RESERVED3,
+ DBG_GRC_PARAM_DUMP_MCP_HW_DUMP,
+ DBG_GRC_PARAM_DUMP_ILT_CDUC,
+ DBG_GRC_PARAM_DUMP_ILT_CDUT,
+ DBG_GRC_PARAM_DUMP_CAU_EXT,
+ MAX_DBG_GRC_PARAMS
+};
+
+/* Debug status codes */
+enum dbg_status {
+ DBG_STATUS_OK,
+ DBG_STATUS_APP_VERSION_NOT_SET,
+ DBG_STATUS_UNSUPPORTED_APP_VERSION,
+ DBG_STATUS_DBG_BLOCK_NOT_RESET,
+ DBG_STATUS_INVALID_ARGS,
+ DBG_STATUS_OUTPUT_ALREADY_SET,
+ DBG_STATUS_INVALID_PCI_BUF_SIZE,
+ DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+ DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+ DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS,
+ DBG_STATUS_NO_MATCHING_FRAMING_MODE,
+ DBG_STATUS_VFC_READ_ERROR,
+ DBG_STATUS_STORM_ALREADY_ENABLED,
+ DBG_STATUS_STORM_NOT_ENABLED,
+ DBG_STATUS_BLOCK_ALREADY_ENABLED,
+ DBG_STATUS_BLOCK_NOT_ENABLED,
+ DBG_STATUS_NO_INPUT_ENABLED,
+ DBG_STATUS_NO_FILTER_TRIGGER_256B,
+ DBG_STATUS_FILTER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_NOT_ENABLED,
+ DBG_STATUS_CANT_ADD_CONSTRAINT,
+ DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+ DBG_STATUS_TOO_MANY_CONSTRAINTS,
+ DBG_STATUS_RECORDING_NOT_STARTED,
+ DBG_STATUS_DATA_DIDNT_TRIGGER,
+ DBG_STATUS_NO_DATA_RECORDED,
+ DBG_STATUS_DUMP_BUF_TOO_SMALL,
+ DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+ DBG_STATUS_UNKNOWN_CHIP,
+ DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+ DBG_STATUS_BLOCK_IN_RESET,
+ DBG_STATUS_INVALID_TRACE_SIGNATURE,
+ DBG_STATUS_INVALID_NVRAM_BUNDLE,
+ DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+ DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+ DBG_STATUS_NVRAM_READ_FAILED,
+ DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+ DBG_STATUS_MCP_TRACE_BAD_DATA,
+ DBG_STATUS_MCP_TRACE_NO_META,
+ DBG_STATUS_MCP_COULD_NOT_HALT,
+ DBG_STATUS_MCP_COULD_NOT_RESUME,
+ DBG_STATUS_RESERVED0,
+ DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+ DBG_STATUS_IGU_FIFO_BAD_DATA,
+ DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+ DBG_STATUS_REG_FIFO_BAD_DATA,
+ DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+ DBG_STATUS_DBG_ARRAY_NOT_SET,
+ DBG_STATUS_RESERVED1,
+ DBG_STATUS_NON_MATCHING_LINES,
+ DBG_STATUS_INSUFFICIENT_HW_IDS,
+ DBG_STATUS_DBG_BUS_IN_USE,
+ DBG_STATUS_INVALID_STORM_DBG_MODE,
+ DBG_STATUS_OTHER_ENGINE_BB_ONLY,
+ DBG_STATUS_FILTER_SINGLE_HW_ID,
+ DBG_STATUS_TRIGGER_SINGLE_HW_ID,
+ DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
+ MAX_DBG_STATUS
+};
+
+/* Debug Storms IDs */
+enum dbg_storms {
+ DBG_TSTORM_ID,
+ DBG_MSTORM_ID,
+ DBG_USTORM_ID,
+ DBG_XSTORM_ID,
+ DBG_YSTORM_ID,
+ DBG_PSTORM_ID,
+ MAX_DBG_STORMS
+};
+
+/* Idle Check data */
+struct idle_chk_data {
+ u32 buf_size;
+ u8 buf_size_set;
+ u8 reserved1;
+ u16 reserved2;
+};
+
+struct pretend_params {
+ u8 split_type;
+ u8 reserved;
+ u16 split_id;
+};
+
+/* Debug Tools data (per HW function) */
+struct dbg_tools_data {
+ struct dbg_grc_data grc;
+ struct dbg_bus_data bus;
+ struct idle_chk_data idle_chk;
+ u8 mode_enable[40];
+ u8 block_in_reset[132];
+ u8 chip_id;
+ u8 hw_type;
+ u8 num_ports;
+ u8 num_pfs_per_port;
+ u8 num_vfs;
+ u8 initialized;
+ u8 use_dmae;
+ u8 reserved;
+ struct pretend_params pretend;
+ u32 num_regs_read;
+};
+
+/* ILT Clients */
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_CDUT,
+ ILT_CLI_QM,
+ ILT_CLI_TM,
+ ILT_CLI_SRC,
+ ILT_CLI_TSDM,
+ ILT_CLI_RGFS,
+ ILT_CLI_TGFS,
+ MAX_ILT_CLIENTS
+};
+
+/***************************** Public Functions *******************************/
+
+/**
+ * qed_dbg_set_bin_ptr(): Sets a pointer to the binary data with debug
+ * arrays.
+ *
+ * @p_hwfn: HW device data.
+ * @bin_ptr: A pointer to the binary data with debug arrays.
+ *
+ * Return: enum dbg status.
+ */
+enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
+
+/**
+ * qed_read_regs(): Reads registers into a buffer (using GRC).
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf: Destination buffer.
+ * @addr: Source GRC address in dwords.
+ * @len: Number of registers to read.
+ *
+ * Return: Void.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
+
+/**
+ * qed_read_fw_info(): Reads FW info from the chip.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @fw_info: (Out) a pointer to write the FW info into.
+ *
+ * Return: True if the FW info was read successfully from one of the Storms,
+ * or false if all Storms are in reset.
+ *
+ * The FW info contains FW-related information, such as the FW version,
+ * FW image (main/L2B/kuku), FW timestamp, etc.
+ * The FW info is read from the internal RAM of the first Storm that is not in
+ * reset.
+ */
+bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct fw_info *fw_info);
+/**
+ * qed_dbg_grc_config(): Sets the value of a GRC parameter.
+ *
+ * @p_hwfn: HW device data.
+ * @grc_param: GRC parameter.
+ * @val: Value to set.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - Grc_param is invalid.
+ * - Val is outside the allowed boundaries.
+ */
+enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param, u32 val);
+
+/**
+ * qed_dbg_grc_set_params_default(): Reverts all GRC parameters to their
+ * default value.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
+/**
+ * qed_dbg_grc_get_dump_buf_size(): Returns the required buffer size for
+ * GRC Dump.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) required buffer size (in dwords) for the GRC Dump
+ * data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_grc_dump(): Dumps GRC data into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the collected GRC data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified dump buffer is too small.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
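
The dump entry points declared in this header pair a *_get_dump_buf_size() query with a *_dump() fill, so callers size the buffer before dumping. A hedged sketch of a GRC dump caller, condensed from how the qed debug code typically drives these APIs, not a verbatim copy (assumes p_hwfn/p_ptt are already acquired; error mapping simplified):

static int grc_dump_example(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 buf_size_dwords, dumped_dwords;
	enum dbg_status rc;
	u32 *buf;

	/* Step 1: ask how big the dump will be, in dwords. */
	rc = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &buf_size_dwords);
	if (rc != DBG_STATUS_OK)
		return -EINVAL;

	buf = vzalloc(buf_size_dwords * sizeof(u32));
	if (!buf)
		return -ENOMEM;

	/* Step 2: collect the dump into the sized buffer. */
	rc = qed_dbg_grc_dump(p_hwfn, p_ptt, buf, buf_size_dwords,
			      &dumped_dwords);
	/* ... consume the first dumped_dwords dwords of buf ... */
	vfree(buf);
	return rc == DBG_STATUS_OK ? 0 : -EINVAL;
}
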
+
+/**
+ * qed_dbg_idle_chk_get_dump_buf_size(): Returns the required buffer size
+ * for idle check results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) required buffer size (in dwords) for the idle check
+ * data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_idle_chk_dump(): Performs idle check and writes the results
+ * into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the idle check data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_mcp_trace_get_dump_buf_size(): Returns the required buffer size
+ * for mcp trace results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for mcp trace data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The trace data in MCP scratchpad contain an invalid signature.
+ * - The bundle ID in NVRAM is invalid.
+ * - The trace meta data cannot be found (in NVRAM or image file).
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_mcp_trace_dump(): Performs mcp trace and writes the results
+ * into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the mcp trace data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - The trace data in MCP scratchpad contain an invalid signature.
+ * - The bundle ID in NVRAM is invalid.
+ * - The trace meta data cannot be found (in NVRAM or image file).
+ * - The trace meta data cannot be read (from NVRAM or image file).
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_reg_fifo_get_dump_buf_size(): Returns the required buffer size
+ * for grc trace fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for reg fifo data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_reg_fifo_dump(): Reads the reg fifo and writes the results into
+ * the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the reg fifo data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - DMAE transaction failed.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_igu_fifo_get_dump_buf_size(): Returns the required buffer size
+ * for the IGU fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for the IGU fifo
+ * data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_igu_fifo_dump(): Reads the IGU fifo and writes the results into
+ * the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the IGU fifo data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * - The specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_protection_override_get_dump_buf_size(): Returns the required
+ * buffer size for protection override window results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for protection
+ * override data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * qed_dbg_protection_override_dump(): Reads protection override window
+ * entries and writes the results into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the protection override data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - DMAE transaction failed.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * qed_dbg_fw_asserts_get_dump_buf_size(): Returns the required buffer
+ * size for FW Asserts results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for FW Asserts data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * qed_dbg_fw_asserts_dump(): Reads the FW Asserts and writes the results
+ * into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the FW Asserts data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_read_attn(): Reads the attention registers of the specified
+ * block and type, and writes the results into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @block: Block ID.
+ * @attn_type: Attention type.
+ * @clear_status: Indicates if the attention status should be cleared.
+ * @results: (OUT) Pointer to write the read results into.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block,
+ enum dbg_attn_type attn_type,
+ bool clear_status,
+ struct dbg_attn_block_result *results);
+
+/**
+ * qed_dbg_print_attn(): Prints attention registers values in the
+ * specified results struct.
+ *
+ * @p_hwfn: HW device data.
+ * @results: Pointer to the attention read results
+ *
+ * Return: Error if one of the following holds:
+ *         - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
+
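+/* Pairing sketch (illustrative): attention registers are read into a
+ * results struct and then printed from it, e.g.:
+ *
+ *     struct dbg_attn_block_result res;
+ *
+ *     if (qed_dbg_read_attn(p_hwfn, p_ptt, block, attn_type, false,
+ *                           &res) == DBG_STATUS_OK)
+ *             qed_dbg_print_attn(p_hwfn, &res);
+ */
+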
+/******************************* Data Types **********************************/
+
+struct mcp_trace_format {
+ u32 data;
+#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
+#define MCP_TRACE_FORMAT_MODULE_OFFSET 0
+#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
+#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16
+#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
+#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18
+#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
+#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20
+#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
+#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22
+#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
+#define MCP_TRACE_FORMAT_LEN_OFFSET 24
+
+ char *format_str;
+};
+
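+/* Field extraction sketch (illustrative): the attributes of a format
+ * entry are packed into the 'data' dword via the masks/offsets above,
+ * e.g. assuming 'fmt' points at a struct mcp_trace_format:
+ *
+ *     u32 module = (fmt->data & MCP_TRACE_FORMAT_MODULE_MASK) >>
+ *                  MCP_TRACE_FORMAT_MODULE_OFFSET;
+ *     u32 p1_size = (fmt->data & MCP_TRACE_FORMAT_P1_SIZE_MASK) >>
+ *                   MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
+ */
+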
+/* MCP Trace Meta data structure */
+struct mcp_trace_meta {
+ u32 modules_num;
+ char **modules;
+ u32 formats_num;
+ struct mcp_trace_format *formats;
+ bool is_allocated;
+};
+
+/* Debug Tools user data */
+struct dbg_tools_user_data {
+ struct mcp_trace_meta mcp_trace_meta;
+ const u32 *mcp_trace_user_meta_buf;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_NAME_LEN 16
+
+/***************************** Public Functions *******************************/
+
+/**
+ * qed_dbg_user_set_bin_ptr(): Sets a pointer to the binary data with
+ * debug arrays.
+ *
+ * @p_hwfn: HW device data.
+ * @bin_ptr: A pointer to the binary data with debug arrays.
+ *
+ * Return: dbg_status.
+ */
+enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
+
+/**
+ * qed_dbg_alloc_user_data(): Allocates user debug data.
+ *
+ * @p_hwfn: HW device data.
+ * @user_data_ptr: (OUT) A pointer to the allocated memory.
+ *
+ * Return: dbg_status.
+ */
+enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
+ void **user_data_ptr);
+
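+/* Initialization sketch (illustrative): user-mode parsing expects the
+ * binary debug arrays and a per-hwfn user-data context to be set up
+ * first, e.g.:
+ *
+ *     void *user_data;
+ *
+ *     if (qed_dbg_user_set_bin_ptr(p_hwfn, bin_ptr) == DBG_STATUS_OK)
+ *             qed_dbg_alloc_user_data(p_hwfn, &user_data);
+ */
+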
+/**
+ * qed_dbg_get_status_str(): Returns a string for the specified status.
+ *
+ * @status: A debug status code.
+ *
+ * Return: A string for the specified status.
+ */
+const char *qed_dbg_get_status_str(enum dbg_status status);
+
+/**
+ * qed_get_idle_chk_results_buf_size(): Returns the required buffer size
+ * for idle check results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: idle check dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * qed_print_idle_chk_results(): Prints idle check results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: idle check dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf: buffer for printing the idle check results.
+ * @num_errors: (OUT) number of errors found in idle check.
+ * @num_warnings: (OUT) number of warnings found in idle check.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors,
+ u32 *num_warnings);
+
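+/* Parsing flow sketch (illustrative; 'dump_buf' holds a previously
+ * taken idle check dump):
+ *
+ *     u32 size_in_bytes, errors, warnings;
+ *     char *buf;
+ *
+ *     if (qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf,
+ *                                           num_dumped_dwords,
+ *                                           &size_in_bytes) != DBG_STATUS_OK)
+ *             return;
+ *     buf = vzalloc(size_in_bytes);
+ *     if (buf)
+ *             qed_print_idle_chk_results(p_hwfn, dump_buf,
+ *                                        num_dumped_dwords, buf,
+ *                                        &errors, &warnings);
+ */
+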
+/**
+ * qed_dbg_mcp_trace_set_meta_data(): Sets the MCP Trace meta data.
+ *
+ * @p_hwfn: HW device data.
+ * @meta_buf: Meta buffer.
+ *
+ * Return: Void.
+ *
+ * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
+ * no NVRAM access).
+ */
+void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
+ const u32 *meta_buf);
+
+/**
+ * qed_get_mcp_trace_results_buf_size(): Returns the required buffer size
+ * for MCP Trace results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP Trace dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_mcp_trace_results(): Prints MCP Trace results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the MCP Trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_print_mcp_trace_results_cont(): Prints MCP Trace results while
+ * keeping the MCP Trace meta data allocated, to support continuous MCP
+ * Trace parsing. After the continuous parsing ends,
+ * qed_mcp_trace_free_meta_data() should be called to free the meta data.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP Trace dump buffer, starting from the header.
+ * @results_buf: Buffer for printing the MCP Trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ char *results_buf);
+
+/**
+ * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @num_dumped_bytes: Number of bytes that were dumped.
+ * @results_buf: Buffer for printing the MCP Trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
+ u8 *dump_buf,
+ u32 num_dumped_bytes,
+ char *results_buf);
+
+/**
+ * qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data.
+ * Should be called after continuous MCP Trace parsing.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn);
+
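+/* Continuous-parsing sketch (illustrative; 'meta_buf', 'chunk' and
+ * read_next_trace_chunk() are caller-supplied placeholders):
+ *
+ *     qed_dbg_mcp_trace_set_meta_data(p_hwfn, meta_buf);
+ *     while (read_next_trace_chunk(chunk))
+ *             qed_print_mcp_trace_results_cont(p_hwfn, chunk, results_buf);
+ *     qed_mcp_trace_free_meta_data(p_hwfn);
+ */
+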
+/**
+ * qed_get_reg_fifo_results_buf_size(): Returns the required buffer size
+ * for reg_fifo results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Reg fifo dump buffer.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_reg_fifo_results(): Prints reg fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Reg fifo dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the reg fifo results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_get_igu_fifo_results_buf_size(): Returns the required buffer size
+ * for igu_fifo results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: IGU fifo dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_igu_fifo_results(): Prints IGU fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: IGU fifo dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the IGU fifo results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_get_protection_override_results_buf_size(): Returns the required
+ * buffer size for protection override results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Protection override dump buffer.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_protection_override_results(): Prints protection override
+ * results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Protection override dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the protection override results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_get_fw_asserts_results_buf_size(): Returns the required buffer size
+ * for FW Asserts results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: FW Asserts dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_fw_asserts_results(): Prints FW Asserts results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: FW Asserts dump buffer, starting from the header.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf: buffer for printing the FW Asserts results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_dbg_parse_attn(): Parses and prints attention registers values in
+ * the specified results struct.
+ *
+ * @p_hwfn: HW device data.
+ * @results: Pointer to the attention read results.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index e1798925b444..ea839e605577 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -84,16 +84,17 @@ struct qed_dcbx_mib_meta_data {
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#ifdef CONFIG_DCB
-int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *);
+int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_set *params);
-int qed_dcbx_config_params(struct qed_hwfn *,
- struct qed_ptt *, struct qed_dcbx_set *, bool);
+int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_set *params, bool hw_commit);
#endif
/* QED local interface routines */
int
-qed_dcbx_mib_update_event(struct qed_hwfn *,
- struct qed_ptt *, enum qed_mib_read_type);
+qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_mib_read_type type);
int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn);
void qed_dcbx_info_free(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 6ab3e60d4928..e3edca187ddf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#include <linux/module.h>
@@ -10,6 +10,7 @@
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
+#include "qed_dbg_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
@@ -121,6 +122,11 @@ static u32 cond0(const u32 *r, const u32 *imm)
return (r[0] & ~r[1]) != imm[0];
}
+static u32 cond14(const u32 *r, const u32 *imm)
+{
+ return (r[0] | imm[0]) != imm[1];
+}
+
static u32 cond1(const u32 *r, const u32 *imm)
{
return r[0] != imm[0];
@@ -172,6 +178,7 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
cond11,
cond12,
cond13,
+ cond14,
};
#define NUM_PHYS_BLOCKS 84
@@ -208,10 +215,61 @@ enum dbg_bus_frame_modes {
DBG_BUS_NUM_FRAME_MODES
};
+/* Debug bus SEMI frame modes */
+enum dbg_bus_semi_frame_modes {
+ DBG_BUS_SEMI_FRAME_MODE_4FAST = 0, /* 4 fast dw */
+ DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW = 1, /* 2 fast dw, 2 slow dw */
+ DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW = 2, /* 1 fast dw, 3 slow dw */
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW = 3, /* 4 slow dw */
+ DBG_BUS_SEMI_NUM_FRAME_MODES
+};
+
+/* Debug bus filter types */
+enum dbg_bus_filter_types {
+ DBG_BUS_FILTER_TYPE_OFF, /* Filter always off */
+ DBG_BUS_FILTER_TYPE_PRE, /* Filter before trigger only */
+ DBG_BUS_FILTER_TYPE_POST, /* Filter after trigger only */
+ DBG_BUS_FILTER_TYPE_ON /* Filter always on */
+};
+
+/* Debug bus pre-trigger recording types */
+enum dbg_bus_pre_trigger_types {
+ DBG_BUS_PRE_TRIGGER_FROM_ZERO, /* Record from time 0 */
+ DBG_BUS_PRE_TRIGGER_NUM_CHUNKS, /* Record some chunks before trigger */
+ DBG_BUS_PRE_TRIGGER_DROP /* Drop data before trigger */
+};
+
+/* Debug bus post-trigger recording types */
+enum dbg_bus_post_trigger_types {
+ DBG_BUS_POST_TRIGGER_RECORD, /* Start recording after trigger */
+ DBG_BUS_POST_TRIGGER_DROP /* Drop data after trigger */
+};
+
+/* Debug bus other engine mode */
+enum dbg_bus_other_engine_modes {
+ DBG_BUS_OTHER_ENGINE_MODE_NONE,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX
+};
+
+/* DBG block Framing mode definitions */
+struct framing_mode_defs {
+ u8 id;
+ u8 blocks_dword_mask;
+ u8 storms_dword_mask;
+ u8 semi_framing_mode_id;
+ u8 full_buf_thr;
+};
+
/* Chip constant definitions */
struct chip_defs {
const char *name;
+ u8 dwords_per_cycle;
+ u8 num_framing_modes;
u32 num_ilt_pages;
+ struct framing_mode_defs *framing_modes;
};
/* HW type constant definitions */
@@ -334,7 +392,7 @@ struct split_type_defs {
#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
#define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
#define FIELD_DWORD_OFFSET(type, field) \
- (int)(FIELD_BIT_OFFSET(type, field) / 32)
+ ((int)(FIELD_BIT_OFFSET(type, field) / 32))
#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
#define FIELD_BIT_MASK(type, field) \
(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
@@ -431,11 +489,13 @@ struct split_type_defs {
#define STATIC_DEBUG_LINE_DWORDS 9
-#define NUM_COMMON_GLOBAL_PARAMS 9
+#define NUM_COMMON_GLOBAL_PARAMS 11
#define MAX_RECURSION_DEPTH 10
+#define FW_IMG_KUKU 0
#define FW_IMG_MAIN 1
+#define FW_IMG_L2B 2
#define REG_FIFO_ELEMENT_DWORDS 2
#define REG_FIFO_DEPTH_ELEMENTS 32
@@ -464,10 +524,25 @@ struct split_type_defs {
/***************************** Constant Arrays *******************************/
+/* DBG block framing mode definitions, in descending preference order */
+static struct framing_mode_defs s_framing_mode_defs[4] = {
+ {DBG_BUS_FRAME_MODE_4ST, 0x0, 0xf,
+ DBG_BUS_SEMI_FRAME_MODE_4FAST,
+ 10},
+ {DBG_BUS_FRAME_MODE_4HW, 0xf, 0x0, DBG_BUS_SEMI_FRAME_MODE_4SLOW,
+ 10},
+ {DBG_BUS_FRAME_MODE_2ST_2HW, 0x3, 0xc,
+ DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW, 10},
+ {DBG_BUS_FRAME_MODE_1ST_3HW, 0x7, 0x8,
+ DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW, 10}
+};
+
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
- {"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
- {"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
+ {"bb", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2,
+ s_framing_mode_defs},
+ {"ah", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2,
+ s_framing_mode_defs}
};
/* Storm constant definitions array */
@@ -477,8 +552,8 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
true,
TSEM_REG_FAST_MEMORY,
- TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
+ TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
+ TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
TCM_REG_CTX_RBC_ACCS,
{TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
@@ -491,10 +566,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
false,
MSEM_REG_FAST_MEMORY,
- MSEM_REG_DBG_FRAME_MODE_BB_K2,
- MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- MSEM_REG_SLOW_DBG_MODE_BB_K2,
- MSEM_REG_DBG_MODE1_CFG_BB_K2,
+ MSEM_REG_DBG_FRAME_MODE,
+ MSEM_REG_SLOW_DBG_ACTIVE,
+ MSEM_REG_SLOW_DBG_MODE,
+ MSEM_REG_DBG_MODE1_CFG,
MSEM_REG_SYNC_DBG_EMPTY,
MSEM_REG_DBG_GPRE_VECT,
MCM_REG_CTX_RBC_ACCS,
@@ -508,10 +583,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
false,
USEM_REG_FAST_MEMORY,
- USEM_REG_DBG_FRAME_MODE_BB_K2,
- USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- USEM_REG_SLOW_DBG_MODE_BB_K2,
- USEM_REG_DBG_MODE1_CFG_BB_K2,
+ USEM_REG_DBG_FRAME_MODE,
+ USEM_REG_SLOW_DBG_ACTIVE,
+ USEM_REG_SLOW_DBG_MODE,
+ USEM_REG_DBG_MODE1_CFG,
USEM_REG_SYNC_DBG_EMPTY,
USEM_REG_DBG_GPRE_VECT,
UCM_REG_CTX_RBC_ACCS,
@@ -525,10 +600,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
false,
XSEM_REG_FAST_MEMORY,
- XSEM_REG_DBG_FRAME_MODE_BB_K2,
- XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- XSEM_REG_SLOW_DBG_MODE_BB_K2,
- XSEM_REG_DBG_MODE1_CFG_BB_K2,
+ XSEM_REG_DBG_FRAME_MODE,
+ XSEM_REG_SLOW_DBG_ACTIVE,
+ XSEM_REG_SLOW_DBG_MODE,
+ XSEM_REG_DBG_MODE1_CFG,
XSEM_REG_SYNC_DBG_EMPTY,
XSEM_REG_DBG_GPRE_VECT,
XCM_REG_CTX_RBC_ACCS,
@@ -541,10 +616,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
false,
YSEM_REG_FAST_MEMORY,
- YSEM_REG_DBG_FRAME_MODE_BB_K2,
- YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- YSEM_REG_SLOW_DBG_MODE_BB_K2,
- YSEM_REG_DBG_MODE1_CFG_BB_K2,
+ YSEM_REG_DBG_FRAME_MODE,
+ YSEM_REG_SLOW_DBG_ACTIVE,
+ YSEM_REG_SLOW_DBG_MODE,
+ YSEM_REG_DBG_MODE1_CFG,
YSEM_REG_SYNC_DBG_EMPTY,
YSEM_REG_DBG_GPRE_VECT,
YCM_REG_CTX_RBC_ACCS,
@@ -558,10 +633,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
true,
PSEM_REG_FAST_MEMORY,
- PSEM_REG_DBG_FRAME_MODE_BB_K2,
- PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- PSEM_REG_SLOW_DBG_MODE_BB_K2,
- PSEM_REG_DBG_MODE1_CFG_BB_K2,
+ PSEM_REG_DBG_FRAME_MODE,
+ PSEM_REG_SLOW_DBG_ACTIVE,
+ PSEM_REG_SLOW_DBG_MODE,
+ PSEM_REG_DBG_MODE1_CFG,
PSEM_REG_SYNC_DBG_EMPTY,
PSEM_REG_DBG_GPRE_VECT,
PCM_REG_CTX_RBC_ACCS,
@@ -575,7 +650,8 @@ static struct hw_type_defs s_hw_type_defs[] = {
{"asic", 1, 256, 32768},
{"reserved", 0, 0, 0},
{"reserved2", 0, 0, 0},
- {"reserved3", 0, 0, 0}
+ {"reserved3", 0, 0, 0},
+ {"reserved4", 0, 0, 0}
};
static struct grc_param_defs s_grc_param_defs[] = {
@@ -772,25 +848,25 @@ static struct rbc_reset_defs s_rbc_reset_defs[] = {
static struct phy_defs s_phy_defs[] = {
{"nw_phy", NWS_REG_NWS_CMU_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
- {"sgmii_phy", MS_REG_MS_CMU_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
- {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
- {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
+ {"sgmii_phy", MS_REG_MS_CMU_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
};
static struct split_type_defs s_split_type_defs[] = {
@@ -810,8 +886,17 @@ static struct split_type_defs s_split_type_defs[] = {
{"vf"}
};
+/******************************** Variables **********************************/
+
+/* The version of the calling app */
+static u32 s_app_ver;
+
/**************************** Private Functions ******************************/
+static void qed_static_asserts(void)
+{
+}
+
/* Reads and returns a single dword from the specified unaligned buffer */
static u32 qed_read_unaligned_dword(u8 *buf)
{
@@ -870,6 +955,9 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
if (dev_data->initialized)
return DBG_STATUS_OK;
+ if (!s_app_ver)
+ return DBG_STATUS_APP_VERSION_NOT_SET;
+
/* Set chip */
if (QED_IS_K2(p_hwfn->cdev)) {
dev_data->chip_id = CHIP_K2;
@@ -990,11 +1078,6 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
dest[i] = qed_rd(p_hwfn, p_ptt, addr);
- /* qed_rq() fetches data in CPU byteorder. Swap it back to
- * the device's to get right structure layout.
- */
- cpu_to_le32_array(dest, size);
-
/* Read FW version info from Storm RAM */
size = le32_to_cpu(fw_info_location.size);
if (!size || size > sizeof(*fw_info))
@@ -1006,8 +1089,6 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
dest[i] = qed_rd(p_hwfn, p_ptt, addr);
-
- cpu_to_le32_array(dest, size);
}
/* Dumps the specified string to the specified buffer.
@@ -1117,9 +1198,15 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"Unexpected debug error: invalid FW version string\n");
switch (fw_info.ver.image_id) {
+ case FW_IMG_KUKU:
+ strcpy(fw_img_str, "kuku");
+ break;
case FW_IMG_MAIN:
strcpy(fw_img_str, "main");
break;
+ case FW_IMG_L2B:
+ strcpy(fw_img_str, "l2b");
+ break;
default:
strcpy(fw_img_str, "unknown");
break;
@@ -1255,6 +1342,8 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
s_hw_type_defs[dev_data->hw_type].name);
offset += qed_dump_num_param(dump_buf + offset,
dump, "pci-func", p_hwfn->abs_pf_id);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "epoch", qed_get_epoch_time());
if (dev_data->chip_id == CHIP_BB)
offset += qed_dump_num_param(dump_buf + offset,
dump, "path", QED_PATH_ID(p_hwfn));
@@ -1590,7 +1679,7 @@ static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
continue;
reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_STALL_0_BB_K2;
+ SEM_FAST_REG_STALL_0;
qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
}
@@ -1703,8 +1792,8 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
const struct dbg_attn_reg *attn_reg_arr;
+ u32 block_id, sts_clr_address;
u8 reg_idx, num_attn_regs;
- u32 block_id;
for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
if (dev_data->block_in_reset[block_id])
@@ -1728,16 +1817,103 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
GET_FIELD(reg_data->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
+ sts_clr_address = reg_data->sts_clr_address;
/* If Mode match: clear parity status */
if (!eval_mode ||
qed_is_mode_match(p_hwfn, &modes_buf_offset))
qed_rd(p_hwfn, p_ptt,
- DWORDS_TO_BYTES(reg_data->
- sts_clr_address));
+ DWORDS_TO_BYTES(sts_clr_address));
}
}
}
+/* Finds the meta data image in NVRAM */
+static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 image_type,
+ u32 *nvram_offset_bytes,
+ u32 *nvram_size_bytes)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
+ struct mcp_file_att file_att;
+ int nvm_result;
+
+ /* Call NVRAM get file command */
+ nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+ p_ptt,
+ DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type,
+ &ret_mcp_resp,
+ &ret_mcp_param,
+ &ret_txn_size,
+ (u32 *)&file_att, false);
+
+ /* Check response */
+ if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) !=
+ FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+
+ /* Update return values */
+ *nvram_offset_bytes = file_att.nvm_start_addr;
+ *nvram_size_bytes = file_att.len;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
+ image_type, *nvram_offset_bytes, *nvram_size_bytes);
+
+ /* Check alignment */
+ if (*nvram_size_bytes & 0x3)
+ return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
+
+ return DBG_STATUS_OK;
+}
+
+/* Reads data from NVRAM */
+static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_bytes,
+ u32 nvram_size_bytes, u32 *ret_buf)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
+ s32 bytes_left = nvram_size_bytes;
+ u32 read_offset = 0, param = 0;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "nvram_read: reading image of size %d bytes from NVRAM\n",
+ nvram_size_bytes);
+
+ do {
+ bytes_to_copy =
+ (bytes_left >
+ MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
+
+ /* Call NVRAM read command */
+ SET_MFW_FIELD(param,
+ DRV_MB_PARAM_NVM_OFFSET,
+ nvram_offset_bytes + read_offset);
+ SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
+ if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM, param,
+ &ret_mcp_resp,
+ &ret_mcp_param, &ret_read_size,
+ (u32 *)((u8 *)ret_buf + read_offset),
+ false))
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Check response */
+ if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Update read offset */
+ read_offset += ret_read_size;
+ bytes_left -= ret_read_size;
+ } while (bytes_left > 0);
+
+ return DBG_STATUS_OK;
+}
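+
+/* Typical pairing (a sketch; 'image_type' values are defined by the MFW
+ * interface):
+ *
+ *     u32 nvram_offset, nvram_size;
+ *
+ *     if (qed_find_nvram_image(p_hwfn, p_ptt, image_type, &nvram_offset,
+ *                              &nvram_size) == DBG_STATUS_OK)
+ *             qed_nvram_read(p_hwfn, p_ptt, nvram_offset, nvram_size,
+ *                            ret_buf);
+ */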
+
/* Dumps GRC registers section header. Returns the dumped size in dwords.
* the following parameters are dumped:
* - count: no. of dumped entries
@@ -3189,17 +3365,6 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
return offset;
}
-static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 image_type,
- u32 *nvram_offset_bytes,
- u32 *nvram_size_bytes);
-
-static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 nvram_offset_bytes,
- u32 nvram_size_bytes, u32 *ret_buf);
-
/* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3283,10 +3448,6 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
has_dbg_bus = GET_FIELD(block_per_chip->flags,
DBG_BLOCK_CHIP_HAS_DBG_BUS);
- /* read+clear for NWS parity is not working, skip NWS block */
- if (block_id == BLOCK_NWS)
- continue;
-
if (!is_removed && has_dbg_bus &&
GET_FIELD(block_per_chip->dbg_bus_mode.data,
DBG_MODE_HDR_EVAL_MODE) > 0) {
@@ -3375,8 +3536,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
bool dump, u32 *num_dumped_dwords)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 dwords_read, offset = 0;
bool parities_masked = false;
+ u32 dwords_read, offset = 0;
u8 i;
*num_dumped_dwords = 0;
@@ -3545,8 +3706,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
*/
static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 *
- dump_buf,
+ u32 *dump_buf,
bool dump,
u16 rule_id,
const struct dbg_idle_chk_rule *rule,
@@ -3894,91 +4054,6 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
return offset;
}
-/* Finds the meta data image in NVRAM */
-static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 image_type,
- u32 *nvram_offset_bytes,
- u32 *nvram_size_bytes)
-{
- u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
- struct mcp_file_att file_att;
- int nvm_result;
-
- /* Call NVRAM get file command */
- nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
- p_ptt,
- DRV_MSG_CODE_NVM_GET_FILE_ATT,
- image_type,
- &ret_mcp_resp,
- &ret_mcp_param,
- &ret_txn_size, (u32 *)&file_att);
-
- /* Check response */
- if (nvm_result ||
- (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
- return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
-
- /* Update return values */
- *nvram_offset_bytes = file_att.nvm_start_addr;
- *nvram_size_bytes = file_att.len;
-
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
- image_type, *nvram_offset_bytes, *nvram_size_bytes);
-
- /* Check alignment */
- if (*nvram_size_bytes & 0x3)
- return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
-
- return DBG_STATUS_OK;
-}
-
-/* Reads data from NVRAM */
-static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 nvram_offset_bytes,
- u32 nvram_size_bytes, u32 *ret_buf)
-{
- u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
- s32 bytes_left = nvram_size_bytes;
- u32 read_offset = 0, param = 0;
-
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "nvram_read: reading image of size %d bytes from NVRAM\n",
- nvram_size_bytes);
-
- do {
- bytes_to_copy =
- (bytes_left >
- MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
-
- /* Call NVRAM read command */
- SET_MFW_FIELD(param,
- DRV_MB_PARAM_NVM_OFFSET,
- nvram_offset_bytes + read_offset);
- SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
- if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
- DRV_MSG_CODE_NVM_READ_NVRAM, param,
- &ret_mcp_resp,
- &ret_mcp_param, &ret_read_size,
- (u32 *)((u8 *)ret_buf + read_offset)))
- return DBG_STATUS_NVRAM_READ_FAILED;
-
- /* Check response */
- if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
- return DBG_STATUS_NVRAM_READ_FAILED;
-
- /* Update read offset */
- read_offset += ret_read_size;
- bytes_left -= ret_read_size;
- } while (bytes_left > 0);
-
- return DBG_STATUS_OK;
-}
-
/* Get info on the MCP Trace data in the scratchpad:
* - trace_data_grc_addr (OUT): trace data GRC address in bytes
* - trace_data_size (OUT): trace data size in bytes (without the header)
@@ -4480,14 +4555,18 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
/* Dumps the specified ILT pages to the specified buffer.
* Returns the dumped size in dwords.
*/
-static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
- bool dump,
- u32 start_page_id,
+static u32 qed_ilt_dump_pages_range(u32 *dump_buf, u32 *given_offset,
+ bool *dump, u32 start_page_id,
u32 num_pages,
struct phys_mem_desc *ilt_pages,
- bool dump_page_ids)
+ bool dump_page_ids, u32 buf_size_in_dwords,
+ u32 *given_actual_dump_size_in_dwords)
{
- u32 page_id, end_page_id, offset = 0;
+ u32 actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
+ u32 page_id, end_page_id, offset = *given_offset;
+ struct phys_mem_desc *mem_desc = NULL;
+ bool continue_dump = *dump;
+ u32 partial_page_size = 0;
if (num_pages == 0)
return offset;
@@ -4495,31 +4574,51 @@ static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
end_page_id = start_page_id + num_pages - 1;
for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
- struct phys_mem_desc *mem_desc = &ilt_pages[page_id];
-
- /**
- *
- * if (page_id >= ->p_cxt_mngr->ilt_shadow_size)
- * break;
- */
-
+ mem_desc = &ilt_pages[page_id];
if (!ilt_pages[page_id].virt_addr)
continue;
if (dump_page_ids) {
- /* Copy page ID to dump buffer */
- if (dump)
+ /* Copy page ID to dump buffer
+ * (if dump is needed and buffer is not full)
+ */
+ if ((continue_dump) &&
+ (offset + 1 > buf_size_in_dwords)) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ if (continue_dump)
*(dump_buf + offset) = page_id;
offset++;
} else {
/* Copy page memory to dump buffer */
- if (dump)
+ if ((continue_dump) &&
+ (offset + BYTES_TO_DWORDS(mem_desc->size) >
+ buf_size_in_dwords)) {
+ if (offset + BYTES_TO_DWORDS(mem_desc->size) >
+ buf_size_in_dwords) {
+ partial_page_size =
+ buf_size_in_dwords - offset;
+ memcpy(dump_buf + offset,
+ mem_desc->virt_addr,
+ partial_page_size);
+ continue_dump = false;
+ actual_dump_size_in_dwords =
+ offset + partial_page_size;
+ }
+ }
+
+ if (continue_dump)
memcpy(dump_buf + offset,
mem_desc->virt_addr, mem_desc->size);
offset += BYTES_TO_DWORDS(mem_desc->size);
}
}
+ *dump = continue_dump;
+ *given_offset = offset;
+ *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
+
return offset;
}
@@ -4528,21 +4627,30 @@ static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
*/
static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
- bool dump,
+ u32 *given_offset,
+ bool *dump,
u32 valid_conn_pf_pages,
u32 valid_conn_vf_pages,
struct phys_mem_desc *ilt_pages,
- bool dump_page_ids)
+ bool dump_page_ids,
+ u32 buf_size_in_dwords,
+ u32 *given_actual_dump_size_in_dwords)
{
struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
- u32 pf_start_line, start_page_id, offset = 0;
+ u32 pf_start_line, start_page_id, offset = *given_offset;
u32 cdut_pf_init_pages, cdut_vf_init_pages;
u32 cdut_pf_work_pages, cdut_vf_work_pages;
u32 base_data_offset, size_param_offset;
+ u32 src_pages;
+ u32 section_header_and_param_size;
u32 cdut_pf_pages, cdut_vf_pages;
+ u32 actual_dump_size_in_dwords;
+ bool continue_dump = *dump;
+ bool update_size = *dump;
const char *section_name;
- u8 i;
+ u32 i;
+ actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
@@ -4551,13 +4659,26 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;
+ section_header_and_param_size = qed_dump_section_hdr(NULL,
+ false,
+ section_name,
+ 1) +
+ qed_dump_num_param(NULL, false, "size", 0);
+
+ if ((continue_dump) &&
+ (offset + section_header_and_param_size > buf_size_in_dwords)) {
+ continue_dump = false;
+ update_size = false;
+ actual_dump_size_in_dwords = offset;
+ }
- offset +=
- qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1);
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ continue_dump, section_name, 1);
/* Dump size parameter (0 for now, overwritten with real size later) */
size_param_offset = offset;
- offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+ offset += qed_dump_num_param(dump_buf + offset,
+ continue_dump, "size", 0);
base_data_offset = offset;
/* CDUC pages are ordered as follows:
@@ -4570,22 +4691,22 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
/* Dump connection PF pages */
start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- valid_conn_pf_pages,
- ilt_pages, dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
+ start_page_id, valid_conn_pf_pages,
+ ilt_pages, dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
/* Dump connection VF pages */
start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- valid_conn_vf_pages,
- ilt_pages,
- dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset,
+ &continue_dump, start_page_id,
+ valid_conn_vf_pages,
+ ilt_pages, dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
}
/* CDUT pages are ordered as follows:
@@ -4599,63 +4720,84 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
/* Dump task PF pages */
start_page_id = clients[ILT_CLI_CDUT].first.val +
cdut_pf_init_pages - pf_start_line;
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- cdut_pf_work_pages,
- ilt_pages, dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
+ start_page_id, cdut_pf_work_pages,
+ ilt_pages, dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
/* Dump task VF pages */
start_page_id = clients[ILT_CLI_CDUT].first.val +
cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
i++, start_page_id += cdut_vf_pages)
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- cdut_vf_work_pages,
- ilt_pages,
- dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset,
+ &continue_dump, start_page_id,
+ cdut_vf_work_pages, ilt_pages,
+ dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
+ }
+
+ /* Dump Searcher pages */
+ if (clients[ILT_CLI_SRC].active) {
+ start_page_id = clients[ILT_CLI_SRC].first.val - pf_start_line;
+ src_pages = clients[ILT_CLI_SRC].last.val -
+ clients[ILT_CLI_SRC].first.val + 1;
+ qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
+ start_page_id, src_pages, ilt_pages,
+ dump_page_ids, buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
}
/* Overwrite size param */
- if (dump)
- qed_dump_num_param(dump_buf + size_param_offset,
- dump, "size", offset - base_data_offset);
+ if (update_size) {
+ u32 section_size = (*dump == continue_dump) ?
+ offset - base_data_offset :
+ actual_dump_size_in_dwords - base_data_offset;
+ if (section_size > 0)
+ qed_dump_num_param(dump_buf + size_param_offset,
+ *dump, "size", section_size);
+ else if ((section_size == 0) && (*dump != continue_dump))
+ actual_dump_size_in_dwords -=
+ section_header_and_param_size;
+ }
+
+ *dump = continue_dump;
+ *given_offset = offset;
+ *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
return offset;
}
-/* Performs ILT Dump to the specified buffer.
+/* Dumps a section containing the global parameters.
+ * Part of the ILT dump process.
* Returns the dumped size in dwords.
*/
-static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+static u32
+qed_ilt_dump_dump_common_global_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u32 cduc_page_size,
+ u32 conn_ctx_size,
+ u32 cdut_page_size,
+ u32 *full_dump_size_param_offset,
+ u32 *actual_dump_size_param_offset)
{
struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
- u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0;
- u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages;
- u32 num_cids_per_page, conn_ctx_size;
- u32 cduc_page_size, cdut_page_size;
- struct phys_mem_desc *ilt_pages;
- u8 conn_type;
-
- cduc_page_size = 1 <<
- (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
- cdut_page_size = 1 <<
- (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
- conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
- num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
- ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
+ u32 offset = 0;
- /* Dump global params - 22 must match number of params below */
offset += qed_dump_common_global_params(p_hwfn, p_ptt,
- dump_buf + offset, dump, 22);
+ dump_buf + offset,
+ dump, 30);
offset += qed_dump_str_param(dump_buf + offset,
- dump, "dump-type", "ilt-dump");
+ dump,
+ "dump-type", "ilt-dump");
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "cduc-page-size", cduc_page_size);
+ "cduc-page-size",
+ cduc_page_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cduc-first-page-id",
@@ -4667,20 +4809,19 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cduc-num-pf-pages",
- clients
- [ILT_CLI_CDUC].pf_total_lines);
+ clients[ILT_CLI_CDUC].pf_total_lines);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cduc-num-vf-pages",
- clients
- [ILT_CLI_CDUC].vf_total_lines);
+ clients[ILT_CLI_CDUC].vf_total_lines);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"max-conn-ctx-size",
conn_ctx_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "cdut-page-size", cdut_page_size);
+ "cdut-page-size",
+ cdut_page_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cdut-first-page-id",
@@ -4711,19 +4852,16 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
p_hwfn->p_cxt_mngr->task_ctx_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "task-type-id",
- p_hwfn->p_cxt_mngr->task_type_id);
- offset += qed_dump_num_param(dump_buf + offset,
- dump,
"first-vf-id-in-pf",
p_hwfn->p_cxt_mngr->first_vf_in_pf);
- offset += /* 18 */ qed_dump_num_param(dump_buf + offset,
- dump,
- "num-vfs-in-pf",
- p_hwfn->p_cxt_mngr->vf_count);
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "ptr-size-bytes", sizeof(void *));
+ "num-vfs-in-pf",
+ p_hwfn->p_cxt_mngr->vf_count);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "ptr-size-bytes",
+ sizeof(void *));
offset += qed_dump_num_param(dump_buf + offset,
dump,
"pf-start-line",
@@ -4736,58 +4874,281 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
dump,
"ilt-shadow-size",
p_hwfn->p_cxt_mngr->ilt_shadow_size);
+
+ *full_dump_size_param_offset = offset;
+
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "dump-size-full", 0);
+
+ *actual_dump_size_param_offset = offset;
+
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "dump-size-actual", 0);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "iscsi_task_pages",
+ p_hwfn->p_cxt_mngr->iscsi_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "fcoe_task_pages",
+ p_hwfn->p_cxt_mngr->fcoe_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "roce_task_pages",
+ p_hwfn->p_cxt_mngr->roce_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "eth_task_pages",
+ p_hwfn->p_cxt_mngr->eth_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "src-first-page-id",
+ clients[ILT_CLI_SRC].first.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "src-last-page-id",
+ clients[ILT_CLI_SRC].last.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "src-is-active",
+ clients[ILT_CLI_SRC].active);
+
/* Additional/Less parameters require matching of number in call to
* dump_common_global_params()
*/
- /* Dump section containing number of PF CIDs per connection type */
+ return offset;
+}
+
+/* Dump section containing number of PF CIDs per connection type.
+ * Part of the ILT dump process.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_dump_num_pf_cids(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump, u32 *valid_conn_pf_cids)
+{
+ u32 num_pf_cids = 0;
+ u32 offset = 0;
+ u8 conn_type;
+
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "num_pf_cids_per_conn_type", 1);
offset += qed_dump_num_param(dump_buf + offset,
- dump, "size", NUM_OF_CONNECTION_TYPES_E4);
- for (conn_type = 0, valid_conn_pf_cids = 0;
- conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
- u32 num_pf_cids =
- p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
-
+ dump, "size", NUM_OF_CONNECTION_TYPES);
+ for (conn_type = 0, *valid_conn_pf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
+ num_pf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
if (dump)
*(dump_buf + offset) = num_pf_cids;
- valid_conn_pf_cids += num_pf_cids;
+ *valid_conn_pf_cids += num_pf_cids;
}
- /* Dump section containing number of VF CIDs per connection type */
- offset += qed_dump_section_hdr(dump_buf + offset,
- dump, "num_vf_cids_per_conn_type", 1);
+ return offset;
+}
+
+/* Dump section containing number of VF CIDs per connection type.
+ * Part of the ILT dump process.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_dump_num_vf_cids(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump, u32 *valid_conn_vf_cids)
+{
+ u32 num_vf_cids = 0;
+ u32 offset = 0;
+ u8 conn_type;
+
+ offset += qed_dump_section_hdr(dump_buf + offset, dump,
+ "num_vf_cids_per_conn_type", 1);
offset += qed_dump_num_param(dump_buf + offset,
- dump, "size", NUM_OF_CONNECTION_TYPES_E4);
- for (conn_type = 0, valid_conn_vf_cids = 0;
- conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
- u32 num_vf_cids =
+ dump, "size", NUM_OF_CONNECTION_TYPES);
+ for (conn_type = 0, *valid_conn_vf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
+ num_vf_cids =
p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
-
if (dump)
*(dump_buf + offset) = num_vf_cids;
- valid_conn_vf_cids += num_vf_cids;
+ *valid_conn_vf_cids += num_vf_cids;
+ }
+
+ return offset;
+}
+
+/* Performs ILT Dump to the specified buffer.
+ * buf_size_in_dwords - The dumped buffer size.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, u32 buf_size_in_dwords, bool dump)
+{
+#if ((!defined VMWARE) && (!defined UEFI))
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+#endif
+ u32 valid_conn_vf_cids = 0,
+ valid_conn_vf_pages, offset = 0, real_dumped_size = 0;
+ u32 valid_conn_pf_cids = 0, valid_conn_pf_pages, num_pages;
+ u32 num_cids_per_page, conn_ctx_size;
+ u32 cduc_page_size, cdut_page_size;
+ u32 actual_dump_size_in_dwords = 0;
+ struct phys_mem_desc *ilt_pages;
+ u32 actul_dump_off = 0;
+ u32 last_section_size;
+ u32 full_dump_off = 0;
+ u32 section_size = 0;
+ bool continue_dump;
+ u32 page_id;
+
+ last_section_size = qed_dump_last_section(NULL, 0, false);
+ cduc_page_size = 1 <<
+ (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ cdut_page_size = 1 <<
+ (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
+ num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
+ ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
+ continue_dump = dump;
+
+ /* If dumping is needed, reserve room for the last section
+ * (the last section calculates the CRC of the dumped data)
+ */
+ if (dump) {
+ if (buf_size_in_dwords >= last_section_size) {
+ buf_size_in_dwords -= last_section_size;
+ } else {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
}
- /* Dump section containing physical memory descs for each ILT page */
+ /* Dump global params */
+
+ /* If dumping is needed, first calculate the size of this section
+ * without dumping, and check that the dump buffer has enough room
+ * for it. If there is not enough memory, stop the dumping.
+ */
+ if (continue_dump) {
+ section_size =
+ qed_ilt_dump_dump_common_global_params(p_hwfn,
+ p_ptt,
+ NULL,
+ false,
+ cduc_page_size,
+ conn_ctx_size,
+ cdut_page_size,
+ &full_dump_off,
+ &actul_dump_off);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_ilt_dump_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ continue_dump,
+ cduc_page_size,
+ conn_ctx_size,
+ cdut_page_size,
+ &full_dump_off,
+ &actul_dump_off);
+
+ /* Dump section containing number of PF CIDs per connection type
+ * If dumping is needed, first check that the dump buffer has
+ * enough room for this section.
+ */
+ if (continue_dump) {
+ section_size =
+ qed_ilt_dump_dump_num_pf_cids(p_hwfn,
+ NULL,
+ false,
+ &valid_conn_pf_cids);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_ilt_dump_dump_num_pf_cids(p_hwfn,
+ dump_buf + offset,
+ continue_dump,
+ &valid_conn_pf_cids);
+
+ /* Dump section containing number of VF CIDs per connection type
+ * If dumping is needed, first check that the dump buffer has
+ * enough room for this section.
+ */
+ if (continue_dump) {
+ section_size =
+ qed_ilt_dump_dump_num_vf_cids(p_hwfn,
+ NULL,
+ false,
+ &valid_conn_vf_cids);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_ilt_dump_dump_num_vf_cids(p_hwfn,
+ dump_buf + offset,
+ continue_dump,
+ &valid_conn_vf_cids);
+
+ /* Dump section containing physical memory descriptors for each
+ * ILT page.
+ */
num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
+
+ /* If dumping is needed, first check that the dump buffer has
+ * enough room for the section header.
+ */
+ if (continue_dump) {
+ section_size = qed_dump_section_hdr(NULL,
+ false,
+ "ilt_page_desc",
+ 1) +
+ qed_dump_num_param(NULL,
+ false,
+ "size",
+ num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
offset += qed_dump_section_hdr(dump_buf + offset,
- dump, "ilt_page_desc", 1);
+ continue_dump, "ilt_page_desc", 1);
offset += qed_dump_num_param(dump_buf + offset,
- dump,
+ continue_dump,
"size",
num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
- /* Copy memory descriptors to dump buffer */
- if (dump) {
- u32 page_id;
-
+ /* Copy memory descriptors to the dump buffer.
+ * If dumping is needed, dump only up to the dump buffer size.
+ */
+ if (continue_dump) {
for (page_id = 0; page_id < num_pages;
- page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS)
- memcpy(dump_buf + offset,
- &ilt_pages[page_id],
- DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS));
+ page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) {
+ if (continue_dump &&
+ (offset + PAGE_MEM_DESC_SIZE_DWORDS <=
+ buf_size_in_dwords)) {
+ memcpy(dump_buf + offset,
+ &ilt_pages[page_id],
+ DWORDS_TO_BYTES
+ (PAGE_MEM_DESC_SIZE_DWORDS));
+ } else {
+ if (continue_dump) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+ }
} else {
offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
}
@@ -4798,25 +5159,31 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
num_cids_per_page);
/* Dump ILT pages IDs */
- offset += qed_ilt_dump_pages_section(p_hwfn,
- dump_buf + offset,
- dump,
- valid_conn_pf_pages,
- valid_conn_vf_pages,
- ilt_pages, true);
+ qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
+ valid_conn_pf_pages, valid_conn_vf_pages,
+ ilt_pages, true, buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
/* Dump ILT pages memory */
- offset += qed_ilt_dump_pages_section(p_hwfn,
- dump_buf + offset,
- dump,
- valid_conn_pf_pages,
- valid_conn_vf_pages,
- ilt_pages, false);
+ qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
+ valid_conn_pf_pages, valid_conn_vf_pages,
+ ilt_pages, false, buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
+
+ real_dumped_size =
+ (continue_dump == dump) ? offset : actual_dump_size_in_dwords;
+ qed_dump_num_param(dump_buf + full_dump_off, dump,
+ "full-dump-size", offset + last_section_size);
+ qed_dump_num_param(dump_buf + actul_dump_off,
+ dump,
+ "actual-dump-size",
+ real_dumped_size + last_section_size);
/* Dump last section */
- offset += qed_dump_last_section(dump_buf, offset, dump);
+ real_dumped_size += qed_dump_last_section(dump_buf,
+ real_dumped_size, dump);
- return offset;
+ return real_dumped_size;
}
/***************************** Public Functions *******************************/
@@ -4837,6 +5204,16 @@ enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
return DBG_STATUS_OK;
}
+static enum dbg_status qed_dbg_set_app_ver(u32 ver)
+{
+ if (ver < TOOLS_VERSION)
+ return DBG_STATUS_UNSUPPORTED_APP_VERSION;
+
+ s_app_ver = ver;
+
+ return DBG_STATUS_OK;
+}
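+
+/* Usage note (illustrative): qed_dbg_dev_init() returns
+ * DBG_STATUS_APP_VERSION_NOT_SET until the calling app has registered
+ * its version, e.g.:
+ *
+ *     if (qed_dbg_set_app_ver(TOOLS_VERSION) != DBG_STATUS_OK)
+ *             return -EINVAL;
+ */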
+
bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct fw_info *fw_info)
{
@@ -4975,6 +5352,9 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+ /* Doesn't do anything, needed for compile time asserts */
+ qed_static_asserts();
+
/* GRC Dump */
status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
@@ -5296,7 +5676,7 @@ static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
if (status != DBG_STATUS_OK)
return status;
- *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false);
+ *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, 0, false);
return DBG_STATUS_OK;
}
@@ -5307,21 +5687,9 @@ static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords)
{
- u32 needed_buf_size_in_dwords;
- enum dbg_status status;
-
- *num_dumped_dwords = 0;
-
- status = qed_dbg_ilt_get_dump_buf_size(p_hwfn,
- p_ptt,
- &needed_buf_size_in_dwords);
- if (status != DBG_STATUS_OK)
- return status;
-
- if (buf_size_in_dwords < needed_buf_size_in_dwords)
- return DBG_STATUS_DUMP_BUF_TOO_SMALL;
-
- *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true);
+ *num_dumped_dwords = qed_ilt_dump(p_hwfn,
+ p_ptt,
+ dump_buf, buf_size_in_dwords, true);
 /* Revert GRC params to their default */
qed_dbg_grc_set_params_default(p_hwfn);
@@ -5724,7 +6092,46 @@ static const char * const s_status_str[] = {
"The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input",
/* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
- "When triggering on Storm data, the Storm to trigger on must be specified"
+ "When triggering on Storm data, the Storm to trigger on must be specified",
+
+ /* DBG_STATUS_MDUMP2_FAILED_TO_REQUEST_OFFSIZE */
+ "Failed to request MDUMP2 Offsize",
+
+ /* DBG_STATUS_MDUMP2_FAILED_VALIDATION_OF_DATA_CRC */
+ "Expected CRC (part of the MDUMP2 data) is different than the calculated CRC over that data",
+
+ /* DBG_STATUS_MDUMP2_INVALID_SIGNATURE */
+ "Invalid Signature found at start of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_INVALID_LOG_SIZE */
+ "Invalid Log Size of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_INVALID_LOG_HDR */
+ "Invalid Log Header of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_INVALID_LOG_DATA */
+ "Invalid Log Data of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_NUM_PORTS */
+ "Could not extract number of ports from regval buf of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_MFW_STATUS */
+ "Could not extract MFW (link) status from regval buf of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_DISPLAYING_LINKDUMP */
+ "Could not display linkdump of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_READING_PHY_CFG */
+ "Could not read PHY CFG of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_READING_PLL_MODE */
+ "Could not read PLL Mode of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_READING_LANE_REGS */
+ "Could not read TSCF/TSCE Lane Regs of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_ALLOCATING_BUF */
+ "Could not allocate MDUMP2 reg-val internal buffer"
};
/* Idle check severity names array */
@@ -5874,6 +6281,10 @@ static char s_temp_buf[MAX_MSG_LEN];
/**************************** Private Functions ******************************/
+static void qed_user_static_asserts(void)
+{
+}
+
static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
{
return (a + b) % size;
@@ -6153,9 +6564,8 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
/* Skip register names until the required reg_id is
* reached.
*/
- for (; reg_id > curr_reg_id;
- curr_reg_id++,
- parsing_str += strlen(parsing_str) + 1);
+ for (; reg_id > curr_reg_id; curr_reg_id++)
+ parsing_str += strlen(parsing_str) + 1;
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
@@ -6208,9 +6618,9 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
u32 *num_errors,
u32 *num_warnings)
{
+ u32 num_section_params = 0, num_rules, num_rules_not_dumped;
const char *section_name, *param_name, *param_str_val;
u32 *dump_buf_end = dump_buf + num_dumped_dwords;
- u32 num_section_params = 0, num_rules;
/* Offset in results_buf in bytes */
u32 results_offset = 0;
@@ -6234,15 +6644,31 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
num_section_params,
results_buf, &results_offset);
- /* Read idle_chk section */
+ /* Read idle_chk section
+ * There may be 1 or 2 idle_chk section parameters:
+ * - 1st is "num_rules"
+ * - 2nd is "num_rules_not_dumped" (optional)
+ */
+
dump_buf += qed_read_section_hdr(dump_buf,
&section_name, &num_section_params);
- if (strcmp(section_name, "idle_chk") || num_section_params != 1)
+ if (strcmp(section_name, "idle_chk") ||
+ (num_section_params != 2 && num_section_params != 1))
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
dump_buf += qed_read_param(dump_buf,
&param_name, &param_str_val, &num_rules);
if (strcmp(param_name, "num_rules"))
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ if (num_section_params > 1) {
+ dump_buf += qed_read_param(dump_buf,
+ &param_name,
+ &param_str_val,
+ &num_rules_not_dumped);
+ if (strcmp(param_name, "num_rules_not_dumped"))
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ } else {
+ num_rules_not_dumped = 0;
+ }
if (num_rules) {
u32 rules_print_size;
@@ -6309,6 +6735,13 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
results_offset),
"\nIdle Check completed successfully\n");
+ if (num_rules_not_dumped)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nIdle Check Partially dumped : num_rules_not_dumped = %d\n",
+ num_rules_not_dumped);
+
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
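The hunk above makes the second idle_chk section parameter optional: "num_rules" is mandatory, while "num_rules_not_dumped" defaults to zero when an older dump omits it. A standalone sketch of that tolerant parse, with the (name, value) parameter layout simplified:

/* Sketch of the optional second section parameter handling above.
 * The param layout (a name paired with a u32 value) is simplified.
 */
#include <stdio.h>
#include <string.h>

struct param { const char *name; unsigned int val; };

static int parse_idle_chk_params(const struct param *params,
				 unsigned int num_params,
				 unsigned int *num_rules,
				 unsigned int *num_rules_not_dumped)
{
	if (num_params != 1 && num_params != 2)
		return -1;			/* parse failed */
	if (strcmp(params[0].name, "num_rules"))
		return -1;
	*num_rules = params[0].val;

	if (num_params > 1) {
		if (strcmp(params[1].name, "num_rules_not_dumped"))
			return -1;
		*num_rules_not_dumped = params[1].val;
	} else {
		*num_rules_not_dumped = 0;	/* older dumps omit it */
	}
	return 0;
}

int main(void)
{
	struct param p[] = { { "num_rules", 10 } };
	unsigned int rules, not_dumped;

	if (!parse_idle_chk_params(p, 1, &rules, &not_dumped))
		printf("rules=%u not_dumped=%u\n", rules, not_dumped);
	return 0;
}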
@@ -7160,6 +7593,9 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
+ /* Doesn't do anything, needed for compile time asserts */
+ qed_user_static_asserts();
+
return qed_parse_mcp_trace_dump(p_hwfn,
dump_buf,
results_buf, &parsed_buf_size, true);
@@ -7336,7 +7772,7 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
reg_result->block_attn_offset;
/* Go over attention status bits */
- for (j = 0; j < num_reg_attn; j++, bit_idx++) {
+ for (j = 0; j < num_reg_attn; j++) {
u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
DBG_ATTN_BIT_MAPPING_VAL);
const char *attn_name, *attn_type_str, *masked_str;
@@ -7353,35 +7789,36 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
}
/* Check current bit index */
- if (!(reg_result->sts_val & BIT(bit_idx)))
- continue;
+ if (reg_result->sts_val & BIT(bit_idx)) {
+ /* An attention bit with value=1 was found
+ * Find attention name
+ */
+ attn_name_offset =
+ block_attn_name_offsets[attn_idx_val];
+ attn_name = attn_name_base + attn_name_offset;
+ attn_type_str =
+ (attn_type ==
+ ATTN_TYPE_INTERRUPT ? "Interrupt" :
+ "Parity");
+ masked_str = reg_result->mask_val &
+ BIT(bit_idx) ?
+ " [masked]" : "";
+ sts_addr =
+ GET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_STS_ADDRESS);
+ DP_NOTICE(p_hwfn,
+ "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
+ block_name, attn_type_str, attn_name,
+ sts_addr * 4, bit_idx, masked_str);
+ }
- /* An attention bit with value=1 was found
- * Find attention name
- */
- attn_name_offset =
- block_attn_name_offsets[attn_idx_val];
- attn_name = attn_name_base + attn_name_offset;
- attn_type_str =
- (attn_type ==
- ATTN_TYPE_INTERRUPT ? "Interrupt" :
- "Parity");
- masked_str = reg_result->mask_val & BIT(bit_idx) ?
- " [masked]" : "";
- sts_addr = GET_FIELD(reg_result->data,
- DBG_ATTN_REG_RESULT_STS_ADDRESS);
- DP_NOTICE(p_hwfn,
- "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
- block_name, attn_type_str, attn_name,
- sts_addr * 4, bit_idx, masked_str);
+ bit_idx++;
}
}
return DBG_STATUS_OK;
}
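The rework above keeps a single running bit index across all mapped bits and reports only the ones set in the status value, appending "[masked]" when the mask register covers the bit. A minimal sketch of that scan with illustrative values:

/* Sketch of the attention-bit scan above: one running bit index,
 * reporting only bits that are set and flagging masked ones.
 * Names and values here are illustrative.
 */
#include <stdio.h>

#define BIT(n)	(1U << (n))

int main(void)
{
	unsigned int sts_val = BIT(1) | BIT(4);	/* latched attention bits */
	unsigned int mask_val = BIT(4);		/* bit 4 is masked */
	unsigned int bit_idx, num_bits = 8;

	for (bit_idx = 0; bit_idx < num_bits; bit_idx++) {
		if (!(sts_val & BIT(bit_idx)))
			continue;
		printf("attention on bit %u%s\n", bit_idx,
		       (mask_val & BIT(bit_idx)) ? " [masked]" : "");
	}
	return 0;
}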
-static DEFINE_MUTEX(qed_dbg_lock);
-
/* Wrapper for unifying the idle_chk and mcp_trace api */
static enum dbg_status
qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
@@ -7396,9 +7833,26 @@ qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
&num_warnnings);
}
+static DEFINE_MUTEX(qed_dbg_lock);
+
+#define MAX_PHY_RESULT_BUFFER 9000
+
+/******************************** Feature Meta data section ******************/
+
+#define GRC_NUM_STR_FUNCS 2
+#define IDLE_CHK_NUM_STR_FUNCS 1
+#define MCP_TRACE_NUM_STR_FUNCS 1
+#define REG_FIFO_NUM_STR_FUNCS 1
+#define IGU_FIFO_NUM_STR_FUNCS 1
+#define PROTECTION_OVERRIDE_NUM_STR_FUNCS 1
+#define FW_ASSERTS_NUM_STR_FUNCS 1
+#define ILT_NUM_STR_FUNCS 1
+#define PHY_NUM_STR_FUNCS 20
+
/* Feature meta data lookup table */
static struct {
char *name;
+ u32 num_funcs;
enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *size);
enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
@@ -7411,40 +7865,46 @@ static struct {
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+ const struct qed_func_lookup *hsi_func_lookup;
} qed_features_lookup[] = {
{
- "grc", qed_dbg_grc_get_dump_buf_size,
- qed_dbg_grc_dump, NULL, NULL}, {
- "idle_chk",
+ "grc", GRC_NUM_STR_FUNCS, qed_dbg_grc_get_dump_buf_size,
+ qed_dbg_grc_dump, NULL, NULL, NULL}, {
+ "idle_chk", IDLE_CHK_NUM_STR_FUNCS,
qed_dbg_idle_chk_get_dump_buf_size,
qed_dbg_idle_chk_dump,
qed_print_idle_chk_results_wrapper,
- qed_get_idle_chk_results_buf_size}, {
- "mcp_trace",
+ qed_get_idle_chk_results_buf_size,
+ NULL}, {
+ "mcp_trace", MCP_TRACE_NUM_STR_FUNCS,
qed_dbg_mcp_trace_get_dump_buf_size,
qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
- qed_get_mcp_trace_results_buf_size}, {
- "reg_fifo",
+ qed_get_mcp_trace_results_buf_size,
+ NULL}, {
+ "reg_fifo", REG_FIFO_NUM_STR_FUNCS,
qed_dbg_reg_fifo_get_dump_buf_size,
qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
- qed_get_reg_fifo_results_buf_size}, {
- "igu_fifo",
+ qed_get_reg_fifo_results_buf_size,
+ NULL}, {
+ "igu_fifo", IGU_FIFO_NUM_STR_FUNCS,
qed_dbg_igu_fifo_get_dump_buf_size,
qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
- qed_get_igu_fifo_results_buf_size}, {
- "protection_override",
+ qed_get_igu_fifo_results_buf_size,
+ NULL}, {
+ "protection_override", PROTECTION_OVERRIDE_NUM_STR_FUNCS,
qed_dbg_protection_override_get_dump_buf_size,
qed_dbg_protection_override_dump,
qed_print_protection_override_results,
- qed_get_protection_override_results_buf_size}, {
- "fw_asserts",
+ qed_get_protection_override_results_buf_size,
+ NULL}, {
+ "fw_asserts", FW_ASSERTS_NUM_STR_FUNCS,
qed_dbg_fw_asserts_get_dump_buf_size,
qed_dbg_fw_asserts_dump,
qed_print_fw_asserts_results,
- qed_get_fw_asserts_results_buf_size}, {
- "ilt",
- qed_dbg_ilt_get_dump_buf_size,
- qed_dbg_ilt_dump, NULL, NULL},};
+ qed_get_fw_asserts_results_buf_size,
+ NULL}, {
+ "ilt", ILT_NUM_STR_FUNCS, qed_dbg_ilt_get_dump_buf_size,
+ qed_dbg_ilt_dump, NULL, NULL, NULL},};
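Each debug feature is now described by one table row: name, number of string functions, and the get-size/dump/print callbacks, so callers dispatch by feature index instead of open-coding per-feature logic. A minimal sketch of the pattern with stub callbacks standing in for the driver's:

/* Minimal sketch of the feature lookup/dispatch table above.
 * The callbacks and feature set are placeholders, not the driver's.
 */
#include <stdio.h>

typedef unsigned int u32;

static int grc_get_size(u32 *size)   { *size = 128; return 0; }
static int grc_dump(u32 *buf, u32 n) { (void)buf; (void)n; return 0; }

static struct {
	const char *name;
	u32 num_funcs;
	int (*get_size)(u32 *size);
	int (*perform_dump)(u32 *buf, u32 n);
} features[] = {
	{ "grc", 2, grc_get_size, grc_dump },
	{ "ilt", 1, grc_get_size, grc_dump },	/* reuses the stubs */
};

int main(void)
{
	u32 i, size;

	for (i = 0; i < sizeof(features) / sizeof(features[0]); i++) {
		features[i].get_size(&size);
		printf("feature %s: %u dwords\n", features[i].name, size);
	}
	return 0;
}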
static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
{
@@ -7466,7 +7926,8 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_features[feature_idx];
- u32 text_size_bytes, null_char_pos, i;
+ u32 txt_size_bytes, null_char_pos, i;
+ u32 *dbuf, dwords;
enum dbg_status rc;
char *text_buf;
@@ -7474,33 +7935,43 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
if (!qed_features_lookup[feature_idx].results_buf_size)
return DBG_STATUS_OK;
+ dbuf = (u32 *)feature->dump_buf;
+ dwords = feature->dumped_dwords;
+
/* Obtain size of formatted output */
- rc = qed_features_lookup[feature_idx].
- results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
- feature->dumped_dwords, &text_size_bytes);
+ rc = qed_features_lookup[feature_idx].results_buf_size(p_hwfn,
+ dbuf,
+ dwords,
+ &txt_size_bytes);
if (rc != DBG_STATUS_OK)
return rc;
- /* Make sure that the allocated size is a multiple of dword (4 bytes) */
- null_char_pos = text_size_bytes - 1;
- text_size_bytes = (text_size_bytes + 3) & ~0x3;
+ /* Make sure that the allocated size is a multiple of dword
+ * (4 bytes).
+ */
+ null_char_pos = txt_size_bytes - 1;
+ txt_size_bytes = (txt_size_bytes + 3) & ~0x3;
- if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
+ if (txt_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
DP_NOTICE(p_hwfn->cdev,
"formatted size of feature was too small %d. Aborting\n",
- text_size_bytes);
+ txt_size_bytes);
return DBG_STATUS_INVALID_ARGS;
}
- /* Allocate temp text buf */
- text_buf = vzalloc(text_size_bytes);
- if (!text_buf)
+ /* allocate temp text buf */
+ text_buf = vzalloc(txt_size_bytes);
+ if (!text_buf) {
+ DP_NOTICE(p_hwfn->cdev,
+ "failed to allocate text buffer. Aborting\n");
return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ }
/* Decode feature opcodes to string on temp buf */
- rc = qed_features_lookup[feature_idx].
- print_results(p_hwfn, (u32 *)feature->dump_buf,
- feature->dumped_dwords, text_buf);
+ rc = qed_features_lookup[feature_idx].print_results(p_hwfn,
+ dbuf,
+ dwords,
+ text_buf);
if (rc != DBG_STATUS_OK) {
vfree(text_buf);
return rc;
@@ -7510,26 +7981,27 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
* The bytes that were added as a result of the dword alignment are also
* padded with '\n' characters.
*/
- for (i = null_char_pos; i < text_size_bytes; i++)
+ for (i = null_char_pos; i < txt_size_bytes; i++)
text_buf[i] = '\n';
/* Dump printable feature to log */
if (p_hwfn->cdev->print_dbg_data)
- qed_dbg_print_feature(text_buf, text_size_bytes);
+ qed_dbg_print_feature(text_buf, txt_size_bytes);
- /* Just return the original binary buffer if requested */
+ /* Dump binary data as is to the output file */
if (p_hwfn->cdev->dbg_bin_dump) {
vfree(text_buf);
- return DBG_STATUS_OK;
+ return rc;
}
- /* Free the old dump_buf and point the dump_buf to the newly allocagted
+ /* Free the old dump_buf and point the dump_buf to the newly allocated
* and formatted text buffer.
*/
vfree(feature->dump_buf);
feature->dump_buf = text_buf;
- feature->buf_size = text_size_bytes;
- feature->dumped_dwords = text_size_bytes / 4;
+ feature->buf_size = txt_size_bytes;
+ feature->dumped_dwords = txt_size_bytes / 4;
+
return rc;
}
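format_feature() rounds the text size up to a dword multiple with (size + 3) & ~0x3 and fills the terminator plus the alignment slack with '\n', so the formatted output stays dword-addressable. A standalone sketch of that round-up and padding:

/* Sketch of the dword round-up and '\n' padding done in format_feature.
 * The buffer here is illustrative.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16] = "abcde";			/* 5 chars + '\0' = 6 bytes */
	unsigned int size = strlen(buf) + 1;	/* includes NULL termination */
	unsigned int null_pos = size - 1;
	unsigned int aligned = (size + 3) & ~0x3U;	/* round up to 4 */
	unsigned int i;

	/* Pad the terminator and alignment slack with newlines */
	for (i = null_pos; i < aligned; i++)
		buf[i] = '\n';

	printf("size %u -> aligned %u\n", size, aligned);	/* 6 -> 8 */
	return 0;
}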
@@ -7542,7 +8014,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_features[feature_idx];
- u32 buf_size_dwords;
+ u32 buf_size_dwords, *dbuf, *dwords;
enum dbg_status rc;
DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
@@ -7580,13 +8052,16 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
if (!feature->dump_buf)
return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
- rc = qed_features_lookup[feature_idx].
- perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
- feature->buf_size / sizeof(u32),
- &feature->dumped_dwords);
+ dbuf = (u32 *)feature->dump_buf;
+ dwords = &feature->dumped_dwords;
+ rc = qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt,
+ dbuf,
+ feature->buf_size /
+ sizeof(u32),
+ dwords);
/* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
- * In this case the buffer holds valid binary data, but we wont able
+ * In this case the buffer holds valid binary data, but we won't be able
* to parse it (since parsing relies on data in NVRAM which is only
* accessible when MFW is responsive). skip the formatting but return
* success so that binary data is provided.
@@ -7777,7 +8252,8 @@ enum debug_print_features {
static u32 qed_calc_regdump_header(struct qed_dev *cdev,
enum debug_print_features feature,
- int engine, u32 feature_size, u8 omit_engine)
+ int engine, u32 feature_size,
+ u8 omit_engine, u8 dbg_bin_dump)
{
u32 res = 0;
@@ -7788,7 +8264,7 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev,
feature, feature_size);
SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
- SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1);
+ SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, dbg_bin_dump);
SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
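qed_calc_regdump_header() packs the feature id, size, engine and flags into a single u32 with mask/shift macros. A sketch of that packing, with field widths and positions that are illustrative rather than the driver's actual layout:

/* Sketch of packing the regdump header word with mask/shift macros.
 * Field widths and positions are illustrative, not the driver's layout.
 */
#include <stdio.h>

#define HDR_SIZE_MASK		0xffffffU	/* bits 0..23: feature size */
#define HDR_SIZE_SHIFT		0
#define HDR_FEATURE_MASK	0xfU		/* bits 24..27: feature id */
#define HDR_FEATURE_SHIFT	24
#define HDR_BIN_DUMP_MASK	0x1U		/* bit 28: binary dump flag */
#define HDR_BIN_DUMP_SHIFT	28

#define SET_FIELD(var, field, val) \
	((var) |= (((val) & field##_MASK) << field##_SHIFT))

int main(void)
{
	unsigned int hdr = 0;

	SET_FIELD(hdr, HDR_SIZE, 0x1234);
	SET_FIELD(hdr, HDR_FEATURE, 3);
	SET_FIELD(hdr, HDR_BIN_DUMP, 1);
	printf("header = 0x%08x\n", hdr);	/* 0x13001234 */
	return 0;
}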
@@ -7798,12 +8274,10 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev,
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
{
u8 cur_engine, omit_engine = 0, org_engine;
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- int grc_params[MAX_DBG_GRC_PARAMS], i;
+ int grc_params[MAX_DBG_GRC_PARAMS], rc, i;
u32 offset = 0, feature_size;
- int rc;
for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
grc_params[i] = dev_data->grc.param_val[i];
@@ -7811,8 +8285,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!QED_IS_CMT(cdev))
omit_engine = 1;
+ cdev->dbg_bin_dump = 1;
mutex_lock(&qed_dbg_lock);
- cdev->dbg_bin_dump = true;
org_engine = qed_get_debug_engine(cdev);
for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
@@ -7826,8 +8300,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, IDLE_CHK,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
@@ -7838,8 +8315,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, IDLE_CHK,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
@@ -7850,8 +8330,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, REG_FIFO, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, REG_FIFO,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
@@ -7862,8 +8345,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, IGU_FIFO,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
@@ -7875,9 +8361,12 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
&feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE,
+ qed_calc_regdump_header(cdev,
+ PROTECTION_OVERRIDE,
cur_engine,
- feature_size, omit_engine);
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev,
@@ -7891,8 +8380,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, FW_ASSERTS,
- cur_engine, feature_size,
- omit_engine);
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
@@ -7900,8 +8391,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
}
feature_size = qed_dbg_ilt_size(cdev);
- if (!cdev->disable_ilt_dump &&
- feature_size < ILT_DUMP_MAX_SIZE) {
+ if (!cdev->disable_ilt_dump && feature_size <
+ ILT_DUMP_MAX_SIZE) {
rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
@@ -7909,15 +8400,16 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
qed_calc_regdump_header(cdev, ILT_DUMP,
cur_engine,
feature_size,
- omit_engine);
- offset += feature_size + REGDUMP_HEADER_SIZE;
+ omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
rc);
}
}
- /* GRC dump - must be last because when mcp stuck it will
+ /* GRC dump - must be last because when the MCP is stuck it will
* clutter idle_chk, reg_fifo, ...
*/
for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
@@ -7929,7 +8421,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, GRC_DUMP,
cur_engine,
- feature_size, omit_engine);
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
@@ -7944,16 +8438,13 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
- feature_size, omit_engine);
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
}
- /* Re-populate nvm attribute info */
- qed_mcp_nvm_info_free(p_hwfn);
- qed_mcp_nvm_info_populate(p_hwfn);
-
/* nvm cfg1 */
rc = qed_dbg_nvm_image(cdev,
(u8 *)buffer + offset +
@@ -7962,43 +8453,51 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
- feature_size, omit_engine);
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
- QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
+ QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1",
+ rc);
}
- /* nvm default */
+ /* nvm default */
rc = qed_dbg_nvm_image(cdev,
- (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
- &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
+ (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_DEFAULT_CFG);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, DEFAULT_CFG,
+ cur_engine, feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
- QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
- rc);
+ QED_NVM_IMAGE_DEFAULT_CFG,
+ "QED_NVM_IMAGE_DEFAULT_CFG", rc);
}
/* nvm meta */
rc = qed_dbg_nvm_image(cdev,
- (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
- &feature_size, QED_NVM_IMAGE_NVM_META);
+ (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_NVM_META);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, NVM_META, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, NVM_META, cur_engine,
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
- QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
+ QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META",
+ rc);
}
/* nvm mdump */
@@ -8007,8 +8506,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
QED_NVM_IMAGE_MDUMP);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, MDUMP, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, MDUMP, cur_engine,
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
@@ -8016,17 +8516,16 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
}
- cdev->dbg_bin_dump = false;
mutex_unlock(&qed_dbg_lock);
+ cdev->dbg_bin_dump = 0;
return 0;
}
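qed_dbg_all_data() lays the features out back to back: a one-dword header is written at the current offset, the feature payload follows it, and the offset then advances by feature_size + REGDUMP_HEADER_SIZE. A simplified sketch of that layout, with a placeholder header encoding:

/* Sketch of the header-then-payload layout qed_dbg_all_data builds.
 * Sizes and the header encoding are simplified placeholders.
 */
#include <stdio.h>
#include <string.h>

#define REGDUMP_HEADER_SIZE	4	/* one u32 header per feature */

static unsigned int append_feature(unsigned char *buf, unsigned int offset,
				   unsigned int feature_id,
				   const void *data, unsigned int size)
{
	unsigned int hdr = (feature_id << 24) | size;

	memcpy(buf + offset, &hdr, sizeof(hdr));		/* header */
	memcpy(buf + offset + REGDUMP_HEADER_SIZE, data, size);	/* payload */
	return offset + REGDUMP_HEADER_SIZE + size;
}

int main(void)
{
	unsigned char buf[64];
	unsigned int offset = 0;

	offset = append_feature(buf, offset, 1, "idle", 4);
	offset = append_feature(buf, offset, 2, "grcdump", 7);
	printf("total dump size: %u bytes\n", offset);
	return 0;
}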
int qed_dbg_all_data_size(struct qed_dev *cdev)
{
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
u8 cur_engine, org_engine;
cdev->disable_ilt_dump = false;
@@ -8037,14 +8536,13 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
"calculating idle_chk and grcdump register length for current engine\n");
qed_set_debug_engine(cdev, cur_engine);
regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
- REGDUMP_HEADER_SIZE +
- qed_dbg_protection_override_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
-
+ REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE +
+ qed_dbg_protection_override_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
if (ilt_len < ILT_DUMP_MAX_SIZE) {
total_ilt_len += ilt_len;
@@ -8055,7 +8553,8 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
qed_set_debug_engine(cdev, org_engine);
/* Engine common */
- regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
+ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_phy_size(cdev);
qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
if (image_len)
regs_len += REGDUMP_HEADER_SIZE + image_len;
@@ -8083,10 +8582,8 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
enum qed_dbg_features feature, u32 *num_dumped_bytes)
{
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
- struct qed_dbg_feature *qed_feature =
- &cdev->dbg_features[feature];
+ struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
enum dbg_status dbg_rc;
struct qed_ptt *p_ptt;
int rc = 0;
@@ -8119,9 +8616,8 @@ out:
int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
{
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
u32 buf_size_dwords;
enum dbg_status rc;
@@ -8143,6 +8639,14 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
return qed_feature->buf_size;
}
+int qed_dbg_phy_size(struct qed_dev *cdev)
+{
+ /* Return the max size of the phy info buffer plus one
+ * phy mac_stat buffer per port.
+ */
+ return MAX_PHY_RESULT_BUFFER * (1 + qed_device_num_ports(cdev));
+}
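A small sketch of the sizing rule above, one worst-case buffer for the phy info plus one per port for mac_stat:

/* Sketch of the PHY buffer sizing above. Values are illustrative. */
#include <stdio.h>

#define MAX_PHY_RESULT_BUFFER	9000

static int phy_dump_size(int num_ports)
{
	return MAX_PHY_RESULT_BUFFER * (1 + num_ports);
}

int main(void)
{
	printf("2-port device: %d bytes\n", phy_dump_size(2)); /* 27000 */
	return 0;
}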
+
u8 qed_get_debug_engine(struct qed_dev *cdev)
{
return cdev->engine_for_debug;
@@ -8160,6 +8664,9 @@ void qed_dbg_pf_init(struct qed_dev *cdev)
const u8 *dbg_values = NULL;
int i;
+ /* Sync ver with debugbus qed code */
+ qed_dbg_set_app_ver(TOOLS_VERSION);
+
/* Debug values are after init values.
* The offset is the first dword of the file.
*/
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
index e71af82d3200..b0d4b937cf4a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
-#ifndef _QED_DEBUGFS_H
-#define _QED_DEBUGFS_H
+#ifndef _QED_DEBUG_H
+#define _QED_DEBUG_H
enum qed_dbg_features {
DBG_FEATURE_GRC,
@@ -45,6 +45,7 @@ int qed_dbg_ilt_size(struct qed_dev *cdev);
int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes);
int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
+int qed_dbg_phy_size(struct qed_dev *cdev);
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer);
int qed_dbg_all_data_size(struct qed_dev *cdev);
u8 qed_get_debug_engine(struct qed_dev *cdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 0410c3604abd..18f3bf7c4dfe 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -25,6 +25,7 @@
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
@@ -951,7 +952,7 @@ qed_llh_remove_filter(struct qed_hwfn *p_hwfn,
}
int qed_llh_add_mac_filter(struct qed_dev *cdev,
- u8 ppfid, u8 mac_addr[ETH_ALEN])
+ u8 ppfid, const u8 mac_addr[ETH_ALEN])
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
@@ -1396,12 +1397,13 @@ void qed_resc_free(struct qed_dev *cdev)
qed_rdma_info_free(p_hwfn);
}
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
qed_iov_free(p_hwfn);
qed_l2_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn);
qed_dbg_user_data_free(p_hwfn);
- qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
+ qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem);
/* Destroy doorbell recovery mechanism */
qed_db_recovery_teardown(p_hwfn);
@@ -1483,8 +1485,8 @@ static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
/* num RLs can't exceed resource amount of rls or vports */
- num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL),
- RESC_NUM(p_hwfn, QED_VPORT));
+ num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL),
+ RESC_NUM(p_hwfn, QED_VPORT));
/* Make sure after we reserve there's something left */
if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
@@ -1532,8 +1534,8 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
bool four_port;
/* pq and vport bases for this PF */
- qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ);
- qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+ qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+ qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
/* rate limiting and weighted fair queueing are always enabled */
qm_info->vport_rl_en = true;
@@ -1628,9 +1630,9 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
*/
/* flags for pq init */
-#define PQ_INIT_SHARE_VPORT (1 << 0)
-#define PQ_INIT_PF_RL (1 << 1)
-#define PQ_INIT_VF_RL (1 << 2)
+#define PQ_INIT_SHARE_VPORT BIT(0)
+#define PQ_INIT_PF_RL BIT(1)
+#define PQ_INIT_VF_RL BIT(2)
/* defines for pq init */
#define PQ_INIT_DEFAULT_WRR_GROUP 1
@@ -2290,7 +2292,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_no_mem;
}
- rc = qed_eq_alloc(p_hwfn, (u16) n_eqes);
+ rc = qed_eq_alloc(p_hwfn, (u16)n_eqes);
if (rc)
goto alloc_err;
@@ -2375,6 +2377,49 @@ alloc_err:
return rc;
}
+static int qed_fw_err_handler(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ u16 echo,
+ union event_ring_data *data, u8 fw_return_code)
+{
+ if (fw_return_code != COMMON_ERR_CODE_ERROR)
+ goto eqe_unexpected;
+
+ if (data->err_data.recovery_scope == ERR_SCOPE_FUNC &&
+ le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) {
+ qed_sriov_vfpf_malicious(p_hwfn, &data->err_data);
+ return 0;
+ }
+
+eqe_unexpected:
+ DP_ERR(p_hwfn,
+ "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n",
+ opcode, fw_return_code, echo);
+ return -EINVAL;
+}
+
+static int qed_common_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code)
+{
+ switch (opcode) {
+ case COMMON_EVENT_VF_PF_CHANNEL:
+ case COMMON_EVENT_VF_FLR:
+ return qed_sriov_eqe_event(p_hwfn, opcode, echo, data,
+ fw_return_code);
+ case COMMON_EVENT_FW_ERROR:
+ return qed_fw_err_handler(p_hwfn, opcode,
+ le16_to_cpu(echo), data,
+ fw_return_code);
+ default:
+ DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n",
+ opcode, echo);
+ return -EINVAL;
+ }
+}
+
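The new handler routes common EQEs by opcode: the SR-IOV channel and FLR events go to the SR-IOV code, FW_ERROR goes to the error handler, and anything else is rejected. A minimal sketch of that dispatch with placeholder opcodes and handlers:

/* Sketch of the common EQE opcode dispatch added above.
 * Opcode values and handlers are placeholders.
 */
#include <stdio.h>

enum eqe_opcode { EVT_VF_PF_CHANNEL, EVT_VF_FLR, EVT_FW_ERROR, EVT_UNKNOWN };

static int handle_sriov(enum eqe_opcode op) { (void)op; return 0; }
static int handle_fw_error(void) { return 0; }

static int common_eqe_event(enum eqe_opcode opcode)
{
	switch (opcode) {
	case EVT_VF_PF_CHANNEL:
	case EVT_VF_FLR:
		return handle_sriov(opcode);	/* SR-IOV events */
	case EVT_FW_ERROR:
		return handle_fw_error();	/* FW error scope check */
	default:
		fprintf(stderr, "Unknown eqe event %d\n", opcode);
		return -1;			/* -EINVAL in the driver */
	}
}

int main(void)
{
	return common_eqe_event(EVT_FW_ERROR);
}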
void qed_resc_setup(struct qed_dev *cdev)
{
int i;
@@ -2403,6 +2448,8 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_l2_setup(p_hwfn);
qed_iov_setup(p_hwfn);
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+ qed_common_eqe_event);
#ifdef CONFIG_QED_LL2
if (p_hwfn->using_ll2)
qed_ll2_setup(p_hwfn);
@@ -2430,9 +2477,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
int rc = -EBUSY;
- addr = GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
-
+ addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_FLR_FINAL_ACK, p_hwfn->rel_pf_id);
if (is_vf)
id += 0x10;
@@ -2592,7 +2638,7 @@ static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
cache_line_size);
}
- if (L1_CACHE_BYTES > wr_mbs)
+ if (wr_mbs < L1_CACHE_BYTES)
DP_INFO(p_hwfn,
"The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
L1_CACHE_BYTES, wr_mbs);
@@ -2608,13 +2654,21 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int hw_mode)
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
- struct qed_qm_common_rt_init_params params;
+ struct qed_qm_common_rt_init_params *params;
struct qed_dev *cdev = p_hwfn->cdev;
u8 vf_id, max_num_vfs;
u16 num_pfs, pf_id;
u32 concrete_fid;
int rc = 0;
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to allocate common init params\n");
+
+ return -ENOMEM;
+ }
+
qed_init_cau_rt_data(cdev);
/* Program GTT windows */
@@ -2627,16 +2681,15 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qm_info->pf_wfq_en = true;
}
- memset(&params, 0, sizeof(params));
- params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
- params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
- params.pf_rl_en = qm_info->pf_rl_en;
- params.pf_wfq_en = qm_info->pf_wfq_en;
- params.global_rl_en = qm_info->vport_rl_en;
- params.vport_wfq_en = qm_info->vport_wfq_en;
- params.port_params = qm_info->qm_port_params;
+ params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
+ params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+ params->pf_rl_en = qm_info->pf_rl_en;
+ params->pf_wfq_en = qm_info->pf_wfq_en;
+ params->global_rl_en = qm_info->vport_rl_en;
+ params->vport_wfq_en = qm_info->vport_wfq_en;
+ params->port_params = qm_info->qm_port_params;
- qed_qm_common_rt_init(p_hwfn, &params);
+ qed_qm_common_rt_init(p_hwfn, params);
qed_cxt_hw_init_common(p_hwfn);
@@ -2644,7 +2697,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
if (rc)
- return rc;
+ goto out;
qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
@@ -2663,7 +2716,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
- qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
@@ -2672,6 +2725,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
/* pretend to original PF */
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+out:
+ kfree(params);
+
return rc;
}
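The hunk above moves the large QM init params struct off the kernel stack onto the heap, with a single out: label freeing it on every exit path. A userspace sketch of the same pattern, with calloc() standing in for kzalloc():

/* Sketch of moving a large parameter struct off the stack, as
 * qed_hw_init_common does above. Struct contents are illustrative.
 */
#include <stdlib.h>

struct big_params {
	int max_ports_per_engine;
	char scratch[4096];	/* big enough to matter on a kernel stack */
};

static int run_init(const struct big_params *p) { (void)p; return 0; }

static int hw_init_common(void)
{
	struct big_params *params;
	int rc;

	params = calloc(1, sizeof(*params));	/* kzalloc() in the driver */
	if (!params)
		return -1;			/* -ENOMEM */

	params->max_ports_per_engine = 2;
	rc = run_init(params);

	free(params);				/* single exit path frees */
	return rc;
}

int main(void)
{
	return hw_init_common();
}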
@@ -2784,7 +2840,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_rdma_dpm_bar(p_hwfn, p_ptt);
}
- p_hwfn->wid_count = (u16) n_cpus;
+ p_hwfn->wid_count = (u16)n_cpus;
DP_INFO(p_hwfn,
"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
@@ -3503,8 +3559,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
static void get_function_id(struct qed_hwfn *p_hwfn)
{
/* ME Register */
- p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
- PXP_PF_ME_OPAQUE_ADDR);
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
+ PXP_PF_ME_OPAQUE_ADDR);
p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
@@ -3670,12 +3726,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)
return qed_hsi_def_val[type][chip_id];
}
+
static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 resc_max_val, mcp_resp;
u8 res_id;
int rc;
+
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
switch (res_id) {
case QED_LL2_RAM_QUEUE:
@@ -3921,7 +3979,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
* resources allocation queries should be atomic. Since several PFs can
* run in parallel - a resource lock is needed.
* If either the resource lock or resource set value commands are not
- * supported - skip the the max values setting, release the lock if
+ * supported - skip the max values setting, release the lock if
* needed, and proceed to the queries. Other failures, including a
* failure to acquire the lock, will cause this function to fail.
*/
@@ -4775,7 +4833,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
u16 min, max;
- min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
+ min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
DP_NOTICE(p_hwfn,
"l2_queue id [%d] is not valid, available indices [%d - %d]\n",
@@ -4909,7 +4967,7 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
goto out;
address = BAR0_MAP_REG_USDM_RAM +
- USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct ustorm_eth_queue_zone), timeset);
@@ -4948,7 +5006,7 @@ int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
goto out;
address = BAR0_MAP_REG_XSDM_RAM +
- XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct xstorm_eth_queue_zone), timeset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index d3c1f3879be8..f8682356d0cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -15,44 +15,52 @@
#include "qed_int.h"
/**
- * @brief qed_init_dp - initialize the debug level
+ * qed_init_dp(): Initialize the debug level.
*
- * @param cdev
- * @param dp_module
- * @param dp_level
+ * @cdev: Qed dev pointer.
+ * @dp_module: Module debug parameter.
+ * @dp_level: Module debug level.
+ *
+ * Return: Void.
*/
void qed_init_dp(struct qed_dev *cdev,
u32 dp_module,
u8 dp_level);
/**
- * @brief qed_init_struct - initialize the device structure to
- * its defaults
+ * qed_init_struct(): Initialize the device structure to
+ * its defaults.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_init_struct(struct qed_dev *cdev);
/**
- * @brief qed_resc_free -
+ * qed_resc_free(): Free device resources.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_resc_free(struct qed_dev *cdev);
/**
- * @brief qed_resc_alloc -
+ * qed_resc_alloc(): Alloc device resources.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_resc_alloc(struct qed_dev *cdev);
/**
- * @brief qed_resc_setup -
+ * qed_resc_setup(): Setup device resources.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Void.
*/
void qed_resc_setup(struct qed_dev *cdev);
@@ -105,94 +113,96 @@ struct qed_hw_init_params {
};
/**
- * @brief qed_hw_init -
+ * qed_hw_init(): Init Qed hardware.
*
- * @param cdev
- * @param p_params
+ * @cdev: Qed dev pointer.
+ * @p_params: Pointers to params.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params);
/**
- * @brief qed_hw_timers_stop_all - stop the timers HW block
+ * qed_hw_timers_stop_all(): Stop the timers HW block.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return void
+ * Return: Void.
*/
void qed_hw_timers_stop_all(struct qed_dev *cdev);
/**
- * @brief qed_hw_stop -
+ * qed_hw_stop(): Stop Qed hardware.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_stop(struct qed_dev *cdev);
/**
- * @brief qed_hw_stop_fastpath -should be called incase
- * slowpath is still required for the device,
- * but fastpath is not.
+ * qed_hw_stop_fastpath(): Should be called in case
+ * slowpath is still required for the device,
+ * but fastpath is not.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_stop_fastpath(struct qed_dev *cdev);
/**
- * @brief qed_hw_start_fastpath -restart fastpath traffic,
- * only if hw_stop_fastpath was called
+ * qed_hw_start_fastpath(): Restart fastpath traffic,
+ * only if hw_stop_fastpath was called.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
-
/**
- * @brief qed_hw_prepare -
+ * qed_hw_prepare(): Prepare Qed hardware.
*
- * @param cdev
- * @param personality - personality to initialize
+ * @cdev: Qed dev pointer.
+ * @personality: Personality to initialize.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_prepare(struct qed_dev *cdev,
int personality);
/**
- * @brief qed_hw_remove -
+ * qed_hw_remove(): Remove Qed hardware.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_hw_remove(struct qed_dev *cdev);
/**
- * @brief qed_ptt_acquire - Allocate a PTT window
+ * qed_ptt_acquire(): Allocate a PTT window.
*
- * Should be called at the entry point to the driver (at the beginning of an
- * exported function)
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: struct qed_ptt.
*
- * @return struct qed_ptt
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function).
*/
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_release - Release PTT Window
+ * qed_ptt_release(): Release PTT Window.
*
- * Should be called at the end of a flow - at the end of the function that
- * acquired the PTT.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
+ * Return: Void.
*
- * @param p_hwfn
- * @param p_ptt
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
*/
void qed_ptt_release(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
@@ -205,15 +215,17 @@ enum qed_dmae_address_type_t {
};
/**
- * @brief qed_dmae_host2grc - copy data from source addr to
- * dmae registers using the given ptt
+ * qed_dmae_host2grc(): Copy data from source addr to
+ * dmae registers using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @source_addr: Source address.
+ * @grc_addr: GRC address (dmae_data_offset).
+ * @size_in_dwords: Size.
+ * @p_params: (default parameters will be used in case of NULL).
*
- * @param p_hwfn
- * @param p_ptt
- * @param source_addr
- * @param grc_addr (dmae_data_offset)
- * @param size_in_dwords
- * @param p_params (default parameters will be used in case of NULL)
+ * Return: Int.
*/
int
qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
@@ -224,29 +236,34 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
struct qed_dmae_params *p_params);
/**
- * @brief qed_dmae_grc2host - Read data from dmae data offset
- * to source address using the given ptt
+ * qed_dmae_grc2host(): Read data from dmae data offset
+ * to source address using the given ptt.
+ *
+ * @p_ptt: P_ptt.
+ * @grc_addr: GRC address (dmae_data_offset).
+ * @dest_addr: Destination Address.
+ * @size_in_dwords: Size.
+ * @p_params: (default parameters will be used in case of NULL).
*
- * @param p_ptt
- * @param grc_addr (dmae_data_offset)
- * @param dest_addr
- * @param size_in_dwords
- * @param p_params (default parameters will be used in case of NULL)
+ * Return: Int.
*/
int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
struct qed_dmae_params *p_params);
/**
- * @brief qed_dmae_host2host - copy data from to source address
- * to a destination adress (for SRIOV) using the given ptt
+ * qed_dmae_host2host(): Copy data from the source address
+ * to a destination address (for SRIOV) using the given
+ * ptt.
*
- * @param p_hwfn
- * @param p_ptt
- * @param source_addr
- * @param dest_addr
- * @param size_in_dwords
- * @param p_params (default parameters will be used in case of NULL)
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @source_addr: Source address.
+ * @dest_addr: Destination address.
+ * @size_in_dwords: Size.
+ * @p_params: (default parameters will be used in case of NULL).
+ *
+ * Return: Int.
*/
int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -259,51 +276,51 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain);
/**
- * @@brief qed_fw_l2_queue - Get absolute L2 queue ID
+ * qed_fw_l2_queue(): Get absolute L2 queue ID.
*
- * @param p_hwfn
- * @param src_id - relative to p_hwfn
- * @param dst_id - absolute per engine
+ * @p_hwfn: HW device data.
+ * @src_id: Relative to p_hwfn.
+ * @dst_id: Absolute per engine.
*
- * @return int
+ * Return: Int.
*/
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
u16 src_id,
u16 *dst_id);
/**
- * @@brief qed_fw_vport - Get absolute vport ID
+ * qed_fw_vport(): Get absolute vport ID.
*
- * @param p_hwfn
- * @param src_id - relative to p_hwfn
- * @param dst_id - absolute per engine
+ * @p_hwfn: HW device data.
+ * @src_id: Relative to p_hwfn.
+ * @dst_id: Absolute per engine.
*
- * @return int
+ * Return: Int.
*/
int qed_fw_vport(struct qed_hwfn *p_hwfn,
u8 src_id,
u8 *dst_id);
/**
- * @@brief qed_fw_rss_eng - Get absolute RSS engine ID
+ * qed_fw_rss_eng(): Get absolute RSS engine ID.
*
- * @param p_hwfn
- * @param src_id - relative to p_hwfn
- * @param dst_id - absolute per engine
+ * @p_hwfn: HW device data.
+ * @src_id: Relative to p_hwfn.
+ * @dst_id: Absolute per engine.
*
- * @return int
+ * Return: Int.
*/
int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
u8 src_id,
u8 *dst_id);
/**
- * @brief qed_llh_get_num_ppfid - Return the allocated number of LLH filter
- * banks that are allocated to the PF.
+ * qed_llh_get_num_ppfid(): Return the allocated number of LLH filter
+ * banks that are allocated to the PF.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return u8 - Number of LLH filter banks
+ * Return: u8 - Number of LLH filter banks.
*/
u8 qed_llh_get_num_ppfid(struct qed_dev *cdev);
@@ -314,45 +331,50 @@ enum qed_eng {
};
/**
- * @brief qed_llh_set_ppfid_affinity - Set the engine affinity for the given
- * LLH filter bank.
+ * qed_llh_set_ppfid_affinity(): Set the engine affinity for the given
+ * LLH filter bank.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param eng
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @eng: Engine.
*
- * @return int
+ * Return: Int.
*/
int qed_llh_set_ppfid_affinity(struct qed_dev *cdev,
u8 ppfid, enum qed_eng eng);
/**
- * @brief qed_llh_set_roce_affinity - Set the RoCE engine affinity
+ * qed_llh_set_roce_affinity(): Set the RoCE engine affinity.
*
- * @param cdev
- * @param eng
+ * @cdev: Qed dev pointer.
+ * @eng: Engine.
*
- * @return int
+ * Return: Int.
*/
int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng);
/**
- * @brief qed_llh_add_mac_filter - Add a LLH MAC filter into the given filter
- * bank.
+ * qed_llh_add_mac_filter(): Add a LLH MAC filter into the given filter
+ * bank.
+ *
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @mac_addr: MAC to add.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param mac_addr - MAC to add
+ * Return: Int.
*/
int qed_llh_add_mac_filter(struct qed_dev *cdev,
- u8 ppfid, u8 mac_addr[ETH_ALEN]);
+ u8 ppfid, const u8 mac_addr[ETH_ALEN]);
/**
- * @brief qed_llh_remove_mac_filter - Remove a LLH MAC filter from the given
- * filter bank.
+ * qed_llh_remove_mac_filter(): Remove a LLH MAC filter from the given
+ * filter bank.
+ *
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @mac_addr: MAC to remove.
*
- * @param p_ptt
- * @param p_filter - MAC to remove
+ * Return: Void.
*/
void qed_llh_remove_mac_filter(struct qed_dev *cdev,
u8 ppfid, u8 mac_addr[ETH_ALEN]);
@@ -368,15 +390,16 @@ enum qed_llh_prot_filter_type_t {
};
/**
- * @brief qed_llh_add_protocol_filter - Add a LLH protocol filter into the
- * given filter bank.
+ * qed_llh_add_protocol_filter(): Add a LLH protocol filter into the
+ * given filter bank.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param type - type of filters and comparing
- * @param source_port_or_eth_type - source port or ethertype to add
- * @param dest_port - destination port to add
- * @param type - type of filters and comparing
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @type: Type of filters and comparing.
+ * @source_port_or_eth_type: Source port or ethertype to add.
+ * @dest_port: Destination port to add.
+ *
+ * Return: Int.
*/
int
qed_llh_add_protocol_filter(struct qed_dev *cdev,
@@ -385,14 +408,14 @@ qed_llh_add_protocol_filter(struct qed_dev *cdev,
u16 source_port_or_eth_type, u16 dest_port);
/**
- * @brief qed_llh_remove_protocol_filter - Remove a LLH protocol filter from
- * the given filter bank.
+ * qed_llh_remove_protocol_filter(): Remove a LLH protocol filter from
+ * the given filter bank.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param type - type of filters and comparing
- * @param source_port_or_eth_type - source port or ethertype to add
- * @param dest_port - destination port to add
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @type: Type of filters and comparing.
+ * @source_port_or_eth_type: Source port or ethertype to add.
+ * @dest_port: Destination port to add.
*/
void
qed_llh_remove_protocol_filter(struct qed_dev *cdev,
@@ -401,31 +424,31 @@ qed_llh_remove_protocol_filter(struct qed_dev *cdev,
u16 source_port_or_eth_type, u16 dest_port);
/**
- * *@brief Cleanup of previous driver remains prior to load
+ * qed_final_cleanup(): Cleanup of previous driver remains prior to load.
*
- * @param p_hwfn
- * @param p_ptt
- * @param id - For PF, engine-relative. For VF, PF-relative.
- * @param is_vf - true iff cleanup is made for a VF.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @id: For PF, engine-relative. For VF, PF-relative.
+ * @is_vf: True iff cleanup is made for a VF.
*
- * @return int
+ * Return: Int.
*/
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 id, bool is_vf);
/**
- * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue.
+ * qed_get_queue_coalesce(): Retrieve coalesce value for a given queue.
*
- * @param p_hwfn
- * @param p_coal - store coalesce value read from the hardware.
- * @param p_handle
+ * @p_hwfn: HW device data.
+ * @coal: Store coalesce value read from the hardware.
+ * @handle: Queue handle.
*
- * @return int
+ * Return: Int.
**/
int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
/**
- * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and
+ * qed_set_queue_coalesce(): Configure coalesce parameters for Rx and
* Tx queue. Coalescing can be configured up to 511 usec, but with varying
* accuracy [the bigger the value the less accurate], up to an error of
* 3 usec for the highest values.
@@ -433,37 +456,38 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
* should be in the same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff],
* otherwise the configuration would break.
*
+ * @rx_coal: Rx Coalesce value in micro seconds.
+ * @tx_coal: TX Coalesce value in micro seconds.
+ * @p_handle: P_handle.
*
- * @param rx_coal - Rx Coalesce value in micro seconds.
- * @param tx_coal - TX Coalesce value in micro seconds.
- * @param p_handle
- *
- * @return int
+ * Return: Int.
**/
int
qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle);
/**
- * @brief qed_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
+ * qed_pglueb_set_pfid_enable(): Enable or disable PCI BUS MASTER.
*
- * @param p_hwfn
- * @param p_ptt
- * @param b_enable - true/false
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @b_enable: True/False.
*
- * @return int
+ * Return: Int.
*/
int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool b_enable);
/**
- * @brief db_recovery_add - add doorbell information to the doorbell
- * recovery mechanism.
+ * qed_db_recovery_add(): add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address of where db_data is stored.
+ * @db_width: Doorbell is 32b or 64b.
+ * @db_space: Doorbell recovery addresses are user or kernel space.
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address of where db_data is stored
- * @param db_width - doorbell is 32b pr 64b
- * @param db_space - doorbell recovery addresses are user or kernel space
+ * Return: Int.
*/
int qed_db_recovery_add(struct qed_dev *cdev,
void __iomem *db_addr,
@@ -472,17 +496,18 @@ int qed_db_recovery_add(struct qed_dev *cdev,
enum qed_db_rec_space db_space);
/**
- * @brief db_recovery_del - remove doorbell information from the doorbell
+ * qed_db_recovery_del() - remove doorbell information from the doorbell
* recovery mechanism. db_data serves as key (db_addr is not unique).
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address where db_data is stored. Serves as key for the
+ * @cdev: Qed dev pointer.
+ * @db_addr: doorbell address.
+ * @db_data: address where db_data is stored. Serves as key for the
* entry to delete.
+ *
+ * Return: Int.
*/
int qed_db_recovery_del(struct qed_dev *cdev,
void __iomem *db_addr, void *db_data);
-
const char *qed_hw_get_resc_name(enum qed_resources res_id);
#endif
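The header conversion above rewrites the old @brief/@param comments into kernel-doc, whose canonical shape is a "name(): summary" line, one "@arg:" line per parameter, and a "Return:" section. An illustrative declaration in that shape (the function itself is a placeholder, not part of the driver):

struct qed_dev;	/* opaque handle, as in the driver headers */

/**
 * qed_example_op(): One-line summary of the operation.
 *
 * @cdev: Qed dev pointer.
 * @flags: Behaviour flags.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qed_example_op(struct qed_dev *cdev, unsigned int flags);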
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index c51f9590fe19..6bb4e165b592 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -215,7 +215,6 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev)
qdevlink = devlink_priv(dl);
qdevlink->cdev = cdev;
- devlink_register(dl);
rc = devlink_params_register(dl, qed_devlink_params,
ARRAY_SIZE(qed_devlink_params));
if (rc)
@@ -226,15 +225,13 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev)
QED_DEVLINK_PARAM_ID_IWARP_CMT,
value);
- devlink_params_publish(dl);
cdev->iwarp_cmt = false;
qed_fw_reporters_create(dl);
-
+ devlink_register(dl);
return dl;
err_unregister:
- devlink_unregister(dl);
devlink_free(dl);
return ERR_PTR(rc);
@@ -245,11 +242,11 @@ void qed_devlink_unregister(struct devlink *devlink)
if (!devlink)
return;
+ devlink_unregister(devlink);
qed_fw_reporters_destroy(devlink);
devlink_params_unregister(devlink, qed_devlink_params,
ARRAY_SIZE(qed_devlink_params));
- devlink_unregister(devlink);
devlink_free(devlink);
}
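The devlink change above enforces register-last on setup and unregister-first on teardown, so the instance is never visible to user space while half-initialized. A sketch of that ordering with stand-ins for the devlink calls:

/* Sketch of the register-last / unregister-first ordering the devlink
 * change above enforces. The devlink API calls are stand-ins.
 */
#include <stdio.h>

static void params_register(void)   { puts("params registered"); }
static void reporters_create(void)  { puts("reporters created"); }
static void instance_register(void) { puts("visible to user space"); }

static void instance_unregister(void) { puts("hidden from user space"); }
static void reporters_destroy(void)   { puts("reporters destroyed"); }
static void params_unregister(void)   { puts("params unregistered"); }

int main(void)
{
	/* Setup: publish only after everything is in place */
	params_register();
	reporters_create();
	instance_register();

	/* Teardown: drop visibility first, then tear down internals */
	instance_unregister();
	reporters_destroy();
	params_unregister();
	return 0;
}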
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index b768f0698170..3764190b948e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -30,6 +30,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_iro_hsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
@@ -89,7 +90,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
struct fcoe_init_ramrod_params *p_ramrod = NULL;
struct fcoe_init_func_ramrod_data *p_data;
- struct e4_fcoe_conn_context *p_cxt = NULL;
+ struct fcoe_conn_context *p_cxt = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
struct qed_cxt_info cxt_info;
@@ -144,7 +145,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
memset(p_cxt, 0, sizeof(*p_cxt));
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
- E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
+ TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
fcoe_pf_params->dummy_icid = (u16)dummy_cid;
@@ -506,10 +507,9 @@ static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
+ MSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
@@ -521,10 +521,9 @@ static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+ TSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
@@ -549,7 +548,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
- struct e4_fcoe_task_context *p_task_ctx = NULL;
+ struct fcoe_task_context *p_task_ctx = NULL;
u32 i, lc;
int rc;
@@ -561,7 +560,7 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
if (rc)
continue;
- memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
+ memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
lc = 0;
SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1);
@@ -572,7 +571,7 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc);
SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
- E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
+ TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index fb1baa2da2d0..f2cedbd9489c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#ifndef _QED_HSI_H
@@ -38,7 +38,7 @@ enum common_event_opcode {
COMMON_EVENT_VF_PF_CHANNEL,
COMMON_EVENT_VF_FLR,
COMMON_EVENT_PF_UPDATE,
- COMMON_EVENT_MALICIOUS_VF,
+ COMMON_EVENT_FW_ERROR,
COMMON_EVENT_RL_UPDATE,
COMMON_EVENT_EMPTY,
MAX_COMMON_EVENT_OPCODE
@@ -84,6 +84,13 @@ enum core_l4_pseudo_checksum_mode {
MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
};
+/* LL2 SP error code */
+enum core_ll2_error_code {
+ LL2_OK = 0,
+ LL2_ERROR,
+ MAX_CORE_LL2_ERROR_CODE
+};
+
/* Light-L2 RX Producers in Tstorm RAM */
struct core_ll2_port_stats {
struct regpair gsi_invalid_hdr;
@@ -123,6 +130,15 @@ struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
+struct core_ll2_rx_per_queue_stat {
+ struct core_ll2_tstorm_per_queue_stat tstorm_stat;
+ struct core_ll2_ustorm_per_queue_stat ustorm_stat;
+};
+
+struct core_ll2_tx_per_queue_stat {
+ struct core_ll2_pstorm_per_queue_stat pstorm_stat;
+};
+
/* Structure for doorbell data, in PWM mode, for RX producers update. */
struct core_pwm_prod_update_data {
__le16 icid; /* internal CID */
@@ -135,6 +151,15 @@ struct core_pwm_prod_update_data {
struct core_ll2_rx_prod prod; /* Producers */
};
+/* Ramrod data for rx/tx queue statistics query ramrod */
+struct core_queue_stats_query_ramrod_data {
+ u8 rx_stat;
+ u8 tx_stat;
+ __le16 reserved[3];
+ struct regpair rx_stat_addr;
+ struct regpair tx_stat_addr;
+};
+
/* Core Ramrod Command IDs (light L2) */
enum core_ramrod_cmd_id {
CORE_RAMROD_UNUSED,
@@ -210,7 +235,8 @@ struct core_rx_fast_path_cqe {
__le16 vlan;
struct core_rx_cqe_opaque_data opaque_data;
struct parsing_err_flags err_flags;
- __le16 reserved0;
+ u8 packet_source;
+ u8 reserved0;
__le32 reserved1[3];
};
@@ -226,7 +252,8 @@ struct core_rx_gsi_offload_cqe {
__le16 qp_id;
__le32 src_qp;
struct core_rx_cqe_opaque_data opaque_data;
- __le32 reserved;
+ u8 packet_source;
+ u8 reserved[3];
};
/* Core RX CQE for Light L2 */
@@ -245,6 +272,15 @@ union core_rx_cqe_union {
struct core_rx_slow_path_cqe rx_cqe_sp;
};
+/* RX packet source. */
+enum core_rx_pkt_source {
+ CORE_RX_PKT_SOURCE_NETWORK = 0,
+ CORE_RX_PKT_SOURCE_LB,
+ CORE_RX_PKT_SOURCE_TX,
+ CORE_RX_PKT_SOURCE_LL2_TX,
+ MAX_CORE_RX_PKT_SOURCE
+};
+
/* Ramrod data for rx queue start ramrod */
struct core_rx_start_ramrod_data {
struct regpair bd_base;
@@ -362,7 +398,7 @@ struct core_tx_update_ramrod_data {
u8 update_qm_pq_id_flg;
u8 reserved0;
__le16 qm_pq_id;
- __le32 reserved1;
+ __le32 reserved1[1];
};
/* Enum flag for what type of dcb data to update */
@@ -386,224 +422,222 @@ struct pstorm_core_conn_st_ctx {
/* Core Slowpath Connection storm context of Xstorm */
struct xstorm_core_conn_st_ctx {
- __le32 spq_base_lo;
- __le32 spq_base_hi;
- struct regpair consolid_base_addr;
+ struct regpair spq_base_addr;
+ __le32 reserved0[2];
__le16 spq_cons;
- __le16 consolid_cons;
- __le32 reserved0[55];
+ __le16 reserved1[111];
};
-struct e4_xstorm_core_conn_ag_ctx {
+struct xstorm_core_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 consolid_prod;
@@ -657,89 +691,89 @@ struct e4_xstorm_core_conn_ag_ctx {
__le16 word15;
};
-struct e4_tstorm_core_conn_ag_ctx {
+struct tstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -761,63 +795,63 @@ struct e4_tstorm_core_conn_ag_ctx {
__le32 reg10;
};
-struct e4_ustorm_core_conn_ag_ctx {
+struct ustorm_core_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -846,15 +880,15 @@ struct tstorm_core_conn_st_ctx {
};
/* core connection context */
-struct e4_core_conn_context {
+struct core_conn_context {
struct ystorm_core_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_core_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_core_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
- struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
+ struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_core_conn_ag_ctx ustorm_ag_context;
struct mstorm_core_conn_st_ctx mstorm_st_context;
struct ustorm_core_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
@@ -930,12 +964,12 @@ struct eth_rx_rate_limit {
/* Update RSS indirection table entry command */
struct eth_tstorm_rss_update_data {
- u8 valid;
u8 vport_id;
u8 ind_table_index;
- u8 reserved;
__le16 ind_table_value;
__le16 reserved1;
+ u8 reserved;
+ u8 valid;
};
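
The reorder above moves valid from the first byte of the entry to the last. One plausible reading (an assumption, not stated in the patch) is that the producer fills the payload first and flips valid last, so a polling consumer never observes a half-written record. A hypothetical fill helper under that assumption, not qed code:

/* Hypothetical helper: publish an RSS indirection update by
 * writing the payload first and the valid flag last. */
static void rss_update_fill(struct eth_tstorm_rss_update_data *e,
			    u8 vport, u8 idx, u16 val)
{
	e->vport_id = vport;
	e->ind_table_index = idx;
	e->ind_table_value = cpu_to_le16(val);
	e->reserved1 = 0;
	e->reserved = 0;
	wmb();		/* payload must be visible before the flag */
	e->valid = 1;
}
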
struct eth_ustorm_per_pf_stat {
@@ -967,19 +1001,20 @@ struct vf_pf_channel_eqe_data {
struct regpair msg_addr;
};
-/* Event Ring malicious VF data */
-struct malicious_vf_eqe_data {
- u8 vf_id;
- u8 err_id;
- __le16 reserved[3];
-};
-
/* Event Ring initial cleanup data */
struct initial_cleanup_eqe_data {
u8 vf_id;
u8 reserved[7];
};
+/* FW error data */
+struct fw_err_data {
+ u8 recovery_scope;
+ u8 err_id;
+ __le16 entity_id;
+ u8 reserved[4];
+};
+
/* Event Data Union */
union event_ring_data {
u8 bytes[8];
@@ -987,8 +1022,8 @@ union event_ring_data {
struct iscsi_eqe_data iscsi_info;
struct iscsi_connect_done_results iscsi_conn_done_info;
union rdma_eqe_data rdma_data;
- struct malicious_vf_eqe_data malicious_vf;
struct initial_cleanup_eqe_data vf_init_cleanup;
+ struct fw_err_data err_data;
};
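
With the malicious-VF entry folded into the broader COMMON_EVENT_FW_ERROR opcode, an event-ring consumer would select the new err_data member of the union when that opcode arrives. A hedged sketch of the dispatch, not the driver's actual handler:

/* Illustrative only: pick the union member by opcode and decode
 * the fw_err_data fields added in this patch. */
static void handle_eqe(u8 opcode, union event_ring_data *data)
{
	if (opcode == COMMON_EVENT_FW_ERROR) {
		struct fw_err_data *err = &data->err_data;

		pr_err("FW error %u, recovery scope %u, entity %u\n",
		       err->err_id, err->recovery_scope,
		       le16_to_cpu(err->entity_id));
	}
}
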
/* Event Ring Entry */
@@ -1042,6 +1077,15 @@ struct hsi_fp_ver_struct {
u8 major_ver_arr[2];
};
+/* Integration Phase */
+enum integ_phase {
+ INTEG_PHASE_BB_A0_LATEST = 3,
+ INTEG_PHASE_BB_B0_NO_MCP = 10,
+ INTEG_PHASE_BB_B0_WITH_MCP = 11,
+ MAX_INTEG_PHASE
+};
+
+/* iWARP LL2 TX queues */
enum iwarp_ll2_tx_queues {
IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
IWARP_LL2_ALIGNED_TX_QUEUE,
@@ -1050,9 +1094,9 @@ enum iwarp_ll2_tx_queues {
MAX_IWARP_LL2_TX_QUEUES
};
-/* Malicious VF error ID */
-enum malicious_vf_error_id {
- MALICIOUS_VF_NO_ERROR,
+/* Function error ID */
+enum func_err_id {
+ FUNC_NO_ERROR,
VF_PF_CHANNEL_NOT_READY,
VF_ZONE_MSG_NOT_VALID,
VF_ZONE_FUNC_NOT_ENABLED,
@@ -1087,13 +1131,33 @@ enum malicious_vf_error_id {
CORE_PACKET_SIZE_TOO_LARGE,
CORE_ILLEGAL_BD_FLAGS,
CORE_GSI_PACKET_VIOLATION,
- MAX_MALICIOUS_VF_ERROR_ID,
+ MAX_FUNC_ERR_ID
+};
+
+/* FW error handling mode */
+enum fw_err_mode {
+ FW_ERR_FATAL_ASSERT,
+ FW_ERR_DRV_REPORT,
+ MAX_FW_ERR_MODE
+};
+
+/* FW error recovery scope */
+enum fw_err_recovery_scope {
+ ERR_SCOPE_INVALID,
+ ERR_SCOPE_TX_Q,
+ ERR_SCOPE_RX_Q,
+ ERR_SCOPE_QP,
+ ERR_SCOPE_VPORT,
+ ERR_SCOPE_FUNC,
+ ERR_SCOPE_PORT,
+ ERR_SCOPE_ENGINE,
+ MAX_FW_ERR_RECOVERY_SCOPE
};
/* Mstorm non-triggering VF zone */
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
- struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
+ struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_RXQ_VF_QUAD];
};
/* Mstorm VF zone */
@@ -1148,7 +1212,7 @@ struct pf_start_tunnel_config {
/* Ramrod data for PF start ramrod */
struct pf_start_ramrod_data {
struct regpair event_ring_pbl_addr;
- struct regpair consolid_q_pbl_addr;
+ struct regpair consolid_q_pbl_base_addr;
struct pf_start_tunnel_config tunnel_config;
__le16 event_ring_sb_id;
u8 base_vf_id;
@@ -1166,6 +1230,9 @@ struct pf_start_ramrod_data {
u8 reserved0;
struct hsi_fp_ver_struct hsi_fp_ver;
struct outer_tag_config_struct outer_tag_config;
+ u8 pf_fp_err_mode;
+ u8 consolid_q_num_pages;
+ u8 reserved[6];
};
/* Data for port update ramrod */
@@ -1230,6 +1297,13 @@ enum ports_mode {
MAX_PORTS_MODE
};
+/* Protocol-common error code */
+enum protocol_common_error_code {
+ COMMON_ERR_CODE_OK = 0,
+ COMMON_ERR_CODE_ERROR,
+ MAX_PROTOCOL_COMMON_ERROR_CODE
+};
+
/* use to index in hsi_fp_[major|minor]_ver_arr per protocol */
enum protocol_version_array_key {
ETH_VER_KEY = 0,
@@ -1525,74 +1599,74 @@ enum dmae_cmd_src_enum {
MAX_DMAE_CMD_SRC_ENUM
};
-struct e4_mstorm_core_conn_ag_ctx {
+struct mstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct e4_ystorm_core_conn_ag_ctx {
+struct ystorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -1704,6 +1778,7 @@ struct igu_msix_vector {
#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
};
+
/* per encapsulation type enabling flags */
struct prs_reg_encapsulation_type_en {
u8 flags;
@@ -1778,22 +1853,22 @@ struct qm_rf_opportunistic_mask {
};
/* QM hardware structure of QM map memory */
-struct qm_rf_pq_map_e4 {
+struct qm_rf_pq_map {
__le32 reg;
-#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1
-#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0
-#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF
-#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1
-#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF
-#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9
-#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F
-#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18
-#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3
-#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1
-#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25
-#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F
-#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26
+#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF
+#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_VOQ_MASK 0x1F
+#define QM_RF_PQ_MAP_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
};
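
The qm_rf_pq_map rename keeps every field packed into the single little-endian reg dword, so a reader converts the dword once and then applies the MASK/SHIFT pairs. A minimal decode sketch using the qed GET_FIELD() helper (illustrative, not driver code):

static void dump_pq_map(const struct qm_rf_pq_map *map)
{
	u32 reg = le32_to_cpu(map->reg);	/* convert once */

	pr_info("pq_valid=%u rl_id=%u vp_pq_id=%u voq=%u\n",
		GET_FIELD(reg, QM_RF_PQ_MAP_PQ_VALID),
		GET_FIELD(reg, QM_RF_PQ_MAP_RL_ID),
		GET_FIELD(reg, QM_RF_PQ_MAP_VP_PQ_ID),
		GET_FIELD(reg, QM_RF_PQ_MAP_VOQ));
}
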
/* Completion params for aggregated interrupt completion */
@@ -1831,769 +1906,6 @@ struct virt_mem_desc {
u32 size; /* In bytes */
};
-/****************************************/
-/* Debug Tools HSI constants and macros */
-/****************************************/
-
-enum block_id {
- BLOCK_GRC,
- BLOCK_MISCS,
- BLOCK_MISC,
- BLOCK_DBU,
- BLOCK_PGLUE_B,
- BLOCK_CNIG,
- BLOCK_CPMU,
- BLOCK_NCSI,
- BLOCK_OPTE,
- BLOCK_BMB,
- BLOCK_PCIE,
- BLOCK_MCP,
- BLOCK_MCP2,
- BLOCK_PSWHST,
- BLOCK_PSWHST2,
- BLOCK_PSWRD,
- BLOCK_PSWRD2,
- BLOCK_PSWWR,
- BLOCK_PSWWR2,
- BLOCK_PSWRQ,
- BLOCK_PSWRQ2,
- BLOCK_PGLCS,
- BLOCK_DMAE,
- BLOCK_PTU,
- BLOCK_TCM,
- BLOCK_MCM,
- BLOCK_UCM,
- BLOCK_XCM,
- BLOCK_YCM,
- BLOCK_PCM,
- BLOCK_QM,
- BLOCK_TM,
- BLOCK_DORQ,
- BLOCK_BRB,
- BLOCK_SRC,
- BLOCK_PRS,
- BLOCK_TSDM,
- BLOCK_MSDM,
- BLOCK_USDM,
- BLOCK_XSDM,
- BLOCK_YSDM,
- BLOCK_PSDM,
- BLOCK_TSEM,
- BLOCK_MSEM,
- BLOCK_USEM,
- BLOCK_XSEM,
- BLOCK_YSEM,
- BLOCK_PSEM,
- BLOCK_RSS,
- BLOCK_TMLD,
- BLOCK_MULD,
- BLOCK_YULD,
- BLOCK_XYLD,
- BLOCK_PRM,
- BLOCK_PBF_PB1,
- BLOCK_PBF_PB2,
- BLOCK_RPB,
- BLOCK_BTB,
- BLOCK_PBF,
- BLOCK_RDIF,
- BLOCK_TDIF,
- BLOCK_CDU,
- BLOCK_CCFC,
- BLOCK_TCFC,
- BLOCK_IGU,
- BLOCK_CAU,
- BLOCK_UMAC,
- BLOCK_XMAC,
- BLOCK_MSTAT,
- BLOCK_DBG,
- BLOCK_NIG,
- BLOCK_WOL,
- BLOCK_BMBN,
- BLOCK_IPC,
- BLOCK_NWM,
- BLOCK_NWS,
- BLOCK_MS,
- BLOCK_PHY_PCIE,
- BLOCK_LED,
- BLOCK_AVS_WRAP,
- BLOCK_PXPREQBUS,
- BLOCK_BAR0_MAP,
- BLOCK_MCP_FIO,
- BLOCK_LAST_INIT,
- BLOCK_PRS_FC,
- BLOCK_PBF_FC,
- BLOCK_NIG_LB_FC,
- BLOCK_NIG_LB_FC_PLLH,
- BLOCK_NIG_TX_FC_PLLH,
- BLOCK_NIG_TX_FC,
- BLOCK_NIG_RX_FC_PLLH,
- BLOCK_NIG_RX_FC,
- MAX_BLOCK_ID
-};
-
-/* binary debug buffer types */
-enum bin_dbg_buffer_type {
- BIN_BUF_DBG_MODE_TREE,
- BIN_BUF_DBG_DUMP_REG,
- BIN_BUF_DBG_DUMP_MEM,
- BIN_BUF_DBG_IDLE_CHK_REGS,
- BIN_BUF_DBG_IDLE_CHK_IMMS,
- BIN_BUF_DBG_IDLE_CHK_RULES,
- BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
- BIN_BUF_DBG_ATTN_BLOCKS,
- BIN_BUF_DBG_ATTN_REGS,
- BIN_BUF_DBG_ATTN_INDEXES,
- BIN_BUF_DBG_ATTN_NAME_OFFSETS,
- BIN_BUF_DBG_BLOCKS,
- BIN_BUF_DBG_BLOCKS_CHIP_DATA,
- BIN_BUF_DBG_BUS_LINES,
- BIN_BUF_DBG_BLOCKS_USER_DATA,
- BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA,
- BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
- BIN_BUF_DBG_RESET_REGS,
- BIN_BUF_DBG_PARSING_STRINGS,
- MAX_BIN_DBG_BUFFER_TYPE
-};
-
-
-/* Attention bit mapping */
-struct dbg_attn_bit_mapping {
- u16 data;
-#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
-#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
-#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
-#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
-};
-
-/* Attention block per-type data */
-struct dbg_attn_block_type_data {
- u16 names_offset;
- u16 reserved1;
- u8 num_regs;
- u8 reserved2;
- u16 regs_offset;
-
-};
-
-/* Block attentions */
-struct dbg_attn_block {
- struct dbg_attn_block_type_data per_type_data[2];
-};
-
-/* Attention register result */
-struct dbg_attn_reg_result {
- u32 data;
-#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
-#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
-#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
- u16 block_attn_offset;
- u16 reserved;
- u32 sts_val;
- u32 mask_val;
-};
-
-/* Attention block result */
-struct dbg_attn_block_result {
- u8 block_id;
- u8 data;
-#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
-#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
-#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
-#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
- u16 names_offset;
- struct dbg_attn_reg_result reg_results[15];
-};
-
-/* Mode header */
-struct dbg_mode_hdr {
- u16 data;
-#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
-#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
-#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
-#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
-};
-
-/* Attention register */
-struct dbg_attn_reg {
- struct dbg_mode_hdr mode;
- u16 block_attn_offset;
- u32 data;
-#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
-#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
-#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
- u32 sts_clr_address;
- u32 mask_address;
-};
-
-/* Attention types */
-enum dbg_attn_type {
- ATTN_TYPE_INTERRUPT,
- ATTN_TYPE_PARITY,
- MAX_DBG_ATTN_TYPE
-};
-
-/* Block debug data */
-struct dbg_block {
- u8 name[15];
- u8 associated_storm_letter;
-};
-
-/* Chip-specific block debug data */
-struct dbg_block_chip {
- u8 flags;
-#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1
-#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0
-#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1
-#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1
-#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1
-#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
-#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1
-#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3
-#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1
-#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4
-#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7
-#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5
- u8 dbg_client_id;
- u8 reset_reg_id;
- u8 reset_reg_bit_offset;
- struct dbg_mode_hdr dbg_bus_mode;
- u16 reserved1;
- u8 reserved2;
- u8 num_of_dbg_bus_lines;
- u16 dbg_bus_lines_offset;
- u32 dbg_select_reg_addr;
- u32 dbg_dword_enable_reg_addr;
- u32 dbg_shift_reg_addr;
- u32 dbg_force_valid_reg_addr;
- u32 dbg_force_frame_reg_addr;
-};
-
-/* Chip-specific block user debug data */
-struct dbg_block_chip_user {
- u8 num_of_dbg_bus_lines;
- u8 has_latency_events;
- u16 names_offset;
-};
-
-/* Block user debug data */
-struct dbg_block_user {
- u8 name[16];
-};
-
-/* Block Debug line data */
-struct dbg_bus_line {
- u8 data;
-#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
-#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
-#define DBG_BUS_LINE_IS_256B_MASK 0x1
-#define DBG_BUS_LINE_IS_256B_SHIFT 4
-#define DBG_BUS_LINE_RESERVED_MASK 0x7
-#define DBG_BUS_LINE_RESERVED_SHIFT 5
- u8 group_sizes;
-};
-
-/* Condition header for registers dump */
-struct dbg_dump_cond_hdr {
- struct dbg_mode_hdr mode; /* Mode header */
- u8 block_id; /* block ID */
- u8 data_size; /* size in dwords of the data following this header */
-};
-
-/* Memory data for registers dump */
-struct dbg_dump_mem {
- u32 dword0;
-#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
-#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
-#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
-#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
- u32 dword1;
-#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
-#define DBG_DUMP_MEM_LENGTH_SHIFT 0
-#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
-#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
-#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
-#define DBG_DUMP_MEM_RESERVED_SHIFT 25
-};
-
-/* Register data for registers dump */
-struct dbg_dump_reg {
- u32 data;
-#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
-#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
-#define DBG_DUMP_REG_LENGTH_MASK 0xFF
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
-};
-
-/* Split header for registers dump */
-struct dbg_dump_split_hdr {
- u32 hdr;
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
-};
-
-/* Condition header for idle check */
-struct dbg_idle_chk_cond_hdr {
- struct dbg_mode_hdr mode; /* Mode header */
- u16 data_size; /* size in dwords of the data following this header */
-};
-
-/* Idle Check condition register */
-struct dbg_idle_chk_cond_reg {
- u32 data;
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
- u16 num_entries;
- u8 entry_size;
- u8 start_entry;
-};
-
-/* Idle Check info register */
-struct dbg_idle_chk_info_reg {
- u32 data;
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
- u16 size; /* register size in dwords */
- struct dbg_mode_hdr mode; /* Mode header */
-};
-
-/* Idle Check register */
-union dbg_idle_chk_reg {
- struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
- struct dbg_idle_chk_info_reg info_reg; /* info register */
-};
-
-/* Idle Check result header */
-struct dbg_idle_chk_result_hdr {
- u16 rule_id; /* Failing rule index */
- u16 mem_entry_id; /* Failing memory entry index */
- u8 num_dumped_cond_regs; /* number of dumped condition registers */
- u8 num_dumped_info_regs; /* number of dumped condition registers */
- u8 severity; /* from dbg_idle_chk_severity_types enum */
- u8 reserved;
-};
-
-/* Idle Check result register header */
-struct dbg_idle_chk_result_reg_hdr {
- u8 data;
-#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
-#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
-#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
-#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
- u8 start_entry; /* index of the first checked entry */
- u16 size; /* register size in dwords */
-};
-
-/* Idle Check rule */
-struct dbg_idle_chk_rule {
- u16 rule_id; /* Idle Check rule ID */
- u8 severity; /* value from dbg_idle_chk_severity_types enum */
- u8 cond_id; /* Condition ID */
- u8 num_cond_regs; /* number of condition registers */
- u8 num_info_regs; /* number of info registers */
- u8 num_imms; /* number of immediates in the condition */
- u8 reserved1;
- u16 reg_offset; /* offset of this rules registers in the idle check
- * register array (in dbg_idle_chk_reg units).
- */
- u16 imm_offset; /* offset of this rules immediate values in the
- * immediate values array (in dwords).
- */
-};
-
-/* Idle Check rule parsing data */
-struct dbg_idle_chk_rule_parsing_data {
- u32 data;
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
-};
-
-/* Idle check severity types */
-enum dbg_idle_chk_severity_types {
- /* idle check failure should cause an error */
- IDLE_CHK_SEVERITY_ERROR,
- /* idle check failure should cause an error only if theres no traffic */
- IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
- /* idle check failure should cause a warning */
- IDLE_CHK_SEVERITY_WARNING,
- MAX_DBG_IDLE_CHK_SEVERITY_TYPES
-};
-
-/* Reset register */
-struct dbg_reset_reg {
- u32 data;
-#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF
-#define DBG_RESET_REG_ADDR_SHIFT 0
-#define DBG_RESET_REG_IS_REMOVED_MASK 0x1
-#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
-#define DBG_RESET_REG_RESERVED_MASK 0x7F
-#define DBG_RESET_REG_RESERVED_SHIFT 25
-};
-
-/* Debug Bus block data */
-struct dbg_bus_block_data {
- u8 enable_mask;
- u8 right_shift;
- u8 force_valid_mask;
- u8 force_frame_mask;
- u8 dword_mask;
- u8 line_num;
- u8 hw_id;
- u8 flags;
-#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1
-#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
-#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F
-#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1
-};
-
-enum dbg_bus_clients {
- DBG_BUS_CLIENT_RBCN,
- DBG_BUS_CLIENT_RBCP,
- DBG_BUS_CLIENT_RBCR,
- DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCU,
- DBG_BUS_CLIENT_RBCF,
- DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCS,
- DBG_BUS_CLIENT_RBCH,
- DBG_BUS_CLIENT_RBCZ,
- DBG_BUS_CLIENT_OTHER_ENGINE,
- DBG_BUS_CLIENT_TIMESTAMP,
- DBG_BUS_CLIENT_CPU,
- DBG_BUS_CLIENT_RBCY,
- DBG_BUS_CLIENT_RBCQ,
- DBG_BUS_CLIENT_RBCM,
- DBG_BUS_CLIENT_RBCB,
- DBG_BUS_CLIENT_RBCW,
- DBG_BUS_CLIENT_RBCV,
- MAX_DBG_BUS_CLIENTS
-};
-
-/* Debug Bus constraint operation types */
-enum dbg_bus_constraint_ops {
- DBG_BUS_CONSTRAINT_OP_EQ,
- DBG_BUS_CONSTRAINT_OP_NE,
- DBG_BUS_CONSTRAINT_OP_LT,
- DBG_BUS_CONSTRAINT_OP_LTC,
- DBG_BUS_CONSTRAINT_OP_LE,
- DBG_BUS_CONSTRAINT_OP_LEC,
- DBG_BUS_CONSTRAINT_OP_GT,
- DBG_BUS_CONSTRAINT_OP_GTC,
- DBG_BUS_CONSTRAINT_OP_GE,
- DBG_BUS_CONSTRAINT_OP_GEC,
- MAX_DBG_BUS_CONSTRAINT_OPS
-};
-
-/* Debug Bus trigger state data */
-struct dbg_bus_trigger_state_data {
- u8 msg_len;
- u8 constraint_dword_mask;
- u8 storm_id;
- u8 reserved;
-};
-
-/* Debug Bus memory address */
-struct dbg_bus_mem_addr {
- u32 lo;
- u32 hi;
-};
-
-/* Debug Bus PCI buffer data */
-struct dbg_bus_pci_buf_data {
- struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
- struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
- u32 size; /* PCI buffer size in bytes */
-};
-
-/* Debug Bus Storm EID range filter params */
-struct dbg_bus_storm_eid_range_params {
- u8 min; /* Minimal event ID to filter on */
- u8 max; /* Maximal event ID to filter on */
-};
-
-/* Debug Bus Storm EID mask filter params */
-struct dbg_bus_storm_eid_mask_params {
- u8 val; /* Event ID value */
- u8 mask; /* Event ID mask. 1s in the mask = don't care bits. */
-};
-
-/* Debug Bus Storm EID filter params */
-union dbg_bus_storm_eid_params {
- struct dbg_bus_storm_eid_range_params range;
- struct dbg_bus_storm_eid_mask_params mask;
-};
-
-/* Debug Bus Storm data */
-struct dbg_bus_storm_data {
- u8 enabled;
- u8 mode;
- u8 hw_id;
- u8 eid_filter_en;
- u8 eid_range_not_mask;
- u8 cid_filter_en;
- union dbg_bus_storm_eid_params eid_filter_params;
- u32 cid;
-};
-
-/* Debug Bus data */
-struct dbg_bus_data {
- u32 app_version;
- u8 state;
- u8 mode_256b_en;
- u8 num_enabled_blocks;
- u8 num_enabled_storms;
- u8 target;
- u8 one_shot_en;
- u8 grc_input_en;
- u8 timestamp_input_en;
- u8 filter_en;
- u8 adding_filter;
- u8 filter_pre_trigger;
- u8 filter_post_trigger;
- u8 trigger_en;
- u8 filter_constraint_dword_mask;
- u8 next_trigger_state;
- u8 next_constraint_id;
- struct dbg_bus_trigger_state_data trigger_states[3];
- u8 filter_msg_len;
- u8 rcv_from_other_engine;
- u8 blocks_dword_mask;
- u8 blocks_dword_overlap;
- u32 hw_id_mask;
- struct dbg_bus_pci_buf_data pci_buf;
- struct dbg_bus_block_data blocks[132];
- struct dbg_bus_storm_data storms[6];
-};
-
-/* Debug bus states */
-enum dbg_bus_states {
- DBG_BUS_STATE_IDLE,
- DBG_BUS_STATE_READY,
- DBG_BUS_STATE_RECORDING,
- DBG_BUS_STATE_STOPPED,
- MAX_DBG_BUS_STATES
-};
-
-/* Debug Bus Storm modes */
-enum dbg_bus_storm_modes {
- DBG_BUS_STORM_MODE_PRINTF,
- DBG_BUS_STORM_MODE_PRAM_ADDR,
- DBG_BUS_STORM_MODE_DRA_RW,
- DBG_BUS_STORM_MODE_DRA_W,
- DBG_BUS_STORM_MODE_LD_ST_ADDR,
- DBG_BUS_STORM_MODE_DRA_FSM,
- DBG_BUS_STORM_MODE_FAST_DBGMUX,
- DBG_BUS_STORM_MODE_RH,
- DBG_BUS_STORM_MODE_RH_WITH_STORE,
- DBG_BUS_STORM_MODE_FOC,
- DBG_BUS_STORM_MODE_EXT_STORE,
- MAX_DBG_BUS_STORM_MODES
-};
-
-/* Debug bus target IDs */
-enum dbg_bus_targets {
- DBG_BUS_TARGET_ID_INT_BUF,
- DBG_BUS_TARGET_ID_NIG,
- DBG_BUS_TARGET_ID_PCI,
- MAX_DBG_BUS_TARGETS
-};
-
-/* GRC Dump data */
-struct dbg_grc_data {
- u8 params_initialized;
- u8 reserved1;
- u16 reserved2;
- u32 param_val[48];
-};
-
-/* Debug GRC params */
-enum dbg_grc_params {
- DBG_GRC_PARAM_DUMP_TSTORM,
- DBG_GRC_PARAM_DUMP_MSTORM,
- DBG_GRC_PARAM_DUMP_USTORM,
- DBG_GRC_PARAM_DUMP_XSTORM,
- DBG_GRC_PARAM_DUMP_YSTORM,
- DBG_GRC_PARAM_DUMP_PSTORM,
- DBG_GRC_PARAM_DUMP_REGS,
- DBG_GRC_PARAM_DUMP_RAM,
- DBG_GRC_PARAM_DUMP_PBUF,
- DBG_GRC_PARAM_DUMP_IOR,
- DBG_GRC_PARAM_DUMP_VFC,
- DBG_GRC_PARAM_DUMP_CM_CTX,
- DBG_GRC_PARAM_DUMP_PXP,
- DBG_GRC_PARAM_DUMP_RSS,
- DBG_GRC_PARAM_DUMP_CAU,
- DBG_GRC_PARAM_DUMP_QM,
- DBG_GRC_PARAM_DUMP_MCP,
- DBG_GRC_PARAM_DUMP_DORQ,
- DBG_GRC_PARAM_DUMP_CFC,
- DBG_GRC_PARAM_DUMP_IGU,
- DBG_GRC_PARAM_DUMP_BRB,
- DBG_GRC_PARAM_DUMP_BTB,
- DBG_GRC_PARAM_DUMP_BMB,
- DBG_GRC_PARAM_RESERVD1,
- DBG_GRC_PARAM_DUMP_MULD,
- DBG_GRC_PARAM_DUMP_PRS,
- DBG_GRC_PARAM_DUMP_DMAE,
- DBG_GRC_PARAM_DUMP_TM,
- DBG_GRC_PARAM_DUMP_SDM,
- DBG_GRC_PARAM_DUMP_DIF,
- DBG_GRC_PARAM_DUMP_STATIC,
- DBG_GRC_PARAM_UNSTALL,
- DBG_GRC_PARAM_RESERVED2,
- DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
- DBG_GRC_PARAM_EXCLUDE_ALL,
- DBG_GRC_PARAM_CRASH,
- DBG_GRC_PARAM_PARITY_SAFE,
- DBG_GRC_PARAM_DUMP_CM,
- DBG_GRC_PARAM_DUMP_PHY,
- DBG_GRC_PARAM_NO_MCP,
- DBG_GRC_PARAM_NO_FW_VER,
- DBG_GRC_PARAM_RESERVED3,
- DBG_GRC_PARAM_DUMP_MCP_HW_DUMP,
- DBG_GRC_PARAM_DUMP_ILT_CDUC,
- DBG_GRC_PARAM_DUMP_ILT_CDUT,
- DBG_GRC_PARAM_DUMP_CAU_EXT,
- MAX_DBG_GRC_PARAMS
-};
-
-/* Debug status codes */
-enum dbg_status {
- DBG_STATUS_OK,
- DBG_STATUS_APP_VERSION_NOT_SET,
- DBG_STATUS_UNSUPPORTED_APP_VERSION,
- DBG_STATUS_DBG_BLOCK_NOT_RESET,
- DBG_STATUS_INVALID_ARGS,
- DBG_STATUS_OUTPUT_ALREADY_SET,
- DBG_STATUS_INVALID_PCI_BUF_SIZE,
- DBG_STATUS_PCI_BUF_ALLOC_FAILED,
- DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
- DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS,
- DBG_STATUS_NO_MATCHING_FRAMING_MODE,
- DBG_STATUS_VFC_READ_ERROR,
- DBG_STATUS_STORM_ALREADY_ENABLED,
- DBG_STATUS_STORM_NOT_ENABLED,
- DBG_STATUS_BLOCK_ALREADY_ENABLED,
- DBG_STATUS_BLOCK_NOT_ENABLED,
- DBG_STATUS_NO_INPUT_ENABLED,
- DBG_STATUS_NO_FILTER_TRIGGER_256B,
- DBG_STATUS_FILTER_ALREADY_ENABLED,
- DBG_STATUS_TRIGGER_ALREADY_ENABLED,
- DBG_STATUS_TRIGGER_NOT_ENABLED,
- DBG_STATUS_CANT_ADD_CONSTRAINT,
- DBG_STATUS_TOO_MANY_TRIGGER_STATES,
- DBG_STATUS_TOO_MANY_CONSTRAINTS,
- DBG_STATUS_RECORDING_NOT_STARTED,
- DBG_STATUS_DATA_DIDNT_TRIGGER,
- DBG_STATUS_NO_DATA_RECORDED,
- DBG_STATUS_DUMP_BUF_TOO_SMALL,
- DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
- DBG_STATUS_UNKNOWN_CHIP,
- DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
- DBG_STATUS_BLOCK_IN_RESET,
- DBG_STATUS_INVALID_TRACE_SIGNATURE,
- DBG_STATUS_INVALID_NVRAM_BUNDLE,
- DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
- DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
- DBG_STATUS_NVRAM_READ_FAILED,
- DBG_STATUS_IDLE_CHK_PARSE_FAILED,
- DBG_STATUS_MCP_TRACE_BAD_DATA,
- DBG_STATUS_MCP_TRACE_NO_META,
- DBG_STATUS_MCP_COULD_NOT_HALT,
- DBG_STATUS_MCP_COULD_NOT_RESUME,
- DBG_STATUS_RESERVED0,
- DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
- DBG_STATUS_IGU_FIFO_BAD_DATA,
- DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
- DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
- DBG_STATUS_REG_FIFO_BAD_DATA,
- DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
- DBG_STATUS_DBG_ARRAY_NOT_SET,
- DBG_STATUS_RESERVED1,
- DBG_STATUS_NON_MATCHING_LINES,
- DBG_STATUS_INSUFFICIENT_HW_IDS,
- DBG_STATUS_DBG_BUS_IN_USE,
- DBG_STATUS_INVALID_STORM_DBG_MODE,
- DBG_STATUS_OTHER_ENGINE_BB_ONLY,
- DBG_STATUS_FILTER_SINGLE_HW_ID,
- DBG_STATUS_TRIGGER_SINGLE_HW_ID,
- DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
- MAX_DBG_STATUS
-};
-
-/* Debug Storms IDs */
-enum dbg_storms {
- DBG_TSTORM_ID,
- DBG_MSTORM_ID,
- DBG_USTORM_ID,
- DBG_XSTORM_ID,
- DBG_YSTORM_ID,
- DBG_PSTORM_ID,
- MAX_DBG_STORMS
-};
-
-/* Idle Check data */
-struct idle_chk_data {
- u32 buf_size;
- u8 buf_size_set;
- u8 reserved1;
- u16 reserved2;
-};
-
-struct pretend_params {
- u8 split_type;
- u8 reserved;
- u16 split_id;
-};
-
-/* Debug Tools data (per HW function)
- */
-struct dbg_tools_data {
- struct dbg_grc_data grc;
- struct dbg_bus_data bus;
- struct idle_chk_data idle_chk;
- u8 mode_enable[40];
- u8 block_in_reset[132];
- u8 chip_id;
- u8 hw_type;
- u8 num_ports;
- u8 num_pfs_per_port;
- u8 num_vfs;
- u8 initialized;
- u8 use_dmae;
- u8 reserved;
- struct pretend_params pretend;
- u32 num_regs_read;
-};
-
-/* ILT Clients */
-enum ilt_clients {
- ILT_CLI_CDUC,
- ILT_CLI_CDUT,
- ILT_CLI_QM,
- ILT_CLI_TM,
- ILT_CLI_SRC,
- ILT_CLI_TSDM,
- ILT_CLI_RGFS,
- ILT_CLI_TGFS,
- MAX_ILT_CLIENTS
-};
-
/********************************/
/* HSI Init Functions constants */
/********************************/
@@ -2644,6 +1956,9 @@ struct init_nig_pri_tc_map_req {
/* QM per global RL init parameters */
struct init_qm_global_rl_params {
+ u8 type;
+ u8 reserved0;
+ u16 reserved1;
u32 rate_limit;
};
@@ -2658,18 +1973,33 @@ struct init_qm_port_params {
/* QM per-PQ init parameters */
struct init_qm_pq_params {
- u8 vport_id;
+ u16 vport_id;
+ u16 rl_id;
+ u8 rl_valid;
u8 tc_id;
u8 wrr_group;
- u8 rl_valid;
- u16 rl_id;
u8 port_id;
- u8 reserved;
+};
+
+/* QM per RL init parameters */
+struct init_qm_rl_params {
+ u32 vport_rl;
+ u8 vport_rl_type;
+ u8 reserved[3];
+};
+
+/* QM Rate Limiter types */
+enum init_qm_rl_type {
+ QM_RL_TYPE_NORMAL,
+ QM_RL_TYPE_QCN,
+ MAX_INIT_QM_RL_TYPE
};
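
For illustration only, a hypothetical initialization of one rate-limiter entry
using the two new definitions above (the rate is an arbitrary example, and the
Mb/sec unit is an assumption based on the other RL fields in this header):

    /* Example only: one QCN-type rate limiter at 10 Gb/s */
    struct init_qm_rl_params rl = {
            .vport_rl      = 10000,           /* presumably Mb/sec */
            .vport_rl_type = QM_RL_TYPE_QCN,
    };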
/* QM per-vport init parameters */
struct init_qm_vport_params {
u16 wfq;
+ u16 reserved;
+ u16 tc_wfq[NUM_OF_TCS];
u16 first_tx_pq_id[NUM_OF_TCS];
};
@@ -2728,14 +2058,14 @@ struct fw_info_location {
};
enum init_modes {
- MODE_RESERVED,
+ MODE_BB_A0_DEPRECATED,
MODE_BB,
MODE_K2,
MODE_ASIC,
- MODE_RESERVED2,
- MODE_RESERVED3,
- MODE_RESERVED4,
- MODE_RESERVED5,
+ MODE_EMUL_REDUCED,
+ MODE_EMUL_FULL,
+ MODE_FPGA,
+ MODE_CHIPSIM,
MODE_SF,
MODE_MF_SD,
MODE_MF_SI,
@@ -2743,8 +2073,8 @@ enum init_modes {
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
MODE_100G,
- MODE_RESERVED6,
- MODE_RESERVED7,
+ MODE_SKIP_PRAM_INIT,
+ MODE_EMUL_MAC,
MAX_INIT_MODES
};
@@ -3009,706 +2339,6 @@ struct iro {
u16 size;
};
-/***************************** Public Functions *******************************/
-
-/**
- * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
- * arrays.
- *
- * @param p_hwfn - HW device data
- * @param bin_ptr - a pointer to the binary data with debug arrays.
- */
-enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
- const u8 * const bin_ptr);
-
-/**
- * @brief qed_read_regs - Reads registers into a buffer (using GRC).
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf - Destination buffer.
- * @param addr - Source GRC address in dwords.
- * @param len - Number of registers to read.
- */
-void qed_read_regs(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
-
-/**
- * @brief qed_read_fw_info - Reads FW info from the chip.
- *
- * The FW info contains FW-related information, such as the FW version,
- * FW image (main/L2B/kuku), FW timestamp, etc.
- * The FW info is read from the internal RAM of the first Storm that is not in
- * reset.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param fw_info - Out: a pointer to write the FW info into.
- *
- * @return true if the FW info was read successfully from one of the Storms,
- * or false if all Storms are in reset.
- */
-bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, struct fw_info *fw_info);
-/**
- * @brief qed_dbg_grc_config - Sets the value of a GRC parameter.
- *
- * @param p_hwfn - HW device data
- * @param grc_param - GRC parameter
- * @param val - Value to set.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - grc_param is invalid
- * - val is outside the allowed boundaries
- */
-enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
- enum dbg_grc_params grc_param, u32 val);
-
-/**
- * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
- * default value.
- *
- * @param p_hwfn - HW device data
- */
-void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
-/**
- * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
- * GRC Dump.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump
- * data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the collected GRC data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified dump buffer is too small
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size
- * for idle check results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for the idle check
- * data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results
- * into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the idle check data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size
- * for mcp trace results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the trace data in MCP scratchpad contain an invalid signature
- * - the bundle ID in NVRAM is invalid
- * - the trace meta data cannot be found (in NVRAM or image file)
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results
- * into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the mcp trace data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - the trace data in MCP scratchpad contain an invalid signature
- * - the bundle ID in NVRAM is invalid
- * - the trace meta data cannot be found (in NVRAM or image file)
- * - the trace meta data cannot be read (from NVRAM or image file)
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size
- * for grc trace fifo results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into
- * the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the reg fifo data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - DMAE transaction failed
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size
- * for the IGU fifo results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo
- * data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into
- * the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the IGU fifo data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - DMAE transaction failed
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required
- * buffer size for protection override window results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for protection
- * override data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status
-qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-/**
- * @brief qed_dbg_protection_override_dump - Reads protection override window
- * entries and writes the results into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the protection override data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - DMAE transaction failed
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-/**
- * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer
- * size for FW Asserts results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-/**
- * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results
- * into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the FW Asserts data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_read_attn - Reads the attention registers of the specified
- * block and type, and writes the results into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param block - Block ID.
- * @param attn_type - Attention type.
- * @param clear_status - Indicates if the attention status should be cleared.
- * @param results - OUT: Pointer to write the read results into
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum block_id block,
- enum dbg_attn_type attn_type,
- bool clear_status,
- struct dbg_attn_block_result *results);
-
-/**
- * @brief qed_dbg_print_attn - Prints attention registers values in the
- * specified results struct.
- *
- * @param p_hwfn
- * @param results - Pointer to the attention read results
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
- struct dbg_attn_block_result *results);
-
-/******************************* Data Types **********************************/
-
-struct mcp_trace_format {
- u32 data;
-#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
-#define MCP_TRACE_FORMAT_MODULE_OFFSET 0
-#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
-#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16
-#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
-#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18
-#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
-#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20
-#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
-#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22
-#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
-#define MCP_TRACE_FORMAT_LEN_OFFSET 24
-
- char *format_str;
-};
-
-/* MCP Trace Meta data structure */
-struct mcp_trace_meta {
- u32 modules_num;
- char **modules;
- u32 formats_num;
- struct mcp_trace_format *formats;
- bool is_allocated;
-};
-
-/* Debug Tools user data */
-struct dbg_tools_user_data {
- struct mcp_trace_meta mcp_trace_meta;
- const u32 *mcp_trace_user_meta_buf;
-};
-
-/******************************** Constants **********************************/
-
-#define MAX_NAME_LEN 16
-
-/***************************** Public Functions *******************************/
-
-/**
- * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
- * debug arrays.
- *
- * @param p_hwfn - HW device data
- * @param bin_ptr - a pointer to the binary data with debug arrays.
- */
-enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
- const u8 * const bin_ptr);
-
-/**
- * @brief qed_dbg_alloc_user_data - Allocates user debug data.
- *
- * @param p_hwfn - HW device data
- * @param user_data_ptr - OUT: a pointer to the allocated memory.
- */
-enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
- void **user_data_ptr);
-
-/**
- * @brief qed_dbg_get_status_str - Returns a string for the specified status.
- *
- * @param status - a debug status code.
- *
- * @return a string for the specified status
- */
-const char *qed_dbg_get_status_str(enum dbg_status status);
-
-/**
- * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size
- * for idle check results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - idle check dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-/**
- * @brief qed_print_idle_chk_results - Prints idle check results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - idle check dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the idle check results.
- * @param num_errors - OUT: number of errors found in idle check.
- * @param num_warnings - OUT: number of warnings found in idle check.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf,
- u32 *num_errors,
- u32 *num_warnings);
-
-/**
- * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data.
- *
- * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
- * no NVRAM access).
- *
- * @param p_hwfn - HW device data
- * @param meta_buf - pointer to MCP Trace meta data
- */
-void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
- const u32 *meta_buf);
-
-/**
- * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
- * for MCP Trace results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - MCP Trace dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_mcp_trace_results - Prints MCP Trace results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - mcp trace dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the mcp trace results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and
- * keeps the MCP trace meta data allocated, to support continuous MCP Trace
- * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should
- * be called to free the meta data.
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - mcp trace dump buffer, starting from the header.
- * @param results_buf - buffer for printing the mcp trace results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- char *results_buf);
-
-/**
- * @brief qed_print_mcp_trace_line - Prints MCP Trace results for a single line
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - mcp trace dump buffer, starting from the header.
- * @param num_dumped_bytes - number of bytes that were dumped.
- * @param results_buf - buffer for printing the mcp trace results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
- u8 *dump_buf,
- u32 num_dumped_bytes,
- char *results_buf);
-
-/**
- * @brief qed_mcp_trace_free_meta_data - Frees the MCP Trace meta data.
- * Should be called after continuous MCP Trace parsing.
- *
- * @param p_hwfn - HW device data
- */
-void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn);
-
-/**
- * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
- * for reg_fifo results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - reg fifo dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_reg_fifo_results - Prints reg fifo results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - reg fifo dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the reg fifo results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size
- * for igu_fifo results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - IGU fifo dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_igu_fifo_results - Prints IGU fifo results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - IGU fifo dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the IGU fifo results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_get_protection_override_results_buf_size - Returns the required
- * buffer size for protection override results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - protection override dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status
-qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_protection_override_results - Prints protection override
- * results.
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - protection override dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the protection override results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size
- * for FW Asserts results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - FW Asserts dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_fw_asserts_results - Prints FW Asserts results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - FW Asserts dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the FW Asserts results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_dbg_parse_attn - Parses and prints attention registers values in
- * the specified results struct.
- *
- * @param p_hwfn - HW device data
- * @param results - Pointer to the attention read results
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
- struct dbg_attn_block_result *results);
-
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
@@ -3745,19 +2375,28 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
/* Win 13 */
#define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL
+/* Returns the VOQ based on port and TC */
+#define VOQ(port, tc, max_phys_tcs_per_port) ((tc) == \
+ PURE_LB_TC ? NUM_OF_PHYS_TCS *\
+ MAX_NUM_PORTS_BB + \
+ (port) : (port) * \
+ (max_phys_tcs_per_port) + (tc))
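
As a sanity check of the macro above: a physical TC maps to consecutive
per-port VOQs, while the pure-LB TC maps past all physical VOQs. Assuming
max_phys_tcs_per_port is 4:

    VOQ(1, 2, 4);          /* physical TC: 1 * 4 + 2 = VOQ 6 */
    VOQ(1, PURE_LB_TC, 4); /* pure LB: NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + 1 */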
+
+struct init_qm_pq_params;
+
/**
- * @brief qed_qm_pf_mem_size - prepare QM ILT sizes
+ * qed_qm_pf_mem_size(): Prepare QM ILT sizes.
*
- * Returns the required host memory size in 4KB units.
- * Must be called before all QM init HSI functions.
+ * @num_pf_cids: Number of connections used by this PF.
+ * @num_vf_cids: Number of connections used by VFs of this PF.
+ * @num_tids: Number of tasks used by this PF.
+ * @num_pf_pqs: Number of PQs used by this PF.
+ * @num_vf_pqs: Number of PQs used by VFs of this PF.
*
- * @param num_pf_cids - number of connections used by this PF
- * @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param num_pf_pqs - number of PQs used by this PF
- * @param num_vf_pqs - number of PQs used by VFs of this PF
+ * Return: The required host memory size in 4KB units.
*
- * @return The required host memory size in 4KB units.
+ * Must be called before all QM init HSI functions.
*/
u32 qed_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
@@ -3771,8 +2410,19 @@ struct qed_qm_common_rt_init_params {
bool global_rl_en;
bool vport_wfq_en;
struct init_qm_port_params *port_params;
+ struct init_qm_global_rl_params
+ global_rl_params[COMMON_MAX_QM_GLOBAL_RLS];
};
+/**
+ * qed_qm_common_rt_init(): Prepare QM runtime init values for the
+ * engine phase.
+ *
+ * @p_hwfn: HW device data.
+ * @p_params: Parameters.
+ *
+ * Return: 0 on success, -1 on error.
+ */
int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
struct qed_qm_common_rt_init_params *p_params);
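
A hedged sketch of the common-phase call, using only the parameter fields
visible in this hunk (port_params is a caller-provided array; the struct's
remaining members are elided by the diff):

    /* Example only: minimal common-phase QM runtime init */
    struct qed_qm_common_rt_init_params params = {
            .global_rl_en = true,
            .vport_wfq_en = false,
            .port_params  = port_params,   /* caller-provided */
    };

    if (qed_qm_common_rt_init(p_hwfn, &params))
            DP_NOTICE(p_hwfn, "QM common runtime init failed\n");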
@@ -3789,85 +2439,116 @@ struct qed_qm_pf_rt_init_params {
u16 num_vf_pqs;
u16 start_vport;
u16 num_vports;
+ u16 start_rl;
+ u16 num_rls;
u16 pf_wfq;
u32 pf_rl;
+ u32 link_speed;
struct init_qm_pq_params *pq_params;
struct init_qm_vport_params *vport_params;
+ struct init_qm_rl_params *rl_params;
};
+/**
+ * qed_qm_pf_rt_init(): Prepare QM runtime init values for the PF phase.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers
+ * @p_params: Parameters.
+ *
+ * Return: 0 on success, -1 on error.
+ */
int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params);
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params);
/**
- * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF
+ * qed_init_pf_wfq(): Initializes the WFQ weight of the specified PF.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param pf_id - PF ID
- * @param pf_wfq - WFQ weight. Must be non-zero.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers
+ * @pf_id: PF ID
+ * @pf_wfq: WFQ weight. Must be non-zero.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
*/
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
/**
- * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
+ * qed_init_pf_rl(): Initializes the rate limit of the specified PF.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param pf_id - PF ID
- * @param pf_rl - rate limit in Mb/sec units
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF ID.
+ * @pf_rl: Rate limit in Mb/sec units.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
*/
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl);
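
A hypothetical call site, just to make the units and error convention
concrete:

    /* Example only: cap PF 0 at 25 Gb/s (pf_rl is in Mb/sec) */
    if (qed_init_pf_rl(p_hwfn, p_ptt, 0, 25000))
            DP_NOTICE(p_hwfn, "Failed to set PF rate limit\n");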
/**
- * @brief qed_init_vport_wfq Initializes the WFQ weight of the specified VPORT
+ * qed_init_vport_wfq(): Initializes the WFQ weight of the specified VPORT.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param first_tx_pq_id- An array containing the first Tx PQ ID associated
- * with the VPORT for each TC. This array is filled by
- * qed_qm_pf_rt_init
- * @param vport_wfq - WFQ weight. Must be non-zero.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers
+ * @first_tx_pq_id: An array containing the first Tx PQ ID associated
+ * with the VPORT for each TC. This array is filled by
+ * qed_qm_pf_rt_init
+ * @wfq: WFQ weight. Must be non-zero.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
*/
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq);
/**
- * @brief qed_init_global_rl - Initializes the rate limit of the specified
- * rate limiter
+ * qed_init_vport_tc_wfq(): Initializes the WFQ weight of the specified
+ * VPORT and TC.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param rl_id - RL ID
- * @param rate_limit - rate limit in Mb/sec units
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @first_tx_pq_id: The first Tx PQ ID associated with the VPORT and TC.
+ * (filled by qed_qm_pf_rt_init).
+ * @weight: VPORT+TC WFQ weight.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
+ */
+int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id, u16 weight);
+
+/**
+ * qed_init_global_rl(): Initializes the rate limit of the specified
+ * rate limiter.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @rl_id: RL ID.
+ * @rate_limit: Rate limit in Mb/sec units.
+ * @vport_rl_type: Vport RL type.
+ *
+ * Return: 0 on success, -1 on error.
*/
int qed_init_global_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u16 rl_id, u32 rate_limit);
+ u16 rl_id, u32 rate_limit,
+ enum init_qm_rl_type vport_rl_type);
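
Tying the new RL type parameter into the call, a sketch with an arbitrary RL
ID and rate:

    /* Example only: program global RL 5 as a normal 1 Gb/s limiter */
    if (qed_init_global_rl(p_hwfn, p_ptt, 5, 1000, QM_RL_TYPE_NORMAL))
            DP_NOTICE(p_hwfn, "Failed to init global RL\n");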
/**
- * @brief qed_send_qm_stop_cmd Sends a stop command to the QM
+ * qed_send_qm_stop_cmd(): Sends a stop command to the QM.
*
- * @param p_hwfn
- * @param p_ptt
- * @param is_release_cmd - true for release, false for stop.
- * @param is_tx_pq - true for Tx PQs, false for Other PQs.
- * @param start_pq - first PQ ID to stop
- * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @is_release_cmd: true for release, false for stop.
+ * @is_tx_pq: true for Tx PQs, false for Other PQs.
+ * @start_pq: First PQ ID to stop.
+ * @num_pqs: Number of PQs to stop, starting from start_pq.
*
- * @return bool, true if successful, false if timeout occurred while waiting for
- * QM command done.
+ * Return: Bool, true if successful, false if timeout occurred while waiting
+ * for QM command done.
*/
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3875,53 +2556,64 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
bool is_tx_pq, u16 start_pq, u16 num_pqs);
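
A hedged teardown sketch, assuming the usual stop-then-release sequence (the
PQ range is an arbitrary example):

    /* Example only: stop, then release, 8 Tx PQs starting at PQ 16 */
    if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 16, 8) ||
        !qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 16, 8))
            return -EBUSY;  /* timed out waiting for QM command done */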
/**
- * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
+ * qed_set_vxlan_dest_port(): Initializes vxlan tunnel destination udp port.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param dest_port - vxlan destination udp port.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dest_port: vxlan destination udp port.
+ *
+ * Return: Void.
*/
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port);
/**
- * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ * qed_set_vxlan_enable(): Enable or disable VXLAN tunnel in HW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @vxlan_enable: vxlan enable flag.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param vxlan_enable - vxlan enable flag.
+ * Return: Void.
*/
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool vxlan_enable);
/**
- * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
+ * qed_set_gre_enable(): Enable or disable GRE tunnel in HW.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param eth_gre_enable - eth GRE enable enable flag.
- * @param ip_gre_enable - IP GRE enable enable flag.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @eth_gre_enable: Eth GRE enable flag.
+ * @ip_gre_enable: IP GRE enable flag.
+ *
+ * Return: Void.
*/
void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool eth_gre_enable, bool ip_gre_enable);
/**
- * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
+ * qed_set_geneve_dest_port(): Initializes geneve tunnel destination udp port.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dest_port: Geneve destination udp port.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param dest_port - geneve destination udp port.
+ * Return: Void.
*/
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port);
/**
- * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
+ * qed_set_geneve_enable(): Enable or disable GENEVE tunnel in HW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @eth_geneve_enable: Eth GENEVE enable flag.
+ * @ip_geneve_enable: IP GENEVE enable flag.
*
- * @param p_ptt - ptt window used for writing the registers.
- * @param eth_geneve_enable - eth GENEVE enable enable flag.
- * @param ip_geneve_enable - IP GENEVE enable enable flag.
+ * Return: Void.
*/
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3931,25 +2623,29 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable);
/**
- * @brief qed_gft_disable - Disable GFT
+ * qed_gft_disable(): Disable GFT.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param pf_id - pf on which to disable GFT.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF on which to disable GFT.
+ *
+ * Return: Void.
*/
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
/**
- * @brief qed_gft_config - Enable and configure HW for GFT
+ * qed_gft_config(): Enable and configure HW for GFT.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF on which to enable GFT.
+ * @tcp: Set profile tcp packets.
+ * @udp: Set profile udp packet.
+ * @ipv4: Set profile ipv4 packet.
+ * @ipv6: Set profile ipv6 packet.
+ * @profile_type: Which packet fields to match on. Use enum gft_profile_type.
*
- * @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers.
- * @param pf_id - pf on which to enable GFT.
- * @param tcp - set profile tcp packets.
- * @param udp - set profile udp packet.
- * @param ipv4 - set profile ipv4 packet.
- * @param ipv6 - set profile ipv6 packet.
- * @param profile_type - define packet same fields. Use enum gft_profile_type.
+ * Return: Void.
*/
void qed_gft_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3959,438 +2655,135 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
bool ipv4, bool ipv6, enum gft_profile_type profile_type);
/**
- * @brief qed_enable_context_validation - Enable and configure context
- * validation.
+ * qed_enable_context_validation(): Enable and configure context
+ * validation.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
+ * Return: Void.
*/
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_calc_session_ctx_validation - Calcualte validation byte for
- * session context.
+ * qed_calc_session_ctx_validation(): Calculate validation byte for
+ * session context.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - context size.
- * @param ctx_type - context type.
- * @param cid - context cid.
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Context size.
+ * @ctx_type: Context type.
+ * @cid: Context cid.
+ *
+ * Return: Void.
*/
void qed_calc_session_ctx_validation(void *p_ctx_mem,
u16 ctx_size, u8 ctx_type, u32 cid);
/**
- * @brief qed_calc_task_ctx_validation - Calcualte validation byte for task
- * context.
+ * qed_calc_task_ctx_validation(): Calcualte validation byte for task
+ * context.
+ *
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Context size.
+ * @ctx_type: Context type.
+ * @tid: Context tid.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - context size.
- * @param ctx_type - context type.
- * @param tid - context tid.
+ * Return: Void.
*/
void qed_calc_task_ctx_validation(void *p_ctx_mem,
u16 ctx_size, u8 ctx_type, u32 tid);
/**
- * @brief qed_memset_session_ctx - Memset session context to 0 while
- * preserving validation bytes.
+ * qed_memset_session_ctx(): Memset session context to 0 while
+ * preserving validation bytes.
+ *
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Size to initialize.
+ * @ctx_type: Context type.
*
- * @param p_hwfn -
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialzie.
- * @param ctx_type - context type.
+ * Return: Void.
*/
void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
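
For illustration, how the calc/memset pairs above are meant to interact (all
names here are placeholders):

    /* Example only: stamp a new session context for cid, and later
     * zero it; the memset variant keeps the validation bytes intact.
     */
    qed_calc_session_ctx_validation(p_ctx_mem, ctx_size, ctx_type, cid);
    /* ... context in use ... */
    qed_memset_session_ctx(p_ctx_mem, ctx_size, ctx_type);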
/**
- * @brief qed_memset_task_ctx - Memset task context to 0 while preserving
- * validation bytes.
+ * qed_memset_task_ctx(): Memset task context to 0 while preserving
+ * validation bytes.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialzie.
- * @param ctx_type - context type.
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Size to initialize.
+ * @ctx_type: Context type.
+ *
+ * Return: Void.
*/
void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
#define NUM_STORMS 6
/**
- * @brief qed_set_rdma_error_level - Sets the RDMA assert level.
- * If the severity of the error will be
- * above the level, the FW will assert.
- * @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers
- * @param assert_level - An array of assert levels for each storm.
+ * qed_set_rdma_error_level(): Sets the RDMA assert level.
+ * If the severity of an error exceeds this level, the FW will assert.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @assert_level: An array of assert levels for each storm.
*
+ * Return: Void.
*/
void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 assert_level[NUM_STORMS]);
/**
- * @brief qed_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory.
+ * qed_fw_overlay_mem_alloc(): Allocates and fills the FW overlay memory.
*
- * @param p_hwfn - HW device data
- * @param fw_overlay_in_buf - the input FW overlay buffer.
- * @param buf_size - the size of the input FW overlay buffer in bytes.
- * must be aligned to dwords.
- * @param fw_overlay_out_mem - OUT: a pointer to the allocated overlays memory.
+ * @p_hwfn: HW device data.
+ * @fw_overlay_in_buf: The input FW overlay buffer.
+ * @buf_size_in_bytes: The size of the input FW overlay buffer in bytes.
+ * must be aligned to dwords.
*
- * @return a pointer to the allocated overlays memory,
+ * Return: A pointer to the allocated overlays memory,
* or NULL in case of failures.
*/
struct phys_mem_desc *
qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
- const u32 * const fw_overlay_in_buf,
+ const u32 *const fw_overlay_in_buf,
u32 buf_size_in_bytes);
/**
- * @brief qed_fw_overlay_init_ram - Initializes the FW overlay RAM.
+ * qed_fw_overlay_init_ram(): Initializes the FW overlay RAM.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @fw_overlay_mem: the allocated FW overlay memory.
*
- * @param p_hwfn - HW device data.
- * @param p_ptt - ptt window used for writing the registers.
- * @param fw_overlay_mem - the allocated FW overlay memory.
+ * Return: Void.
*/
void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct phys_mem_desc *fw_overlay_mem);
/**
- * @brief qed_fw_overlay_mem_free - Frees the FW overlay memory.
+ * qed_fw_overlay_mem_free(): Frees the FW overlay memory.
*
- * @param p_hwfn - HW device data.
- * @param fw_overlay_mem - the allocated FW overlay memory to free.
+ * @p_hwfn: HW device data.
+ * @fw_overlay_mem: The allocated FW overlay memory to free.
+ *
+ * Return: Void.
*/
void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
- struct phys_mem_desc *fw_overlay_mem);
+ struct phys_mem_desc **fw_overlay_mem);
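
Note the changed signature: free now takes a pointer-to-pointer, presumably so
it can clear the caller's handle. A lifecycle sketch under that assumption
(buffer names are placeholders):

    /* Example only: allocate, program, and free FW overlay memory */
    struct phys_mem_desc *overlays;

    overlays = qed_fw_overlay_mem_alloc(p_hwfn, fw_overlay_buf,
                                        buf_size_in_bytes);
    if (!overlays)
            return -ENOMEM;
    qed_fw_overlay_init_ram(p_hwfn, p_ptt, overlays);
    /* ... runtime ... */
    qed_fw_overlay_mem_free(p_hwfn, &overlays);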
-/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
-
-/* Tstorm port statistics */
-#define TSTORM_PORT_STAT_OFFSET(port_id) \
- (IRO[1].base + ((port_id) * IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
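
All of these (removed) OFFSET/SIZE macros expand the same way: IRO[n].base is
the region start, m1/m2 are per-index strides, and IRO[n].size is the element
size. A hypothetical helper equivalent to the single-index form:

    /* Sketch only: base + index * stride, per struct iro above */
    static inline u32 iro_offset1(const struct iro *iro, u32 idx)
    {
            return iro->base + idx * iro->m1;
    }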
-
-/* Tstorm ll2 port statistics */
-#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
- (IRO[2].base + ((port_id) * IRO[2].m1))
-#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
-
-/* Ustorm VF-PF Channel ready flag */
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
- (IRO[3].base + ((vf_id) * IRO[3].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
-
-/* Ustorm Final flr cleanup ack */
-#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
- (IRO[4].base + ((pf_id) * IRO[4].m1))
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
-
-/* Ustorm Event ring consumer */
-#define USTORM_EQE_CONS_OFFSET(pf_id) \
- (IRO[5].base + ((pf_id) * IRO[5].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[5].size)
-
-/* Ustorm eth queue zone */
-#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
- (IRO[6].base + ((queue_zone_id) * IRO[6].m1))
-#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
-
-/* Ustorm Common Queue ring consumer */
-#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
- (IRO[7].base + ((queue_zone_id) * IRO[7].m1))
-#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
-
-/* Xstorm common PQ info */
-#define XSTORM_PQ_INFO_OFFSET(pq_id) \
- (IRO[8].base + ((pq_id) * IRO[8].m1))
-#define XSTORM_PQ_INFO_SIZE (IRO[8].size)
-
-/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
-
-/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
-
-/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
-
-/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
-
-/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
-
-/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[14].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[14].size)
-
-/* Xstorm overlay buffer host address */
-#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[15].base)
-#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[15].size)
-
-/* Ystorm overlay buffer host address */
-#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[16].base)
-#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[16].size)
-
-/* Pstorm overlay buffer host address */
-#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[17].base)
-#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[17].size)
-
-/* Tstorm overlay buffer host address */
-#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[18].base)
-#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[18].size)
-
-/* Mstorm overlay buffer host address */
-#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[19].base)
-#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[19].size)
-
-/* Ustorm overlay buffer host address */
-#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[20].base)
-#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[20].size)
-
-/* Tstorm producers */
-#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
- (IRO[21].base + ((core_rx_queue_id) * IRO[21].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[21].size)
-
-/* Tstorm LightL2 queue statistics */
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[22].base + ((core_rx_queue_id) * IRO[22].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[22].size)
-
-/* Ustorm LiteL2 queue statistics */
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[23].base + ((core_rx_queue_id) * IRO[23].m1))
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[23].size)
-
-/* Pstorm LiteL2 queue statistics */
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
- (IRO[24].base + ((core_tx_stats_id) * IRO[24].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[24].size)
-
-/* Mstorm queue statistics */
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[25].size)
-
-/* TPA aggregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[26].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[26].size)
-
-/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
- * mode
- */
-#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
- (IRO[27].base + ((vf_id) * IRO[27].m1) + ((vf_queue_id) * IRO[27].m2))
-#define MSTORM_ETH_VF_PRODS_SIZE (IRO[27].size)
-
-/* Mstorm ETH PF queues producers */
-#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
- (IRO[28].base + ((queue_id) * IRO[28].m1))
-#define MSTORM_ETH_PF_PRODS_SIZE (IRO[28].size)
-
-/* Mstorm pf statistics */
-#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[29].base + ((pf_id) * IRO[29].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[29].size)
-
-/* Ustorm queue statistics */
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[30].base + ((stat_counter_id) * IRO[30].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[30].size)
-
-/* Ustorm pf statistics */
-#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[31].base + ((pf_id) * IRO[31].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[31].size)
-
-/* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[32].base + ((stat_counter_id) * IRO[32].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[32].size)
-
-/* Pstorm pf statistics */
-#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[33].base + ((pf_id) * IRO[33].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[33].size)
-
-/* Control frame's EthType configuration for TX control frame security */
-#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
- (IRO[34].base + ((eth_type_id) * IRO[34].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[34].size)
-
-/* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[35].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[35].size)
-
-/* Tstorm Eth limit Rx rate */
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
- (IRO[36].base + ((pf_id) * IRO[36].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[36].size)
-
-/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
- * Use eth_tstorm_rss_update_data for update
- */
-#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \
- (IRO[37].base + ((pf_id) * IRO[37].m1))
-#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[37].size)
-
-/* Xstorm queue zone */
-#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[38].base + ((queue_id) * IRO[38].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[38].size)
-
-/* Ystorm cqe producer */
-#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[39].base + ((rss_id) * IRO[39].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[39].size)
-
-/* Ustorm cqe producer */
-#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[40].base + ((rss_id) * IRO[40].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[40].size)
-
-/* Ustorm grq producer */
-#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
- (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[41].size)
-
-/* Tstorm cmdq-cons of given command queue-id */
-#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
- (IRO[42].base + ((cmdq_queue_id) * IRO[42].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[42].size)
-
-/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
- * BDqueue-id
+#define PCICFG_OFFSET 0x2000
+#define GRC_CONFIG_REG_PF_INIT_VF 0x624
+
+/* First VF_NUM for PF is encoded in this register.
+ * The number of VFs assigned to a PF is assumed to be a multiple of 8.
+ * Software should program these bits based on the total number of VFs
+ * programmed for each PF.
+ * Since registers from 0x000-0x7ff are split across functions, each PF will
+ * have the same location for the same 4 bits.
*/
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
- (IRO[43].base + ((storage_func_id) * IRO[43].m1) + \
- ((bdq_id) * IRO[43].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[43].size)
-
-/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
- (IRO[44].base + ((storage_func_id) * IRO[44].m1) + \
- ((bdq_id) * IRO[44].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[44].size)
-
-/* Tstorm iSCSI RX stats */
-#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
- (IRO[45].base + ((storage_func_id) * IRO[45].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[45].size)
-
-/* Mstorm iSCSI RX stats */
-#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
- (IRO[46].base + ((storage_func_id) * IRO[46].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[46].size)
-
-/* Ustorm iSCSI RX stats */
-#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
- (IRO[47].base + ((storage_func_id) * IRO[47].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[47].size)
-
-/* Xstorm iSCSI TX stats */
-#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
- (IRO[48].base + ((storage_func_id) * IRO[48].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[48].size)
-
-/* Ystorm iSCSI TX stats */
-#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
- (IRO[49].base + ((storage_func_id) * IRO[49].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[49].size)
-
-/* Pstorm iSCSI TX stats */
-#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
- (IRO[50].base + ((storage_func_id) * IRO[50].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[50].size)
-
-/* Tstorm FCoE RX stats */
-#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[51].base + ((pf_id) * IRO[51].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[51].size)
-
-/* Pstorm FCoE TX stats */
-#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
- (IRO[52].base + ((pf_id) * IRO[52].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[52].size)
-
-/* Pstorm RDMA queue statistics */
-#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[53].base + ((rdma_stat_counter_id) * IRO[53].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[53].size)
-
-/* Tstorm RDMA queue statistics */
-#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[54].base + ((rdma_stat_counter_id) * IRO[54].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[54].size)
-
-/* Xstorm error level for assert */
-#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[55].base + ((pf_id) * IRO[55].m1))
-#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[55].size)
-
-/* Ystorm error level for assert */
-#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[56].base + ((pf_id) * IRO[56].m1))
-#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[56].size)
-
-/* Pstorm error level for assert */
-#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[57].base + ((pf_id) * IRO[57].m1))
-#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[57].size)
-
-/* Tstorm error level for assert */
-#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[58].base + ((pf_id) * IRO[58].m1))
-#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[58].size)
-
-/* Mstorm error level for assert */
-#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[59].base + ((pf_id) * IRO[59].m1))
-#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[59].size)
-
-/* Ustorm error level for assert */
-#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[60].base + ((pf_id) * IRO[60].m1))
-#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[60].size)
-
-/* Xstorm iWARP rxmit stats */
-#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
- (IRO[61].base + ((pf_id) * IRO[61].m1))
-#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[61].size)
-
-/* Tstorm RoCE Event Statistics */
-#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
- (IRO[62].base + ((roce_pf_id) * IRO[62].m1))
-#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[62].size)
-
-/* DCQCN Received Statistics */
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id)\
- (IRO[63].base + ((roce_pf_id) * IRO[63].m1))
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[63].size)
-
-/* RoCE Error Statistics */
-#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
- (IRO[64].base + ((roce_pf_id) * IRO[64].m1))
-#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[64].size)
-
-/* DCQCN Sent Statistics */
-#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
- (IRO[65].base + ((roce_pf_id) * IRO[65].m1))
-#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[65].size)
-
-/* RoCE CQEs Statistics */
-#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
- (IRO[66].base + ((roce_pf_id) * IRO[66].m1))
-#define USTORM_ROCE_CQE_STATS_SIZE (IRO[66].size)
+#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xff
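A minimal sketch of how the new mask is meant to be consumed, assuming the
register value has already been read from GRC_CONFIG_REG_PF_INIT_VF (the read
itself is outside this sketch, and the helper name is illustrative): per the
comment above, the low byte encodes the first VF number assigned to the PF.

#include <stdint.h>

#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xff

/* Extract the first VF number for this PF from the raw register value */
static uint8_t pf_first_vf_num(uint32_t pf_init_vf_reg)
{
	return pf_init_vf_reg & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK;
}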
/* Runtime array offsets */
#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
@@ -4721,116 +3114,118 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
#define QM_REG_TXPQMAP_RT_SIZE 512
#define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32068
+#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32068
+#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32580
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 32580
+#define QM_REG_WFQVPMAP_RT_OFFSET 33092
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_PTRTBLTX_RT_OFFSET 33092
+#define QM_REG_PTRTBLTX_RT_OFFSET 33604
#define QM_REG_PTRTBLTX_RT_SIZE 1024
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34116
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34628
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34276
-#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34277
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34278
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34279
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34280
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34281
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34282
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34283
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34788
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34789
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34790
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34791
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34792
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34793
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34794
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34795
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34287
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34799
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34291
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34803
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34323
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34835
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34339
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34851
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34355
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34867
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34371
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34883
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34387
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34388
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34899
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34900
#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34396
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34397
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34398
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34399
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34400
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34401
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34402
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34403
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34404
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34405
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34406
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34407
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34408
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34409
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34410
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34411
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34412
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34413
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34414
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34415
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34416
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34417
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34418
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34419
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34420
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34421
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34422
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34423
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34424
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34425
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34426
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34427
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34428
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34429
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34430
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34431
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34432
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34433
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34434
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34435
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34436
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34437
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34438
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34439
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34440
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34441
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34442
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34443
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34444
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34445
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34446
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34447
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34448
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34449
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34450
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34451
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34452
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34453
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34454
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34455
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34456
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34457
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34458
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34459
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34460
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34461
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34462
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34463
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34464
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34465
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34466
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34467
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34468
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34469
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34470
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34471
-
-#define RUNTIME_ARRAY_SIZE 34472
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34908
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34909
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34910
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34911
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34912
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34913
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34914
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34915
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34916
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34917
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34918
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34919
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34920
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34921
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34923
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34925
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34926
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34927
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34928
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34929
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34930
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34931
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34932
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34933
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34934
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34935
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34936
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34937
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34938
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34939
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34940
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34941
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34942
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34943
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34944
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34945
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34946
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34947
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34948
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34949
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34950
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34951
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34952
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34953
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34954
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34955
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34956
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34957
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34958
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34959
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34960
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34961
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34962
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34963
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34964
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34965
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34966
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34967
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34968
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34969
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34970
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34971
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34972
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34973
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34974
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34975
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34976
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34977
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34978
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34979
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34980
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34981
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34982
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34983
+
+#define RUNTIME_ARRAY_SIZE 34984
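The *_RT_OFFSET/*_RT_SIZE pairs index a flat runtime-init array of
RUNTIME_ARRAY_SIZE entries, which is why every offset after the inserted
QM_REG_WFQVPUPPERBOUND block shifts up by 512. A sketch of the indexing idiom,
with an illustrative array and helper rather than the driver's actual
runtime-init plumbing:

#include <stdint.h>

#define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
#define RUNTIME_ARRAY_SIZE 34984

static uint32_t rt_data[RUNTIME_ARRAY_SIZE];

/* Program the WFQ weight of one vport: the offset selects the register
 * block, the index selects the element within its 512-entry window.
 */
static void rt_set_vp_wfq_weight(unsigned int vp, uint32_t weight)
{
	if (vp < QM_REG_WFQVPWEIGHT_RT_SIZE)
		rt_data[QM_REG_WFQVPWEIGHT_RT_OFFSET + vp] = weight;
}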
/* Init Callbacks */
#define DMAE_READY_CB 0
@@ -4850,216 +3245,216 @@ struct xstorm_eth_conn_st_ctx {
__le32 reserved[60];
};
-struct e4_xstorm_eth_conn_ag_ctx {
+struct xstorm_eth_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
__le16 e5_reserved1;
@@ -5118,37 +3513,37 @@ struct ystorm_eth_conn_st_ctx {
__le32 reserved[8];
};
-struct e4_ystorm_eth_conn_ag_ctx {
+struct ystorm_eth_conn_ag_ctx {
u8 byte0;
u8 state;
u8 flags0;
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 tx_q0_int_coallecing_timeset;
u8 byte3;
__le16 word0;
@@ -5162,89 +3557,89 @@ struct e4_ystorm_eth_conn_ag_ctx {
__le32 reg3;
};
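The MASK/SHIFT pairs in these aggregative-context structs are consumed through
the driver's generic field accessors; the local macros below restate that
idiom so the example is self-contained (only the two TX_BD_CONS_UPD_CF defines
are taken from the struct above).

#include <stdint.h>

#define CTX_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define CTX_SET_FIELD(value, name, flag)				\
	do {								\
		(value) &= ~(name##_MASK << name##_SHIFT);		\
		(value) |= ((flag) & name##_MASK) << name##_SHIFT;	\
	} while (0)

#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2

int main(void)
{
	uint8_t flags0 = 0;

	/* Set and read back the 2-bit TX BD-consumer-update CF field */
	CTX_SET_FIELD(flags0, YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF, 2);
	return CTX_GET_FIELD(flags0,
			     YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF) == 2 ? 0 : 1;
}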
-struct e4_tstorm_eth_conn_ag_ctx {
+struct tstorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -5266,63 +3661,63 @@ struct e4_tstorm_eth_conn_ag_ctx {
__le32 reg10;
};
-struct e4_ustorm_eth_conn_ag_ctx {
+struct ustorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -5346,16 +3741,16 @@ struct mstorm_eth_conn_st_ctx {
};
/* eth connection context */
-struct e4_eth_conn_context {
+struct eth_conn_context {
struct tstorm_eth_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct pstorm_eth_conn_st_ctx pstorm_st_context;
struct xstorm_eth_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
struct ystorm_eth_conn_st_ctx ystorm_st_context;
- struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
- struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
+ struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
struct ustorm_eth_conn_st_ctx ustorm_st_context;
struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
@@ -5512,7 +3907,7 @@ enum eth_ramrod_cmd_id {
ETH_RAMROD_RX_ADD_UDP_FILTER,
ETH_RAMROD_RX_DELETE_UDP_FILTER,
ETH_RAMROD_RX_CREATE_GFT_ACTION,
- ETH_RAMROD_GFT_UPDATE_FILTER,
+ ETH_RAMROD_RX_UPDATE_GFT_FILTER,
ETH_RAMROD_TX_QUEUE_UPDATE,
ETH_RAMROD_RGFS_FILTER_ADD,
ETH_RAMROD_RGFS_FILTER_DEL,
@@ -5596,10 +3991,12 @@ struct eth_vport_rss_config {
u8 update_rss_ind_table;
u8 update_rss_capabilities;
u8 tbl_size;
- __le32 reserved2[2];
+ u8 ind_table_mask_valid;
+ u8 reserved2[3];
__le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+ __le32 ind_table_mask[ETH_RSS_IND_TABLE_MASK_SIZE_REGS];
__le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
- __le32 reserved3[2];
+ __le32 reserved3;
};
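The reworked tail of eth_vport_rss_config trades reserved space for a
per-entry update mask next to the indirection table. A sketch, assuming each
bit of ind_table_mask[] selects one indirection-table entry to apply (the
patch adds the field and ind_table_mask_valid but does not spell the bit
semantics out here, so that mapping is an assumption):

#include <stdint.h>

/* Flag indirection-table entry 'entry' for update: one bit per entry,
 * packed 32 entries per mask register.
 */
static void mark_ind_table_entry(uint32_t *ind_table_mask, unsigned int entry)
{
	ind_table_mask[entry / 32] |= 1u << (entry % 32);
}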
/* eth vport RSS mode */
@@ -5674,8 +4071,20 @@ enum gft_filter_update_action {
MAX_GFT_FILTER_UPDATE_ACTION
};
+/* Ramrod data for rx create gft action */
+struct rx_create_gft_action_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+/* Ramrod data for rx create openflow action */
+struct rx_create_openflow_action_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
/* Ramrod data for rx add openflow filter */
-struct rx_add_openflow_filter_data {
+struct rx_openflow_filter_ramrod_data {
__le16 action_icid;
u8 priority;
u8 reserved0;
@@ -5698,18 +4107,6 @@ struct rx_add_openflow_filter_data {
__le16 l4_src_port;
};
-/* Ramrod data for rx create gft action */
-struct rx_create_gft_action_data {
- u8 vport_id;
- u8 reserved[7];
-};
-
-/* Ramrod data for rx create openflow action */
-struct rx_create_openflow_action_data {
- u8 vport_id;
- u8 reserved[7];
-};
-
/* Ramrod data for rx queue start ramrod */
struct rx_queue_start_ramrod_data {
__le16 rx_queue_id;
@@ -5768,7 +4165,7 @@ struct rx_queue_update_ramrod_data {
};
/* Ramrod data for rx Add UDP Filter */
-struct rx_udp_filter_data {
+struct rx_udp_filter_ramrod_data {
__le16 action_icid;
__le16 vlan_id;
u8 ip_type;
@@ -5784,7 +4181,7 @@ struct rx_udp_filter_data {
/* Add or delete a GFT filter - the filter is a packet header of the packet
 * type that should pass a certain FW flow.
*/
-struct rx_update_gft_filter_data {
+struct rx_update_gft_filter_ramrod_data {
struct regpair pkt_hdr_addr;
__le16 pkt_hdr_length;
__le16 action_icid;
@@ -5824,7 +4221,8 @@ struct tx_queue_start_ramrod_data {
u8 pxp_tph_valid_bd;
u8 pxp_tph_valid_pkt;
__le16 pxp_st_index;
- __le16 comp_agg_size;
+ u8 comp_agg_size;
+ u8 reserved3;
__le16 queue_zone_id;
__le16 reserved2;
__le16 pbl_size;
@@ -5945,7 +4343,12 @@ struct vport_update_ramrod_data_cmn {
u8 ctl_frame_ethtype_check_en;
u8 update_in_to_in_pri_map_mode;
u8 in_to_in_pri_map[8];
- u8 reserved[6];
+ u8 update_tx_dst_port_mode_flg;
+ u8 tx_dst_port_mode_config;
+ u8 dst_vport_id;
+ u8 tx_dst_port_mode;
+ u8 dst_vport_id_valid;
+ u8 reserved[1];
};
struct vport_update_ramrod_mcast {
@@ -5964,7 +4367,7 @@ struct vport_update_ramrod_data {
struct eth_vport_rss_config rss_config;
};
-struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
+struct xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
u8 reserved0;
u8 state;
u8 flags0;
@@ -6193,253 +4596,253 @@ struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
__le32 reg4;
};
-struct e4_mstorm_eth_conn_ag_ctx {
+struct mstorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
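
The flags bytes in these aggregation contexts are never manipulated directly; each sub-field is read and written through its _MASK/_SHIFT pair. A minimal sketch of that access pattern is shown below; the helper definitions mirror the shape of the qed common-HSI GET_FIELD/SET_FIELD accessors and are included here only for illustration.

/* Sketch of the *_MASK/*_SHIFT access pattern (illustrative helpers). */
#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & (name##_MASK))

#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~((name##_MASK) << (name##_SHIFT));		\
		(value) |= (((u64)(flag)) & (u64)(name##_MASK))		\
			   << (name##_SHIFT);				\
	} while (0)

static void mstorm_flags_example(struct mstorm_eth_conn_ag_ctx *ctx)
{
	u8 cf0;

	/* Enable rule 0 (bit 3 of flags1), then read CF0 (bits 2-3 of flags0). */
	SET_FIELD(ctx->flags1, MSTORM_ETH_CONN_AG_CTX_RULE0EN, 1);
	cf0 = GET_FIELD(ctx->flags0, MSTORM_ETH_CONN_AG_CTX_CF0);
	(void)cf0;
}
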
-struct e4_xstorm_eth_hw_conn_ag_ctx {
+struct xstorm_eth_hw_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
__le16 e5_reserved1;
@@ -6479,7 +4882,6 @@ struct gft_cam_line_mapped {
#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
};
-
/* Used in gft_profile_key: Indication for ip version */
enum gft_profile_ip_version {
GFT_PROFILE_IPV4 = 0,
@@ -6640,49 +5042,49 @@ struct ystorm_rdma_task_st_ctx {
struct regpair temp[4];
};
-struct e4_ystorm_rdma_task_ag_ctx {
+struct ystorm_rdma_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 msem_ctx_upd_seq;
u8 flags0;
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
u8 flags1;
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
__le32 mw_cnt_or_qp_id;
u8 ref_cnt_seq;
@@ -6696,49 +5098,49 @@ struct e4_ystorm_rdma_task_ag_ctx {
__le32 fbo_hi;
};
-struct e4_mstorm_rdma_task_ag_ctx {
+struct mstorm_rdma_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
u8 flags1;
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
__le32 mw_cnt_or_qp_id;
u8 ref_cnt_seq;
@@ -6762,56 +5164,56 @@ struct ustorm_rdma_task_st_ctx {
struct regpair temp[6];
};
-struct e4_ustorm_rdma_task_ag_ctx {
+struct ustorm_rdma_task_ag_ctx {
u8 reserved;
u8 state;
__le16 icid;
u8 flags0;
-#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
u8 flags1;
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 dif_rxmit_cons;
@@ -6828,16 +5230,853 @@ struct e4_ustorm_rdma_task_ag_ctx {
};
/* RDMA task context */
-struct e4_rdma_task_context {
+struct rdma_task_context {
struct ystorm_rdma_task_st_ctx ystorm_st_context;
- struct e4_ystorm_rdma_task_ag_ctx ystorm_ag_context;
+ struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
struct tdif_task_context tdif_context;
- struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
+ struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
struct mstorm_rdma_task_st_ctx mstorm_st_context;
struct rdif_task_context rdif_context;
struct ustorm_rdma_task_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
- struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
+ struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
+#define TOE_MAX_RAMROD_PER_PF 8
+#define TOE_TX_PAGE_SIZE_BYTES 4096
+#define TOE_GRQ_PAGE_SIZE_BYTES 4096
+#define TOE_RX_CQ_PAGE_SIZE_BYTES 4096
+
+#define TOE_RX_MAX_RSS_CHAINS 64
+#define TOE_TX_MAX_TSS_CHAINS 64
+#define TOE_RSS_INDIRECTION_TABLE_SIZE 128
+
+/* The toe storm context of Mstorm */
+struct mstorm_toe_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+/* The toe storm context of Pstorm */
+struct pstorm_toe_conn_st_ctx {
+ __le32 reserved[36];
+};
+
+/* The toe storm context of Ystorm */
+struct ystorm_toe_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* The toe storm context of Xstorm */
+struct xstorm_toe_conn_st_ctx {
+ __le32 reserved[44];
+};
+
+struct ystorm_toe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define YSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_MASK 0x3
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_SHIFT 4
+#define YSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 0
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_SHIFT 1
+#define YSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_SHIFT 3
+#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_SHIFT 7
+ u8 completion_opcode;
+ u8 byte3;
+ __le16 word0;
+ __le32 rel_seq;
+ __le32 rel_seq_threshold;
+ __le16 app_prod;
+ __le16 app_cons;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+struct xstorm_toe_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_TOE_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_BIT15_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT15_SHIFT 7
+ u8 flags2;
+#define XSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+ u8 flags3;
+#define XSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_TOE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_TOE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+ u8 flags7;
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_TOE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_TOE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ __le16 word2;
+ __le16 word3;
+ __le16 bd_prod;
+ __le16 word5;
+ __le16 word6;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 more_to_send_seq;
+ __le32 local_adv_wnd_seq;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 e5_reserved;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+};
+
+struct tstorm_toe_conn_ag_ctx {
+ u8 reserved0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_TOE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_TOE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_SHIFT 6
+ u8 flags1;
+#define TSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+};
+
+struct ustorm_toe_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_SHIFT 6
+ u8 flags1;
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 3
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 5
+#define USTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+/* The toe storm context of Tstorm */
+struct tstorm_toe_conn_st_ctx {
+ __le32 reserved[16];
+};
+
+/* The toe storm context of Ustorm */
+struct ustorm_toe_conn_st_ctx {
+ __le32 reserved[52];
+};
+
+/* toe connection context */
+struct toe_conn_context {
+ struct ystorm_toe_conn_st_ctx ystorm_st_context;
+ struct pstorm_toe_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_toe_conn_st_ctx xstorm_st_context;
+ struct regpair xstorm_st_padding[2];
+ struct ystorm_toe_conn_ag_ctx ystorm_ag_context;
+ struct xstorm_toe_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_toe_conn_ag_ctx tstorm_ag_context;
+ struct regpair tstorm_ag_padding[2];
+ struct timers_context timer_context;
+ struct ustorm_toe_conn_ag_ctx ustorm_ag_context;
+ struct tstorm_toe_conn_st_ctx tstorm_st_context;
+ struct mstorm_toe_conn_st_ctx mstorm_st_context;
+ struct ustorm_toe_conn_st_ctx ustorm_st_context;
+};
+
+/* toe init ramrod header */
+struct toe_init_ramrod_header {
+ u8 first_rss;
+ u8 num_rss;
+ u8 reserved[6];
+};
+
+/* toe pf init parameters */
+struct toe_pf_init_params {
+ __le32 push_timeout;
+ __le16 grq_buffer_size;
+ __le16 grq_sb_id;
+ u8 grq_sb_index;
+ u8 max_seg_retransmit;
+ u8 doubt_reachability;
+ u8 ll2_rx_queue_id;
+ __le16 grq_fetch_threshold;
+ u8 reserved1[2];
+ struct regpair grq_page_addr;
+};
+
+/* toe tss parameters */
+struct toe_tss_params {
+ struct regpair curr_page_addr;
+ struct regpair next_page_addr;
+ u8 reserved0;
+ u8 status_block_index;
+ __le16 status_block_id;
+ __le16 reserved1[2];
+};
+
+/* toe rss parameters */
+struct toe_rss_params {
+ struct regpair curr_page_addr;
+ struct regpair next_page_addr;
+ u8 reserved0;
+ u8 status_block_index;
+ __le16 status_block_id;
+ __le16 reserved1[2];
+};
+
+/* toe init ramrod data */
+struct toe_init_ramrod_data {
+ struct toe_init_ramrod_header hdr;
+ struct tcp_init_params tcp_params;
+ struct toe_pf_init_params pf_params;
+ struct toe_tss_params tss_params[TOE_TX_MAX_TSS_CHAINS];
+ struct toe_rss_params rss_params[TOE_RX_MAX_RSS_CHAINS];
+};
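
The init ramrod gathers the header, TCP parameters, PF parameters and the per-chain TSS/RSS arrays above into a single DMA-able blob. A kernel-style fill sketch; the helper name and the parameter values are illustrative assumptions:

    static void toe_init_ramrod_fill_example(struct toe_init_ramrod_data *p,
                                             u8 first_rss, u8 num_rss)
    {
            memset(p, 0, sizeof(*p));
            p->hdr.first_rss = first_rss;   /* first RSS chain for this PF */
            p->hdr.num_rss = num_rss;       /* number of RSS chains to init */

            /* Example PF parameters; real values are driver policy. */
            p->pf_params.push_timeout = cpu_to_le32(1000);
            p->pf_params.grq_buffer_size = cpu_to_le16(4096);
            p->pf_params.ll2_rx_queue_id = 0;
    }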
+
+/* toe offload parameters */
+struct toe_offload_params {
+ struct regpair tx_bd_page_addr;
+ struct regpair tx_app_page_addr;
+ __le32 more_to_send_seq;
+ __le16 rcv_indication_size;
+ u8 rss_tss_id;
+ u8 ignore_grq_push;
+ struct regpair rx_db_data_ptr;
+};
+
+/* TOE offload ramrod data - DMAed by firmware */
+struct toe_offload_ramrod_data {
+ struct tcp_offload_params tcp_ofld_params;
+ struct toe_offload_params toe_ofld_params;
+};
+
+/* TOE ramrod command IDs */
+enum toe_ramrod_cmd_id {
+ TOE_RAMROD_UNUSED,
+ TOE_RAMROD_FUNC_INIT,
+ TOE_RAMROD_INITATE_OFFLOAD,
+ TOE_RAMROD_FUNC_CLOSE,
+ TOE_RAMROD_SEARCHER_DELETE,
+ TOE_RAMROD_TERMINATE,
+ TOE_RAMROD_QUERY,
+ TOE_RAMROD_UPDATE,
+ TOE_RAMROD_EMPTY,
+ TOE_RAMROD_RESET_SEND,
+ TOE_RAMROD_INVALIDATE,
+ MAX_TOE_RAMROD_CMD_ID
+};
+
+/* Toe RQ buffer descriptor */
+struct toe_rx_bd {
+ struct regpair addr;
+ __le16 size;
+ __le16 flags;
+#define TOE_RX_BD_START_MASK 0x1
+#define TOE_RX_BD_START_SHIFT 0
+#define TOE_RX_BD_END_MASK 0x1
+#define TOE_RX_BD_END_SHIFT 1
+#define TOE_RX_BD_NO_PUSH_MASK 0x1
+#define TOE_RX_BD_NO_PUSH_SHIFT 2
+#define TOE_RX_BD_SPLIT_MASK 0x1
+#define TOE_RX_BD_SPLIT_SHIFT 3
+#define TOE_RX_BD_RESERVED0_MASK 0xFFF
+#define TOE_RX_BD_RESERVED0_SHIFT 4
+ __le32 reserved1;
+};
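
Posting an RQ buffer descriptor means programming the DMA address, the buffer length, and the START/END framing bits. A sketch using the stand-in macros from the first example, assuming the common HSI regpair {lo, hi} layout and the kernel's upper_32_bits()/lower_32_bits() helpers:

    static void toe_rx_bd_fill_example(struct toe_rx_bd *bd, dma_addr_t buf,
                                       u16 len, bool first, bool last)
    {
            u16 flags = 0;

            bd->addr.hi = cpu_to_le32(upper_32_bits(buf));
            bd->addr.lo = cpu_to_le32(lower_32_bits(buf));
            bd->size = cpu_to_le16(len);

            QED_SET_FIELD(flags, TOE_RX_BD_START, first ? 1 : 0);
            QED_SET_FIELD(flags, TOE_RX_BD_END, last ? 1 : 0);
            bd->flags = cpu_to_le16(flags);
    }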
+
+/* TOE RX completion queue opcodes (opcode 0 is illegal) */
+enum toe_rx_cmp_opcode {
+ TOE_RX_CMP_OPCODE_GA = 1,
+ TOE_RX_CMP_OPCODE_GR = 2,
+ TOE_RX_CMP_OPCODE_GNI = 3,
+ TOE_RX_CMP_OPCODE_GAIR = 4,
+ TOE_RX_CMP_OPCODE_GAIL = 5,
+ TOE_RX_CMP_OPCODE_GRI = 6,
+ TOE_RX_CMP_OPCODE_GJ = 7,
+ TOE_RX_CMP_OPCODE_DGI = 8,
+ TOE_RX_CMP_OPCODE_CMP = 9,
+ TOE_RX_CMP_OPCODE_REL = 10,
+ TOE_RX_CMP_OPCODE_SKP = 11,
+ TOE_RX_CMP_OPCODE_URG = 12,
+ TOE_RX_CMP_OPCODE_RT_TO = 13,
+ TOE_RX_CMP_OPCODE_KA_TO = 14,
+ TOE_RX_CMP_OPCODE_MAX_RT = 15,
+ TOE_RX_CMP_OPCODE_DBT_RE = 16,
+ TOE_RX_CMP_OPCODE_SYN = 17,
+ TOE_RX_CMP_OPCODE_OPT_ERR = 18,
+ TOE_RX_CMP_OPCODE_FW2_TO = 19,
+ TOE_RX_CMP_OPCODE_2WY_CLS = 20,
+ TOE_RX_CMP_OPCODE_RST_RCV = 21,
+ TOE_RX_CMP_OPCODE_FIN_RCV = 22,
+ TOE_RX_CMP_OPCODE_FIN_UPL = 23,
+ TOE_RX_CMP_OPCODE_INIT = 32,
+ TOE_RX_CMP_OPCODE_RSS_UPDATE = 33,
+ TOE_RX_CMP_OPCODE_CLOSE = 34,
+ TOE_RX_CMP_OPCODE_INITIATE_OFFLOAD = 80,
+ TOE_RX_CMP_OPCODE_SEARCHER_DELETE = 81,
+ TOE_RX_CMP_OPCODE_TERMINATE = 82,
+ TOE_RX_CMP_OPCODE_QUERY = 83,
+ TOE_RX_CMP_OPCODE_RESET_SEND = 84,
+ TOE_RX_CMP_OPCODE_INVALIDATE = 85,
+ TOE_RX_CMP_OPCODE_EMPTY = 86,
+ TOE_RX_CMP_OPCODE_UPDATE = 87,
+ MAX_TOE_RX_CMP_OPCODE
+};
+
+/* TOE rx out-of-order (ooo) completion data */
+struct toe_rx_cqe_ooo_params {
+ __le32 nbytes;
+ __le16 grq_buff_id;
+ u8 isle_num;
+ u8 reserved0;
+};
+
+/* TOE rx in-order completion data */
+struct toe_rx_cqe_in_order_params {
+ __le32 nbytes;
+ __le16 grq_buff_id;
+ __le16 reserved1;
+};
+
+/* Union for TOE rx completion data */
+union toe_rx_cqe_data_union {
+ struct toe_rx_cqe_ooo_params ooo_params;
+ struct toe_rx_cqe_in_order_params in_order_params;
+ struct regpair raw_data;
+};
+
+/* TOE rx completion element */
+struct toe_rx_cqe {
+ __le16 icid;
+ u8 completion_opcode;
+ u8 reserved0;
+ __le32 reserved1;
+ union toe_rx_cqe_data_union data;
+};
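
Consumers dispatch on completion_opcode and then read the union member that matches it; the opcode-to-member mapping is firmware-defined and only assumed below, and the handler names are hypothetical:

    static void toe_rx_cqe_handle_example(const struct toe_rx_cqe *cqe)
    {
            u16 icid = le16_to_cpu(cqe->icid);      /* owning connection */

            switch (cqe->completion_opcode) {
            case TOE_RX_CMP_OPCODE_CMP:
                    /* Assumed: CMP carries in-order placement data. */
                    handle_rx_data_example(icid,
                            le32_to_cpu(cqe->data.in_order_params.nbytes));
                    break;
            case TOE_RX_CMP_OPCODE_RST_RCV:
                    handle_reset_example(icid);     /* peer sent a RST */
                    break;
            default:
                    break;
            }
    }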
+
+/* toe RX doorbell data */
+struct toe_rx_db_data {
+ __le32 local_adv_wnd_seq;
+ __le32 reserved[3];
+};
+
+/* Toe GRQ buffer descriptor */
+struct toe_rx_grq_bd {
+ struct regpair addr;
+ __le16 buff_id;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+/* Toe transmission application buffer descriptor */
+struct toe_tx_app_buff_desc {
+ __le32 next_buffer_start_seq;
+ __le32 reserved;
+};
+
+/* Toe transmission application buffer descriptor page pointer */
+struct toe_tx_app_buff_page_pointer {
+ struct regpair next_page_addr;
+};
+
+/* Toe transmission buffer descriptor */
+struct toe_tx_bd {
+ struct regpair addr;
+ __le16 size;
+ __le16 flags;
+#define TOE_TX_BD_PUSH_MASK 0x1
+#define TOE_TX_BD_PUSH_SHIFT 0
+#define TOE_TX_BD_NOTIFY_MASK 0x1
+#define TOE_TX_BD_NOTIFY_SHIFT 1
+#define TOE_TX_BD_LARGE_IO_MASK 0x1
+#define TOE_TX_BD_LARGE_IO_SHIFT 2
+#define TOE_TX_BD_BD_CONS_MASK 0x1FFF
+#define TOE_TX_BD_BD_CONS_SHIFT 3
+ __le32 next_bd_start_seq;
+};
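
TX buffer descriptors follow the same pattern, with the consumer snapshot packed into the upper 13 flag bits; a sketch, again with the stand-in macros:

    static void toe_tx_bd_fill_example(struct toe_tx_bd *bd, dma_addr_t buf,
                                       u16 len, u16 bd_cons, bool push)
    {
            u16 flags = 0;

            bd->addr.hi = cpu_to_le32(upper_32_bits(buf));
            bd->addr.lo = cpu_to_le32(lower_32_bits(buf));
            bd->size = cpu_to_le16(len);

            QED_SET_FIELD(flags, TOE_TX_BD_PUSH, push ? 1 : 0);
            QED_SET_FIELD(flags, TOE_TX_BD_BD_CONS, bd_cons); /* 13 bits */
            bd->flags = cpu_to_le16(flags);
    }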
+
+/* TOE TX completion opcodes */
+enum toe_tx_cmp_opcode {
+ TOE_TX_CMP_OPCODE_DATA,
+ TOE_TX_CMP_OPCODE_TERMINATE,
+ TOE_TX_CMP_OPCODE_EMPTY,
+ TOE_TX_CMP_OPCODE_RESET_SEND,
+ TOE_TX_CMP_OPCODE_INVALIDATE,
+ TOE_TX_CMP_OPCODE_RST_RCV,
+ MAX_TOE_TX_CMP_OPCODE
+};
+
+/* Toe transmission completion element */
+struct toe_tx_cqe {
+ __le16 icid;
+ u8 opcode;
+ u8 reserved;
+ __le32 size;
+};
+
+/* Toe transmission page pointer bd */
+struct toe_tx_page_pointer_bd {
+ struct regpair next_page_addr;
+ struct regpair prev_page_addr;
+};
+
+/* Toe transmission completion element page pointer */
+struct toe_tx_page_pointer_cqe {
+ struct regpair next_page_addr;
+};
+
+/* toe update parameters */
+struct toe_update_params {
+ __le16 flags;
+#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_MASK 0x1
+#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_SHIFT 0
+#define TOE_UPDATE_PARAMS_RESERVED_MASK 0x7FFF
+#define TOE_UPDATE_PARAMS_RESERVED_SHIFT 1
+ __le16 rcv_indication_size;
+ __le16 reserved1[2];
+};
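
The update parameters use a changed-bit convention: by the field naming, firmware presumably applies a value only when its *_CHANGED bit is set. A sketch of changing the receive indication size:

    static void toe_update_fill_example(struct toe_update_params *p,
                                        u16 new_rcv_indication_size)
    {
            u16 flags = 0;

            QED_SET_FIELD(flags,
                          TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED, 1);
            p->flags = cpu_to_le16(flags);
            p->rcv_indication_size = cpu_to_le16(new_rcv_indication_size);
            p->reserved1[0] = 0;
            p->reserved1[1] = 0;
    }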
+
+/* TOE update ramrod data - DMAed by firmware */
+struct toe_update_ramrod_data {
+ struct tcp_update_params tcp_upd_params;
+ struct toe_update_params toe_upd_params;
+};
+
+struct mstorm_toe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+/* TOE doorbell data */
+struct toe_db_data {
+ u8 params;
+#define TOE_DB_DATA_DEST_MASK 0x3
+#define TOE_DB_DATA_DEST_SHIFT 0
+#define TOE_DB_DATA_AGG_CMD_MASK 0x3
+#define TOE_DB_DATA_AGG_CMD_SHIFT 2
+#define TOE_DB_DATA_BYPASS_EN_MASK 0x1
+#define TOE_DB_DATA_BYPASS_EN_SHIFT 4
+#define TOE_DB_DATA_RESERVED_MASK 0x1
+#define TOE_DB_DATA_RESERVED_SHIFT 5
+#define TOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define TOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 bd_prod;
};
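
Ringing the TX doorbell packs destination, aggregation command and value-select into the params byte, then publishes the BD producer. The destination/agg-command encodings live in the common HSI outside this hunk, so the numeric values below are assumptions:

    static void toe_db_data_fill_example(struct toe_db_data *db, u16 bd_prod)
    {
            u8 params = 0;

            QED_SET_FIELD(params, TOE_DB_DATA_DEST, 2);        /* assumed encoding */
            QED_SET_FIELD(params, TOE_DB_DATA_AGG_CMD, 1);     /* assumed encoding */
            QED_SET_FIELD(params, TOE_DB_DATA_AGG_VAL_SEL, 0); /* assumed encoding */

            db->params = params;
            db->agg_flags = 0;
            db->bd_prod = cpu_to_le16(bd_prod); /* new BD producer index */
    }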
/* rdma function init ramrod data */
@@ -6911,6 +6150,8 @@ enum rdma_event_opcode {
RDMA_EVENT_CREATE_SRQ,
RDMA_EVENT_MODIFY_SRQ,
RDMA_EVENT_DESTROY_SRQ,
+ RDMA_EVENT_START_NAMESPACE_TRACKING,
+ RDMA_EVENT_STOP_NAMESPACE_TRACKING,
MAX_RDMA_EVENT_OPCODE
};
@@ -6935,18 +6176,33 @@ struct rdma_init_func_hdr {
u8 relaxed_ordering;
__le16 first_reg_srq_id;
__le32 reg_srq_base_addr;
- u8 searcher_mode;
- u8 pvrdma_mode;
+ u8 flags;
+#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_MASK 0x1
+#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_SHIFT 0
+#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_MASK 0x1
+#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_SHIFT 1
+#define RDMA_INIT_FUNC_HDR_DPT_MODE_MASK 0x1
+#define RDMA_INIT_FUNC_HDR_DPT_MODE_SHIFT 2
+#define RDMA_INIT_FUNC_HDR_RESERVED0_MASK 0x1F
+#define RDMA_INIT_FUNC_HDR_RESERVED0_SHIFT 3
+ u8 dpt_byte_threshold_log;
+ u8 dpt_common_queue_id;
u8 max_num_ns_log;
- u8 reserved;
};
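
This hunk folds the old standalone searcher_mode/pvrdma_mode bytes into packed flag bits, which frees room for the new DPT fields; callers now go through the field macros instead of direct byte stores. A sketch with the stand-ins from the first example:

    static void rdma_init_hdr_modes_example(struct rdma_init_func_hdr *hdr,
                                            bool searcher, bool pvrdma,
                                            bool dpt)
    {
            u8 flags = 0;

            QED_SET_FIELD(flags, RDMA_INIT_FUNC_HDR_SEARCHER_MODE, searcher);
            QED_SET_FIELD(flags, RDMA_INIT_FUNC_HDR_PVRDMA_MODE, pvrdma);
            QED_SET_FIELD(flags, RDMA_INIT_FUNC_HDR_DPT_MODE, dpt);
            hdr->flags = flags;
    }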
/* rdma function init ramrod data */
struct rdma_init_func_ramrod_data {
struct rdma_init_func_hdr params_header;
+ struct rdma_cnq_params dptq_params;
struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
};
+/* rdma namespace tracking ramrod data */
+struct rdma_namespace_tracking_ramrod_data {
+ u8 name_space;
+ u8 reserved[7];
+};
+
/* RDMA ramrod command IDs */
enum rdma_ramrod_cmd_id {
RDMA_RAMROD_UNUSED,
@@ -6960,6 +6216,8 @@ enum rdma_ramrod_cmd_id {
RDMA_RAMROD_CREATE_SRQ,
RDMA_RAMROD_MODIFY_SRQ,
RDMA_RAMROD_DESTROY_SRQ,
+ RDMA_RAMROD_START_NS_TRACKING,
+ RDMA_RAMROD_STOP_NS_TRACKING,
MAX_RDMA_RAMROD_CMD_ID
};
@@ -7093,73 +6351,73 @@ struct rdma_xrc_srq_context {
struct regpair temp[9];
};
-struct e4_tstorm_rdma_task_ag_ctx {
+struct tstorm_rdma_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
@@ -7172,63 +6430,63 @@ struct e4_tstorm_rdma_task_ag_ctx {
__le32 reg2;
};
-struct e4_ustorm_rdma_conn_ag_ctx {
+struct ustorm_rdma_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 nvmf_only;
__le16 conn_dpi;
@@ -7241,214 +6499,214 @@ struct e4_ustorm_rdma_conn_ag_ctx {
__le16 word3;
};
-struct e4_xstorm_roce_conn_ag_ctx {
+struct xstorm_roce_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -7470,89 +6728,89 @@ struct e4_xstorm_roce_conn_ag_ctx {
__le32 reg6;
};
-struct e4_tstorm_roce_conn_ag_ctx {
+struct tstorm_roce_conn_ag_ctx {
u8 reserved0;
u8 byte1;
u8 flags0;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -7605,15 +6863,15 @@ struct ustorm_roce_conn_st_ctx {
};
/* roce connection context */
-struct e4_roce_conn_context {
+struct roce_conn_context {
struct ystorm_roce_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_roce_conn_st_ctx pstorm_st_context;
struct xstorm_roce_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_roce_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_roce_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_roce_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_roce_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
- struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
struct tstorm_roce_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct mstorm_roce_conn_st_ctx mstorm_st_context;
@@ -7681,8 +6939,10 @@ struct roce_create_qp_req_ramrod_data {
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_SHIFT 1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x3F
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1F
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 3
u8 name_space;
u8 reserved3[3];
__le16 regular_latency_phy_queue;
@@ -7714,8 +6974,10 @@ struct roce_create_qp_resp_ramrod_data {
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_MASK 0x1
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_SHIFT 17
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x3FFF
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 18
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 18
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x1FFF
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 19
__le16 xrc_domain;
u8 max_ird;
u8 traffic_class;
@@ -7752,10 +7014,85 @@ struct roce_create_qp_resp_ramrod_data {
u8 reserved3[3];
};
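
Both create-QP ramrods above carve a FORCE_LB bit out of their reserved space; judging by the name it forces loopback treatment for the QP's traffic, though that reading is an assumption. The struct member holding the requester bits sits outside this hunk, so the sketch operates on a detached flags value:

    static u8 roce_req_force_lb_example(u8 flags)
    {
            /* Assumed semantics: steer this QP's traffic to loopback. */
            QED_SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB, 1);
            return flags;
    }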
+/* RoCE Create Suspended QP requester runtime ramrod data */
+struct roce_create_suspended_qp_req_runtime_ramrod_data {
+ __le32 flags;
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_MASK 0x1
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_SHIFT 0
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_MASK \
+ 0x7FFFFFFF
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_SHIFT 1
+ __le32 send_msg_psn;
+ __le32 inflight_sends;
+ __le32 ssn;
+};
+
+/* RoCE Create Suspended QP requester ramrod data */
+struct roce_create_suspended_qp_req_ramrod_data {
+ struct roce_create_qp_req_ramrod_data qp_params;
+ struct roce_create_suspended_qp_req_runtime_ramrod_data
+ qp_runtime_params;
+};
+
+/* RoCE Create Suspended QP responder runtime params */
+struct roce_create_suspended_qp_resp_runtime_params {
+ __le32 flags;
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2
+ __le32 receive_msg_psn;
+ __le32 inflight_receives;
+ __le32 rmsn;
+ __le32 rdma_key;
+ struct regpair rdma_va;
+ __le32 rdma_length;
+ __le32 num_rdb_entries;
+	__le32 reserved;
+};
+
+/* RoCE RDB array entry */
+struct roce_resp_qp_rdb_entry {
+ struct regpair atomic_data;
+ struct regpair va;
+ __le32 psn;
+ __le32 rkey;
+ __le32 byte_count;
+ u8 op_type;
+ u8 reserved[3];
+};
+
+/* RoCE Create Suspended QP responder runtime ramrod data */
+struct roce_create_suspended_qp_resp_runtime_ramrod_data {
+ struct roce_create_suspended_qp_resp_runtime_params params;
+ struct roce_resp_qp_rdb_entry
+ rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE];
+};
+
+/* RoCE Create Suspended QP responder ramrod data */
+struct roce_create_suspended_qp_resp_ramrod_data {
+ struct roce_create_qp_resp_ramrod_data
+ qp_params;
+ struct roce_create_suspended_qp_resp_runtime_ramrod_data
+ qp_runtime_params;
+};
+
+/* RoCE create ud qp ramrod data */
+struct roce_create_ud_qp_ramrod_data {
+ __le16 local_mac_addr[3];
+ __le16 vlan_id;
+ __le32 src_qp_id;
+ u8 name_space;
+ u8 reserved[3];
+};
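
The UD-QP ramrod carries the station MAC as three little-endian 16-bit words; the byte order within each word is firmware-defined and only assumed in this sketch:

    static void roce_ud_qp_mac_example(struct roce_create_ud_qp_ramrod_data *p,
                                       const u8 mac[6])
    {
            int i;

            /* Assumed layout: two consecutive MAC bytes per LE16 word. */
            for (i = 0; i < 3; i++)
                    p->local_mac_addr[i] =
                            cpu_to_le16((mac[2 * i] << 8) | mac[2 * i + 1]);
    }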
+
/* roce DCQCN received statistics */
struct roce_dcqcn_received_stats {
struct regpair ecn_pkt_rcv;
struct regpair cnp_pkt_rcv;
+ struct regpair cnp_pkt_reject;
};
/* roce DCQCN sent statistics */
@@ -7787,6 +7124,12 @@ struct roce_destroy_qp_resp_ramrod_data {
__le32 reserved;
};
+/* RoCE destroy ud qp ramrod data */
+struct roce_destroy_ud_qp_ramrod_data {
+ __le32 src_qp_id;
+ __le32 reserved;
+};
+
/* roce error statistics */
struct roce_error_stats {
__le32 resp_remote_access_errors;
@@ -7809,13 +7152,21 @@ struct roce_events_stats {
/* roce slow path EQ cmd IDs */
enum roce_event_opcode {
- ROCE_EVENT_CREATE_QP = 11,
+ ROCE_EVENT_CREATE_QP = 13,
ROCE_EVENT_MODIFY_QP,
ROCE_EVENT_QUERY_QP,
ROCE_EVENT_DESTROY_QP,
ROCE_EVENT_CREATE_UD_QP,
ROCE_EVENT_DESTROY_UD_QP,
ROCE_EVENT_FUNC_UPDATE,
+ ROCE_EVENT_SUSPEND_QP,
+ ROCE_EVENT_QUERY_SUSPENDED_QP,
+ ROCE_EVENT_CREATE_SUSPENDED_QP,
+ ROCE_EVENT_RESUME_QP,
+ ROCE_EVENT_SUSPEND_UD_QP,
+ ROCE_EVENT_RESUME_UD_QP,
+ ROCE_EVENT_CREATE_SUSPENDED_UD_QP,
+ ROCE_EVENT_FLUSH_DPT_QP,
MAX_ROCE_EVENT_OPCODE
};
@@ -7843,6 +7194,18 @@ struct roce_init_func_ramrod_data {
struct roce_init_func_params roce;
};
+/* roce_ll2_cqe_data */
+struct roce_ll2_cqe_data {
+ u8 name_space;
+ u8 flags;
+#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_MASK 0x1
+#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_SHIFT 0
+#define ROCE_LL2_CQE_DATA_RESERVED0_MASK 0x7F
+#define ROCE_LL2_CQE_DATA_RESERVED0_SHIFT 1
+ u8 reserved1[2];
+ __le32 cid;
+};
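
LL2 CQEs can now report that the referenced QP was suspended; a one-line check with the stand-in accessor from the first example:

    static bool roce_ll2_cqe_qp_suspended_example(
                    const struct roce_ll2_cqe_data *d)
    {
            return QED_GET_FIELD(d->flags, ROCE_LL2_CQE_DATA_QP_SUSPENDED);
    }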
+
/* roce modify qp requester ramrod data */
struct roce_modify_qp_req_ramrod_data {
__le16 flags;
@@ -7870,8 +7233,10 @@ struct roce_modify_qp_req_ramrod_data {
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 13
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 14
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 15
u8 fields;
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
@@ -7917,8 +7282,10 @@ struct roce_modify_qp_resp_ramrod_data {
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 10
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 11
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0xF
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 12
u8 fields;
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
@@ -7969,18 +7336,84 @@ struct roce_query_qp_resp_ramrod_data {
struct regpair output_params_addr;
};

+/* RoCE Query Suspended QP requester output params */
+struct roce_query_suspended_qp_req_output_params {
+ __le32 psn;
+ __le32 flags;
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+ __le32 send_msg_psn;
+ __le32 inflight_sends;
+ __le32 ssn;
+ __le32 reserved;
+};
+
+/* RoCE Query Suspended QP requester ramrod data */
+struct roce_query_suspended_qp_req_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+/* RoCE Query Suspended QP responder runtime params */
+struct roce_query_suspended_qp_resp_runtime_params {
+ __le32 psn;
+ __le32 flags;
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2
+ __le32 receive_msg_psn;
+ __le32 inflight_receives;
+ __le32 rmsn;
+ __le32 rdma_key;
+ struct regpair rdma_va;
+ __le32 rdma_length;
+ __le32 num_rdb_entries;
+};
+
+/* RoCE Query Suspended QP responder output params */
+struct roce_query_suspended_qp_resp_output_params {
+ struct roce_query_suspended_qp_resp_runtime_params runtime_params;
+ struct roce_resp_qp_rdb_entry
+ rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE];
+};
+
+/* RoCE Query Suspended QP responder ramrod data */
+struct roce_query_suspended_qp_resp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
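Both query-suspended ramrods return their results indirectly: the host supplies a DMA-able output buffer and passes its address in output_params_addr. A sketch of the expected setup, assuming the driver's usual dma_alloc_coherent() and DMA_REGPAIR_LE() pattern (the function name and dev argument are illustrative):

static int
roce_alloc_query_resp_output(struct device *dev,
			     struct roce_query_suspended_qp_resp_ramrod_data *ramrod)
{
	struct roce_query_suspended_qp_resp_output_params *out;
	dma_addr_t out_phys;

	out = dma_alloc_coherent(dev, sizeof(*out), &out_phys, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	/* firmware picks up the buffer address from the ramrod regpair */
	DMA_REGPAIR_LE(ramrod->output_params_addr, out_phys);
	return 0;
}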
/* ROCE ramrod command IDs */
enum roce_ramrod_cmd_id {
- ROCE_RAMROD_CREATE_QP = 11,
+ ROCE_RAMROD_CREATE_QP = 13,
ROCE_RAMROD_MODIFY_QP,
ROCE_RAMROD_QUERY_QP,
ROCE_RAMROD_DESTROY_QP,
ROCE_RAMROD_CREATE_UD_QP,
ROCE_RAMROD_DESTROY_UD_QP,
ROCE_RAMROD_FUNC_UPDATE,
+ ROCE_RAMROD_SUSPEND_QP,
+ ROCE_RAMROD_QUERY_SUSPENDED_QP,
+ ROCE_RAMROD_CREATE_SUSPENDED_QP,
+ ROCE_RAMROD_RESUME_QP,
+ ROCE_RAMROD_SUSPEND_UD_QP,
+ ROCE_RAMROD_RESUME_UD_QP,
+ ROCE_RAMROD_CREATE_SUSPENDED_UD_QP,
+ ROCE_RAMROD_FLUSH_DPT_QP,
MAX_ROCE_RAMROD_CMD_ID
};

+/* ROCE RDB array entry type */
+enum roce_resp_qp_rdb_entry_type {
+ ROCE_QP_RDB_ENTRY_RDMA_RESPONSE = 0,
+ ROCE_QP_RDB_ENTRY_ATOMIC_RESPONSE = 1,
+ ROCE_QP_RDB_ENTRY_INVALID = 2,
+ MAX_ROCE_RESP_QP_RDB_ENTRY_TYPE
+};
+
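Each RDB entry reports its kind in op_type using the values above. A sketch of walking the array returned by a responder query, bounded by both num_rdb_entries and the page limit (function name illustrative):

static u32
roce_count_valid_rdb_entries(const struct roce_query_suspended_qp_resp_output_params *out)
{
	u32 n = le32_to_cpu(out->runtime_params.num_rdb_entries);
	u32 i, valid = 0;

	for (i = 0; i < n && i < RDMA_MAX_IRQ_ELEMS_IN_PAGE; i++)
		if (out->rdb_array_entries[i].op_type != ROCE_QP_RDB_ENTRY_INVALID)
			valid++;

	return valid;
}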
/* RoCE update func params */
struct roce_update_func_params {
u8 cnp_vlan_priority;
@@ -7995,7 +7428,7 @@ struct roce_update_func_params {
__le32 cnp_send_timeout;
};

-struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
u8 reserved0;
u8 state;
u8 flags0;
@@ -8222,200 +7655,200 @@ struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
__le32 reg4;
};

-struct e4_mstorm_roce_conn_ag_ctx {
+struct mstorm_roce_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};

-struct e4_mstorm_roce_req_conn_ag_ctx {
+struct mstorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};

-struct e4_mstorm_roce_resp_conn_ag_ctx {
+struct mstorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};

-struct e4_tstorm_roce_req_conn_ag_ctx {
+struct tstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 dif_rxmit_cnt;
__le32 snd_nxt_psn;
__le32 snd_max_psn;
@@ -8437,89 +7870,89 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
__le32 reg10;
};

-struct e4_tstorm_roce_resp_conn_ag_ctx {
+struct tstorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 state;
u8 flags0;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 psn_and_rxmit_id_echo;
__le32 reg1;
__le32 reg2;
@@ -8541,63 +7974,63 @@ struct e4_tstorm_roce_resp_conn_ag_ctx {
__le32 reg10;
};

-struct e4_ustorm_roce_req_conn_ag_ctx {
+struct ustorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8610,63 +8043,63 @@ struct e4_ustorm_roce_req_conn_ag_ctx {
__le16 word3;
};

-struct e4_ustorm_roce_resp_conn_ag_ctx {
+struct ustorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8679,214 +8112,214 @@ struct e4_ustorm_roce_resp_conn_ag_ctx {
__le16 word3;
};

-struct e4_xstorm_roce_req_conn_ag_ctx {
+struct xstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -8908,216 +8341,216 @@ struct e4_xstorm_roce_req_conn_ag_ctx {
__le32 orq_cons;
};
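(Editorial sketch, not part of the patch: the MASK/SHIFT pairs renamed above are not used directly; qed reads and writes them through its generic bitfield helpers. A minimal illustration of that pattern follows, assuming the GET_FIELD/SET_FIELD macros the driver defines in include/linux/qed/common_hsi.h; the helper definitions and the example function are reproduced here for illustration only.)

	/* Generic accessors: NAME_MASK gives the field width, NAME_SHIFT its
	 * bit position within the flags byte. Assumed to match the helpers in
	 * include/linux/qed/common_hsi.h; u64 comes from linux/types.h.
	 */
	#define GET_FIELD(value, name) \
		(((value) >> (name ## _SHIFT)) & name ## _MASK)

	#define SET_FIELD(value, name, flag)				\
		do {							\
			(value) &= ~(name ## _MASK << name ## _SHIFT);	\
			(value) |= (((u64)(flag)) << (name ## _SHIFT));	\
		} while (0)

	/* Hypothetical call site against the renamed macros: enable the
	 * flush-Q0 CF on a requester context, then read it back.
	 */
	static void roce_req_flush_q0_example(struct xstorm_roce_req_conn_ag_ctx *ctx)
	{
		SET_FIELD(ctx->flags8, XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN, 1);

		if (GET_FIELD(ctx->flags8, XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN))
			return;	/* bit observed set, as expected */
	}

(Since every renamed macro keeps its original mask and shift value, existing SET_FIELD/GET_FIELD call sites only need the new name.)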
-struct e4_xstorm_roce_resp_conn_ag_ctx {
+struct xstorm_roce_resp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 irq_prod_shadow;
@@ -9139,37 +8572,37 @@ struct e4_xstorm_roce_resp_conn_ag_ctx {
__le32 msn_and_syndrome;
};
-struct e4_ystorm_roce_conn_ag_ctx {
+struct ystorm_roce_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9183,37 +8616,37 @@ struct e4_ystorm_roce_conn_ag_ctx {
__le32 reg3;
};
-struct e4_ystorm_roce_req_conn_ag_ctx {
+struct ystorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9227,37 +8660,37 @@ struct e4_ystorm_roce_req_conn_ag_ctx {
__le32 reg3;
};
-struct e4_ystorm_roce_resp_conn_ag_ctx {
+struct ystorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9294,216 +8727,216 @@ struct xstorm_iwarp_conn_st_ctx {
__le32 reserved[48];
};
-struct e4_xstorm_iwarp_conn_ag_ctx {
+struct xstorm_iwarp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
u8 flags2;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 physical_q1;
@@ -9551,89 +8984,89 @@ struct e4_xstorm_iwarp_conn_ag_ctx {
__le32 reg17;
};
-struct e4_tstorm_iwarp_conn_ag_ctx {
+struct tstorm_iwarp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 unaligned_nxt_seq;
@@ -9671,16 +9104,16 @@ struct ustorm_iwarp_conn_st_ctx {
};
/* iwarp connection context */
-struct e4_iwarp_conn_context {
+struct iwarp_conn_context {
struct ystorm_iwarp_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_iwarp_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_iwarp_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
- struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
struct tstorm_iwarp_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
@@ -9731,8 +9164,8 @@ enum iwarp_eqe_async_opcode {
IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED,
IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE,
IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW,
- IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY,
IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT,
+ IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY,
MAX_IWARP_EQE_ASYNC_OPCODE
};
@@ -9750,8 +9183,7 @@ struct iwarp_eqe_data_tcp_async_completion {
/* iWARP completion queue types */
enum iwarp_eqe_sync_opcode {
- IWARP_EVENT_TYPE_TCP_OFFLOAD =
- 11,
+ IWARP_EVENT_TYPE_TCP_OFFLOAD = 13,
IWARP_EVENT_TYPE_MPA_OFFLOAD,
IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR,
IWARP_EVENT_TYPE_CREATE_QP,
@@ -9783,8 +9215,6 @@ enum iwarp_fw_return_code {
IWARP_EXCEPTION_DETECTED_LLP_RESET,
IWARP_EXCEPTION_DETECTED_IRQ_FULL,
IWARP_EXCEPTION_DETECTED_RQ_EMPTY,
- IWARP_EXCEPTION_DETECTED_SRQ_EMPTY,
- IWARP_EXCEPTION_DETECTED_SRQ_LIMIT,
IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT,
IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR,
IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW,
@@ -9878,9 +9308,10 @@ struct iwarp_mpa_offload_ramrod_data {
struct regpair async_eqe_output_buf;
struct regpair handle_for_async;
struct regpair shared_queue_addr;
+ __le32 additional_setup_time;
__le16 rcv_wnd;
u8 stats_counter_id;
- u8 reserved3[13];
+ u8 reserved3[9];
};
/* iWARP TCP connection offload params passed by driver to FW */
@@ -9888,11 +9319,13 @@ struct iwarp_offload_params {
struct mpa_ulp_buffer incoming_ulp_buffer;
struct regpair async_eqe_output_buf;
struct regpair handle_for_async;
+ __le32 additional_setup_time;
__le16 physical_q0;
__le16 physical_q1;
u8 stats_counter_id;
u8 mpa_mode;
- u8 reserved[10];
+ u8 src_vport_id;
+ u8 reserved[5];
};
/* iWARP query QP output params */
@@ -9912,7 +9345,7 @@ struct iwarp_query_qp_ramrod_data {
/* iWARP Ramrod Command IDs */
enum iwarp_ramrod_cmd_id {
- IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11,
+ IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 13,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
IWARP_RAMROD_CMD_ID_CREATE_QP,
@@ -9971,100 +9404,100 @@ struct unaligned_opaque_data {
__le32 cid;
};
-struct e4_mstorm_iwarp_conn_ag_ctx {
+struct mstorm_iwarp_conn_ag_ctx {
u8 reserved;
u8 state;
u8 flags0;
-#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 rcq_cons;
__le16 rcq_cons_th;
__le32 reg0;
__le32 reg1;
};
-struct e4_ustorm_iwarp_conn_ag_ctx {
+struct ustorm_iwarp_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10077,37 +9510,37 @@ struct e4_ustorm_iwarp_conn_ag_ctx {
__le16 word3;
};
-struct e4_ystorm_iwarp_conn_ag_ctx {
+struct ystorm_iwarp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10297,216 +9730,216 @@ struct xstorm_fcoe_conn_st_ctx {
struct fcoe_wqe cached_wqes[16];
};
-struct e4_xstorm_fcoe_conn_ag_ctx {
+struct xstorm_fcoe_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
u8 flags2;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -10544,150 +9977,150 @@ struct ustorm_fcoe_conn_st_ctx {
u8 reserved[2];
};
-struct e4_tstorm_fcoe_conn_ag_ctx {
+struct tstorm_fcoe_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
u8 flags1;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
};
-struct e4_ustorm_fcoe_conn_ag_ctx {
+struct ustorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
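
The rename in the hunk above is purely mechanical: only the E4_ prefix is dropped, and every mask/shift value is unchanged. A minimal sketch of how such pairs are consumed, assuming the driver's usual GET_FIELD()/SET_FIELD() helpers from include/linux/qed/common_hsi.h; the accessor names below are hypothetical, not driver API:

	static u8 ustorm_fcoe_get_cf1(const struct ustorm_fcoe_conn_ag_ctx *ctx)
	{
		/* CF1 occupies bits [5:4] of flags0 (mask 0x3, shift 4) */
		return GET_FIELD(ctx->flags0, USTORM_FCOE_CONN_AG_CTX_CF1);
	}

	static void ustorm_fcoe_enable_cf1(struct ustorm_fcoe_conn_ag_ctx *ctx)
	{
		/* CF1EN is bit 1 of flags2 (mask 0x1, shift 1) */
		SET_FIELD(ctx->flags2, USTORM_FCOE_CONN_AG_CTX_CF1EN, 1);
	}
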
@@ -10728,37 +10161,37 @@ struct tstorm_fcoe_conn_st_ctx {
u8 reserved0[4];
};
-struct e4_mstorm_fcoe_conn_ag_ctx {
+struct mstorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
@@ -10804,21 +10237,21 @@ struct mstorm_fcoe_conn_st_ctx {
};
/* fcoe connection context */
-struct e4_fcoe_conn_context {
+struct fcoe_conn_context {
struct ystorm_fcoe_conn_st_ctx ystorm_st_context;
struct pstorm_fcoe_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_fcoe_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
+ struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
struct regpair xstorm_ag_padding[6];
struct ustorm_fcoe_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
- struct e4_tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
+ struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
struct regpair tstorm_ag_padding[2];
struct timers_context timer_context;
- struct e4_ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
struct tstorm_fcoe_conn_st_ctx tstorm_st_context;
- struct e4_mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
+ struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
struct mstorm_fcoe_conn_st_ctx mstorm_st_context;
};
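
For illustration only: after the rename, a caller prepares the aggregated FCoE context by zeroing it and then setting individual flag/rule bits through the same helpers. The init function below is hypothetical (memset() as in <linux/string.h>), not the driver's actual offload path:

	static void fcoe_conn_ctx_example(struct fcoe_conn_context *ctx)
	{
		memset(ctx, 0, sizeof(*ctx));	/* start from a clean context */
		SET_FIELD(ctx->mstorm_ag_context.flags1,
			  MSTORM_FCOE_CONN_AG_CTX_RULE0EN, 1);
	}
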
@@ -10869,37 +10302,37 @@ struct fcoe_stat_ramrod_params {
struct fcoe_stat_ramrod_data stat_ramrod_data;
};
-struct e4_ystorm_fcoe_conn_ag_ctx {
+struct ystorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10930,216 +10363,216 @@ struct xstorm_iscsi_tcp_conn_st_ctx {
__le32 reserved_iscsi[44];
};
-struct e4_xstorm_iscsi_conn_ag_ctx {
+struct xstorm_iscsi_conn_ag_ctx {
u8 cdu_validation;
u8 state;
u8 flags0;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 physical_q1;
@@ -11187,89 +10620,89 @@ struct e4_xstorm_iscsi_conn_ag_ctx {
__le32 reg17;
};
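
One pattern worth noting in the xstorm layout above: every two-bit completion flag (CF) has a matching one-bit enable elsewhere in the flags bytes. A sketch pairing TIMER_STOP_ALL (flags2) with its enable bit (flags8), again assuming SET_FIELD(); the helper name and values are illustrative:

	static void xstorm_iscsi_arm_timer_stop(struct xstorm_iscsi_conn_ag_ctx *ctx)
	{
		SET_FIELD(ctx->flags2, XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL, 1);
		SET_FIELD(ctx->flags8, XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN, 1);
	}
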
-struct e4_tstorm_iscsi_conn_ag_ctx {
+struct tstorm_iscsi_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 rx_tcp_checksum_err_cnt;
@@ -11284,63 +10717,63 @@ struct e4_tstorm_iscsi_conn_ag_ctx {
__le16 word0;
};
-struct e4_ustorm_iscsi_conn_ag_ctx {
+struct ustorm_iscsi_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -11358,37 +10791,37 @@ struct tstorm_iscsi_conn_st_ctx {
__le32 reserved[44];
};
-struct e4_mstorm_iscsi_conn_ag_ctx {
+struct mstorm_iscsi_conn_ag_ctx {
u8 reserved;
u8 state;
u8 flags0;
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
@@ -11407,22 +10840,22 @@ struct ustorm_iscsi_conn_st_ctx {
};
/* iscsi connection context */
-struct e4_iscsi_conn_context {
+struct iscsi_conn_context {
struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct pb_context xpb2_context;
struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
struct regpair xstorm_st_padding[2];
- struct e4_xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
struct regpair tstorm_ag_padding[2];
struct timers_context timer_context;
- struct e4_ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
struct pb_context upb_context;
struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
- struct e4_mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
+ struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
};
@@ -11433,37 +10866,37 @@ struct iscsi_init_ramrod_params {
struct tcp_init_params tcp_init;
};
-struct e4_ystorm_iscsi_conn_ag_ctx {
+struct ystorm_iscsi_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -11477,1922 +10910,4 @@ struct e4_ystorm_iscsi_conn_ag_ctx {
__le32 reg3;
};
-#define MFW_TRACE_SIGNATURE 0x25071946
-
-/* The trace in the buffer */
-#define MFW_TRACE_EVENTID_MASK 0x00ffff
-#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
-#define MFW_TRACE_PRM_SIZE_OFFSET 16
-#define MFW_TRACE_ENTRY_SIZE 3
-
-struct mcp_trace {
- u32 signature; /* Help to identify that the trace is valid */
- u32 size; /* the size of the trace buffer in bytes */
- u32 curr_level; /* 2 - all will be written to the buffer
- * 1 - debug trace will not be written
- * 0 - just errors will be written to the buffer
- */
- u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means
- * mask it.
- */
-
- /* Warning: the following pointers are assumed to be 32bits as they are
- * used only in the MFW.
- */
- u32 trace_prod; /* The next trace will be written to this offset */
- u32 trace_oldest; /* The oldest valid trace starts at this offset
- * (usually very close after the current producer).
- */
-};
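
The MFW_TRACE_* masks a few lines up describe the per-entry header word of this buffer. A hedged sketch of decoding one header; the buffer walk from trace_oldest to trace_prod is elided and the function name is hypothetical:

	static void mfw_trace_decode_header(u32 header, u16 *event_id, u8 *prm_size)
	{
		*event_id = header & MFW_TRACE_EVENTID_MASK;
		*prm_size = (header & MFW_TRACE_PRM_SIZE_MASK) >>
			    MFW_TRACE_PRM_SIZE_OFFSET;
	}
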
-
-#define VF_MAX_STATIC 192
-
-#define MCP_GLOB_PATH_MAX 2
-#define MCP_PORT_MAX 2
-#define MCP_GLOB_PORT_MAX 4
-#define MCP_GLOB_FUNC_MAX 16
-
-typedef u32 offsize_t; /* In DWORDS !!! */
-/* Offset from the beginning of the MCP scratchpad */
-#define OFFSIZE_OFFSET_SHIFT 0
-#define OFFSIZE_OFFSET_MASK 0x0000ffff
-/* Size of specific element (not the whole array if any) */
-#define OFFSIZE_SIZE_SHIFT 16
-#define OFFSIZE_SIZE_MASK 0xffff0000
-
-#define SECTION_OFFSET(_offsize) ((((_offsize & \
- OFFSIZE_OFFSET_MASK) >> \
- OFFSIZE_OFFSET_SHIFT) << 2))
-
-#define QED_SECTION_SIZE(_offsize) (((_offsize & \
- OFFSIZE_SIZE_MASK) >> \
- OFFSIZE_SIZE_SHIFT) << 2)
-
-#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
- SECTION_OFFSET(_offsize) + \
- (QED_SECTION_SIZE(_offsize) * idx))
-
-#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
- (_pub_base + offsetof(struct mcp_public_data, sections[_section]))
-
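
A worked example of the offsize encoding just defined: for an offsize word of 0x00040010, the offset field is 0x10 dwords and the size field 4 dwords, so SECTION_OFFSET() yields 0x40 bytes and QED_SECTION_SIZE() 16 bytes per element:

	static u32 section_addr_example(u32 offsize)
	{
		/* 0x00040010: offset 0x10 dwords -> 0x40 bytes; size 4 dwords
		 * -> 16 bytes per element; element 1 is therefore at
		 * MCP_REG_SCRATCH + 0x40 + 16.
		 */
		return SECTION_ADDR(offsize, 1);
	}
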
-/* PHY configuration */
-struct eth_phy_cfg {
- u32 speed;
-#define ETH_SPEED_AUTONEG 0x0
-#define ETH_SPEED_SMARTLINQ 0x8
-
- u32 pause;
-#define ETH_PAUSE_NONE 0x0
-#define ETH_PAUSE_AUTONEG 0x1
-#define ETH_PAUSE_RX 0x2
-#define ETH_PAUSE_TX 0x4
-
- u32 adv_speed;
-
- u32 loopback_mode;
-#define ETH_LOOPBACK_NONE 0x0
-#define ETH_LOOPBACK_INT_PHY 0x1
-#define ETH_LOOPBACK_EXT_PHY 0x2
-#define ETH_LOOPBACK_EXT 0x3
-#define ETH_LOOPBACK_MAC 0x4
-#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5
-#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6
-#define ETH_LOOPBACK_PCS_AH_ONLY 0x7
-#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8
-#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9
-
- u32 eee_cfg;
-#define EEE_CFG_EEE_ENABLED BIT(0)
-#define EEE_CFG_TX_LPI BIT(1)
-#define EEE_CFG_ADV_SPEED_1G BIT(2)
-#define EEE_CFG_ADV_SPEED_10G BIT(3)
-#define EEE_TX_TIMER_USEC_MASK 0xfffffff0
-#define EEE_TX_TIMER_USEC_OFFSET 4
-#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00
-#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100
-#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000
-
- u32 deprecated;
-
- u32 fec_mode;
-#define FEC_FORCE_MODE_MASK 0x000000ff
-#define FEC_FORCE_MODE_OFFSET 0
-#define FEC_FORCE_MODE_NONE 0x00
-#define FEC_FORCE_MODE_FIRECODE 0x01
-#define FEC_FORCE_MODE_RS 0x02
-#define FEC_FORCE_MODE_AUTO 0x07
-#define FEC_EXTENDED_MODE_MASK 0xffffff00
-#define FEC_EXTENDED_MODE_OFFSET 8
-#define ETH_EXT_FEC_NONE 0x00000100
-#define ETH_EXT_FEC_10G_NONE 0x00000200
-#define ETH_EXT_FEC_10G_BASE_R 0x00000400
-#define ETH_EXT_FEC_20G_NONE 0x00000800
-#define ETH_EXT_FEC_20G_BASE_R 0x00001000
-#define ETH_EXT_FEC_25G_NONE 0x00002000
-#define ETH_EXT_FEC_25G_BASE_R 0x00004000
-#define ETH_EXT_FEC_25G_RS528 0x00008000
-#define ETH_EXT_FEC_40G_NONE 0x00010000
-#define ETH_EXT_FEC_40G_BASE_R 0x00020000
-#define ETH_EXT_FEC_50G_NONE 0x00040000
-#define ETH_EXT_FEC_50G_BASE_R 0x00080000
-#define ETH_EXT_FEC_50G_RS528 0x00100000
-#define ETH_EXT_FEC_50G_RS544 0x00200000
-#define ETH_EXT_FEC_100G_NONE 0x00400000
-#define ETH_EXT_FEC_100G_BASE_R 0x00800000
-#define ETH_EXT_FEC_100G_RS528 0x01000000
-#define ETH_EXT_FEC_100G_RS544 0x02000000
-
- u32 extended_speed;
-#define ETH_EXT_SPEED_MASK 0x0000ffff
-#define ETH_EXT_SPEED_OFFSET 0
-#define ETH_EXT_SPEED_AN 0x00000001
-#define ETH_EXT_SPEED_1G 0x00000002
-#define ETH_EXT_SPEED_10G 0x00000004
-#define ETH_EXT_SPEED_20G 0x00000008
-#define ETH_EXT_SPEED_25G 0x00000010
-#define ETH_EXT_SPEED_40G 0x00000020
-#define ETH_EXT_SPEED_50G_BASE_R 0x00000040
-#define ETH_EXT_SPEED_50G_BASE_R2 0x00000080
-#define ETH_EXT_SPEED_100G_BASE_R2 0x00000100
-#define ETH_EXT_SPEED_100G_BASE_R4 0x00000200
-#define ETH_EXT_SPEED_100G_BASE_P4 0x00000400
-#define ETH_EXT_ADV_SPEED_MASK 0xffff0000
-#define ETH_EXT_ADV_SPEED_OFFSET 16
-#define ETH_EXT_ADV_SPEED_RESERVED 0x00010000
-#define ETH_EXT_ADV_SPEED_1G 0x00020000
-#define ETH_EXT_ADV_SPEED_10G 0x00040000
-#define ETH_EXT_ADV_SPEED_20G 0x00080000
-#define ETH_EXT_ADV_SPEED_25G 0x00100000
-#define ETH_EXT_ADV_SPEED_40G 0x00200000
-#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00400000
-#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00800000
-#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x01000000
-#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x02000000
-#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x04000000
-};
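
Unlike the context flags earlier in the file, these PHY masks are already positioned within the word, so raw bit operations suffice. A sketch forcing RS-FEC while leaving the extended-FEC bits untouched; no driver API is implied:

	static void phy_cfg_force_rs_fec(struct eth_phy_cfg *cfg)
	{
		cfg->fec_mode &= ~FEC_FORCE_MODE_MASK;
		cfg->fec_mode |= FEC_FORCE_MODE_RS << FEC_FORCE_MODE_OFFSET;
	}
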
-
-struct port_mf_cfg {
- u32 dynamic_cfg;
-#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
-#define PORT_MF_CFG_OV_TAG_SHIFT 0
-#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
-
- u32 reserved[1];
-};
-
-struct eth_stats {
- u64 r64;
- u64 r127;
- u64 r255;
- u64 r511;
- u64 r1023;
- u64 r1518;
-
- union {
- struct {
- u64 r1522;
- u64 r2047;
- u64 r4095;
- u64 r9216;
- u64 r16383;
- } bb0;
- struct {
- u64 unused1;
- u64 r1519_to_max;
- u64 unused2;
- u64 unused3;
- u64 unused4;
- } ah0;
- } u0;
-
- u64 rfcs;
- u64 rxcf;
- u64 rxpf;
- u64 rxpp;
- u64 raln;
- u64 rfcr;
- u64 rovr;
- u64 rjbr;
- u64 rund;
- u64 rfrg;
- u64 t64;
- u64 t127;
- u64 t255;
- u64 t511;
- u64 t1023;
- u64 t1518;
-
- union {
- struct {
- u64 t2047;
- u64 t4095;
- u64 t9216;
- u64 t16383;
- } bb1;
- struct {
- u64 t1519_to_max;
- u64 unused6;
- u64 unused7;
- u64 unused8;
- } ah1;
- } u1;
-
- u64 txpf;
- u64 txpp;
-
- union {
- struct {
- u64 tlpiec;
- u64 tncl;
- } bb2;
- struct {
- u64 unused9;
- u64 unused10;
- } ah2;
- } u2;
-
- u64 rbyte;
- u64 rxuca;
- u64 rxmca;
- u64 rxbca;
- u64 rxpok;
- u64 tbyte;
- u64 txuca;
- u64 txmca;
- u64 txbca;
- u64 txcf;
-};
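
The bb0/ah0 (and bb1/ah1, bb2/ah2) unions alias the same counter slots, so a reader must pick the view matching the chip family; the bucket boundaries below are inferred from the field names only. A hedged sketch, with is_ah standing in for however the caller identifies an AH device:

	static u64 eth_stats_rx_over_1518(const struct eth_stats *s, bool is_ah)
	{
		/* AH exposes a single 1519..max bucket; BB splits the range */
		return is_ah ? s->u0.ah0.r1519_to_max :
		       s->u0.bb0.r1522 + s->u0.bb0.r2047 + s->u0.bb0.r4095 +
		       s->u0.bb0.r9216 + s->u0.bb0.r16383;
	}
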
-
-struct brb_stats {
- u64 brb_truncate[8];
- u64 brb_discard[8];
-};
-
-struct port_stats {
- struct brb_stats brb;
- struct eth_stats eth;
-};
-
-struct couple_mode_teaming {
- u8 port_cmt[MCP_GLOB_PORT_MAX];
-#define PORT_CMT_IN_TEAM (1 << 0)
-
-#define PORT_CMT_PORT_ROLE (1 << 1)
-#define PORT_CMT_PORT_INACTIVE (0 << 1)
-#define PORT_CMT_PORT_ACTIVE (1 << 1)
-
-#define PORT_CMT_TEAM_MASK (1 << 2)
-#define PORT_CMT_TEAM0 (0 << 2)
-#define PORT_CMT_TEAM1 (1 << 2)
-};
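
The per-port CMT byte packs membership, role and team index into three bits. A small decode sketch; the helper name is hypothetical:

	static bool cmt_port_is_active(const struct couple_mode_teaming *cmt, u8 port)
	{
		u8 v = cmt->port_cmt[port];

		return (v & PORT_CMT_IN_TEAM) &&
		       (v & PORT_CMT_PORT_ROLE) == PORT_CMT_PORT_ACTIVE;
	}
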
-
-#define LLDP_CHASSIS_ID_STAT_LEN 4
-#define LLDP_PORT_ID_STAT_LEN 4
-#define DCBX_MAX_APP_PROTOCOL 32
-#define MAX_SYSTEM_LLDP_TLV_DATA 32
-
-enum _lldp_agent {
- LLDP_NEAREST_BRIDGE = 0,
- LLDP_NEAREST_NON_TPMR_BRIDGE,
- LLDP_NEAREST_CUSTOMER_BRIDGE,
- LLDP_MAX_LLDP_AGENTS
-};
-
-struct lldp_config_params_s {
- u32 config;
-#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
-#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
-#define LLDP_CONFIG_HOLD_MASK 0x00000f00
-#define LLDP_CONFIG_HOLD_SHIFT 8
-#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
-#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
-#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
-#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
-#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
-#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
- u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
- u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
-};
-
-struct lldp_status_params_s {
- u32 prefix_seq_num;
- u32 status;
- u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
- u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
- u32 suffix_seq_num;
-};
-
-struct dcbx_ets_feature {
- u32 flags;
-#define DCBX_ETS_ENABLED_MASK 0x00000001
-#define DCBX_ETS_ENABLED_SHIFT 0
-#define DCBX_ETS_WILLING_MASK 0x00000002
-#define DCBX_ETS_WILLING_SHIFT 1
-#define DCBX_ETS_ERROR_MASK 0x00000004
-#define DCBX_ETS_ERROR_SHIFT 2
-#define DCBX_ETS_CBS_MASK 0x00000008
-#define DCBX_ETS_CBS_SHIFT 3
-#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
-#define DCBX_ETS_MAX_TCS_SHIFT 4
-#define DCBX_OOO_TC_MASK 0x00000f00
-#define DCBX_OOO_TC_SHIFT 8
- u32 pri_tc_tbl[1];
-#define DCBX_TCP_OOO_TC (4)
-
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
-#define DCBX_CEE_STRICT_PRIORITY 0xf
- u32 tc_bw_tbl[2];
- u32 tc_tsa_tbl[2];
-#define DCBX_ETS_TSA_STRICT 0
-#define DCBX_ETS_TSA_CBS 1
-#define DCBX_ETS_TSA_ETS 2
-};
-
-#define DCBX_TCP_OOO_TC (4)
-#define DCBX_TCP_OOO_K2_4PORT_TC (3)
-
-struct dcbx_app_priority_entry {
- u32 entry;
-#define DCBX_APP_PRI_MAP_MASK 0x000000ff
-#define DCBX_APP_PRI_MAP_SHIFT 0
-#define DCBX_APP_PRI_0 0x01
-#define DCBX_APP_PRI_1 0x02
-#define DCBX_APP_PRI_2 0x04
-#define DCBX_APP_PRI_3 0x08
-#define DCBX_APP_PRI_4 0x10
-#define DCBX_APP_PRI_5 0x20
-#define DCBX_APP_PRI_6 0x40
-#define DCBX_APP_PRI_7 0x80
-#define DCBX_APP_SF_MASK 0x00000300
-#define DCBX_APP_SF_SHIFT 8
-#define DCBX_APP_SF_ETHTYPE 0
-#define DCBX_APP_SF_PORT 1
-#define DCBX_APP_SF_IEEE_MASK 0x0000f000
-#define DCBX_APP_SF_IEEE_SHIFT 12
-#define DCBX_APP_SF_IEEE_RESERVED 0
-#define DCBX_APP_SF_IEEE_ETHTYPE 1
-#define DCBX_APP_SF_IEEE_TCP_PORT 2
-#define DCBX_APP_SF_IEEE_UDP_PORT 3
-#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
-
-#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
-#define DCBX_APP_PROTOCOL_ID_SHIFT 16
-};
-
-struct dcbx_app_priority_feature {
- u32 flags;
-#define DCBX_APP_ENABLED_MASK 0x00000001
-#define DCBX_APP_ENABLED_SHIFT 0
-#define DCBX_APP_WILLING_MASK 0x00000002
-#define DCBX_APP_WILLING_SHIFT 1
-#define DCBX_APP_ERROR_MASK 0x00000004
-#define DCBX_APP_ERROR_SHIFT 2
-#define DCBX_APP_MAX_TCS_MASK 0x0000f000
-#define DCBX_APP_MAX_TCS_SHIFT 12
-#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
-#define DCBX_APP_NUM_ENTRIES_SHIFT 16
- struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
-};
-
-struct dcbx_features {
- struct dcbx_ets_feature ets;
- u32 pfc;
-#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
-#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
-
-#define DCBX_PFC_FLAGS_MASK 0x0000ff00
-#define DCBX_PFC_FLAGS_SHIFT 8
-#define DCBX_PFC_CAPS_MASK 0x00000f00
-#define DCBX_PFC_CAPS_SHIFT 8
-#define DCBX_PFC_MBC_MASK 0x00004000
-#define DCBX_PFC_MBC_SHIFT 14
-#define DCBX_PFC_WILLING_MASK 0x00008000
-#define DCBX_PFC_WILLING_SHIFT 15
-#define DCBX_PFC_ENABLED_MASK 0x00010000
-#define DCBX_PFC_ENABLED_SHIFT 16
-#define DCBX_PFC_ERROR_MASK 0x00020000
-#define DCBX_PFC_ERROR_SHIFT 17
-
- struct dcbx_app_priority_feature app;
-};
-
-struct dcbx_local_params {
- u32 config;
-#define DCBX_CONFIG_VERSION_MASK 0x00000007
-#define DCBX_CONFIG_VERSION_SHIFT 0
-#define DCBX_CONFIG_VERSION_DISABLED 0
-#define DCBX_CONFIG_VERSION_IEEE 1
-#define DCBX_CONFIG_VERSION_CEE 2
-#define DCBX_CONFIG_VERSION_STATIC 4
-
- u32 flags;
- struct dcbx_features features;
-};
-
-struct dcbx_mib {
- u32 prefix_seq_num;
- u32 flags;
- struct dcbx_features features;
- u32 suffix_seq_num;
-};
-
-struct lldp_system_tlvs_buffer_s {
- u16 valid;
- u16 length;
- u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
-};
-
-struct dcb_dscp_map {
- u32 flags;
-#define DCB_DSCP_ENABLE_MASK 0x1
-#define DCB_DSCP_ENABLE_SHIFT 0
-#define DCB_DSCP_ENABLE 1
- u32 dscp_pri_map[8];
-};
-
-struct public_global {
- u32 max_path;
- u32 max_ports;
-#define MODE_1P 1
-#define MODE_2P 2
-#define MODE_3P 3
-#define MODE_4P 4
- u32 debug_mb_offset;
- u32 phymod_dbg_mb_offset;
- struct couple_mode_teaming cmt;
- s32 internal_temperature;
- u32 mfw_ver;
- u32 running_bundle_id;
- s32 external_temperature;
- u32 mdump_reason;
- u64 reserved;
- u32 data_ptr;
- u32 data_size;
-};
-
-struct fw_flr_mb {
- u32 aggint;
- u32 opgen_addr;
- u32 accum_ack;
-};
-
-struct public_path {
- struct fw_flr_mb flr_mb;
- u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
-
- u32 process_kill;
-#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
-#define PROCESS_KILL_COUNTER_SHIFT 0
-#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
-#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
-#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
-};
-
-struct public_port {
- u32 validity_map;
-
- u32 link_status;
-#define LINK_STATUS_LINK_UP 0x00000001
-#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
-#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
-#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
-#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
-#define LINK_STATUS_PFC_ENABLED 0x00000100
-#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
-#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
-#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
-#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
-#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
-#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
-#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
-#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
-#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000
-#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
-#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18)
-#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
-#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
-#define LINK_STATUS_SFP_TX_FAULT 0x00100000
-#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
-#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
-#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
-#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
-#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
-#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
-
-#define LINK_STATUS_FEC_MODE_MASK 0x38000000
-#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
-#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27)
-#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
-
- u32 link_status1;
- u32 ext_phy_fw_version;
- u32 drv_phy_cfg_addr;
-
- u32 port_stx;
-
- u32 stat_nig_timer;
-
- struct port_mf_cfg port_mf_config;
- struct port_stats stats;
-
- u32 media_type;
-#define MEDIA_UNSPECIFIED 0x0
-#define MEDIA_SFPP_10G_FIBER 0x1
-#define MEDIA_XFP_FIBER 0x2
-#define MEDIA_DA_TWINAX 0x3
-#define MEDIA_BASE_T 0x4
-#define MEDIA_SFP_1G_FIBER 0x5
-#define MEDIA_MODULE_FIBER 0x6
-#define MEDIA_KR 0xf0
-#define MEDIA_NOT_PRESENT 0xff
-
- u32 lfa_status;
- u32 link_change_count;
-
- struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
- struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
- struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
-
- /* DCBX related MIB */
- struct dcbx_local_params local_admin_dcbx_mib;
- struct dcbx_mib remote_dcbx_mib;
- struct dcbx_mib operational_dcbx_mib;
-
- u32 reserved[2];
-
- u32 transceiver_data;
-#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff
-#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
-#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000
-#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
-#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
-#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
-#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
-#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00
-#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8
-#define ETH_TRANSCEIVER_TYPE_NONE 0x00
-#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff
-#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
-#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
-#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
-#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
-#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
-#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
-#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
-#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
-#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
-#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
-#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
-#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
-#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
-#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
-#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f
-#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
-#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
-#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
-#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13
-#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
-#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
-#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
-#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19
-#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
-#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c
-#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d
-#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e
-#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
-#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
-#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
-#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a
-
- u32 wol_info;
- u32 wol_pkt_len;
- u32 wol_pkt_details;
- struct dcb_dscp_map dcb_dscp_map;
-
- u32 eee_status;
-#define EEE_ACTIVE_BIT BIT(0)
-#define EEE_LD_ADV_STATUS_MASK 0x000000f0
-#define EEE_LD_ADV_STATUS_OFFSET 4
-#define EEE_1G_ADV BIT(1)
-#define EEE_10G_ADV BIT(2)
-#define EEE_LP_ADV_STATUS_MASK 0x00000f00
-#define EEE_LP_ADV_STATUS_OFFSET 8
-#define EEE_SUPPORTED_SPEED_MASK 0x0000f000
-#define EEE_SUPPORTED_SPEED_OFFSET 12
-#define EEE_1G_SUPPORTED BIT(1)
-#define EEE_10G_SUPPORTED BIT(2)
-
- u32 eee_remote;
-#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
-#define EEE_REMOTE_TW_TX_OFFSET 0
-#define EEE_REMOTE_TW_RX_MASK 0xffff0000
-#define EEE_REMOTE_TW_RX_OFFSET 16
-
- u32 reserved1;
- u32 oem_cfg_port;
-#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003
-#define OEM_CFG_CHANNEL_TYPE_OFFSET 0
-#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1
-#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2
-#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C
-#define OEM_CFG_SCHED_TYPE_OFFSET 2
-#define OEM_CFG_SCHED_TYPE_ETS 0x1
-#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2
-};
-
-struct public_func {
- u32 reserved0[2];
-
- u32 mtu_size;
-
- u32 reserved[7];
-
- u32 config;
-#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
-
-#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
-#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
-#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
-#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
-#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
-#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
-#define FUNC_MF_CFG_PROTOCOL_NVMETCP 0x00000040
-#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000040
-
-#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
-#define FUNC_MF_CFG_MIN_BW_SHIFT 8
-#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
-#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
-#define FUNC_MF_CFG_MAX_BW_SHIFT 16
-#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
-
- u32 status;
-#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001
-
- u32 mac_upper;
-#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
-#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
-#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
- u32 mac_lower;
-#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
-
- u32 fcoe_wwn_port_name_upper;
- u32 fcoe_wwn_port_name_lower;
-
- u32 fcoe_wwn_node_name_upper;
- u32 fcoe_wwn_node_name_lower;
-
- u32 ovlan_stag;
-#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
-#define FUNC_MF_CFG_OV_STAG_SHIFT 0
-#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
-
- u32 pf_allocation;
-
- u32 preserve_data;
-
- u32 driver_last_activity_ts;
-
- u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];
-
- u32 drv_id;
-#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
-#define DRV_ID_PDA_COMP_VER_SHIFT 0
-
-#define LOAD_REQ_HSI_VERSION 2
-#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
-#define DRV_ID_MCP_HSI_VER_SHIFT 16
-#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
- DRV_ID_MCP_HSI_VER_SHIFT)
-
-#define DRV_ID_DRV_TYPE_MASK 0x7f000000
-#define DRV_ID_DRV_TYPE_SHIFT 24
-#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
-
-#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
-#define DRV_ID_DRV_INIT_HW_SHIFT 31
-#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT)
-
- u32 oem_cfg_func;
-#define OEM_CFG_FUNC_TC_MASK 0x0000000F
-#define OEM_CFG_FUNC_TC_OFFSET 0
-#define OEM_CFG_FUNC_TC_0 0x0
-#define OEM_CFG_FUNC_TC_1 0x1
-#define OEM_CFG_FUNC_TC_2 0x2
-#define OEM_CFG_FUNC_TC_3 0x3
-#define OEM_CFG_FUNC_TC_4 0x4
-#define OEM_CFG_FUNC_TC_5 0x5
-#define OEM_CFG_FUNC_TC_6 0x6
-#define OEM_CFG_FUNC_TC_7 0x7
-
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2
-};
-
-struct mcp_mac {
- u32 mac_upper;
- u32 mac_lower;
-};
-
-struct mcp_val64 {
- u32 lo;
- u32 hi;
-};
-
-struct mcp_file_att {
- u32 nvm_start_addr;
- u32 len;
-};
-
-struct bist_nvm_image_att {
- u32 return_code;
- u32 image_type;
- u32 nvm_start_addr;
- u32 len;
-};
-
-#define MCP_DRV_VER_STR_SIZE 16
-#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
-#define MCP_DRV_NVM_BUF_LEN 32
-struct drv_version_stc {
- u32 version;
- u8 name[MCP_DRV_VER_STR_SIZE - 4];
-};
-
-struct lan_stats_stc {
- u64 ucast_rx_pkts;
- u64 ucast_tx_pkts;
- u32 fcs_err;
- u32 rserved;
-};
-
-struct fcoe_stats_stc {
- u64 rx_pkts;
- u64 tx_pkts;
- u32 fcs_err;
- u32 login_failure;
-};
-
-struct ocbb_data_stc {
- u32 ocbb_host_addr;
- u32 ocsd_host_addr;
- u32 ocsd_req_update_interval;
-};
-
-#define MAX_NUM_OF_SENSORS 7
-struct temperature_status_stc {
- u32 num_of_sensors;
- u32 sensor[MAX_NUM_OF_SENSORS];
-};
-
-/* crash dump configuration header */
-struct mdump_config_stc {
- u32 version;
- u32 config;
- u32 epoc;
- u32 num_of_logs;
- u32 valid_logs;
-};
-
-enum resource_id_enum {
- RESOURCE_NUM_SB_E = 0,
- RESOURCE_NUM_L2_QUEUE_E = 1,
- RESOURCE_NUM_VPORT_E = 2,
- RESOURCE_NUM_VMQ_E = 3,
- RESOURCE_FACTOR_NUM_RSS_PF_E = 4,
- RESOURCE_FACTOR_RSS_PER_VF_E = 5,
- RESOURCE_NUM_RL_E = 6,
- RESOURCE_NUM_PQ_E = 7,
- RESOURCE_NUM_VF_E = 8,
- RESOURCE_VFC_FILTER_E = 9,
- RESOURCE_ILT_E = 10,
- RESOURCE_CQS_E = 11,
- RESOURCE_GFT_PROFILES_E = 12,
- RESOURCE_NUM_TC_E = 13,
- RESOURCE_NUM_RSS_ENGINES_E = 14,
- RESOURCE_LL2_QUEUE_E = 15,
- RESOURCE_RDMA_STATS_QUEUE_E = 16,
- RESOURCE_BDQ_E = 17,
- RESOURCE_QCN_E = 18,
- RESOURCE_LLH_FILTER_E = 19,
- RESOURCE_VF_MAC_ADDR = 20,
- RESOURCE_LL2_CQS_E = 21,
- RESOURCE_VF_CNQS = 22,
- RESOURCE_MAX_NUM,
- RESOURCE_NUM_INVALID = 0xFFFFFFFF
-};
-
-/* Resource ID is to be filled by the driver in the MB request
- * Size, offset & flags to be filled by the MFW in the MB response
- */
-struct resource_info {
- enum resource_id_enum res_id;
- u32 size; /* number of allocated resources */
- u32 offset; /* Offset of the 1st resource */
- u32 vf_size;
- u32 vf_offset;
- u32 flags;
-#define RESOURCE_ELEMENT_STRICT (1 << 0)
-};
-
-#define DRV_ROLE_NONE 0
-#define DRV_ROLE_PREBOOT 1
-#define DRV_ROLE_OS 2
-#define DRV_ROLE_KDUMP 3
-
-struct load_req_stc {
- u32 drv_ver_0;
- u32 drv_ver_1;
- u32 fw_ver;
- u32 misc0;
-#define LOAD_REQ_ROLE_MASK 0x000000FF
-#define LOAD_REQ_ROLE_SHIFT 0
-#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
-#define LOAD_REQ_LOCK_TO_SHIFT 8
-#define LOAD_REQ_LOCK_TO_DEFAULT 0
-#define LOAD_REQ_LOCK_TO_NONE 255
-#define LOAD_REQ_FORCE_MASK 0x000F0000
-#define LOAD_REQ_FORCE_SHIFT 16
-#define LOAD_REQ_FORCE_NONE 0
-#define LOAD_REQ_FORCE_PF 1
-#define LOAD_REQ_FORCE_ALL 2
-#define LOAD_REQ_FLAGS0_MASK 0x00F00000
-#define LOAD_REQ_FLAGS0_SHIFT 20
-#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
-};
-
-struct load_rsp_stc {
- u32 drv_ver_0;
- u32 drv_ver_1;
- u32 fw_ver;
- u32 misc0;
-#define LOAD_RSP_ROLE_MASK 0x000000FF
-#define LOAD_RSP_ROLE_SHIFT 0
-#define LOAD_RSP_HSI_MASK 0x0000FF00
-#define LOAD_RSP_HSI_SHIFT 8
-#define LOAD_RSP_FLAGS0_MASK 0x000F0000
-#define LOAD_RSP_FLAGS0_SHIFT 16
-#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
-};
-
-struct mdump_retain_data_stc {
- u32 valid;
- u32 epoch;
- u32 pf;
- u32 status;
-};
-
-union drv_union_data {
- u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
- struct mcp_mac wol_mac;
-
- struct eth_phy_cfg drv_phy_cfg;
-
- struct mcp_val64 val64;
-
- u8 raw_data[MCP_DRV_NVM_BUF_LEN];
-
- struct mcp_file_att file_att;
-
- u32 ack_vf_disabled[VF_MAX_STATIC / 32];
-
- struct drv_version_stc drv_version;
-
- struct lan_stats_stc lan_stats;
- struct fcoe_stats_stc fcoe_stats;
- struct ocbb_data_stc ocbb_info;
- struct temperature_status_stc temp_info;
- struct resource_info resource;
- struct bist_nvm_image_att nvm_image_att;
- struct mdump_config_stc mdump_config;
-};
-
-struct public_drv_mb {
- u32 drv_mb_header;
-#define DRV_MSG_CODE_MASK 0xffff0000
-#define DRV_MSG_CODE_LOAD_REQ 0x10000000
-#define DRV_MSG_CODE_LOAD_DONE 0x11000000
-#define DRV_MSG_CODE_INIT_HW 0x12000000
-#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
-#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
-#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
-#define DRV_MSG_CODE_INIT_PHY 0x22000000
-#define DRV_MSG_CODE_LINK_RESET 0x23000000
-#define DRV_MSG_CODE_SET_DCBX 0x25000000
-#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000
-#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000
-#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000
-#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
-#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
-#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
-#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
-#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
-#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
-#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
-#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
-
-#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
-#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
-#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000
-#define DRV_MSG_CODE_GET_NVM_CFG_OPTION 0x003e0000
-#define DRV_MSG_CODE_SET_NVM_CFG_OPTION 0x003f0000
-#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
-#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
-#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
-#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000
-#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
-#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
-#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
-#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
-#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
-#define DRV_MSG_CODE_MCP_RESET 0x00090000
-#define DRV_MSG_CODE_SET_VERSION 0x000f0000
-#define DRV_MSG_CODE_MCP_HALT 0x00100000
-#define DRV_MSG_CODE_SET_VMAC 0x00110000
-#define DRV_MSG_CODE_GET_VMAC 0x00120000
-#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
-#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
-#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
-#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
-#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
-
-#define DRV_MSG_CODE_GET_STATS 0x00130000
-#define DRV_MSG_CODE_STATS_TYPE_LAN 1
-#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
-#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
-#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
-
-#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
-
-#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000
-
-#define DRV_MSG_CODE_BIST_TEST 0x001e0000
-#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
-#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
-/* Send crash dump commands with param[3:0] - opcode */
-#define DRV_MSG_CODE_MDUMP_CMD 0x00250000
-#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000
-#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000
-#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000
-
-#define DRV_MSG_CODE_DEBUG_DATA_SEND 0xc0040000
-
-#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
-#define RESOURCE_CMD_REQ_RESC_SHIFT 0
-#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
-#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
-#define RESOURCE_OPCODE_REQ 1
-#define RESOURCE_OPCODE_REQ_WO_AGING 2
-#define RESOURCE_OPCODE_REQ_W_AGING 3
-#define RESOURCE_OPCODE_RELEASE 4
-#define RESOURCE_OPCODE_FORCE_RELEASE 5
-#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
-#define RESOURCE_CMD_REQ_AGE_SHIFT 8
-
-#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
-#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
-#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
-#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
-#define RESOURCE_OPCODE_GNT 1
-#define RESOURCE_OPCODE_BUSY 2
-#define RESOURCE_OPCODE_RELEASED 3
-#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
-#define RESOURCE_OPCODE_WRONG_OWNER 5
-#define RESOURCE_OPCODE_UNKNOWN_CMD 255
-
-#define RESOURCE_DUMP 0
-
-/* DRV_MSG_CODE_MDUMP_CMD parameters */
-#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f
-#define DRV_MSG_CODE_MDUMP_ACK 0x01
-#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02
-#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
-#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
-#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
-#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
-#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07
-#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08
-
-#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a
-#define DRV_MSG_CODE_MDUMP_GEN_MDUMP2 0x0b
-#define DRV_MSG_CODE_MDUMP_FREE_MDUMP2 0x0c
-
-#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
-#define DRV_MSG_CODE_OS_WOL 0x002e0000
-
-#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000
-#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000
-#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
-
- u32 drv_mb_param;
-#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
-#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
-#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
-#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
-#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
-#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
-
-#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3
-#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0
-#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
-#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
-#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
-
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
-#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
-#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
-
-#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
-#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
-#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
-#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
-#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2
-#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3
-
-#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
-#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
-#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
-#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
-#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
-#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
-
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
-
-#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
-#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
-
-#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \
- DRV_MB_PARAM_WOL_DISABLED | \
- DRV_MB_PARAM_WOL_ENABLED)
-#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP
-#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED
-#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED
-
-#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \
- DRV_MB_PARAM_ESWITCH_MODE_VEB | \
- DRV_MB_PARAM_ESWITCH_MODE_VEPA)
-#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0
-#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
-#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
-
-#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
-#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
-
-#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
-#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
-#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
-
-#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
-#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
-#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
-#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc
-#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
-#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00
-#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
-#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000
-
- /* Resource Allocation params - Driver version support */
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
-
-#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
-#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
-#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3
-#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4
-
-#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
-#define DRV_MB_PARAM_BIST_RC_PASSED 1
-#define DRV_MB_PARAM_BIST_RC_FAILED 2
-#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
-
-#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
-#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff
-#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
-#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00
-
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 0x00000008
-#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
-
-/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */
-#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0
-#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff
-
-/* Driver attributes params */
-#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
-#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff
-#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
-#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000
-
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17
-#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18
-#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19
-#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000
-
- u32 fw_mb_header;
-#define FW_MSG_CODE_MASK 0xffff0000
-#define FW_MSG_CODE_UNSUPPORTED 0x00000000
-#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
-#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
-#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
-#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
-#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
-#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
-#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
-#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
-#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000
-#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000
-#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000
-#define FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE 0x3b000000
-#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
-
-#define FW_MSG_CODE_NVM_OK 0x00010000
-#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
-#define FW_MSG_CODE_PHY_OK 0x00110000
-#define FW_MSG_CODE_OK 0x00160000
-#define FW_MSG_CODE_ERROR 0x00170000
-#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
-#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
-#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000
-
-#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000
-#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000
-#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000
-#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
-
-#define FW_MSG_CODE_DEBUG_DATA_SEND_INV_ARG 0xb0070000
-#define FW_MSG_CODE_DEBUG_DATA_SEND_BUF_FULL 0xb0080000
-#define FW_MSG_CODE_DEBUG_DATA_SEND_NO_BUF 0xb0090000
-#define FW_MSG_CODE_DEBUG_NOT_ENABLED 0xb00a0000
-#define FW_MSG_CODE_DEBUG_DATA_SEND_OK 0xb00b0000
-
-#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000
-
- u32 fw_mb_param;
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
-
- /* Get PF RDMA protocol command response */
-#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
-#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1
-#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
-#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
-
- /* Get MFW feature support response */
-#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0)
-#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1)
-#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5)
-#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6)
-#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16)
-
-#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0)
-
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3
-
-#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff
-#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0
-
- u32 drv_pulse_mb;
-#define DRV_PULSE_SEQ_MASK 0x00007fff
-#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
-#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
-
- u32 mcp_pulse_mb;
-#define MCP_PULSE_SEQ_MASK 0x00007fff
-#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
-#define MCP_EVENT_MASK 0xffff0000
-#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
-
- union drv_union_data union_data;
-};
-
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24
-
-enum MFW_DRV_MSG_TYPE {
- MFW_DRV_MSG_LINK_CHANGE,
- MFW_DRV_MSG_FLR_FW_ACK_FAILED,
- MFW_DRV_MSG_VF_DISABLED,
- MFW_DRV_MSG_LLDP_DATA_UPDATED,
- MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
- MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
- MFW_DRV_MSG_ERROR_RECOVERY,
- MFW_DRV_MSG_BW_UPDATE,
- MFW_DRV_MSG_S_TAG_UPDATE,
- MFW_DRV_MSG_GET_LAN_STATS,
- MFW_DRV_MSG_GET_FCOE_STATS,
- MFW_DRV_MSG_GET_ISCSI_STATS,
- MFW_DRV_MSG_GET_RDMA_STATS,
- MFW_DRV_MSG_FAILURE_DETECTED,
- MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
- MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
- MFW_DRV_MSG_RESERVED,
- MFW_DRV_MSG_GET_TLV_REQ,
- MFW_DRV_MSG_OEM_CFG_UPDATE,
- MFW_DRV_MSG_MAX
-};
-
-#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
-#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
-#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
-#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
-
-struct public_mfw_mb {
- u32 sup_msgs;
- u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
- u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
-};
-
-enum public_sections {
- PUBLIC_DRV_MB,
- PUBLIC_MFW_MB,
- PUBLIC_GLOBAL,
- PUBLIC_PATH,
- PUBLIC_PORT,
- PUBLIC_FUNC,
- PUBLIC_MAX_SECTIONS
-};
-
-struct mcp_public_data {
- u32 num_sections;
- u32 sections[PUBLIC_MAX_SECTIONS];
- struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
- struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
- struct public_global global;
- struct public_path path[MCP_GLOB_PATH_MAX];
- struct public_port port[MCP_GLOB_PORT_MAX];
- struct public_func func[MCP_GLOB_FUNC_MAX];
-};
-
-#define MAX_I2C_TRANSACTION_SIZE 16
-
-/* OCBB definitions */
-enum tlvs {
- /* Category 1: Device Properties */
- DRV_TLV_CLP_STR,
- DRV_TLV_CLP_STR_CTD,
- /* Category 6: Device Configuration */
- DRV_TLV_SCSI_TO,
- DRV_TLV_R_T_TOV,
- DRV_TLV_R_A_TOV,
- DRV_TLV_E_D_TOV,
- DRV_TLV_CR_TOV,
- DRV_TLV_BOOT_TYPE,
- /* Category 8: Port Configuration */
- DRV_TLV_NPIV_ENABLED,
- /* Category 10: Function Configuration */
- DRV_TLV_FEATURE_FLAGS,
- DRV_TLV_LOCAL_ADMIN_ADDR,
- DRV_TLV_ADDITIONAL_MAC_ADDR_1,
- DRV_TLV_ADDITIONAL_MAC_ADDR_2,
- DRV_TLV_LSO_MAX_OFFLOAD_SIZE,
- DRV_TLV_LSO_MIN_SEGMENT_COUNT,
- DRV_TLV_PROMISCUOUS_MODE,
- DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG,
- DRV_TLV_FLEX_NIC_OUTER_VLAN_ID,
- DRV_TLV_OS_DRIVER_STATES,
- DRV_TLV_PXE_BOOT_PROGRESS,
- /* Category 12: FC/FCoE Configuration */
- DRV_TLV_NPIV_STATE,
- DRV_TLV_NUM_OF_NPIV_IDS,
- DRV_TLV_SWITCH_NAME,
- DRV_TLV_SWITCH_PORT_NUM,
- DRV_TLV_SWITCH_PORT_ID,
- DRV_TLV_VENDOR_NAME,
- DRV_TLV_SWITCH_MODEL,
- DRV_TLV_SWITCH_FW_VER,
- DRV_TLV_QOS_PRIORITY_PER_802_1P,
- DRV_TLV_PORT_ALIAS,
- DRV_TLV_PORT_STATE,
- DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_LINK_FAILURE_COUNT,
- DRV_TLV_FCOE_BOOT_PROGRESS,
- /* Category 13: iSCSI Configuration */
- DRV_TLV_TARGET_LLMNR_ENABLED,
- DRV_TLV_HEADER_DIGEST_FLAG_ENABLED,
- DRV_TLV_DATA_DIGEST_FLAG_ENABLED,
- DRV_TLV_AUTHENTICATION_METHOD,
- DRV_TLV_ISCSI_BOOT_TARGET_PORTAL,
- DRV_TLV_MAX_FRAME_SIZE,
- DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_ISCSI_BOOT_PROGRESS,
- /* Category 20: Device Data */
- DRV_TLV_PCIE_BUS_RX_UTILIZATION,
- DRV_TLV_PCIE_BUS_TX_UTILIZATION,
- DRV_TLV_DEVICE_CPU_CORES_UTILIZATION,
- DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED,
- DRV_TLV_NCSI_RX_BYTES_RECEIVED,
- DRV_TLV_NCSI_TX_BYTES_SENT,
- /* Category 22: Base Port Data */
- DRV_TLV_RX_DISCARDS,
- DRV_TLV_RX_ERRORS,
- DRV_TLV_TX_ERRORS,
- DRV_TLV_TX_DISCARDS,
- DRV_TLV_RX_FRAMES_RECEIVED,
- DRV_TLV_TX_FRAMES_SENT,
- /* Category 23: FC/FCoE Port Data */
- DRV_TLV_RX_BROADCAST_PACKETS,
- DRV_TLV_TX_BROADCAST_PACKETS,
- /* Category 28: Base Function Data */
- DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4,
- DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6,
- DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
- DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
- DRV_TLV_PF_RX_FRAMES_RECEIVED,
- DRV_TLV_RX_BYTES_RECEIVED,
- DRV_TLV_PF_TX_FRAMES_SENT,
- DRV_TLV_TX_BYTES_SENT,
- DRV_TLV_IOV_OFFLOAD,
- DRV_TLV_PCI_ERRORS_CAP_ID,
- DRV_TLV_UNCORRECTABLE_ERROR_STATUS,
- DRV_TLV_UNCORRECTABLE_ERROR_MASK,
- DRV_TLV_CORRECTABLE_ERROR_STATUS,
- DRV_TLV_CORRECTABLE_ERROR_MASK,
- DRV_TLV_PCI_ERRORS_AECC_REGISTER,
- DRV_TLV_TX_QUEUES_EMPTY,
- DRV_TLV_RX_QUEUES_EMPTY,
- DRV_TLV_TX_QUEUES_FULL,
- DRV_TLV_RX_QUEUES_FULL,
- /* Category 29: FC/FCoE Function Data */
- DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
- DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
- DRV_TLV_FCOE_RX_FRAMES_RECEIVED,
- DRV_TLV_FCOE_RX_BYTES_RECEIVED,
- DRV_TLV_FCOE_TX_FRAMES_SENT,
- DRV_TLV_FCOE_TX_BYTES_SENT,
- DRV_TLV_CRC_ERROR_COUNT,
- DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_1_TIMESTAMP,
- DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_2_TIMESTAMP,
- DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_3_TIMESTAMP,
- DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_4_TIMESTAMP,
- DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_5_TIMESTAMP,
- DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT,
- DRV_TLV_LOSS_OF_SIGNAL_ERRORS,
- DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT,
- DRV_TLV_DISPARITY_ERROR_COUNT,
- DRV_TLV_CODE_VIOLATION_ERROR_COUNT,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4,
- DRV_TLV_LAST_FLOGI_TIMESTAMP,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4,
- DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP,
- DRV_TLV_LAST_FLOGI_RJT,
- DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP,
- DRV_TLV_FDISCS_SENT_COUNT,
- DRV_TLV_FDISC_ACCS_RECEIVED,
- DRV_TLV_FDISC_RJTS_RECEIVED,
- DRV_TLV_PLOGI_SENT_COUNT,
- DRV_TLV_PLOGI_ACCS_RECEIVED,
- DRV_TLV_PLOGI_RJTS_RECEIVED,
- DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_1_TIMESTAMP,
- DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_2_TIMESTAMP,
- DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_3_TIMESTAMP,
- DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_4_TIMESTAMP,
- DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_5_TIMESTAMP,
- DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_1_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_2_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_3_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_4_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_5_ACC_TIMESTAMP,
- DRV_TLV_LOGOS_ISSUED,
- DRV_TLV_LOGO_ACCS_RECEIVED,
- DRV_TLV_LOGO_RJTS_RECEIVED,
- DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_1_TIMESTAMP,
- DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_2_TIMESTAMP,
- DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_3_TIMESTAMP,
- DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_4_TIMESTAMP,
- DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_5_TIMESTAMP,
- DRV_TLV_LOGOS_RECEIVED,
- DRV_TLV_ACCS_ISSUED,
- DRV_TLV_PRLIS_ISSUED,
- DRV_TLV_ACCS_RECEIVED,
- DRV_TLV_ABTS_SENT_COUNT,
- DRV_TLV_ABTS_ACCS_RECEIVED,
- DRV_TLV_ABTS_RJTS_RECEIVED,
- DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_1_TIMESTAMP,
- DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_2_TIMESTAMP,
- DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_3_TIMESTAMP,
- DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_4_TIMESTAMP,
- DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_5_TIMESTAMP,
- DRV_TLV_RSCNS_RECEIVED,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4,
- DRV_TLV_LUN_RESETS_ISSUED,
- DRV_TLV_ABORT_TASK_SETS_ISSUED,
- DRV_TLV_TPRLOS_SENT,
- DRV_TLV_NOS_SENT_COUNT,
- DRV_TLV_NOS_RECEIVED_COUNT,
- DRV_TLV_OLS_COUNT,
- DRV_TLV_LR_COUNT,
- DRV_TLV_LRR_COUNT,
- DRV_TLV_LIP_SENT_COUNT,
- DRV_TLV_LIP_RECEIVED_COUNT,
- DRV_TLV_EOFA_COUNT,
- DRV_TLV_EOFNI_COUNT,
- DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT,
- DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT,
- DRV_TLV_SCSI_STATUS_BUSY_COUNT,
- DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT,
- DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT,
- DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT,
- DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT,
- DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT,
- DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT,
- DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_1_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_2_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_3_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_4_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_5_TIMESTAMP,
- /* Category 30: iSCSI Function Data */
- DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
- DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
- DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED,
- DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED,
- DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT,
- DRV_TLV_ISCSI_PDU_TX_BYTES_SENT
-};
-
-struct nvm_cfg_mac_address {
- u32 mac_addr_hi;
-#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff
-#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
-
- u32 mac_addr_lo;
-};
-
-struct nvm_cfg1_glob {
- u32 generic_cont0;
-#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0
-#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
-#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
-#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
-#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
-#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
-#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
-#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
-
- u32 engineering_change[3];
- u32 manufacturing_id;
- u32 serial_number[4];
- u32 pcie_cfg;
- u32 mgmt_traffic;
-
- u32 core_cfg;
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15
-
- u32 e_lane_cfg1;
- u32 e_lane_cfg2;
- u32 f_lane_cfg1;
- u32 f_lane_cfg2;
- u32 mps10_preemphasis;
- u32 mps10_driver_current;
- u32 mps25_preemphasis;
- u32 mps25_driver_current;
- u32 pci_id;
- u32 pci_subsys_id;
- u32 bar;
- u32 mps10_txfir_main;
- u32 mps10_txfir_post;
- u32 mps25_txfir_main;
- u32 mps25_txfir_post;
- u32 manufacture_ver;
- u32 manufacture_time;
- u32 led_global_settings;
- u32 generic_cont1;
-
- u32 mbi_version;
-#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff
-#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
-#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00
-#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
-#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000
-#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
-
- u32 mbi_date;
- u32 misc_sig;
-
- u32 device_capabilities;
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
-
- u32 power_dissipated;
- u32 power_consumed;
- u32 efi_version;
- u32 multi_net_modes_cap;
- u32 reserved[41];
-};
-
-struct nvm_cfg1_path {
- u32 reserved[30];
-};
-
-struct nvm_cfg1_port {
- u32 rel_to_opt123;
- u32 rel_to_opt124;
-
- u32 generic_cont0;
-#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000
-#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
-#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
-#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
-#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
-#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00f00000
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
-
- u32 pcie_cfg;
- u32 features;
-
- u32 speed_cap_mask;
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
-
- u32 link_settings;
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7
-
- u32 phy_cfg;
- u32 mgmt_traffic;
-
- u32 ext_phy;
- /* EEE power saving mode */
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3
-
- u32 mba_cfg1;
- u32 mba_cfg2;
- u32 vf_cfg;
- struct nvm_cfg_mac_address lldp_mac_address;
- u32 led_port_settings;
- u32 transceiver_00;
- u32 device_ids;
-
- u32 board_cfg;
-#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff
-#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
-#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
-#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
-#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
-#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
-#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
-
- u32 mnm_10g_cap;
- u32 mnm_10g_ctrl;
- u32 mnm_10g_misc;
- u32 mnm_25g_cap;
- u32 mnm_25g_ctrl;
- u32 mnm_25g_misc;
- u32 mnm_40g_cap;
- u32 mnm_40g_ctrl;
- u32 mnm_40g_misc;
- u32 mnm_50g_cap;
- u32 mnm_50g_ctrl;
- u32 mnm_50g_misc;
- u32 mnm_100g_cap;
- u32 mnm_100g_ctrl;
- u32 mnm_100g_misc;
-
- u32 temperature;
- u32 ext_phy_cfg1;
-
- u32 extended_speed;
-#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff
-#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 0x8
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400
-
- u32 extended_fec_mode;
-
- u32 reserved[112];
-};
-
-struct nvm_cfg1_func {
- struct nvm_cfg_mac_address mac_address;
- u32 rsrv1;
- u32 rsrv2;
- u32 device_id;
- u32 cmn_cfg;
- u32 pci_cfg;
- struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr;
- struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr;
- u32 preboot_generic_cfg;
- u32 reserved[8];
-};
-
-struct nvm_cfg1 {
- struct nvm_cfg1_glob glob;
- struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX];
- struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
- struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
-};
-
-enum spad_sections {
- SPAD_SECTION_TRACE,
- SPAD_SECTION_NVM_CFG,
- SPAD_SECTION_PUBLIC,
- SPAD_SECTION_PRIVATE,
- SPAD_SECTION_MAX
-};
-
-#define MCP_TRACE_SIZE 2048 /* 2kb */
-
-/* This section is located at a fixed location in the beginning of the
- * scratchpad, to ensure that the MCP trace is not run over during MFW upgrade.
- * All the rest of data has a floating location which differs from version to
- * version, and is pointed by the mcp_meta_data below.
- * Moreover, the spad_layout section is part of the MFW firmware, and is loaded
- * with it from nvram in order to clear this portion.
- */
-struct static_init {
- u32 num_sections;
- offsize_t sections[SPAD_SECTION_MAX];
-#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_]))))
-
- struct mcp_trace trace;
-#define MCP_TRACE_P ((struct mcp_trace *)(STRUCT_OFFSET(trace)))
- u8 trace_buffer[MCP_TRACE_SIZE];
-#define MCP_TRACE_BUF ((u8 *)(STRUCT_OFFSET(trace_buffer)))
- /* running_mfw has the same definition as in nvm_map.h.
- * This bit indicate both the running dir, and the running bundle.
- * It is set once when the LIM is loaded.
- */
- u32 running_mfw;
-#define RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(running_mfw))))
- u32 build_time;
-#define MFW_BUILD_TIME (*((u32 *)(STRUCT_OFFSET(build_time))))
- u32 reset_type;
-#define RESET_TYPE (*((u32 *)(STRUCT_OFFSET(reset_type))))
- u32 mfw_secure_mode;
-#define MFW_SECURE_MODE (*((u32 *)(STRUCT_OFFSET(mfw_secure_mode))))
- u16 pme_status_pf_bitmap;
-#define PME_STATUS_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_status_pf_bitmap))))
- u16 pme_enable_pf_bitmap;
-#define PME_ENABLE_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_enable_pf_bitmap))))
- u32 mim_nvm_addr;
- u32 mim_start_addr;
- u32 ah_pcie_link_params;
-#define AH_PCIE_LINK_PARAMS_LINK_SPEED_MASK (0x000000ff)
-#define AH_PCIE_LINK_PARAMS_LINK_SPEED_SHIFT (0)
-#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_MASK (0x0000ff00)
-#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_SHIFT (8)
-#define AH_PCIE_LINK_PARAMS_ASPM_MODE_MASK (0x00ff0000)
-#define AH_PCIE_LINK_PARAMS_ASPM_MODE_SHIFT (16)
-#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK (0xff000000)
-#define AH_PCIE_LINK_PARAMS_ASPM_CAP_SHIFT (24)
-#define AH_PCIE_LINK_PARAMS (*((u32 *)(STRUCT_OFFSET(ah_pcie_link_params))))
-
- u32 rsrv_persist[5]; /* Persist reserved for MFW upgrades */
-};
-
-#define NVM_MAGIC_VALUE 0x669955aa
-
-enum nvm_image_type {
- NVM_TYPE_TIM1 = 0x01,
- NVM_TYPE_TIM2 = 0x02,
- NVM_TYPE_MIM1 = 0x03,
- NVM_TYPE_MIM2 = 0x04,
- NVM_TYPE_MBA = 0x05,
- NVM_TYPE_MODULES_PN = 0x06,
- NVM_TYPE_VPD = 0x07,
- NVM_TYPE_MFW_TRACE1 = 0x08,
- NVM_TYPE_MFW_TRACE2 = 0x09,
- NVM_TYPE_NVM_CFG1 = 0x0a,
- NVM_TYPE_L2B = 0x0b,
- NVM_TYPE_DIR1 = 0x0c,
- NVM_TYPE_EAGLE_FW1 = 0x0d,
- NVM_TYPE_FALCON_FW1 = 0x0e,
- NVM_TYPE_PCIE_FW1 = 0x0f,
- NVM_TYPE_HW_SET = 0x10,
- NVM_TYPE_LIM = 0x11,
- NVM_TYPE_AVS_FW1 = 0x12,
- NVM_TYPE_DIR2 = 0x13,
- NVM_TYPE_CCM = 0x14,
- NVM_TYPE_EAGLE_FW2 = 0x15,
- NVM_TYPE_FALCON_FW2 = 0x16,
- NVM_TYPE_PCIE_FW2 = 0x17,
- NVM_TYPE_AVS_FW2 = 0x18,
- NVM_TYPE_INIT_HW = 0x19,
- NVM_TYPE_DEFAULT_CFG = 0x1a,
- NVM_TYPE_MDUMP = 0x1b,
- NVM_TYPE_META = 0x1c,
- NVM_TYPE_ISCSI_CFG = 0x1d,
- NVM_TYPE_FCOE_CFG = 0x1f,
- NVM_TYPE_ETH_PHY_FW1 = 0x20,
- NVM_TYPE_ETH_PHY_FW2 = 0x21,
- NVM_TYPE_BDN = 0x22,
- NVM_TYPE_8485X_PHY_FW = 0x23,
- NVM_TYPE_PUB_KEY = 0x24,
- NVM_TYPE_RECOVERY = 0x25,
- NVM_TYPE_PLDM = 0x26,
- NVM_TYPE_UPK1 = 0x27,
- NVM_TYPE_UPK2 = 0x28,
- NVM_TYPE_MASTER_KC = 0x29,
- NVM_TYPE_BACKUP_KC = 0x2a,
- NVM_TYPE_HW_DUMP = 0x2b,
- NVM_TYPE_HW_DUMP_OUT = 0x2c,
- NVM_TYPE_BIN_NVM_META = 0x30,
- NVM_TYPE_ROM_TEST = 0xf0,
- NVM_TYPE_88X33X0_PHY_FW = 0x31,
- NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32,
- NVM_TYPE_MAX,
-};
-
-#define DIR_ID_1 (0)
-
#endif
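
Nearly every u32 in the deleted HSI block above packs several bitfields, each
described by a paired FOO_MASK / FOO_SHIFT (or FOO_OFFSET) define. Below is a
minimal, self-contained sketch of the resulting mask-and-shift access pattern;
the GET_FIELD/SET_FIELD helper names are illustrative stand-ins (the in-tree
driver carries its own equivalents), while the two field layouts are copied
verbatim from the header being removed:

#include <stdint.h>
#include <stdio.h>

/* Field layouts copied from the removed HSI header (struct public_func). */
#define FUNC_MF_CFG_PROTOCOL_MASK	0x000000f0
#define FUNC_MF_CFG_PROTOCOL_SHIFT	4
#define FUNC_MF_CFG_MIN_BW_MASK		0x0000ff00
#define FUNC_MF_CFG_MIN_BW_SHIFT	8

/* Illustrative helpers; not the driver's own macros. */
#define GET_FIELD(val, name) \
	(((val) & name##_MASK) >> name##_SHIFT)
#define SET_FIELD(val, name, field) \
	(((val) & ~name##_MASK) | (((uint32_t)(field) << name##_SHIFT) & name##_MASK))

int main(void)
{
	uint32_t config = 0;

	/* Pack protocol 2 (FCoE: 2 << 4 matches the header's 0x00000020)
	 * and a minimum bandwidth of 50 into one configuration word.
	 */
	config = SET_FIELD(config, FUNC_MF_CFG_PROTOCOL, 2);
	config = SET_FIELD(config, FUNC_MF_CFG_MIN_BW, 50);

	/* Prints: config=0x00003220 protocol=2 min_bw=50 */
	printf("config=0x%08x protocol=%u min_bw=%u\n", config,
	       GET_FIELD(config, FUNC_MF_CFG_PROTOCOL),
	       GET_FIELD(config, FUNC_MF_CFG_MIN_BW));
	return 0;
}
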
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index 2734f49956f7..e535983ce21b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -53,85 +53,94 @@ enum _dmae_cmd_crc_mask {
#define DMAE_MAX_CLIENTS 32
/**
- * @brief qed_gtt_init - Initialize GTT windows
+ * qed_gtt_init(): Initialize GTT windows.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_gtt_init(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured
+ * qed_ptt_invalidate(): Forces all ptt entries to be re-configured.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool
+ * qed_ptt_pool_alloc(): Allocate and initialize PTT pool.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return struct _qed_status - success (0), negative - error.
+ * Return: struct _qed_status - success (0), negative on error.
*/
int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_pool_free -
+ * qed_ptt_pool_free(): Free PTT pool.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address
+ * qed_ptt_get_hw_addr(): Get PTT's GRC/HW address.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window.
*
- * @return u32
+ * Return: u32 - GRC/HW address of the PTT.
*/
u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_ptt_get_bar_addr - Get PPT's external BAR address
+ * qed_ptt_get_bar_addr(): Get PTT's external BAR address.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_ptt: PTT window.
*
- * @return u32
+ * Return: u32 - external BAR address of the PTT.
*/
u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt);
/**
- * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address
+ * qed_ptt_set_win(): Set PTT Window's GRC BAR address.
*
- * @param p_hwfn
- * @param new_hw_addr
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @new_hw_addr: New HW address.
+ * @p_ptt: PTT window.
+ *
+ * Return: Void.
*/
void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 new_hw_addr);
/**
- * @brief qed_get_reserved_ptt - Get a specific reserved PTT
+ * qed_get_reserved_ptt(): Get a specific reserved PTT.
*
- * @param p_hwfn
- * @param ptt_idx
+ * @p_hwfn: HW device data.
+ * @ptt_idx: PTT index.
*
- * @return struct qed_ptt *
+ * Return: struct qed_ptt *.
*/
struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
enum reserved_ptts ptt_idx);
/**
- * @brief qed_wr - Write value to BAR using the given ptt
+ * qed_wr(): Write value to BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @val: Val.
+ * @hw_addr: HW address.
*
- * @param p_hwfn
- * @param p_ptt
- * @param val
- * @param hw_addr
+ * Return: Void.
*/
void qed_wr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -139,26 +148,28 @@ void qed_wr(struct qed_hwfn *p_hwfn,
u32 val);
/**
- * @brief qed_rd - Read value from BAR using the given ptt
+ * qed_rd(): Read value from BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @hw_addr: HW address.
*
- * @param p_hwfn
- * @param p_ptt
- * @param val
- * @param hw_addr
+ * Return: u32.
*/
u32 qed_rd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 hw_addr);
/**
- * @brief qed_memcpy_from - copy n bytes from BAR using the given
- * ptt
- *
- * @param p_hwfn
- * @param p_ptt
- * @param dest
- * @param hw_addr
- * @param n
+ * qed_memcpy_from(): Copy n bytes from BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @dest: Destination.
+ * @hw_addr: HW address.
+ * @n: Number of bytes to copy.
+ *
+ * Return: Void.
*/
void qed_memcpy_from(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -167,14 +178,15 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn,
size_t n);
/**
- * @brief qed_memcpy_to - copy n bytes to BAR using the given
- * ptt
- *
- * @param p_hwfn
- * @param p_ptt
- * @param hw_addr
- * @param src
- * @param n
+ * qed_memcpy_to(): Copy n bytes to BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @hw_addr: HW address.
+ * @src: Source.
+ * @n: Number of bytes to copy.
+ *
+ * Return: Void.
*/
void qed_memcpy_to(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -182,83 +194,97 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn,
void *src,
size_t n);
/**
- * @brief qed_fid_pretend - pretend to another function when
- * accessing the ptt window. There is no way to unpretend
- * a function. The only way to cancel a pretend is to
- * pretend back to the original function.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param fid - fid field of pxp_pretend structure. Can contain
- * either pf / vf, port/path fields are don't care.
+ * qed_fid_pretend(): Pretend to another function when
+ * accessing the ptt window. There is no way to unpretend
+ * a function. The only way to cancel a pretend is to
+ * pretend back to the original function.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @fid: fid field of pxp_pretend structure. Can contain
+ * either pf / vf, port/path fields are don't care.
+ *
+ * Return: Void.
*/
void qed_fid_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 fid);
/**
- * @brief qed_port_pretend - pretend to another port when
- * accessing the ptt window
+ * qed_port_pretend(): Pretend to another port when accessing the ptt window.
*
- * @param p_hwfn
- * @param p_ptt
- * @param port_id - the port to pretend to
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @port_id: The port to pretend to.
+ *
+ * Return: Void.
*/
void qed_port_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 port_id);
/**
- * @brief qed_port_unpretend - cancel any previously set port
- * pretend
+ * qed_port_unpretend(): Cancel any previously set port pretend.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Void.
*/
void qed_port_unpretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_port_fid_pretend - pretend to another port and another function
- * when accessing the ptt window
+ * qed_port_fid_pretend(): Pretend to another port and another function
+ * when accessing the ptt window.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @port_id: The port to pretend to.
+ * @fid: fid field of pxp_pretend structure. Can contain either pf / vf.
*
- * @param p_hwfn
- * @param p_ptt
- * @param port_id - the port to pretend to
- * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf.
+ * Return: Void.
*/
void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 port_id, u16 fid);
/**
- * @brief qed_vfid_to_concrete - build a concrete FID for a
- * given VF ID
+ * qed_vfid_to_concrete(): Build a concrete FID for a given VF ID.
*
- * @param p_hwfn
- * @param p_ptt
- * @param vfid
+ * @p_hwfn: HW device data.
+ * @vfid: VFID.
+ *
+ * Return: u32.
*/
u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid);
/**
- * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
- * this is declared here since other files will require it.
- * @param idx
+ * qed_dmae_idx_to_go_cmd(): Map the idx to dmae cmd;
+ * this is declared here since other files will require it.
+ *
+ * @idx: Index.
+ *
+ * Return: u32.
*/
u32 qed_dmae_idx_to_go_cmd(u8 idx);
/**
- * @brief qed_dmae_info_alloc - Init the dmae_info structure
- * which is part of p_hwfn.
- * @param p_hwfn
+ * qed_dmae_info_alloc(): Init the dmae_info structure
+ * which is part of p_hwfn.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
*/
int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_dmae_info_free - Free the dmae_info structure
- * which is part of p_hwfn
+ * qed_dmae_info_free(): Free the dmae_info structure
+ * which is part of p_hwfn.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
@@ -292,14 +318,16 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
#define QED_HW_ERR_MAX_STR_SIZE 256
/**
- * @brief qed_hw_err_notify - Notify upper layer driver and management FW
- * about a HW error.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param err_type
- * @param fmt - debug data buffer to send to the MFW
- * @param ... - buffer format args
+ * qed_hw_err_notify(): Notify upper layer driver and management FW
+ * about a HW error.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @err_type: Err Type.
+ * @fmt: Debug data buffer to send to the MFW.
+ * @...: Buffer format args.
+ *
+ * Return: Void.
*/
void __printf(4, 5) __cold qed_hw_err_notify(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
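
The pretend semantics documented above are easy to get wrong: there is no
generic "unpretend", so a caller must explicitly pretend back to its original
function. A minimal sketch of that pattern, using only the prototypes in this
header (the fid values and register address are illustrative, not real qed
constants):

/* Illustrative sketch: read a register on behalf of another function,
 * then restore the PTT window by pretending back to our own fid.
 */
static u32 qed_rd_as_other_fid(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt,
                               u16 other_fid, u16 my_fid, u32 hw_addr)
{
        u32 val;

        qed_fid_pretend(p_hwfn, p_ptt, other_fid);
        val = qed_rd(p_hwfn, p_ptt, hw_addr);

        /* The only way to cancel a pretend is to pretend back. */
        qed_fid_pretend(p_hwfn, p_ptt, my_fid);

        return val;
}
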
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index ea888a2c6ddb..321c43408153 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#include <linux/types.h>
@@ -13,17 +13,18 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
+#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"
-#define CDU_VALIDATION_DEFAULT_CFG 61
+#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG
-static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
{400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */
{528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */
{608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */
};
-static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
{240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
};
@@ -42,25 +43,49 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
+/* Initial VOQ byte credit */
+#define QM_INITIAL_VOQ_BYTE_CRD 98304
/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
+/* VOQ constants */
+#define MAX_NUM_VOQS (MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2)
+#define VOQS_BIT_MASK (BIT(MAX_NUM_VOQS) - 1)
+
/* WFQ constants */
-/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
-#define QM_WFQ_UPPER_BOUND 62500000
+/* PF WFQ increment value, 0x9000 = 4*9*1024 */
+#define QM_PF_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+
+/* PF WFQ Upper bound, in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_WFQ_UPPER_BOUND 62500000
+
+/* PF WFQ max increment value, 0.7 * upper bound */
+#define QM_PF_WFQ_MAX_INC_VAL ((QM_PF_WFQ_UPPER_BOUND * 7) / 10)
+
+/* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */
+#define QM_PF_WFQ_CRD_E5_NUM_VOQS 16
+
+/* VP WFQ increment value */
+#define QM_VP_WFQ_INC_VAL(weight) ((weight) * QM_VP_WFQ_MIN_INC_VAL)
-/* Bit of VOQ in WFQ VP PQ map */
-#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+/* VP WFQ min increment value */
+#define QM_VP_WFQ_MIN_INC_VAL 10800
-/* Bit of PF in WFQ VP PQ map */
-#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
+/* VP WFQ max increment value, 2^30 */
+#define QM_VP_WFQ_MAX_INC_VAL 0x40000000
-/* 0x9000 = 4*9*1024 */
-#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+/* VP WFQ bypass threshold */
+#define QM_VP_WFQ_BYPASS_THRESH (QM_VP_WFQ_MIN_INC_VAL - 100)
-/* Max WFQ increment value is 0.7 * upper bound */
-#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
+/* VP RL credit task cost */
+#define QM_VP_RL_CRD_TASK_COST 9700
+
+/* Bit of VOQ in VP WFQ PQ map */
+#define QM_VP_WFQ_PQ_VOQ_SHIFT 0
+
+/* Bit of PF in VP WFQ PQ map */
+#define QM_VP_WFQ_PQ_PF_SHIFT 5
/* RL constants */
@@ -71,12 +96,13 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
/* RL increment value - rate is specified in mbps */
-#define QM_RL_INC_VAL(rate) ({ \
- typeof(rate) __rate = (rate); \
- max_t(u32, \
- (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \
- (8 * 100)), \
- 1); })
+#define QM_RL_INC_VAL(rate) ({ \
+ typeof(rate) __rate = (rate); \
+ max_t(u32, \
+ (u32)(((__rate ? __rate : \
+ 100000) * \
+ QM_RL_PERIOD * \
+ 101) / (8 * 100)), 1); })
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND 62500000
@@ -84,16 +110,13 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
-/* Vport RL Upper bound, link speed is in Mpbs */
-#define QM_VP_RL_UPPER_BOUND(speed) ((u32)max_t(u32, \
- QM_RL_INC_VAL(speed), \
- 9700 + 1000))
-
-/* Max Vport RL increment value is the Vport RL upper bound */
-#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)
-
-/* Vport RL credit threshold in case of QM bypass */
-#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)
+/* QCN RL Upper bound, speed is in Mbps */
+#define QM_GLOBAL_RL_UPPER_BOUND(speed) ((u32)max_t( \
+ u32, \
+ (u32)(((speed) * \
+ QM_RL_PERIOD * 101) / (8 * 100)), \
+ QM_VP_RL_CRD_TASK_COST \
+ + 1000))
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
@@ -156,20 +179,20 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
cmd ## _ ## field, \
value)
-#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, \
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid, \
rl_id, ext_voq, wrr) \
do { \
u32 __reg = 0; \
\
BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \
- \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_VALID, \
+ memset(&(map), 0, sizeof(map)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID, \
!!(rl_valid)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, (vp_pq_id)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_ID, (rl_id)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VOQ, (ext_voq)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, \
(wrr)); \
\
STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
@@ -184,8 +207,8 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
(((rl) >> 8) << 9))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
- XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
- XSTORM_PQ_INFO_OFFSET(pq_id)
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
+ XSTORM_PQ_INFO_OFFSET(pq_id))
/******************** INTERNAL IMPLEMENTATION *********************/
@@ -204,7 +227,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
- u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+ u8 num_ext_voqs = MAX_NUM_VOQS;
u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
/* Enable RLs for all VOQs */
@@ -236,7 +259,7 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
if (pf_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
- QM_WFQ_UPPER_BOUND);
+ QM_PF_WFQ_UPPER_BOUND);
}
/* Prepare global RL enable/disable runtime init values */
@@ -257,7 +280,7 @@ static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
- QM_VP_RL_BYPASS_THRESH_SPEED);
+ QM_GLOBAL_RL_UPPER_BOUND(10000) - 1);
}
}
@@ -271,7 +294,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
if (vport_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
- QM_WFQ_UPPER_BOUND);
+ QM_VP_WFQ_BYPASS_THRESH);
}
/* Prepare runtime init values to allocate PBF command queue lines for
@@ -291,14 +314,14 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
}
/* Prepare runtime init values to allocate PBF command queue lines. */
-static void qed_cmdq_lines_rt_init(
- struct qed_hwfn *p_hwfn,
- u8 max_ports_per_engine,
- u8 max_phys_tcs_per_port,
- struct init_qm_port_params port_params[MAX_NUM_PORTS])
+static void
+qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u8 tc, ext_voq, port_id, num_tcs_in_port;
- u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+ u8 num_ext_voqs = MAX_NUM_VOQS;
/* Clear PBF lines of all VOQs */
for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
@@ -364,11 +387,11 @@ static void qed_cmdq_lines_rt_init(
* - No optimization for lossy TC (all are considered lossless). Shared space
* is not enabled and allocated for each TC.
*/
-static void qed_btb_blocks_rt_init(
- struct qed_hwfn *p_hwfn,
- u8 max_ports_per_engine,
- u8 max_phys_tcs_per_port,
- struct init_qm_port_params port_params[MAX_NUM_PORTS])
+static void
+qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
u8 tc, ext_voq, port_id, num_tcs_in_port;
@@ -428,7 +451,7 @@ static void qed_btb_blocks_rt_init(
*/
static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
{
- u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
+ u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
(u32)QM_RL_CRD_REG_SIGN_BIT;
u32 inc_val;
u16 rl_id;
@@ -450,11 +473,73 @@ static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
return 0;
}
+/* Returns the upper bound for the specified Vport RL parameters.
+ * link_speed is in Mbps.
+ * Returns 0 in case of error.
+ */
+static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type,
+ u32 link_speed)
+{
+ switch (vport_rl_type) {
+ case QM_RL_TYPE_NORMAL:
+ return QM_INITIAL_VOQ_BYTE_CRD;
+ case QM_RL_TYPE_QCN:
+ return QM_GLOBAL_RL_UPPER_BOUND(link_speed);
+ default:
+ return 0;
+ }
+}
+
+/* Prepare VPORT RL runtime init values.
+ * Return -1 on error.
+ */
+static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
+ u16 start_rl,
+ u16 num_rls,
+ u32 link_speed,
+ struct init_qm_rl_params *rl_params)
+{
+ u16 i, rl_id;
+
+ if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n");
+ return -1;
+ }
+
+ /* Go over all PF VPORTs */
+ for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) {
+ u32 upper_bound, inc_val;
+
+ upper_bound =
+ qed_get_vport_rl_upper_bound((enum init_qm_rl_type)
+ rl_params[i].vport_rl_type,
+ link_speed);
+
+ inc_val =
+ QM_RL_INC_VAL(rl_params[i].vport_rl ?
+ rl_params[i].vport_rl : link_speed);
+ if (inc_val > upper_bound) {
+ DP_NOTICE(p_hwfn,
+ "Invalid RL rate - limit configuration\n");
+ return -1;
+ }
+
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
+ upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id,
+ inc_val);
+ }
+
+ return 0;
+}
+
/* Prepare Tx PQ mapping runtime init values for the specified PF */
-static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params,
- u32 base_mem_addr_4kb)
+static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params,
+ u32 base_mem_addr_4kb)
{
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
struct init_qm_vport_params *vport_params = p_params->vport_params;
@@ -487,7 +572,7 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
/* Go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
u16 *p_first_tx_pq_id, vport_id_in_pf;
- struct qm_rf_pq_map_e4 tx_pq_map;
+ struct qm_rf_pq_map tx_pq_map;
u8 tc_id = pq_params[i].tc_id;
bool is_vf_pq;
u8 ext_voq;
@@ -504,8 +589,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
&vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
u32 map_val =
- (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
- (p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT);
+ (ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) |
+ (p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT);
/* Create new VP PQ */
*p_first_tx_pq_id = pq_id;
@@ -520,7 +605,6 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
/* Prepare PQ map entry */
QM_INIT_TX_PQ_MAP(p_hwfn,
tx_pq_map,
- E4,
pq_id,
*p_first_tx_pq_id,
pq_params[i].rl_valid,
@@ -570,6 +654,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn,
QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
tx_pq_vf_mask[i]);
+
+ return 0;
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
@@ -620,7 +706,6 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
* Return -1 on error.
*/
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
-
struct qed_qm_pf_rt_init_params *p_params)
{
u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
@@ -629,8 +714,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
u8 ext_voq;
u16 i;
- inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
- if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq);
+ if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
return -1;
}
@@ -652,7 +737,7 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn,
QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
- QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
inc_val);
@@ -689,34 +774,38 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
u16 num_vports,
struct init_qm_vport_params *vport_params)
{
- u16 vport_pq_id, i;
+ u16 vport_pq_id, wfq, i;
u32 inc_val;
u8 tc;
/* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
- if (!vport_params[i].wfq)
- continue;
-
- inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);
- if (inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT WFQ weight configuration\n");
- return -1;
- }
-
/* Each VPORT can have several VPORT PQ IDs for various TCs */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ /* Check if VPORT/TC is valid */
vport_pq_id = vport_params[i].first_tx_pq_id[tc];
- if (vport_pq_id != QM_INVALID_PQ_ID) {
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPCRD_RT_OFFSET +
- vport_pq_id,
- (u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPWEIGHT_RT_OFFSET +
- vport_pq_id, inc_val);
+ if (vport_pq_id == QM_INVALID_PQ_ID)
+ continue;
+
+ /* Find WFQ weight (per VPORT or per VPORT+TC) */
+ wfq = vport_params[i].wfq;
+ wfq = wfq ? wfq : vport_params[i].tc_wfq[tc];
+ inc_val = QM_VP_WFQ_INC_VAL(wfq);
+ if (inc_val > QM_VP_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT WFQ weight configuration\n");
+ return -1;
}
+
+ /* Config registers */
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET +
+ vport_pq_id,
+ inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
}
}
@@ -780,11 +869,14 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
QM_OPPOR_LINE_VOQ_DEF);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ,
+ p_params->pf_wfq_en ? 1 : 0);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ,
+ p_params->vport_wfq_en ? 1 : 0);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL,
+ p_params->pf_rl_en ? 1 : 0);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
- p_params->global_rl_en);
+ p_params->global_rl_en ? 1 : 0);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
SET_FIELD(mask,
QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
@@ -830,7 +922,6 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
u16 i;
u8 tc;
-
/* Clear first Tx PQ ID array for each VPORT */
for (i = 0; i < p_params->num_vports; i++)
for (tc = 0; tc < NUM_OF_TCS; tc++)
@@ -843,7 +934,8 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
p_params->num_tids, 0);
/* Map Tx PQs */
- qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
+ if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb))
+ return -1;
/* Init PF WFQ */
if (p_params->pf_wfq)
@@ -858,15 +950,21 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
return -1;
+ /* Set VPORT RL */
+ if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl,
+ p_params->num_rls, p_params->link_speed,
+ p_params->rl_params))
+ return -1;
+
return 0;
}
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
- u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
+ u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq);
- if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
return -1;
}
@@ -897,41 +995,66 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
{
+ int result = 0;
u16 vport_pq_id;
- u32 inc_val;
u8 tc;
- inc_val = QM_WFQ_INC_VAL(wfq);
- if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ for (tc = 0; tc < NUM_OF_TCS && !result; tc++) {
+ vport_pq_id = first_tx_pq_id[tc];
+ if (vport_pq_id != QM_INVALID_PQ_ID)
+ result = qed_init_vport_tc_wfq(p_hwfn, p_ptt,
+ vport_pq_id, wfq);
+ }
+
+ return result;
+}
+
+int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id, u16 wfq)
+{
+ u32 inc_val;
+
+ if (first_tx_pq_id == QM_INVALID_PQ_ID)
+ return -1;
+
+ inc_val = QM_VP_WFQ_INC_VAL(wfq);
+ if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
return -1;
}
- /* A VPORT can have several VPORT PQ IDs for various TCs */
- for (tc = 0; tc < NUM_OF_TCS; tc++) {
- vport_pq_id = first_tx_pq_id[tc];
- if (vport_pq_id != QM_INVALID_PQ_ID)
- qed_wr(p_hwfn,
- p_ptt,
- QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
- }
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4,
+ inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4,
+ inc_val);
return 0;
}
int qed_init_global_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit)
+ struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit,
+ enum init_qm_rl_type vport_rl_type)
{
- u32 inc_val;
+ u32 inc_val, upper_bound;
+ upper_bound =
+ (vport_rl_type ==
+ QM_RL_TYPE_QCN) ? QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) :
+ QM_INITIAL_VOQ_BYTE_CRD;
inc_val = QM_RL_INC_VAL(rate_limit);
- if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) {
- DP_NOTICE(p_hwfn, "Invalid rate limit configuration.\n");
+ if (inc_val > upper_bound) {
+ DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n");
return -1;
}
qed_wr(p_hwfn, p_ptt,
QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ QM_REG_RLGLBLUPPERBOUND + rl_id * 4,
+ upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);
return 0;
@@ -1013,7 +1136,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
__le32 *p_data, u32 addr, u32 len_in_dwords)
{
- struct qed_dmae_params params = {};
+ struct qed_dmae_params params = { 0 };
u32 *data_cpu;
int rc;
@@ -1066,16 +1189,16 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
/* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
reg_val =
- qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
/* Update output only if tunnel blocks not included. */
if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
@@ -1099,18 +1222,20 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
/* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE,
+ eth_gre_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE,
+ ip_gre_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
reg_val =
- qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
/* Update output only if tunnel blocks not included. */
if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
@@ -1148,22 +1273,23 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
bool eth_geneve_enable, bool ip_geneve_enable)
{
u32 reg_val;
- u8 shift;
/* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE,
+ eth_geneve_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE,
+ ip_geneve_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
reg_val =
- qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
/* Update output only if tunnel blocks not included. */
if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
@@ -1179,16 +1305,16 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
/* Update DORQ registers */
qed_wr(p_hwfn,
p_ptt,
- DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
eth_geneve_enable ? 1 : 0);
qed_wr(p_hwfn,
p_ptt,
- DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
ip_geneve_enable ? 1 : 0);
}
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3
-#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT 0xC8DAB910
void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable)
@@ -1208,7 +1334,7 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
/* update PRS FIC register */
qed_wr(p_hwfn,
p_ptt,
- PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
} else {
/* clear VXLAN_NO_L2_ENABLE flag */
@@ -1229,7 +1355,7 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
- struct regpair ram_line = { };
+ struct regpair ram_line = { 0 };
/* Disable gft search for PF */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
@@ -1621,6 +1747,8 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
storm_buf_size = GET_FIELD(hdr->data,
FW_OVERLAY_BUF_HDR_BUF_SIZE);
storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
+ if (storm_id >= NUM_STORMS)
+ break;
storm_mem_desc = allocated_mem + storm_id;
storm_mem_desc->size = storm_buf_size * sizeof(u32);
@@ -1645,7 +1773,7 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
/* If memory allocation has failed, free all allocated memory */
if (buf_offset < buf_size) {
- qed_fw_overlay_mem_free(p_hwfn, allocated_mem);
+ qed_fw_overlay_mem_free(p_hwfn, &allocated_mem);
return NULL;
}
@@ -1679,16 +1807,16 @@ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
}
void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
- struct phys_mem_desc *fw_overlay_mem)
+ struct phys_mem_desc **fw_overlay_mem)
{
u8 storm_id;
- if (!fw_overlay_mem)
+ if (!fw_overlay_mem || !(*fw_overlay_mem))
return;
for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
struct phys_mem_desc *storm_mem_desc =
- (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+ (struct phys_mem_desc *)*fw_overlay_mem + storm_id;
/* Free Storm's physical memory */
if (storm_mem_desc->virt_addr)
@@ -1699,5 +1827,6 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
}
/* Free allocated virtual memory */
- kfree(fw_overlay_mem);
+ kfree(*fw_overlay_mem);
+ *fw_overlay_mem = NULL;
}
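
qed_fw_overlay_mem_free() above now takes the caller's pointer by reference
so it can both free the memory and clear the pointer, which turns an
accidental second call into a no-op; the error path in
qed_fw_overlay_mem_alloc() passes &allocated_mem accordingly. The same
pattern in isolation (a sketch with stand-in types, not driver code):

#include <stdlib.h>

struct mem_desc {
        void *virt_addr;
};

/* Free *pp and NULL the caller's copy so a double free cannot occur. */
static void mem_desc_free(struct mem_desc **pp)
{
        if (!pp || !*pp)
                return;

        free((*pp)->virt_addr);
        free(*pp);
        *pp = NULL;     /* later calls see NULL and return early */
}

With the old by-value signature every caller had to remember to NULL its own
pointer after the call; moving that into the callee makes the cleanup
idempotent.
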
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 7e6c6389523b..b3bf9899c1a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -15,6 +15,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
+#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
@@ -46,30 +47,32 @@ static u32 pxp_global_win[] = {
/* IRO Array */
static const u32 iro_arr[] = {
0x00000000, 0x00000000, 0x00080000,
+ 0x00004478, 0x00000008, 0x00080000,
0x00003288, 0x00000088, 0x00880000,
- 0x000058e8, 0x00000020, 0x00200000,
+ 0x000058a8, 0x00000020, 0x00200000,
+ 0x00003188, 0x00000008, 0x00080000,
0x00000b00, 0x00000008, 0x00040000,
0x00000a80, 0x00000008, 0x00040000,
0x00000000, 0x00000008, 0x00020000,
0x00000080, 0x00000008, 0x00040000,
0x00000084, 0x00000008, 0x00020000,
- 0x00005718, 0x00000004, 0x00040000,
- 0x00004dd0, 0x00000000, 0x00780000,
+ 0x00005798, 0x00000004, 0x00040000,
+ 0x00004e50, 0x00000000, 0x00780000,
0x00003e40, 0x00000000, 0x00780000,
- 0x00004480, 0x00000000, 0x00780000,
+ 0x00004500, 0x00000000, 0x00780000,
0x00003210, 0x00000000, 0x00780000,
0x00003b50, 0x00000000, 0x00780000,
0x00007f58, 0x00000000, 0x00780000,
- 0x00005f58, 0x00000000, 0x00080000,
+ 0x00005fd8, 0x00000000, 0x00080000,
0x00007100, 0x00000000, 0x00080000,
- 0x0000aea0, 0x00000000, 0x00080000,
+ 0x0000af20, 0x00000000, 0x00080000,
0x00004398, 0x00000000, 0x00080000,
0x0000a5a0, 0x00000000, 0x00080000,
0x0000bde8, 0x00000000, 0x00080000,
0x00000020, 0x00000004, 0x00040000,
- 0x000056c8, 0x00000010, 0x00100000,
+ 0x00005688, 0x00000010, 0x00100000,
0x0000c210, 0x00000030, 0x00300000,
- 0x0000b088, 0x00000038, 0x00380000,
+ 0x0000b108, 0x00000038, 0x00380000,
0x00003d20, 0x00000080, 0x00400000,
0x0000bf60, 0x00000000, 0x00040000,
0x00004560, 0x00040080, 0x00040000,
@@ -77,11 +80,11 @@ static const u32 iro_arr[] = {
0x00003d60, 0x00000080, 0x00200000,
0x00008960, 0x00000040, 0x00300000,
0x0000e840, 0x00000060, 0x00600000,
- 0x00004618, 0x00000080, 0x00380000,
- 0x00010738, 0x000000c0, 0x00c00000,
+ 0x00004698, 0x00000080, 0x00380000,
+ 0x000107b8, 0x000000c0, 0x00c00000,
0x000001f8, 0x00000002, 0x00020000,
- 0x0000a2a0, 0x00000000, 0x01080000,
- 0x0000a3a8, 0x00000008, 0x00080000,
+ 0x0000a260, 0x00000000, 0x01080000,
+ 0x0000a368, 0x00000008, 0x00080000,
0x000001c0, 0x00000008, 0x00080000,
0x000001f8, 0x00000008, 0x00080000,
0x00000ac0, 0x00000008, 0x00080000,
@@ -90,39 +93,46 @@ static const u32 iro_arr[] = {
0x00000280, 0x00000008, 0x00080000,
0x00000680, 0x00080018, 0x00080000,
0x00000b78, 0x00080018, 0x00020000,
- 0x0000c640, 0x00000050, 0x003c0000,
- 0x00012038, 0x00000018, 0x00100000,
- 0x00011b00, 0x00000040, 0x00180000,
- 0x000095d0, 0x00000050, 0x00200000,
+ 0x0000c600, 0x00000058, 0x003c0000,
+ 0x00012038, 0x00000020, 0x00100000,
+ 0x00011b00, 0x00000048, 0x00180000,
+ 0x00009650, 0x00000050, 0x00200000,
0x00008b10, 0x00000040, 0x00280000,
- 0x00011640, 0x00000018, 0x00100000,
- 0x0000c828, 0x00000048, 0x00380000,
- 0x00011710, 0x00000020, 0x00200000,
- 0x00004650, 0x00000080, 0x00100000,
+ 0x000116c0, 0x00000018, 0x00100000,
+ 0x0000c808, 0x00000048, 0x00380000,
+ 0x00011790, 0x00000020, 0x00200000,
+ 0x000046d0, 0x00000080, 0x00100000,
0x00003618, 0x00000010, 0x00100000,
- 0x0000a968, 0x00000008, 0x00010000,
+ 0x0000a9e8, 0x00000008, 0x00010000,
0x000097a0, 0x00000008, 0x00010000,
- 0x00011990, 0x00000008, 0x00010000,
- 0x0000f018, 0x00000008, 0x00010000,
- 0x00012628, 0x00000008, 0x00010000,
- 0x00011da8, 0x00000008, 0x00010000,
- 0x0000aa78, 0x00000030, 0x00100000,
- 0x0000d768, 0x00000028, 0x00280000,
- 0x00009a58, 0x00000018, 0x00180000,
- 0x00009bd8, 0x00000008, 0x00080000,
- 0x00013a18, 0x00000008, 0x00080000,
- 0x000126e8, 0x00000018, 0x00180000,
- 0x0000e608, 0x00500288, 0x00100000,
- 0x00012970, 0x00000138, 0x00280000,
+ 0x00011a10, 0x00000008, 0x00010000,
+ 0x0000e9f8, 0x00000008, 0x00010000,
+ 0x00012648, 0x00000008, 0x00010000,
+ 0x000121c8, 0x00000008, 0x00010000,
+ 0x0000af08, 0x00000030, 0x00100000,
+ 0x0000d748, 0x00000028, 0x00280000,
+ 0x00009e68, 0x00000018, 0x00180000,
+ 0x00009fe8, 0x00000008, 0x00080000,
+ 0x00013ea8, 0x00000008, 0x00080000,
+ 0x00012f18, 0x00000018, 0x00180000,
+ 0x0000dfe8, 0x00500288, 0x00100000,
+ 0x000131a0, 0x00000138, 0x00280000,
};
void qed_init_iro_array(struct qed_dev *cdev)
{
- cdev->iro_arr = iro_arr;
+ cdev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET;
}
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
+ if (rt_offset >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing %u in rt_data at index %u!\n",
+ val, rt_offset);
+ return;
+ }
+
p_hwfn->rt_data.init_val[rt_offset] = val;
p_hwfn->rt_data.b_valid[rt_offset] = true;
}
@@ -132,6 +142,14 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
{
size_t i;
+ if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing values in rt_data at indices %u-%u!\n",
+ rt_offset,
+ (u32)(rt_offset + size - 1));
+ return;
+ }
+
for (i = 0; i < size / sizeof(u32); i++) {
p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
p_hwfn->rt_data.b_valid[rt_offset + i] = true;
@@ -175,7 +193,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
return rc;
/* invalidate after writing */
- for (j = i; j < i + segment; j++)
+ for (j = i; j < (u32)(i + segment); j++)
p_valid[j] = false;
/* Jump over the entire segment, including invalid entry */
@@ -245,7 +263,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr, u32 fill, u32 fill_count)
+ u32 addr, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
struct qed_dmae_params params = {};
@@ -372,7 +390,7 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
case INIT_SRC_ZEROS:
data = le32_to_cpu(p_cmd->args.zeros_count);
if (b_must_dmae || (b_can_dmae && (data >= 64)))
- rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
+ rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, data);
else
qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
break;
@@ -419,7 +437,6 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
-
val = qed_rd(p_hwfn, p_ptt, addr);
if (poll == INIT_POLL_NONE)
@@ -515,8 +532,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
INIT_IF_MODE_OP_CMD_OFFSET);
}
-static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
- struct init_if_phase_op *p_cmd,
+static u32 qed_init_cmd_phase(struct init_if_phase_op *p_cmd,
u32 phase, u32 phase_id)
{
u32 data = le32_to_cpu(p_cmd->phase_data);
@@ -563,7 +579,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
modes);
break;
case INIT_OP_IF_PHASE:
- cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
+ cmd_num += qed_init_cmd_phase(&cmd->if_phase,
phase, phase_id);
break;
case INIT_OP_DELAY:
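
qed_init_store_rt_reg() and qed_init_store_rt_agg() above now validate the
offset range before touching the init_val/b_valid arrays. The guard, reduced
to its essentials (the array size, names and types below are placeholders for
this sketch):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define RT_ARRAY_SIZE 1024      /* placeholder for RUNTIME_ARRAY_SIZE */

static uint32_t rt_init_val[RT_ARRAY_SIZE];
static bool rt_b_valid[RT_ARRAY_SIZE];

static void rt_store_agg(uint32_t rt_offset, const uint32_t *val, size_t size)
{
        size_t i, n = size / sizeof(uint32_t);

        /* Bound the last index written, i.e. rt_offset + n - 1. */
        if (!n || rt_offset + n - 1 >= RT_ARRAY_SIZE)
                return;

        for (i = 0; i < n; i++) {
                rt_init_val[rt_offset + i] = val[i];
                rt_b_valid[rt_offset + i] = true;
        }
}
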
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
index a573c8921982..12e5c4e370d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
@@ -12,23 +12,24 @@
#include "qed.h"
/**
- * @brief qed_init_iro_array - init iro_arr.
+ * qed_init_iro_array(): init iro_arr.
*
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_init_iro_array(struct qed_dev *cdev);
/**
- * @brief qed_init_run - Run the init-sequence.
+ * qed_init_run(): Run the init-sequence.
*
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @phase: Phase.
+ * @phase_id: Phase ID.
+ * @modes: Mode.
*
- * @param p_hwfn
- * @param p_ptt
- * @param phase
- * @param phase_id
- * @param modes
- * @return _qed_status_t
+ * Return: _qed_status_t.
*/
int qed_init_run(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -37,30 +38,31 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
int modes);
/**
- * @brief qed_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs.
+ * qed_init_alloc(): Allocate RT array, store 'values' ptrs.
*
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
- *
- * @return _qed_status_t
+ * Return: _qed_status_t.
*/
int qed_init_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_init_hwfn_deallocate
+ * qed_init_free(): Free the RT array allocated by qed_init_alloc().
*
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_init_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
+ * qed_init_store_rt_reg(): Store a configuration value in the RT array.
*
+ * @p_hwfn: HW device data.
+ * @rt_offset: RT offset.
+ * @val: Val.
*
- * @param p_hwfn
- * @param rt_offset
- * @param val
+ * Return: Void.
*/
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
u32 rt_offset,
@@ -72,29 +74,21 @@ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
#define OVERWRITE_RT_REG(hwfn, offset, val) \
qed_init_store_rt_reg(hwfn, offset, val)
-/**
- * @brief
- *
- *
- * @param p_hwfn
- * @param rt_offset
- * @param val
- * @param size
- */
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
u32 rt_offset,
u32 *val,
size_t size);
#define STORE_RT_REG_AGG(hwfn, offset, val) \
- qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+ qed_init_store_rt_agg(hwfn, offset, (u32 *)&(val), sizeof(val))
/**
- * @brief
- * Initialize GTT global windows and set admin window
- * related params of GTT/PTT to default values.
+ * qed_gtt_init(): Initialize GTT global windows and set admin window
+ * related params of GTT/PTT to default values.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_gtt_init(struct qed_hwfn *p_hwfn);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index f78e6055f654..a97f691839e0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -36,7 +36,7 @@ struct qed_sb_sp_info {
struct qed_sb_info sb_info;
/* per protocol index data */
- struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
+ struct qed_pi_info pi_info_arr[PIS_PER_SB];
};
enum qed_attention_type {
@@ -1507,7 +1507,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
else
SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1);
- sb_offset = igu_sb_id * PIS_PER_SB_E4;
+ sb_offset = igu_sb_id * PIS_PER_SB;
pi_offset = sb_offset + pi_index;
if (p_hwfn->hw_init_done)
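
The PI configuration above treats the CAU PI entries as one flat table with
PIS_PER_SB entries per status block, so the global entry index is
igu_sb_id * PIS_PER_SB + pi_index. A small sketch of that indexing (the
PIS_PER_SB value below is a placeholder; the real one comes from the FW HSI):

#define PIS_PER_SB 12   /* placeholder value for this sketch only */

/* SB 0 owns entries [0, PIS_PER_SB), SB 1 the next PIS_PER_SB, and so
 * on; e.g. with PIS_PER_SB == 12, SB 3 / PI 5 maps to 3 * 12 + 5 = 41.
 */
static u32 cau_pi_entry_idx(u16 igu_sb_id, u8 pi_index)
{
        return (u32)igu_sb_id * PIS_PER_SB + pi_index;
}
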
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index c5550e96bbe1..84c17e97f569 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -53,51 +53,54 @@ enum qed_coalescing_fsm {
};
/**
- * @brief qed_int_igu_enable_int - enable device interrupts
+ * qed_int_igu_enable_int(): Enable device interrupts.
*
- * @param p_hwfn
- * @param p_ptt
- * @param int_mode - interrupt mode to use
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @int_mode: Interrupt mode to use.
+ *
+ * Return: Void.
*/
void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_int_mode int_mode);
/**
- * @brief qed_int_igu_disable_int - disable device interrupts
+ * qed_int_igu_disable_int(): Disable device interrupts.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Void.
*/
void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc
- * register from igu.
+ * qed_int_igu_read_sisr_reg(): Reads the single isr multiple dpc
+ * register from igu.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return u64
+ * Return: u64.
*/
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
#define QED_SP_SB_ID 0xffff
/**
- * @brief qed_int_sb_init - Initializes the sb_info structure.
+ * qed_int_sb_init(): Initializes the sb_info structure.
*
- * once the structure is initialized it can be passed to sb related functions.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @sb_info: Points to an uninitialized (but allocated) sb_info structure.
+ * @sb_virt_addr: SB Virtual address.
+ * @sb_phy_addr: SB Physical address.
+ * @sb_id: The sb_id to be used (zero based in driver),
+ * should use QED_SP_SB_ID for SP Status block.
*
- * @param p_hwfn
- * @param p_ptt
- * @param sb_info points to an uninitialized (but
- * allocated) sb_info structure
- * @param sb_virt_addr
- * @param sb_phy_addr
- * @param sb_id the sb_id to be used (zero based in driver)
- * should use QED_SP_SB_ID for SP Status block
+ * Return: int.
*
- * @return int
+ * Once the structure is initialized it can be passed to sb related functions.
*/
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -106,82 +109,91 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
dma_addr_t sb_phy_addr,
u16 sb_id);
/**
- * @brief qed_int_sb_setup - Setup the sb.
+ * qed_int_sb_setup(): Setup the sb.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @sb_info: Initialized sb_info structure.
*
- * @param p_hwfn
- * @param p_ptt
- * @param sb_info initialized sb_info structure
+ * Return: Void.
*/
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_sb_info *sb_info);
/**
- * @brief qed_int_sb_release - releases the sb_info structure.
+ * qed_int_sb_release(): Releases the sb_info structure.
*
- * once the structure is released, it's memory can be freed
+ * @p_hwfn: HW device data.
+ * @sb_info: Points to an allocated sb_info structure.
+ * @sb_id: The sb_id to be used (zero based in driver)
+ * should never be equal to QED_SP_SB_ID
+ * (SP Status block).
*
- * @param p_hwfn
- * @param sb_info points to an allocated sb_info structure
- * @param sb_id the sb_id to be used (zero based in driver)
- * should never be equal to QED_SP_SB_ID
- * (SP Status block)
+ * Return: int.
*
- * @return int
+ * Once the structure is released, its memory can be freed.
*/
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
struct qed_sb_info *sb_info,
u16 sb_id);
/**
- * @brief qed_int_sp_dpc - To be called when an interrupt is received on the
- * default status block.
+ * qed_int_sp_dpc(): To be called when an interrupt is received on the
+ * default status block.
*
- * @param p_hwfn - pointer to hwfn
+ * @t: Tasklet.
+ *
+ * Return: Void.
*
*/
void qed_int_sp_dpc(struct tasklet_struct *t);
/**
- * @brief qed_int_get_num_sbs - get the number of status
- * blocks configured for this funciton in the igu.
+ * qed_int_get_num_sbs(): Get the number of status blocks configured
+ * for this function in the igu.
*
- * @param p_hwfn
- * @param p_sb_cnt_info
+ * @p_hwfn: HW device data.
+ * @p_sb_cnt_info: Pointer to SB count info.
*
- * @return int - number of status blocks configured
+ * Return: Void.
*/
void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
struct qed_sb_cnt_info *p_sb_cnt_info);
/**
- * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
+ * qed_int_disable_post_isr_release(): Performs the cleanup post ISR
* release. The API need to be called after releasing all slowpath IRQs
* of the device.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
+ * Return: Void.
*/
void qed_int_disable_post_isr_release(struct qed_dev *cdev);
/**
- * @brief qed_int_attn_clr_enable - sets whether the general behavior is
+ * qed_int_attn_clr_enable(): Sets whether the general behavior is
* preventing attentions from being reasserted, or following the
* attributes of the specific attention.
*
- * @param cdev
- * @param clr_enable
+ * @cdev: Qed dev pointer.
+ * @clr_enable: Clear enable.
+ *
+ * Return: Void.
*
*/
void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable);
/**
- * @brief - Doorbell Recovery handler.
+ * qed_db_rec_handler(): Doorbell Recovery handler.
* Run doorbell recovery in case of PF overflow (and flush DORQ if
* needed).
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
*/
int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
@@ -192,7 +204,7 @@ int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_SB_EVENT_MASK 0x0003
#define SB_ALIGNED_SIZE(p_hwfn) \
- ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
+ ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
#define QED_SB_INVALID_IDX 0xffff
@@ -223,30 +235,34 @@ struct qed_igu_info {
};
/**
- * @brief - Make sure the IGU CAM reflects the resources provided by MFW
+ * qed_int_igu_reset_cam(): Make sure the IGU CAM reflects the resources
+ * provided by MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Void.
*/
int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Translate the weakly-defined client sb-id into an IGU sb-id
+ * qed_get_igu_sb_id(): Translate the weakly-defined client sb-id into
+ * an IGU sb-id.
*
- * @param p_hwfn
- * @param sb_id - user provided sb_id
+ * @p_hwfn: HW device data.
+ * @sb_id: user provided sb_id.
*
- * @return an index inside IGU CAM where the SB resides
+ * Return: An index inside IGU CAM where the SB resides.
*/
u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
- * @brief return a pointer to an unused valid SB
+ * qed_get_igu_free_sb(): Return a pointer to an unused valid SB.
*
- * @param p_hwfn
- * @param b_is_pf - true iff we want a SB belonging to a PF
+ * @p_hwfn: HW device data.
+ * @b_is_pf: True iff we want a SB belonging to a PF.
*
- * @return point to an igu_block, NULL if none is available
+ * Return: Pointer to an igu_block, NULL if none is available.
*/
struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn,
bool b_is_pf);
@@ -259,15 +275,15 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_int_igu_read_cam - Reads the IGU CAM.
+ * qed_int_igu_read_cam(): Reads the IGU CAM.
* This function needs to be called during hardware
* prepare. It reads the info from igu cam to know which
* status block is the default / base status block etc.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int
+ * Return: Int.
*/
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
@@ -275,24 +291,22 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn,
void *cookie);
/**
- * @brief qed_int_register_cb - Register callback func for
- * slowhwfn statusblock.
- *
- * Every protocol that uses the slowhwfn status block
- * should register a callback function that will be called
- * once there is an update of the sp status block.
- *
- * @param p_hwfn
- * @param comp_cb - function to be called when there is an
- * interrupt on the sp sb
- *
- * @param cookie - passed to the callback function
- * @param sb_idx - OUT parameter which gives the chosen index
- * for this protocol.
- * @param p_fw_cons - pointer to the actual address of the
- * consumer for this protocol.
- *
- * @return int
+ * qed_int_register_cb(): Register callback func for slowhwfn statusblock.
+ *
+ * @p_hwfn: HW device data.
+ * @comp_cb: Function to be called when there is an
+ * interrupt on the sp sb.
+ * @cookie: Passed to the callback function.
+ * @sb_idx: (OUT) parameter which gives the chosen index
+ * for this protocol.
+ * @p_fw_cons: Pointer to the actual address of the
+ * consumer for this protocol.
+ *
+ * Return: Int.
+ *
+ * Every protocol that uses the slowhwfn status block
+ * should register a callback function that will be called
+ * once there is an update of the sp status block.
*/
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
qed_int_comp_cb_t comp_cb,
@@ -301,37 +315,40 @@ int qed_int_register_cb(struct qed_hwfn *p_hwfn,
__le16 **p_fw_cons);
/**
- * @brief qed_int_unregister_cb - Unregisters callback
- * function from sp sb.
- * Partner of qed_int_register_cb -> should be called
- * when no longer required.
+ * qed_int_unregister_cb(): Unregisters callback function from sp sb.
+ *
+ * @p_hwfn: HW device data.
+ * @pi: Producer Index.
*
- * @param p_hwfn
- * @param pi
+ * Return: Int.
*
- * @return int
+ * Partner of qed_int_register_cb -> should be called
+ * when no longer required.
*/
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
u8 pi);
/**
- * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id.
+ * qed_int_get_sp_sb_id(): Get the slowhwfn sb id.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return u16
+ * Return: u16.
*/
u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
/**
- * @brief Status block cleanup. Should be called for each status
- * block that will be used -> both PF / VF
- *
- * @param p_hwfn
- * @param p_ptt
- * @param igu_sb_id - igu status block id
- * @param opaque - opaque fid of the sb owner.
- * @param b_set - set(1) / clear(0)
+ * qed_int_igu_init_pure_rt_single(): Status block cleanup.
+ * Should be called for each status
+ * block that will be used -> both PF / VF.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @igu_sb_id: IGU status block id.
+ * @opaque: Opaque fid of the sb owner.
+ * @b_set: Set(1) / Clear(0).
+ *
+ * Return: Void.
*/
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -340,15 +357,16 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
bool b_set);
/**
- * @brief qed_int_cau_conf - configure cau for a given status
- * block
- *
- * @param p_hwfn
- * @param ptt
- * @param sb_phys
- * @param igu_sb_id
- * @param vf_number
- * @param vf_valid
+ * qed_int_cau_conf_sb(): Configure cau for a given status block.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @sb_phys: SB physical address.
+ * @igu_sb_id: IGU status block id.
+ * @vf_number: VF number.
+ * @vf_valid: VF valid or not.
+ *
+ * Return: Void.
*/
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -358,52 +376,58 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
u8 vf_valid);
/**
- * @brief qed_int_alloc
+ * qed_int_alloc(): QED interrupt alloc.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int
+ * Return: Int.
*/
int qed_int_alloc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_int_free
+ * qed_int_free(): QED interrupt free.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_int_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_int_setup
+ * qed_int_setup(): QED interrupt setup.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*/
void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief - Enable Interrupt & Attention for hw function
+ * qed_int_igu_enable(): Enable Interrupt & Attention for hw function.
*
- * @param p_hwfn
- * @param p_ptt
- * @param int_mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @int_mode: Interrupt mode.
*
- * @return int
+ * Return: Int.
*/
int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
enum qed_int_mode int_mode);
/**
- * @brief - Initialize CAU status block entry
+ * qed_init_cau_sb_entry(): Initialize CAU status block entry.
+ *
+ * @p_hwfn: HW device data.
+ * @p_sb_entry: Pointer SB entry.
+ * @pf_id: PF number.
+ * @vf_number: VF number.
+ * @vf_valid: VF valid or not.
*
- * @param p_hwfn
- * @param p_sb_entry
- * @param pf_id
- * @param vf_number
- * @param vf_valid
+ * Return: Void.
*/
void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
struct cau_sb_entry *p_sb_entry,
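
qed_int_register_cb(), documented above, hands back both the chosen PI index
and the address of the firmware consumer. A minimal registration sketch
following that kernel-doc (the protocol struct is illustrative and the exact
parameter types are assumed from the documentation, not copied from the
driver):

struct my_proto {                       /* illustrative protocol state */
        u8 pi;
        __le16 *fw_cons;
};

static int my_proto_comp_cb(struct qed_hwfn *p_hwfn, void *cookie)
{
        struct my_proto *proto = cookie;
        u16 fw_cons = le16_to_cpu(*proto->fw_cons);

        /* A real handler would process completions up to fw_cons here. */
        (void)fw_cons;

        return 0;
}

static int my_proto_register(struct qed_hwfn *p_hwfn, struct my_proto *proto)
{
        return qed_int_register_cb(p_hwfn, my_proto_comp_cb, proto,
                                   &proto->pi, &proto->fw_cons);
}

Once the notification is no longer required, the documented partner call is
qed_int_unregister_cb(p_hwfn, proto->pi).
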
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h
new file mode 100644
index 000000000000..3ccdd3b1d8cb
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h
@@ -0,0 +1,500 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+
+#ifndef _QED_IRO_HSI_H
+#define _QED_IRO_HSI_H
+
+#include <linux/types.h>
+
+enum {
+ IRO_YSTORM_FLOW_CONTROL_MODE_GTT,
+ IRO_PSTORM_PKT_DUPLICATION_CFG,
+ IRO_TSTORM_PORT_STAT,
+ IRO_TSTORM_LL2_PORT_STAT,
+ IRO_TSTORM_PKT_DUPLICATION_CFG,
+ IRO_USTORM_VF_PF_CHANNEL_READY_GTT,
+ IRO_USTORM_FLR_FINAL_ACK_GTT,
+ IRO_USTORM_EQE_CONS_GTT,
+ IRO_USTORM_ETH_QUEUE_ZONE_GTT,
+ IRO_USTORM_COMMON_QUEUE_CONS_GTT,
+ IRO_XSTORM_PQ_INFO,
+ IRO_XSTORM_INTEG_TEST_DATA,
+ IRO_YSTORM_INTEG_TEST_DATA,
+ IRO_PSTORM_INTEG_TEST_DATA,
+ IRO_TSTORM_INTEG_TEST_DATA,
+ IRO_MSTORM_INTEG_TEST_DATA,
+ IRO_USTORM_INTEG_TEST_DATA,
+ IRO_XSTORM_OVERLAY_BUF_ADDR,
+ IRO_YSTORM_OVERLAY_BUF_ADDR,
+ IRO_PSTORM_OVERLAY_BUF_ADDR,
+ IRO_TSTORM_OVERLAY_BUF_ADDR,
+ IRO_MSTORM_OVERLAY_BUF_ADDR,
+ IRO_USTORM_OVERLAY_BUF_ADDR,
+ IRO_TSTORM_LL2_RX_PRODS_GTT,
+ IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT,
+ IRO_CORE_LL2_USTORM_PER_QUEUE_STAT,
+ IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT,
+ IRO_MSTORM_QUEUE_STAT,
+ IRO_MSTORM_TPA_TIMEOUT_US,
+ IRO_MSTORM_ETH_VF_PRODS,
+ IRO_MSTORM_ETH_PF_PRODS_GTT,
+ IRO_MSTORM_ETH_PF_STAT,
+ IRO_USTORM_QUEUE_STAT,
+ IRO_USTORM_ETH_PF_STAT,
+ IRO_PSTORM_QUEUE_STAT,
+ IRO_PSTORM_ETH_PF_STAT,
+ IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT,
+ IRO_TSTORM_ETH_PRS_INPUT,
+ IRO_ETH_RX_RATE_LIMIT,
+ IRO_TSTORM_ETH_RSS_UPDATE_GTT,
+ IRO_XSTORM_ETH_QUEUE_ZONE_GTT,
+ IRO_YSTORM_TOE_CQ_PROD,
+ IRO_USTORM_TOE_CQ_PROD,
+ IRO_USTORM_TOE_GRQ_PROD,
+ IRO_TSTORM_SCSI_CMDQ_CONS_GTT,
+ IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT,
+ IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT,
+ IRO_TSTORM_ISCSI_RX_STATS,
+ IRO_MSTORM_ISCSI_RX_STATS,
+ IRO_USTORM_ISCSI_RX_STATS,
+ IRO_XSTORM_ISCSI_TX_STATS,
+ IRO_YSTORM_ISCSI_TX_STATS,
+ IRO_PSTORM_ISCSI_TX_STATS,
+ IRO_TSTORM_FCOE_RX_STATS,
+ IRO_PSTORM_FCOE_TX_STATS,
+ IRO_PSTORM_RDMA_QUEUE_STAT,
+ IRO_TSTORM_RDMA_QUEUE_STAT,
+ IRO_XSTORM_RDMA_ASSERT_LEVEL,
+ IRO_YSTORM_RDMA_ASSERT_LEVEL,
+ IRO_PSTORM_RDMA_ASSERT_LEVEL,
+ IRO_TSTORM_RDMA_ASSERT_LEVEL,
+ IRO_MSTORM_RDMA_ASSERT_LEVEL,
+ IRO_USTORM_RDMA_ASSERT_LEVEL,
+ IRO_XSTORM_IWARP_RXMIT_STATS,
+ IRO_TSTORM_ROCE_EVENTS_STAT,
+ IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS,
+ IRO_YSTORM_ROCE_ERROR_STATS,
+ IRO_PSTORM_ROCE_DCQCN_SENT_STATS,
+ IRO_USTORM_ROCE_CQE_STATS,
+};
+
+/* Pstorm LightL2 queue statistics */
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].base \
+ + ((core_tx_stats_id) * IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE \
+ (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].size)
+
+/* Tstorm LightL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].base \
+ + ((core_rx_queue_id) * IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE \
+ (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].size)
+
+/* Ustorm LightL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].base \
+ + ((core_rx_queue_id) * IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE \
+ (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].size)
+
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+ (IRO[IRO_ETH_RX_RATE_LIMIT].base \
+ + ((pf_id) * IRO[IRO_ETH_RX_RATE_LIMIT].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[IRO_ETH_RX_RATE_LIMIT].size)
+
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_GTT_OFFSET(queue_id) \
+ (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].base \
+ + ((queue_id) * IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].m1))
+#define MSTORM_ETH_PF_PRODS_GTT_SIZE (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].size)
+
+/* Mstorm pf statistics */
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[IRO_MSTORM_ETH_PF_STAT].base \
+ + ((pf_id) * IRO[IRO_MSTORM_ETH_PF_STAT].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[IRO_MSTORM_ETH_PF_STAT].size)
+
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone
+ * size mode.
+ */
+#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
+ (IRO[IRO_MSTORM_ETH_VF_PRODS].base \
+ + ((vf_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m1) \
+ + ((vf_queue_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[IRO_MSTORM_ETH_VF_PRODS].size)
+
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_MSTORM_INTEG_TEST_DATA].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_MSTORM_INTEG_TEST_DATA].size)
+
+/* Mstorm iSCSI RX stats */
+#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_MSTORM_ISCSI_RX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_MSTORM_ISCSI_RX_STATS].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_MSTORM_ISCSI_RX_STATS].size)
+
+/* Mstorm overlay buffer host address */
+#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].base)
+#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[IRO_MSTORM_QUEUE_STAT].base \
+ + ((stat_counter_id) * IRO[IRO_MSTORM_QUEUE_STAT].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[IRO_MSTORM_QUEUE_STAT].size)
+
+/* Mstorm error level for assert */
+#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].m1))
+#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
+#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \
+ (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].base \
+ + ((storage_func_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \
+ + ((bdq_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \
+ (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].size)
+
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[IRO_MSTORM_TPA_TIMEOUT_US].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[IRO_MSTORM_TPA_TIMEOUT_US].size)
+
+/* Control frame's EthType configuration for TX control frame security */
+#define PSTORM_CTL_FRAME_ETHTYPE_GTT_OFFSET(ethtype_id) \
+ (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].base \
+ + ((ethtype_id) * IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_GTT_SIZE \
+ (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].size)
+
+/* Pstorm pf statistics */
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_ETH_PF_STAT].base \
+ + ((pf_id) * IRO[IRO_PSTORM_ETH_PF_STAT].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[IRO_PSTORM_ETH_PF_STAT].size)
+
+/* Pstorm FCoE TX stats */
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_FCOE_TX_STATS].base \
+ + ((pf_id) * IRO[IRO_PSTORM_FCOE_TX_STATS].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[IRO_PSTORM_FCOE_TX_STATS].size)
+
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_PSTORM_INTEG_TEST_DATA].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_PSTORM_INTEG_TEST_DATA].size)
+
+/* Pstorm iSCSI TX stats */
+#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_PSTORM_ISCSI_TX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_PSTORM_ISCSI_TX_STATS].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_PSTORM_ISCSI_TX_STATS].size)
+
+/* Pstorm overlay buffer host address */
+#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].base)
+#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Pstorm LL2 packet duplication configuration. Use pstorm_pkt_dup_cfg
+ * data type.
+ */
+#define PSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].base \
+ + ((pf_id) * IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].m1))
+#define PSTORM_PKT_DUPLICATION_CFG_SIZE \
+ (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].size)
+
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[IRO_PSTORM_QUEUE_STAT].base \
+ + ((stat_counter_id) * IRO[IRO_PSTORM_QUEUE_STAT].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_QUEUE_STAT].size)
+
+/* Pstorm error level for assert */
+#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].m1))
+#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Pstorm RDMA queue statistics */
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].base \
+ + ((rdma_stat_counter_id) * IRO[IRO_PSTORM_RDMA_QUEUE_STAT].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].size)
+
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE \
+ (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].size)
+
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[IRO_TSTORM_ETH_PRS_INPUT].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[IRO_TSTORM_ETH_PRS_INPUT].size)
+
+/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
+ * Use eth_tstorm_rss_update_data for update.
+ */
+#define TSTORM_ETH_RSS_UPDATE_GTT_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].base \
+ + ((pf_id) * IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].m1))
+#define TSTORM_ETH_RSS_UPDATE_GTT_SIZE \
+ (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].size)
+
+/* Tstorm FCoE RX stats */
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_FCOE_RX_STATS].base \
+ + ((pf_id) * IRO[IRO_TSTORM_FCOE_RX_STATS].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[IRO_TSTORM_FCOE_RX_STATS].size)
+
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_TSTORM_INTEG_TEST_DATA].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_TSTORM_INTEG_TEST_DATA].size)
+
+/* Tstorm iSCSI RX stats */
+#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_TSTORM_ISCSI_RX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_TSTORM_ISCSI_RX_STATS].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_TSTORM_ISCSI_RX_STATS].size)
+
+/* Tstorm ll2 port statistics */
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
+ (IRO[IRO_TSTORM_LL2_PORT_STAT].base \
+ + ((port_id) * IRO[IRO_TSTORM_LL2_PORT_STAT].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE (IRO[IRO_TSTORM_LL2_PORT_STAT].size)
+
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_GTT_OFFSET(core_rx_queue_id) \
+ (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].base \
+ + ((core_rx_queue_id) * IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].m1))
+#define TSTORM_LL2_RX_PRODS_GTT_SIZE (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].size)
+
+/* Tstorm overlay buffer host address */
+#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].base)
+#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Tstorm LL2 packet duplication configuration.
+ * Use tstorm_pkt_dup_cfg data type.
+ */
+#define TSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].base \
+ + ((pf_id) * IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].m1))
+#define TSTORM_PKT_DUPLICATION_CFG_SIZE \
+ (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].size)
+
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
+ (IRO[IRO_TSTORM_PORT_STAT].base \
+ + ((port_id) * IRO[IRO_TSTORM_PORT_STAT].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[IRO_TSTORM_PORT_STAT].size)
+
+/* Tstorm error level for assert */
+#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].m1))
+#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].base \
+ + ((rdma_stat_counter_id) * IRO[IRO_TSTORM_RDMA_QUEUE_STAT].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].size)
+
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+ (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].base \
+ + ((roce_pf_id) * IRO[IRO_TSTORM_ROCE_EVENTS_STAT].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].size)
+
+/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
+ * BDqueue-id.
+ */
+#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \
+ (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].base \
+ + ((storage_func_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \
+ + ((bdq_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \
+ (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].size)
+
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_GTT_OFFSET(cmdq_queue_id) \
+ (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].base \
+ + ((cmdq_queue_id) * IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].m1))
+#define TSTORM_SCSI_CMDQ_CONS_GTT_SIZE \
+ (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].size)
+
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_GTT_OFFSET(queue_zone_id) \
+ (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].base \
+ + ((queue_zone_id) * IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].m1))
+#define USTORM_COMMON_QUEUE_CONS_GTT_SIZE \
+ (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].size)
+
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_GTT_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_EQE_CONS_GTT].base \
+ + ((pf_id) * IRO[IRO_USTORM_EQE_CONS_GTT].m1))
+#define USTORM_EQE_CONS_GTT_SIZE (IRO[IRO_USTORM_EQE_CONS_GTT].size)
+
+/* Ustorm pf statistics */
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_ETH_PF_STAT].base \
+ + ((pf_id) * IRO[IRO_USTORM_ETH_PF_STAT].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[IRO_USTORM_ETH_PF_STAT].size)
+
+/* Ustorm eth queue zone */
+#define USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_zone_id) \
+ (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].base \
+ + ((queue_zone_id) * IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].m1))
+#define USTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].size)
+
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_GTT_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].base \
+ + ((pf_id) * IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].m1))
+#define USTORM_FLR_FINAL_ACK_GTT_SIZE (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].size)
+
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_USTORM_INTEG_TEST_DATA].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_USTORM_INTEG_TEST_DATA].size)
+
+/* Ustorm iSCSI RX stats */
+#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_USTORM_ISCSI_RX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_USTORM_ISCSI_RX_STATS].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_USTORM_ISCSI_RX_STATS].size)
+
+/* Ustorm overlay buffer host address */
+#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].base)
+#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].size)
+
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[IRO_USTORM_QUEUE_STAT].base \
+ + ((stat_counter_id) * IRO[IRO_USTORM_QUEUE_STAT].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[IRO_USTORM_QUEUE_STAT].size)
+
+/* Ustorm error level for assert */
+#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].m1))
+#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].size)
+
+/* RoCE CQEs Statistics */
+#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_USTORM_ROCE_CQE_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_USTORM_ROCE_CQE_STATS].m1))
+#define USTORM_ROCE_CQE_STATS_SIZE (IRO[IRO_USTORM_ROCE_CQE_STATS].size)
+
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[IRO_USTORM_TOE_CQ_PROD].base \
+ + ((rss_id) * IRO[IRO_USTORM_TOE_CQ_PROD].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[IRO_USTORM_TOE_CQ_PROD].size)
+
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_TOE_GRQ_PROD].base \
+ + ((pf_id) * IRO[IRO_USTORM_TOE_GRQ_PROD].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[IRO_USTORM_TOE_GRQ_PROD].size)
+
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_GTT_OFFSET(vf_id) \
+ (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].base \
+ + ((vf_id) * IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].m1))
+#define USTORM_VF_PF_CHANNEL_READY_GTT_SIZE \
+ (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].size)
+
+/* Xstorm queue zone */
+#define XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_id) \
+ (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].base \
+ + ((queue_id) * IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].m1))
+#define XSTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].size)
+
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_XSTORM_INTEG_TEST_DATA].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_XSTORM_INTEG_TEST_DATA].size)
+
+/* Xstorm iSCSI TX stats */
+#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_XSTORM_ISCSI_TX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_XSTORM_ISCSI_TX_STATS].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_XSTORM_ISCSI_TX_STATS].size)
+
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
+ (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].base \
+ + ((pf_id) * IRO[IRO_XSTORM_IWARP_RXMIT_STATS].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].size)
+
+/* Xstorm overlay buffer host address */
+#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].base)
+#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Xstorm common PQ info */
+#define XSTORM_PQ_INFO_OFFSET(pq_id) \
+ (IRO[IRO_XSTORM_PQ_INFO].base \
+ + ((pq_id) * IRO[IRO_XSTORM_PQ_INFO].m1))
+#define XSTORM_PQ_INFO_SIZE (IRO[IRO_XSTORM_PQ_INFO].size)
+
+/* Xstorm error level for assert */
+#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].m1))
+#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_GTT_OFFSET \
+ (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].base)
+#define YSTORM_FLOW_CONTROL_MODE_GTT_SIZE \
+ (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].size)
+
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_YSTORM_INTEG_TEST_DATA].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_YSTORM_INTEG_TEST_DATA].size)
+
+/* Ystorm iSCSI TX stats */
+#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_YSTORM_ISCSI_TX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_YSTORM_ISCSI_TX_STATS].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_YSTORM_ISCSI_TX_STATS].size)
+
+/* Ystorm overlay buffer host address */
+#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].base)
+#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Ystorm error level for assert */
+#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].m1))
+#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE \
+ (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].size)
+
+/* RoCE Error Statistics */
+#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_YSTORM_ROCE_ERROR_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_ERROR_STATS].m1))
+#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[IRO_YSTORM_ROCE_ERROR_STATS].size)
+
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[IRO_YSTORM_TOE_CQ_PROD].base \
+ + ((rss_id) * IRO[IRO_YSTORM_TOE_CQ_PROD].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[IRO_YSTORM_TOE_CQ_PROD].size)
+
+/* Per-chip offsets in iro_arr in dwords */
+#define E4_IRO_ARR_OFFSET 0
+#endif
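
The IRO indirection used throughout this new header may be easier to follow with a small stand-alone sketch: each *_OFFSET macro is just an affine computation over one IRO entry, a base plus per-index strides (m1/m2). The struct layout and every numeric value below are illustrative assumptions for demonstration, not the firmware-generated table the driver actually links against.

#include <stdio.h>
#include <stdint.h>

struct iro {
	uint32_t base;	/* RAM offset of element 0 */
	uint16_t m1;	/* stride for the first index (e.g. pf_id) */
	uint16_t m2;	/* stride for the second index (e.g. bdq_id) */
	uint16_t size;	/* size of one element */
};

/* Hypothetical two-entry table standing in for the FW-generated array */
static const struct iro IRO[] = {
	{ 0x1000, 0x40, 0x0, 0x20 },	/* pretend MSTORM_ETH_PF_STAT */
	{ 0x3000, 0x80, 0x8, 0x08 },	/* pretend MSTORM_SCSI_BDQ_EXT_PROD */
};

#define DEMO_PF_STAT_OFFSET(pf_id) \
	(IRO[0].base + ((pf_id) * IRO[0].m1))
#define DEMO_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
	(IRO[1].base + ((func_id) * IRO[1].m1) + ((bdq_id) * IRO[1].m2))

int main(void)
{
	/* PF 2 statistics: 0x1000 + 2 * 0x40 = 0x1080 */
	printf("pf stat offset:  0x%x\n", DEMO_PF_STAT_OFFSET(2));
	/* func 1, bdq 3: 0x3000 + 0x80 + 3 * 0x8 = 0x3098 */
	printf("bdq prod offset: 0x%x\n", DEMO_BDQ_EXT_PROD_OFFSET(1, 3));
	return 0;
}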
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index db926d8b3033..511ab214eb9c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -29,6 +29,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_iro_hsi.h"
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
@@ -627,10 +628,9 @@ static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
+ MSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
@@ -642,10 +642,9 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+ TSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
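
GET_GTT_BDQ_REG_ADDR() is used in the hunk above but defined outside it. A plausible shape, assuming it token-pastes the _GTT_OFFSET macros from qed_iro_hsi.h onto a BAR0 GTT window base, is sketched below; the stand-in offset macro and all constants are made up so the example compiles on its own.

#include <stdio.h>

/* Stand-in for an IRO-derived offset macro from qed_iro_hsi.h
 * (strides are invented so this compiles on its own).
 */
#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(func, bdq) \
	(0x100u + (func) * 0x20u + (bdq) * 0x8u)

/* Assumed shape of the helper used above: paste the name onto its
 * _GTT_OFFSET macro and add the BAR0 GTT window base.
 */
#define GET_GTT_BDQ_REG_ADDR(base, name, func, bdq) \
	((base) + name##_GTT_OFFSET((func), (bdq)))

#define GTT_BAR0_MAP_REG_MSDM_RAM 0x1d00000u	/* illustrative base */

int main(void)
{
	/* 0x1d00000 + 0x100 + 1 * 0x20 + 2 * 0x8 = 0x1d00130 */
	printf("0x%x\n",
	       GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
				    MSTORM_SCSI_BDQ_EXT_PROD, 1, 2));
	return 0;
}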
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
index dab7a5d09f87..dec2b00259d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
@@ -34,10 +34,13 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn);
void qed_iscsi_free(struct qed_hwfn *p_hwfn);
/**
- * @brief - Fills provided statistics struct with statistics.
+ * qed_get_protocol_stats_iscsi(): Fills provided statistics
+ * struct with statistics.
*
- * @param cdev
- * @param stats - points to struct that will be filled with statistics.
+ * @cdev: Qed dev pointer.
+ * @stats: Points to struct that will be filled with statistics.
+ *
+ * Return: Void.
*/
void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index fc8b3e64f153..1d1d4caad680 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -114,6 +114,8 @@ qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+ p_ramrod->tcp.tx_sws_timer = cpu_to_le16(QED_TX_SWS_TIMER_DFLT);
+ p_ramrod->tcp.two_msl_timer = cpu_to_le32(QED_TWO_MSL_TIMER_DFLT);
p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
return;
@@ -1297,6 +1299,14 @@ qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
prev_weight = weight;
while (weight) {
+ /* If the HW device is during recovery, all resources are
+ * immediately reset without receiving a per-cid indication
+ * from HW. In this case we don't expect the cid_map to be
+ * cleared.
+ */
+ if (p_hwfn->cdev->recov_in_prog)
+ return 0;
+
msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
weight = bitmap_weight(bmap->bitmap, bmap->max_count);
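
The added early return turns the cid_map wait into a poll loop with two exits: the map drains normally, or recovery resets everything at once and there is nothing left to wait for. A rough stand-alone sketch of that pattern follows; the predicate, interval, and bound are placeholders, not the driver's values.

#include <stdbool.h>
#include <unistd.h>

/* Poll until 'pending' drains, but give up immediately when the device
 * has entered recovery: resources are reset wholesale there, so the map
 * will never drain on its own. Interval and bound are illustrative.
 */
static int wait_for_drain(volatile unsigned int *pending,
			  volatile bool *recov_in_prog)
{
	int tries = 100;		/* ~1s total, placeholder bound */

	while (*pending) {
		if (*recov_in_prog)
			return 0;	/* nothing left to wait for */
		if (--tries == 0)
			return -1;	/* timed out */
		usleep(10 * 1000);	/* 10 ms poll interval */
	}
	return 0;
}

int main(void)
{
	unsigned int pending = 0;
	bool recov = false;

	return wait_for_drain(&pending, &recov);
}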
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index ba8c7a31cce1..2edd6bf64a3c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -28,6 +28,7 @@
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
@@ -37,7 +38,6 @@
#include "qed_sp.h"
#include "qed_sriov.h"
-
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
@@ -904,9 +904,10 @@ qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
{
u32 init_prod_val = 0;
- *pp_prod = p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
+ *pp_prod = (u8 __iomem *)
+ p_hwfn->regview +
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
+ MSTORM_ETH_PF_PRODS, p_cid->abs.queue_id);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
@@ -1111,7 +1112,6 @@ qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
{
int rc;
-
rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
pbl_addr, pbl_size,
qed_get_cm_pq_idx_mcos(p_hwfn, tc));
@@ -2010,7 +2010,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_cb,
struct qed_ntuple_filter_params *p_params)
{
- struct rx_update_gft_filter_data *p_ramrod = NULL;
+ struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u16 abs_rx_q_id = 0;
@@ -2031,7 +2031,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
}
rc = qed_sp_init_request(p_hwfn, &p_ent,
- ETH_RAMROD_GFT_UPDATE_FILTER,
+ ETH_RAMROD_RX_UPDATE_GFT_FILTER,
PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -2100,7 +2100,7 @@ int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
CAU_SB_ENTRY_TIMER_RES0);
address = BAR0_MAP_REG_USDM_RAM +
- USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
coalesce = qed_rd(p_hwfn, p_ptt, address);
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
@@ -2134,7 +2134,7 @@ int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
CAU_SB_ENTRY_TIMER_RES1);
address = BAR0_MAP_REG_XSDM_RAM +
- XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
coalesce = qed_rd(p_hwfn, p_ptt, address);
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
@@ -2848,7 +2848,7 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
cqe);
}
-static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac)
+static int qed_req_bulletin_update_mac(struct qed_dev *cdev, const u8 *mac)
{
int i, ret;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 8eceeebb1a7b..a538cf478c14 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -92,18 +92,18 @@ struct qed_filter_mcast {
};
/**
- * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue
+ * qed_eth_rx_queue_stop(): This ramrod closes an Rx queue.
*
- * @param p_hwfn
- * @param p_rxq Handler of queue to close
- * @param eq_completion_only If True completion will be on
- * EQe, if False completion will be
- * on EQe if p_hwfn opaque
- * different from the RXQ opaque
- * otherwise on CQe.
- * @param cqe_completion If True completion will be
- * receive on CQe.
- * @return int
+ * @p_hwfn: HW device data.
+ * @p_rxq: Handle of the queue to close.
+ * @eq_completion_only: If True, completion will be on
+ * EQe; if False, completion will be
+ * on EQe if the p_hwfn opaque
+ * differs from the RXQ opaque,
+ * otherwise on CQe.
+ * @cqe_completion: If True, completion will be received on CQe.
+ *
+ * Return: Int.
*/
int
qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
@@ -111,12 +111,12 @@ qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
bool eq_completion_only, bool cqe_completion);
/**
- * @brief qed_eth_tx_queue_stop - closes a Tx queue
+ * qed_eth_tx_queue_stop(): Closes a Tx queue.
*
- * @param p_hwfn
- * @param p_txq - handle to Tx queue needed to be closed
+ * @p_hwfn: HW device data.
+ * @p_txq: Handle of the Tx queue to be closed.
*
- * @return int
+ * Return: Int.
*/
int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);
@@ -146,7 +146,6 @@ struct qed_sp_vport_start_params {
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
-
struct qed_filter_accept_flags {
u8 update_rx_mode_config;
u8 update_tx_mode_config;
@@ -205,16 +204,15 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_comp_data);
/**
- * @brief qed_sp_vport_stop -
- *
- * This ramrod closes a VPort after all its RX and TX queues are terminated.
- * An Assert is generated if any queues are left open.
+ * qed_sp_vport_stop(): This ramrod closes a VPort after all its
+ * RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
*
- * @param p_hwfn
- * @param opaque_fid
- * @param vport_id VPort ID
+ * @p_hwfn: HW device data.
+ * @opaque_fid: Opaque FID.
+ * @vport_id: VPort ID.
*
- * @return int
+ * Return: Int.
*/
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
@@ -225,22 +223,21 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_comp_data);
/**
- * @brief qed_sp_rx_eth_queues_update -
+ * qed_sp_eth_rx_queues_update(): This ramrod updates an RX queue.
+ * It is used for setting the active state
+ * of the queue and updating the TPA and
+ * SGE parameters.
+ *
+ * @p_hwfn: HW device data.
+ * @pp_rxq_handlers: An array of queue handlers to be updated.
+ * @num_rxqs: Number of queues to update.
+ * @complete_cqe_flg: Post completion to the CQE Ring if set.
+ * @complete_event_flg: Post completion to the Event Ring if set.
+ * @comp_mode: Completion mode.
+ * @p_comp_data: Pointer to completion data.
*
- * This ramrod updates an RX queue. It is used for setting the active state
- * of the queue and updating the TPA and SGE parameters.
+ * Return: Int.
*
- * @note At the moment - only used by non-linux VFs.
- *
- * @param p_hwfn
- * @param pp_rxq_handlers An array of queue handlers to be updated.
- * @param num_rxqs number of queues to update.
- * @param complete_cqe_flg Post completion to the CQE Ring if set
- * @param complete_event_flg Post completion to the Event Ring if set
- * @param comp_mode
- * @param p_comp_data
- *
- * @return int
+ * Note: At the moment this is only used by non-Linux VFs.
*/
int
@@ -257,30 +254,32 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
void qed_reset_vport_stats(struct qed_dev *cdev);
/**
- * *@brief qed_arfs_mode_configure -
- *
- **Enable or disable rfs mode. It must accept atleast one of tcp or udp true
- **and atleast one of ipv4 or ipv6 true to enable rfs mode.
+ * qed_arfs_mode_configure(): Enable or disable aRFS mode.
+ * It must accept at least one of tcp or udp true
+ * and at least one of ipv4 or ipv6 true to enable
+ * aRFS mode.
*
- **@param p_hwfn
- **@param p_ptt
- **@param p_cfg_params - arfs mode configuration parameters.
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window for register access.
+ * @p_cfg_params: aRFS mode configuration parameters.
*
+ * Return: Void.
*/
void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_arfs_config_params *p_cfg_params);
/**
- * @brief - qed_configure_rfs_ntuple_filter
+ * qed_configure_rfs_ntuple_filter(): This ramrod should be used to add
+ * or remove an aRFS HW filter.
*
- * This ramrod should be used to add or remove arfs hw filter
+ * @p_hwfn: HW device data.
+ * @p_cb: Used for QED_SPQ_MODE_CB, where client would initialize
+ * it with cookie and callback function address, if not
+ * using this mode then client must pass NULL.
+ * @p_params: Pointer to params.
*
- * @params p_hwfn
- * @params p_cb - Used for QED_SPQ_MODE_CB,where client would initialize
- * it with cookie and callback function address, if not
- * using this mode then client must pass NULL.
- * @params p_params
+ * Return: Int.
*/
int
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
@@ -374,16 +373,17 @@ qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
/**
- * @brief - Starts an Rx queue, when queue_cid is already prepared
+ * qed_eth_rxq_start_ramrod(): Starts an Rx queue, when queue_cid is
+ * already prepared.
*
- * @param p_hwfn
- * @param p_cid
- * @param bd_max_bytes
- * @param bd_chain_phys_addr
- * @param cqe_pbl_addr
- * @param cqe_pbl_size
+ * @p_hwfn: HW device data.
+ * @p_cid: Pointer to the queue CID.
+ * @bd_max_bytes: Maximum number of bytes per BD buffer.
+ * @bd_chain_phys_addr: BD chain physical address.
+ * @cqe_pbl_addr: CQE PBL address.
+ * @cqe_pbl_size: CQE PBL size.
*
- * @return int
+ * Return: Int.
*/
int
qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
@@ -393,15 +393,16 @@ qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
/**
- * @brief - Starts a Tx queue, where queue_cid is already prepared
+ * qed_eth_txq_start_ramrod(): Starts a Tx queue, where queue_cid is
+ * already prepared.
*
- * @param p_hwfn
- * @param p_cid
- * @param pbl_addr
- * @param pbl_size
- * @param p_pq_params - parameters for choosing the PQ for this Tx queue
+ * @p_hwfn: HW device data.
+ * @p_cid: Pointer CID.
+ * @pbl_addr: PBL address.
+ * @pbl_size: PBL size.
+ * @pq_id: ID of the physical queue (PQ) chosen for this Tx queue.
*
- * @return int
+ * Return: Int.
*/
int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index c46a7f756ed5..ed274f033626 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -28,6 +28,7 @@
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
@@ -43,6 +44,8 @@
#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)
+#define QED_LL2_INVALID_STATS_ID 0xff
+
struct qed_cb_ll2_info {
int rx_cnt;
u32 rx_size;
@@ -62,6 +65,29 @@ struct qed_ll2_buffer {
dma_addr_t phys_addr;
};
+static u8 qed_ll2_handle_to_stats_id(struct qed_hwfn *p_hwfn,
+ u8 ll2_queue_type, u8 qid)
+{
+ u8 stats_id;
+
+ /* For legacy (RAM based) queues, the stats_id will be set as the
+ * queue_id. Otherwise (context based queue), it will be set to
+ * the "abs_pf_id" offset from the end of the RAM based queue IDs.
+ * If the final value exceeds the total counters amount, return
+ * INVALID value to indicate that the stats for this connection should
+ * be disabled.
+ */
+ if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
+ stats_id = qid;
+ else
+ stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + p_hwfn->abs_pf_id;
+
+ if (stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS)
+ return stats_id;
+ else
+ return QED_LL2_INVALID_STATS_ID;
+}
+
static void qed_ll2b_complete_tx_packet(void *cxt,
u8 connection_handle,
void *cookie,
@@ -106,7 +132,7 @@ static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
}
static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
- struct qed_ll2_buffer *buffer)
+ struct qed_ll2_buffer *buffer)
{
spin_lock_bh(&cdev->ll2->lock);
@@ -352,7 +378,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
num_bds_in_packet = p_pkt->bd_used;
list_del(&p_pkt->list_entry);
- if (num_bds < num_bds_in_packet) {
+ if (unlikely(num_bds < num_bds_in_packet)) {
DP_NOTICE(p_hwfn,
"Rest of BDs does not cover whole packet\n");
goto out;
@@ -462,7 +488,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
if (!list_empty(&p_rx->active_descq))
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
- if (!p_pkt) {
+ if (unlikely(!p_pkt)) {
DP_NOTICE(p_hwfn,
"[%d] LL2 Rx completion but active_descq is empty\n",
p_ll2_conn->input.conn_type);
@@ -475,7 +501,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
else
qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
- if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+ if (unlikely(qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd))
DP_NOTICE(p_hwfn,
"Mismatch between active_descq and the LL2 Rx chain\n");
@@ -597,18 +623,18 @@ static bool
qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
struct core_rx_slow_path_cqe *p_cqe)
{
- struct ooo_opaque *iscsi_ooo;
+ struct ooo_opaque *ooo_opq;
u32 cid;
if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
return false;
- iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
- if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
+ ooo_opq = (struct ooo_opaque *)&p_cqe->opaque_data;
+ if (ooo_opq->ooo_opcode != TCP_EVENT_DELETE_ISLES)
return false;
/* Need to make a flush */
- cid = le32_to_cpu(iscsi_ooo->cid);
+ cid = le32_to_cpu(ooo_opq->cid);
qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
return true;
@@ -624,7 +650,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
union core_rx_cqe_union *cqe = NULL;
u16 cq_new_idx = 0, cq_old_idx = 0;
struct qed_ooo_buffer *p_buffer;
- struct ooo_opaque *iscsi_ooo;
+ struct ooo_opaque *ooo_opq;
u8 placement_offset = 0;
u8 cqe_type;
@@ -645,7 +671,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
&cqe->rx_cqe_sp))
continue;
- if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
+ if (unlikely(cqe_type != CORE_RX_CQE_TYPE_REGULAR)) {
DP_NOTICE(p_hwfn,
"Got a non-regular LB LL2 completion [type 0x%02x]\n",
cqe_type);
@@ -657,22 +683,21 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
packet_length = le16_to_cpu(p_cqe_fp->packet_length);
vlan = le16_to_cpu(p_cqe_fp->vlan);
- iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
- qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
- iscsi_ooo);
- cid = le32_to_cpu(iscsi_ooo->cid);
+ ooo_opq = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
+ qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, ooo_opq);
+ cid = le32_to_cpu(ooo_opq->cid);
/* Process delete isle first */
- if (iscsi_ooo->drop_size)
+ if (ooo_opq->drop_size)
qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
- iscsi_ooo->drop_isle,
- iscsi_ooo->drop_size);
+ ooo_opq->drop_isle,
+ ooo_opq->drop_size);
- if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
+ if (ooo_opq->ooo_opcode == TCP_EVENT_NOP)
continue;
/* Now process create/add/join isles */
- if (list_empty(&p_rx->active_descq)) {
+ if (unlikely(list_empty(&p_rx->active_descq))) {
DP_NOTICE(p_hwfn,
"LL2 OOO RX chain has no submitted buffers\n"
);
@@ -682,12 +707,12 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
- if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
- if (!p_pkt) {
+ if (likely(ooo_opq->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE ||
+ ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT ||
+ ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT ||
+ ooo_opq->ooo_opcode == TCP_EVENT_ADD_PEN ||
+ ooo_opq->ooo_opcode == TCP_EVENT_JOIN)) {
+ if (unlikely(!p_pkt)) {
DP_NOTICE(p_hwfn,
"LL2 OOO RX packet is not valid\n");
return -EIO;
@@ -701,19 +726,19 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
qed_chain_consume(&p_rx->rxq_chain);
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
- switch (iscsi_ooo->ooo_opcode) {
+ switch (ooo_opq->ooo_opcode) {
case TCP_EVENT_ADD_NEW_ISLE:
qed_ooo_add_new_isle(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle,
+ ooo_opq->ooo_isle,
p_buffer);
break;
case TCP_EVENT_ADD_ISLE_RIGHT:
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle,
+ ooo_opq->ooo_isle,
p_buffer,
QED_OOO_RIGHT_BUF);
break;
@@ -721,7 +746,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle,
+ ooo_opq->ooo_isle,
p_buffer,
QED_OOO_LEFT_BUF);
break;
@@ -729,13 +754,12 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle +
- 1,
+ ooo_opq->ooo_isle + 1,
p_buffer,
QED_OOO_LEFT_BUF);
qed_ooo_join_isles(p_hwfn,
p_hwfn->p_ooo_info,
- cid, iscsi_ooo->ooo_isle);
+ cid, ooo_opq->ooo_isle);
break;
case TCP_EVENT_ADD_PEN:
num_ooo_add_to_peninsula++;
@@ -747,7 +771,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
} else {
DP_NOTICE(p_hwfn,
"Unexpected event (%d) TX OOO completion\n",
- iscsi_ooo->ooo_opcode);
+ ooo_opq->ooo_opcode);
}
}
@@ -859,16 +883,16 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
u16 new_idx = 0, num_bds = 0;
int rc;
- if (!p_ll2_conn)
+ if (unlikely(!p_ll2_conn))
return 0;
- if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+ if (unlikely(!QED_LL2_TX_REGISTERED(p_ll2_conn)))
return 0;
new_idx = le16_to_cpu(*p_tx->p_fw_cons);
num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
- if (!num_bds)
+ if (unlikely(!num_bds))
return 0;
while (num_bds) {
@@ -877,10 +901,10 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
p_pkt = list_first_entry(&p_tx->active_descq,
struct qed_ll2_tx_packet, list_entry);
- if (!p_pkt)
+ if (unlikely(!p_pkt))
return -EINVAL;
- if (p_pkt->bd_used != 1) {
+ if (unlikely(p_pkt->bd_used != 1)) {
DP_NOTICE(p_hwfn,
"Unexpectedly many BDs(%d) in TX OOO completion\n",
p_pkt->bd_used);
@@ -1008,7 +1032,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
- if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
+ if (likely(p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO))
p_ll2_conn->tx_stats_en = 0;
else
p_ll2_conn->tx_stats_en = 1;
@@ -1124,6 +1148,7 @@ static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+
qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg);
/* Get SPQ entry */
@@ -1533,7 +1558,7 @@ static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
- struct e4_core_conn_context *p_cxt;
+ struct core_conn_context *p_cxt;
struct qed_ll2_tx_packet *p_pkt;
struct qed_ll2_info *p_ll2_conn;
struct qed_hwfn *p_hwfn = cxt;
@@ -1544,7 +1569,7 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
int rc = -EINVAL;
u32 i, capacity;
size_t desc_size;
- u8 qid;
+ u8 qid, stats_id;
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
@@ -1610,16 +1635,32 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
p_ll2_conn->input.rx_conn_type);
+ stats_id = qed_ll2_handle_to_stats_id(p_hwfn,
+ p_ll2_conn->input.rx_conn_type,
+ qid);
p_ll2_conn->queue_id = qid;
- p_ll2_conn->tx_stats_id = qid;
+ p_ll2_conn->tx_stats_id = stats_id;
- DP_VERBOSE(p_hwfn, QED_MSG_LL2,
- "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n",
- p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid);
+ /* If there is no valid stats id for this connection, disable stats */
+ if (p_ll2_conn->tx_stats_id == QED_LL2_INVALID_STATS_ID) {
+ p_ll2_conn->tx_stats_en = 0;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_LL2,
+ "Disabling stats for queue %d - not enough counters\n",
+ qid);
+ }
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_LL2,
+ "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d stats_id=%d\n",
+ p_hwfn->rel_pf_id,
+ p_ll2_conn->input.rx_conn_type, qid, stats_id);
if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
- p_rx->set_prod_addr = p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid);
+ p_rx->set_prod_addr =
+ (u8 __iomem *)p_hwfn->regview +
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+ TSTORM_LL2_RX_PRODS, qid);
} else {
/* QED_LL2_RX_TYPE_CTX - using doorbell */
p_rx->ctx_based = 1;
@@ -1762,7 +1803,7 @@ int qed_ll2_post_rx_buffer(void *cxt,
}
}
- /* If we're lacking entires, let's try to flush buffers to FW */
+ /* If we're lacking entries, let's try to flush buffers to FW */
if (!p_curp || !p_curb) {
rc = -EBUSY;
p_curp = NULL;
@@ -1842,8 +1883,8 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
}
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
- if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
- p_ll2->input.conn_type == QED_LL2_TYPE_OOO) {
+ if (likely(QED_IS_IWARP_PERSONALITY(p_hwfn) &&
+ p_ll2->input.conn_type == QED_LL2_TYPE_OOO)) {
start_bd->nw_vlan_or_lb_echo =
cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
} else {
@@ -1964,28 +2005,29 @@ int qed_ll2_prepare_tx_packet(void *cxt,
int rc = 0;
p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
- if (!p_ll2_conn)
+ if (unlikely(!p_ll2_conn))
return -EINVAL;
p_tx = &p_ll2_conn->tx_queue;
p_tx_chain = &p_tx->txq_chain;
- if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)
+ if (unlikely(pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet))
return -EIO;
spin_lock_irqsave(&p_tx->lock, flags);
- if (p_tx->cur_send_packet) {
+ if (unlikely(p_tx->cur_send_packet)) {
rc = -EEXIST;
goto out;
}
/* Get entry, but only if we have tx elements for it */
- if (!list_empty(&p_tx->free_descq))
+ if (unlikely(!list_empty(&p_tx->free_descq)))
p_curp = list_first_entry(&p_tx->free_descq,
struct qed_ll2_tx_packet, list_entry);
- if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
+ if (unlikely(p_curp &&
+ qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds))
p_curp = NULL;
- if (!p_curp) {
+ if (unlikely(!p_curp)) {
rc = -EBUSY;
goto out;
}
@@ -2014,16 +2056,16 @@ int qed_ll2_set_fragment_of_tx_packet(void *cxt,
unsigned long flags;
p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
- if (!p_ll2_conn)
+ if (unlikely(!p_ll2_conn))
return -EINVAL;
- if (!p_ll2_conn->tx_queue.cur_send_packet)
+ if (unlikely(!p_ll2_conn->tx_queue.cur_send_packet))
return -EINVAL;
p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
- if (cur_send_frag_num >= p_cur_send_packet->bd_used)
+ if (unlikely(cur_send_frag_num >= p_cur_send_packet->bd_used))
return -EINVAL;
/* Fill the BD information, and possibly notify FW */
@@ -2609,7 +2651,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
DP_NOTICE(cdev, "Failed to add an LLH filter\n");
goto err3;
}
-
}
ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
@@ -2651,7 +2692,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
*/
nr_frags = skb_shinfo(skb)->nr_frags;
- if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+ if (unlikely(1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET)) {
DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
1 + nr_frags);
return -EINVAL;
@@ -2693,7 +2734,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
*/
rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
&pkt, 1);
- if (rc)
+ if (unlikely(rc))
goto err;
for (i = 0; i < nr_frags; i++) {
@@ -2717,7 +2758,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
/* if failed not much to do here, partial packet has been posted
* we can't free memory, will need to wait for completion
*/
- if (rc)
+ if (unlikely(rc))
goto err2;
}
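
Many hunks in this file wrap error checks in unlikely() and the hot case in likely(). These are branch-prediction hints that reduce to __builtin_expect() on GCC/Clang; they change code layout, not semantics. A minimal compilable illustration outside the kernel:

#include <stdio.h>

/* Outside the kernel, likely()/unlikely() can be approximated with the
 * same builtin the kernel uses; the condition's value is unchanged,
 * only the compiler's block layout is.
 */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

static int consume(int got, int need)
{
	if (unlikely(got < need)) {	/* cold error path */
		fprintf(stderr, "short completion: %d < %d\n", got, need);
		return -1;
	}
	return need;			/* hot path falls straight through */
}

int main(void)
{
	if (consume(4, 2) != 2)
		return 1;
	return consume(1, 2) == -1 ? 0 : 1;
}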
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index df88d00053a2..0bfc375161ed 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -32,7 +32,6 @@
#define QED_LL2_LEGACY_CONN_BASE_PF 0
#define QED_LL2_CTX_CONN_BASE_PF QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF
-
struct qed_ll2_rx_packet {
struct list_head list_entry;
struct core_rx_bd_with_buff_len *rxq_bd;
@@ -119,41 +118,41 @@ struct qed_ll2_info {
extern const struct qed_ll2_ops qed_ll2_ops_pass;
/**
- * @brief qed_ll2_acquire_connection - allocate resources,
- * starts rx & tx (if relevant) queues pair. Provides
- * connecion handler as output parameter.
+ * qed_ll2_acquire_connection(): Allocate resources,
+ * starts rx & tx (if relevant) queues pair.
+ * Provides connection handle as output
+ * parameter.
*
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @data: Describes connection parameters.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param data - describes connection parameters
- * @return int
+ * Return: Int.
*/
int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data);
/**
- * @brief qed_ll2_establish_connection - start previously
- * allocated LL2 queues pair
+ * qed_ll2_establish_connection(): Start previously allocated LL2 queues pair.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param p_ptt
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_establish_connection(void *cxt, u8 connection_handle);
/**
- * @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue.
+ * qed_ll2_post_rx_buffer(): Submit buffers to LL2 Rx queue.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
- * @param addr rx (physical address) buffers to submit
- * @param cookie
- * @param notify_fw produce corresponding Rx BD immediately
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
+ * @addr: RX (physical address) buffers to submit.
+ * @buf_len: Buffer length.
+ * @cookie: Cookie.
+ * @notify_fw: Produce corresponding Rx BD immediately.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_post_rx_buffer(void *cxt,
u8 connection_handle,
@@ -161,15 +160,15 @@ int qed_ll2_post_rx_buffer(void *cxt,
u16 buf_len, void *cookie, u8 notify_fw);
/**
- * @brief qed_ll2_prepare_tx_packet - request for start Tx BD
- * to prepare Tx packet submission to FW.
+ * qed_ll2_prepare_tx_packet(): Request for start Tx BD
+ * to prepare Tx packet submission to FW.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle
- * @param pkt - info regarding the tx packet
- * @param notify_fw - issue doorbell to fw for this packet
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: Connection handle.
+ * @pkt: Info regarding the tx packet.
+ * @notify_fw: Issue doorbell to fw for this packet.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_prepare_tx_packet(void *cxt,
u8 connection_handle,
@@ -177,81 +176,83 @@ int qed_ll2_prepare_tx_packet(void *cxt,
bool notify_fw);
/**
- * @brief qed_ll2_release_connection - releases resources
- * allocated for LL2 connection
+ * qed_ll2_release_connection(): Releases resources allocated for LL2
+ * connection.
+ *
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
+ * Return: Void.
*/
void qed_ll2_release_connection(void *cxt, u8 connection_handle);
/**
- * @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill
- * Tx BD of BDs requested by
- * qed_ll2_prepare_tx_packet
+ * qed_ll2_set_fragment_of_tx_packet(): Provides fragments to fill
+ * Tx BD of BDs requested by
+ * qed_ll2_prepare_tx_packet().
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle
- * obtained from
- * qed_ll2_require_connection
- * @param addr
- * @param nbytes
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
+ * @addr: Address.
+ * @nbytes: Number of bytes.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_set_fragment_of_tx_packet(void *cxt,
u8 connection_handle,
dma_addr_t addr, u16 nbytes);
/**
- * @brief qed_ll2_terminate_connection - stops Tx/Rx queues
- *
+ * qed_ll2_terminate_connection(): Stops Tx/Rx queues.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle
- * obtained from
- * qed_ll2_require_connection
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_terminate_connection(void *cxt, u8 connection_handle);
/**
- * @brief qed_ll2_get_stats - get LL2 queue's statistics
+ * qed_ll2_get_stats(): Get LL2 queue's statistics.
*
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
+ * @p_stats: Pointer to the statistics to be filled.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
- * @param p_stats
- *
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_get_stats(void *cxt,
u8 connection_handle, struct qed_ll2_stats *p_stats);
/**
- * @brief qed_ll2_alloc - Allocates LL2 connections set
+ * qed_ll2_alloc(): Allocates LL2 connections set.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_ll2_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ll2_setup - Inits LL2 connections set
+ * qed_ll2_setup(): Inits LL2 connections set.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*
*/
void qed_ll2_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ll2_free - Releases LL2 connections set
+ * qed_ll2_free(): Releases LL2 connections set.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*
*/
void qed_ll2_free(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 15ef59aa34ff..7673b3e07736 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -99,10 +99,6 @@ static const u32 qed_mfw_ext_10g[] __initconst = {
ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};
-static const u32 qed_mfw_ext_20g[] __initconst = {
- ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
-};
-
static const u32 qed_mfw_ext_25g[] __initconst = {
ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
@@ -148,7 +144,6 @@ static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
- QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
@@ -262,7 +257,7 @@ module_exit(qed_exit);
/* Check if the DMA controller on the machine can properly handle the DMA
* addressing required by the device.
-*/
+ */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
struct device *dev = &cdev->pdev->dev;
@@ -547,7 +542,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
goto err2;
}
- DP_INFO(cdev, "qed_probe completed successfully\n");
+ DP_INFO(cdev, "%s completed successfully\n", __func__);
return cdev;
@@ -980,7 +975,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
rc = qed_set_int_mode(cdev, false);
if (rc) {
- DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
+ DP_ERR(cdev, "%s ERR\n", __func__);
return rc;
}
@@ -1161,6 +1156,7 @@ static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
/* Memory barrier for setting atomic bit */
smp_mb__before_atomic();
set_bit(wq_flag, &hwfn->slowpath_task_flags);
+ /* Memory barrier after setting atomic bit */
smp_mb__after_atomic();
queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);
@@ -1299,6 +1295,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
} else {
DP_NOTICE(cdev,
"Failed to acquire PTT for aRFS\n");
+ rc = -EINVAL;
goto err;
}
}
@@ -1381,7 +1378,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
(params->drv_minor << 16) |
(params->drv_rev << 8) |
(params->drv_eng);
- strlcpy(drv_version.name, params->name,
+ strscpy(drv_version.name, params->name,
MCP_DRV_VER_STR_SIZE - 4);
rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
@@ -2891,7 +2888,7 @@ static int qed_update_drv_state(struct qed_dev *cdev, bool active)
return status;
}
-static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
+static int qed_update_mac(struct qed_dev *cdev, const u8 *mac)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *ptt;
@@ -3078,8 +3075,10 @@ int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
"Scheduling slowpath task [Flag: %d]\n",
QED_SLOWPATH_MFW_TLV_REQ);
+ /* Memory barrier for setting atomic bit */
smp_mb__before_atomic();
set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
+ /* Memory barrier after setting atomic bit */
smp_mb__after_atomic();
queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
@@ -3158,3 +3157,8 @@ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
return 0;
}
+
+unsigned long qed_get_epoch_time(void)
+{
+ return ktime_get_real_seconds();
+}
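
The strlcpy() to strscpy() switch above follows the kernel-wide move away from strlcpy(), whose return value (the full source length) forces a scan past 'size' bytes of the source and is easy to misuse. strscpy() returns the number of characters copied, or -E2BIG on truncation, and never reads further than it needs to. A userspace behavioral sketch follows; the real implementation in lib/string.c is word-at-a-time, this only mirrors the contract.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Approximation of the kernel strscpy() semantics: copy with guaranteed
 * NUL termination, returning characters copied or -E2BIG on truncation.
 */
static ssize_t demo_strscpy(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return -E2BIG;

	len = strnlen(src, size);	/* never scans past 'size' bytes */
	if (len == size) {		/* src does not fit: truncate */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);	/* copy including the NUL */
	return len;
}

int main(void)
{
	char buf[8];

	printf("%zd '%s'\n", demo_strscpy(buf, "qed", sizeof(buf)), buf);
	printf("%zd '%s'\n", demo_strscpy(buf, "longer-than-8", sizeof(buf)), buf);
	return 0;
}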
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 24cd41567775..64678a256f3b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -17,6 +17,7 @@
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
+#include "qed_mfw_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
@@ -30,11 +31,11 @@
#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
- qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+ qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)), \
_val)
#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
- qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+ qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)))
#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
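Parenthesizing (_offset) is standard macro hygiene: an argument containing a lower-precedence operator otherwise changes the expansion's meaning. A self-contained illustration with hypothetical macros:

    #include <stdio.h>

    #define ADDR_NO_PARENS(base, off)   ((base) + off)
    #define ADDR_WITH_PARENS(base, off) ((base) + (off))

    int main(void)
    {
            int use_alt = 0;

            /* Expands to (0x100 + use_alt) ? 8 : 4, printing 8 (wrong). */
            printf("%d\n", ADDR_NO_PARENS(0x100, use_alt ? 8 : 4));
            /* Expands to 0x100 + (use_alt ? 8 : 4), printing 260. */
            printf("%d\n", ADDR_WITH_PARENS(0x100, use_alt ? 8 : 4));
            return 0;
    }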
@@ -384,7 +385,7 @@ qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
/* Get the union data */
- if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
+ if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) {
u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
offsetof(struct public_drv_mb,
union_data);
@@ -410,7 +411,7 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
offsetof(struct public_drv_mb, union_data);
memset(&union_data, 0, sizeof(union_data));
- if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
+ if (p_mb_params->p_data_src && p_mb_params->data_src_size)
memcpy(&union_data, p_mb_params->p_data_src,
p_mb_params->data_src_size);
qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
@@ -671,7 +672,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
- u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
+ u32 *o_mcp_param,
+ u32 *o_txn_size, u32 *o_buf, bool b_can_sleep)
{
struct qed_mcp_mb_params mb_params;
u8 raw_data[MCP_DRV_NVM_BUF_LEN];
@@ -684,6 +686,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
/* Use the maximal value since the actual one is part of the response */
mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+ if (b_can_sleep)
+ mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
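The new b_can_sleep flag lets callers in a sleepable context allow the mailbox wait to sleep; atomic callers pass false and keep the busy-wait. A hypothetical call site (variable names illustrative):

    u32 resp, param, txn_size;
    u32 buf[MCP_DRV_NVM_BUF_LEN / sizeof(u32)];
    int rc;

    rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM,
                            nvm_offset_and_len, &resp, &param,
                            &txn_size, buf, true /* b_can_sleep */);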
@@ -916,7 +920,6 @@ enum qed_load_req_force {
};
static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
-
enum qed_load_req_force force_cmd,
u8 *p_mfw_force_cmd)
{
@@ -1526,15 +1529,13 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
ext_speed = 0;
if (params->ext_speed.autoneg)
- ext_speed |= ETH_EXT_SPEED_AN;
+ ext_speed |= ETH_EXT_SPEED_NONE;
val = params->ext_speed.forced_speed;
if (val & QED_EXT_SPEED_1G)
ext_speed |= ETH_EXT_SPEED_1G;
if (val & QED_EXT_SPEED_10G)
ext_speed |= ETH_EXT_SPEED_10G;
- if (val & QED_EXT_SPEED_20G)
- ext_speed |= ETH_EXT_SPEED_20G;
if (val & QED_EXT_SPEED_25G)
ext_speed |= ETH_EXT_SPEED_25G;
if (val & QED_EXT_SPEED_40G)
@@ -1560,8 +1561,6 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
ext_speed |= ETH_EXT_ADV_SPEED_1G;
if (val & QED_EXT_SPEED_MASK_10G)
ext_speed |= ETH_EXT_ADV_SPEED_10G;
- if (val & QED_EXT_SPEED_MASK_20G)
- ext_speed |= ETH_EXT_ADV_SPEED_20G;
if (val & QED_EXT_SPEED_MASK_25G)
ext_speed |= ETH_EXT_ADV_SPEED_25G;
if (val & QED_EXT_SPEED_MASK_40G)
@@ -2081,7 +2080,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
- u32 global_offsize;
+ u32 global_offsize, public_base;
if (IS_VF(p_hwfn->cdev)) {
if (p_hwfn->vf_iov_info) {
@@ -2098,16 +2097,16 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
}
}
+ public_base = p_hwfn->mcp_info->public_base;
global_offsize = qed_rd(p_hwfn, p_ptt,
- SECTION_OFFSIZE_ADDR(p_hwfn->
- mcp_info->public_base,
+ SECTION_OFFSIZE_ADDR(public_base,
PUBLIC_GLOBAL));
*p_mfw_ver =
qed_rd(p_hwfn, p_ptt,
SECTION_ADDR(global_offsize,
0) + offsetof(struct public_global, mfw_ver));
- if (p_running_bundle_id != NULL) {
+ if (p_running_bundle_id) {
*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
SECTION_ADDR(global_offsize, 0) +
offsetof(struct public_global,
@@ -2209,6 +2208,7 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
return 0;
}
+
static bool qed_is_transceiver_ready(u32 transceiver_state,
u32 transceiver_type)
{
@@ -2378,7 +2378,7 @@ qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
"According to Legacy capabilities, L2 personality is %08x\n",
- (u32) *p_proto);
+ (u32)*p_proto);
}
static int
@@ -2423,7 +2423,7 @@ qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn,
NETIF_MSG_IFUP,
"According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
- (u32) *p_proto, resp, param);
+ (u32)*p_proto, resp, param);
return 0;
}
@@ -2445,9 +2445,6 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
case FUNC_MF_CFG_PROTOCOL_ISCSI:
*p_proto = QED_PCI_ISCSI;
break;
- case FUNC_MF_CFG_PROTOCOL_NVMETCP:
- *p_proto = QED_PCI_NVMETCP;
- break;
case FUNC_MF_CFG_PROTOCOL_FCOE:
*p_proto = QED_PCI_FCOE;
break;
@@ -2854,7 +2851,7 @@ int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
}
int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *mac)
+ struct qed_ptt *p_ptt, const u8 *mac)
{
struct qed_mcp_mb_params mb_params;
u32 mfw_mac[2];
@@ -3026,7 +3023,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
DRV_MB_PARAM_NVM_LEN_OFFSET),
&resp, &resp_param,
&read_len,
- (u32 *)(p_buf + offset));
+ (u32 *)(p_buf + offset), false);
if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
@@ -3034,7 +3031,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
}
/* This can be a lengthy process, and it's possible scheduler
- * isn't preemptable. Sleep a bit to prevent CPU hogging.
+ * isn't preemptible. Sleep a bit to prevent CPU hogging.
*/
if (bytes_left % 0x1000 <
(bytes_left - read_len) % 0x1000)
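The modulo comparison fires once per 4 KB of progress, and the loop then sleeps briefly so a non-preemptible kernel is not starved. The overall pattern, sketched (structure and constants illustrative):

    while (bytes_left > 0) {
            /* ... issue the next NVM read, producing read_len ... */

            /* Yield roughly once per 4 KB to avoid hogging the CPU. */
            if (bytes_left % 0x1000 < (bytes_left - read_len) % 0x1000)
                    usleep_range(1000, 2000);

            offset += read_len;
            bytes_left -= read_len;
    }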
@@ -3129,10 +3126,12 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
* to be delivered to MFW.
*/
if (param && cmd == QED_PUT_FILE_DATA) {
- buf_idx = QED_MFW_GET_FIELD(param,
- FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
- buf_size = QED_MFW_GET_FIELD(param,
- FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
+ buf_idx =
+ QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
+ buf_size =
+ QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
} else {
buf_idx += buf_size;
buf_size = min_t(u32, (len - buf_idx),
@@ -3176,7 +3175,7 @@ int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
DRV_MSG_CODE_TRANSCEIVER_READ,
nvm_offset, &resp, &param, &buf_size,
- (u32 *)(p_buf + offset));
+ (u32 *)(p_buf + offset), true);
if (rc) {
DP_NOTICE(p_hwfn,
"Failed to send a transceiver read command to the MFW. rc = %d.\n",
@@ -3275,7 +3274,7 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
DRV_MSG_CODE_BIST_TEST, param,
&resp, &resp_param,
&buf_size,
- (u32 *)p_image_att);
+ (u32 *)p_image_att, false);
if (rc)
return rc;
@@ -3388,7 +3387,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
type = NVM_TYPE_DEFAULT_CFG;
break;
case QED_NVM_IMAGE_NVM_META:
- type = NVM_TYPE_META;
+ type = NVM_TYPE_NVM_META;
break;
default:
DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
@@ -3905,10 +3904,6 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK |
DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL;
- if (QED_IS_E5(p_hwfn->cdev))
- features |=
- DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL;
-
return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
features, &mcp_resp, &mcp_param);
}
@@ -4002,7 +3997,8 @@ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
DRV_MSG_CODE_GET_NVM_CFG_OPTION,
- mb_param, &resp, &param, p_len, (u32 *)p_buf);
+ mb_param, &resp, &param, p_len,
+ (u32 *)p_buf, false);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 8edb450d0abf..564723800d15 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -266,97 +266,97 @@ union qed_mfw_tlv_data {
#define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4)
/**
- * @brief - returns the link params of the hw function
+ * qed_mcp_get_link_params(): Returns the link params of the hw function.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @returns pointer to link params
+ * Returns: Pointer to link params.
*/
-struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *);
+struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn);
/**
- * @brief - return the link state of the hw function
+ * qed_mcp_get_link_state(): Return the link state of the hw function.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @returns pointer to link state
+ * Returns: Pointer to link state.
*/
-struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *);
+struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn);
/**
- * @brief - return the link capabilities of the hw function
+ * qed_mcp_get_link_capabilities(): Return the link capabilities of the
+ * hw function.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @returns pointer to link capabilities
+ * Returns: Pointer to link capabilities.
*/
struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
/**
- * @brief Request the MFW to set the the link according to 'link_input'.
+ * qed_mcp_set_link(): Request the MFW to set the link according
+ * to 'link_input'.
*
- * @param p_hwfn
- * @param p_ptt
- * @param b_up - raise link if `true'. Reset link if `false'.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @b_up: Raise link if `true'. Reset link if `false'.
*
- * @return int
+ * Return: Int.
*/
int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool b_up);
/**
- * @brief Get the management firmware version value
+ * qed_mcp_get_mfw_ver(): Get the management firmware version value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_mfw_ver - mfw version value
- * @param p_running_bundle_id - image id in nvram; Optional.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_mfw_ver: MFW version value.
+ * @p_running_bundle_id: Image id in nvram; Optional.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - operation was successful.
*/
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_mfw_ver, u32 *p_running_bundle_id);
/**
- * @brief Get the MBI version value
+ * qed_mcp_get_mbi_ver(): Get the MBI version value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_mbi_ver: A pointer to a variable to be filled with the MBI version.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - operation was successful.
*/
int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_mbi_ver);
/**
- * @brief Get media type value of the port.
+ * qed_mcp_get_media_type(): Get media type value of the port.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param mfw_ver - media type value
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @media_type: Media type value.
*
- * @return int -
- * 0 - Operation was successul.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed
*/
int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *media_type);
/**
- * @brief Get transceiver data of the port.
+ * qed_mcp_get_transceiver_data(): Get transceiver data of the port.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param p_transceiver_state - transceiver state.
- * @param p_transceiver_type - media type value
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_transceiver_state: Transceiver state.
+ * @p_tranceiver_type: Media type value.
*
- * @return int -
- * 0 - Operation was successful.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed
*/
int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -364,50 +364,48 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
u32 *p_tranceiver_type);
/**
- * @brief Get transceiver supported speed mask.
+ * qed_mcp_trans_speed_mask(): Get transceiver supported speed mask.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param p_speed_mask - Bit mask of all supported speeds.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_speed_mask: Bit mask of all supported speeds.
*
- * @return int -
- * 0 - Operation was successful.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed
*/
int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_speed_mask);
/**
- * @brief Get board configuration.
+ * qed_mcp_get_board_config(): Get board configuration.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param p_board_config - Board config.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_board_config: Board config.
*
- * @return int -
- * 0 - Operation was successful.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed
*/
int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_board_config);
/**
- * @brief General function for sending commands to the MCP
- * mailbox. It acquire mutex lock for the entire
- * operation, from sending the request until the MCP
- * response. Waiting for MCP response will be checked up
- * to 5 seconds every 5ms.
+ * qed_mcp_cmd(): General function for sending commands to the MCP
+ * mailbox. It acquires a mutex lock for the entire
+ * operation, from sending the request until the MCP
+ * response arrives. The MCP response is polled every
+ * 5 ms, for up to 5 seconds.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param cmd - command to be sent to the MCP.
- * @param param - Optional param
- * @param o_mcp_resp - The MCP response code (exclude sequence).
- * @param o_mcp_param- Optional parameter provided by the MCP
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @cmd: Command to be sent to the MCP.
+ * @param: Optional param.
+ * @o_mcp_resp: The MCP response code (exclude sequence).
+ * @o_mcp_param: Optional parameter provided by the MCP
* response
- * @return int - 0 - operation
- * was successul.
+ *
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -417,37 +415,39 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param);
/**
- * @brief - drains the nig, allowing completion to pass in case of pauses.
- * (Should be called only from sleepable context)
+ * qed_mcp_drain(): Drains the NIG, allowing completion to pass in
+ * case of pauses.
+ * (Should be called only from a sleepable context.)
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Int.
*/
int qed_mcp_drain(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Get the flash size value
+ * qed_mcp_get_flash_size(): Get the flash size value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_flash_size - flash size in bytes to be filled.
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_flash_size: Flash size in bytes to be filled.
*
- * @return int - 0 - operation was successul.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_flash_size);
/**
- * @brief Send driver version to MFW
+ * qed_mcp_send_drv_version(): Send driver version to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param version - Version value
- * @param name - Protocol driver name
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_ver: Version value.
*
- * @return int - 0 - operation was successul.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
@@ -455,146 +455,148 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_mcp_drv_version *p_ver);
/**
- * @brief Read the MFW process kill counter
+ * qed_get_process_kill_counter(): Read the MFW process kill counter.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return u32
+ * Return: u32.
*/
u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Trigger a recovery process
+ * qed_start_recovery_process(): Trigger a recovery process.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int
+ * Return: Int.
*/
int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief A recovery handler must call this function as its first step.
- * It is assumed that the handler is not run from an interrupt context.
+ * qed_recovery_prolog(): A recovery handler must call this function
+ * as its first step.
+ * It is assumed that the handler is not run from
+ * an interrupt context.
*
- * @param cdev
- * @param p_ptt
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: int.
*/
int qed_recovery_prolog(struct qed_dev *cdev);
/**
- * @brief Notify MFW about the change in base device properties
+ * qed_mcp_ov_update_current_config(): Notify MFW about the change in base
+ * device properties.
*
- * @param p_hwfn
- * @param p_ptt
- * @param client - qed client type
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @client: Qed client type.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_client client);
/**
- * @brief Notify MFW about the driver state
+ * qed_mcp_ov_update_driver_state(): Notify MFW about the driver state.
*
- * @param p_hwfn
- * @param p_ptt
- * @param drv_state - Driver state
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @drv_state: Driver state.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_driver_state drv_state);
/**
- * @brief Send MTU size to MFW
+ * qed_mcp_ov_update_mtu(): Send MTU size to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param mtu - MTU size
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @mtu: MTU size.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 mtu);
/**
- * @brief Send MAC address to MFW
+ * qed_mcp_ov_update_mac(): Send MAC address to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param mac - MAC address
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @mac: MAC address.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *mac);
+ struct qed_ptt *p_ptt, const u8 *mac);
/**
- * @brief Send WOL mode to MFW
+ * qed_mcp_ov_update_wol(): Send WOL mode to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param wol - WOL mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @wol: WOL mode.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_wol wol);
/**
- * @brief Set LED status
+ * qed_mcp_set_led(): Set LED status.
*
- * @param p_hwfn
- * @param p_ptt
- * @param mode - LED mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @mode: LED mode.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_led_mode mode);
/**
- * @brief Read from nvm
+ * qed_mcp_nvm_read(): Read from NVM.
*
- * @param cdev
- * @param addr - nvm offset
- * @param p_buf - nvm read buffer
- * @param len - buffer len
+ * @cdev: Qed dev pointer.
+ * @addr: NVM offset.
+ * @p_buf: NVM read buffer.
+ * @len: Buffer len.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);
/**
- * @brief Write to nvm
+ * qed_mcp_nvm_write(): Write to NVM.
*
- * @param cdev
- * @param addr - nvm offset
- * @param cmd - nvm command
- * @param p_buf - nvm write buffer
- * @param len - buffer len
+ * @cdev: Qed dev pointer.
+ * @addr: NVM offset.
+ * @cmd: NVM command.
+ * @p_buf: NVM write buffer.
+ * @len: Buffer len.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_nvm_write(struct qed_dev *cdev,
u32 cmd, u32 addr, u8 *p_buf, u32 len);
/**
- * @brief Check latest response
+ * qed_mcp_nvm_resp(): Check latest response.
*
- * @param cdev
- * @param p_buf - nvm write buffer
+ * @cdev: Qed dev pointer.
+ * @p_buf: NVM write buffer.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf);
@@ -604,13 +606,13 @@ struct qed_nvm_image_att {
};
/**
- * @brief Allows reading a whole nvram image
+ * qed_mcp_get_nvm_image_att(): Allows reading a whole nvram image.
*
- * @param p_hwfn
- * @param image_id - image to get attributes for
- * @param p_image_att - image attributes structure into which to fill data
+ * @p_hwfn: HW device data.
+ * @image_id: Image to get attributes for.
+ * @p_image_att: Image attributes structure into which to fill data.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
@@ -618,64 +620,65 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
struct qed_nvm_image_att *p_image_att);
/**
- * @brief Allows reading a whole nvram image
+ * qed_mcp_get_nvm_image(): Allows reading a whole nvram image.
*
- * @param p_hwfn
- * @param image_id - image requested for reading
- * @param p_buffer - allocated buffer into which to fill data
- * @param buffer_len - length of the allocated buffer.
+ * @p_hwfn: HW device data.
+ * @image_id: image requested for reading.
+ * @p_buffer: allocated buffer into which to fill data.
+ * @buffer_len: length of the allocated buffer.
*
- * @return 0 iff p_buffer now contains the nvram image.
+ * Return: 0 if p_buffer now contains the nvram image.
*/
int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
enum qed_nvm_images image_id,
u8 *p_buffer, u32 buffer_len);
/**
- * @brief Bist register test
+ * qed_mcp_bist_register_test(): Bist register test.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Bist clock test
+ * qed_mcp_bist_clock_test(): Bist clock test.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Bist nvm test - get number of images
+ * qed_mcp_bist_nvm_get_num_images(): Bist nvm test - get number of images.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param num_images - number of images if operation was
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @num_images: number of images if operation was
* successful. 0 if not.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *num_images);
/**
- * @brief Bist nvm test - get image attributes by index
+ * qed_mcp_bist_nvm_get_image_att(): Bist nvm test - get image attributes
+ * by index.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param p_image_att - Attributes of image
- * @param image_index - Index of image to get information for
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_image_att: Attributes of image.
+ * @image_index: Index of image to get information for.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -683,23 +686,26 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
u32 image_index);
/**
- * @brief - Processes the TLV request from MFW i.e., get the required TLV info
- * from the qed client and send it to the MFW.
+ * qed_mfw_process_tlv_req(): Processes the TLV request from the MFW,
+ * i.e., gets the required TLV info
+ * from the qed client and sends it to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Send raw debug data to the MFW
+ * qed_mcp_send_raw_debug_data(): Send raw debug data to the MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_buf: raw debug data buffer.
+ * @size: Buffer size.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_buf - raw debug data buffer
- * @param size - buffer size
+ * Return: Int.
*/
int
qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
@@ -796,47 +802,49 @@ qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn)
}
/**
- * @brief Initialize the interface with the MCP
+ * qed_mcp_cmd_init(): Initialize the interface with the MCP.
*
- * @param p_hwfn - HW func
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int
+ * Return: Int.
*/
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Initialize the port interface with the MCP
+ * qed_mcp_cmd_port_init(): Initialize the port interface with the MCP.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*
- * @param p_hwfn
- * @param p_ptt
* Can only be called after `num_ports_in_engines' is set
*/
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Releases resources allocated during the init process.
+ * qed_mcp_free(): Releases resources allocated during the init process.
*
- * @param p_hwfn - HW func
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW function.
*
- * @return int
+ * Return: Int.
*/
int qed_mcp_free(struct qed_hwfn *p_hwfn);
/**
- * @brief This function is called from the DPC context. After
- * pointing PTT to the mfw mb, check for events sent by the MCP
- * to the driver and ack them. In case a critical event
- * detected, it will be handled here, otherwise the work will be
- * queued to a sleepable work-queue.
+ * qed_mcp_handle_events(): This function is called from the DPC context.
+ * After pointing PTT to the mfw mb, check for events sent by
+ * the MCP to the driver and ack them. In case a critical event is
+ * detected, it will be handled here; otherwise the work will be
+ * queued to a sleepable work-queue.
+ *
+ * @p_hwfn: HW function.
+ * @p_ptt: PTT required for register access.
*
- * @param p_hwfn - HW function
- * @param p_ptt - PTT required for register access
- * @return int - 0 - operation
- * was successul.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
@@ -858,169 +866,177 @@ struct qed_load_req_params {
};
/**
- * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
- * returns whether this PF is the first on the engine/port or function.
+ * qed_mcp_load_req(): Sends a LOAD_REQ to the MFW, and in case the
+ * operation succeeds, returns whether this PF is
+ * the first on the engine/port or function.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_params
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_params: Params.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_load_req_params *p_params);
/**
- * @brief Sends a LOAD_DONE message to the MFW
+ * qed_mcp_load_done(): Sends a LOAD_DONE message to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Sends a UNLOAD_REQ message to the MFW
+ * qed_mcp_unload_req(): Sends an UNLOAD_REQ message to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Sends a UNLOAD_DONE message to the MFW
+ * qed_mcp_unload_done(): Sends an UNLOAD_DONE message to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Read the MFW mailbox into Current buffer.
+ * qed_mcp_read_mb(): Read the MFW mailbox into Current buffer.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*/
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Ack to mfw that driver finished FLR process for VFs
+ * qed_mcp_ack_vf_flr(): Ack to the MFW that the driver finished the FLR
+ * process for VFs.
*
- * @param p_hwfn
- * @param p_ptt
- * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @vfs_to_ack: bit mask of all engine VFs for which the PF acks.
*
- * @param return int - 0 upon success.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *vfs_to_ack);
/**
- * @brief - calls during init to read shmem of all function-related info.
+ * qed_mcp_fill_shmem_func_info(): Calls during init to read shmem of
+ * all function-related info.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief - Reset the MCP using mailbox command.
+ * qed_mcp_reset(): Reset the MCP using mailbox command.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_reset(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief - Sends an NVM read command request to the MFW to get
- * a buffer.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
- * DRV_MSG_CODE_NVM_READ_NVRAM commands
- * @param param - [0:23] - Offset [24:31] - Size
- * @param o_mcp_resp - MCP response
- * @param o_mcp_param - MCP response param
- * @param o_txn_size - Buffer size output
- * @param o_buf - Pointer to the buffer returned by the MFW.
+ * qed_mcp_nvm_rd_cmd(): Sends an NVM read command request to the MFW to get
+ * a buffer.
*
- * @param return 0 upon success.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @cmd: (Command) DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ * DRV_MSG_CODE_NVM_READ_NVRAM commands.
+ * @param: [0:23] - Offset [24:31] - Size.
+ * @o_mcp_resp: MCP response.
+ * @o_mcp_param: MCP response param.
+ * @o_txn_size: Buffer size output.
+ * @o_buf: Pointer to the buffer returned by the MFW.
+ * @b_can_sleep: Can sleep.
+ *
+ * Return: 0 upon success.
*/
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
- u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);
+ u32 *o_mcp_param,
+ u32 *o_txn_size, u32 *o_buf, bool b_can_sleep);
/**
- * @brief Read from sfp
+ * qed_mcp_phy_sfp_read(): Read from SFP.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param port - transceiver port
- * @param addr - I2C address
- * @param offset - offset in sfp
- * @param len - buffer length
- * @param p_buf - buffer to read into
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @port: transceiver port.
+ * @addr: I2C address.
+ * @offset: offset in sfp.
+ * @len: buffer length.
+ * @p_buf: buffer to read into.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf);
/**
- * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ * qed_mcp_is_init(): Indicates whether the MFW objects [under mcp_info]
+ * are accessible.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return true iff MFW is running and mcp_info is initialized
+ * Return: true if MFW is running and mcp_info is initialized.
*/
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
/**
- * @brief request MFW to configure MSI-X for a VF
+ * qed_mcp_config_vf_msix(): Request MFW to configure MSI-X for a VF.
*
- * @param p_hwfn
- * @param p_ptt
- * @param vf_id - absolute inside engine
- * @param num_sbs - number of entries to request
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @vf_id: absolute inside engine.
+ * @num: number of entries to request.
*
- * @return int
+ * Return: Int.
*/
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 vf_id, u8 num);
/**
- * @brief - Halt the MCP.
+ * qed_mcp_halt(): Halt the MCP.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief - Wake up the MCP.
+ * qed_mcp_resume(): Wake up the MCP.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
@@ -1038,13 +1054,13 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
-/* @brief - Gets the mdump retained data from the MFW.
+/* qed_mcp_mdump_get_retain(): Gets the mdump retained data from the MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_mdump_retain
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_mdump_retain: mdump retain.
*
- * @param return 0 upon success.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
@@ -1052,15 +1068,15 @@ qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
struct mdump_retain_data_stc *p_mdump_retain);
/**
- * @brief - Sets the MFW's max value for the given resource
+ * qed_mcp_set_resc_max_val(): Sets the MFW's max value for the given resource.
*
- * @param p_hwfn
- * @param p_ptt
- * @param res_id
- * @param resc_max_val
- * @param p_mcp_resp
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @res_id: RES ID.
+ * @resc_max_val: Resc max val.
+ * @p_mcp_resp: MCP resp.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
@@ -1069,16 +1085,17 @@ qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
u32 resc_max_val, u32 *p_mcp_resp);
/**
- * @brief - Gets the MFW allocation info for the given resource
+ * qed_mcp_get_resc_info(): Gets the MFW allocation info for the given
+ * resource.
*
- * @param p_hwfn
- * @param p_ptt
- * @param res_id
- * @param p_mcp_resp
- * @param p_resc_num
- * @param p_resc_start
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @res_id: Res ID.
+ * @p_mcp_resp: MCP resp.
+ * @p_resc_num: Resc num.
+ * @p_resc_start: Resc start.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
@@ -1087,13 +1104,13 @@ qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);
/**
- * @brief Send eswitch mode to MFW
+ * qed_mcp_ov_update_eswitch(): Send eswitch mode to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param eswitch - eswitch mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @eswitch: eswitch mode.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -1113,12 +1130,12 @@ enum qed_resc_lock {
};
/**
- * @brief - Initiates PF FLR
+ * qed_mcp_initiate_pf_flr(): Initiates PF FLR.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
struct qed_resc_lock_params {
@@ -1151,13 +1168,13 @@ struct qed_resc_lock_params {
};
/**
- * @brief Acquires MFW generic resource lock
+ * qed_mcp_resc_lock(): Acquires MFW generic resource lock.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_params
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_params: Params.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
@@ -1175,13 +1192,13 @@ struct qed_resc_unlock_params {
};
/**
- * @brief Releases MFW generic resource lock
+ * qed_mcp_resc_unlock(): Releases MFW generic resource lock.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_params
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_params: Params.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
@@ -1189,12 +1206,15 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
struct qed_resc_unlock_params *p_params);
/**
- * @brief - default initialization for lock/unlock resource structs
+ * qed_mcp_resc_lock_default_init(): Default initialization for
+ * lock/unlock resource structs.
+ *
+ * @p_lock: lock params struct to be initialized; Can be NULL.
+ * @p_unlock: unlock params struct to be initialized; Can be NULL.
+ * @resource: the requested resource.
+ * @b_is_permanent: disable retries & aging when set.
*
- * @param p_lock - lock params struct to be initialized; Can be NULL
- * @param p_unlock - unlock params struct to be initialized; Can be NULL
- * @param resource - the requested resource
- * @paral b_is_permanent - disable retries & aging when set
+ * Return: Void.
*/
void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
struct qed_resc_unlock_params *p_unlock,
@@ -1202,94 +1222,117 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
resource, bool b_is_permanent);
/**
- * @brief - Return whether management firmware support smart AN
+ * qed_mcp_is_smart_an_supported(): Return whether the management firmware
+ * supports smart AN.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return bool - true if feature is supported.
+ * Return: bool true if feature is supported.
*/
bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn);
/**
- * @brief Learn of supported MFW features; To be done during early init
+ * qed_mcp_get_capabilities(): Learn of supported MFW features;
+ * to be done during early init.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Int.
*/
int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Inform MFW of set of features supported by driver. Should be done
- * inside the content of the LOAD_REQ.
+ * qed_mcp_set_capabilities(): Inform MFW of set of features supported
+ * by driver. Should be done inside the content
+ * of the LOAD_REQ.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
*/
int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Read ufp config from the shared memory.
+ * qed_mcp_read_ufp_config(): Read ufp config from the shared memory.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*/
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Populate the nvm info shadow in the given hardware function
+ * qed_mcp_nvm_info_populate(): Populate the nvm info shadow in the given
+ * hardware function.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Int.
*/
int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
/**
- * @brief Delete nvm info shadow in the given hardware function
+ * qed_mcp_nvm_info_free(): Delete nvm info shadow in the given
+ * hardware function.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn);
/**
- * @brief Get the engine affinity configuration.
+ * qed_mcp_get_engine_config(): Get the engine affinity configuration.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Int.
*/
int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Get the PPFID bitmap.
+ * qed_mcp_get_ppfid_bitmap(): Get the PPFID bitmap.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
*/
int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Get NVM config attribute value.
+ * qed_mcp_nvm_get_cfg(): Get NVM config attribute value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param option_id
- * @param entity_id
- * @param flags
- * @param p_buf
- * @param p_len
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @option_id: Option ID.
+ * @entity_id: Entity ID.
+ * @flags: Flags.
+ * @p_buf: Buf.
+ * @p_len: Len.
+ *
+ * Return: Int.
*/
int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
u32 *p_len);
/**
- * @brief Set NVM config attribute value.
+ * qed_mcp_nvm_set_cfg(): Set NVM config attribute value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param option_id
- * @param entity_id
- * @param flags
- * @param p_buf
- * @param len
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @option_id: Option ID.
+ * @entity_id: Entity ID.
+ * @flags: Flags.
+ * @p_buf: Buf.
+ * @len: Len.
+ *
+ * Return: Int.
*/
int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
new file mode 100644
index 000000000000..8a0e3c5d4bda
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
@@ -0,0 +1,2474 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+
+#ifndef _QED_MFW_HSI_H
+#define _QED_MFW_HSI_H
+
+#define MFW_TRACE_SIGNATURE 0x25071946
+
+/* The trace in the buffer */
+#define MFW_TRACE_EVENTID_MASK 0x00ffff
+#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
+#define MFW_TRACE_PRM_SIZE_OFFSET 16
+#define MFW_TRACE_ENTRY_SIZE 3
+
+struct mcp_trace {
+ u32 signature; /* Help to identify that the trace is valid */
+ u32 size; /* the size of the trace buffer in bytes */
+ u32 curr_level; /* 2 - all will be written to the buffer
+ * 1 - debug trace will not be written
+ * 0 - just errors will be written to the buffer
+ */
+ u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means
+ * mask it.
+ */
+
+ /* Warning: the following pointers are assumed to be 32bits as they are
+ * used only in the MFW.
+ */
+ u32 trace_prod; /* The next trace will be written to this offset */
+ u32 trace_oldest; /* The oldest valid trace starts at this offset
+ * (usually very close after the current producer).
+ */
+};
+
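+/* Example (illustrative): a trace entry header packs its fields per the
+ * masks above, i.e. evt_id = hdr & MFW_TRACE_EVENTID_MASK and
+ * prm_size = (hdr & MFW_TRACE_PRM_SIZE_MASK) >> MFW_TRACE_PRM_SIZE_OFFSET.
+ */
+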
+#define VF_MAX_STATIC 192
+#define VF_BITMAP_SIZE_IN_DWORDS (VF_MAX_STATIC / 32)
+#define VF_BITMAP_SIZE_IN_BYTES (VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32))
+
+#define EXT_VF_MAX_STATIC 240
+#define EXT_VF_BITMAP_SIZE_IN_DWORDS (((EXT_VF_MAX_STATIC - 1) / 32) + 1)
+#define EXT_VF_BITMAP_SIZE_IN_BYTES (EXT_VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32))
+#define ADDED_VF_BITMAP_SIZE 2
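+
+/* For reference: VF_BITMAP_SIZE_IN_DWORDS = 192 / 32 = 6 dwords, while
+ * EXT_VF_BITMAP_SIZE_IN_DWORDS = ((240 - 1) / 32) + 1 = 8 dwords; the
+ * round-up form guards against maxima that are not multiples of 32.
+ */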
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2
+#define MCP_GLOB_PORT_MAX 4
+#define MCP_GLOB_FUNC_MAX 16
+
+typedef u32 offsize_t; /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
+/* Size of specific element (not the whole array if any) */
+#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_MASK 0xffff0000
+
+#define SECTION_OFFSET(_offsize) (((((_offsize) & \
+ OFFSIZE_OFFSET_MASK) >> \
+ OFFSIZE_OFFSET_SHIFT) << 2))
+
+#define QED_SECTION_SIZE(_offsize) ((((_offsize) & \
+ OFFSIZE_SIZE_MASK) >> \
+ OFFSIZE_SIZE_SHIFT) << 2)
+
+#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
+ SECTION_OFFSET((_offsize)) + \
+ (QED_SECTION_SIZE((_offsize)) * (idx)))
+
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
+ ((_pub_base) + offsetof(struct mcp_public_data, sections[_section]))
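+
+/* Worked example (hypothetical value): offsize = 0x00200123 decodes to an
+ * offset of (0x0123 << 2) = 0x48c bytes from the scratchpad base and an
+ * element size of (0x0020 << 2) = 128 bytes, so SECTION_ADDR(offsize, 2)
+ * resolves to MCP_REG_SCRATCH + 0x48c + 2 * 128.
+ */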
+
+/* PHY configuration */
+struct eth_phy_cfg {
+ u32 speed;
+#define ETH_SPEED_AUTONEG 0x0
+#define ETH_SPEED_SMARTLINQ 0x8
+
+ u32 pause;
+#define ETH_PAUSE_NONE 0x0
+#define ETH_PAUSE_AUTONEG 0x1
+#define ETH_PAUSE_RX 0x2
+#define ETH_PAUSE_TX 0x4
+
+ u32 adv_speed;
+
+ u32 loopback_mode;
+#define ETH_LOOPBACK_NONE 0x0
+#define ETH_LOOPBACK_INT_PHY 0x1
+#define ETH_LOOPBACK_EXT_PHY 0x2
+#define ETH_LOOPBACK_EXT 0x3
+#define ETH_LOOPBACK_MAC 0x4
+#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5
+#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6
+#define ETH_LOOPBACK_PCS_AH_ONLY 0x7
+#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8
+#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9
+
+ u32 eee_cfg;
+#define EEE_CFG_EEE_ENABLED BIT(0)
+#define EEE_CFG_TX_LPI BIT(1)
+#define EEE_CFG_ADV_SPEED_1G BIT(2)
+#define EEE_CFG_ADV_SPEED_10G BIT(3)
+#define EEE_TX_TIMER_USEC_MASK 0xfffffff0
+#define EEE_TX_TIMER_USEC_OFFSET 4
+#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00
+#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100
+#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000
+
+ u32 link_modes;
+
+ u32 fec_mode;
+#define FEC_FORCE_MODE_MASK 0x000000ff
+#define FEC_FORCE_MODE_OFFSET 0
+#define FEC_FORCE_MODE_NONE 0x00
+#define FEC_FORCE_MODE_FIRECODE 0x01
+#define FEC_FORCE_MODE_RS 0x02
+#define FEC_FORCE_MODE_AUTO 0x07
+#define FEC_EXTENDED_MODE_MASK 0xffffff00
+#define FEC_EXTENDED_MODE_OFFSET 8
+#define ETH_EXT_FEC_NONE 0x00000000
+#define ETH_EXT_FEC_10G_NONE 0x00000100
+#define ETH_EXT_FEC_10G_BASE_R 0x00000200
+#define ETH_EXT_FEC_25G_NONE 0x00000400
+#define ETH_EXT_FEC_25G_BASE_R 0x00000800
+#define ETH_EXT_FEC_25G_RS528 0x00001000
+#define ETH_EXT_FEC_40G_NONE 0x00002000
+#define ETH_EXT_FEC_40G_BASE_R 0x00004000
+#define ETH_EXT_FEC_50G_NONE 0x00008000
+#define ETH_EXT_FEC_50G_BASE_R 0x00010000
+#define ETH_EXT_FEC_50G_RS528 0x00020000
+#define ETH_EXT_FEC_50G_RS544 0x00040000
+#define ETH_EXT_FEC_100G_NONE 0x00080000
+#define ETH_EXT_FEC_100G_BASE_R 0x00100000
+#define ETH_EXT_FEC_100G_RS528 0x00200000
+#define ETH_EXT_FEC_100G_RS544 0x00400000
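+
+/* Example (illustrative): the forced FEC mode lives in bits [7:0] and the
+ * extended request in bits [31:8], so
+ * (fec_mode & FEC_FORCE_MODE_MASK) >> FEC_FORCE_MODE_OFFSET yields one of
+ * the FEC_FORCE_MODE_* values.
+ */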
+
+ u32 extended_speed;
+#define ETH_EXT_SPEED_MASK 0x0000ffff
+#define ETH_EXT_SPEED_OFFSET 0
+#define ETH_EXT_SPEED_NONE 0x00000001
+#define ETH_EXT_SPEED_1G 0x00000002
+#define ETH_EXT_SPEED_10G 0x00000004
+#define ETH_EXT_SPEED_25G 0x00000008
+#define ETH_EXT_SPEED_40G 0x00000010
+#define ETH_EXT_SPEED_50G_BASE_R 0x00000020
+#define ETH_EXT_SPEED_50G_BASE_R2 0x00000040
+#define ETH_EXT_SPEED_100G_BASE_R2 0x00000080
+#define ETH_EXT_SPEED_100G_BASE_R4 0x00000100
+#define ETH_EXT_SPEED_100G_BASE_P4 0x00000200
+#define ETH_EXT_ADV_SPEED_MASK 0xFFFF0000
+#define ETH_EXT_ADV_SPEED_OFFSET 16
+#define ETH_EXT_ADV_SPEED_1G 0x00010000
+#define ETH_EXT_ADV_SPEED_10G 0x00020000
+#define ETH_EXT_ADV_SPEED_25G 0x00040000
+#define ETH_EXT_ADV_SPEED_40G 0x00080000
+#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00100000
+#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00200000
+#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x00400000
+#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x00800000
+#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x01000000
+};
+
+struct port_mf_cfg {
+ u32 dynamic_cfg;
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+ u32 reserved[1];
+};
+
+struct eth_stats {
+ u64 r64;
+ u64 r127;
+ u64 r255;
+ u64 r511;
+ u64 r1023;
+ u64 r1518;
+
+ union {
+ struct {
+ u64 r1522;
+ u64 r2047;
+ u64 r4095;
+ u64 r9216;
+ u64 r16383;
+ } bb0;
+ struct {
+ u64 unused1;
+ u64 r1519_to_max;
+ u64 unused2;
+ u64 unused3;
+ u64 unused4;
+ } ah0;
+ } u0;
+
+ u64 rfcs;
+ u64 rxcf;
+ u64 rxpf;
+ u64 rxpp;
+ u64 raln;
+ u64 rfcr;
+ u64 rovr;
+ u64 rjbr;
+ u64 rund;
+ u64 rfrg;
+ u64 t64;
+ u64 t127;
+ u64 t255;
+ u64 t511;
+ u64 t1023;
+ u64 t1518;
+
+ union {
+ struct {
+ u64 t2047;
+ u64 t4095;
+ u64 t9216;
+ u64 t16383;
+ } bb1;
+ struct {
+ u64 t1519_to_max;
+ u64 unused6;
+ u64 unused7;
+ u64 unused8;
+ } ah1;
+ } u1;
+
+ u64 txpf;
+ u64 txpp;
+
+ union {
+ struct {
+ u64 tlpiec;
+ u64 tncl;
+ } bb2;
+ struct {
+ u64 unused9;
+ u64 unused10;
+ } ah2;
+ } u2;
+
+ u64 rbyte;
+ u64 rxuca;
+ u64 rxmca;
+ u64 rxbca;
+ u64 rxpok;
+ u64 tbyte;
+ u64 txuca;
+ u64 txmca;
+ u64 txbca;
+ u64 txcf;
+};
+
+struct pkt_type_cnt {
+ u64 tc_tx_pkt_cnt[8];
+ u64 tc_tx_oct_cnt[8];
+ u64 priority_rx_pkt_cnt[8];
+ u64 priority_rx_oct_cnt[8];
+};
+
+struct brb_stats {
+ u64 brb_truncate[8];
+ u64 brb_discard[8];
+};
+
+struct port_stats {
+ struct brb_stats brb;
+ struct eth_stats eth;
+};
+
+struct couple_mode_teaming {
+ u8 port_cmt[MCP_GLOB_PORT_MAX];
+#define PORT_CMT_IN_TEAM BIT(0)
+
+#define PORT_CMT_PORT_ROLE BIT(1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE BIT(1)
+
+#define PORT_CMT_TEAM_MASK BIT(2)
+#define PORT_CMT_TEAM0 (0 << 2)
+#define PORT_CMT_TEAM1 BIT(2)
+};
+
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL 32
+#define MAX_SYSTEM_LLDP_TLV_DATA 32
+#define MAX_TLV_BUFFER 128
+
+enum _lldp_agent {
+ LLDP_NEAREST_BRIDGE = 0,
+ LLDP_NEAREST_NON_TPMR_BRIDGE,
+ LLDP_NEAREST_CUSTOMER_BRIDGE,
+ LLDP_MAX_LLDP_AGENTS
+};
+
+struct lldp_config_params_s {
+ u32 config;
+#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_HOLD_MASK 0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+ u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct lldp_status_params_s {
+ u32 prefix_seq_num;
+ u32 status;
+ u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 suffix_seq_num;
+};
+
+struct dcbx_ets_feature {
+ u32 flags;
+#define DCBX_ETS_ENABLED_MASK 0x00000001
+#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_WILLING_MASK 0x00000002
+#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_ERROR_MASK 0x00000004
+#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_CBS_MASK 0x00000008
+#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT 4
+#define DCBX_OOO_TC_MASK 0x00000f00
+#define DCBX_OOO_TC_SHIFT 8
+ u32 pri_tc_tbl[1];
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
+
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
+#define DCBX_CEE_STRICT_PRIORITY 0xf
+ u32 tc_bw_tbl[2];
+ u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT 0
+#define DCBX_ETS_TSA_CBS 1
+#define DCBX_ETS_TSA_ETS 2
+};
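+
+/* Example (illustrative): the number of ETS traffic classes advertised in
+ * 'flags' is (flags & DCBX_ETS_MAX_TCS_MASK) >> DCBX_ETS_MAX_TCS_SHIFT.
+ */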
+
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
+
+struct dcbx_app_priority_entry {
+ u32 entry;
+#define DCBX_APP_PRI_MAP_MASK 0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_0 0x01
+#define DCBX_APP_PRI_1 0x02
+#define DCBX_APP_PRI_2 0x04
+#define DCBX_APP_PRI_3 0x08
+#define DCBX_APP_PRI_4 0x10
+#define DCBX_APP_PRI_5 0x20
+#define DCBX_APP_PRI_6 0x40
+#define DCBX_APP_PRI_7 0x80
+#define DCBX_APP_SF_MASK 0x00000300
+#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_ETHTYPE 0
+#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_SF_IEEE_MASK 0x0000f000
+#define DCBX_APP_SF_IEEE_SHIFT 12
+#define DCBX_APP_SF_IEEE_RESERVED 0
+#define DCBX_APP_SF_IEEE_ETHTYPE 1
+#define DCBX_APP_SF_IEEE_TCP_PORT 2
+#define DCBX_APP_SF_IEEE_UDP_PORT 3
+#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
+
+#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+};
+
+struct dcbx_app_priority_feature {
+ u32 flags;
+#define DCBX_APP_ENABLED_MASK 0x00000001
+#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_WILLING_MASK 0x00000002
+#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_ERROR_MASK 0x00000004
+#define DCBX_APP_ERROR_SHIFT 2
+#define DCBX_APP_MAX_TCS_MASK 0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+struct dcbx_features {
+ struct dcbx_ets_feature ets;
+ u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
+
+#define DCBX_PFC_FLAGS_MASK 0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_CAPS_MASK 0x00000f00
+#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_MBC_MASK 0x00004000
+#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_WILLING_MASK 0x00008000
+#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_ENABLED_MASK 0x00010000
+#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ERROR_MASK 0x00020000
+#define DCBX_PFC_ERROR_SHIFT 17
+
+ struct dcbx_app_priority_feature app;
+};
+
+struct dcbx_local_params {
+ u32 config;
+#define DCBX_CONFIG_VERSION_MASK 0x00000007
+#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_DISABLED 0
+#define DCBX_CONFIG_VERSION_IEEE 1
+#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_STATIC 4
+
+ u32 flags;
+ struct dcbx_features features;
+};
+
+struct dcbx_mib {
+ u32 prefix_seq_num;
+ u32 flags;
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+
+struct lldp_system_tlvs_buffer_s {
+ u32 flags;
+#define LLDP_SYSTEM_TLV_VALID_MASK 0x1
+#define LLDP_SYSTEM_TLV_VALID_OFFSET 0
+#define LLDP_SYSTEM_TLV_MANDATORY_MASK 0x2
+#define LLDP_SYSTEM_TLV_MANDATORY_SHIFT 1
+#define LLDP_SYSTEM_TLV_LENGTH_MASK 0xffff0000
+#define LLDP_SYSTEM_TLV_LENGTH_SHIFT 16
+ u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+struct lldp_received_tlvs_s {
+ u32 prefix_seq_num;
+ u32 length;
+ u32 tlvs_buffer[MAX_TLV_BUFFER];
+ u32 suffix_seq_num;
+};
+
+struct dcb_dscp_map {
+ u32 flags;
+#define DCB_DSCP_ENABLE_MASK 0x1
+#define DCB_DSCP_ENABLE_SHIFT 0
+#define DCB_DSCP_ENABLE 1
+ u32 dscp_pri_map[8];
+};
+
+struct mcp_val64 {
+ u32 lo;
+ u32 hi;
+};
+
+struct generic_idc_msg_s {
+ u32 source_pf;
+ struct mcp_val64 msg;
+};
+
+struct pcie_stats_stc {
+ u32 sr_cnt_wr_byte_msb;
+ u32 sr_cnt_wr_byte_lsb;
+ u32 sr_cnt_wr_cnt;
+ u32 sr_cnt_rd_byte_msb;
+ u32 sr_cnt_rd_byte_lsb;
+ u32 sr_cnt_rd_cnt;
+};
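+
+/* Example (illustrative): each split 64-bit counter recombines as
+ * ((u64)sr_cnt_wr_byte_msb << 32) | sr_cnt_wr_byte_lsb.
+ */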
+
+enum _attribute_commands_e {
+ ATTRIBUTE_CMD_READ = 0,
+ ATTRIBUTE_CMD_WRITE,
+ ATTRIBUTE_CMD_READ_CLEAR,
+ ATTRIBUTE_CMD_CLEAR,
+ ATTRIBUTE_NUM_OF_COMMANDS
+};
+
+struct public_global {
+ u32 max_path;
+ u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
+ u32 debug_mb_offset;
+ u32 phymod_dbg_mb_offset;
+ struct couple_mode_teaming cmt;
+ s32 internal_temperature;
+ u32 mfw_ver;
+ u32 running_bundle_id;
+ s32 external_temperature;
+ u32 mdump_reason;
+ u32 ext_phy_upgrade_fw;
+ u8 runtime_port_swap_map[MODE_4P];
+ u32 data_ptr;
+ u32 data_size;
+ u32 bmb_error_status_cnt;
+ u32 bmb_jumbo_frame_cnt;
+ u32 sent_to_bmc_cnt;
+ u32 handled_by_mfw;
+ u32 sent_to_nw_cnt;
+ u32 to_bmc_kb_per_second;
+ u32 bcast_dropped_to_bmc_cnt;
+ u32 mcast_dropped_to_bmc_cnt;
+ u32 ucast_dropped_to_bmc_cnt;
+ u32 ncsi_response_failure_cnt;
+ u32 device_attr;
+ u32 vpd_warning;
+};
+
+struct fw_flr_mb {
+ u32 aggint;
+ u32 opgen_addr;
+ u32 accum_ack;
+};
+
+struct public_path {
+ struct fw_flr_mb flr_mb;
+ u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
+
+ u32 process_kill;
+#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) ((aeu_reg_id) * 32 + (aeu_bit))
+};
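+
+/* Illustrative: GLOBAL_AEU_BIT() flattens a (register, bit) pair into the
+ * single index stored in the PROCESS_KILL_GLOB_AEU_BIT field, e.g.
+ * GLOBAL_AEU_BIT(2, 5) == 69.
+ */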
+
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+struct dci_npiv_settings {
+ u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+ u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+struct dci_fc_npiv_cfg {
+ /* hdr used internally by the MFW */
+ u32 hdr;
+ u32 num_of_npiv;
+};
+
+#define MAX_NUMBER_NPIV 64
+struct dci_fc_npiv_tbl {
+ struct dci_fc_npiv_cfg fc_npiv_cfg;
+ struct dci_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+struct pause_flood_monitor {
+ u8 period_cnt;
+ u8 any_brb_prs_packet_hist;
+ u8 any_brb_block_is_full_hist;
+ u8 flags;
+ u32 num_of_state_changes;
+};
+
+struct public_port {
+ u32 validity_map;
+
+ u32 link_status;
+#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+#define LINK_STATUS_SFP_TX_FAULT 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
+
+#define LINK_STATUS_FEC_MODE_MASK 0x38000000
+#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
+#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 BIT(27)
+#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
+#define LINK_STATUS_EXT_PHY_LINK_UP BIT(30)
+
+ u32 link_status1;
+ u32 ext_phy_fw_version;
+ u32 drv_phy_cfg_addr;
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ struct port_mf_cfg port_mf_config;
+ struct port_stats stats;
+
+ u32 media_type;
+#define MEDIA_UNSPECIFIED 0x0
+#define MEDIA_SFPP_10G_FIBER 0x1
+#define MEDIA_XFP_FIBER 0x2
+#define MEDIA_DA_TWINAX 0x3
+#define MEDIA_BASE_T 0x4
+#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_MODULE_FIBER 0x6
+#define MEDIA_KR 0xf0
+#define MEDIA_NOT_PRESENT 0xff
+
+ u32 lfa_status;
+ u32 link_change_count;
+
+ struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+
+ /* DCBX related MIB */
+ struct dcbx_local_params local_admin_dcbx_mib;
+ struct dcbx_mib remote_dcbx_mib;
+ struct dcbx_mib operational_dcbx_mib;
+
+ u32 fc_npiv_nvram_tbl_addr;
+ u32 fc_npiv_nvram_tbl_size;
+
+ u32 transceiver_data;
+#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff
+#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
+#define ETH_TRANSCEIVER_STATE_IN_SETUP 0x10
+#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00
+#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8
+#define ETH_TRANSCEIVER_TYPE_NONE 0x00
+#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff
+#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
+#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
+#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
+#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
+#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
+#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
+#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
+#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
+#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
+#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
+#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
+#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
+#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
+#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
+#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f
+#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
+#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
+#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
+#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13
+#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
+#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
+#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
+#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19
+#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
+#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c
+#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d
+#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e
+#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
+#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
+#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
+#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a
+
+ u32 wol_info;
+ u32 wol_pkt_len;
+ u32 wol_pkt_details;
+ struct dcb_dscp_map dcb_dscp_map;
+
+ u32 eee_status;
+#define EEE_ACTIVE_BIT BIT(0)
+#define EEE_LD_ADV_STATUS_MASK 0x000000f0
+#define EEE_LD_ADV_STATUS_OFFSET 4
+#define EEE_1G_ADV BIT(1)
+#define EEE_10G_ADV BIT(2)
+#define EEE_LP_ADV_STATUS_MASK 0x00000f00
+#define EEE_LP_ADV_STATUS_OFFSET 8
+#define EEE_SUPPORTED_SPEED_MASK 0x0000f000
+#define EEE_SUPPORTED_SPEED_OFFSET 12
+#define EEE_1G_SUPPORTED BIT(1)
+#define EEE_10G_SUPPORTED BIT(2)
+
+ u32 eee_remote;
+#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
+#define EEE_REMOTE_TW_TX_OFFSET 0
+#define EEE_REMOTE_TW_RX_MASK 0xffff0000
+#define EEE_REMOTE_TW_RX_OFFSET 16
+
+ u32 module_info;
+
+ u32 oem_cfg_port;
+#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003
+#define OEM_CFG_CHANNEL_TYPE_OFFSET 0
+#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1
+#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2
+#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C
+#define OEM_CFG_SCHED_TYPE_OFFSET 2
+#define OEM_CFG_SCHED_TYPE_ETS 0x1
+#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2
+
+ struct lldp_received_tlvs_s lldp_received_tlvs[LLDP_MAX_LLDP_AGENTS];
+ u32 system_lldp_tlvs_buf2[MAX_SYSTEM_LLDP_TLV_DATA];
+ u32 phy_module_temperature;
+ u32 nig_reg_stat_rx_bmb_packet;
+ u32 nig_reg_rx_llh_ncsi_mcp_mask;
+ u32 nig_reg_rx_llh_ncsi_mcp_mask_2;
+ struct pause_flood_monitor pause_flood_monitor;
+ u32 nig_drain_cnt;
+ struct pkt_type_cnt pkt_tc_priority_cnt;
+};
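+
+/* A minimal decode sketch (illustrative, not part of the HSI itself):
+ * the transceiver state and type share the transceiver_data dword and
+ * are recovered with the mask/offset pairs defined above.
+ */
+static inline u8 port_transceiver_type(u32 transceiver_data)
+{
+	return (u8)((transceiver_data & ETH_TRANSCEIVER_TYPE_MASK) >>
+		    ETH_TRANSCEIVER_TYPE_OFFSET);
+}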
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+struct drv_version_stc {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+struct public_func {
+ u32 iscsi_boot_signature;
+ u32 iscsi_boot_block_offset;
+
+ u32 mtu_size;
+
+ u32 c2s_pcp_map_lower;
+ u32 c2s_pcp_map_upper;
+ u32 c2s_pcp_map_default;
+
+ struct generic_idc_msg_s generic_idc_msg;
+
+ u32 num_of_msix;
+
+ u32 config;
+#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
+#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
+#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+
+#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT 8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT 16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
+
+ u32 status;
+#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001
+
+ u32 mac_upper;
+#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 ovlan_stag;
+#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT 0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
+
+ u32 pf_allocation;
+
+ u32 preserve_data;
+
+ u32 driver_last_activity_ts;
+
+ u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];
+
+ u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT 0
+
+#define LOAD_REQ_HSI_VERSION 2
+#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT 16
+#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
+ DRV_ID_MCP_HSI_VER_SHIFT)
+
+#define DRV_ID_DRV_TYPE_MASK 0x7f000000
+#define DRV_ID_DRV_TYPE_SHIFT 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT)
+
+#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
+#define DRV_ID_DRV_INIT_HW_SHIFT 31
+#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT)
+
+ u32 oem_cfg_func;
+#define OEM_CFG_FUNC_TC_MASK 0x0000000F
+#define OEM_CFG_FUNC_TC_OFFSET 0
+#define OEM_CFG_FUNC_TC_0 0x0
+#define OEM_CFG_FUNC_TC_1 0x1
+#define OEM_CFG_FUNC_TC_2 0x2
+#define OEM_CFG_FUNC_TC_3 0x3
+#define OEM_CFG_FUNC_TC_4 0x4
+#define OEM_CFG_FUNC_TC_5 0x5
+#define OEM_CFG_FUNC_TC_6 0x6
+#define OEM_CFG_FUNC_TC_7 0x7
+
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2
+
+ struct drv_version_stc drv_ver;
+};
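+
+/* Sketch of a typical drv_id composition from the fields above
+ * (illustrative; assumes a PDA compatibility version of 0):
+ *
+ *	drv_id = DRV_ID_MCP_HSI_VER_CURRENT | DRV_ID_DRV_TYPE_LINUX;
+ */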
+
+struct mcp_mac {
+ u32 mac_upper;
+ u32 mac_lower;
+};
+
+struct mcp_file_att {
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+struct bist_nvm_image_att {
+ u32 return_code;
+ u32 image_type;
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+struct lan_stats_stc {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+ u32 reserved;
+};
+
+struct fcoe_stats_stc {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
+struct iscsi_stats_stc {
+ u64 rx_pdus;
+ u64 tx_pdus;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct rdma_stats_stc {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct ocbb_data_stc {
+ u32 ocbb_host_addr;
+ u32 ocsd_host_addr;
+ u32 ocsd_req_update_interval;
+};
+
+struct fcoe_cap_stc {
+ u32 max_ios;
+ u32 max_log;
+ u32 max_exch;
+ u32 max_npiv;
+ u32 max_tgt;
+ u32 max_outstnd;
+};
+
+#define MAX_NUM_OF_SENSORS 7
+struct temperature_status_stc {
+ u32 num_of_sensors;
+ u32 sensor[MAX_NUM_OF_SENSORS];
+};
+
+/* crash dump configuration header */
+struct mdump_config_stc {
+ u32 version;
+ u32 config;
+ u32 epoch;
+ u32 num_of_logs;
+ u32 valid_logs;
+};
+
+enum resource_id_enum {
+ RESOURCE_NUM_SB_E = 0,
+ RESOURCE_NUM_L2_QUEUE_E = 1,
+ RESOURCE_NUM_VPORT_E = 2,
+ RESOURCE_NUM_VMQ_E = 3,
+ RESOURCE_FACTOR_NUM_RSS_PF_E = 4,
+ RESOURCE_FACTOR_RSS_PER_VF_E = 5,
+ RESOURCE_NUM_RL_E = 6,
+ RESOURCE_NUM_PQ_E = 7,
+ RESOURCE_NUM_VF_E = 8,
+ RESOURCE_VFC_FILTER_E = 9,
+ RESOURCE_ILT_E = 10,
+ RESOURCE_CQS_E = 11,
+ RESOURCE_GFT_PROFILES_E = 12,
+ RESOURCE_NUM_TC_E = 13,
+ RESOURCE_NUM_RSS_ENGINES_E = 14,
+ RESOURCE_LL2_QUEUE_E = 15,
+ RESOURCE_RDMA_STATS_QUEUE_E = 16,
+ RESOURCE_BDQ_E = 17,
+ RESOURCE_QCN_E = 18,
+ RESOURCE_LLH_FILTER_E = 19,
+ RESOURCE_VF_MAC_ADDR = 20,
+ RESOURCE_LL2_CQS_E = 21,
+ RESOURCE_VF_CNQS = 22,
+ RESOURCE_MAX_NUM,
+ RESOURCE_NUM_INVALID = 0xFFFFFFFF
+};
+
+/* The resource ID is filled in by the driver in the MB request; the size,
+ * offset and flags are filled in by the MFW in the MB response.
+ */
+struct resource_info {
+ enum resource_id_enum res_id;
+ u32 size; /* Number of allocated resources */
+ u32 offset; /* Offset of the first resource */
+ u32 vf_size;
+ u32 vf_offset;
+ u32 flags;
+#define RESOURCE_ELEMENT_STRICT BIT(0)
+};
+
+struct mcp_wwn {
+ u32 wwn_upper;
+ u32 wwn_lower;
+};
+
+#define DRV_ROLE_NONE 0
+#define DRV_ROLE_PREBOOT 1
+#define DRV_ROLE_OS 2
+#define DRV_ROLE_KDUMP 3
+
+struct load_req_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_REQ_ROLE_MASK 0x000000FF
+#define LOAD_REQ_ROLE_SHIFT 0
+#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
+#define LOAD_REQ_LOCK_TO_SHIFT 8
+#define LOAD_REQ_LOCK_TO_DEFAULT 0
+#define LOAD_REQ_LOCK_TO_NONE 255
+#define LOAD_REQ_FORCE_MASK 0x000F0000
+#define LOAD_REQ_FORCE_SHIFT 16
+#define LOAD_REQ_FORCE_NONE 0
+#define LOAD_REQ_FORCE_PF 1
+#define LOAD_REQ_FORCE_ALL 2
+#define LOAD_REQ_FLAGS0_MASK 0x00F00000
+#define LOAD_REQ_FLAGS0_SHIFT 20
+#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
+};
+
+struct load_rsp_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_RSP_ROLE_MASK 0x000000FF
+#define LOAD_RSP_ROLE_SHIFT 0
+#define LOAD_RSP_HSI_MASK 0x0000FF00
+#define LOAD_RSP_HSI_SHIFT 8
+#define LOAD_RSP_FLAGS0_MASK 0x000F0000
+#define LOAD_RSP_FLAGS0_SHIFT 16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
+};
+
+struct mdump_retain_data_stc {
+ u32 valid;
+ u32 epoch;
+ u32 pf;
+ u32 status;
+};
+
+struct attribute_cmd_write_stc {
+ u32 val;
+ u32 mask;
+ u32 offset;
+};
+
+struct lldp_stats_stc {
+ u32 tx_frames_total;
+ u32 rx_frames_total;
+ u32 rx_frames_discarded;
+ u32 rx_age_outs;
+};
+
+struct get_att_ctrl_stc {
+ u32 disabled_attns;
+ u32 controllable_attns;
+};
+
+struct trace_filter_stc {
+ u32 level;
+ u32 modules;
+};
+
+union drv_union_data {
+ struct mcp_mac wol_mac;
+
+ struct eth_phy_cfg drv_phy_cfg;
+
+ struct mcp_val64 val64;
+
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+
+ struct mcp_file_att file_att;
+
+ u32 ack_vf_disabled[EXT_VF_BITMAP_SIZE_IN_DWORDS];
+
+ struct drv_version_stc drv_version;
+
+ struct lan_stats_stc lan_stats;
+ struct fcoe_stats_stc fcoe_stats;
+ struct iscsi_stats_stc iscsi_stats;
+ struct rdma_stats_stc rdma_stats;
+ struct ocbb_data_stc ocbb_info;
+ struct temperature_status_stc temp_info;
+ struct resource_info resource;
+ struct bist_nvm_image_att nvm_image_att;
+ struct mdump_config_stc mdump_config;
+ struct mcp_mac lldp_mac;
+ struct mcp_wwn fcoe_fabric_name;
+ u32 dword;
+
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
+ struct mdump_retain_data_stc mdump_retain;
+ struct attribute_cmd_write_stc attribute_cmd_write;
+ struct lldp_stats_stc lldp_stats;
+ struct pcie_stats_stc pcie_stats;
+
+ struct get_att_ctrl_stc get_att_ctrl;
+ struct fcoe_cap_stc fcoe_cap;
+ struct trace_filter_stc trace_filter;
+};
+
+struct public_drv_mb {
+ u32 drv_mb_header;
+#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define DRV_MSG_SEQ_NUMBER_OFFSET 0
+#define DRV_MSG_CODE_MASK 0xffff0000
+#define DRV_MSG_CODE_OFFSET 16
+
+ u32 drv_mb_param;
+
+ u32 fw_mb_header;
+#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define FW_MSG_SEQ_NUMBER_OFFSET 0
+#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_OFFSET 16
+
+ u32 fw_mb_param;
+
+ u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK 0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+
+ u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK 0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+#define MCP_EVENT_MASK 0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+
+ union drv_union_data union_data;
+};
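+
+/* Hedged sketch of the mailbox handshake: a rolling sequence number and a
+ * (pre-shifted) DRV_MSG_CODE_* command are packed into one header dword.
+ */
+static inline u32 drv_mb_compose_header(u16 seq, u32 cmd)
+{
+	return (cmd & DRV_MSG_CODE_MASK) |
+	       (((u32)seq << DRV_MSG_SEQ_NUMBER_OFFSET) &
+		DRV_MSG_SEQ_NUMBER_MASK);
+}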
+
+#define DRV_MSG_CODE(_code_) ((_code_) << DRV_MSG_CODE_OFFSET)
+enum drv_msg_code_enum {
+ DRV_MSG_CODE_NVM_PUT_FILE_BEGIN = DRV_MSG_CODE(0x0001),
+ DRV_MSG_CODE_NVM_PUT_FILE_DATA = DRV_MSG_CODE(0x0002),
+ DRV_MSG_CODE_NVM_GET_FILE_ATT = DRV_MSG_CODE(0x0003),
+ DRV_MSG_CODE_NVM_READ_NVRAM = DRV_MSG_CODE(0x0005),
+ DRV_MSG_CODE_NVM_WRITE_NVRAM = DRV_MSG_CODE(0x0006),
+ DRV_MSG_CODE_MCP_RESET = DRV_MSG_CODE(0x0009),
+ DRV_MSG_CODE_SET_VERSION = DRV_MSG_CODE(0x000f),
+ DRV_MSG_CODE_MCP_HALT = DRV_MSG_CODE(0x0010),
+ DRV_MSG_CODE_SET_VMAC = DRV_MSG_CODE(0x0011),
+ DRV_MSG_CODE_GET_VMAC = DRV_MSG_CODE(0x0012),
+ DRV_MSG_CODE_GET_STATS = DRV_MSG_CODE(0x0013),
+ DRV_MSG_CODE_TRANSCEIVER_READ = DRV_MSG_CODE(0x0016),
+ DRV_MSG_CODE_MASK_PARITIES = DRV_MSG_CODE(0x001a),
+ DRV_MSG_CODE_BIST_TEST = DRV_MSG_CODE(0x001e),
+ DRV_MSG_CODE_SET_LED_MODE = DRV_MSG_CODE(0x0020),
+ DRV_MSG_CODE_RESOURCE_CMD = DRV_MSG_CODE(0x0023),
+ DRV_MSG_CODE_MDUMP_CMD = DRV_MSG_CODE(0x0025),
+ DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL = DRV_MSG_CODE(0x002b),
+ DRV_MSG_CODE_OS_WOL = DRV_MSG_CODE(0x002e),
+ DRV_MSG_CODE_GET_TLV_DONE = DRV_MSG_CODE(0x002f),
+ DRV_MSG_CODE_FEATURE_SUPPORT = DRV_MSG_CODE(0x0030),
+ DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT = DRV_MSG_CODE(0x0031),
+ DRV_MSG_CODE_GET_ENGINE_CONFIG = DRV_MSG_CODE(0x0037),
+ DRV_MSG_CODE_GET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003e),
+ DRV_MSG_CODE_SET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003f),
+ DRV_MSG_CODE_INITIATE_PF_FLR = DRV_MSG_CODE(0x0201),
+ DRV_MSG_CODE_LOAD_REQ = DRV_MSG_CODE(0x1000),
+ DRV_MSG_CODE_LOAD_DONE = DRV_MSG_CODE(0x1100),
+ DRV_MSG_CODE_INIT_HW = DRV_MSG_CODE(0x1200),
+ DRV_MSG_CODE_CANCEL_LOAD_REQ = DRV_MSG_CODE(0x1300),
+ DRV_MSG_CODE_UNLOAD_REQ = DRV_MSG_CODE(0x2000),
+ DRV_MSG_CODE_UNLOAD_DONE = DRV_MSG_CODE(0x2100),
+ DRV_MSG_CODE_INIT_PHY = DRV_MSG_CODE(0x2200),
+ DRV_MSG_CODE_LINK_RESET = DRV_MSG_CODE(0x2300),
+ DRV_MSG_CODE_SET_DCBX = DRV_MSG_CODE(0x2500),
+ DRV_MSG_CODE_OV_UPDATE_CURR_CFG = DRV_MSG_CODE(0x2600),
+ DRV_MSG_CODE_OV_UPDATE_BUS_NUM = DRV_MSG_CODE(0x2700),
+ DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS = DRV_MSG_CODE(0x2800),
+ DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER = DRV_MSG_CODE(0x2900),
+ DRV_MSG_CODE_NIG_DRAIN = DRV_MSG_CODE(0x3000),
+ DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE = DRV_MSG_CODE(0x3100),
+ DRV_MSG_CODE_BW_UPDATE_ACK = DRV_MSG_CODE(0x3200),
+ DRV_MSG_CODE_OV_UPDATE_MTU = DRV_MSG_CODE(0x3300),
+ DRV_MSG_GET_RESOURCE_ALLOC_MSG = DRV_MSG_CODE(0x3400),
+ DRV_MSG_SET_RESOURCE_VALUE_MSG = DRV_MSG_CODE(0x3500),
+ DRV_MSG_CODE_OV_UPDATE_WOL = DRV_MSG_CODE(0x3800),
+ DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE = DRV_MSG_CODE(0x3900),
+ DRV_MSG_CODE_S_TAG_UPDATE_ACK = DRV_MSG_CODE(0x3b00),
+ DRV_MSG_CODE_GET_OEM_UPDATES = DRV_MSG_CODE(0x4100),
+ DRV_MSG_CODE_GET_PPFID_BITMAP = DRV_MSG_CODE(0x4300),
+ DRV_MSG_CODE_VF_DISABLED_DONE = DRV_MSG_CODE(0xc000),
+ DRV_MSG_CODE_CFG_VF_MSIX = DRV_MSG_CODE(0xc001),
+ DRV_MSG_CODE_CFG_PF_VFS_MSIX = DRV_MSG_CODE(0xc002),
+ DRV_MSG_CODE_DEBUG_DATA_SEND = DRV_MSG_CODE(0xc004),
+};
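+
+/* e.g. DRV_MSG_CODE_LOAD_REQ expands to 0x1000 << 16 == 0x10000000 */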
+
+#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
+#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
+#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
+#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
+#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
+
+/* DRV_MSG_CODE_RETAIN_VMAC parameters */
+#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_SHIFT 0
+#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_MASK 0xf
+
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_SHIFT 4
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_MASK 0x70
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_L2 0
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_ISCSI 1
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_FCOE 2
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWNN 3
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWPN 4
+
+#define DRV_MSG_CODE_MCP_RESET_FORCE 0xf04ce
+
+#define DRV_MSG_CODE_STATS_TYPE_LAN 1
+#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
+#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
+#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+
+#define BW_MAX_MASK 0x000000ff
+#define BW_MAX_OFFSET 0
+#define BW_MIN_MASK 0x0000ff00
+#define BW_MIN_OFFSET 8
+
+#define DRV_MSG_FAN_FAILURE_TYPE BIT(0)
+#define DRV_MSG_TEMPERATURE_FAILURE_TYPE BIT(1)
+
+#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
+#define RESOURCE_CMD_REQ_RESC_SHIFT 0
+#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
+#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
+#define RESOURCE_OPCODE_REQ 1
+#define RESOURCE_OPCODE_REQ_WO_AGING 2
+#define RESOURCE_OPCODE_REQ_W_AGING 3
+#define RESOURCE_OPCODE_RELEASE 4
+#define RESOURCE_OPCODE_FORCE_RELEASE 5
+#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
+#define RESOURCE_CMD_REQ_AGE_SHIFT 8
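+
+/* Illustrative resource-lock request for resource 3, acquired with aging
+ * and an age value of 10 (units are MFW-defined):
+ *
+ *	param = (3 << RESOURCE_CMD_REQ_RESC_SHIFT) |
+ *		(RESOURCE_OPCODE_REQ_W_AGING << RESOURCE_CMD_REQ_OPCODE_SHIFT) |
+ *		(10 << RESOURCE_CMD_REQ_AGE_SHIFT);
+ */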
+
+#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
+#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
+#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
+#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
+#define RESOURCE_OPCODE_GNT 1
+#define RESOURCE_OPCODE_BUSY 2
+#define RESOURCE_OPCODE_RELEASED 3
+#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
+#define RESOURCE_OPCODE_WRONG_OWNER 5
+#define RESOURCE_OPCODE_UNKNOWN_CMD 255
+
+#define RESOURCE_DUMP 0
+
+/* DRV_MSG_CODE_MDUMP_CMD parameters */
+#define MDUMP_DRV_PARAM_OPCODE_MASK 0x000000ff
+#define DRV_MSG_CODE_MDUMP_ACK 0x01
+#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02
+#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
+#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
+#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
+#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
+#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07
+#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08
+
+#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a
+
+#define DRV_MSG_CODE_MDUMP_FREE_DRIVER_BUF 0x0b
+#define DRV_MSG_CODE_MDUMP_GEN_LINK_DUMP 0x0c
+#define DRV_MSG_CODE_MDUMP_GEN_IDLE_CHK 0x0d
+
+/* DRV_MSG_CODE_MDUMP_CMD options */
+#define MDUMP_DRV_PARAM_OPTION_MASK 0x00000f00
+#define DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_OFFSET 8
+#define DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_MASK 0x100
+
+/* DRV_MSG_CODE_EXT_PHY_READ/DRV_MSG_CODE_EXT_PHY_WRITE parameters */
+#define DRV_MB_PARAM_ADDR_SHIFT 0
+#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF
+#define DRV_MB_PARAM_DEVAD_SHIFT 16
+#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000
+#define DRV_MB_PARAM_PORT_SHIFT 21
+#define DRV_MB_PARAM_PORT_MASK 0x00600000
+
+/* DRV_MSG_CODE_PMBUS_READ/DRV_MSG_CODE_PMBUS_WRITE parameters */
+#define DRV_MB_PARAM_PMBUS_CMD_SHIFT 0
+#define DRV_MB_PARAM_PMBUS_CMD_MASK 0xFF
+#define DRV_MB_PARAM_PMBUS_LEN_SHIFT 8
+#define DRV_MB_PARAM_PMBUS_LEN_MASK 0x300
+#define DRV_MB_PARAM_PMBUS_DATA_SHIFT 16
+#define DRV_MB_PARAM_PMBUS_DATA_MASK 0xFFFF0000
+
+/* UNLOAD_REQ params */
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
+
+/* UNLOAD_DONE params */
+#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001
+
+/* INIT_PHY params */
+#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
+#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
+
+/* LLDP / DCBX params */
+#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
+#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
+#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_SHIFT 0
+#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_MASK 0x000007f0
+#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_SHIFT 4
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_MASK 0x00000010
+#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_SHIFT 4
+
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_MASK 0x000000ff
+#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_SHIFT 0
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3
+
+#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
+#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
+
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+
+#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
+#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
+#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
+#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
+#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2
+#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3
+
+#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
+#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
+
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
+
+#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
+#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
+
+#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \
+ DRV_MB_PARAM_WOL_DISABLED | \
+ DRV_MB_PARAM_WOL_ENABLED)
+#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP
+#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED
+#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED
+
+#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEB | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEPA)
+#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0
+#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
+#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
+
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
+
+#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
+#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
+#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000
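+
+/* Sketch (illustrative) of a TRANSCEIVER_READ parameter for port 0,
+ * reading 4 bytes at offset 0x60 of I2C address 0xa0:
+ *
+ *	param = (0 << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
+ *		(4 << DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) |
+ *		(0xa0 << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) |
+ *		(0x60 << DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
+ */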
+
+/* Resource Allocation params - Driver version support */
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+
+#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0
+#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
+#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3
+#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
+#define DRV_MB_PARAM_BIST_RC_PASSED 1
+#define DRV_MB_PARAM_BIST_RC_FAILED 2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
+
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00
+
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ 0x00000001
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 0x00000008
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
+
+/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff
+
+/* Driver attributes params */
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000
+
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_IGNORE 0x0000ffff
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_SHIFT 21
+#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_MASK 0x00200000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000
+
+/* DRV_MSG_CODE_GET_PERM_MAC parameters */
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_SHIFT 0
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MASK 0xF
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_PF 0
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_BMC 1
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_VF 2
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_LLDP 3
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MAX 4
+#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_SHIFT 8
+#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_MASK 0xFFFF00
+
+#define FW_MSG_CODE(_code_) ((_code_) << FW_MSG_CODE_OFFSET)
+enum fw_msg_code_enum {
+ FW_MSG_CODE_UNSUPPORTED = FW_MSG_CODE(0x0000),
+ FW_MSG_CODE_NVM_OK = FW_MSG_CODE(0x0001),
+ FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK = FW_MSG_CODE(0x0040),
+ FW_MSG_CODE_PHY_OK = FW_MSG_CODE(0x0011),
+ FW_MSG_CODE_OK = FW_MSG_CODE(0x0016),
+ FW_MSG_CODE_ERROR = FW_MSG_CODE(0x0017),
+ FW_MSG_CODE_TRANSCEIVER_DIAG_OK = FW_MSG_CODE(0x0016),
+ FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT = FW_MSG_CODE(0x0002),
+ FW_MSG_CODE_MDUMP_INVALID_CMD = FW_MSG_CODE(0x0003),
+ FW_MSG_CODE_OS_WOL_SUPPORTED = FW_MSG_CODE(0x0080),
+ FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE = FW_MSG_CODE(0x0087),
+ FW_MSG_CODE_DRV_LOAD_ENGINE = FW_MSG_CODE(0x1010),
+ FW_MSG_CODE_DRV_LOAD_PORT = FW_MSG_CODE(0x1011),
+ FW_MSG_CODE_DRV_LOAD_FUNCTION = FW_MSG_CODE(0x1012),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_PDA = FW_MSG_CODE(0x1020),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 = FW_MSG_CODE(0x1021),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG = FW_MSG_CODE(0x1022),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_HSI = FW_MSG_CODE(0x1023),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE = FW_MSG_CODE(0x1030),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT = FW_MSG_CODE(0x1031),
+ FW_MSG_CODE_DRV_LOAD_DONE = FW_MSG_CODE(0x1110),
+ FW_MSG_CODE_DRV_UNLOAD_ENGINE = FW_MSG_CODE(0x2011),
+ FW_MSG_CODE_DRV_UNLOAD_PORT = FW_MSG_CODE(0x2012),
+ FW_MSG_CODE_DRV_UNLOAD_FUNCTION = FW_MSG_CODE(0x2013),
+ FW_MSG_CODE_DRV_UNLOAD_DONE = FW_MSG_CODE(0x2110),
+ FW_MSG_CODE_RESOURCE_ALLOC_OK = FW_MSG_CODE(0x3400),
+ FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN = FW_MSG_CODE(0x3500),
+ FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE = FW_MSG_CODE(0x3b00),
+ FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE = FW_MSG_CODE(0xb001),
+ FW_MSG_CODE_DEBUG_NOT_ENABLED = FW_MSG_CODE(0xb00a),
+ FW_MSG_CODE_DEBUG_DATA_SEND_OK = FW_MSG_CODE(0xb00b),
+};
+
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+
+/* Get PF RDMA protocol command response */
+#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
+#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1
+#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
+#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
+
+/* Get MFW feature support response */
+#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0)
+#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1)
+#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO BIT(2)
+#define FW_MB_PARAM_FEATURE_SUPPORT_LP_PRES_DET BIT(3)
+#define FW_MB_PARAM_FEATURE_SUPPORT_RELAXED_ORD BIT(4)
+#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5)
+#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6)
+#define FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP BIT(7)
+#define FW_MB_PARAM_FEATURE_SUPPORT_VF_DPM BIT(8)
+#define FW_MB_PARAM_FEATURE_SUPPORT_IDLE_CHK BIT(9)
+#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16)
+#define FW_MB_PARAM_FEATURE_SUPPORT_DISABLE_LLDP BIT(17)
+#define FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK BIT(18)
+#define FW_MB_PARAM_FEATURE_SUPPORT_RESTORE_DEFAULT_CFG BIT(19)
+
+#define FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED 0x00000001
+
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0)
+
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3
+
+#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff
+#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0
+
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24
+
+enum MFW_DRV_MSG_TYPE {
+ MFW_DRV_MSG_LINK_CHANGE,
+ MFW_DRV_MSG_FLR_FW_ACK_FAILED,
+ MFW_DRV_MSG_VF_DISABLED,
+ MFW_DRV_MSG_LLDP_DATA_UPDATED,
+ MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
+ MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
+ MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_BW_UPDATE,
+ MFW_DRV_MSG_S_TAG_UPDATE,
+ MFW_DRV_MSG_GET_LAN_STATS,
+ MFW_DRV_MSG_GET_FCOE_STATS,
+ MFW_DRV_MSG_GET_ISCSI_STATS,
+ MFW_DRV_MSG_GET_RDMA_STATS,
+ MFW_DRV_MSG_FAILURE_DETECTED,
+ MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+ MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
+ MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE,
+ MFW_DRV_MSG_GET_TLV_REQ,
+ MFW_DRV_MSG_OEM_CFG_UPDATE,
+ MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED,
+ MFW_DRV_MSG_GENERIC_IDC,
+ MFW_DRV_MSG_XCVR_TX_FAULT,
+ MFW_DRV_MSG_XCVR_RX_LOS,
+ MFW_DRV_MSG_GET_FCOE_CAP,
+ MFW_DRV_MSG_GEN_LINK_DUMP,
+ MFW_DRV_MSG_GEN_IDLE_CHK,
+ MFW_DRV_MSG_DCBX_ADMIN_CFG_APPLIED,
+ MFW_DRV_MSG_MAX
+};
+
+#define MFW_DRV_MSG_MAX_DWORDS(msgs) ((((msgs) - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id) ((msg_id) >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id) (((msg_id) & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
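+
+/* Illustrative helper: each message type occupies one byte of the msg[]
+ * bitmap in struct public_mfw_mb below, so a pending message (e.g.
+ * MFW_DRV_MSG_LINK_CHANGE) is tested as follows.
+ */
+static inline bool mfw_msg_pending(const u32 *msg, int msg_id)
+{
+	return !!(msg[MFW_DRV_MSG_DWORD(msg_id)] & MFW_DRV_MSG_MASK(msg_id));
+}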
+
+struct public_mfw_mb {
+ u32 sup_msgs;
+ u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+ u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+};
+
+enum public_sections {
+ PUBLIC_DRV_MB,
+ PUBLIC_MFW_MB,
+ PUBLIC_GLOBAL,
+ PUBLIC_PATH,
+ PUBLIC_PORT,
+ PUBLIC_FUNC,
+ PUBLIC_MAX_SECTIONS
+};
+
+struct drv_ver_info_stc {
+ u32 ver;
+ u8 name[32];
+};
+
+/* Runtime data needs about 0.5 KB; 2 KB are reserved here to be on the
+ * safe side. Please make sure the data does not exceed this size.
+ */
+#define NUM_RUNTIME_DWORDS 16
+struct drv_init_hw_stc {
+ u32 init_hw_bitmask[NUM_RUNTIME_DWORDS];
+ u32 init_hw_data[NUM_RUNTIME_DWORDS * 32];
+};
+
+struct mcp_public_data {
+ u32 num_sections;
+ u32 sections[PUBLIC_MAX_SECTIONS];
+ struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+ struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+ struct public_global global;
+ struct public_path path[MCP_GLOB_PATH_MAX];
+ struct public_port port[MCP_GLOB_PORT_MAX];
+ struct public_func func[MCP_GLOB_FUNC_MAX];
+};
+
+#define I2C_TRANSCEIVER_ADDR 0xa0
+#define MAX_I2C_TRANSACTION_SIZE 16
+#define MAX_I2C_TRANSCEIVER_PAGE_SIZE 256
+
+/* OCBB definitions */
+enum tlvs {
+ /* Category 1: Device Properties */
+ DRV_TLV_CLP_STR,
+ DRV_TLV_CLP_STR_CTD,
+ /* Category 6: Device Configuration */
+ DRV_TLV_SCSI_TO,
+ DRV_TLV_R_T_TOV,
+ DRV_TLV_R_A_TOV,
+ DRV_TLV_E_D_TOV,
+ DRV_TLV_CR_TOV,
+ DRV_TLV_BOOT_TYPE,
+ /* Category 8: Port Configuration */
+ DRV_TLV_NPIV_ENABLED,
+ /* Category 10: Function Configuration */
+ DRV_TLV_FEATURE_FLAGS,
+ DRV_TLV_LOCAL_ADMIN_ADDR,
+ DRV_TLV_ADDITIONAL_MAC_ADDR_1,
+ DRV_TLV_ADDITIONAL_MAC_ADDR_2,
+ DRV_TLV_LSO_MAX_OFFLOAD_SIZE,
+ DRV_TLV_LSO_MIN_SEGMENT_COUNT,
+ DRV_TLV_PROMISCUOUS_MODE,
+ DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG,
+ DRV_TLV_FLEX_NIC_OUTER_VLAN_ID,
+ DRV_TLV_OS_DRIVER_STATES,
+ DRV_TLV_PXE_BOOT_PROGRESS,
+ /* Category 12: FC/FCoE Configuration */
+ DRV_TLV_NPIV_STATE,
+ DRV_TLV_NUM_OF_NPIV_IDS,
+ DRV_TLV_SWITCH_NAME,
+ DRV_TLV_SWITCH_PORT_NUM,
+ DRV_TLV_SWITCH_PORT_ID,
+ DRV_TLV_VENDOR_NAME,
+ DRV_TLV_SWITCH_MODEL,
+ DRV_TLV_SWITCH_FW_VER,
+ DRV_TLV_QOS_PRIORITY_PER_802_1P,
+ DRV_TLV_PORT_ALIAS,
+ DRV_TLV_PORT_STATE,
+ DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_LINK_FAILURE_COUNT,
+ DRV_TLV_FCOE_BOOT_PROGRESS,
+ /* Category 13: iSCSI Configuration */
+ DRV_TLV_TARGET_LLMNR_ENABLED,
+ DRV_TLV_HEADER_DIGEST_FLAG_ENABLED,
+ DRV_TLV_DATA_DIGEST_FLAG_ENABLED,
+ DRV_TLV_AUTHENTICATION_METHOD,
+ DRV_TLV_ISCSI_BOOT_TARGET_PORTAL,
+ DRV_TLV_MAX_FRAME_SIZE,
+ DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_ISCSI_BOOT_PROGRESS,
+ /* Category 20: Device Data */
+ DRV_TLV_PCIE_BUS_RX_UTILIZATION,
+ DRV_TLV_PCIE_BUS_TX_UTILIZATION,
+ DRV_TLV_DEVICE_CPU_CORES_UTILIZATION,
+ DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED,
+ DRV_TLV_NCSI_RX_BYTES_RECEIVED,
+ DRV_TLV_NCSI_TX_BYTES_SENT,
+ /* Category 22: Base Port Data */
+ DRV_TLV_RX_DISCARDS,
+ DRV_TLV_RX_ERRORS,
+ DRV_TLV_TX_ERRORS,
+ DRV_TLV_TX_DISCARDS,
+ DRV_TLV_RX_FRAMES_RECEIVED,
+ DRV_TLV_TX_FRAMES_SENT,
+ /* Category 23: FC/FCoE Port Data */
+ DRV_TLV_RX_BROADCAST_PACKETS,
+ DRV_TLV_TX_BROADCAST_PACKETS,
+ /* Category 28: Base Function Data */
+ DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4,
+ DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6,
+ DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_PF_RX_FRAMES_RECEIVED,
+ DRV_TLV_RX_BYTES_RECEIVED,
+ DRV_TLV_PF_TX_FRAMES_SENT,
+ DRV_TLV_TX_BYTES_SENT,
+ DRV_TLV_IOV_OFFLOAD,
+ DRV_TLV_PCI_ERRORS_CAP_ID,
+ DRV_TLV_UNCORRECTABLE_ERROR_STATUS,
+ DRV_TLV_UNCORRECTABLE_ERROR_MASK,
+ DRV_TLV_CORRECTABLE_ERROR_STATUS,
+ DRV_TLV_CORRECTABLE_ERROR_MASK,
+ DRV_TLV_PCI_ERRORS_AECC_REGISTER,
+ DRV_TLV_TX_QUEUES_EMPTY,
+ DRV_TLV_RX_QUEUES_EMPTY,
+ DRV_TLV_TX_QUEUES_FULL,
+ DRV_TLV_RX_QUEUES_FULL,
+ /* Category 29: FC/FCoE Function Data */
+ DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_FCOE_RX_FRAMES_RECEIVED,
+ DRV_TLV_FCOE_RX_BYTES_RECEIVED,
+ DRV_TLV_FCOE_TX_FRAMES_SENT,
+ DRV_TLV_FCOE_TX_BYTES_SENT,
+ DRV_TLV_CRC_ERROR_COUNT,
+ DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_1_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_2_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_3_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_4_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_5_TIMESTAMP,
+ DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT,
+ DRV_TLV_LOSS_OF_SIGNAL_ERRORS,
+ DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT,
+ DRV_TLV_DISPARITY_ERROR_COUNT,
+ DRV_TLV_CODE_VIOLATION_ERROR_COUNT,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4,
+ DRV_TLV_LAST_FLOGI_TIMESTAMP,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4,
+ DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP,
+ DRV_TLV_LAST_FLOGI_RJT,
+ DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP,
+ DRV_TLV_FDISCS_SENT_COUNT,
+ DRV_TLV_FDISC_ACCS_RECEIVED,
+ DRV_TLV_FDISC_RJTS_RECEIVED,
+ DRV_TLV_PLOGI_SENT_COUNT,
+ DRV_TLV_PLOGI_ACCS_RECEIVED,
+ DRV_TLV_PLOGI_RJTS_RECEIVED,
+ DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_1_TIMESTAMP,
+ DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_2_TIMESTAMP,
+ DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_3_TIMESTAMP,
+ DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_4_TIMESTAMP,
+ DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_5_TIMESTAMP,
+ DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_1_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_2_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_3_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_4_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_5_ACC_TIMESTAMP,
+ DRV_TLV_LOGOS_ISSUED,
+ DRV_TLV_LOGO_ACCS_RECEIVED,
+ DRV_TLV_LOGO_RJTS_RECEIVED,
+ DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_1_TIMESTAMP,
+ DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_2_TIMESTAMP,
+ DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_3_TIMESTAMP,
+ DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_4_TIMESTAMP,
+ DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_5_TIMESTAMP,
+ DRV_TLV_LOGOS_RECEIVED,
+ DRV_TLV_ACCS_ISSUED,
+ DRV_TLV_PRLIS_ISSUED,
+ DRV_TLV_ACCS_RECEIVED,
+ DRV_TLV_ABTS_SENT_COUNT,
+ DRV_TLV_ABTS_ACCS_RECEIVED,
+ DRV_TLV_ABTS_RJTS_RECEIVED,
+ DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_1_TIMESTAMP,
+ DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_2_TIMESTAMP,
+ DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_3_TIMESTAMP,
+ DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_4_TIMESTAMP,
+ DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_5_TIMESTAMP,
+ DRV_TLV_RSCNS_RECEIVED,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4,
+ DRV_TLV_LUN_RESETS_ISSUED,
+ DRV_TLV_ABORT_TASK_SETS_ISSUED,
+ DRV_TLV_TPRLOS_SENT,
+ DRV_TLV_NOS_SENT_COUNT,
+ DRV_TLV_NOS_RECEIVED_COUNT,
+ DRV_TLV_OLS_COUNT,
+ DRV_TLV_LR_COUNT,
+ DRV_TLV_LRR_COUNT,
+ DRV_TLV_LIP_SENT_COUNT,
+ DRV_TLV_LIP_RECEIVED_COUNT,
+ DRV_TLV_EOFA_COUNT,
+ DRV_TLV_EOFNI_COUNT,
+ DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT,
+ DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT,
+ DRV_TLV_SCSI_STATUS_BUSY_COUNT,
+ DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT,
+ DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT,
+ DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT,
+ DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT,
+ DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT,
+ DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT,
+ DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_1_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_2_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_3_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_4_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_5_TIMESTAMP,
+ /* Category 30: iSCSI Function Data */
+ DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED,
+ DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED,
+ DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT,
+ DRV_TLV_ISCSI_PDU_TX_BYTES_SENT,
+ DRV_TLV_RDMA_DRV_VERSION
+};
+
+#define I2C_DEV_ADDR_A2 0xa2
+#define SFP_EEPROM_A2_TEMPERATURE_ADDR 0x60
+#define SFP_EEPROM_A2_TEMPERATURE_SIZE 2
+#define SFP_EEPROM_A2_VCC_ADDR 0x62
+#define SFP_EEPROM_A2_VCC_SIZE 2
+#define SFP_EEPROM_A2_TX_BIAS_ADDR 0x64
+#define SFP_EEPROM_A2_TX_BIAS_SIZE 2
+#define SFP_EEPROM_A2_TX_POWER_ADDR 0x66
+#define SFP_EEPROM_A2_TX_POWER_SIZE 2
+#define SFP_EEPROM_A2_RX_POWER_ADDR 0x68
+#define SFP_EEPROM_A2_RX_POWER_SIZE 2
+
+#define I2C_DEV_ADDR_A0 0xa0
+#define QSFP_EEPROM_A0_TEMPERATURE_ADDR 0x16
+#define QSFP_EEPROM_A0_TEMPERATURE_SIZE 2
+#define QSFP_EEPROM_A0_VCC_ADDR 0x1a
+#define QSFP_EEPROM_A0_VCC_SIZE 2
+#define QSFP_EEPROM_A0_TX1_BIAS_ADDR 0x2a
+#define QSFP_EEPROM_A0_TX1_BIAS_SIZE 2
+#define QSFP_EEPROM_A0_TX1_POWER_ADDR 0x32
+#define QSFP_EEPROM_A0_TX1_POWER_SIZE 2
+#define QSFP_EEPROM_A0_RX1_POWER_ADDR 0x22
+#define QSFP_EEPROM_A0_RX1_POWER_SIZE 2
+
+struct nvm_cfg_mac_address {
+ u32 mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
+
+ u32 mac_addr_lo;
+};
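+
+/* A common driver-side expansion of this pair into a 6-byte MAC address
+ * (illustrative; byte order follows the hi/lo split above, with the two
+ * most significant bytes in mac_addr_hi).
+ */
+static inline void nvm_cfg_mac_to_bytes(const struct nvm_cfg_mac_address *a,
+					u8 *mac)
+{
+	mac[0] = (u8)(a->mac_addr_hi >> 8);
+	mac[1] = (u8)a->mac_addr_hi;
+	mac[2] = (u8)(a->mac_addr_lo >> 24);
+	mac[3] = (u8)(a->mac_addr_lo >> 16);
+	mac[4] = (u8)(a->mac_addr_lo >> 8);
+	mac[5] = (u8)a->mac_addr_lo;
+}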
+
+struct nvm_cfg1_glob {
+ u32 generic_cont0;
+#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
+#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
+
+ u32 engineering_change[3];
+ u32 manufacturing_id;
+ u32 serial_number[4];
+ u32 pcie_cfg;
+ u32 mgmt_traffic;
+
+ u32 core_cfg;
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15
+
+ u32 e_lane_cfg1;
+ u32 e_lane_cfg2;
+ u32 f_lane_cfg1;
+ u32 f_lane_cfg2;
+ u32 mps10_preemphasis;
+ u32 mps10_driver_current;
+ u32 mps25_preemphasis;
+ u32 mps25_driver_current;
+ u32 pci_id;
+ u32 pci_subsys_id;
+ u32 bar;
+ u32 mps10_txfir_main;
+ u32 mps10_txfir_post;
+ u32 mps25_txfir_main;
+ u32 mps25_txfir_post;
+ u32 manufacture_ver;
+ u32 manufacture_time;
+ u32 led_global_settings;
+ u32 generic_cont1;
+
+ u32 mbi_version;
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
+
+ u32 mbi_date;
+ u32 misc_sig;
+
+ u32 device_capabilities;
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP 0x10
+
+ u32 power_dissipated;
+ u32 power_consumed;
+ u32 efi_version;
+ u32 multi_network_modes_capability;
+ u32 nvm_cfg_version;
+ u32 nvm_cfg_new_option_seq;
+ u32 nvm_cfg_removed_option_seq;
+ u32 nvm_cfg_updated_value_seq;
+ u32 extended_serial_number[8];
+ u32 option_kit_pn[8];
+ u32 spare_pn[8];
+ u32 mps25_active_txfir_pre;
+ u32 mps25_active_txfir_main;
+ u32 mps25_active_txfir_post;
+ u32 features;
+ u32 tx_rx_eq_25g_hlpc;
+ u32 tx_rx_eq_25g_llpc;
+ u32 tx_rx_eq_25g_ac;
+ u32 tx_rx_eq_10g_pc;
+ u32 tx_rx_eq_10g_ac;
+ u32 tx_rx_eq_1g;
+ u32 tx_rx_eq_25g_bt;
+ u32 tx_rx_eq_10g_bt;
+ u32 generic_cont4;
+ u32 preboot_debug_mode_std;
+ u32 preboot_debug_mode_ext;
+ u32 ext_phy_cfg1;
+ u32 clocks;
+ u32 pre2_generic_cont_1;
+ u32 pre2_generic_cont_2;
+ u32 pre2_generic_cont_3;
+ u32 tx_rx_eq_50g_hlpc;
+ u32 tx_rx_eq_50g_mlpc;
+ u32 tx_rx_eq_50g_llpc;
+ u32 tx_rx_eq_50g_ac;
+ u32 trace_modules;
+ u32 pcie_class_code_fcoe;
+ u32 pcie_class_code_iscsi;
+ u32 no_provisioned_mac;
+ u32 lowest_mbi_version;
+ u32 generic_cont5;
+ u32 pre2_generic_cont_4;
+ u32 reserved[40];
+};
+
+struct nvm_cfg1_path {
+ u32 reserved[1];
+};
+
+struct nvm_cfg1_port {
+ u32 rel_to_opt123;
+ u32 rel_to_opt124;
+
+ u32 generic_cont0;
+#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00f00000
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
+
+ u32 pcie_cfg;
+ u32 features;
+
+ u32 speed_cap_mask;
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+
+ u32 link_settings;
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7
+
+ u32 phy_cfg;
+ u32 mgmt_traffic;
+
+ u32 ext_phy;
+ /* EEE power saving mode */
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3
+
+ u32 mba_cfg1;
+ u32 mba_cfg2;
+ u32 vf_cfg;
+ struct nvm_cfg_mac_address lldp_mac_address;
+ u32 led_port_settings;
+ u32 transceiver_00;
+ u32 device_ids;
+
+ u32 board_cfg;
+#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff
+#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
+#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
+#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
+#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
+
+ u32 mnm_10g_cap;
+ u32 mnm_10g_ctrl;
+ u32 mnm_10g_misc;
+ u32 mnm_25g_cap;
+ u32 mnm_25g_ctrl;
+ u32 mnm_25g_misc;
+ u32 mnm_40g_cap;
+ u32 mnm_40g_ctrl;
+ u32 mnm_40g_misc;
+ u32 mnm_50g_cap;
+ u32 mnm_50g_ctrl;
+ u32 mnm_50g_misc;
+ u32 mnm_100g_cap;
+ u32 mnm_100g_ctrl;
+ u32 mnm_100g_misc;
+
+ u32 temperature;
+ u32 ext_phy_cfg1;
+
+ u32 extended_speed;
+#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff
+#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 0x8
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400
+
+ u32 extended_fec_mode;
+ u32 port_generic_cont_01;
+ u32 port_generic_cont_02;
+ u32 phy_temp_monitor;
+ u32 reserved[109];
+};
+
+struct nvm_cfg1_func {
+ struct nvm_cfg_mac_address mac_address;
+ u32 rsrv1;
+ u32 rsrv2;
+ u32 device_id;
+ u32 cmn_cfg;
+ u32 pci_cfg;
+ struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr;
+ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr;
+ u32 preboot_generic_cfg;
+ u32 features;
+ u32 mf_mode_feature;
+ u32 reserved[6];
+};
+
+struct nvm_cfg1 {
+ struct nvm_cfg1_glob glob;
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX];
+ struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
+ struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
+};
+
+struct board_info {
+ u16 vendor_id;
+ u16 eth_did_suffix;
+ u16 sub_vendor_id;
+ u16 sub_device_id;
+ char *board_name;
+ char *friendly_name;
+};
+
+struct trace_module_info {
+ char *module_name;
+};
+
+#define NUM_TRACE_MODULES 25
+
+enum nvm_cfg_sections {
+ NVM_CFG_SECTION_NVM_CFG1,
+ NVM_CFG_SECTION_MAX
+};
+
+struct nvm_cfg {
+ u32 num_sections;
+ u32 sections_offset[NVM_CFG_SECTION_MAX];
+ struct nvm_cfg1 cfg1;
+};
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_2 2
+#define PORT_3 3
+
+extern struct spad_layout g_spad;
+struct spad_layout {
+ struct nvm_cfg nvm_cfg;
+ struct mcp_public_data public_data;
+};
+
+#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */
+
+#define SPAD_OFFSET(addr) (((u32)(addr) - (u32)CPU_SPAD_BASE))
+
+#define TO_OFFSIZE(_offset, _size) \
+ ((u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_OFFSET) | \
+ (((u32)(_size) >> 2) << OFFSIZE_SIZE_OFFSET)))
+
+enum spad_sections {
+ SPAD_SECTION_TRACE,
+ SPAD_SECTION_NVM_CFG,
+ SPAD_SECTION_PUBLIC,
+ SPAD_SECTION_PRIVATE,
+ SPAD_SECTION_MAX
+};
+
+#define STRUCT_OFFSET(f) (STATIC_INIT_BASE + \
+ __builtin_offsetof(struct static_init, f))
+
+/* This section is located at a fixed location in the beginning of the
+ * scratchpad, to ensure that the MCP trace is not run over during MFW upgrade.
+ * All the rest of data has a floating location which differs from version to
+ * version, and is pointed by the mcp_meta_data below.
+ * Moreover, the spad_layout section is part of the MFW firmware, and is loaded
+ * with it from nvram in order to clear this portion.
+ */
+struct static_init {
+ u32 num_sections;
+ offsize_t sections[SPAD_SECTION_MAX];
+#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_]))))
+
+ u32 tim_hash[8];
+#define PRESERVED_TIM_HASH ((u8 *)(STRUCT_OFFSET(tim_hash)))
+ u32 tpu_hash[8];
+#define PRESERVED_TPU_HASH ((u8 *)(STRUCT_OFFSET(tpu_hash)))
+ u32 secure_pcie_fw_ver;
+#define SECURE_PCIE_FW_VER (*((u32 *)(STRUCT_OFFSET(secure_pcie_fw_ver))))
+ u32 secure_running_mfw;
+#define SECURE_RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(secure_running_mfw))))
+ struct mcp_trace trace;
+};
+
+#define CRC_MAGIC_VALUE 0xDEBB20E3
+#define CRC32_POLYNOMIAL 0xEDB88320
+#define _KB(x) ((x) * 1024)
+#define _MB(x) (_KB(x) * 1024)
+#define NVM_CRC_SIZE (sizeof(u32))
+enum nvm_sw_arbitrator {
+ NVM_SW_ARB_HOST,
+ NVM_SW_ARB_MCP,
+ NVM_SW_ARB_UART,
+ NVM_SW_ARB_RESERVED
+};
+
+struct legacy_bootstrap_region {
+ u32 magic_value;
+#define NVM_MAGIC_VALUE 0x669955aa
+ u32 sram_start_addr;
+ u32 code_len;
+ u32 code_start_addr;
+ u32 crc;
+};
+
+struct nvm_code_entry {
+ u32 image_type;
+ u32 nvm_start_addr;
+ u32 len;
+ u32 sram_start_addr;
+ u32 sram_run_addr;
+};
+
+enum nvm_image_type {
+ NVM_TYPE_TIM1 = 0x01,
+ NVM_TYPE_TIM2 = 0x02,
+ NVM_TYPE_MIM1 = 0x03,
+ NVM_TYPE_MIM2 = 0x04,
+ NVM_TYPE_MBA = 0x05,
+ NVM_TYPE_MODULES_PN = 0x06,
+ NVM_TYPE_VPD = 0x07,
+ NVM_TYPE_MFW_TRACE1 = 0x08,
+ NVM_TYPE_MFW_TRACE2 = 0x09,
+ NVM_TYPE_NVM_CFG1 = 0x0a,
+ NVM_TYPE_L2B = 0x0b,
+ NVM_TYPE_DIR1 = 0x0c,
+ NVM_TYPE_EAGLE_FW1 = 0x0d,
+ NVM_TYPE_FALCON_FW1 = 0x0e,
+ NVM_TYPE_PCIE_FW1 = 0x0f,
+ NVM_TYPE_HW_SET = 0x10,
+ NVM_TYPE_LIM = 0x11,
+ NVM_TYPE_AVS_FW1 = 0x12,
+ NVM_TYPE_DIR2 = 0x13,
+ NVM_TYPE_CCM = 0x14,
+ NVM_TYPE_EAGLE_FW2 = 0x15,
+ NVM_TYPE_FALCON_FW2 = 0x16,
+ NVM_TYPE_PCIE_FW2 = 0x17,
+ NVM_TYPE_AVS_FW2 = 0x18,
+ NVM_TYPE_INIT_HW = 0x19,
+ NVM_TYPE_DEFAULT_CFG = 0x1a,
+ NVM_TYPE_MDUMP = 0x1b,
+ NVM_TYPE_NVM_META = 0x1c,
+ NVM_TYPE_ISCSI_CFG = 0x1d,
+ NVM_TYPE_FCOE_CFG = 0x1f,
+ NVM_TYPE_ETH_PHY_FW1 = 0x20,
+ NVM_TYPE_ETH_PHY_FW2 = 0x21,
+ NVM_TYPE_BDN = 0x22,
+ NVM_TYPE_8485X_PHY_FW = 0x23,
+ NVM_TYPE_PUB_KEY = 0x24,
+ NVM_TYPE_RECOVERY = 0x25,
+ NVM_TYPE_PLDM = 0x26,
+ NVM_TYPE_UPK1 = 0x27,
+ NVM_TYPE_UPK2 = 0x28,
+ NVM_TYPE_MASTER_KC = 0x29,
+ NVM_TYPE_BACKUP_KC = 0x2a,
+ NVM_TYPE_HW_DUMP = 0x2b,
+ NVM_TYPE_HW_DUMP_OUT = 0x2c,
+ NVM_TYPE_BIN_NVM_META = 0x30,
+ NVM_TYPE_ROM_TEST = 0xf0,
+ NVM_TYPE_88X33X0_PHY_FW = 0x31,
+ NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32,
+ NVM_TYPE_IDLE_CHK = 0x33,
+ NVM_TYPE_MAX,
+};
+
+#define MAX_NVM_DIR_ENTRIES 100
+
+struct nvm_dir_meta {
+ u32 dir_id;
+ u32 nvm_dir_addr;
+ u32 num_images;
+ u32 next_mfw_to_run;
+};
+
+struct nvm_dir {
+ s32 seq;
+#define NVM_DIR_NEXT_MFW_MASK 0x00000001
+#define NVM_DIR_SEQ_MASK 0xfffffffe
+#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)
+#define NVM_DIR_UPDATE_SEQ(_seq, swap_mfw)\
+ ({ \
+ _seq = (((_seq + 2) & \
+ NVM_DIR_SEQ_MASK) | \
+ (NVM_DIR_NEXT_MFW(_seq ^ (swap_mfw))));\
+ })
+
+#define IS_DIR_SEQ_VALID(seq) (((seq) & NVM_DIR_SEQ_MASK) != \
+ NVM_DIR_SEQ_MASK)
+
+ u32 num_images;
+ u32 rsrv;
+ struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */
+};
+
+#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \
+ ((_num_images) - 1) *\
+ sizeof(struct nvm_code_entry) +\
+ NVM_CRC_SIZE)
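
As a quick check of the arithmetic above — a minimal sketch, not part of the patch: sizeof(struct nvm_dir) already counts the one nvm_code_entry in code[1], so NVM_DIR_SIZE() adds only (num_images - 1) further entries plus the trailing CRC word.

	/* Illustrative: directory sized for 3 images.
	 * NVM_CRC_SIZE is sizeof(u32) == 4 here.
	 */
	u32 dir_bytes = NVM_DIR_SIZE(3);
	/* == sizeof(struct nvm_dir)
	 *    + 2 * sizeof(struct nvm_code_entry) + 4
	 */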
+
+struct nvm_vpd_image {
+ u32 format_revision;
+#define VPD_IMAGE_VERSION 1
+
+ u8 vpd_data[1];
+};
+
+#define DIR_ID_1 (0)
+#define DIR_ID_2 (1)
+#define MAX_DIR_IDS (2)
+
+#define MFW_BUNDLE_1 (0)
+#define MFW_BUNDLE_2 (1)
+#define MAX_MFW_BUNDLES (2)
+
+#define FLASH_PAGE_SIZE 0x1000
+#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE)
+#define LEGACY_ASIC_MIM_MAX_SIZE (_KB(1200))
+
+#define FPGA_MIM_MAX_SIZE (0x40000)
+
+#define LIM_MAX_SIZE ((2 * FLASH_PAGE_SIZE) - \
+ sizeof(struct legacy_bootstrap_region) \
+ - NVM_RSV_SIZE)
+#define LIM_OFFSET (NVM_OFFSET(lim_image))
+#define NVM_RSV_SIZE (44)
+#define GET_MIM_MAX_SIZE(is_asic, is_e4) (LEGACY_ASIC_MIM_MAX_SIZE)
+#define GET_MIM_OFFSET(idx, is_asic, is_e4) (NVM_OFFSET(dir[MAX_MFW_BUNDLES])\
+ + (((idx) == NVM_TYPE_MIM2) ? \
+ GET_MIM_MAX_SIZE(is_asic, is_e4)\
+ : 0))
+#define GET_NVM_FIXED_AREA_SIZE(is_asic, is_e4) (sizeof(struct nvm_image) + \
+ GET_MIM_MAX_SIZE(is_asic,\
+ is_e4) * 2)
+
+union nvm_dir_union {
+ struct nvm_dir dir;
+ u8 page[FLASH_PAGE_SIZE];
+};
+
+struct nvm_image {
+ struct legacy_bootstrap_region bootstrap;
+ u8 rsrv[NVM_RSV_SIZE];
+ u8 lim_image[LIM_MAX_SIZE];
+ union nvm_dir_union dir[MAX_MFW_BUNDLES];
+};
+
+#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->f))))
+
+struct hw_set_info {
+ u32 reg_type;
+#define GRC_REG_TYPE 1
+#define PHY_REG_TYPE 2
+#define PCI_REG_TYPE 4
+
+ u32 bank_num;
+ u32 pf_num;
+ u32 operation;
+#define READ_OP 1
+#define WRITE_OP 2
+#define RMW_SET_OP 3
+#define RMW_CLR_OP 4
+
+ u32 reg_addr;
+ u32 reg_data;
+
+ u32 reset_type;
+#define POR_RESET_TYPE BIT(0)
+#define HARD_RESET_TYPE BIT(1)
+#define CORE_RESET_TYPE BIT(2)
+#define MCP_RESET_TYPE BIT(3)
+#define PERSET_ASSERT BIT(4)
+#define PERSET_DEASSERT BIT(5)
+};
+
+struct hw_set_image {
+ u32 format_version;
+#define HW_SET_IMAGE_VERSION 1
+ u32 no_hw_sets;
+ struct hw_set_info hw_sets[1];
+};
+
+#define MAX_SUPPORTED_NVM_OPTIONS 1000
+
+#define NVM_META_BIN_OPTION_OFFSET_MASK 0x0000ffff
+#define NVM_META_BIN_OPTION_OFFSET_SHIFT 0
+#define NVM_META_BIN_OPTION_LEN_MASK 0x00ff0000
+#define NVM_META_BIN_OPTION_LEN_OFFSET 16
+#define NVM_META_BIN_OPTION_ENTITY_MASK 0x03000000
+#define NVM_META_BIN_OPTION_ENTITY_SHIFT 24
+#define NVM_META_BIN_OPTION_ENTITY_GLOB 0
+#define NVM_META_BIN_OPTION_ENTITY_PORT 1
+#define NVM_META_BIN_OPTION_ENTITY_FUNC 2
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_MASK 0x0c000000
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_SHIFT 26
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_USER 0
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_FIXED 1
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_FORCED 2
+
+struct nvm_meta_bin_t {
+ u32 magic;
+#define NVM_META_BIN_MAGIC 0x669955bb
+ u32 version;
+#define NVM_META_BIN_VERSION 1
+ u32 num_options;
+ u32 options[0];
+};
+#endif
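
The OFFSET/LEN/ENTITY/CONFIG_TYPE masks above pack four fields into each u32 entry of nvm_meta_bin_t's options[] array. A minimal decoding sketch, assuming a raw option word opt taken from that array (note the LEN field's shift macro is named _LEN_OFFSET rather than _LEN_SHIFT):

	static void nvm_meta_decode_option(u32 opt)
	{
		u32 off = (opt & NVM_META_BIN_OPTION_OFFSET_MASK) >>
			  NVM_META_BIN_OPTION_OFFSET_SHIFT;
		u32 len = (opt & NVM_META_BIN_OPTION_LEN_MASK) >>
			  NVM_META_BIN_OPTION_LEN_OFFSET;
		u32 entity = (opt & NVM_META_BIN_OPTION_ENTITY_MASK) >>
			     NVM_META_BIN_OPTION_ENTITY_SHIFT;

		/* entity is one of _ENTITY_GLOB/_PORT/_FUNC; off/len
		 * locate the option's bits inside the NVM config image.
		 */
	}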
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index b8c5641b29a8..5d725f59db24 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -26,12 +26,12 @@ static struct qed_ooo_archipelago
u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
struct qed_ooo_archipelago *p_archipelago;
- if (idx >= p_ooo_info->max_num_archipelagos)
+ if (unlikely(idx >= p_ooo_info->max_num_archipelagos))
return NULL;
p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
- if (list_empty(&p_archipelago->isles_list))
+ if (unlikely(list_empty(&p_archipelago->isles_list)))
return NULL;
return p_archipelago;
@@ -46,7 +46,7 @@ static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
u8 the_num_of_isle = 1;
p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
- if (!p_archipelago) {
+ if (unlikely(!p_archipelago)) {
DP_NOTICE(p_hwfn,
"Connection %d is not found in OOO list\n", cid);
return NULL;
@@ -362,7 +362,7 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
if (ooo_isle > 1) {
p_prev_isle = qed_ooo_seek_isle(p_hwfn,
p_ooo_info, cid, ooo_isle - 1);
- if (!p_prev_isle) {
+ if (unlikely(!p_prev_isle)) {
DP_NOTICE(p_hwfn,
"Isle %d is not found(cid %d)\n",
ooo_isle - 1, cid);
@@ -370,7 +370,7 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
}
}
p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
- if (!p_archipelago && (ooo_isle != 1)) {
+ if (unlikely(!p_archipelago && ooo_isle != 1)) {
DP_NOTICE(p_hwfn,
"Connection %d is not found in OOO list\n", cid);
return;
@@ -381,7 +381,7 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
struct qed_ooo_isle, list_entry);
list_del(&p_isle->list_entry);
- if (!list_empty(&p_isle->buffers_list)) {
+ if (unlikely(!list_empty(&p_isle->buffers_list))) {
DP_NOTICE(p_hwfn, "Free isle is not empty\n");
INIT_LIST_HEAD(&p_isle->buffers_list);
}
@@ -418,13 +418,13 @@ void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_isle *p_isle = NULL;
p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle);
- if (!p_isle) {
+ if (unlikely(!p_isle)) {
DP_NOTICE(p_hwfn,
"Isle %d is not found(cid %d)\n", ooo_isle, cid);
return;
}
- if (buffer_side == QED_OOO_LEFT_BUF)
+ if (unlikely(buffer_side == QED_OOO_LEFT_BUF))
list_add(&p_buffer->list_entry, &p_isle->buffers_list);
else
list_add_tail(&p_buffer->list_entry, &p_isle->buffers_list);
@@ -438,7 +438,7 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
left_isle + 1);
- if (!p_right_isle) {
+ if (unlikely(!p_right_isle)) {
DP_NOTICE(p_hwfn,
"Right isle %d is not found(cid %d)\n",
left_isle + 1, cid);
@@ -450,7 +450,7 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
if (left_isle) {
p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
left_isle);
- if (!p_left_isle) {
+ if (unlikely(!p_left_isle)) {
DP_NOTICE(p_hwfn,
"Left isle %d is not found(cid %d)\n",
left_isle, cid);
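
Every error branch in this file gains an unlikely() annotation — the kernel's branch-prediction hint (a __builtin_expect() wrapper from <linux/compiler.h>) telling the compiler to lay the cold error path out of line. The same idea in miniature, with a hypothetical helper that is not from this driver:

	#include <linux/skbuff.h>

	/* Hypothetical: the empty-queue check is the rare case, so hint
	 * the compiler to favor the fall-through (non-empty) path.
	 */
	static int pop_one(struct list_head *q, struct sk_buff **out)
	{
		if (unlikely(list_empty(q)))
			return -ENOENT;

		*out = list_first_entry(q, struct sk_buff, list);
		list_del(&(*out)->list);
		return 0;
	}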
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 4f4b79250a2b..7f3e84b8622d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -22,6 +22,7 @@
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
@@ -33,7 +34,6 @@
#include "qed_roce.h"
#include "qed_sp.h"
-
int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, u32 max_count, char *name)
{
@@ -865,8 +865,8 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
}
qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
- addr = GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
+ addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_COMMON_QUEUE_CONS, qz_num);
REG_WR16(p_hwfn, addr, prod);
@@ -1903,7 +1903,6 @@ void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
}
-
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
p_hwfn->db_bar_no_edpm = true;
@@ -1966,7 +1965,7 @@ static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
u8 *old_mac_address,
- u8 *new_mac_address)
+ const u8 *new_mac_address)
{
int rc = 0;
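
The open-coded "GTT base + per-index OFFSET()" sums are replaced by GET_GTT_REG_ADDR() from the newly included qed_iro_hsi.h. Its definition is not shown in this hunk; a sketch of the shape such a macro plausibly takes (an assumption, not the header's actual text):

	/* Assumed shape only:
	 *   #define GET_GTT_REG_ADDR(base, name, idx) \
	 *           ((base) + name##_OFFSET(idx))
	 * i.e. the same arithmetic as before, with the IRO offset math
	 * kept next to the HSI definitions instead of at each call site.
	 */
	u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
				    USTORM_COMMON_QUEUE_CONS, qz_num);
	REG_WR16(p_hwfn, addr, prod);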
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
index 6a1de3a25257..2753723011dd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -168,16 +168,19 @@ static inline bool qed_rdma_is_xrc_qp(struct qed_rdma_qp *qp)
return false;
}
+
#if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn);
void qed_rdma_info_free(struct qed_hwfn *p_hwfn);
#else
-static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt) {}
static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt) {}
-static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;}
+static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
+ { return -EINVAL; }
static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {}
#endif
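
The reflowed stubs above follow the kernel's standard compile-out pattern: real prototypes when CONFIG_QED_RDMA is set, empty or erroring static inlines otherwise, so callers need no #ifdef guards of their own. The same pattern in miniature, with a hypothetical CONFIG_FOO feature:

	#if IS_ENABLED(CONFIG_FOO)
	int foo_info_alloc(struct foo_dev *dev);
	void foo_info_free(struct foo_dev *dev);
	#else
	static inline int foo_info_alloc(struct foo_dev *dev)
	{ return -EINVAL; }
	static inline void foo_info_free(struct foo_dev *dev) {}
	#endif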
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index da1b7fdcbda7..6f1a52e6beb2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -126,6 +126,8 @@
0x1009c4UL
#define QM_REG_PF_EN \
0x2f2ea4UL
+#define QM_REG_RLGLBLUPPERBOUND \
+ 0x2f3c00UL
#define TCFC_REG_WEAK_ENABLE_VF \
0x2d0704UL
#define TCFC_REG_STRONG_ENABLE_PF \
@@ -576,7 +578,7 @@
#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
#define PRS_REG_VXLAN_PORT 0x1f0738UL
-#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0)
@@ -595,8 +597,8 @@
#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2 0x100930UL
#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
@@ -606,7 +608,10 @@
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
#define QM_REG_WFQVPWEIGHT 0x2fa000UL
-
+#define QM_REG_WFQVPUPPERBOUND \
+ 0x2fb000UL
+#define QM_REG_WFQVPCRD \
+ 0x2fc000UL
#define PGLCS_REG_DBG_SELECT_K2_E5 \
0x001d14UL
#define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \
@@ -1437,29 +1442,29 @@
0x1401140UL
#define XSEM_REG_SYNC_DBG_EMPTY \
0x1401160UL
-#define XSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define XSEM_REG_SLOW_DBG_ACTIVE \
0x1401400UL
-#define XSEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define XSEM_REG_SLOW_DBG_MODE \
0x1401404UL
-#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define XSEM_REG_DBG_FRAME_MODE \
0x1401408UL
#define XSEM_REG_DBG_GPRE_VECT \
0x1401410UL
-#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define XSEM_REG_DBG_MODE1_CFG \
0x1401420UL
#define XSEM_REG_FAST_MEMORY \
0x1440000UL
#define YSEM_REG_SYNC_DBG_EMPTY \
0x1501160UL
-#define YSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define YSEM_REG_SLOW_DBG_ACTIVE \
0x1501400UL
-#define YSEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define YSEM_REG_SLOW_DBG_MODE \
0x1501404UL
-#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define YSEM_REG_DBG_FRAME_MODE \
0x1501408UL
#define YSEM_REG_DBG_GPRE_VECT \
0x1501410UL
-#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define YSEM_REG_DBG_MODE1_CFG \
0x1501420UL
#define YSEM_REG_FAST_MEMORY \
0x1540000UL
@@ -1467,15 +1472,15 @@
0x1601140UL
#define PSEM_REG_SYNC_DBG_EMPTY \
0x1601160UL
-#define PSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define PSEM_REG_SLOW_DBG_ACTIVE \
0x1601400UL
-#define PSEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define PSEM_REG_SLOW_DBG_MODE \
0x1601404UL
-#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define PSEM_REG_DBG_FRAME_MODE \
0x1601408UL
#define PSEM_REG_DBG_GPRE_VECT \
0x1601410UL
-#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define PSEM_REG_DBG_MODE1_CFG \
0x1601420UL
#define PSEM_REG_FAST_MEMORY \
0x1640000UL
@@ -1483,15 +1488,15 @@
0x1701140UL
#define TSEM_REG_SYNC_DBG_EMPTY \
0x1701160UL
-#define TSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define TSEM_REG_SLOW_DBG_ACTIVE \
0x1701400UL
-#define TSEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define TSEM_REG_SLOW_DBG_MODE \
0x1701404UL
-#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define TSEM_REG_DBG_FRAME_MODE \
0x1701408UL
#define TSEM_REG_DBG_GPRE_VECT \
0x1701410UL
-#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define TSEM_REG_DBG_MODE1_CFG \
0x1701420UL
#define TSEM_REG_FAST_MEMORY \
0x1740000UL
@@ -1499,15 +1504,15 @@
0x1801140UL
#define MSEM_REG_SYNC_DBG_EMPTY \
0x1801160UL
-#define MSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define MSEM_REG_SLOW_DBG_ACTIVE \
0x1801400UL
-#define MSEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define MSEM_REG_SLOW_DBG_MODE \
0x1801404UL
-#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define MSEM_REG_DBG_FRAME_MODE \
0x1801408UL
#define MSEM_REG_DBG_GPRE_VECT \
0x1801410UL
-#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define MSEM_REG_DBG_MODE1_CFG \
0x1801420UL
#define MSEM_REG_FAST_MEMORY \
0x1840000UL
@@ -1517,21 +1522,21 @@
20480
#define USEM_REG_SYNC_DBG_EMPTY \
0x1901160UL
-#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define USEM_REG_SLOW_DBG_ACTIVE \
0x1901400UL
-#define USEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define USEM_REG_SLOW_DBG_MODE \
0x1901404UL
-#define USEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define USEM_REG_DBG_FRAME_MODE \
0x1901408UL
#define USEM_REG_DBG_GPRE_VECT \
0x1901410UL
-#define USEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define USEM_REG_DBG_MODE1_CFG \
0x1901420UL
#define USEM_REG_FAST_MEMORY \
0x1940000UL
#define SEM_FAST_REG_DBG_MODE23_SRC_DISABLE \
0x000748UL
-#define SEM_FAST_REG_DBG_MODE4_SRC_DISABLE \
+#define SEM_FAST_REG_DBG_MODSRC_DISABLE \
0x00074cUL
#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE \
0x000750UL
@@ -1561,7 +1566,7 @@
0x341500UL
#define BRB_REG_BIG_RAM_DATA_SIZE \
64
-#define SEM_FAST_REG_STALL_0_BB_K2 \
+#define SEM_FAST_REG_STALL_0 \
0x000488UL
#define SEM_FAST_REG_STALLED \
0x000494UL
@@ -1619,35 +1624,35 @@
0x008c14UL
#define NWS_REG_NWS_CMU_K2 \
0x720000UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \
0x000680UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \
0x000684UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \
0x0006c0UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \
0x0006c4UL
-#define MS_REG_MS_CMU_K2_E5 \
+#define MS_REG_MS_CMU_K2 \
0x6a4000UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \
0x000208UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \
0x00020cUL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \
0x000210UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \
0x000214UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \
0x000208UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \
0x00020cUL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \
0x000210UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \
0x000214UL
-#define PHY_PCIE_REG_PHY0_K2_E5 \
+#define PHY_PCIE_REG_PHY0_K2 \
0x620000UL
-#define PHY_PCIE_REG_PHY1_K2_E5 \
+#define PHY_PCIE_REG_PHY1_K2 \
0x624000UL
#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
#define NIG_REG_PPF_TO_ENGINE_SEL 0x508900UL
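
With E5 support dropped, the _BB_K2/_K2_E5 suffixes above shrink to bare names or plain _K2. Call sites consume these constants through the PTT-windowed accessors declared in qed_hw.h; a minimal sketch (the 0x1 bit is illustrative only):

	u32 mode;

	/* Read-modify-write one of the renamed SEM debug registers */
	mode = qed_rd(p_hwfn, p_ptt, YSEM_REG_DBG_FRAME_MODE);
	qed_wr(p_hwfn, p_ptt, YSEM_REG_DBG_FRAME_MODE, mode | 0x1);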
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index f16a157bb95a..071b4aeaddf2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -77,6 +77,14 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
* Beyond the added delay we clear the bitmap anyway.
*/
while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+ /* If the HW device is during recovery, all resources are
+ * immediately reset without receiving a per-cid indication
+ * from HW. In this case we don't expect the cid bitmap to be
+ * cleared.
+ */
+ if (p_hwfn->cdev->recov_in_prog)
+ return;
+
msleep(100);
if (wait_count++ > 20) {
DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
@@ -784,7 +792,6 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
if (rc)
goto err;
-
/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
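
The new early return keeps qed_roce_stop() from sleeping up to ~2 seconds for per-cid completions that can never arrive once recovery has reset the hardware wholesale. The surrounding loop, reduced to its skeleton (a sketch, not the driver's exact function):

	int wait_count = 0;

	while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
		if (p_hwfn->cdev->recov_in_prog)
			return;	/* HW reset already freed the cids */

		msleep(100);
		if (wait_count++ > 20) {
			DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
			break;
		}
	}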
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
index e27dd9a4547e..7a3bd749e1e4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
@@ -6,47 +6,47 @@
#include <linux/types.h>
/**
- * @brief qed_selftest_memory - Perform memory test
+ * qed_selftest_memory(): Perform memory test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_selftest_memory(struct qed_dev *cdev);
/**
- * @brief qed_selftest_interrupt - Perform interrupt test
+ * qed_selftest_interrupt(): Perform interrupt test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_selftest_interrupt(struct qed_dev *cdev);
/**
- * @brief qed_selftest_register - Perform register test
+ * qed_selftest_register(): Perform register test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_selftest_register(struct qed_dev *cdev);
/**
- * @brief qed_selftest_clock - Perform clock test
+ * qed_selftest_clock(): Perform clock test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_selftest_clock(struct qed_dev *cdev);
/**
- * @brief qed_selftest_nvram - Perform nvram test
+ * qed_selftest_nvram(): Perform nvram test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_selftest_nvram(struct qed_dev *cdev);
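
All five conversions above apply the same mechanical recipe from Doxygen-style markup to kernel-doc, which scripts/kernel-doc can then validate: the function name first with trailing (), one @name: line per parameter, and a Return: section. The canonical shape:

	/**
	 * function_name() - Short one-line description.
	 * @arg1: Describe the first argument.
	 * @arg2: Describe the second argument.
	 *
	 * Optional longer description goes here.
	 *
	 * Return: What the function returns.
	 */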
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 60ff3222bf55..4fb02a5579ee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -23,31 +23,26 @@ enum spq_mode {
};
struct qed_spq_comp_cb {
- void (*function)(struct qed_hwfn *,
- void *,
- union event_ring_data *,
+ void (*function)(struct qed_hwfn *p_hwfn,
+ void *cookie,
+ union event_ring_data *data,
u8 fw_return_code);
void *cookie;
};
/**
- * @brief qed_eth_cqe_completion - handles the completion of a
- * ramrod on the cqe ring
+ * qed_eth_cqe_completion(): handles the completion of a
+ * ramrod on the cqe ring.
*
- * @param p_hwfn
- * @param cqe
+ * @p_hwfn: HW device data.
+ * @cqe: CQE.
*
- * @return int
+ * Return: Int.
*/
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe);
-/**
- * @file
- *
- * QED Slow-hwfn queue interface
- */
-
+ /* QED Slow-hwfn queue interface */
union ramrod_data {
struct pf_start_ramrod_data pf_start;
struct pf_update_ramrod_data pf_update;
@@ -58,7 +53,7 @@ union ramrod_data {
struct tx_queue_stop_ramrod_data tx_queue_stop;
struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop;
- struct rx_update_gft_filter_data rx_update_gft;
+ struct rx_update_gft_filter_ramrod_data rx_update_gft;
struct vport_update_ramrod_data vport_update;
struct core_rx_start_ramrod_data core_rx_queue_start;
struct core_rx_stop_ramrod_data core_rx_queue_stop;
@@ -207,117 +202,128 @@ struct qed_spq {
};
/**
- * @brief qed_spq_post - Posts a Slow hwfn request to FW, or lacking that
- * Pends it to the future list.
+ * qed_spq_post(): Posts a Slow hwfn request to FW, or, lacking that,
+ * pends it to the future list.
*
- * @param p_hwfn
- * @param p_req
+ * @p_hwfn: HW device data.
+ * @p_ent: SPQ entry.
+ * @fw_return_code: Return code from firmware.
*
- * @return int
+ * Return: Int.
*/
int qed_spq_post(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent,
u8 *fw_return_code);
/**
- * @brief qed_spq_allocate - Alloocates & initializes the SPQ and EQ.
+ * qed_spq_alloc(): Allocates & initializes the SPQ and EQ.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_spq_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_spq_setup - Reset the SPQ to its start state.
+ * qed_spq_setup(): Reset the SPQ to its start state.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_spq_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_spq_deallocate - Deallocates the given SPQ struct.
+ * qed_spq_free(): Deallocates the given SPQ struct.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_spq_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_spq_get_entry - Obtain an entrry from the spq
- * free pool list.
- *
- *
+ * qed_spq_get_entry(): Obtain an entry from the spq
+ * free pool list.
*
- * @param p_hwfn
- * @param pp_ent
+ * @p_hwfn: HW device data.
+ * @pp_ent: Pointer to the acquired SPQ entry (output).
*
- * @return int
+ * Return: Int.
*/
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent);
/**
- * @brief qed_spq_return_entry - Return an entry to spq free
- * pool list
+ * qed_spq_return_entry(): Return an entry to spq free pool list.
*
- * @param p_hwfn
- * @param p_ent
+ * @p_hwfn: HW device data.
+ * @p_ent: SPQ entry to return to the free pool.
+ *
+ * Return: Void.
*/
void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent);
/**
- * @brief qed_eq_allocate - Allocates & initializes an EQ struct
+ * qed_eq_alloc(): Allocates & initializes an EQ struct.
*
- * @param p_hwfn
- * @param num_elem number of elements in the eq
+ * @p_hwfn: HW device data.
+ * @num_elem: number of elements in the eq.
*
- * @return int
+ * Return: Int.
*/
int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem);
/**
- * @brief qed_eq_setup - Reset the EQ to its start state.
+ * qed_eq_setup(): Reset the EQ to its start state.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_eq_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_eq_free - deallocates the given EQ struct.
+ * qed_eq_free(): deallocates the given EQ struct.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_eq_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_eq_prod_update - update the FW with default EQ producer
+ * qed_eq_prod_update(): update the FW with default EQ producer.
+ *
+ * @p_hwfn: HW device data.
+ * @prod: Producer value to write.
*
- * @param p_hwfn
- * @param prod
+ * Return: Void.
*/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
u16 prod);
/**
- * @brief qed_eq_completion - Completes currently pending EQ elements
+ * qed_eq_completion(): Completes currently pending EQ elements.
*
- * @param p_hwfn
- * @param cookie
+ * @p_hwfn: HW device data.
+ * @cookie: Cookie.
*
- * @return int
+ * Return: Int.
*/
int qed_eq_completion(struct qed_hwfn *p_hwfn,
void *cookie);
/**
- * @brief qed_spq_completion - Completes a single event
+ * qed_spq_completion(): Completes a single event.
*
- * @param p_hwfn
- * @param echo - echo value from cookie (used for determining completion)
- * @param p_data - data from cookie (used in callback function if applicable)
+ * @p_hwfn: HW device data.
+ * @echo: echo value from cookie (used for determining completion).
+ * @fw_return_code: FW return code.
+ * @p_data: data from cookie (used in callback function if applicable).
*
- * @return int
+ * Return: Int.
*/
int qed_spq_completion(struct qed_hwfn *p_hwfn,
__le16 echo,
@@ -325,44 +331,43 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
union event_ring_data *p_data);
/**
- * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
+ * qed_spq_get_cid(): Given p_hwfn, return cid for the hwfn's SPQ.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return u32 - SPQ CID
+ * Return: u32 - SPQ CID.
*/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_consq_alloc - Allocates & initializes an ConsQ
- * struct
+ * qed_consq_alloc(): Allocates & initializes a ConsQ struct.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_consq_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_consq_setup - Reset the ConsQ to its start state.
+ * qed_consq_setup(): Reset the ConsQ to its start state.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_consq_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_consq_free - deallocates the given ConsQ struct.
+ * qed_consq_free(): deallocates the given ConsQ struct.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_consq_free(struct qed_hwfn *p_hwfn);
int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
-/**
- * @file
- *
- * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
- */
+/* Slow-hwfn low-level commands (Ramrods) function definitions. */
#define QED_SP_EQ_COMPLETION 0x01
#define QED_SP_CQE_COMPLETION 0x02
@@ -377,12 +382,15 @@ struct qed_sp_init_data {
};
/**
- * @brief Returns a SPQ entry to the pool / frees the entry if allocated.
- * Should be called on in error flows after initializing the SPQ entry
- * and before posting it.
+ * qed_sp_destroy_request(): Returns an SPQ entry to the pool / frees the
+ * entry if allocated. Should be called only in
+ * error flows, after initializing the SPQ entry
+ * and before posting it.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ent: SPQ entry.
*
- * @param p_hwfn
- * @param p_ent
+ * Return: Void.
*/
void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent);
@@ -394,7 +402,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_sp_init_data *p_data);
/**
- * @brief qed_sp_pf_start - PF Function Start Ramrod
+ * qed_sp_pf_start(): PF Function Start Ramrod.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window for register access.
+ * @p_tunn: Tunnel configuration parameters.
+ * @allow_npar_tx_switch: Allow NPAR TX Switch.
+ *
+ * Return: Int.
*
* This ramrod is sent to initialize a physical function (PF). It will
* configure the function related parameters and write its completion to the
@@ -404,12 +419,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
* allocated by the driver on host memory and its parameters are written
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_tunn
- * @param allow_npar_tx_switch
- *
- * @return int
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
@@ -418,47 +427,33 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
bool allow_npar_tx_switch);
/**
- * @brief qed_sp_pf_update - PF Function Update Ramrod
+ * qed_sp_pf_update(): PF Function Update Ramrod.
*
- * This ramrod updates function-related parameters. Every parameter can be
- * updated independently, according to configuration flags.
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Int.
*
- * @return int
+ * This ramrod updates function-related parameters. Every parameter can be
+ * updated independently, according to configuration flags.
*/
int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_sp_pf_update_stag - Update firmware of new outer tag
+ * qed_sp_pf_update_stag(): Update firmware of new outer tag.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_sp_pf_stop - PF Function Stop Ramrod
- *
- * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
- * sent and the last completion written to the PFs Event Ring. This ramrod also
- * deletes the context for the Slowhwfn connection on this PF.
- *
- * @note Not required for first packet.
- *
- * @param p_hwfn
- *
- * @return int
- */
-
-/**
- * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod
+ * qed_sp_pf_update_ufp(): PF ufp update Ramrod.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn);
@@ -470,11 +465,11 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
/**
- * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod
+ * qed_sp_heartbeat_ramrod(): Send empty Ramrod.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
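
Taken together, the helpers documented above form one ramrod lifecycle: reserve a CID, acquire and initialize an SPQ entry, post it, and — on a pre-post error — hand the entry back with qed_sp_destroy_request(). A minimal sketch patterned after qed_sp_heartbeat_ramrod(), assuming the COMMON_RAMROD_EMPTY/PROTOCOLID_COMMON values used in qed_sp_commands.c:

	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_EMPTY,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);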
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index b4ed54ffef9b..648176dfb871 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -369,8 +369,12 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
p_ramrod->event_ring_num_pages = page_cnt;
- DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
+
+ /* Place consolidation queue address in ramrod */
+ DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr,
qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));
+ page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain);
+ p_ramrod->consolid_q_num_pages = page_cnt;
qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
@@ -401,8 +405,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
- p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
- p_ramrod->num_vfs = (u8) p_iov->total_vfs;
+ p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
+ p_ramrod->num_vfs = (u8)p_iov->total_vfs;
}
p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
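
The consolidation-queue hunk above writes a 64-bit DMA address into the ramrod through DMA_REGPAIR_LE(), which splits the address into the little-endian hi/lo halves of a struct regpair — roughly (a sketch of the macro family's shape, not quoted from the header):

	/* Roughly:
	 *   #define DMA_LO_LE(x)         cpu_to_le32(lower_32_bits(x))
	 *   #define DMA_HI_LE(x)         cpu_to_le32(upper_32_bits(x))
	 *   #define DMA_REGPAIR_LE(x, v) do { (x).hi = DMA_HI_LE(v); \
	 *                                     (x).lo = DMA_LO_LE(v); \
	 *                                } while (0)
	 */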
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 0bc1a0aeb56e..e0473729b161 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -20,6 +20,7 @@
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
@@ -31,8 +32,8 @@
#include "qed_rdma.h"
/***************************************************************************
-* Structures & Definitions
-***************************************************************************/
+ * Structures & Definitions
+ ***************************************************************************/
#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
@@ -42,8 +43,8 @@
#define SPQ_BLOCK_SLEEP_MS (5)
/***************************************************************************
-* Blocking Imp. (BLOCK/EBLOCK mode)
-***************************************************************************/
+ * Blocking Imp. (BLOCK/EBLOCK mode)
+ ***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
void *cookie,
union event_ring_data *data, u8 fw_return_code)
@@ -149,8 +150,8 @@ err:
}
/***************************************************************************
-* SPQ entries inner API
-***************************************************************************/
+ * SPQ entries inner API
+ ***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent)
{
@@ -184,12 +185,12 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
}
/***************************************************************************
-* HSI access
-***************************************************************************/
+ * HSI access
+ ***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq)
{
- struct e4_core_conn_context *p_cxt;
+ struct core_conn_context *p_cxt;
struct qed_cxt_info cxt_info;
u16 physical_q;
int rc;
@@ -207,23 +208,20 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
p_cxt = cxt_info.p_cxt;
SET_FIELD(p_cxt->xstorm_ag_context.flags10,
- E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
SET_FIELD(p_cxt->xstorm_ag_context.flags1,
- E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
SET_FIELD(p_cxt->xstorm_ag_context.flags9,
- E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+ XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
/* QM physical queue */
physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
- p_cxt->xstorm_st_context.spq_base_lo =
+ p_cxt->xstorm_st_context.spq_base_addr.lo =
DMA_LO_LE(p_spq->chain.p_phys_addr);
- p_cxt->xstorm_st_context.spq_base_hi =
+ p_cxt->xstorm_st_context.spq_base_addr.hi =
DMA_HI_LE(p_spq->chain.p_phys_addr);
-
- DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
- p_hwfn->p_consq->chain.p_phys_addr);
}
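
qed_spq_hw_initialize() above packs context flags with SET_FIELD(), the HSI helper that shifts a value into a named bit-field via name##_MASK / name##_SHIFT pairs generated in the HSI headers — roughly (assumed shape):

	/* Roughly:
	 *   #define SET_FIELD(value, name, flag)                        \
	 *           do {                                                \
	 *                   (value) &= ~(name##_MASK << name##_SHIFT);  \
	 *                   (value) |= ((flag) << name##_SHIFT);        \
	 *           } while (0)
	 */
	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);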
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
@@ -265,8 +263,8 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
}
/***************************************************************************
-* Asynchronous events
-***************************************************************************/
+ * Asynchronous events
+ ***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
@@ -311,12 +309,12 @@ qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
}
/***************************************************************************
-* EQ API
-***************************************************************************/
+ * EQ API
+ ***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
- u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
+ u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_EQE_CONS, p_hwfn->rel_pf_id);
REG_WR16(p_hwfn, addr, prod);
}
@@ -433,8 +431,8 @@ void qed_eq_free(struct qed_hwfn *p_hwfn)
}
/***************************************************************************
-* CQE API - manipulate EQ functionality
-***************************************************************************/
+ * CQE API - manipulate EQ functionality
+ ***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe,
enum protocol_type protocol)
@@ -464,8 +462,8 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
}
/***************************************************************************
-* Slow hwfn Queue (spq)
-***************************************************************************/
+ * Slow hwfn Queue (spq)
+ ***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
@@ -548,7 +546,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
int ret;
/* SPQ struct */
- p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
+ p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL);
if (!p_spq)
return -ENOMEM;
@@ -676,7 +674,6 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq = p_hwfn->p_spq;
if (p_ent->queue == &p_spq->unlimited_pending) {
-
if (list_empty(&p_spq->free_pool)) {
list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
p_spq->unlimited_pending_count++;
@@ -725,8 +722,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
}
/***************************************************************************
-* Accessor
-***************************************************************************/
+ * Accessor
+ ***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
if (!p_hwfn->p_spq)
@@ -735,8 +732,8 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
}
/***************************************************************************
-* Posting new Ramrods
-***************************************************************************/
+ * Posting new Ramrods
+ ***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
struct list_head *head, u32 keep_reserve)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index ed2b6fe5a78d..8ac38828ba45 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -11,6 +11,7 @@
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
@@ -19,12 +20,13 @@
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
-static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo,
- union event_ring_data *data, u8 fw_return_code);
static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);
+static u16 qed_vf_from_entity_id(__le16 entity_id)
+{
+ return le16_to_cpu(entity_id) - MAX_NUM_PFS;
+}
+
static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
u8 legacy = 0;
@@ -169,8 +171,8 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
b_enabled_only, false))
vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
else
- DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
- relative_vf_id);
+ DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n",
+ __func__, relative_vf_id);
return vf;
}
@@ -308,7 +310,7 @@ static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
struct qed_dmae_params params;
struct qed_vf_info *p_vf;
- p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf)
return -EINVAL;
@@ -420,7 +422,7 @@ static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
bulletin_p = p_iov_info->bulletins_phys;
if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
DP_ERR(p_hwfn,
- "qed_iov_setup_vfdb called without allocating mem first\n");
+ "%s called without allocating mem first\n", __func__);
return;
}
@@ -464,7 +466,7 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
+ "%s for %d VFs\n", __func__, num_vfs);
/* Allocate PF Mailbox buffer (per-VF) */
p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
@@ -500,10 +502,10 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
QED_MSG_IOV,
"PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
p_iov_info->mbx_msg_virt_addr,
- (u64) p_iov_info->mbx_msg_phys_addr,
+ (u64)p_iov_info->mbx_msg_phys_addr,
p_iov_info->mbx_reply_virt_addr,
- (u64) p_iov_info->mbx_reply_phys_addr,
- p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
+ (u64)p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys);
return 0;
}
@@ -608,7 +610,7 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
if (rc)
return rc;
- /* We want PF IOV to be synonemous with the existance of p_iov_info;
+ /* We want PF IOV to be synonymous with the existence of p_iov_info;
* In case the capability is published but there are no VFs, simply
* de-allocate the struct.
*/
@@ -714,12 +716,12 @@ static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
int i;
/* Set VF masks and configuration - pretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
/* unpretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
/* iterate over all queues, clear sb consumer */
for (i = 0; i < vf->num_sbs; i++)
@@ -734,7 +736,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
{
u32 igu_vf_conf;
- qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
@@ -746,7 +748,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
/* unpretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}
static int
@@ -807,7 +809,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
@@ -816,7 +818,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.hw_mode);
/* unpretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
vf->state = VF_FREE;
@@ -904,7 +906,7 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
p_block->igu_sb_id * sizeof(u64), 2, NULL);
}
- vf->num_sbs = (u8) num_rx_queues;
+ vf->num_sbs = (u8)num_rx_queues;
return vf->num_sbs;
}
@@ -988,7 +990,7 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
if (!vf) {
- DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
+ DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
return -EINVAL;
}
@@ -1092,7 +1094,7 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
if (!vf) {
- DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
+ DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
return -EINVAL;
}
@@ -1220,8 +1222,8 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
* channel would be re-set to ready prior to that.
*/
REG_WR(p_hwfn,
- GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_VF_PF_CHANNEL_READY, eng_vf_id), 1);
qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
mbx->req_virt->first_tlv.reply_address,
@@ -1545,7 +1547,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
memset(resp, 0, sizeof(*resp));
/* Write the PF version so that VF would know which version
- * is supported - might be later overriden. This guarantees that
+ * is supported - might be later overridden. This guarantees that
* VF could recognize legacy PF based on lack of versions in reply.
*/
pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
@@ -1603,7 +1605,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
/* fill in pfdev info */
pfdev_info->chip_num = p_hwfn->cdev->chip_num;
pfdev_info->db_size = 0;
- pfdev_info->indices_per_sb = PIS_PER_SB_E4;
+ pfdev_info->indices_per_sb = PIS_PER_SB;
pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
@@ -1897,7 +1899,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
int sb_id;
int rc;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->cdev,
"Failed to get VF info, invalid vfid [%d]\n",
@@ -1957,7 +1959,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
rc = qed_sp_eth_vport_start(p_hwfn, &params);
if (rc) {
DP_ERR(p_hwfn,
- "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
+ "%s returned error %d\n", __func__, rc);
status = PFVF_STATUS_FAILURE;
} else {
vf->vport_instance++;
@@ -1993,8 +1995,8 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
if (rc) {
- DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
- rc);
+ DP_ERR(p_hwfn, "%s returned error %d\n",
+ __func__, rc);
status = PFVF_STATUS_FAILURE;
}
@@ -2138,10 +2140,10 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
* calculate on their own and clean the producer prior to this.
*/
if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
- REG_WR(p_hwfn,
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
- 0);
+ qed_wr(p_hwfn, p_ptt, MSEM_REG_FAST_MEMORY +
+ SEM_FAST_REG_INT_RAM +
+ MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id,
+ req->rx_qid), 0);
rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
req->bd_max_bytes,
@@ -3030,7 +3032,7 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
goto out;
}
p_rss_params = vzalloc(sizeof(*p_rss_params));
- if (p_rss_params == NULL) {
+ if (!p_rss_params) {
status = PFVF_STATUS_FAILURE;
goto out;
}
@@ -3550,6 +3552,7 @@ out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
sizeof(struct pfvf_def_resp_tlv), status);
}
+
static int
qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
@@ -3557,7 +3560,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
int cnt;
u32 val;
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
for (cnt = 0; cnt < 50; cnt++) {
val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
@@ -3565,7 +3568,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
break;
msleep(20);
}
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
if (cnt == 50) {
DP_ERR(p_hwfn,
@@ -3577,48 +3580,73 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
return 0;
}
+#define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS)
+
static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
- u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
- int i, cnt;
+ u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp;
+ u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port;
+ u8 max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
+ u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0;
+ u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0;
+ u8 port_id, tc, tc_id = 0, voq = 0;
+ int cnt;
- /* Read initial consumers & producers */
- for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
- u32 prod;
+ memset(cons, 0, MAX_NUM_EXT_VOQS * sizeof(u32));
+ memset(distance, 0, MAX_NUM_EXT_VOQS * sizeof(u32));
- cons[i] = qed_rd(p_hwfn, p_ptt,
- PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
- i * 0x40);
- prod = qed_rd(p_hwfn, p_ptt,
- PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
- i * 0x40);
- distance[i] = prod - cons[i];
+ /* Read initial consumers & producers */
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
+ for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) {
+ tc_id = (tc < max_phys_tcs_per_port) ? tc : PURE_LB_TC;
+ voq = VOQ(port_id, tc_id, max_phys_tcs_per_port);
+ cons[voq] = qed_rd(p_hwfn, p_ptt,
+ cons_voq0_addr + voq * 0x40);
+ prod = qed_rd(p_hwfn, p_ptt,
+ prod_voq0_addr + voq * 0x40);
+ distance[voq] = prod - cons[voq];
+ }
}
/* Wait for consumers to pass the producers */
- i = 0;
+ port_id = 0;
+ tc = 0;
for (cnt = 0; cnt < 50; cnt++) {
- for (; i < MAX_NUM_VOQS_E4; i++) {
- u32 tmp;
+ for (; port_id < max_ports_per_engine; port_id++) {
+ /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
+ for (; tc < max_phys_tcs_per_port + 1; tc++) {
+ tc_id = (tc < max_phys_tcs_per_port) ?
+ tc : PURE_LB_TC;
+ voq = VOQ(port_id,
+ tc_id, max_phys_tcs_per_port);
+ tmp = qed_rd(p_hwfn, p_ptt,
+ cons_voq0_addr + voq * 0x40);
+ if (distance[voq] > tmp - cons[voq])
+ break;
+ }
- tmp = qed_rd(p_hwfn, p_ptt,
- PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
- i * 0x40);
- if (distance[i] > tmp - cons[i])
+ if (tc == max_phys_tcs_per_port + 1)
+ tc = 0;
+ else
break;
}
- if (i == MAX_NUM_VOQS_E4)
+ if (port_id == max_ports_per_engine)
break;
msleep(20);
}
if (cnt == 50) {
- DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
- p_vf->abs_vf_id, i);
+ DP_ERR(p_hwfn, "VF[%d]: pbf poll failed on VOQ%d\n",
+ p_vf->abs_vf_id, (int)voq);
+
+ DP_ERR(p_hwfn, "VOQ %d has port_id as %d and tc_id as %d]\n",
+ (int)voq, (int)port_id, (int)tc_id);
+
return -EBUSY;
}
@@ -3680,8 +3708,8 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
* doesn't do that as a part of FLR.
*/
REG_WR(p_hwfn,
- GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_VF_PF_CHANNEL_READY, vfid), 1);
/* VF_STOPPED has to be set only after final cleanup
* but prior to re-enabling the VF.
@@ -3842,7 +3870,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
struct qed_iov_vf_mbx *mbx;
struct qed_vf_info *p_vf;
- p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf)
return;
@@ -3979,7 +4007,7 @@ static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
u16 abs_vfid)
{
- u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+ u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
DP_VERBOSE(p_hwfn,
@@ -3989,7 +4017,7 @@ static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
return NULL;
}
- return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
+ return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
}
static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
@@ -4013,13 +4041,13 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
return 0;
}
-static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
- struct malicious_vf_eqe_data *p_data)
+void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+ struct fw_err_data *p_data)
{
struct qed_vf_info *p_vf;
- p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
-
+ p_vf = qed_sriov_get_vf_from_absid(p_hwfn, qed_vf_from_entity_id
+ (p_data->entity_id));
if (!p_vf)
return;
@@ -4036,16 +4064,13 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
}
}
-static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
- union event_ring_data *data, u8 fw_return_code)
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
+ union event_ring_data *data, u8 fw_return_code)
{
switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL:
return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
&data->vf_pf_channel.msg_addr);
- case COMMON_EVENT_MALICIOUS_VF:
- qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
- return 0;
default:
DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
opcode);
@@ -4075,7 +4100,7 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
struct qed_dmae_params params;
struct qed_vf_info *vf_info;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info)
return -EINVAL;
@@ -4176,7 +4201,7 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf_info;
u64 feature;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->cdev,
"Can not set forced MAC, invalid vfid [%d]\n", vfid);
@@ -4226,7 +4251,7 @@ static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
- p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf_info)
return false;
@@ -4237,7 +4262,7 @@ static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
- p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf_info)
return true;
@@ -4248,7 +4273,7 @@ static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *vf_info;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info)
return false;
@@ -4266,7 +4291,7 @@ static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
goto out;
}
- vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf)
goto out;
@@ -4345,7 +4370,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
return rc;
rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */
- return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
+ return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val,
+ QM_RL_TYPE_NORMAL);
}
static int
@@ -4376,7 +4402,7 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
struct qed_wfq_data *vf_vp_wfq;
struct qed_vf_info *vf_info;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info)
return 0;
@@ -4395,8 +4421,10 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
*/
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
+ /* Memory barrier for setting atomic bit */
smp_mb__before_atomic();
set_bit(flag, &hwfn->iov_task_flags);
+ /* Memory barrier after setting atomic bit */
smp_mb__after_atomic();
DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
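A minimal sketch of the flag-then-kick pattern this hunk annotates; my_dev and its fields are illustrative stand-ins, not qed names:

#include <linux/bitops.h>
#include <linux/workqueue.h>

struct my_dev {
	unsigned long task_flags;
	struct workqueue_struct *wq;
	struct delayed_work task;
};

static void my_schedule(struct my_dev *dev, unsigned int flag)
{
	/* Order earlier stores before the atomic RMW on the flag word */
	smp_mb__before_atomic();
	set_bit(flag, &dev->task_flags);
	/* Make the flag visible before the worker can observe it */
	smp_mb__after_atomic();
	queue_delayed_work(dev->wq, &dev->task, 0);
}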
@@ -4407,8 +4435,8 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev)
int i;
for_each_hwfn(cdev, i)
- queue_delayed_work(cdev->hwfns[i].iov_wq,
- &cdev->hwfns[i].iov_task, 0);
+ queue_delayed_work(cdev->hwfns[i].iov_wq,
+ &cdev->hwfns[i].iov_task, 0);
}
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
@@ -4416,8 +4444,8 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
int i, j;
for_each_hwfn(cdev, i)
- if (cdev->hwfns[i].iov_wq)
- flush_workqueue(cdev->hwfns[i].iov_wq);
+ if (cdev->hwfns[i].iov_wq)
+ flush_workqueue(cdev->hwfns[i].iov_wq);
/* Mark VFs for disablement */
qed_iov_set_vfs_to_disable(cdev, true);
@@ -5010,7 +5038,7 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
}
qed_for_each_vf(hwfn, i)
- qed_iov_post_vf_bulletin(hwfn, i, ptt);
+ qed_iov_post_vf_bulletin(hwfn, i, ptt);
qed_ptt_release(hwfn, ptt);
}
@@ -5196,7 +5224,6 @@ void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
}
- flush_workqueue(cdev->hwfns[i].iov_wq);
destroy_workqueue(cdev->hwfns[i].iov_wq);
}
}
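The flush_workqueue() dropped above is safe to remove because destroy_workqueue() drains all pending work itself before tearing the queue down; a minimal sketch of the resulting teardown order (illustrative names):

#include <linux/workqueue.h>

struct my_hwfn {
	struct workqueue_struct *iov_wq;
	struct delayed_work iov_task;
};

static void my_iov_wq_stop(struct my_hwfn *hwfn)
{
	cancel_delayed_work_sync(&hwfn->iov_task);	/* no re-arm races */
	destroy_workqueue(hwfn->iov_wq);		/* drains, then frees */
}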
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index eacd6457f195..f448e3dd6c8b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -142,7 +142,7 @@ struct qed_vf_queue {
enum vf_state {
VF_FREE = 0, /* VF ready to be acquired holds no resc */
- VF_ACQUIRED, /* VF, acquired, but not initalized */
+ VF_ACQUIRED, /* VF, acquired, but not initialized */
VF_ENABLED, /* VF, Enabled */
VF_RESET, /* VF, FLR'd, pending cleanup */
VF_STOPPED /* VF, Stopped */
@@ -250,29 +250,31 @@ extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#ifdef CONFIG_QED_SRIOV
/**
- * @brief Check if given VF ID @vfid is valid
- * w.r.t. @b_enabled_only value
- * if b_enabled_only = true - only enabled VF id is valid
- * else any VF id less than max_vfs is valid
+ * qed_iov_is_valid_vfid(): Check if given VF ID @vfid is valid
+ * w.r.t. @b_enabled_only value
+ * if b_enabled_only = true - only enabled
+ * VF id is valid.
+ * else any VF id less than max_vfs is valid.
*
- * @param p_hwfn
- * @param rel_vf_id - Relative VF ID
- * @param b_enabled_only - consider only enabled VF
- * @param b_non_malicious - true iff we want to validate vf isn't malicious.
+ * @p_hwfn: HW device data.
+ * @rel_vf_id: Relative VF ID.
+ * @b_enabled_only: consider only enabled VF.
+ * @b_non_malicious: true iff we want to validate vf isn't malicious.
*
- * @return bool - true for valid VF ID
+ * Return: bool - true for valid VF ID
*/
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
int rel_vf_id,
bool b_enabled_only, bool b_non_malicious);
/**
- * @brief - Given a VF index, return index of next [including that] active VF.
+ * qed_iov_get_next_active_vf(): Given a VF index, return index of
+ * next [including that] active VF.
*
- * @param p_hwfn
- * @param rel_vf_id
+ * @p_hwfn: HW device data.
+ * @rel_vf_id: VF ID.
*
- * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
+ * Return: MAX_NUM_VFS in case no further active VFs, otherwise index.
*/
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
@@ -280,83 +282,117 @@ void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
int vfid, u16 vxlan_port, u16 geneve_port);
/**
- * @brief Read sriov related information and allocated resources
- * reads from configuration space, shmem, etc.
+ * qed_iov_hw_info(): Read SRIOV related information and allocate resources;
+ * reads from configuration space, shmem, etc.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
+ * qed_add_tlv(): place a given tlv on the tlv buffer at next offset
*
- * @param p_hwfn
- * @param p_iov
- * @param type
- * @param length
+ * @p_hwfn: HW device data.
+ * @offset: pointer to the current offset in the TLV buffer.
+ * @type: TLV type.
+ * @length: TLV length.
*
- * @return pointer to the newly placed tlv
+ * Return: pointer to the newly placed tlv
*/
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
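A hedged sketch of the append idea qed_add_tlv() is documented with, assuming the type/length header layout of struct channel_tlv shown later in qed_vf.h:

static void *tlv_append(u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;
	*offset += length;	/* the next TLV starts after this one */
	return tl;		/* caller fills in the payload */
}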
/**
- * @brief list the types and lengths of the tlvs on the buffer
+ * qed_dp_tlv_list(): list the types and lengths of the tlvs on the buffer
*
- * @param p_hwfn
- * @param tlvs_list
+ * @p_hwfn: HW device data.
+ * @tlvs_list: Tlvs_list.
+ *
+ * Return: Void.
*/
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
/**
- * @brief qed_iov_alloc - allocate sriov related resources
+ * qed_sriov_vfpf_malicious(): Handle malicious VF/PF.
+ *
+ * @p_hwfn: HW device data.
+ * @p_data: Pointer to data.
+ *
+ * Return: Void.
+ */
+void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+ struct fw_err_data *p_data);
+
+/**
+ * qed_sriov_eqe_event(): Callback for SRIOV events.
+ *
+ * @p_hwfn: HW device data.
+ * @opcode: Opcode.
+ * @echo: Echo.
+ * @data: Event ring data.
+ * @fw_return_code: FW return code.
+ *
+ * Return: Int.
+ */
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
+ union event_ring_data *data, u8 fw_return_code);
+
+/**
+ * qed_iov_alloc(): allocate sriov related resources
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_iov_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_iov_setup - setup sriov related resources
+ * qed_iov_setup(): setup sriov related resources
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_iov_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_iov_free - free sriov related resources
+ * qed_iov_free(): free sriov related resources
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_iov_free(struct qed_hwfn *p_hwfn);
/**
- * @brief free sriov related memory that was allocated during hw_prepare
+ * qed_iov_free_hw_info(): free sriov related memory that was
+ * allocated during hw_prepare
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_iov_free_hw_info(struct qed_dev *cdev);
/**
- * @brief Mark structs of vfs that have been FLR-ed.
+ * qed_iov_mark_vf_flr(): Mark structs of vfs that have been FLR-ed.
*
- * @param p_hwfn
- * @param disabled_vfs - bitmask of all VFs on path that were FLRed
+ * @p_hwfn: HW device data.
+ * @disabled_vfs: bitmask of all VFs on path that were FLRed
*
- * @return true iff one of the PF's vfs got FLRed. false otherwise.
+ * Return: true iff one of the PF's vfs got FLRed. false otherwise.
*/
bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
/**
- * @brief Search extended TLVs in request/reply buffer.
+ * qed_iov_search_list_tlvs(): Search extended TLVs in request/reply buffer.
*
- * @param p_hwfn
- * @param p_tlvs_list - Pointer to tlvs list
- * @param req_type - Type of TLV
+ * @p_hwfn: HW device data.
+ * @p_tlvs_list: Pointer to tlvs list
+ * @req_type: Type of TLV
*
- * @return pointer to tlv type if found, otherwise returns NULL.
+ * Return: pointer to tlv type if found, otherwise returns NULL.
*/
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
void *p_tlvs_list, u16 req_type);
@@ -442,6 +478,18 @@ static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}
+
+static inline void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+ struct fw_err_data *p_data)
+{
+}
+
+static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode,
+ __le16 echo, union event_ring_data *data,
+ u8 fw_return_code)
+{
+ return 0;
+}
#endif
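The new static inline stubs keep callers such as the EQE dispatcher free of #ifdef CONFIG_QED_SRIOV blocks; the generic pattern looks like this (illustrative names):

#ifdef CONFIG_MY_FEATURE
int my_feature_event(struct my_dev *dev, u8 opcode);
#else
static inline int my_feature_event(struct my_dev *dev, u8 opcode)
{
	return 0;	/* feature compiled out: succeed as a no-op */
}
#endif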
#define qed_for_each_vf(_p_hwfn, _i) \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 72a38d53d33f..597cd9cd57b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -27,7 +27,7 @@ static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
"preparing to send 0x%04x tlv over vf pf channel\n",
type);
- /* Reset Requst offset */
+ /* Reset Request offset */
p_iov->offset = (u8 *)p_iov->vf2pf_request;
/* Clear mailbox - both request and reply */
@@ -444,7 +444,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
u32 reg;
int rc;
- /* Set number of hwfns - might be overriden once leading hwfn learns
+ /* Set number of hwfns - might be overridden once leading hwfn learns
* actual configuration from PF.
*/
if (IS_LEAD_HWFN(p_hwfn))
@@ -504,7 +504,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
QED_MSG_IOV,
"VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
p_iov->vf2pf_request,
- (u64) p_iov->vf2pf_request_phys,
+ (u64)p_iov->vf2pf_request_phys,
p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);
/* Allocate Bulletin board */
@@ -561,6 +561,7 @@ free_p_iov:
return -ENOMEM;
}
+
#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
@@ -1285,8 +1286,8 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
- req->opcode = (u8) p_ucast->opcode;
- req->type = (u8) p_ucast->type;
+ req->opcode = (u8)p_ucast->opcode;
+ req->type = (u8)p_ucast->type;
memcpy(req->mac, p_ucast->mac, ETH_ALEN);
req->vlan = p_ucast->vlan;
@@ -1372,7 +1373,7 @@ exit:
int
qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
- u8 *p_mac)
+ const u8 *p_mac)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_bulletin_update_mac_tlv *p_req;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 60d2bb64e65f..306b5f4bc632 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -48,7 +48,7 @@ struct channel_tlv {
u16 length;
};
-/* header of first vf->pf tlv carries the offset used to calculate reponse
+/* header of first vf->pf tlv carries the offset used to calculate response
* buffer address
*/
struct vfpf_first_tlv {
@@ -85,8 +85,8 @@ struct vfpf_acquire_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_vfdev_info {
-#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */
-#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
+#define VFPF_ACQUIRE_CAP_PRE_FP_HSI BIT(0) /* VF pre-FP hsi version */
+#define VFPF_ACQUIRE_CAP_100G BIT(1) /* VF can support 100g */
/* A requirement for supporting multi-Tx queues on a single queue-zone,
* VF would pass qids as additional information whenever passing queue
* references.
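The BIT() conversion above is purely cosmetic; BIT() comes from include/linux/bits.h and is, in essence, an unsigned long shift, so the capability values are unchanged:

/* include/linux/bits.h, in essence: */
#define BIT(nr)	(1UL << (nr))

/* hence VFPF_ACQUIRE_CAP_PRE_FP_HSI == 0x1 and
 * VFPF_ACQUIRE_CAP_100G == 0x2, exactly as before.
 */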
@@ -688,13 +688,16 @@ struct qed_vf_iov {
};
/**
- * @brief VF - Set Rx/Tx coalesce per VF's relative queue.
- * Coalesce value '0' will omit the configuration.
+ * qed_vf_pf_set_coalesce(): VF - Set Rx/Tx coalesce per VF's relative queue.
+ * Coalesce value '0' will omit the
+ * configuration.
*
- * @param p_hwfn
- * @param rx_coal - coalesce value in micro second for rx queue
- * @param tx_coal - coalesce value in micro second for tx queue
- * @param p_cid - queue cid
+ * @p_hwfn: HW device data.
+ * @rx_coal: coalesce value in microseconds for the rx queue.
+ * @tx_coal: coalesce value in microseconds for the tx queue.
+ * @p_cid: queue cid.
+ *
+ * Return: Int.
*
**/
int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
@@ -702,148 +705,172 @@ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
u16 tx_coal, struct qed_queue_cid *p_cid);
/**
- * @brief VF - Get coalesce per VF's relative queue.
+ * qed_vf_pf_get_coalesce(): VF - Get coalesce per VF's relative queue.
*
- * @param p_hwfn
- * @param p_coal - coalesce value in micro second for VF queues.
- * @param p_cid - queue cid
+ * @p_hwfn: HW device data.
+ * @p_coal: coalesce value in microseconds for VF queues.
+ * @p_cid: queue cid.
*
+ * Return: Int.
**/
int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
u16 *p_coal, struct qed_queue_cid *p_cid);
#ifdef CONFIG_QED_SRIOV
/**
- * @brief Read the VF bulletin and act on it if needed
+ * qed_vf_read_bulletin(): Read the VF bulletin and act on it if needed.
*
- * @param p_hwfn
- * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
+ * @p_hwfn: HW device data.
+ * @p_change: qed fills 1 iff bulletin board has changed, 0 otherwise.
*
- * @return enum _qed_status
+ * Return: enum _qed_status.
*/
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
/**
- * @brief Get link paramters for VF from qed
+ * qed_vf_get_link_params(): Get link parameters for VF from qed
+ *
+ * @p_hwfn: HW device data.
+ * @params: the link params structure to be filled for the VF.
*
- * @param p_hwfn
- * @param params - the link params structure to be filled for the VF
+ * Return: Void.
*/
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params);
/**
- * @brief Get link state for VF from qed
+ * qed_vf_get_link_state(): Get link state for VF from qed.
+ *
+ * @p_hwfn: HW device data.
+ * @link: the link state structure to be filled for the VF
*
- * @param p_hwfn
- * @param link - the link state structure to be filled for the VF
+ * Return: Void.
*/
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *link);
/**
- * @brief Get link capabilities for VF from qed
+ * qed_vf_get_link_caps(): Get link capabilities for VF from qed.
*
- * @param p_hwfn
- * @param p_link_caps - the link capabilities structure to be filled for the VF
+ * @p_hwfn: HW device data.
+ * @p_link_caps: the link capabilities structure to be filled for the VF
+ *
+ * Return: Void.
*/
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps);
/**
- * @brief Get number of Rx queues allocated for VF by qed
+ * qed_vf_get_num_rxqs(): Get number of Rx queues allocated for VF by qed
+ *
+ * @p_hwfn: HW device data.
+ * @num_rxqs: allocated RX queues
*
- * @param p_hwfn
- * @param num_rxqs - allocated RX queues
+ * Return: Void.
*/
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
/**
- * @brief Get number of Rx queues allocated for VF by qed
+ * qed_vf_get_num_txqs(): Get number of Tx queues allocated for VF by qed
*
- * @param p_hwfn
- * @param num_txqs - allocated RX queues
+ * @p_hwfn: HW device data.
+ * @num_txqs: allocated TX queues
+ *
+ * Return: Void.
*/
void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs);
/**
- * @brief Get number of available connections [both Rx and Tx] for VF
+ * qed_vf_get_num_cids(): Get number of available connections
+ * [both Rx and Tx] for VF
+ *
+ * @p_hwfn: HW device data.
+ * @num_cids: allocated number of connections
*
- * @param p_hwfn
- * @param num_cids - allocated number of connections
+ * Return: Void.
*/
void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids);
/**
- * @brief Get port mac address for VF
+ * qed_vf_get_port_mac(): Get port mac address for VF.
*
- * @param p_hwfn
- * @param port_mac - destination location for port mac
+ * @p_hwfn: HW device data.
+ * @port_mac: destination location for port mac
+ *
+ * Return: Void.
*/
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
/**
- * @brief Get number of VLAN filters allocated for VF by qed
+ * qed_vf_get_num_vlan_filters(): Get number of VLAN filters allocated
+ * for VF by qed.
+ *
+ * @p_hwfn: HW device data.
+ * @num_vlan_filters: allocated VLAN filters
*
- * @param p_hwfn
- * @param num_rxqs - allocated VLAN filters
+ * Return: Void.
*/
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
u8 *num_vlan_filters);
/**
- * @brief Get number of MAC filters allocated for VF by qed
+ * qed_vf_get_num_mac_filters(): Get number of MAC filters allocated
+ * for VF by qed
*
- * @param p_hwfn
- * @param num_rxqs - allocated MAC filters
+ * @p_hwfn: HW device data.
+ * @num_mac_filters: allocated MAC filters
+ *
+ * Return: Void.
*/
void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);
/**
- * @brief Check if VF can set a MAC address
+ * qed_vf_check_mac(): Check if VF can set a MAC address
*
- * @param p_hwfn
- * @param mac
+ * @p_hwfn: HW device data.
+ * @mac: MAC address.
*
- * @return bool
+ * Return: bool.
*/
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
/**
- * @brief Set firmware version information in dev_info from VFs acquire response tlv
+ * qed_vf_get_fw_version(): Set firmware version information
+ * in dev_info from VFs acquire response tlv
+ *
+ * @p_hwfn: HW device data.
+ * @fw_major: FW major.
+ * @fw_minor: FW minor.
+ * @fw_rev: FW rev.
+ * @fw_eng: FW eng.
*
- * @param p_hwfn
- * @param fw_major
- * @param fw_minor
- * @param fw_rev
- * @param fw_eng
+ * Return: Void.
*/
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor,
u16 *fw_rev, u16 *fw_eng);
/**
- * @brief hw preparation for VF
- * sends ACQUIRE message
+ * qed_vf_hw_prepare(): hw preparation for VF sends ACQUIRE message
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
/**
- * @brief VF - start the RX Queue by sending a message to the PF
- * @param p_hwfn
- * @param p_cid - Only relative fields are relevant
- * @param bd_max_bytes - maximum number of bytes per bd
- * @param bd_chain_phys_addr - physical address of bd chain
- * @param cqe_pbl_addr - physical address of pbl
- * @param cqe_pbl_size - pbl size
- * @param pp_prod - pointer to the producer to be
- * used in fastpath
+ * qed_vf_pf_rxq_start(): start the RX Queue by sending a message to the PF
+ *
+ * @p_hwfn: HW device data.
+ * @p_cid: Only relative fields are relevant
+ * @bd_max_bytes: maximum number of bytes per bd
+ * @bd_chain_phys_addr: physical address of bd chain
+ * @cqe_pbl_addr: physical address of pbl
+ * @cqe_pbl_size: pbl size
+ * @pp_prod: pointer to the producer to be used in fastpath
*
- * @return int
+ * Return: Int.
*/
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid,
@@ -853,18 +880,16 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
u16 cqe_pbl_size, void __iomem **pp_prod);
/**
- * @brief VF - start the TX queue by sending a message to the
- * PF.
+ * qed_vf_pf_txq_start(): VF - start the TX queue by sending a message to the
+ * PF.
*
- * @param p_hwfn
- * @param tx_queue_id - zero based within the VF
- * @param sb - status block for this queue
- * @param sb_index - index within the status block
- * @param bd_chain_phys_addr - physical address of tx chain
- * @param pp_doorbell - pointer to address to which to
- * write the doorbell too..
+ * @p_hwfn: HW device data.
+ * @p_cid: CID.
+ * @pbl_addr: PBL address.
+ * @pbl_size: PBL Size.
+ * @pp_doorbell: pointer to address to which to write the doorbell.
*
- * @return int
+ * Return: Int.
*/
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
@@ -873,90 +898,91 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u16 pbl_size, void __iomem **pp_doorbell);
/**
- * @brief VF - stop the RX queue by sending a message to the PF
+ * qed_vf_pf_rxq_stop(): VF - stop the RX queue by sending a message to the PF.
*
- * @param p_hwfn
- * @param p_cid
- * @param cqe_completion
+ * @p_hwfn: HW device data.
+ * @p_cid: CID.
+ * @cqe_completion: CQE Completion.
*
- * @return int
+ * Return: Int.
*/
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid, bool cqe_completion);
/**
- * @brief VF - stop the TX queue by sending a message to the PF
+ * qed_vf_pf_txq_stop(): VF - stop the TX queue by sending a message to the PF.
*
- * @param p_hwfn
- * @param tx_qid
+ * @p_hwfn: HW device data.
+ * @p_cid: CID.
*
- * @return int
+ * Return: Int.
*/
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);
/**
- * @brief VF - send a vport update command
+ * qed_vf_pf_vport_update(): VF - send a vport update command.
*
- * @param p_hwfn
- * @param params
+ * @p_hwfn: HW device data.
+ * @p_params: Vport update parameters.
*
- * @return int
+ * Return: Int.
*/
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_params);
/**
+ * qed_vf_pf_reset(): VF - send a close message to PF.
*
- * @brief VF - send a close message to PF
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
- *
- * @return enum _qed_status
+ * Return: enum _qed_status
*/
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);
/**
- * @brief VF - free vf`s memories
+ * qed_vf_pf_release(): VF - free vf`s memories.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return enum _qed_status
+ * Return: enum _qed_status
*/
int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
+ * qed_vf_get_igu_sb_id(): Get the IGU SB ID for a given
 * sb_id. For VFs, IGU SBs don't have to be contiguous.
*
- * @param p_hwfn
- * @param sb_id
+ * @p_hwfn: HW device data.
+ * @sb_id: SB ID.
*
- * @return INLINE u16
+ * Return: INLINE u16
*/
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
- * @brief Stores [or removes] a configured sb_info.
+ * qed_vf_set_sb_info(): Stores [or removes] a configured sb_info.
+ *
+ * @p_hwfn: HW device data.
+ * @sb_id: zero-based SB index [for fastpath]
+ * @p_sb: may be NULL [during removal].
*
- * @param p_hwfn
- * @param sb_id - zero-based SB index [for fastpath]
- * @param sb_info - may be NULL [during removal].
+ * Return: Void.
*/
void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
u16 sb_id, struct qed_sb_info *p_sb);
/**
- * @brief qed_vf_pf_vport_start - perform vport start for VF.
+ * qed_vf_pf_vport_start(): perform vport start for VF.
*
- * @param p_hwfn
- * @param vport_id
- * @param mtu
- * @param inner_vlan_removal
- * @param tpa_mode
- * @param max_buffers_per_cqe,
- * @param only_untagged - default behavior regarding vlan acceptance
+ * @p_hwfn: HW device data.
+ * @vport_id: Vport ID.
+ * @mtu: MTU.
+ * @inner_vlan_removal: Inner VLAN removal.
+ * @tpa_mode: TPA mode.
+ * @max_buffers_per_cqe: Max buffers per CQE.
+ * @only_untagged: default behavior regarding vlan acceptance
*
- * @return enum _qed_status
+ * Return: enum _qed_status
*/
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
u8 vport_id,
@@ -966,11 +992,11 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
u8 max_buffers_per_cqe, u8 only_untagged);
/**
- * @brief qed_vf_pf_vport_stop - stop the VF's vport
+ * qed_vf_pf_vport_stop(): stop the VF's vport
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return enum _qed_status
+ * Return: enum _qed_status
*/
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);
@@ -981,42 +1007,49 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
struct qed_filter_mcast *p_filter_cmd);
/**
- * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
+ * qed_vf_pf_int_cleanup(): clean the SB of the VF
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return enum _qed_status
+ * Return: enum _qed_status
*/
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
/**
- * @brief - return the link params in a given bulletin board
+ * __qed_vf_get_link_params(): return the link params in a given bulletin board
*
- * @param p_hwfn
- * @param p_params - pointer to a struct to fill with link params
- * @param p_bulletin
+ * @p_hwfn: HW device data.
+ * @p_params: pointer to a struct to fill with link params
+ * @p_bulletin: Bulletin.
+ *
+ * Return: Void.
*/
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *p_params,
struct qed_bulletin_content *p_bulletin);
/**
- * @brief - return the link state in a given bulletin board
+ * __qed_vf_get_link_state(): return the link state in a given bulletin board
+ *
+ * @p_hwfn: HW device data.
+ * @p_link: pointer to a struct to fill with link state
+ * @p_bulletin: Bulletin.
*
- * @param p_hwfn
- * @param p_link - pointer to a struct to fill with link state
- * @param p_bulletin
+ * Return: Void.
*/
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *p_link,
struct qed_bulletin_content *p_bulletin);
/**
- * @brief - return the link capabilities in a given bulletin board
+ * __qed_vf_get_link_caps(): return the link capabilities in a given
+ * bulletin board
*
- * @param p_hwfn
- * @param p_link - pointer to a struct to fill with link capabilities
- * @param p_bulletin
+ * @p_hwfn: HW device data.
+ * @p_link_caps: pointer to a struct to fill with link capabilities
+ * @p_bulletin: Bulletin.
+ *
+ * Return: Void.
*/
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps,
@@ -1029,11 +1062,15 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);
/**
- * @brief - Ask PF to update the MAC address in it's bulletin board
+ * qed_vf_pf_bulletin_update_mac(): Ask PF to update the MAC address in
+ * its bulletin board.
+ *
+ * @p_hwfn: HW device data.
+ * @p_mac: mac address to be updated in bulletin board
*
- * @param p_mac - mac address to be updated in bulletin board
+ * Return: Int.
*/
-int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac);
+int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, const u8 *p_mac);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
@@ -1222,7 +1259,7 @@ static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
}
static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
- u8 *p_mac)
+ const u8 *p_mac)
{
return -EINVAL;
}
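Constifying p_mac here tracks the netdev core change that makes dev->dev_addr const; callees that only read the address should take a const pointer, as in this minimal sketch (my_ctx and push_mac are illustrative):

#include <linux/etherdevice.h>

struct my_ctx {
	u8 shadow_mac[ETH_ALEN];
};

static int push_mac(struct my_ctx *ctx, const u8 *mac)
{
	ether_addr_copy(ctx->shadow_mac, mac);	/* reads are fine */
	/* mac[0] = 0;  <- would no longer compile: *mac is const */
	return 0;
}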
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index f99b085b56a5..3010833ddde3 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -557,7 +557,7 @@ void qede_force_mac(void *dev, u8 *mac, bool forced)
return;
}
- ether_addr_copy(edev->ndev->dev_addr, mac);
+ eth_hw_addr_set(edev->ndev, mac);
__qede_unlock(edev);
}
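eth_hw_addr_set() (linux/etherdevice.h) is the accessor that replaces direct writes now that dev_addr is const; a minimal usage sketch:

#include <linux/etherdevice.h>

static void my_set_mac(struct net_device *ndev, const u8 *mac)
{
	/* old, now invalid: ether_addr_copy(ndev->dev_addr, mac); */
	eth_hw_addr_set(ndev, mac);	/* copies ETH_ALEN bytes into dev_addr */
}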
@@ -617,7 +617,7 @@ void qede_fill_rss_params(struct qede_dev *edev,
static int qede_set_ucast_rx_mac(struct qede_dev *edev,
enum qed_filter_xcast_params_type opcode,
- unsigned char mac[ETH_ALEN])
+ const unsigned char mac[ETH_ALEN])
{
struct qed_filter_ucast_params ucast;
@@ -1101,7 +1101,7 @@ int qede_set_mac_addr(struct net_device *ndev, void *p)
goto out;
}
- ether_addr_copy(ndev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(ndev, addr->sa_data);
DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);
if (edev->state != QEDE_STATE_OPEN) {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 9837bdb89cd4..06c6a5813606 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -836,7 +836,7 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
/* Set network device HW mac */
- ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+ eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);
ndev->mtu = edev->dev_info.common.mtu;
}
@@ -1176,19 +1176,17 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->devlink = qed_ops->common->devlink_register(cdev);
if (IS_ERR(edev->devlink)) {
DP_NOTICE(edev, "Cannot register devlink\n");
+ rc = PTR_ERR(edev->devlink);
edev->devlink = NULL;
- /* Go on, we can live without devlink */
+ goto err3;
}
} else {
struct net_device *ndev = pci_get_drvdata(pdev);
+ struct qed_devlink *qdl;
edev = netdev_priv(ndev);
-
- if (edev->devlink) {
- struct qed_devlink *qdl = devlink_priv(edev->devlink);
-
- qdl->cdev = cdev;
- }
+ qdl = devlink_priv(edev->devlink);
+ qdl->cdev = cdev;
edev->cdev = cdev;
memset(&edev->stats, 0, sizeof(edev->stats));
memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
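The devlink failure is now fatal to the probe; the ERR_PTR idiom it relies on is sketched below with illustrative names (my_register stands in for devlink_register):

#include <linux/err.h>

static int attach(struct my_dev *dev)
{
	dev->handle = my_register(dev);
	if (IS_ERR(dev->handle)) {
		int err = PTR_ERR(dev->handle); /* negative errno in the pointer */

		dev->handle = NULL;	/* don't keep an ERR_PTR around */
		return err;		/* unwind instead of limping on */
	}
	return 0;
}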
@@ -1397,7 +1395,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
static int qede_alloc_mem_sb(struct qede_dev *edev,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
@@ -2802,10 +2800,13 @@ static void qede_get_eth_tlv_data(void *dev, void *data)
}
/**
- * qede_io_error_detected - called when PCI error is detected
+ * qede_io_error_detected(): Called when PCI error is detected
+ *
* @pdev: Pointer to PCI device
* @state: The current pci connection state
*
+ * Return: pci_ers_result_t.
+ *
* This function is called after a PCI bus error affecting
* this device has been detected.
*/
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index c00ad57575ea..1e6d72adfe43 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -508,10 +508,12 @@ static void eeprom_readword(struct ql3_adapter *qdev,
static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
- __le16 *p = (__le16 *)ndev->dev_addr;
- p[0] = cpu_to_le16(addr[0]);
- p[1] = cpu_to_le16(addr[1]);
- p[2] = cpu_to_le16(addr[2]);
+ __le16 buf[ETH_ALEN / 2];
+
+ buf[0] = cpu_to_le16(addr[0]);
+ buf[1] = cpu_to_le16(addr[1]);
+ buf[2] = cpu_to_le16(addr[2]);
+ eth_hw_addr_set(ndev, (u8 *)buf);
}
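The same const-dev_addr rule explains the local __le16 buffer above: assemble the halfwords first, then publish them once; a minimal sketch:

#include <linux/etherdevice.h>

static void set_mac_from_words(struct net_device *ndev, const u16 *w)
{
	__le16 buf[ETH_ALEN / 2];

	buf[0] = cpu_to_le16(w[0]);	/* bytes 0-1 */
	buf[1] = cpu_to_le16(w[1]);	/* bytes 2-3 */
	buf[2] = cpu_to_le16(w[2]);	/* bytes 4-5 */
	eth_hw_addr_set(ndev, (u8 *)buf);
}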
static int ql_get_nvram_params(struct ql3_adapter *qdev)
@@ -3564,7 +3566,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
spin_lock_irqsave(&qdev->hw_lock, hw_flags);
/* Program lower 32 bits of the MAC address */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 75960a29f80e..ed84f0f97623 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -304,7 +304,7 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
if (ret)
return ret;
- memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
+ eth_hw_addr_set(netdev, mac_addr);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
/* set station address */
@@ -356,7 +356,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
qlcnic_delete_adapter_mac(adapter);
memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
qlcnic_set_multi(adapter->netdev);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 87b8c032195d..06104d2ff5b3 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -420,7 +420,7 @@ static void emac_mac_dma_config(struct emac_adapter *adpt)
}
/* set MAC address */
-static void emac_set_mac_address(struct emac_adapter *adpt, u8 *addr)
+static void emac_set_mac_address(struct emac_adapter *adpt, const u8 *addr)
{
u32 sta;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 9015a38eaced..a55c52696d49 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -545,13 +545,10 @@ static int emac_probe_resources(struct platform_device *pdev,
struct emac_adapter *adpt)
{
struct net_device *netdev = adpt->netdev;
- char maddr[ETH_ALEN];
int ret = 0;
/* get mac address */
- if (device_get_mac_address(&pdev->dev, maddr, ETH_ALEN))
- ether_addr_copy(netdev->dev_addr, maddr);
- else
+ if (device_get_ethdev_address(&pdev->dev, netdev))
eth_hw_addr_random(netdev);
/* Core 0 interrupt */
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 8427fe1b8fd1..955cce644392 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -968,7 +968,7 @@ qca_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, qcaspi_devs);
- ret = of_get_mac_address(spi->dev.of_node, qca->net_dev->dev_addr);
+ ret = of_get_ethdev_address(spi->dev.of_node, qca->net_dev);
if (ret) {
eth_hw_addr_random(qca->net_dev);
dev_info(&spi->dev, "Using random MAC address: %pM\n",
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index ce3f7ce31adc..27c4f43176aa 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -347,7 +347,7 @@ static int qca_uart_probe(struct serdev_device *serdev)
of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);
- ret = of_get_mac_address(serdev->dev.of_node, qca->net_dev->dev_addr);
+ ret = of_get_ethdev_address(serdev->dev.of_node, qca->net_dev);
if (ret) {
eth_hw_addr_random(qca->net_dev);
dev_info(&serdev->dev, "Using random MAC address: %pM\n",
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 13d8eb43a485..1b2119b1d48a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -224,7 +224,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
rmnet_dev->netdev_ops = &rmnet_vnd_ops;
rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
- eth_random_addr(rmnet_dev->dev_addr);
+ eth_hw_addr_random(rmnet_dev);
rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
/* Raw IP mode */
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 01ef5efd7bc2..a6bf7d505178 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -453,7 +453,7 @@ static void r6040_down(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- u16 *adrp;
+ const u16 *adrp;
/* Stop MAC */
iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */
@@ -462,7 +462,7 @@ static void r6040_down(struct net_device *dev)
r6040_reset_mac(lp);
/* Restore MAC Address to MIDx */
- adrp = (u16 *) dev->dev_addr;
+ adrp = (const u16 *) dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
@@ -731,13 +731,13 @@ static void r6040_mac_address(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- u16 *adrp;
+ const u16 *adrp;
/* Reset MAC */
r6040_reset_mac(lp);
/* Restore MAC Address */
- adrp = (u16 *) dev->dev_addr;
+ adrp = (const u16 *) dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
@@ -849,13 +849,13 @@ static void r6040_multicast_list(struct net_device *dev)
unsigned long flags;
struct netdev_hw_addr *ha;
int i;
- u16 *adrp;
+ const u16 *adrp;
u16 hash_table[4] = { 0 };
spin_lock_irqsave(&lp->lock, flags);
/* Keep our MAC Address */
- adrp = (u16 *)dev->dev_addr;
+ adrp = (const u16 *)dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
@@ -1031,8 +1031,8 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *ioaddr;
int err, io_size = R6040_IO_SIZE;
static int card_idx = -1;
+ u16 addr[ETH_ALEN / 2];
int bar = 0;
- u16 *adrp;
pr_info("%s\n", version);
@@ -1102,14 +1102,14 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set MAC address */
card_idx++;
- adrp = (u16 *)dev->dev_addr;
- adrp[0] = ioread16(ioaddr + MID_0L);
- adrp[1] = ioread16(ioaddr + MID_0M);
- adrp[2] = ioread16(ioaddr + MID_0H);
+ addr[0] = ioread16(ioaddr + MID_0L);
+ addr[1] = ioread16(ioaddr + MID_0M);
+ addr[2] = ioread16(ioaddr + MID_0H);
+ eth_hw_addr_set(dev, (u8 *)addr);
/* Some bootloader/BIOSes do not initialize
* MAC address, warn about that */
- if (!(adrp[0] || adrp[1] || adrp[2])) {
+ if (!(addr[0] || addr[1] || addr[2])) {
netdev_warn(dev, "MAC address not initialized, "
"generating random\n");
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 2b84b4565e64..4f39f843bb3a 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1624,7 +1624,7 @@ static int cp_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&cp->lock);
@@ -1889,6 +1889,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *regs;
resource_size_t pciaddr;
unsigned int addr_len, i, pci_using_dac;
+ __le16 addr[ETH_ALEN / 2];
pr_info_once("%s", version);
@@ -1979,8 +1980,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
/* read MAC address from EEPROM */
addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
for (i = 0; i < 3; i++)
- ((__le16 *) (dev->dev_addr))[i] =
- cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
+ addr[i] = cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
+ eth_hw_addr_set(dev, (u8 *)addr);
dev->netdev_ops = &cp_netdev_ops;
netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 2e6923cc653e..15b40fd93cd2 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -945,6 +945,7 @@ static int rtl8139_init_one(struct pci_dev *pdev,
{
struct net_device *dev = NULL;
struct rtl8139_private *tp;
+ __le16 addr[ETH_ALEN / 2];
int i, addr_len, option;
void __iomem *ioaddr;
static int board_idx = -1;
@@ -994,8 +995,8 @@ static int rtl8139_init_one(struct pci_dev *pdev,
addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
for (i = 0; i < 3; i++)
- ((__le16 *) (dev->dev_addr))[i] =
- cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));
+ addr[i] = cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));
+ eth_hw_addr_set(dev, (u8 *)addr);
/* The Rtl8139-specific entries in the device structure. */
dev->netdev_ops = &rtl8139_netdev_ops;
@@ -2238,7 +2239,7 @@ static int rtl8139_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&tp->lock);
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index b6c849b258a0..6cbcb3164367 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -368,6 +368,7 @@ static int __init atp_probe1(long ioaddr)
static void __init get_node_ID(struct net_device *dev)
{
long ioaddr = dev->base_addr;
+ __be16 addr[ETH_ALEN / 2];
int sa_offset = 0;
int i;
@@ -379,8 +380,9 @@ static void __init get_node_ID(struct net_device *dev)
sa_offset = 15;
for (i = 0; i < 3; i++)
- ((__be16 *)dev->dev_addr)[i] =
+ addr[i] =
cpu_to_be16(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
+ eth_hw_addr_set(dev, (u8 *)addr);
write_reg(ioaddr, CMR2, CMR2_NULL);
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 0199914440ab..ee6c9c842012 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -5217,7 +5217,7 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp)
static void rtl_init_mac_address(struct rtl8169_private *tp)
{
struct net_device *dev = tp->dev;
- u8 *mac_addr = dev->dev_addr;
+ u8 mac_addr[ETH_ALEN];
int rc;
rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
@@ -5235,6 +5235,7 @@ static void rtl_init_mac_address(struct rtl8169_private *tp)
eth_hw_addr_random(dev);
dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n");
done:
+ eth_hw_addr_set(dev, mac_addr);
rtl_rar_set(tp, mac_addr);
}
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 47c5377e4f42..08062d73df10 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -81,6 +81,7 @@ enum ravb_reg {
RQC3 = 0x00A0,
RQC4 = 0x00A4,
RPC = 0x00B0,
+ RTC = 0x00B4, /* R-Car Gen3 and RZ/G2L only */
UFCW = 0x00BC,
UFCS = 0x00C0,
UFCV0 = 0x00C4,
@@ -187,19 +188,23 @@ enum ravb_reg {
PIR = 0x0520,
PSR = 0x0528,
PIPR = 0x052c,
+ CXR31 = 0x0530, /* RZ/G2L only */
MPR = 0x0558,
PFTCR = 0x055c,
PFRCR = 0x0560,
GECMR = 0x05b0,
MAHR = 0x05c0,
MALR = 0x05c8,
- TROCR = 0x0700, /* R-Car Gen3 only */
+ TROCR = 0x0700, /* R-Car Gen3 and RZ/G2L only */
+ CXR41 = 0x0708, /* RZ/G2L only */
+ CXR42 = 0x0710, /* RZ/G2L only */
CEFCR = 0x0740,
FRECR = 0x0748,
TSFRCR = 0x0750,
TLFRCR = 0x0758,
RFCR = 0x0760,
MAFCR = 0x0778,
+ CSR0 = 0x0800, /* RZ/G2L only */
};
@@ -810,10 +815,11 @@ enum ECMR_BIT {
ECMR_TXF = 0x00010000, /* Documented for R-Car Gen3 only */
ECMR_RXF = 0x00020000,
ECMR_PFR = 0x00040000,
- ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 only */
+ ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 and RZ/G2L */
ECMR_RZPF = 0x00100000,
ECMR_DPAD = 0x00200000,
ECMR_RCSC = 0x00800000,
+ ECMR_RCPT = 0x02000000, /* Documented for RZ/G2L only */
ECMR_TRCCM = 0x04000000,
};
@@ -823,6 +829,7 @@ enum ECSR_BIT {
ECSR_MPD = 0x00000002,
ECSR_LCHNG = 0x00000004,
ECSR_PHYI = 0x00000008,
+ ECSR_PFRI = 0x00000010, /* Documented for R-Car Gen3 and RZ/G2L */
};
/* ECSIPR */
@@ -857,9 +864,13 @@ enum MPR_BIT {
/* GECMR */
enum GECMR_BIT {
- GECMR_SPEED = 0x00000001,
- GECMR_SPEED_100 = 0x00000000,
- GECMR_SPEED_1000 = 0x00000001,
+ GECMR_SPEED = 0x00000001,
+ GECMR_SPEED_100 = 0x00000000,
+ GECMR_SPEED_1000 = 0x00000001,
+ GBETH_GECMR_SPEED = 0x00000030,
+ GBETH_GECMR_SPEED_10 = 0x00000000,
+ GBETH_GECMR_SPEED_100 = 0x00000010,
+ GBETH_GECMR_SPEED_1000 = 0x00000020,
};
/* The Ethernet AVB descriptor definitions. */
@@ -949,6 +960,16 @@ enum RAVB_QUEUE {
RAVB_NC, /* Network Control Queue */
};
+enum CXR31_BIT {
+ CXR31_SEL_LINK0 = 0x00000001,
+ CXR31_SEL_LINK1 = 0x00000008,
+};
+
+enum CSR0_BIT {
+ CSR0_TPE = 0x00000010,
+ CSR0_RPE = 0x00000020,
+};
+
#define DBAT_ENTRY_NUM 22
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
@@ -956,6 +977,9 @@ enum RAVB_QUEUE {
#define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16))
+#define GBETH_RX_BUFF_MAX 8192
+#define GBETH_RX_DESC_DATA_SIZE 4080
+
struct ravb_tstamp_skb {
struct list_head list;
struct sk_buff *skb;
@@ -985,8 +1009,8 @@ struct ravb_hw_info {
void *(*alloc_rx_desc)(struct net_device *ndev, int q);
bool (*receive)(struct net_device *ndev, int *quota, int q);
void (*set_rate)(struct net_device *ndev);
- int (*set_rx_csum_feature)(struct net_device *ndev, netdev_features_t features);
- void (*dmac_init)(struct net_device *ndev);
+ int (*set_feature)(struct net_device *ndev, netdev_features_t features);
+ int (*dmac_init)(struct net_device *ndev);
void (*emac_init)(struct net_device *ndev);
const char (*gstrings_stats)[ETH_GSTRING_LEN];
size_t gstrings_size;
@@ -994,14 +1018,20 @@ struct ravb_hw_info {
netdev_features_t net_features;
int stats_len;
size_t max_rx_len;
+ u32 tccr_mask;
+ u32 rx_max_buf_size;
unsigned aligned_tx: 1;
/* hardware features */
unsigned internal_delay:1; /* AVB-DMAC has internal delays */
unsigned tx_counters:1; /* E-MAC has TX counters */
+ unsigned carrier_counters:1; /* E-MAC has carrier counters */
unsigned multi_irqs:1; /* AVB-DMAC and E-MAC has multiple irqs */
- unsigned no_ptp_cfg_active:1; /* AVB-DMAC does not support gPTP active in config mode */
- unsigned ptp_cfg_active:1; /* AVB-DMAC has gPTP support active in config mode */
+ unsigned gptp:1; /* AVB-DMAC has gPTP support */
+ unsigned ccc_gac:1; /* AVB-DMAC has gPTP support active in config mode */
+ unsigned nc_queues:1; /* AVB-DMAC has RX and TX NC queues */
+ unsigned magic_pkt:1; /* E-MAC supports magic packet detection */
+ unsigned half_duplex:1; /* E-MAC supports half duplex mode */
};
struct ravb_private {
@@ -1018,9 +1048,11 @@ struct ravb_private {
struct ravb_desc *desc_bat;
dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
+ struct ravb_rx_desc *gbeth_rx_ring;
struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
void *tx_align[NUM_TX_QUEUE];
+ struct sk_buff *rx_1st_skb;
struct sk_buff **rx_skb[NUM_RX_QUEUE];
struct sk_buff **tx_skb[NUM_TX_QUEUE];
u32 rx_over_errors;
@@ -1056,6 +1088,8 @@ struct ravb_private {
unsigned rgmii_override:1; /* Deprecated rgmii-*id behavior */
unsigned int num_tx_desc; /* TX descriptors per packet */
+ int duplex;
+
const struct ravb_hw_info *info;
struct reset_control *rstc;
};
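The extended ravb_hw_info lets one driver serve both R-Car AVB and RZ/G2L GbEth; a per-SoC instance would be wired up roughly like this (field values here are illustrative, not the patch's exact table):

static const struct ravb_hw_info my_gbeth_hw_info = {
	.dmac_init	 = ravb_dmac_init_gbeth,
	.emac_init	 = ravb_emac_init_gbeth,
	.set_rate	 = ravb_set_rate_gbeth,
	.receive	 = ravb_rx_gbeth,
	.rx_max_buf_size = GBETH_RX_BUFF_MAX,
	.half_duplex	 = 1,	/* GbEth E-MAC supports half duplex */
	/* .gptp, .nc_queues left clear: GbEth has neither */
};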
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 0f85f2d97b18..b4c597f4040c 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -83,7 +83,24 @@ static int ravb_config(struct net_device *ndev)
return error;
}
-static void ravb_set_rate(struct net_device *ndev)
+static void ravb_set_rate_gbeth(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ switch (priv->speed) {
+ case 10: /* 10BASE */
+ ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
+ break;
+ case 100: /* 100BASE */
+ ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
+ break;
+ case 1000: /* 1000BASE */
+ ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
+ break;
+ }
+}
+
+static void ravb_set_rate_rcar(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -115,17 +132,19 @@ static void ravb_read_mac_address(struct device_node *np,
{
int ret;
- ret = of_get_mac_address(np, ndev->dev_addr);
+ ret = of_get_ethdev_address(np, ndev);
if (ret) {
u32 mahr = ravb_read(ndev, MAHR);
u32 malr = ravb_read(ndev, MALR);
+ u8 addr[ETH_ALEN];
- ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
- ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
- ndev->dev_addr[2] = (mahr >> 8) & 0xFF;
- ndev->dev_addr[3] = (mahr >> 0) & 0xFF;
- ndev->dev_addr[4] = (malr >> 8) & 0xFF;
- ndev->dev_addr[5] = (malr >> 0) & 0xFF;
+ addr[0] = (mahr >> 24) & 0xFF;
+ addr[1] = (mahr >> 16) & 0xFF;
+ addr[2] = (mahr >> 8) & 0xFF;
+ addr[3] = (mahr >> 0) & 0xFF;
+ addr[4] = (malr >> 8) & 0xFF;
+ addr[5] = (malr >> 0) & 0xFF;
+ eth_hw_addr_set(ndev, addr);
}
}
@@ -217,7 +236,32 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
return free_num;
}
-static void ravb_rx_ring_free(struct net_device *ndev, int q)
+static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned int ring_size;
+ unsigned int i;
+
+ if (!priv->gbeth_rx_ring)
+ return;
+
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
+
+ if (!dma_mapping_error(ndev->dev.parent,
+ le32_to_cpu(desc->dptr)))
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ GBETH_RX_BUFF_MAX,
+ DMA_FROM_DEVICE);
+ }
+ ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
+ priv->rx_desc_dma[q]);
+ priv->gbeth_rx_ring = NULL;
+}
+
+static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
@@ -283,7 +327,38 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
}
-static void ravb_rx_ring_format(struct net_device *ndev, int q)
+static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_rx_desc *rx_desc;
+ unsigned int rx_ring_size;
+ dma_addr_t dma_addr;
+ unsigned int i;
+
+ rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
+ memset(priv->gbeth_rx_ring, 0, rx_ring_size);
+ /* Build RX ring buffer */
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ /* RX descriptor */
+ rx_desc = &priv->gbeth_rx_ring[i];
+ rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
+ dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
+ GBETH_RX_BUFF_MAX,
+ DMA_FROM_DEVICE);
+ /* We just set the data size to 0 for a failed mapping which
+ * should prevent DMA from happening...
+ */
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ rx_desc->ds_cc = cpu_to_le16(0);
+ rx_desc->dptr = cpu_to_le32(dma_addr);
+ rx_desc->die_dt = DT_FEMPTY;
+ }
+ rx_desc = &priv->gbeth_rx_ring[i];
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+ rx_desc->die_dt = DT_LINKFIX; /* type */
+}
+
+static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
struct ravb_ex_rx_desc *rx_desc;
@@ -356,7 +431,20 @@ static void ravb_ring_format(struct net_device *ndev, int q)
desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}
-static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
+static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned int ring_size;
+
+ ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
+
+ priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
+ return priv->gbeth_rx_ring;
+}
+
+static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
@@ -426,7 +514,37 @@ error:
return -ENOMEM;
}
-static void ravb_rcar_emac_init(struct net_device *ndev)
+static void ravb_emac_init_gbeth(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ /* Receive frame limit set register */
+ ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
+
+ /* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
+ ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
+ ECMR_TE | ECMR_RE | ECMR_RCPT |
+ ECMR_TXF | ECMR_RXF, ECMR);
+
+ ravb_set_rate_gbeth(ndev);
+
+ /* Set MAC address */
+ ravb_write(ndev,
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+
+ /* E-MAC status register clear */
+ ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
+ ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
+
+ /* E-MAC interrupt enable register */
+ ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
+
+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, CXR31_SEL_LINK0);
+}
+
+static void ravb_emac_init_rcar(struct net_device *ndev)
{
/* Receive frame limit set register */
ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
@@ -436,7 +554,7 @@ static void ravb_rcar_emac_init(struct net_device *ndev)
(ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
ECMR_TE | ECMR_RE, ECMR);
- ravb_set_rate(ndev);
+ ravb_set_rate_rcar(ndev);
/* Set MAC address */
ravb_write(ndev,
@@ -461,10 +579,58 @@ static void ravb_emac_init(struct net_device *ndev)
info->emac_init(ndev);
}
-static void ravb_rcar_dmac_init(struct net_device *ndev)
+static int ravb_dmac_init_gbeth(struct net_device *ndev)
+{
+ int error;
+
+ error = ravb_ring_init(ndev, RAVB_BE);
+ if (error)
+ return error;
+
+ /* Descriptor format */
+ ravb_ring_format(ndev, RAVB_BE);
+
+ /* Set DMAC RX */
+ ravb_write(ndev, 0x60000000, RCR);
+
+ /* Set Max Frame Length (RTC) */
+ ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);
+
+ /* Set FIFO size */
+ ravb_write(ndev, 0x00222200, TGC);
+
+ ravb_write(ndev, 0, TCCR);
+
+ /* Frame receive */
+ ravb_write(ndev, RIC0_FRE0, RIC0);
+ /* Disable FIFO full warning */
+ ravb_write(ndev, 0x0, RIC1);
+ /* Receive FIFO full error, descriptor empty */
+ ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);
+
+ ravb_write(ndev, TIC_FTE0, TIC);
+
+ return 0;
+}
+
+static int ravb_dmac_init_rcar(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ int error;
+
+ error = ravb_ring_init(ndev, RAVB_BE);
+ if (error)
+ return error;
+ error = ravb_ring_init(ndev, RAVB_NC);
+ if (error) {
+ ravb_ring_free(ndev, RAVB_BE);
+ return error;
+ }
+
+ /* Descriptor format */
+ ravb_ring_format(ndev, RAVB_BE);
+ ravb_ring_format(ndev, RAVB_NC);
/* Set AVB RX */
ravb_write(ndev,
@@ -491,6 +657,8 @@ static void ravb_rcar_dmac_init(struct net_device *ndev)
ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
/* Frame transmitted, timestamp FIFO updated */
ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
+
+ return 0;
}
/* Device init function for Ethernet AVB */
@@ -505,20 +673,9 @@ static int ravb_dmac_init(struct net_device *ndev)
if (error)
return error;
- error = ravb_ring_init(ndev, RAVB_BE);
+ error = info->dmac_init(ndev);
if (error)
return error;
- error = ravb_ring_init(ndev, RAVB_NC);
- if (error) {
- ravb_ring_free(ndev, RAVB_BE);
- return error;
- }
-
- /* Descriptor format */
- ravb_ring_format(ndev, RAVB_BE);
- ravb_ring_format(ndev, RAVB_NC);
-
- info->dmac_init(ndev);
/* Setting the control will start the AVB-DMAC process. */
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
@@ -579,7 +736,151 @@ static void ravb_rx_csum(struct sk_buff *skb)
skb_trim(skb, skb->len - sizeof(__sum16));
}
-static bool ravb_rcar_rx(struct net_device *ndev, int *quota, int q)
+static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
+ struct ravb_rx_desc *desc)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+
+ skb = priv->rx_skb[RAVB_BE][entry];
+ priv->rx_skb[RAVB_BE][entry] = NULL;
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+ ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);
+
+ return skb;
+}
+
+/* Packet receive function for Gigabit Ethernet */
+static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ struct net_device_stats *stats;
+ struct ravb_rx_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u8 desc_status;
+ int boguscnt;
+ u16 pkt_len;
+ u8 die_dt;
+ int entry;
+ int limit;
+
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+ stats = &priv->stats[q];
+
+ boguscnt = min(boguscnt, *quota);
+ limit = boguscnt;
+ desc = &priv->gbeth_rx_ring[entry];
+ while (desc->die_dt != DT_FEMPTY) {
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ desc_status = desc->msc;
+ pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+
+ if (--boguscnt < 0)
+ break;
+
+ /* We use 0-byte descriptors to mark the DMA mapping errors */
+ if (!pkt_len)
+ continue;
+
+ if (desc_status & MSC_MC)
+ stats->multicast++;
+
+ if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
+ stats->rx_errors++;
+ if (desc_status & MSC_CRC)
+ stats->rx_crc_errors++;
+ if (desc_status & MSC_RFE)
+ stats->rx_frame_errors++;
+ if (desc_status & (MSC_RTLF | MSC_RTSF))
+ stats->rx_length_errors++;
+ if (desc_status & MSC_CEEF)
+ stats->rx_missed_errors++;
+ } else {
+ die_dt = desc->die_dt & 0xF0;
+ switch (die_dt) {
+ case DT_FSINGLE:
+ skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi[q], skb);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ break;
+ case DT_FSTART:
+ priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_put(priv->rx_1st_skb, pkt_len);
+ break;
+ case DT_FMID:
+ skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_copy_to_linear_data_offset(priv->rx_1st_skb,
+ priv->rx_1st_skb->len,
+ skb->data,
+ pkt_len);
+ skb_put(priv->rx_1st_skb, pkt_len);
+ dev_kfree_skb(skb);
+ break;
+ case DT_FEND:
+ skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_copy_to_linear_data_offset(priv->rx_1st_skb,
+ priv->rx_1st_skb->len,
+ skb->data,
+ pkt_len);
+ skb_put(priv->rx_1st_skb, pkt_len);
+ dev_kfree_skb(skb);
+ priv->rx_1st_skb->protocol =
+ eth_type_trans(priv->rx_1st_skb, ndev);
+ napi_gro_receive(&priv->napi[q],
+ priv->rx_1st_skb);
+ stats->rx_packets++;
+ stats->rx_bytes += priv->rx_1st_skb->len;
+ break;
+ }
+ }
+
+ entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
+ desc = &priv->gbeth_rx_ring[entry];
+ }
+
+ /* Refill the RX ring buffers. */
+ for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
+ entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->gbeth_rx_ring[entry];
+ desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
+
+ if (!priv->rx_skb[q][entry]) {
+ skb = netdev_alloc_skb(ndev, info->max_rx_len);
+ if (!skb)
+ break;
+ ravb_set_buffer_align(skb);
+ dma_addr = dma_map_single(ndev->dev.parent,
+ skb->data,
+ GBETH_RX_BUFF_MAX,
+ DMA_FROM_DEVICE);
+ skb_checksum_none_assert(skb);
+ /* We just set the data size to 0 for a failed mapping
+ * which should prevent DMA from happening...
+ */
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ desc->ds_cc = cpu_to_le16(0);
+ desc->dptr = cpu_to_le32(dma_addr);
+ priv->rx_skb[q][entry] = skb;
+ }
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
+ desc->die_dt = DT_FEMPTY;
+ }
+
+ *quota -= limit - (++boguscnt);
+
+ return boguscnt <= 0;
+}
+
+/* Packet receive function for Ethernet AVB */
+static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
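[Editor's note] The DT_FSTART/DT_FMID/DT_FEND handling in ravb_rx_gbeth() above reassembles a frame that the DMA engine split across several RX descriptors: an FSTART fragment opens the frame, FMID fragments are appended, and FEND (or a self-contained FSINGLE) completes it. A minimal user-space model of that state machine follows; the fragment tags and frame_buf type are illustrative stand-ins, not the driver's definitions.

#include <stddef.h>
#include <string.h>

enum frag_type { FSINGLE, FSTART, FMID, FEND };

struct frame_buf {
        unsigned char data[4096];
        size_t len;
};

/* Append one fragment; returns 1 when a complete frame is ready,
 * 0 when more fragments are expected, -1 on overflow. */
static int reassemble(struct frame_buf *fb, enum frag_type t,
                      const unsigned char *frag, size_t frag_len)
{
        if (t == FSINGLE || t == FSTART)
                fb->len = 0;                      /* a new frame begins */
        if (fb->len + frag_len > sizeof(fb->data))
                return -1;                        /* would overflow: drop */
        memcpy(fb->data + fb->len, frag, frag_len);
        fb->len += frag_len;
        return t == FSINGLE || t == FEND;         /* complete frame? */
}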
@@ -717,11 +1018,13 @@ static void ravb_rcv_snd_enable(struct net_device *ndev)
/* Function that waits for the DMA process to finish */
static int ravb_stop_dma(struct net_device *ndev)
{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int error;
/* Wait for stopping the hardware TX process */
- error = ravb_wait(ndev, TCCR,
- TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
+ error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);
+
if (error)
return error;
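[Editor's note] ravb_stop_dma() now waits on a per-SoC TCCR mask (four transmit-request bits on R-Car, a single one on GbEth). The underlying idiom in ravb_wait() is a bounded poll of masked register bits; here is a generic sketch under that assumption, with the register accessor and retry budget as placeholders rather than the driver's real timing.

#include <stdint.h>

static int wait_for_bits(uint32_t (*read_reg)(void *ctx), void *ctx,
                         uint32_t mask, uint32_t value, int retries)
{
        while (retries-- > 0) {
                if ((read_reg(ctx) & mask) == value)
                        return 0;       /* masked bits reached the value */
                /* a real driver sleeps or delays between reads here */
        }
        return -1;                      /* timed out */
}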
@@ -859,6 +1162,7 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
irqreturn_t result = IRQ_NONE;
u32 iss;
@@ -875,8 +1179,13 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
result = IRQ_HANDLED;
/* Network control and best effort queue RX/TX */
- for (q = RAVB_NC; q >= RAVB_BE; q--) {
- if (ravb_queue_interrupt(ndev, q))
+ if (info->nc_queues) {
+ for (q = RAVB_NC; q >= RAVB_BE; q--) {
+ if (ravb_queue_interrupt(ndev, q))
+ result = IRQ_HANDLED;
+ }
+ } else {
+ if (ravb_queue_interrupt(ndev, RAVB_BE))
result = IRQ_HANDLED;
}
}
@@ -966,16 +1275,25 @@ static int ravb_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = napi->dev;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ bool gptp = info->gptp || info->ccc_gac;
+ struct ravb_rx_desc *desc;
unsigned long flags;
int q = napi - priv->napi;
int mask = BIT(q);
int quota = budget;
+ unsigned int entry;
+ if (!gptp) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->gbeth_rx_ring[entry];
+ }
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- if (ravb_rx(ndev, &quota, q))
- goto out;
+ if (gptp || desc->die_dt != DT_FEMPTY) {
+ if (ravb_rx(ndev, &quota, q))
+ goto out;
+ }
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1000,7 +1318,8 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Receive error message handling */
priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
- priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+ if (info->nc_queues)
+ priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
if (priv->rx_over_errors != ndev->stats.rx_over_errors)
ndev->stats.rx_over_errors = priv->rx_over_errors;
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
@@ -1009,6 +1328,13 @@ out:
return budget - quota;
}
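[Editor's note] On the budget handling just above: the poll function lends its quota to the RX handler, which decrements it per delivered packet, and the value returned to the NAPI core is how much of the budget was actually consumed. A compressed model of that accounting, with illustrative names:

static int poll_model(int budget, void (*rx)(int *quota))
{
        int quota = budget;

        rx(&quota);             /* RX handler consumes up to 'quota' packets */
        return budget - quota;  /* work done, as reported back to NAPI */
}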
+static void ravb_set_duplex_gbeth(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
+}
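[Editor's note] ravb_modify() is the driver's read-modify-write register helper, so the call above clears ECMR_DM and sets it again only for full duplex, leaving every other ECMR field untouched. A one-line model of the bit arithmetic, assuming the usual clear-then-set semantics:

#include <stdint.h>

/* clear the requested bits first, then set the new ones */
static uint32_t modify_bits(uint32_t reg_val, uint32_t clear, uint32_t set)
{
        return (reg_val & ~clear) | set;
}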
+
/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
@@ -1025,6 +1351,12 @@ static void ravb_adjust_link(struct net_device *ndev)
ravb_rcv_snd_disable(ndev);
if (phydev->link) {
+ if (info->half_duplex && phydev->duplex != priv->duplex) {
+ new_state = true;
+ priv->duplex = phydev->duplex;
+ ravb_set_duplex_gbeth(ndev);
+ }
+
if (phydev->speed != priv->speed) {
new_state = true;
priv->speed = phydev->speed;
@@ -1039,6 +1371,8 @@ static void ravb_adjust_link(struct net_device *ndev)
new_state = true;
priv->link = 0;
priv->speed = 0;
+ if (info->half_duplex)
+ priv->duplex = -1;
}
/* Enable TX and RX right over here, if E-MAC change is ignored */
@@ -1061,6 +1395,7 @@ static int ravb_phy_init(struct net_device *ndev)
{
struct device_node *np = ndev->dev.parent->of_node;
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
struct phy_device *phydev;
struct device_node *pn;
phy_interface_t iface;
@@ -1068,6 +1403,7 @@ static int ravb_phy_init(struct net_device *ndev)
priv->link = 0;
priv->speed = 0;
+ priv->duplex = -1;
/* Try connecting to PHY */
pn = of_parse_phandle(np, "phy-handle", 0);
@@ -1106,15 +1442,17 @@ static int ravb_phy_init(struct net_device *ndev)
netdev_info(ndev, "limited PHY to 100Mbit/s\n");
}
- /* 10BASE, Pause and Asym Pause is not supported */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+ if (!info->half_duplex) {
+ /* 10BASE, Pause and Asym Pause are not supported */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
- /* Half Duplex is not supported */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ /* Half Duplex is not supported */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ }
phy_attached_info(phydev);
@@ -1157,6 +1495,24 @@ static void ravb_set_msglevel(struct net_device *ndev, u32 value)
priv->msg_enable = value;
}
+static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
+ "rx_queue_0_current",
+ "tx_queue_0_current",
+ "rx_queue_0_dirty",
+ "tx_queue_0_dirty",
+ "rx_queue_0_packets",
+ "tx_queue_0_packets",
+ "rx_queue_0_bytes",
+ "tx_queue_0_bytes",
+ "rx_queue_0_mcast_packets",
+ "rx_queue_0_errors",
+ "rx_queue_0_crc_errors",
+ "rx_queue_0_frame_errors",
+ "rx_queue_0_length_errors",
+ "rx_queue_0_csum_offload_errors",
+ "rx_queue_0_over_errors",
+};
+
static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx_queue_0_current",
"tx_queue_0_current",
@@ -1208,11 +1564,14 @@ static void ravb_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *estats, u64 *data)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ int num_rx_q;
int i = 0;
int q;
+ num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
/* Device-specific stats */
- for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
+ for (q = RAVB_BE; q < num_rx_q; q++) {
struct net_device_stats *stats = &priv->stats[q];
data[i++] = priv->cur_rx[q];
@@ -1274,7 +1633,7 @@ static int ravb_set_ringparam(struct net_device *ndev,
if (netif_running(ndev)) {
netif_device_detach(ndev);
/* Stop PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_stop(ndev);
/* Wait for DMA stopping */
error = ravb_stop_dma(ndev);
@@ -1287,7 +1646,8 @@ static int ravb_set_ringparam(struct net_device *ndev,
/* Free all the skb's in the RX queue and the DMA buffers. */
ravb_ring_free(ndev, RAVB_BE);
- ravb_ring_free(ndev, RAVB_NC);
+ if (info->nc_queues)
+ ravb_ring_free(ndev, RAVB_NC);
}
/* Set new parameters */
@@ -1306,7 +1666,7 @@ static int ravb_set_ringparam(struct net_device *ndev,
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_device_attach(ndev);
@@ -1319,6 +1679,7 @@ static int ravb_get_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *hw_info = priv->info;
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
@@ -1332,7 +1693,8 @@ static int ravb_get_ts_info(struct net_device *ndev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_ALL);
- info->phc_index = ptp_clock_index(priv->ptp.clock);
+ if (hw_info->gptp || hw_info->ccc_gac)
+ info->phc_index = ptp_clock_index(priv->ptp.clock);
return 0;
}
@@ -1348,8 +1710,9 @@ static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
- if (wol->wolopts & ~WAKE_MAGIC)
+ if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
return -EOPNOTSUPP;
priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
@@ -1403,7 +1766,8 @@ static int ravb_open(struct net_device *ndev)
int error;
napi_enable(&priv->napi[RAVB_BE]);
- napi_enable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_enable(&priv->napi[RAVB_NC]);
if (!info->multi_irqs) {
error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
@@ -1446,7 +1810,7 @@ static int ravb_open(struct net_device *ndev)
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
@@ -1460,7 +1824,7 @@ static int ravb_open(struct net_device *ndev)
out_ptp_stop:
/* Stop PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_stop(ndev);
out_free_irq_nc_tx:
if (!info->multi_irqs)
@@ -1477,7 +1841,8 @@ out_free_irq_emac:
out_free_irq:
free_irq(ndev->irq, ndev);
out_napi_off:
- napi_disable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
return error;
}
@@ -1508,7 +1873,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
netif_tx_stop_all_queues(ndev);
/* Stop PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_stop(ndev);
/* Wait for DMA stopping */
@@ -1526,7 +1891,8 @@ static void ravb_tx_timeout_work(struct work_struct *work)
}
ravb_ring_free(ndev, RAVB_BE);
- ravb_ring_free(ndev, RAVB_NC);
+ if (info->nc_queues)
+ ravb_ring_free(ndev, RAVB_NC);
/* Device init */
error = ravb_dmac_init(ndev);
@@ -1543,7 +1909,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
out:
/* Initialise PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
@@ -1553,6 +1919,7 @@ out:
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
u16 q = skb_get_queue_mapping(skb);
struct ravb_tstamp_skb *ts_skb;
@@ -1629,28 +1996,30 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
desc->dptr = cpu_to_le32(dma_addr);
/* TX timestamp required */
- if (q == RAVB_NC) {
- ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
- if (!ts_skb) {
- if (num_tx_desc > 1) {
- desc--;
- dma_unmap_single(ndev->dev.parent, dma_addr,
- len, DMA_TO_DEVICE);
+ if (info->gptp || info->ccc_gac) {
+ if (q == RAVB_NC) {
+ ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
+ if (!ts_skb) {
+ if (num_tx_desc > 1) {
+ desc--;
+ dma_unmap_single(ndev->dev.parent, dma_addr,
+ len, DMA_TO_DEVICE);
+ }
+ goto unmap;
}
- goto unmap;
+ ts_skb->skb = skb_get(skb);
+ ts_skb->tag = priv->ts_skb_tag++;
+ priv->ts_skb_tag &= 0x3ff;
+ list_add_tail(&ts_skb->list, &priv->ts_skb_list);
+
+ /* TAG and timestamp required flag */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
+ desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
}
- ts_skb->skb = skb_get(skb);
- ts_skb->tag = priv->ts_skb_tag++;
- priv->ts_skb_tag &= 0x3ff;
- list_add_tail(&ts_skb->list, &priv->ts_skb_list);
- /* TAG and timestamp required flag */
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
- desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
+ skb_tx_timestamp(skb);
}
-
- skb_tx_timestamp(skb);
/* Descriptor type must be set after all the above writes */
dma_wmb();
if (num_tx_desc > 1) {
@@ -1698,28 +2067,45 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
nstats = &ndev->stats;
stats0 = &priv->stats[RAVB_BE];
- stats1 = &priv->stats[RAVB_NC];
if (info->tx_counters) {
nstats->tx_dropped += ravb_read(ndev, TROCR);
ravb_write(ndev, 0, TROCR); /* (write clear) */
}
- nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
- nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
- nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
- nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
- nstats->multicast = stats0->multicast + stats1->multicast;
- nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
- nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
- nstats->rx_frame_errors =
- stats0->rx_frame_errors + stats1->rx_frame_errors;
- nstats->rx_length_errors =
- stats0->rx_length_errors + stats1->rx_length_errors;
- nstats->rx_missed_errors =
- stats0->rx_missed_errors + stats1->rx_missed_errors;
- nstats->rx_over_errors =
- stats0->rx_over_errors + stats1->rx_over_errors;
+ if (info->carrier_counters) {
+ nstats->collisions += ravb_read(ndev, CXR41);
+ ravb_write(ndev, 0, CXR41); /* (write clear) */
+ nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
+ ravb_write(ndev, 0, CXR42); /* (write clear) */
+ }
+
+ nstats->rx_packets = stats0->rx_packets;
+ nstats->tx_packets = stats0->tx_packets;
+ nstats->rx_bytes = stats0->rx_bytes;
+ nstats->tx_bytes = stats0->tx_bytes;
+ nstats->multicast = stats0->multicast;
+ nstats->rx_errors = stats0->rx_errors;
+ nstats->rx_crc_errors = stats0->rx_crc_errors;
+ nstats->rx_frame_errors = stats0->rx_frame_errors;
+ nstats->rx_length_errors = stats0->rx_length_errors;
+ nstats->rx_missed_errors = stats0->rx_missed_errors;
+ nstats->rx_over_errors = stats0->rx_over_errors;
+ if (info->nc_queues) {
+ stats1 = &priv->stats[RAVB_NC];
+
+ nstats->rx_packets += stats1->rx_packets;
+ nstats->tx_packets += stats1->tx_packets;
+ nstats->rx_bytes += stats1->rx_bytes;
+ nstats->tx_bytes += stats1->tx_bytes;
+ nstats->multicast += stats1->multicast;
+ nstats->rx_errors += stats1->rx_errors;
+ nstats->rx_crc_errors += stats1->rx_crc_errors;
+ nstats->rx_frame_errors += stats1->rx_frame_errors;
+ nstats->rx_length_errors += stats1->rx_length_errors;
+ nstats->rx_missed_errors += stats1->rx_missed_errors;
+ nstats->rx_over_errors += stats1->rx_over_errors;
+ }
return nstats;
}
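[Editor's note] TROCR, CXR41 and CXR42 above are "write clear" counters, as the inline comments say: the driver reads the hardware count, folds it into the software statistics, then writes 0 so the hardware restarts from zero. A hedged sketch of that idiom with the register accessors abstracted away:

#include <stdint.h>

static void drain_wc_counter(uint32_t (*rd)(void *io),
                             void (*wr)(void *io, uint32_t v),
                             void *io, uint64_t *total)
{
        *total += rd(io);       /* fold hardware count into software stats */
        wr(io, 0);              /* write clear: hardware restarts from zero */
}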
@@ -1752,7 +2138,7 @@ static int ravb_close(struct net_device *ndev)
ravb_write(ndev, 0, TIC);
/* Stop PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_stop(ndev);
/* Set the config mode to stop the AVB-DMAC's processes */
@@ -1761,10 +2147,12 @@ static int ravb_close(struct net_device *ndev)
"device will be stopped after h/w processes are done.\n");
/* Clear the timestamp list */
- list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
- list_del(&ts_skb->list);
- kfree_skb(ts_skb->skb);
- kfree(ts_skb);
+ if (info->gptp || info->ccc_gac) {
+ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
+ list_del(&ts_skb->list);
+ kfree_skb(ts_skb->skb);
+ kfree(ts_skb);
+ }
}
/* PHY disconnect */
@@ -1784,12 +2172,14 @@ static int ravb_close(struct net_device *ndev)
}
free_irq(ndev->irq, ndev);
- napi_disable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
/* Free all the skb's in the RX queue and the DMA buffers. */
ravb_ring_free(ndev, RAVB_BE);
- ravb_ring_free(ndev, RAVB_NC);
+ if (info->nc_queues)
+ ravb_ring_free(ndev, RAVB_NC);
return 0;
}
@@ -1918,8 +2308,15 @@ static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
spin_unlock_irqrestore(&priv->lock, flags);
}
-static int ravb_set_features_rx_csum(struct net_device *ndev,
- netdev_features_t features)
+static int ravb_set_features_gbeth(struct net_device *ndev,
+ netdev_features_t features)
+{
+ /* Placeholder */
+ return 0;
+}
+
+static int ravb_set_features_rcar(struct net_device *ndev,
+ netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
@@ -1937,7 +2334,7 @@ static int ravb_set_features(struct net_device *ndev,
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- return info->set_rx_csum_feature(ndev, features);
+ return info->set_feature(ndev, features);
}
static const struct net_device_ops ravb_netdev_ops = {
@@ -2001,43 +2398,72 @@ static int ravb_mdio_release(struct ravb_private *priv)
}
static const struct ravb_hw_info ravb_gen3_hw_info = {
- .rx_ring_free = ravb_rx_ring_free,
- .rx_ring_format = ravb_rx_ring_format,
- .alloc_rx_desc = ravb_alloc_rx_desc,
- .receive = ravb_rcar_rx,
- .set_rate = ravb_set_rate,
- .set_rx_csum_feature = ravb_set_features_rx_csum,
- .dmac_init = ravb_rcar_dmac_init,
- .emac_init = ravb_rcar_emac_init,
+ .rx_ring_free = ravb_rx_ring_free_rcar,
+ .rx_ring_format = ravb_rx_ring_format_rcar,
+ .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
.gstrings_stats = ravb_gstrings_stats,
.gstrings_size = sizeof(ravb_gstrings_stats),
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .rx_max_buf_size = SZ_2K,
.internal_delay = 1,
.tx_counters = 1,
.multi_irqs = 1,
- .ptp_cfg_active = 1,
+ .ccc_gac = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
};
static const struct ravb_hw_info ravb_gen2_hw_info = {
- .rx_ring_free = ravb_rx_ring_free,
- .rx_ring_format = ravb_rx_ring_format,
- .alloc_rx_desc = ravb_alloc_rx_desc,
- .receive = ravb_rcar_rx,
- .set_rate = ravb_set_rate,
- .set_rx_csum_feature = ravb_set_features_rx_csum,
- .dmac_init = ravb_rcar_dmac_init,
- .emac_init = ravb_rcar_emac_init,
+ .rx_ring_free = ravb_rx_ring_free_rcar,
+ .rx_ring_format = ravb_rx_ring_format_rcar,
+ .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
.gstrings_stats = ravb_gstrings_stats,
.gstrings_size = sizeof(ravb_gstrings_stats),
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .rx_max_buf_size = SZ_2K,
.aligned_tx = 1,
- .no_ptp_cfg_active = 1,
+ .gptp = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
+};
+
+static const struct ravb_hw_info gbeth_hw_info = {
+ .rx_ring_free = ravb_rx_ring_free_gbeth,
+ .rx_ring_format = ravb_rx_ring_format_gbeth,
+ .alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
+ .receive = ravb_rx_gbeth,
+ .set_rate = ravb_set_rate_gbeth,
+ .set_feature = ravb_set_features_gbeth,
+ .dmac_init = ravb_dmac_init_gbeth,
+ .emac_init = ravb_emac_init_gbeth,
+ .gstrings_stats = ravb_gstrings_stats_gbeth,
+ .gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
+ .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
+ .tccr_mask = TCCR_TSRQ0,
+ .rx_max_buf_size = SZ_8K,
+ .aligned_tx = 1,
+ .tx_counters = 1,
+ .carrier_counters = 1,
+ .half_duplex = 1,
};
static const struct of_device_id ravb_match_table[] = {
@@ -2046,6 +2472,7 @@ static const struct of_device_id ravb_match_table[] = {
{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
{ }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
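[Editor's note] Each compatible string above carries a .data pointer to one of the hw_info tables, which probe-time code retrieves (via of_device_get_match_data() in drivers of this shape) so that every per-SoC difference flows through flags and function pointers instead of runtime chip checks. A simplified model of that dispatch; the types and names are stand-ins for the driver's own:

struct hw_info {
        int (*receive)(void *priv, int *quota, int q);
        unsigned int nc_queues:1;       /* has a network-control queue */
        unsigned int half_duplex:1;     /* may negotiate half duplex */
};

static int rx_dispatch(const struct hw_info *info, void *priv, int *quota)
{
        int handled = info->receive(priv, quota, 0);    /* best-effort queue */

        if (info->nc_queues)                    /* NC queue, if present */
                handled |= info->receive(priv, quota, 1);
        return handled;
}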
@@ -2080,13 +2507,15 @@ static void ravb_set_config_mode(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- if (info->no_ptp_cfg_active) {
+ if (info->gptp) {
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
/* Set CSEL value */
ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
- } else {
+ } else if (info->ccc_gac) {
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
CCC_GAC | CCC_CSEL_HPB);
+ } else {
+ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
}
}
@@ -2192,8 +2621,11 @@ static int ravb_probe(struct platform_device *pdev)
priv->pdev = pdev;
priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
- priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
- priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
+ if (info->nc_queues) {
+ priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
+ priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
+ }
+
priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->addr)) {
error = PTR_ERR(priv->addr);
@@ -2252,7 +2684,7 @@ static int ravb_probe(struct platform_device *pdev)
}
clk_prepare_enable(priv->refclk);
- ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+ ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
/* FIXME: R-Car Gen2 has 4-byte alignment restriction for tx buffer
@@ -2269,13 +2701,15 @@ static int ravb_probe(struct platform_device *pdev)
/* Set AVB config mode */
ravb_set_config_mode(ndev);
- /* Set GTI value */
- error = ravb_set_gti(ndev);
- if (error)
- goto out_disable_refclk;
+ if (info->gptp || info->ccc_gac) {
+ /* Set GTI value */
+ error = ravb_set_gti(ndev);
+ if (error)
+ goto out_disable_refclk;
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ }
if (info->internal_delay) {
ravb_parse_delay_mode(np, ndev);
@@ -2301,7 +2735,7 @@ static int ravb_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&priv->ts_skb_list);
/* Initialise PTP Clock driver */
- if (info->ptp_cfg_active)
+ if (info->ccc_gac)
ravb_ptp_init(ndev, pdev);
/* Debug message level */
@@ -2323,7 +2757,8 @@ static int ravb_probe(struct platform_device *pdev)
}
netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
- netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
+ if (info->nc_queues)
+ netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
/* Network device register */
error = register_netdev(ndev);
@@ -2341,7 +2776,9 @@ static int ravb_probe(struct platform_device *pdev)
return 0;
out_napi_del:
- netif_napi_del(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ netif_napi_del(&priv->napi[RAVB_NC]);
+
netif_napi_del(&priv->napi[RAVB_BE]);
ravb_mdio_release(priv);
out_dma_free:
@@ -2349,7 +2786,7 @@ out_dma_free:
priv->desc_bat_dma);
/* Stop PTP Clock driver */
- if (info->ptp_cfg_active)
+ if (info->ccc_gac)
ravb_ptp_stop(ndev);
out_disable_refclk:
clk_disable_unprepare(priv->refclk);
@@ -2369,7 +2806,7 @@ static int ravb_remove(struct platform_device *pdev)
const struct ravb_hw_info *info = priv->info;
/* Stop PTP Clock driver */
- if (info->ptp_cfg_active)
+ if (info->ccc_gac)
ravb_ptp_stop(ndev);
clk_disable_unprepare(priv->refclk);
@@ -2380,7 +2817,8 @@ static int ravb_remove(struct platform_device *pdev)
ravb_write(ndev, CCC_OPC_RESET, CCC);
pm_runtime_put_sync(&pdev->dev);
unregister_netdev(ndev);
- netif_napi_del(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
ravb_mdio_release(priv);
pm_runtime_disable(&pdev->dev);
@@ -2394,6 +2832,7 @@ static int ravb_remove(struct platform_device *pdev)
static int ravb_wol_setup(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
/* Disable interrupts by clearing the interrupt masks. */
ravb_write(ndev, 0, RIC0);
@@ -2402,7 +2841,8 @@ static int ravb_wol_setup(struct net_device *ndev)
/* Only allow ECI interrupts */
synchronize_irq(priv->emac_irq);
- napi_disable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
@@ -2415,9 +2855,11 @@ static int ravb_wol_setup(struct net_device *ndev)
static int ravb_wol_restore(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int ret;
- napi_enable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_enable(&priv->napi[RAVB_NC]);
napi_enable(&priv->napi[RAVB_BE]);
/* Disable MagicPacket */
@@ -2468,13 +2910,15 @@ static int __maybe_unused ravb_resume(struct device *dev)
/* Set AVB config mode */
ravb_set_config_mode(ndev);
- /* Set GTI value */
- ret = ravb_set_gti(ndev);
- if (ret)
- return ret;
+ if (info->gptp || info->ccc_gac) {
+ /* Set GTI value */
+ ret = ravb_set_gti(ndev);
+ if (ret)
+ return ret;
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ }
if (info->internal_delay)
ravb_set_delay_mode(ndev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 1374faa229a2..a3fbb2221c9a 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1153,17 +1153,19 @@ static void update_mac_address(struct net_device *ndev)
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
- memcpy(ndev->dev_addr, mac, ETH_ALEN);
+ eth_hw_addr_set(ndev, mac);
} else {
u32 mahr = sh_eth_read(ndev, MAHR);
u32 malr = sh_eth_read(ndev, MALR);
-
- ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
- ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
- ndev->dev_addr[2] = (mahr >> 8) & 0xFF;
- ndev->dev_addr[3] = (mahr >> 0) & 0xFF;
- ndev->dev_addr[4] = (malr >> 8) & 0xFF;
- ndev->dev_addr[5] = (malr >> 0) & 0xFF;
+ u8 addr[ETH_ALEN];
+
+ addr[0] = (mahr >> 24) & 0xFF;
+ addr[1] = (mahr >> 16) & 0xFF;
+ addr[2] = (mahr >> 8) & 0xFF;
+ addr[3] = (mahr >> 0) & 0xFF;
+ addr[4] = (malr >> 8) & 0xFF;
+ addr[5] = (malr >> 0) & 0xFF;
+ eth_hw_addr_set(ndev, addr);
}
}
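[Editor's note] This sh_eth hunk follows the same tree-wide pattern as the ravb changes: drivers stop writing dev->dev_addr directly and instead assemble the station address in a local buffer, then install it in one call to eth_hw_addr_set() (which copies the buffer into the now const-qualified dev_addr). A stand-alone sketch of the register-to-bytes step:

#include <stdint.h>

#define ETH_ALEN 6

static void mac_from_regs(uint32_t mahr, uint32_t malr, uint8_t addr[ETH_ALEN])
{
        addr[0] = (mahr >> 24) & 0xFF;  /* high register holds bytes 0..3 */
        addr[1] = (mahr >> 16) & 0xFF;
        addr[2] = (mahr >> 8) & 0xFF;
        addr[3] = mahr & 0xFF;
        addr[4] = (malr >> 8) & 0xFF;   /* low register holds bytes 4..5 */
        addr[5] = malr & 0xFF;
}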
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 3364b6a56bd1..ba4062881eed 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1954,7 +1954,7 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p)
err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
if (err)
return err;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -2545,11 +2545,13 @@ static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
const struct rocker *rocker = rocker_port->rocker;
const struct pci_dev *pdev = rocker->pdev;
+ u8 addr[ETH_ALEN];
int err;
- err = rocker_cmd_get_port_settings_macaddr(rocker_port,
- rocker_port->dev->dev_addr);
- if (err) {
+ err = rocker_cmd_get_port_settings_macaddr(rocker_port, addr);
+ if (!err) {
+ eth_hw_addr_set(rocker_port->dev, addr);
+ } else {
dev_warn(&pdev->dev, "failed to get mac address, using random\n");
eth_hw_addr_random(rocker_port->dev);
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index 049dc6cf4611..0f45107db8dd 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -329,7 +329,7 @@ struct sxgbe_core_ops {
/* Set power management mode (e.g. magic frame) */
void (*pmt)(void __iomem *ioaddr, unsigned long mode);
/* Set/Get Unicast MAC addresses */
- void (*set_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
+ void (*set_umac_addr)(void __iomem *ioaddr, const unsigned char *addr,
unsigned int reg_n);
void (*get_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
index e96e2bd295ef..7d9f257de92a 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
@@ -85,7 +85,8 @@ static void sxgbe_core_pmt(void __iomem *ioaddr, unsigned long mode)
}
/* Set/Get Unicast MAC addresses */
-static void sxgbe_core_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+static void sxgbe_core_set_umac_addr(void __iomem *ioaddr,
+ const unsigned char *addr,
unsigned int reg_n)
{
u32 high_word, low_word;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 6781aa636d58..32161a56726c 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -931,10 +931,13 @@ static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
{
if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+ u8 addr[ETH_ALEN];
+
priv->hw->mac->get_umac_addr((void __iomem *)
- priv->ioaddr,
- priv->dev->dev_addr, 0);
- if (!is_valid_ether_addr(priv->dev->dev_addr))
+ priv->ioaddr, addr, 0);
+ if (is_valid_ether_addr(addr))
+ eth_hw_addr_set(priv->dev, addr);
+ else
eth_hw_addr_random(priv->dev);
}
dev_info(priv->device, "device MAC address %pM\n",
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 4639ed9438a3..926532466691 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -118,7 +118,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
}
/* Get MAC address if available (DT) */
- of_get_mac_address(node, priv->dev->dev_addr);
+ of_get_ethdev_address(node, priv->dev);
/* Get the TX/RX IRQ numbers */
for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 37ff25a84030..96065dfc747b 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -167,7 +167,7 @@ static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
struct sgiseeq_private *sp = netdev_priv(dev);
struct sockaddr *sa = addr;
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, sa->sa_data);
spin_lock_irq(&sp->tx_lock);
__sgiseeq_set_mac_address(dev);
@@ -764,7 +764,7 @@ static int sgiseeq_probe(struct platform_device *pdev)
setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);
- memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->mac);
#ifdef DEBUG
gpriv = sp;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index e7e2223aebbf..cf366ed2557c 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1038,7 +1038,7 @@ int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
}
int efx_ef10_vport_add_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac)
+ unsigned int port_id, const u8 *mac)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
@@ -1050,7 +1050,7 @@ int efx_ef10_vport_add_mac(struct efx_nic *efx,
}
int efx_ef10_vport_del_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac)
+ unsigned int port_id, const u8 *mac)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 518268ce2064..6aa81229b68a 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -1250,7 +1250,7 @@ int ef100_probe_pf(struct efx_nic *efx)
if (rc)
goto fail;
/* Assign MAC address */
- memcpy(net_dev->dev_addr, net_dev->perm_addr, ETH_ALEN);
+ eth_hw_addr_set(net_dev, net_dev->perm_addr);
memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);
return 0;
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 752d6406f07e..7f5aa4a8c451 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -480,7 +480,7 @@ static int efx_ef10_vport_del_vf_mac(struct efx_nic *efx, unsigned int port_id,
return rc;
}
-int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct ef10_vf *vf;
@@ -523,7 +523,7 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
goto fail;
if (vf->efx)
- ether_addr_copy(vf->efx->net_dev->dev_addr, mac);
+ eth_hw_addr_set(vf->efx->net_dev, mac);
}
ether_addr_copy(vf->mac, mac);
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
index cfe556d17313..3c703ca878b0 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.h
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -39,7 +39,7 @@ static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
void efx_ef10_sriov_fini(struct efx_nic *efx);
static inline void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned vf_i) {}
-int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac);
int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
u16 vlan, u8 qos);
@@ -60,9 +60,9 @@ int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
int efx_ef10_vport_add_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac);
+ unsigned int port_id, const u8 *mac);
int efx_ef10_vport_del_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac);
+ unsigned int port_id, const u8 *mac);
int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id);
int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
u32 *port_flags, u32 *vadaptor_flags,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 43ef4f529028..6960a2fe2b53 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -136,7 +136,7 @@ static int efx_probe_port(struct efx_nic *efx)
return rc;
/* Initialise MAC address to permanent address */
- ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
+ eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 896b59253197..f187631b2c5c 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -181,11 +181,11 @@ int efx_set_mac_address(struct net_device *net_dev, void *data)
/* save old address */
ether_addr_copy(old_addr, net_dev->dev_addr);
- ether_addr_copy(net_dev->dev_addr, new_addr);
+ eth_hw_addr_set(net_dev, new_addr);
if (efx->type->set_mac_address) {
rc = efx->type->set_mac_address(efx);
if (rc) {
- ether_addr_copy(net_dev->dev_addr, old_addr);
+ eth_hw_addr_set(net_dev, old_addr);
return rc;
}
}
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index bf1443539a1a..bd552c7dffcb 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -563,20 +563,14 @@ int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_link_state *link_state = &efx->link_state;
- u32 supported;
mutex_lock(&efx->mac_lock);
efx_mcdi_phy_get_link_ksettings(efx, cmd);
mutex_unlock(&efx->mac_lock);
/* Both MACs support pause frames (bidirectional and respond-only) */
- ethtool_convert_link_mode_to_legacy_u32(&supported,
- cmd->link_modes.supported);
-
- supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
if (LOOPBACK_INTERNAL(efx)) {
cmd->base.speed = link_state->speed;
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 423bdf81200f..c68837a951f4 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -1044,7 +1044,7 @@ static int ef4_probe_port(struct ef4_nic *efx)
return rc;
/* Initialise MAC address to permanent address */
- ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
+ eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);
return 0;
}
@@ -2162,11 +2162,11 @@ static int ef4_set_mac_address(struct net_device *net_dev, void *data)
/* save old address */
ether_addr_copy(old_addr, net_dev->dev_addr);
- ether_addr_copy(net_dev->dev_addr, new_addr);
+ eth_hw_addr_set(net_dev, new_addr);
if (efx->type->set_mac_address) {
rc = efx->type->set_mac_address(efx);
if (rc) {
- ether_addr_copy(net_dev->dev_addr, old_addr);
+ eth_hw_addr_set(net_dev, old_addr);
return rc;
}
}
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
index 4bd3ef8f3384..c4fe3c48ac46 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -132,16 +132,27 @@ void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
case MC_CMD_MEDIA_SFP_PLUS:
case MC_CMD_MEDIA_QSFP_PLUS:
SET_BIT(FIBRE);
- if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) {
SET_BIT(1000baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- SET_BIT(10000baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ SET_BIT(1000baseX_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) {
+ SET_BIT(10000baseCR_Full);
+ SET_BIT(10000baseLR_Full);
+ SET_BIT(10000baseSR_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
SET_BIT(40000baseCR4_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+ SET_BIT(40000baseSR4_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) {
SET_BIT(100000baseCR4_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+ SET_BIT(100000baseSR4_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) {
SET_BIT(25000baseCR_Full);
+ SET_BIT(25000baseSR_Full);
+ }
if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
SET_BIT(50000baseCR2_Full);
break;
@@ -192,15 +203,19 @@ u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
if (TEST_BIT(1000baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
- if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
+ if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full) ||
+ TEST_BIT(1000baseX_Full))
result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
- if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
+ if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full) ||
+ TEST_BIT(10000baseCR_Full) || TEST_BIT(10000baseLR_Full) ||
+ TEST_BIT(10000baseSR_Full))
result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
- if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
+ if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full) ||
+ TEST_BIT(40000baseSR4_Full))
result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
- if (TEST_BIT(100000baseCR4_Full))
+ if (TEST_BIT(100000baseCR4_Full) || TEST_BIT(100000baseSR4_Full))
result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
- if (TEST_BIT(25000baseCR_Full))
+ if (TEST_BIT(25000baseCR_Full) || TEST_BIT(25000baseSR_Full))
result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
if (TEST_BIT(50000baseCR2_Full))
result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
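[Editor's note] SET_BIT()/TEST_BIT() here are local macros in this file over the ethtool link-mode bitmap, and the change makes one firmware capability bit fan out to several media variants of the same speed (e.g. 10000FDX now advertises CR, LR and SR). A table-driven model of that fan-out; the bit numbers and types below are invented for illustration:

#include <stdint.h>

struct cap_map {
        uint32_t cap_bit;       /* firmware capability bit number */
        const uint8_t *modes;   /* link-mode bit numbers it implies */
        int nmodes;
};

static void caps_to_modes(uint32_t cap, const struct cap_map *map, int n,
                          uint64_t *linkset)
{
        for (int i = 0; i < n; i++)
                if (cap & (1u << map[i].cap_bit))
                        for (int j = 0; j < map[i].nmodes; j++)
                                *linkset |= 1ull << map[i].modes[j];
}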
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f6981810039d..cc15ee8812d9 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1440,7 +1440,7 @@ struct efx_nic_type {
bool (*sriov_wanted)(struct efx_nic *efx);
void (*sriov_reset)(struct efx_nic *efx);
void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
- int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac);
+ int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, const u8 *mac);
int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
u8 qos);
int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i,
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index a39c5143b386..797e51802ccb 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -648,7 +648,7 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
} else if (rc == -EINVAL) {
fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
} else if (rc == -EPERM) {
- netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+ pci_info(efx->pci_dev, "no PTP support\n");
return rc;
} else {
efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
@@ -824,7 +824,7 @@ static int efx_ptp_disable(struct efx_nic *efx)
* should only have been called during probe.
*/
if (rc == -ENOSYS || rc == -EPERM)
- netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+ pci_info(efx->pci_dev, "no PTP support\n");
else if (rc)
efx_mcdi_display_error(efx, MC_CMD_PTP,
MC_CMD_PTP_IN_DISABLE_LEN,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 83dcfcae3d4b..f12851a527d9 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1057,7 +1057,7 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
return;
if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
- netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n");
+ pci_info(efx->pci_dev, "no SR-IOV VFs probed\n");
return;
}
if (count > 0 && count > max_vfs)
@@ -1591,7 +1591,7 @@ void efx_fini_sriov(void)
destroy_workqueue(vfdi_workqueue);
}
-int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
{
struct siena_nic_data *nic_data = efx->nic_data;
struct siena_vf *vf;
diff --git a/drivers/net/ethernet/sfc/siena_sriov.h b/drivers/net/ethernet/sfc/siena_sriov.h
index e441c89c25ce..e548c4daf189 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.h
+++ b/drivers/net/ethernet/sfc/siena_sriov.h
@@ -46,7 +46,7 @@ bool efx_siena_sriov_wanted(struct efx_nic *efx);
void efx_siena_sriov_reset(struct efx_nic *efx);
void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
-int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac);
int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf,
u16 vlan, u8 qos);
int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 062f7844c496..e2d009866a7b 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -243,7 +243,7 @@ static int ioc3_set_mac_address(struct net_device *dev, void *addr)
struct ioc3_private *ip = netdev_priv(dev);
struct sockaddr *sa = addr;
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, sa->sa_data);
spin_lock_irq(&ip->ioc3_lock);
__ioc3_set_mac_address(dev);
@@ -920,7 +920,7 @@ static int ioc3eth_probe(struct platform_device *pdev)
ioc3_mii_start(ip);
ioc3_ssram_disc(ip);
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, mac_addr);
/* The IOC3-specific entries in the device structure. */
dev->watchdog_timeo = 5 * HZ;
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index efce834d8ee6..6d850ea2b94c 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -836,7 +836,7 @@ static int meth_probe(struct platform_device *pdev)
dev->watchdog_timeo = timeout;
dev->irq = MACE_ETHERNET_IRQ;
dev->base_addr = (unsigned long)&mace->eth;
- memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, o2meth_eaddr);
priv = netdev_priv(dev);
priv->pdev = pdev;
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 1fd08a04bd4e..ff4197f5e46d 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1400,6 +1400,7 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
void __iomem* port_base;
struct net_device *dev;
struct sc92031_priv *priv;
+ u8 addr[ETH_ALEN];
u32 mac0, mac1;
err = pci_enable_device(pdev);
@@ -1458,12 +1459,13 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mac0 = ioread32(port_base + MAC0);
mac1 = ioread32(port_base + MAC0 + 4);
- dev->dev_addr[0] = mac0 >> 24;
- dev->dev_addr[1] = mac0 >> 16;
- dev->dev_addr[2] = mac0 >> 8;
- dev->dev_addr[3] = mac0;
- dev->dev_addr[4] = mac1 >> 8;
- dev->dev_addr[5] = mac1;
+ addr[0] = mac0 >> 24;
+ addr[1] = mac0 >> 16;
+ addr[2] = mac0 >> 8;
+ addr[3] = mac0;
+ addr[4] = mac1 >> 8;
+ addr[5] = mac1;
+ eth_hw_addr_set(dev, addr);
err = register_netdev(dev);
if (err < 0)
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 3d1a18a01ce5..216bb2d34d7c 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1070,7 +1070,7 @@ static int sis190_open(struct net_device *dev)
/*
* Rx and Tx descriptors need 256 bytes alignment.
- * pci_alloc_consistent() guarantees a stronger alignment.
+ * dma_alloc_coherent() guarantees a stronger alignment.
*/
tp->TxDescRing = dma_alloc_coherent(&pdev->dev, TX_RING_BYTES,
&tp->tx_dma, GFP_KERNEL);
@@ -1586,6 +1586,7 @@ static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
{
struct sis190_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
+ __le16 addr[ETH_ALEN / 2];
u16 sig;
int i;
@@ -1606,8 +1607,9 @@ static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
for (i = 0; i < ETH_ALEN / 2; i++) {
u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
- ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
+ addr[i] = cpu_to_le16(w);
}
+ eth_hw_addr_set(dev, (u8 *)addr);
sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
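[Editor's note] The __le16 buffer in the hunk above matters: each EEPROM word holds two address bytes, and storing cpu_to_le16() values before the cast to u8 yields the same low-byte-first layout on any host. A stand-alone model that makes the byte placement explicit (equivalent to the cast, given CPU-order input words):

#include <stdint.h>

static void eeprom_words_to_mac(const uint16_t *words, uint8_t addr[6])
{
        for (int i = 0; i < 3; i++) {
                addr[2 * i]     = words[i] & 0xFF;      /* low byte first */
                addr[2 * i + 1] = words[i] >> 8;
        }
}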
@@ -1629,6 +1631,7 @@ static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
static const u16 ids[] = { 0x0965, 0x0966, 0x0968 };
struct sis190_private *tp = netdev_priv(dev);
struct pci_dev *isa_bridge;
+ u8 addr[ETH_ALEN];
u8 reg, tmp8;
unsigned int i;
@@ -1657,8 +1660,9 @@ static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
for (i = 0; i < ETH_ALEN; i++) {
outb(0x9 + i, 0x78);
- dev->dev_addr[i] = inb(0x79);
+ addr[i] = inb(0x79);
}
+ eth_hw_addr_set(dev, addr);
outb(0x12, 0x78);
reg = inb(0x79);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 60a0c0e9ded2..cc2d907c4c4b 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -258,6 +258,7 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
+ u16 addr[ETH_ALEN / 2];
u16 signature;
int i;
@@ -271,7 +272,8 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev,
/* get MAC address from EEPROM */
for (i = 0; i < 3; i++)
- ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ addr[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ eth_hw_addr_set(net_dev, (u8 *)addr);
return 1;
}
@@ -290,6 +292,7 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
struct net_device *net_dev)
{
struct pci_dev *isa_bridge = NULL;
+ u8 addr[ETH_ALEN];
u8 reg;
int i;
@@ -306,8 +309,9 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
for (i = 0; i < 6; i++) {
outb(0x09 + i, 0x70);
- ((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
+ addr[i] = inb(0x71);
}
+ eth_hw_addr_set(net_dev, addr);
pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
pci_dev_put(isa_bridge);
@@ -331,6 +335,7 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
+ u16 addr[ETH_ALEN / 2];
u32 rfcrSave;
u32 i;
@@ -345,8 +350,9 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev,
/* load MAC addr to filter data register */
for (i = 0 ; i < 3 ; i++) {
sw32(rfcr, (i << RFADDR_shift));
- *( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr);
+ addr[i] = sr16(rfdr);
}
+ eth_hw_addr_set(net_dev, (u8 *)addr);
/* enable packet filtering */
sw32(rfcr, rfcrSave | RFEN);
@@ -375,17 +381,18 @@ static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
+ u16 addr[ETH_ALEN / 2];
int wait, rc = 0;
sw32(mear, EEREQ);
for (wait = 0; wait < 2000; wait++) {
if (sr32(mear) & EEGNT) {
- u16 *mac = (u16 *)net_dev->dev_addr;
int i;
/* get MAC address from EEPROM */
for (i = 0; i < 3; i++)
- mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
+ addr[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
+ eth_hw_addr_set(net_dev, (u8 *)addr);
rc = 1;
break;
@@ -1098,7 +1105,7 @@ sis900_init_rxfilter (struct net_device * net_dev)
/* load MAC addr to filter data register */
for (i = 0 ; i < 3 ; i++) {
- u32 w = (u32) *((u16 *)(net_dev->dev_addr)+i);
+ u32 w = (u32) *((const u16 *)(net_dev->dev_addr)+i);
sw32(rfcr, i << RFADDR_shift);
sw32(rfdr, w);
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 44daf79a8f97..a0654e88444c 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -325,6 +325,7 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *dev;
struct epic_private *ep;
int i, ret, option = 0, duplex = 0;
+ __le16 addr[ETH_ALEN / 2];
void *ring_space;
dma_addr_t ring_dma;
@@ -416,7 +417,8 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Note: the '175 does not have a serial EEPROM. */
for (i = 0; i < 3; i++)
- ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));
+ addr[i] = cpu_to_le16(er16(LAN0 + i*4));
+ eth_hw_addr_set(dev, (u8 *)addr);
if (debug > 2) {
dev_dbg(&pdev->dev, "EEPROM contents:\n");
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index b008b4e8a2a5..89381f796985 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1788,6 +1788,7 @@ static int smc911x_probe(struct net_device *dev)
struct dma_slave_config config;
dma_cap_mask_t mask;
#endif
+ u8 addr[ETH_ALEN];
DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
@@ -1892,7 +1893,8 @@ static int smc911x_probe(struct net_device *dev)
spin_lock_init(&lp->lock);
/* Get the MAC address */
- SMC_GET_MAC_ADDR(lp, dev->dev_addr);
+ SMC_GET_MAC_ADDR(lp, addr);
+ eth_hw_addr_set(dev, addr);
/* now, reset the chip, and put it into a known state */
smc911x_reset(dev);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 42fc37c7887a..37c822e27207 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -347,6 +347,7 @@ static void smc91c92_detach(struct pcmcia_device *link)
static int cvt_ascii_address(struct net_device *dev, char *s)
{
+ u8 mac[ETH_ALEN];
int i, j, da, c;
if (strlen(s) != 12)
@@ -359,8 +360,9 @@ static int cvt_ascii_address(struct net_device *dev, char *s)
da += ((c >= '0') && (c <= '9')) ?
(c - '0') : ((c & 0x0f) + 9);
}
- dev->dev_addr[i] = da;
+ mac[i] = da;
}
+ eth_hw_addr_set(dev, mac);
return 0;
}
@@ -539,6 +541,7 @@ static int mot_setup(struct pcmcia_device *link)
struct net_device *dev = link->priv;
unsigned int ioaddr = dev->base_addr;
int i, wait, loop;
+ u8 mac[ETH_ALEN];
u_int addr;
/* Read Ethernet address from Serial EEPROM */
@@ -559,9 +562,10 @@ static int mot_setup(struct pcmcia_device *link)
return -1;
addr = inw(ioaddr + GENERAL);
- dev->dev_addr[2*i] = addr & 0xff;
- dev->dev_addr[2*i+1] = (addr >> 8) & 0xff;
+ mac[2*i] = addr & 0xff;
+ mac[2*i+1] = (addr >> 8) & 0xff;
}
+ eth_hw_addr_set(dev, mac);
return 0;
}
@@ -666,14 +670,13 @@ static int pcmcia_osi_mac(struct pcmcia_device *p_dev,
void *priv)
{
struct net_device *dev = priv;
- int i;
if (tuple->TupleDataLen < 8)
return -EINVAL;
if (tuple->TupleData[0] != 0x04)
return -EINVAL;
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = tuple->TupleData[i+2];
+
+ eth_hw_addr_set(dev, &tuple->TupleData[2]);
return 0;
};
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 813ea941b91a..a31c159e96ea 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1851,6 +1851,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
int retval;
unsigned int val, revision_register;
const char *version_string;
+ u8 addr[ETH_ALEN];
DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
@@ -1922,7 +1923,8 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
/* Get the MAC address */
SMC_SELECT_BANK(lp, 1);
- SMC_GET_MAC_ADDR(lp, dev->dev_addr);
+ SMC_GET_MAC_ADDR(lp, addr);
+ eth_hw_addr_set(dev, addr);
/* now, reset the chip, and put it into a known state */
smc_reset(dev);
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 199a97339280..7a50ba00f8ae 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1503,7 +1503,7 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
/* Sets the device MAC address to dev_addr, called with mac_lock held */
static void
-smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6])
+smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, const u8 dev_addr[6])
{
u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4];
u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
@@ -1939,7 +1939,7 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&pdata->mac_lock);
smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
@@ -2162,13 +2162,15 @@ static void smsc911x_read_mac_address(struct net_device *dev)
struct smsc911x_data *pdata = netdev_priv(dev);
u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH);
u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL);
+ u8 addr[ETH_ALEN];
- dev->dev_addr[0] = (u8)(mac_low32);
- dev->dev_addr[1] = (u8)(mac_low32 >> 8);
- dev->dev_addr[2] = (u8)(mac_low32 >> 16);
- dev->dev_addr[3] = (u8)(mac_low32 >> 24);
- dev->dev_addr[4] = (u8)(mac_high16);
- dev->dev_addr[5] = (u8)(mac_high16 >> 8);
+ addr[0] = (u8)(mac_low32);
+ addr[1] = (u8)(mac_low32 >> 8);
+ addr[2] = (u8)(mac_low32 >> 16);
+ addr[3] = (u8)(mac_low32 >> 24);
+ addr[4] = (u8)(mac_high16);
+ addr[5] = (u8)(mac_high16 >> 8);
+ eth_hw_addr_set(dev, addr);
}
/* Initializing private device structures, only called from probe */
@@ -2375,7 +2377,7 @@ static int smsc911x_probe_config(struct smsc911x_platform_config *config,
phy_interface = PHY_INTERFACE_MODE_NA;
config->phy_interface = phy_interface;
- device_get_mac_address(dev, config->mac, ETH_ALEN);
+ device_get_mac_address(dev, config->mac);
err = device_property_read_u32(dev, "reg-io-width", &width);
if (err == -ENXIO)
@@ -2525,7 +2527,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
SMSC_TRACE(pdata, probe,
"MAC Address is specified by configuration");
} else if (is_valid_ether_addr(pdata->config.mac)) {
- memcpy(dev->dev_addr, pdata->config.mac, ETH_ALEN);
+ eth_hw_addr_set(dev, pdata->config.mac);
SMSC_TRACE(pdata, probe,
"MAC Address specified by platform data");
} else {
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index fdbd2a43e267..d937af18973e 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -404,7 +404,7 @@ static const struct ethtool_ops smsc9420_ethtool_ops = {
static void smsc9420_set_mac_address(struct net_device *dev)
{
struct smsc9420_pdata *pd = netdev_priv(dev);
- u8 *dev_addr = dev->dev_addr;
+ const u8 *dev_addr = dev->dev_addr;
u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4];
u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
(dev_addr[1] << 8) | dev_addr[0];
@@ -416,6 +416,7 @@ static void smsc9420_set_mac_address(struct net_device *dev)
static void smsc9420_check_mac_address(struct net_device *dev)
{
struct smsc9420_pdata *pd = netdev_priv(dev);
+ u8 addr[ETH_ALEN];
/* Check if mac address has been specified when bringing interface up */
if (is_valid_ether_addr(dev->dev_addr)) {
@@ -427,15 +428,16 @@ static void smsc9420_check_mac_address(struct net_device *dev)
* it will already have been set */
u32 mac_high16 = smsc9420_reg_read(pd, ADDRH);
u32 mac_low32 = smsc9420_reg_read(pd, ADDRL);
- dev->dev_addr[0] = (u8)(mac_low32);
- dev->dev_addr[1] = (u8)(mac_low32 >> 8);
- dev->dev_addr[2] = (u8)(mac_low32 >> 16);
- dev->dev_addr[3] = (u8)(mac_low32 >> 24);
- dev->dev_addr[4] = (u8)(mac_high16);
- dev->dev_addr[5] = (u8)(mac_high16 >> 8);
-
- if (is_valid_ether_addr(dev->dev_addr)) {
+ addr[0] = (u8)(mac_low32);
+ addr[1] = (u8)(mac_low32 >> 8);
+ addr[2] = (u8)(mac_low32 >> 16);
+ addr[3] = (u8)(mac_low32 >> 24);
+ addr[4] = (u8)(mac_high16);
+ addr[5] = (u8)(mac_high16 >> 8);
+
+ if (is_valid_ether_addr(addr)) {
/* eeprom values are valid so use them */
+ eth_hw_addr_set(dev, addr);
netif_dbg(pd, probe, pd->dev,
"Mac Address is read from EEPROM\n");
} else {
@@ -788,7 +790,7 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
PKT_BUF_SZ, DMA_FROM_DEVICE);
if (dma_mapping_error(&pd->pdev->dev, mapping)) {
dev_kfree_skb_any(skb);
- netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n");
+ netif_warn(pd, rx_err, pd->dev, "dma_map_single failed!\n");
return -ENOMEM;
}
@@ -940,7 +942,7 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
DMA_TO_DEVICE);
if (dma_mapping_error(&pd->pdev->dev, mapping)) {
netif_warn(pd, tx_err, pd->dev,
- "pci_map_single failed, dropping packet\n");
+ "dma_map_single failed, dropping packet\n");
return NETDEV_TX_BUSY;
}
@@ -1551,7 +1553,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!pd->rx_ring)
goto out_free_io_4;
- /* descriptors are aligned due to the nature of pci_alloc_consistent */
+ /* descriptors are aligned due to the nature of dma_alloc_coherent */
pd->tx_ring = (pd->rx_ring + RX_RING_SIZE);
pd->tx_dma_addr = pd->rx_dma_addr +
sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE;
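Besides the address rework, smsc9420 finishes a leftover of the pci_* DMA API removal: the calls were already generic, only log strings and a comment still said pci_map_single/pci_alloc_consistent. The checked-mapping idiom those messages refer to, in isolation (sketch):

	dma_addr_t mapping = dma_map_single(&pd->pdev->dev, skb->data,
					    PKT_BUF_SZ, DMA_FROM_DEVICE);

	if (dma_mapping_error(&pd->pdev->dev, mapping)) {
		dev_kfree_skb_any(skb);
		return -ENOMEM;		/* the handle is unusable on error */
	}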
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index f80a2aef9972..de7d8bf2c226 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1978,7 +1978,6 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
static int netsec_probe(struct platform_device *pdev)
{
struct resource *mmio_res, *eeprom_res, *irq_res;
- u8 *mac, macbuf[ETH_ALEN];
struct netsec_priv *priv;
u32 hw_ver, phy_addr = 0;
struct net_device *ndev;
@@ -2034,21 +2033,19 @@ static int netsec_probe(struct platform_device *pdev)
goto free_ndev;
}
- mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
- if (mac)
- ether_addr_copy(ndev->dev_addr, mac);
-
- if (priv->eeprom_base &&
- (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
+ ret = device_get_ethdev_address(&pdev->dev, ndev);
+ if (ret && priv->eeprom_base) {
void __iomem *macp = priv->eeprom_base +
NETSEC_EEPROM_MAC_ADDRESS;
-
- ndev->dev_addr[0] = readb(macp + 3);
- ndev->dev_addr[1] = readb(macp + 2);
- ndev->dev_addr[2] = readb(macp + 1);
- ndev->dev_addr[3] = readb(macp + 0);
- ndev->dev_addr[4] = readb(macp + 7);
- ndev->dev_addr[5] = readb(macp + 6);
+ u8 addr[ETH_ALEN];
+
+ addr[0] = readb(macp + 3);
+ addr[1] = readb(macp + 2);
+ addr[2] = readb(macp + 1);
+ addr[3] = readb(macp + 0);
+ addr[4] = readb(macp + 7);
+ addr[5] = readb(macp + 6);
+ eth_hw_addr_set(ndev, addr);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
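netsec drops the two-step device_get_mac_address()-into-buffer dance for device_get_ethdev_address(), which writes the netdev directly and returns an error when the firmware node has nothing, so the EEPROM read becomes a plain fallback. Sketch of the resulting chain; the EEPROM helper name here is made up for illustration:

	ret = device_get_ethdev_address(&pdev->dev, ndev);	/* DT/ACPI first */
	if (ret && priv->eeprom_base)
		netsec_mac_from_eeprom(priv, ndev);		/* hypothetical helper */
	if (!is_valid_ether_addr(ndev->dev_addr))
		dev_warn(&pdev->dev, "no usable MAC\n");	/* driver falls back from here */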
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index ae31ed93aaf0..2c48f8b8ab71 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1599,7 +1599,7 @@ static int ave_probe(struct platform_device *pdev)
ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);
- ret = of_get_mac_address(np, ndev->dev_addr);
+ ret = of_get_ethdev_address(np, ndev);
if (ret) {
/* if the mac address is invalid, use random mac address */
eth_hw_addr_random(ndev);
@@ -1935,6 +1935,17 @@ static const struct ave_soc_data ave_pxs3_data = {
.get_pinmode = ave_pxs3_get_pinmode,
};
+static const struct ave_soc_data ave_nx1_data = {
+ .is_desc_64bit = true,
+ .clock_names = {
+ "ether",
+ },
+ .reset_names = {
+ "ether",
+ },
+ .get_pinmode = ave_pxs3_get_pinmode,
+};
+
static const struct of_device_id of_ave_match[] = {
{
.compatible = "socionext,uniphier-pro4-ave4",
@@ -1956,6 +1967,10 @@ static const struct of_device_id of_ave_match[] = {
.compatible = "socionext,uniphier-pxs3-ave4",
.data = &ave_pxs3_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-ave4",
+ .data = &ave_nx1_data,
+ },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_ave_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index b6d945ea903d..9160f9ed363a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -546,13 +546,13 @@ int dwmac4_setup(struct stmmac_priv *priv);
int dwxgmac2_setup(struct stmmac_priv *priv);
int dwxlgmac2_setup(struct stmmac_priv *priv);
-void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low);
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
void stmmac_set_mac(void __iomem *ioaddr, bool enable);
-void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low);
void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index fbfda55b4c52..5e731a72cce8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -71,6 +71,7 @@ err_remove_config_dt:
static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "st,spear600-gmac"},
+ { .compatible = "snps,dwmac-3.40a"},
{ .compatible = "snps,dwmac-3.50a"},
{ .compatible = "snps,dwmac-3.610"},
{ .compatible = "snps,dwmac-3.70a"},
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index ed817011a94a..6924a6aacbd5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -21,6 +21,7 @@
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
#include "stmmac_platform.h"
@@ -1528,6 +1529,8 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
return ret;
}
+ pm_runtime_get_sync(dev);
+
if (bsp_priv->integrated_phy)
rk_gmac_integrated_phy_powerup(bsp_priv);
@@ -1539,6 +1542,8 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
if (gmac->integrated_phy)
rk_gmac_integrated_phy_powerdown(gmac);
+ pm_runtime_put_sync(&gmac->pdev->dev);
+
phy_power_on(gmac, false);
gmac_clk_enable(gmac, false);
}
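dwmac-rk takes a runtime-PM reference for as long as the MAC is powered, pairing a get in powerup with a put in powerdown. The shape of the pairing (sketch; dev stands for the GMAC's struct device, reached via gmac->pdev->dev on the way down):

	pm_runtime_get_sync(dev);	/* rk_gmac_powerup(): keep the parent awake */
	/* ... integrated PHY and MAC in use ... */
	pm_runtime_put_sync(dev);	/* rk_gmac_powerdown(): drop the reference */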
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 4422baeed3d8..617d0e4c6495 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -634,7 +634,7 @@ static void sun8i_dwmac_set_mac(void __iomem *ioaddr, bool enable)
* If addr is NULL, clear the slot
*/
static void sun8i_dwmac_set_umac_addr(struct mac_device_info *hw,
- unsigned char *addr,
+ const unsigned char *addr,
unsigned int reg_n)
{
void __iomem *ioaddr = hw->pcsr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index fc8759f146c7..76edb9b72675 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -104,7 +104,7 @@ static void dwmac1000_dump_regs(struct mac_device_info *hw, u32 *reg_space)
}
static void dwmac1000_set_umac_addr(struct mac_device_info *hw,
- unsigned char *addr,
+ const unsigned char *addr,
unsigned int reg_n)
{
void __iomem *ioaddr = hw->pcsr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 90383abafa66..f5581db0ba9b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -218,11 +218,18 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
readl(ioaddr + DMA_BUS_MODE + i * 4);
}
-static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
- struct dma_features *dma_cap)
+static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
{
u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);
+ if (!hw_cap) {
+ /* 0x00000000 is the value read on old hardware that does not
+ * implement this register
+ */
+ return -EOPNOTSUPP;
+ }
+
dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
@@ -252,6 +259,8 @@ static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
/* Alternate (enhanced) DESC mode */
dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
+
+ return 0;
}
static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index ebcad8dd99db..75071a7d551a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -68,7 +68,7 @@ static int dwmac100_irq_status(struct mac_device_info *hw,
}
static void dwmac100_set_umac_addr(struct mac_device_info *hw,
- unsigned char *addr,
+ const unsigned char *addr,
unsigned int reg_n)
{
void __iomem *ioaddr = hw->pcsr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index b21745368983..fd41db65fe1d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -322,7 +322,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
}
static void dwmac4_set_umac_addr(struct mac_device_info *hw,
- unsigned char *addr, unsigned int reg_n)
+ const unsigned char *addr, unsigned int reg_n)
{
void __iomem *ioaddr = hw->pcsr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 5be8e6a631d9..d99fa028c646 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -347,8 +347,8 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
}
-static void dwmac4_get_hw_feature(void __iomem *ioaddr,
- struct dma_features *dma_cap)
+static int dwmac4_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
{
u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);
@@ -437,6 +437,8 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5;
+
+ return 0;
}
/* Enable/disable TSO feature and set MSS */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 9292a1fab7d3..d1c605777985 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -187,7 +187,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
return ret;
}
-void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low)
{
unsigned long data;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index d1c31200bb91..caa4bfc4c1d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -239,7 +239,7 @@ void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}
-void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low)
{
unsigned long data;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index c4d78fa93663..c6c4d7948fe5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -335,7 +335,8 @@ static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
}
static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
- unsigned char *addr, unsigned int reg_n)
+ const unsigned char *addr,
+ unsigned int reg_n)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 906e985441a9..5e98355f422b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -371,8 +371,8 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
return ret;
}
-static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
- struct dma_features *dma_cap)
+static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
{
u32 hw_cap;
@@ -445,6 +445,8 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
+
+ return 0;
}
static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue)
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 6dc1c98ebec8..f7dc447f05a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -203,8 +203,8 @@ struct stmmac_dma_ops {
int (*dma_interrupt) (void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir);
/* If supported then get the optional core features */
- void (*get_hw_feature)(void __iomem *ioaddr,
- struct dma_features *dma_cap);
+ int (*get_hw_feature)(void __iomem *ioaddr,
+ struct dma_features *dma_cap);
/* Program the HW RX Watchdog */
void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 queue);
void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
@@ -255,7 +255,7 @@ struct stmmac_dma_ops {
#define stmmac_dma_interrupt_status(__priv, __args...) \
stmmac_do_callback(__priv, dma, dma_interrupt, __args)
#define stmmac_get_hw_feature(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, get_hw_feature, __args)
+ stmmac_do_callback(__priv, dma, get_hw_feature, __args)
#define stmmac_rx_watchdog(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, rx_watchdog, __args)
#define stmmac_set_tx_ring_len(__priv, __args...) \
@@ -330,7 +330,8 @@ struct stmmac_ops {
/* Set power management mode (e.g. magic frame) */
void (*pmt)(struct mac_device_info *hw, unsigned long mode);
/* Set/Get Unicast MAC addresses */
- void (*set_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
+ void (*set_umac_addr)(struct mac_device_info *hw,
+ const unsigned char *addr,
unsigned int reg_n);
void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
unsigned int reg_n);
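The hwif.h hunk is what makes the dwmac1000/dwmac4/dwxgmac2 return-type changes above meaningful: stmmac_do_void_callback() discards the callback's result and only reports whether a callback existed, while stmmac_do_callback() propagates the callback's own return value. Roughly, as inferred from their use here rather than quoted verbatim:

	/* before: 0 if the op exists, callback result thrown away */
	#define stmmac_do_void_callback(p, mod, op, args...) \
		((p)->hw->mod && (p)->hw->mod->op ? \
			((p)->hw->mod->op(args), 0) : -EINVAL)

	/* after: the op's own int comes back, e.g. dwmac1000's -EOPNOTSUPP
	 * when DMA_HW_FEATURE reads as all-zero on old cores */
	#define stmmac_do_callback(p, mod, op, args...) \
		((p)->hw->mod && (p)->hw->mod->op ? \
			(p)->hw->mod->op(args) : -EINVAL)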
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 553c4403258a..d3f350c25b9b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -477,6 +477,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
stmmac_lpi_entry_timer_config(priv, 0);
del_timer_sync(&priv->eee_ctrl_timer);
stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
+ if (priv->hw->xpcs)
+ xpcs_config_eee(priv->hw->xpcs,
+ priv->plat->mult_fact_100ns,
+ false);
}
mutex_unlock(&priv->lock);
return false;
@@ -486,6 +490,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
eee_tw_timer);
+ if (priv->hw->xpcs)
+ xpcs_config_eee(priv->hw->xpcs,
+ priv->plat->mult_fact_100ns,
+ true);
}
if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
@@ -728,7 +736,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
- if (priv->synopsys_id != DWMAC_CORE_5_10)
+ if (priv->synopsys_id < DWMAC_CORE_4_10)
ts_event_en = PTP_TCR_TSEVNTENA;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -1034,7 +1042,7 @@ static void stmmac_mac_link_down(struct phylink_config *config,
stmmac_mac_set(priv, priv->ioaddr, false);
priv->eee_active = false;
priv->tx_lpi_enabled = false;
- stmmac_eee_init(priv);
+ priv->eee_enabled = stmmac_eee_init(priv);
stmmac_set_eee_pls(priv, priv->hw, false);
if (priv->dma_cap.fpesel)
@@ -2810,9 +2818,13 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
*/
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
+ u8 addr[ETH_ALEN];
+
if (!is_valid_ether_addr(priv->dev->dev_addr)) {
- stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
- if (!is_valid_ether_addr(priv->dev->dev_addr))
+ stmmac_get_umac_addr(priv, priv->hw, addr, 0);
+ if (is_valid_ether_addr(addr))
+ eth_hw_addr_set(priv->dev, addr);
+ else
eth_hw_addr_random(priv->dev);
dev_info(priv->device, "device MAC address %pM\n",
priv->dev->dev_addr);
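stmmac_check_ether_addr() shows why the conversions are not purely mechanical: the old code read straight into dev->dev_addr and relied on the random fallback to overwrite a bad value, whereas reading into a scratch buffer means an invalid EEPROM value never touches the netdev at all. The minimal shape of the pattern (sketch):

	u8 addr[ETH_ALEN];

	stmmac_get_umac_addr(priv, priv->hw, addr, 0);	/* scratch, not dev_addr */
	if (is_valid_ether_addr(addr))
		eth_hw_addr_set(priv->dev, addr);
	else
		eth_hw_addr_random(priv->dev);		/* junk never escapes */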
@@ -3502,6 +3514,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
/* Request Rx MSI irq */
for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
+ if (i >= MTL_MAX_RX_QUEUES)
+ break;
if (priv->rx_irq[i] == 0)
continue;
@@ -3525,6 +3539,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
/* Request Tx MSI irq */
for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
+ if (i >= MTL_MAX_TX_QUEUES)
+ break;
if (priv->tx_irq[i] == 0)
continue;
@@ -6807,7 +6823,7 @@ int stmmac_dvr_probe(struct device *device,
priv->tx_irq[i] = res->tx_irq[i];
if (!is_zero_ether_addr(res->mac))
- memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
+ eth_hw_addr_set(priv->dev, res->mac);
dev_set_drvdata(device, priv->dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 62cec9bfcd33..232ac98943cd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -508,6 +508,14 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->pmt = 1;
}
+ if (of_device_is_compatible(np, "snps,dwmac-3.40a")) {
+ plat->has_gmac = 1;
+ plat->enh_desc = 1;
+ plat->tx_coe = 1;
+ plat->bugged_jumbo = 1;
+ plat->pmt = 1;
+ }
+
if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
of_device_is_compatible(np, "snps,dwmac-4.10a") ||
of_device_is_compatible(np, "snps,dwmac-4.20a") ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 0462dcc93e53..be3cb63675a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -36,7 +36,7 @@ struct stmmac_packet_attrs {
int vlan_id_in;
int vlan_id_out;
unsigned char *src;
- unsigned char *dst;
+ const unsigned char *dst;
u32 ip_src;
u32 ip_dst;
int tcp;
@@ -249,8 +249,8 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
struct net_device *orig_ndev)
{
struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ const unsigned char *dst = tpriv->packet->dst;
unsigned char *src = tpriv->packet->src;
- unsigned char *dst = tpriv->packet->dst;
struct stmmachdr *shdr;
struct ethhdr *ehdr;
struct udphdr *uhdr;
@@ -1104,13 +1104,13 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
goto cleanup_sel;
}
- actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
+ actions = kcalloc(nk, sizeof(*actions), GFP_KERNEL);
if (!actions) {
ret = -ENOMEM;
goto cleanup_exts;
}
- act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
+ act = kcalloc(nk, sizeof(*act), GFP_KERNEL);
if (!act) {
ret = -ENOMEM;
goto cleanup_actions;
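Two allocation cleanups ride along here (and in cpts further down): open-coded n * size multiplications move to kcalloc()/devm_kcalloc(), which fail cleanly on multiplication overflow instead of allocating a short buffer. The idiom in isolation (sketch):

	/* overflow-checked: returns NULL if nk * sizeof(*actions) would wrap */
	actions = kcalloc(nk, sizeof(*actions), GFP_KERNEL);
	if (!actions)
		return -ENOMEM;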
diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
index 309de38a7530..b0d3f9a2950c 100644
--- a/drivers/net/ethernet/sun/Kconfig
+++ b/drivers/net/ethernet/sun/Kconfig
@@ -73,6 +73,7 @@ config CASSINI
config SUNVNET_COMMON
tristate "Common routines to support Sun Virtual Networking"
depends on SUN_LDOMS
+ depends on INET
default m
config SUNVNET
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 287ae4c538aa..d2d4f47c7e28 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -3027,7 +3027,7 @@ static void cas_mac_reset(struct cas *cp)
/* Must be invoked under cp->lock. */
static void cas_init_mac(struct cas *cp)
{
- unsigned char *e = &cp->dev->dev_addr[0];
+ const unsigned char *e = &cp->dev->dev_addr[0];
int i;
cas_mac_reset(cp);
@@ -3379,6 +3379,7 @@ static void cas_check_pci_invariants(struct cas *cp)
static int cas_check_invariants(struct cas *cp)
{
struct pci_dev *pdev = cp->pdev;
+ u8 addr[ETH_ALEN];
u32 cfg;
int i;
@@ -3407,8 +3408,8 @@ static int cas_check_invariants(struct cas *cp)
/* finish phy determination. MDIO1 takes precedence over MDIO0 if
* they're both connected.
*/
- cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
- PCI_SLOT(pdev->devfn));
+ cp->phy_type = cas_get_vpd_info(cp, addr, PCI_SLOT(pdev->devfn));
+ eth_hw_addr_set(cp->dev, addr);
if (cp->phy_type & CAS_PHY_SERDES) {
cp->cas_flags |= CAS_FLAG_1000MB_CAP;
return 0; /* no more checking needed */
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 50bd4e3b0af9..6b59b14e74b1 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -230,7 +230,6 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[],
{
struct net_device *dev;
struct vnet_port *port;
- int i;
dev = alloc_etherdev_mqs(sizeof(*port), VNET_MAX_TXQS, 1);
if (!dev)
@@ -238,10 +237,8 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[],
dev->needed_headroom = VNET_PACKET_SKIP + 8;
dev->needed_tailroom = 8;
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = hwaddr[i];
- dev->perm_addr[i] = dev->dev_addr[i];
- }
+ eth_hw_addr_set(dev, hwaddr);
+ ether_addr_copy(dev->perm_addr, dev->dev_addr);
sprintf(dev->name, "vif%d.%d", (int)handle, (int)port_id);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index a68a01d1b2b1..ba8ad76313a9 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -2603,7 +2603,7 @@ static int niu_init_link(struct niu *np)
return 0;
}
-static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
+static void niu_set_primary_mac(struct niu *np, const unsigned char *addr)
{
u16 reg0 = addr[4] << 8 | addr[5];
u16 reg1 = addr[2] << 8 | addr[3];
@@ -6386,7 +6386,7 @@ static int niu_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
if (!netif_running(dev))
return 0;
@@ -8312,6 +8312,7 @@ static void niu_pci_vpd_validate(struct niu *np)
{
struct net_device *dev = np->dev;
struct niu_vpd *vpd = &np->vpd;
+ u8 addr[ETH_ALEN];
u8 val8;
if (!is_valid_ether_addr(&vpd->local_mac[0])) {
@@ -8344,17 +8345,20 @@ static void niu_pci_vpd_validate(struct niu *np)
return;
}
- memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);
+ ether_addr_copy(addr, vpd->local_mac);
- val8 = dev->dev_addr[5];
- dev->dev_addr[5] += np->port;
- if (dev->dev_addr[5] < val8)
- dev->dev_addr[4]++;
+ val8 = addr[5];
+ addr[5] += np->port;
+ if (addr[5] < val8)
+ addr[4]++;
+
+ eth_hw_addr_set(dev, addr);
}
static int niu_pci_probe_sprom(struct niu *np)
{
struct net_device *dev = np->dev;
+ u8 addr[ETH_ALEN];
int len, i;
u64 val, sum;
u8 val8;
@@ -8446,27 +8450,29 @@ static int niu_pci_probe_sprom(struct niu *np)
val = nr64(ESPC_MAC_ADDR0);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
- dev->dev_addr[0] = (val >> 0) & 0xff;
- dev->dev_addr[1] = (val >> 8) & 0xff;
- dev->dev_addr[2] = (val >> 16) & 0xff;
- dev->dev_addr[3] = (val >> 24) & 0xff;
+ addr[0] = (val >> 0) & 0xff;
+ addr[1] = (val >> 8) & 0xff;
+ addr[2] = (val >> 16) & 0xff;
+ addr[3] = (val >> 24) & 0xff;
val = nr64(ESPC_MAC_ADDR1);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
- dev->dev_addr[4] = (val >> 0) & 0xff;
- dev->dev_addr[5] = (val >> 8) & 0xff;
+ addr[4] = (val >> 0) & 0xff;
+ addr[5] = (val >> 8) & 0xff;
- if (!is_valid_ether_addr(&dev->dev_addr[0])) {
+ if (!is_valid_ether_addr(addr)) {
dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
- dev->dev_addr);
+ addr);
return -EINVAL;
}
- val8 = dev->dev_addr[5];
- dev->dev_addr[5] += np->port;
- if (dev->dev_addr[5] < val8)
- dev->dev_addr[4]++;
+ val8 = addr[5];
+ addr[5] += np->port;
+ if (addr[5] < val8)
+ addr[4]++;
+
+ eth_hw_addr_set(dev, addr);
val = nr64(ESPC_MOD_STR_LEN);
netif_printk(np, probe, KERN_DEBUG, np->dev,
@@ -9235,7 +9241,7 @@ static int niu_get_of_props(struct niu *np)
netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n",
dp, prop_len);
}
- memcpy(dev->dev_addr, mac_addr, dev->addr_len);
+ eth_hw_addr_set(dev, mac_addr);
if (!is_valid_ether_addr(&dev->dev_addr[0])) {
netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp);
netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr);
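niu derives each port's MAC by adding the port number to the lowest address byte and carrying into byte 4 on u8 wraparound; the VPD and SPROM paths above both repeat it, now on a scratch buffer. The arithmetic in isolation (sketch):

	static void niu_addr_add_port(u8 addr[ETH_ALEN], unsigned int port)
	{
		u8 before = addr[5];

		addr[5] += port;
		if (addr[5] < before)	/* wrapped past 0xff */
			addr[4]++;
	}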
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index c646575e79d5..531a6f449afa 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -623,7 +623,7 @@ static int bigmac_init_hw(struct bigmac *bp, bool non_blocking)
void __iomem *cregs = bp->creg;
void __iomem *bregs = bp->bregs;
__u32 bblk_dvma = (__u32)bp->bblock_dvma;
- unsigned char *e = &bp->dev->dev_addr[0];
+ const unsigned char *e = &bp->dev->dev_addr[0];
/* Latch current counters into statistics. */
bigmac_get_counters(bp, bregs);
@@ -1076,7 +1076,6 @@ static int bigmac_ether_init(struct platform_device *op,
struct net_device *dev;
u8 bsizes, bsizes_more;
struct bigmac *bp;
- int i;
/* Get a new device struct for this interface. */
dev = alloc_etherdev(sizeof(struct bigmac));
@@ -1086,8 +1085,7 @@ static int bigmac_ether_init(struct platform_device *op,
if (version_printed++ == 0)
printk(KERN_INFO "%s", version);
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = idprom->id_ethaddr[i];
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
/* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */
bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index d72018a60c0f..036856102c50 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1810,7 +1810,7 @@ static u32 gem_setup_multicast(struct gem *gp)
static void gem_init_mac(struct gem *gp)
{
- unsigned char *e = &gp->dev->dev_addr[0];
+ const unsigned char *e = &gp->dev->dev_addr[0];
writel(0x1bf0, gp->regs + MAC_SNDPAUSE);
@@ -2087,7 +2087,7 @@ static void gem_stop_phy(struct gem *gp, int wol)
writel(mifcfg, gp->regs + MIF_CFG);
if (wol && gp->has_wol) {
- unsigned char *e = &gp->dev->dev_addr[0];
+ const unsigned char *e = &gp->dev->dev_addr[0];
u32 csr;
/* Setup wake-on-lan for MAGIC packet */
@@ -2431,13 +2431,13 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
static int gem_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *macaddr = (struct sockaddr *) addr;
+ const unsigned char *e = &dev->dev_addr[0];
struct gem *gp = netdev_priv(dev);
- unsigned char *e = &dev->dev_addr[0];
if (!is_valid_ether_addr(macaddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, macaddr->sa_data);
/* We'll just catch it later when the device is up'd or resumed */
if (!netif_running(dev) || !netif_device_present(dev))
@@ -2797,9 +2797,12 @@ static int gem_get_device_address(struct gem *gp)
return -1;
#endif
}
- memcpy(dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
#else
- get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
+ u8 addr[ETH_ALEN];
+
+ get_gem_mac_nonobp(gp->pdev, addr);
+ eth_hw_addr_set(gp->dev, addr);
#endif
return 0;
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 62f81b0d14ed..ad9029ae6848 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1395,13 +1395,13 @@ force_link:
/* hp->happy_lock must be held */
static int happy_meal_init(struct happy_meal *hp)
{
+ const unsigned char *e = &hp->dev->dev_addr[0];
void __iomem *gregs = hp->gregs;
void __iomem *etxregs = hp->etxregs;
void __iomem *erxregs = hp->erxregs;
void __iomem *bregs = hp->bigmacregs;
void __iomem *tregs = hp->tcvregs;
u32 regtmp, rxcfg;
- unsigned char *e = &hp->dev->dev_addr[0];
/* If auto-negotiation timer is running, kill it. */
del_timer(&hp->happy_timer);
@@ -2661,6 +2661,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
struct happy_meal *hp;
struct net_device *dev;
int i, qfe_slot = -1;
+ u8 addr[ETH_ALEN];
int err = -ENODEV;
sbus_dp = op->dev.parent->of_node;
@@ -2698,7 +2699,8 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
}
if (i < 6) { /* a mac address was given */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = macaddr[i];
+ addr[i] = macaddr[i];
+ eth_hw_addr_set(dev, addr);
macaddr[5]++;
} else {
const unsigned char *addr;
@@ -2707,9 +2709,9 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
addr = of_get_property(dp, "local-mac-address", &len);
if (qfe_slot != -1 && addr && len == ETH_ALEN)
- memcpy(dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
else
- memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
}
hp = netdev_priv(dev);
@@ -2969,6 +2971,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
unsigned long hpreg_res;
int i, qfe_slot = -1;
char prom_name[64];
+ u8 addr[ETH_ALEN];
int err;
/* Now make sure pci_dev cookie is there. */
@@ -3044,7 +3047,8 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
}
if (i < 6) { /* a mac address was given */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = macaddr[i];
+ addr[i] = macaddr[i];
+ eth_hw_addr_set(dev, addr);
macaddr[5]++;
} else {
#ifdef CONFIG_SPARC
@@ -3055,12 +3059,15 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
(addr = of_get_property(dp, "local-mac-address", &len))
!= NULL &&
len == 6) {
- memcpy(dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
} else {
- memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
}
#else
- get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
+ u8 addr[ETH_ALEN];
+
+ get_hme_mac_nonsparc(pdev, addr);
+ eth_hw_addr_set(dev, addr);
#endif
}
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 577cd9753d8e..efe0d33f6024 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -144,7 +144,7 @@ static int qe_init(struct sunqe *qep, int from_irq)
void __iomem *cregs = qep->qcregs;
void __iomem *mregs = qep->mregs;
void __iomem *gregs = qecp->gregs;
- unsigned char *e = &qep->dev->dev_addr[0];
+ const unsigned char *e = &qep->dev->dev_addr[0];
__u32 qblk_dvma = (__u32)qep->qblock_dvma;
u32 tmp;
int i;
@@ -844,7 +844,7 @@ static int qec_ether_init(struct platform_device *op)
if (!dev)
return -ENOMEM;
- memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
qe = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 58ee89223951..da8119625cf3 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -285,6 +285,7 @@ static struct vnet *vnet_new(const u64 *local_mac,
struct vio_dev *vdev)
{
struct net_device *dev;
+ u8 addr[ETH_ALEN];
struct vnet *vp;
int err, i;
@@ -295,7 +296,8 @@ static struct vnet *vnet_new(const u64 *local_mac,
dev->needed_tailroom = 8;
for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
+ addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
+ eth_hw_addr_set(dev, addr);
vp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index df26cea45904..5c9b6c90942b 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -78,7 +78,7 @@ static int xlgmac_init(struct xlgmac_pdata *pdata)
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->mac_regs;
xlgmac_read_mac_addr(pdata);
- memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, pdata->mac_addr);
/* Set all the function pointers */
xlgmac_init_all_ops(pdata);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
index bf6c1c6779ff..76eb7db80f13 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
@@ -57,7 +57,7 @@ static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata)
return 0;
}
-static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, u8 *addr)
+static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, const u8 *addr)
{
unsigned int mac_addr_hi, mac_addr_lo;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index 1db7104fef3a..d435519236e4 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -798,7 +798,7 @@ static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, saddr->sa_data);
hw_ops->set_mac_address(pdata, netdev->dev_addr);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
index 8598aaf3ec99..98e3a271e017 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac.h
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
@@ -410,7 +410,7 @@ struct xlgmac_hw_ops {
void (*dev_xmit)(struct xlgmac_channel *channel);
int (*dev_read)(struct xlgmac_channel *channel);
- int (*set_mac_address)(struct xlgmac_pdata *pdata, u8 *addr);
+ int (*set_mac_address)(struct xlgmac_pdata *pdata, const u8 *addr);
int (*config_rx_mode)(struct xlgmac_pdata *pdata);
int (*enable_rx_csum)(struct xlgmac_pdata *pdata);
int (*disable_rx_csum)(struct xlgmac_pdata *pdata);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 6b409f9c5863..0775a5542f2f 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -832,7 +832,7 @@ static int bdx_set_mac(struct net_device *ndev, void *p)
if (netif_running(dev))
return -EBUSY
*/
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
bdx_restore_mac(ndev, priv);
RET(0);
}
@@ -840,6 +840,7 @@ static int bdx_set_mac(struct net_device *ndev, void *p)
static int bdx_read_mac(struct bdx_priv *priv)
{
u16 macAddress[3], i;
+ u8 addr[ETH_ALEN];
ENTER;
macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
@@ -849,9 +850,10 @@ static int bdx_read_mac(struct bdx_priv *priv)
macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
for (i = 0; i < 3; i++) {
- priv->ndev->dev_addr[i * 2 + 1] = macAddress[i];
- priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8;
+ addr[i * 2 + 1] = macAddress[i];
+ addr[i * 2] = macAddress[i] >> 8;
}
+ eth_hw_addr_set(priv->ndev, addr);
RET(0);
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 6e4d4f9e32e0..b05de9b61ad6 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -61,7 +61,7 @@ struct am65_cpsw_regdump_item {
#define AM65_CPSW_REGDUMP_REC(mod, start, end) { \
.hdr.module_id = (mod), \
- .hdr.len = (((u32 *)(end)) - ((u32 *)(start)) + 1) * sizeof(u32) * 2 + \
+ .hdr.len = (end + 4 - start) * 2 + \
sizeof(struct am65_cpsw_regdump_hdr), \
.start_ofs = (start), \
.end_ofs = end, \
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index c2ea53ca92b6..c092cb61416a 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1918,7 +1918,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
port->port_id,
port->slave.mac_addr);
if (!is_valid_ether_addr(port->slave.mac_addr)) {
- random_ether_addr(port->slave.mac_addr);
+ eth_random_addr(port->slave.mac_addr);
dev_err(dev, "Use random MAC address\n");
}
}
@@ -1970,7 +1970,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
ndev_priv->msg_enable = AM65_CPSW_DEBUG;
SET_NETDEV_DEV(port->ndev, dev);
- ether_addr_copy(port->ndev->dev_addr, port->slave.mac_addr);
+ eth_hw_addr_set(port->ndev, port->slave.mac_addr);
port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
@@ -2429,7 +2429,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
dl_priv = devlink_priv(common->devlink);
dl_priv->common = common;
- devlink_register(common->devlink);
/* Provide devlink hook to switch mode when multiple external ports
* are present NUSS switchdev driver is enabled.
*/
@@ -2442,7 +2441,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
dev_err(dev, "devlink params reg fail ret:%d\n", ret);
goto dl_unreg;
}
- devlink_params_publish(common->devlink);
}
for (i = 1; i <= common->port_num; i++) {
@@ -2463,7 +2461,7 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
}
devlink_port_type_eth_set(dl_port, port->ndev);
}
-
+ devlink_register(common->devlink);
return ret;
dl_port_unreg:
@@ -2474,7 +2472,6 @@ dl_port_unreg:
devlink_port_unregister(dl_port);
}
dl_unreg:
- devlink_unregister(common->devlink);
devlink_free(common->devlink);
return ret;
}
@@ -2485,6 +2482,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
struct am65_cpsw_port *port;
int i;
+ devlink_unregister(common->devlink);
+
for (i = 1; i <= common->port_num; i++) {
port = am65_common_get_port(common, i);
dl_port = &port->devlink_port;
@@ -2493,13 +2492,11 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
}
if (!AM65_CPSW_IS_CPSW2G(common) &&
- IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
- devlink_params_unpublish(common->devlink);
- devlink_params_unregister(common->devlink, am65_cpsw_devlink_params,
+ IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
+ devlink_params_unregister(common->devlink,
+ am65_cpsw_devlink_params,
ARRAY_SIZE(am65_cpsw_devlink_params));
- }
- devlink_unregister(common->devlink);
devlink_free(common->devlink);
}
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 02d4e51f7306..7449436fc87c 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1112,7 +1112,7 @@ static int cpmac_probe(struct platform_device *pdev)
priv->dev = dev;
priv->ring_size = 64;
priv->msg_enable = netif_msg_init(debug_level, 0xff);
- memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
+ eth_hw_addr_set(dev, pdata->dev_addr);
snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
mdio_bus_id, phy_id);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 66f7ddd9b1f9..33142d505fc8 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -985,7 +985,7 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
flags, vid);
memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
- memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, priv->mac_addr);
for_each_slave(priv, cpsw_set_slave_mac, priv);
pm_runtime_put(cpsw->dev);
@@ -1460,7 +1460,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
priv_sl2->mac_addr);
}
- memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, priv_sl2->mac_addr);
priv_sl2->emac_port = 1;
cpsw->slaves[1].ndev = ndev;
@@ -1639,7 +1639,7 @@ static int cpsw_probe(struct platform_device *pdev)
dev_info(dev, "Random MACID = %pM\n", priv->mac_addr);
}
- memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, priv->mac_addr);
cpsw->slaves[0].ndev = ndev;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 204b4826303c..279e261e4720 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1000,7 +1000,7 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
flags, vid);
ether_addr_copy(priv->mac_addr, addr->sa_data);
- ether_addr_copy(ndev->dev_addr, priv->mac_addr);
+ eth_hw_addr_set(ndev, priv->mac_addr);
cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv);
pm_runtime_put(cpsw->dev);
@@ -1401,7 +1401,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
dev_info(cpsw->dev, "Random MACID = %pM\n",
priv->mac_addr);
}
- ether_addr_copy(ndev->dev_addr, slave_data->mac_addr);
+ eth_hw_addr_set(ndev, slave_data->mac_addr);
ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
cpsw->slaves[i].ndev = ndev;
@@ -1810,7 +1810,6 @@ static int cpsw_register_devlink(struct cpsw_common *cpsw)
dl_priv = devlink_priv(cpsw->devlink);
dl_priv->cpsw = cpsw;
- devlink_register(cpsw->devlink);
ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params,
ARRAY_SIZE(cpsw_devlink_params));
if (ret) {
@@ -1818,21 +1817,19 @@ static int cpsw_register_devlink(struct cpsw_common *cpsw)
goto dl_unreg;
}
- devlink_params_publish(cpsw->devlink);
+ devlink_register(cpsw->devlink);
return ret;
dl_unreg:
- devlink_unregister(cpsw->devlink);
devlink_free(cpsw->devlink);
return ret;
}
static void cpsw_unregister_devlink(struct cpsw_common *cpsw)
{
- devlink_params_unpublish(cpsw->devlink);
+ devlink_unregister(cpsw->devlink);
devlink_params_unregister(cpsw->devlink, cpsw_devlink_params,
ARRAY_SIZE(cpsw_devlink_params));
- devlink_unregister(cpsw->devlink);
devlink_free(cpsw->devlink);
}
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 43222a34cba0..dc70a6bfaa6a 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -669,10 +669,10 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
goto mux_fail;
}
- parent_names = devm_kzalloc(cpts->dev, (sizeof(char *) * num_parents),
- GFP_KERNEL);
+ parent_names = devm_kcalloc(cpts->dev, num_parents,
+ sizeof(*parent_names), GFP_KERNEL);
- mux_table = devm_kzalloc(cpts->dev, sizeof(*mux_table) * num_parents,
+ mux_table = devm_kcalloc(cpts->dev, num_parents, sizeof(*mux_table),
GFP_KERNEL);
if (!mux_table || !parent_names) {
ret = -ENOMEM;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index e8291d848839..2d2dcf70563f 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1132,7 +1132,7 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
/* Store mac addr in priv and rx channel and set it in EMAC hw */
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
- memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, sa->sa_data);
/* MAC address is configured only after the interface is enabled. */
if (netif_running(ndev)) {
@@ -1402,7 +1402,6 @@ static int match_first_device(struct device *dev, const void *data)
static int emac_dev_open(struct net_device *ndev)
{
struct device *emac_dev = &ndev->dev;
- u32 cnt;
struct resource *res;
int q, m, ret;
int res_num = 0, irq_num = 0;
@@ -1420,8 +1419,7 @@ static int emac_dev_open(struct net_device *ndev)
}
netif_carrier_off(ndev);
- for (cnt = 0; cnt < ETH_ALEN; cnt++)
- ndev->dev_addr[cnt] = priv->mac_addr[cnt];
+ eth_hw_addr_set(ndev, priv->mac_addr);
/* Configuration items */
priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
@@ -1899,7 +1897,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr);
if (!rc)
- ether_addr_copy(ndev->dev_addr, priv->mac_addr);
+ eth_hw_addr_set(ndev, priv->mac_addr);
if (!is_valid_ether_addr(priv->mac_addr)) {
/* Use random MAC if still none obtained. */
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index eda2961c0fe2..b818e4579f6f 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -2028,16 +2028,16 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
if (is_valid_ether_addr(efuse_mac_addr))
- ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
+ eth_hw_addr_set(ndev, efuse_mac_addr);
else
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
devm_iounmap(dev, efuse);
devm_release_mem_region(dev, res.start, size);
} else {
- ret = of_get_mac_address(node_interface, ndev->dev_addr);
+ ret = of_get_ethdev_address(node_interface, ndev);
if (ret)
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
}
ret = of_property_read_string(node_interface, "rx-channel",
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 77c448ad67ce..741c42c6a417 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -184,7 +184,7 @@ static void tlan_print_list(struct tlan_list *, char *, int);
static void tlan_read_and_clear_stats(struct net_device *, int);
static void tlan_reset_adapter(struct net_device *);
static void tlan_finish_reset(struct net_device *);
-static void tlan_set_mac(struct net_device *, int areg, char *mac);
+static void tlan_set_mac(struct net_device *, int areg, const char *mac);
static void __tlan_phy_print(struct net_device *);
static void tlan_phy_print(struct net_device *);
@@ -817,6 +817,7 @@ static int tlan_init(struct net_device *dev)
int err;
int i;
struct tlan_priv *priv;
+ u8 addr[ETH_ALEN];
priv = netdev_priv(dev);
@@ -842,7 +843,7 @@ static int tlan_init(struct net_device *dev)
for (i = 0; i < ETH_ALEN; i++)
err |= tlan_ee_read_byte(dev,
(u8) priv->adapter->addr_ofs + i,
- (u8 *) &dev->dev_addr[i]);
+ addr + i);
if (err) {
pr_err("%s: Error reading MAC from eeprom: %d\n",
dev->name, err);
@@ -850,11 +851,12 @@ static int tlan_init(struct net_device *dev)
/* Olicom OC-2325/OC-2326 have the address byte-swapped */
if (priv->adapter->addr_ofs == 0xf8) {
for (i = 0; i < ETH_ALEN; i += 2) {
- char tmp = dev->dev_addr[i];
- dev->dev_addr[i] = dev->dev_addr[i + 1];
- dev->dev_addr[i + 1] = tmp;
+ char tmp = addr[i];
+ addr[i] = addr[i + 1];
+ addr[i + 1] = tmp;
}
}
+ eth_hw_addr_set(dev, addr);
netif_carrier_off(dev);
@@ -2346,7 +2348,7 @@ tlan_finish_reset(struct net_device *dev)
*
**************************************************************/
-static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
+static void tlan_set_mac(struct net_device *dev, int areg, const char *mac)
{
int i;
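tlan's Olicom quirk un-byte-swaps the EEPROM address by exchanging adjacent bytes; operating on the scratch buffer, the open-coded three-line exchange could equally use the kernel's swap() helper. An equivalent form (sketch; swap() lives in linux/kernel.h on kernels of this vintage):

	for (i = 0; i < ETH_ALEN; i += 2)
		swap(addr[i], addr[i + 1]);	/* OC-2325/OC-2326 byte order */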
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 55e652624bd7..3dbfb1b20649 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1477,7 +1477,7 @@ int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card)
__func__, status);
return -EINVAL;
}
- memcpy(netdev->dev_addr, &v1, ETH_ALEN);
+ eth_hw_addr_set(netdev, (u8 *)&v1);
if (card->vlan_required) {
netdev->hard_header_len += VLAN_HLEN;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 66d4e024d11e..f50f9a43d3ea 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -1296,7 +1296,7 @@ spider_net_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, addr->sa_data);
/* switch off GMACTPE and GMACRPE */
regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 52245ac60fc7..ce38f7515225 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -708,7 +708,7 @@ static int tc35815_read_plat_dev_addr(struct net_device *dev)
lp->pci_dev, tc35815_mac_match);
if (pd) {
if (pd->platform_data)
- memcpy(dev->dev_addr, pd->platform_data, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->platform_data);
put_device(pd);
return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV;
}
@@ -725,6 +725,7 @@ static int tc35815_init_dev_addr(struct net_device *dev)
{
struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr;
+ u8 addr[ETH_ALEN];
int i;
while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
@@ -735,9 +736,10 @@ static int tc35815_init_dev_addr(struct net_device *dev)
while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
;
data = tc_readl(&tr->PROM_Data);
- dev->dev_addr[i] = data & 0xff;
- dev->dev_addr[i+1] = data >> 8;
+ addr[i] = data & 0xff;
+ addr[i+1] = data >> 8;
}
+ eth_hw_addr_set(dev, addr);
if (!is_valid_ether_addr(dev->dev_addr))
return tc35815_read_plat_dev_addr(dev);
return 0;
@@ -1859,7 +1861,8 @@ static struct net_device_stats *tc35815_get_stats(struct net_device *dev)
return &dev->stats;
}
-static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr)
+static void tc35815_set_cam_entry(struct net_device *dev, int index,
+ const unsigned char *addr)
{
struct tc35815_local *lp = netdev_priv(dev);
struct tc35815_regs __iomem *tr =
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 3b73a9c55a5a..509c5e9b29df 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -899,6 +899,7 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
struct net_device *dev;
struct rhine_private *rp;
int i, rc, phy_id;
+ u8 addr[ETH_ALEN];
const char *name;
/* this should always be supported */
@@ -933,7 +934,8 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
rhine_hw_init(dev, pioaddr);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
+ addr[i] = ioread8(ioaddr + StationAddr + i);
+ eth_hw_addr_set(dev, addr);
if (!is_valid_ether_addr(dev->dev_addr)) {
/* Report it and use a random ethernet address instead */
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 4b9c30f735b5..be2b992f24d9 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2767,6 +2767,7 @@ static int velocity_probe(struct device *dev, int irq,
struct velocity_info *vptr;
struct mac_regs __iomem *regs;
int ret = -ENOMEM;
+ u8 addr[ETH_ALEN];
/* FIXME: this driver, like almost all other ethernet drivers,
* can support more than MAX_UNITS.
@@ -2820,7 +2821,8 @@ static int velocity_probe(struct device *dev, int irq,
mac_wol_reset(regs);
for (i = 0; i < 6; i++)
- netdev->dev_addr[i] = readb(&regs->PAR[i]);
+ addr[i] = readb(&regs->PAR[i]);
+ eth_hw_addr_set(netdev, addr);
velocity_get_options(&vptr->options, velocity_nics);
diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c
index 2b84848dc26a..7779a36da3c8 100644
--- a/drivers/net/ethernet/wiznet/w5100-spi.c
+++ b/drivers/net/ethernet/wiznet/w5100-spi.c
@@ -463,7 +463,9 @@ static int w5100_spi_probe(struct spi_device *spi)
static int w5100_spi_remove(struct spi_device *spi)
{
- return w5100_remove(&spi->dev);
+ w5100_remove(&spi->dev);
+
+ return 0;
}
static const struct spi_device_id w5100_spi_ids[] = {
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index f974e70a82e8..ae24d6b86803 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -985,7 +985,7 @@ static int w5100_set_macaddr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(sock_addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, sock_addr->sa_data);
w5100_write_macaddr(priv);
return 0;
}
@@ -1064,7 +1064,9 @@ static int w5100_mmio_probe(struct platform_device *pdev)
static int w5100_mmio_remove(struct platform_device *pdev)
{
- return w5100_remove(&pdev->dev);
+ w5100_remove(&pdev->dev);
+
+ return 0;
}
void *w5100_ops_priv(const struct net_device *ndev)
@@ -1155,7 +1157,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
INIT_WORK(&priv->restart_work, w5100_restart_work);
if (mac_addr)
- memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, mac_addr);
else
eth_hw_addr_random(ndev);
@@ -1210,7 +1212,7 @@ err_register:
}
EXPORT_SYMBOL_GPL(w5100_probe);
-int w5100_remove(struct device *dev)
+void w5100_remove(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct w5100_priv *priv = netdev_priv(ndev);
@@ -1226,7 +1228,6 @@ int w5100_remove(struct device *dev)
unregister_netdev(ndev);
free_netdev(ndev);
- return 0;
}
EXPORT_SYMBOL_GPL(w5100_remove);
diff --git a/drivers/net/ethernet/wiznet/w5100.h b/drivers/net/ethernet/wiznet/w5100.h
index 5d3d4b541fec..481af3b6d9e8 100644
--- a/drivers/net/ethernet/wiznet/w5100.h
+++ b/drivers/net/ethernet/wiznet/w5100.h
@@ -31,6 +31,6 @@ void *w5100_ops_priv(const struct net_device *ndev);
int w5100_probe(struct device *dev, const struct w5100_ops *ops,
int sizeof_ops_priv, const void *mac_addr, int irq,
int link_gpio);
-int w5100_remove(struct device *dev);
+void w5100_remove(struct device *dev);
extern const struct dev_pm_ops w5100_pm_ops;
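w5100_remove() can no longer fail — everything it does is unconditional teardown — so it now returns void, and the SPI and MMIO wrappers return 0 themselves to satisfy their bus-level remove prototypes, which at this point still return int. The wrapper pattern (sketch):

	static int w5100_mmio_remove(struct platform_device *pdev)
	{
		w5100_remove(&pdev->dev);	/* void: teardown cannot fail */
		return 0;			/* platform core still wants an int */
	}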
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 46aae30c4636..402d5036f266 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -472,7 +472,7 @@ static int w5300_set_macaddr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(sock_addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, sock_addr->sa_data);
w5300_write_macaddr(priv);
return 0;
}
@@ -534,7 +534,7 @@ static int w5300_hw_probe(struct platform_device *pdev)
int ret;
if (data && is_valid_ether_addr(data->mac_addr)) {
- memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, data->mac_addr);
} else {
eth_hw_addr_random(ndev);
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 463094ced104..e7065c9a8e38 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -438,7 +438,7 @@ static void temac_do_set_mac_address(struct net_device *ndev)
static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
- memcpy(ndev->dev_addr, address, ETH_ALEN);
+ eth_hw_addr_set(ndev, address);
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
temac_do_set_mac_address(ndev);
@@ -451,7 +451,7 @@ static int temac_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, addr->sa_data);
temac_do_set_mac_address(ndev);
return 0;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 871b5ec3183d..0b7606987c1e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -360,7 +360,7 @@ static void axienet_set_mac_address(struct net_device *ndev,
struct axienet_local *lp = netdev_priv(ndev);
if (address)
- memcpy(ndev->dev_addr, address, ETH_ALEN);
+ eth_hw_addr_set(ndev, address);
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index b780aad3550a..0815de581c7f 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -206,12 +206,13 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
* This function writes data from a 16-bit aligned buffer to a 32-bit aligned
* address in the EmacLite device.
*/
-static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
+static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr,
unsigned length)
{
+ const u16 *from_u16_ptr;
u32 align_buffer;
u32 *to_u32_ptr;
- u16 *from_u16_ptr, *to_u16_ptr;
+ u16 *to_u16_ptr;
to_u32_ptr = dest_ptr;
from_u16_ptr = src_ptr;
@@ -470,7 +471,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* buffers (if configured).
*/
static void xemaclite_update_address(struct net_local *drvdata,
- u8 *address_ptr)
+ const u8 *address_ptr)
{
void __iomem *addr;
u32 reg_data;
@@ -511,7 +512,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address)
if (netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
xemaclite_update_address(lp, dev->dev_addr);
return 0;
}
@@ -1157,7 +1158,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
- rc = of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr);
+ rc = of_get_ethdev_address(ofdev->dev.of_node, ndev);
if (rc) {
dev_warn(dev, "No MAC address found, using random\n");
eth_hw_addr_random(ndev);
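of_get_ethdev_address() is the net_device-aware wrapper around of_get_mac_address(): it stores the address via the dev_addr helpers instead of writing into ndev->dev_addr directly, and returns a negative error when the DT carries no usable address. A sketch of the fallback pattern, assuming a probe context like the one above:

	rc = of_get_ethdev_address(ofdev->dev.of_node, ndev);
	if (rc) {
		dev_warn(dev, "No MAC address found, using random\n");
		eth_hw_addr_random(ndev);
	}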
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index ae611e46da6a..f9587e55b842 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -671,7 +671,6 @@ static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev,
void *priv)
{
struct net_device *dev = priv;
- int i;
if (tuple->TupleDataLen != 13)
return -EINVAL;
@@ -679,8 +678,7 @@ static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev,
(tuple->TupleData[2] != 6))
return -EINVAL;
/* another try (James Lehmer's CE2 version 4.1)*/
- for (i = 2; i < 6; i++)
- dev->dev_addr[i] = tuple->TupleData[i+2];
+ dev_addr_mod(dev, 2, &tuple->TupleData[2], 4);
return 0;
};
@@ -742,11 +740,9 @@ xirc2ps_config(struct pcmcia_device * link)
len = pcmcia_get_tuple(link, 0x89, &buf);
/* data layout looks like tuple 0x22 */
if (buf && len == 8) {
- if (*buf == CISTPL_FUNCE_LAN_NODE_ID) {
- int i;
- for (i = 2; i < 6; i++)
- dev->dev_addr[i] = buf[i+2];
- } else
+ if (*buf == CISTPL_FUNCE_LAN_NODE_ID)
+ dev_addr_mod(dev, 2, &buf[2], 4);
+ else
err = -1;
}
kfree(buf);
@@ -1271,7 +1267,7 @@ struct set_address_info {
unsigned int ioaddr;
};
-static void set_address(struct set_address_info *sa_info, char *addr)
+static void set_address(struct set_address_info *sa_info, const char *addr)
{
unsigned int ioaddr = sa_info->ioaddr;
int i;
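dev_addr_mod() covers the partial update the other helpers cannot express: it rewrites a sub-range of the device address at a given offset while keeping the core's bookkeeping consistent. The byte-by-byte loops above collapse to a single call:

	/* was: for (i = 2; i < 6; i++) dev->dev_addr[i] = tuple->TupleData[i + 2]; */
	dev_addr_mod(dev, 2, &tuple->TupleData[2], 4);	/* offset 2, 4 bytes */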
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 931494cc1c39..65fdad1107fc 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1103,10 +1103,9 @@ static int init_queues(struct port *port)
return -ENOMEM;
}
- if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
- &port->desc_tab_phys)))
+ port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys);
+ if (!port->desc_tab)
return -ENOMEM;
- memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
@@ -1524,7 +1523,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
port->plat = plat;
npe_port_tab[NPE_ID(port->id)] = port;
- memcpy(ndev->dev_addr, plat->hwaddr, ETH_ALEN);
+ eth_hw_addr_set(ndev, plat->hwaddr);
platform_set_drvdata(pdev, ndev);
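dma_pool_zalloc() is dma_pool_alloc() with zeroing folded in, so the separate memset() of the descriptor table disappears along with the hard-to-read assignment-inside-if. The resulting shape, with the names from this driver:

	port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL,
					 &port->desc_tab_phys);
	if (!port->desc_tab)
		return -ENOMEM;	/* already zeroed; no memset() needed */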
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 6d1e3f49a3d3..b584ffe38ad6 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -1028,7 +1028,7 @@ static void dfx_bus_config_check(DFX_board_t *bp)
* or read adapter MAC address
*
* Assumptions:
- * Memory allocated from pci_alloc_consistent() call is physically
+ * Memory allocated from dma_alloc_coherent() call is physically
* contiguous, locked memory.
*
* Side Effects:
@@ -1117,7 +1117,7 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
* dfx_ctl_set_mac_address.
*/
- memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
+ dev_addr_set(dev, bp->factory_mac_addr);
if (dfx_bus_tc)
board_name = "DEFTA";
if (dfx_bus_eisa)
@@ -1474,7 +1474,7 @@ static int dfx_open(struct net_device *dev)
* address.
*/
- memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
+ dev_addr_set(dev, bp->factory_mac_addr);
/* Clear local unicast/multicast address tables and counts */
@@ -2379,7 +2379,7 @@ static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
/* Copy unicast address to driver-maintained structs and update count */
- memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN); /* update device struct */
+ dev_addr_set(dev, p_sockaddr->sa_data); /* update device struct */
memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN); /* update driver struct */
bp->uc_count = 1;
@@ -3249,7 +3249,7 @@ static void dfx_rcv_queue_process(
* is contained in a single physically contiguous buffer
* in which the virtual address of the start of packet
* (skb->data) can be converted to a physical address
- * by using pci_map_single().
+ * by using dma_map_single().
*
* Since the adapter architecture requires a three byte
* packet request header to prepend the start of packet,
@@ -3402,7 +3402,7 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
* skb->data.
* 6. The physical address of the start of packet
* can be determined from the virtual address
- * by using pci_map_single() and is only 32-bits
+ * by using dma_map_single() and is only 32-bits
* wide.
*/
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
index 0de2c4552f5e..3a6b08eb5e1b 100644
--- a/drivers/net/fddi/defza.c
+++ b/drivers/net/fddi/defza.c
@@ -1380,7 +1380,7 @@ static int fza_probe(struct device *bdev)
goto err_out_irq;
fza_reads(&init->hw_addr, &hw_addr, sizeof(hw_addr));
- memcpy(dev->dev_addr, &hw_addr, FDDI_K_ALEN);
+ dev_addr_set(dev, &hw_addr);
fza_reads(&init->rom_rev, &rom_rev, sizeof(rom_rev));
fza_reads(&init->fw_rev, &fw_rev, sizeof(fw_rev));
diff --git a/drivers/net/fddi/skfp/h/smc.h b/drivers/net/fddi/skfp/h/smc.h
index 3814a2ff64ae..b0e6ce0d893e 100644
--- a/drivers/net/fddi/skfp/h/smc.h
+++ b/drivers/net/fddi/skfp/h/smc.h
@@ -470,7 +470,7 @@ void card_stop(struct s_smc *smc);
void init_board(struct s_smc *smc, u_char *mac_addr);
int init_fplus(struct s_smc *smc);
void init_plc(struct s_smc *smc);
-int init_smt(struct s_smc *smc, u_char *mac_addr);
+int init_smt(struct s_smc *smc, const u_char *mac_addr);
void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index c5cb421f9890..2b6a607ac0b7 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -78,6 +78,7 @@ static const char * const boot_msg =
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/fddidevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
@@ -433,7 +434,7 @@ static int skfp_driver_init(struct net_device *dev)
}
read_address(smc, NULL);
pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
- memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
+ eth_hw_addr_set(dev, smc->hw.fddi_canon_addr.a);
smt_reset_defaults(smc, 0);
@@ -500,7 +501,7 @@ static int skfp_open(struct net_device *dev)
* address.
*/
read_address(smc, NULL);
- memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
+ eth_hw_addr_set(dev, smc->hw.fddi_canon_addr.a);
init_smt(smc, NULL);
smt_online(smc, 1);
@@ -924,7 +925,7 @@ static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
unsigned long Flags;
- memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
+ dev_addr_set(dev, p_sockaddr->sa_data);
spin_lock_irqsave(&bp->DriverLock, Flags);
ResetAdapter(smc);
spin_unlock_irqrestore(&bp->DriverLock, Flags);
@@ -1012,7 +1013,7 @@ static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __
* is contained in a single physically contiguous buffer
* in which the virtual address of the start of packet
* (skb->data) can be converted to a physical address
- * by using pci_map_single().
+ * by using dma_map_single().
*
* We have an internal queue for packets we can not send
* immediately. Packets in this queue can be given to the
diff --git a/drivers/net/fddi/skfp/smtinit.c b/drivers/net/fddi/skfp/smtinit.c
index c9898c83fe30..8b172c195685 100644
--- a/drivers/net/fddi/skfp/smtinit.c
+++ b/drivers/net/fddi/skfp/smtinit.c
@@ -19,7 +19,7 @@
#include "h/fddi.h"
#include "h/smc.h"
-void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
+void init_fddi_driver(struct s_smc *smc, const u_char *mac_addr);
/* define global debug variable */
#if defined(DEBUG) && !defined(DEBUG_BRD)
@@ -57,7 +57,7 @@ static void set_oem_spec_val(struct s_smc *smc)
/*
* Init SMT
*/
-int init_smt(struct s_smc *smc, u_char *mac_addr)
+int init_smt(struct s_smc *smc, const u_char *mac_addr)
/* u_char *mac_addr; canonical address or NULL */
{
int p ;
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index 065bb0a40b1d..704e949484d0 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -137,7 +137,8 @@ static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
epbh->ring = NULL;
}
-void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
+void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, const u8 *mac_addr,
+ u32 mtu)
{
union ep_buffer_info *info = epbh->info;
u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h
index b4608ea2a2d5..997c7b37a402 100644
--- a/drivers/net/fjes/fjes_hw.h
+++ b/drivers/net/fjes/fjes_hw.h
@@ -330,7 +330,7 @@ int fjes_hw_register_buff_addr(struct fjes_hw *, int,
int fjes_hw_unregister_buff_addr(struct fjes_hw *, int);
void fjes_hw_init_command_registers(struct fjes_hw *,
struct fjes_device_command_param *);
-void fjes_hw_setup_epbuf(struct epbuf_handler *, u8 *, u32);
+void fjes_hw_setup_epbuf(struct epbuf_handler *, const u8 *, u32);
int fjes_hw_raise_interrupt(struct fjes_hw *, int, enum REG_ICTL_MASK);
void fjes_hw_set_irqmask(struct fjes_hw *, enum REG_ICTL_MASK, bool);
u32 fjes_hw_capture_interrupt_status(struct fjes_hw *);
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 185c8a398681..b06c17ac8d4e 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1203,6 +1203,7 @@ static int fjes_probe(struct platform_device *plat_dev)
struct net_device *netdev;
struct resource *res;
struct fjes_hw *hw;
+ u8 addr[ETH_ALEN];
int err;
err = -ENOMEM;
@@ -1266,12 +1267,13 @@ static int fjes_probe(struct platform_device *plat_dev)
goto err_free_control_wq;
/* setup MAC address (02:00:00:00:00:[epid])*/
- netdev->dev_addr[0] = 2;
- netdev->dev_addr[1] = 0;
- netdev->dev_addr[2] = 0;
- netdev->dev_addr[3] = 0;
- netdev->dev_addr[4] = 0;
- netdev->dev_addr[5] = hw->my_epid; /* EPID */
+ addr[0] = 2;
+ addr[1] = 0;
+ addr[2] = 0;
+ addr[3] = 0;
+ addr[4] = 0;
+ addr[5] = hw->my_epid; /* EPID */
+ eth_hw_addr_set(netdev, addr);
err = register_netdev(netdev);
if (err)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 30e0a10595a1..24e5c54d06c1 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -539,7 +539,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
mtu = dst_mtu(&rt->dst);
}
- rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(iph->tot_len)) {
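Calling rt->dst.ops->update_pmtu directly oopses when the dst installs no update_pmtu operation; the helper checks before calling. Its definition is roughly:

static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst->ops->update_pmtu)
		dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}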
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 6192244b304a..f4e8793e995d 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -288,7 +288,7 @@ static int sp_set_mac_address(struct net_device *dev, void *addr)
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
- memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
+ __dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
@@ -317,7 +317,7 @@ static void sp_setup(struct net_device *dev)
/* Only activated in AX.25 mode */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
dev->flags = 0;
}
@@ -726,7 +726,7 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
}
netif_tx_lock_bh(dev);
- memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
+ __dev_addr_set(dev, &addr, AX25_ADDR_LEN);
netif_tx_unlock_bh(dev);
err = 0;
break;
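The AX.25 drivers use two flavours of the new helper: dev_addr_set() for a full-length copy and __dev_addr_set() where the write happens under the netif locks with an explicit length. The former is essentially shorthand for the latter:

static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
{
	__dev_addr_set(dev, addr, dev->addr_len);
}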
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index f4843f9672c1..441da03c23ee 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -48,6 +48,7 @@ config BPQETHER
config DMASCC
tristate "High-speed (DMA) SCC driver for AX.25"
depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API
+ depends on VIRT_TO_BUS
help
This is a driver for high-speed SCC boards, i.e. those supporting
DMA on one port. You usually use those boards to connect your
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 775dcf4ebde5..a03d0b474641 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -623,16 +623,16 @@ static int receive(struct net_device *dev, int cnt)
/* --------------------------------------------------------------------- */
-#ifdef __i386__
+#if defined(__i386__) && !defined(CONFIG_UML)
#include <asm/msr.h>
#define GETTICK(x) \
({ \
if (boot_cpu_has(X86_FEATURE_TSC)) \
x = (unsigned int)rdtsc(); \
})
-#else /* __i386__ */
+#else /* __i386__ && !CONFIG_UML */
#define GETTICK(x)
-#endif /* __i386__ */
+#endif /* __i386__ && !CONFIG_UML */
static void epp_bh(struct work_struct *work)
{
@@ -791,7 +791,7 @@ static int baycom_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *sa = (struct sockaddr *)addr;
/* addr is an AX.25 shifted ASCII mac address */
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
@@ -1159,7 +1159,7 @@ static void baycom_probe(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */
dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &null_ax25_address, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&null_ax25_address);
dev->tx_queue_len = 16;
/* New style flags */
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index d967b0748773..30af0081e2be 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -302,7 +302,7 @@ static int bpq_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = (struct sockaddr *)addr;
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
@@ -457,9 +457,6 @@ static void bpq_setup(struct net_device *dev)
dev->netdev_ops = &bpq_netdev_ops;
dev->needs_free_netdev = true;
- memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
-
dev->flags = 0;
dev->features = NETIF_F_LLTX; /* Allow recursion */
@@ -472,6 +469,8 @@ static void bpq_setup(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN;
dev->addr_len = AX25_ADDR_LEN;
+ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
}
/*
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index b50b7fafd8d6..7e527499d3ad 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -426,7 +426,7 @@ static void __init dev_setup(struct net_device *dev)
dev->addr_len = AX25_ADDR_LEN;
dev->tx_queue_len = 64;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
}
static const struct net_device_ops scc_netdev_ops = {
@@ -956,8 +956,7 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
static int scc_set_mac_address(struct net_device *dev, void *sa)
{
- memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
- dev->addr_len);
+ dev_addr_set(dev, ((struct sockaddr *)sa)->sa_data);
return 0;
}
@@ -973,7 +972,7 @@ static inline void tx_on(struct scc_priv *priv)
flags = claim_dma_lock();
set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
set_dma_addr(priv->param.dma,
- (int) priv->tx_buf[priv->tx_tail] + n);
+ virt_to_bus(priv->tx_buf[priv->tx_tail]) + n);
set_dma_count(priv->param.dma,
priv->tx_len[priv->tx_tail] - n);
release_dma_lock(flags);
@@ -1020,7 +1019,7 @@ static inline void rx_on(struct scc_priv *priv)
flags = claim_dma_lock();
set_dma_mode(priv->param.dma, DMA_MODE_READ);
set_dma_addr(priv->param.dma,
- (int) priv->rx_buf[priv->rx_head]);
+ virt_to_bus(priv->rx_buf[priv->rx_head]));
set_dma_count(priv->param.dma, BUF_SIZE);
release_dma_lock(flags);
enable_dma(priv->param.dma);
@@ -1233,7 +1232,7 @@ static void special_condition(struct scc_priv *priv, int rc)
if (priv->param.dma >= 0) {
flags = claim_dma_lock();
set_dma_addr(priv->param.dma,
- (int) priv->rx_buf[priv->rx_head]);
+ virt_to_bus(priv->rx_buf[priv->rx_head]));
set_dma_count(priv->param.dma, BUF_SIZE);
release_dma_lock(flags);
} else {
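Casting a kernel virtual address to int only ever worked where virtual and bus addresses happen to coincide; the ISA DMA controller needs a bus address, hence virt_to_bus() and the VIRT_TO_BUS dependency added to Kconfig above. The corrected receive setup, condensed:

	flags = claim_dma_lock();
	set_dma_mode(priv->param.dma, DMA_MODE_READ);
	set_dma_addr(priv->param.dma, virt_to_bus(priv->rx_buf[priv->rx_head]));
	set_dma_count(priv->param.dma, BUF_SIZE);
	release_dma_lock(flags);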
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 5805cfc83854..b0edb91bb10a 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -415,7 +415,7 @@ static int hdlcdrv_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *sa = (struct sockaddr *)addr;
/* addr is an AX.25 shifted ASCII mac address */
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
@@ -675,7 +675,7 @@ static void hdlcdrv_setup(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */
dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
dev->tx_queue_len = 16;
}
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 8666110bec55..867252a0247b 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -344,7 +344,7 @@ static int ax_set_mac_address(struct net_device *dev, void *addr)
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
- memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
+ __dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
@@ -647,7 +647,7 @@ static void ax_setup(struct net_device *dev)
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
dev->flags = IFF_BROADCAST | IFF_MULTICAST;
}
@@ -850,7 +850,7 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
}
netif_tx_lock_bh(dev);
- memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
+ __dev_addr_set(dev, addr, AX25_ADDR_LEN);
netif_tx_unlock_bh(dev);
err = 0;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index e0bb131a33d7..3d59dac063ac 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1563,9 +1563,6 @@ static void scc_net_setup(struct net_device *dev)
dev->netdev_ops = &scc_netdev_ops;
dev->header_ops = &ax25_header_ops;
- memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
-
dev->flags = 0;
dev->type = ARPHRD_AX25;
@@ -1573,6 +1570,8 @@ static void scc_net_setup(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN;
dev->addr_len = AX25_ADDR_LEN;
+ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
}
/* ----> open network device <---- */
@@ -1951,7 +1950,7 @@ static int scc_net_siocdevprivate(struct net_device *dev,
static int scc_net_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = (struct sockaddr *) addr;
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 6ddacbdb224b..6376b8485976 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1063,7 +1063,7 @@ static int yam_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *sa = (struct sockaddr *) addr;
/* addr is an AX.25 shifted ASCII mac address */
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
@@ -1107,7 +1107,7 @@ static void yam_setup(struct net_device *dev)
dev->mtu = AX25_MTU;
dev->addr_len = AX25_ADDR_LEN;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
}
static int __init yam_init_driver(void)
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 7661dbb31162..16105292b140 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -502,6 +502,7 @@ static unsigned int write_eeprom(struct rr_private *rrpriv,
static int rr_init(struct net_device *dev)
{
+ u8 addr[HIPPI_ALEN] __aligned(4);
struct rr_private *rrpriv;
struct rr_regs __iomem *regs;
u32 sram_size, rev;
@@ -537,10 +538,11 @@ static int rr_init(struct net_device *dev)
* other method I've seen. -VAL
*/
- *(__be16 *)(dev->dev_addr) =
+ *(__be16 *)(addr) =
htons(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA)));
- *(__be32 *)(dev->dev_addr+2) =
+ *(__be32 *)(addr+2) =
htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4])));
+ dev_addr_set(dev, addr);
printk(" MAC: %pM\n", dev->dev_addr);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 382bebc2420d..7e66ae1d2a59 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -803,6 +803,7 @@ void netvsc_linkstatus_callback(struct net_device *net,
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
+/* This function should only be called after skb_record_rx_queue() */
static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
int rc;
@@ -2536,7 +2537,7 @@ static int netvsc_probe(struct hv_device *dev,
goto rndis_failed;
}
- memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
+ eth_hw_addr_set(net, device_info->mac_adr);
/* We must get rtnl lock before scheduling nvdev->subchan_work,
* otherwise netvsc_subchan_work() can get rtnl lock first and wait
@@ -2742,8 +2743,7 @@ static int netvsc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
/* Avoid Bonding master dev with same MAC registering as VF */
- if ((event_dev->priv_flags & IFF_BONDING) &&
- (event_dev->flags & IFF_MASTER))
+ if (netif_is_bond_master(event_dev))
return NOTIFY_DONE;
switch (event) {
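netif_is_bond_master() replaces the open-coded flag pair with the canonical helper; it amounts to the same two tests:

static inline bool netif_is_bond_master(const struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}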
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 3a2824f24caa..ece6ff6049f6 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2938,9 +2938,7 @@ static int ca8210_dev_com_init(struct ca8210_priv *priv)
*/
static void ca8210_dev_com_clear(struct ca8210_priv *priv)
{
- flush_workqueue(priv->mlme_workqueue);
destroy_workqueue(priv->mlme_workqueue);
- flush_workqueue(priv->irq_workqueue);
destroy_workqueue(priv->irq_workqueue);
}
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index e9258a9f3702..2c319dd27f29 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -31,6 +31,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
+#include <linux/netfilter_netdev.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
@@ -75,8 +76,10 @@ static void ifb_ri_tasklet(struct tasklet_struct *t)
}
while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
+ /* Skip tc and netfilter to prevent a redirection loop. */
skb->redirected = 0;
skb->tc_skip_classify = 1;
+ nf_skip_egress(skb, true);
u64_stats_update_begin(&txp->tsync);
txp->tx_packets++;
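With the new netfilter egress hook, a packet leaving ifb could be redirected straight back to ifb by an egress rule, looping forever just as it could through tc; nf_skip_egress() marks the skb the same way tc_skip_classify does. The helper is roughly:

static inline void nf_skip_egress(struct sk_buff *skb, bool skip)
{
#ifdef CONFIG_NETFILTER_EGRESS
	skb->nf_skip_egress = skip;
#endif
}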
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index 8f99cfa14680..d037682fb7ad 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -4,6 +4,7 @@ config QCOM_IPA
depends on ARCH_QCOM || COMPILE_TEST
depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
select QCOM_MDT_LOADER if ARCH_QCOM
+ select QCOM_SCM
select QCOM_QMI_HELPERS
help
Choose Y or M here to include support for the Qualcomm
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index c0b21a5580d5..1d2f4e7d7324 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -579,7 +579,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
* world but keep using the physical-dev address for the outgoing
* packets.
*/
- memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, phy_dev->dev_addr);
dev->priv_flags |= IFF_NO_RX_HANDLER;
@@ -787,7 +787,7 @@ static int ipvlan_device_event(struct notifier_block *unused,
case NETDEV_CHANGEADDR:
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr);
+ eth_hw_addr_set(ipvlan->dev, dev->dev_addr);
call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev);
}
break;
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 1cedb634f4f7..ef02f2cf5ce1 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -162,7 +162,7 @@ static int ipvtap_device_event(struct notifier_block *unused,
devt = MKDEV(MAJOR(ipvtap_major), vlantap->tap.minor);
classdev = device_create(&ipvtap_class, &dev->dev, devt,
- dev, tap_name);
+ dev, "%s", tap_name);
if (IS_ERR(classdev)) {
tap_free_minor(ipvtap_major, &vlantap->tap);
return notifier_from_errno(PTR_ERR(classdev));
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 93dc48b9b4f2..18b6dba9394e 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3614,7 +3614,7 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
dev_uc_del(real_dev, dev->dev_addr);
out:
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
/* If h/w offloading is available, propagate to the device */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 35f46ad040b0..6189acb33973 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -202,7 +202,7 @@ static void macvlan_hash_change_addr(struct macvlan_dev *vlan,
/* Now that we are unhashed it is safe to change the device
* address without confusing packet delivery.
*/
- memcpy(vlan->dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(vlan->dev, addr);
macvlan_hash_add(vlan);
}
@@ -707,7 +707,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
if (!(dev->flags & IFF_UP)) {
/* Just copy in the new address */
- ether_addr_copy(dev->dev_addr, addr);
+ eth_hw_addr_set(dev, addr);
} else {
/* Rehash and update the device filters */
if (macvlan_addr_busy(vlan->port, addr))
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 694e2f5dbbe5..6b12902a803f 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -169,7 +169,7 @@ static int macvtap_device_event(struct notifier_block *unused,
devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor);
classdev = device_create(&macvtap_class, &dev->dev, devt,
- dev, tap_name);
+ dev, "%s", tap_name);
if (IS_ERR(classdev)) {
tap_free_minor(macvtap_major, &vlantap->tap);
return notifier_from_errno(PTR_ERR(classdev));
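Both tap drivers were passing the interface name straight into device_create(), whose trailing arguments form a printf-style format; a crafted name containing '%' would be interpreted rather than printed. Supplying a literal "%s" keeps the name as data:

	/* tap_name is externally influenced; never use it as the format string */
	classdev = device_create(&macvtap_class, &dev->dev, devt,
				 dev, "%s", tap_name);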
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 0d7d3e15d2f0..5f4cd24a0241 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -207,6 +207,7 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
{
struct ipq4019_mdio_data *priv;
struct mii_bus *bus;
+ struct resource *res;
int ret;
bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv));
@@ -224,7 +225,10 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
return PTR_ERR(priv->mdio_clk);
/* The platform resource is provided on the chipset IPQ5018 */
- priv->eth_ldo_rdy = devm_platform_ioremap_resource(pdev, 1);
+ /* This resource is optional */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res)
+ priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res);
bus->name = "ipq4019_mdio";
bus->read = ipq4019_mdio_read;
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 1ee592d3eae4..17f98f609ec8 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -134,8 +134,9 @@ static int mscc_miim_reset(struct mii_bus *bus)
static int mscc_miim_probe(struct platform_device *pdev)
{
- struct mii_bus *bus;
struct mscc_miim_dev *dev;
+ struct resource *res;
+ struct mii_bus *bus;
int ret;
bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*dev));
@@ -156,10 +157,14 @@ static int mscc_miim_probe(struct platform_device *pdev)
return PTR_ERR(dev->regs);
}
- dev->phy_regs = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(dev->phy_regs)) {
- dev_err(&pdev->dev, "Unable to map internal phy registers\n");
- return PTR_ERR(dev->phy_regs);
+ /* This resource is optional */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ dev->phy_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->phy_regs)) {
+ dev_err(&pdev->dev, "Unable to map internal phy registers\n");
+ return PTR_ERR(dev->phy_regs);
+ }
}
ret = of_mdiobus_register(bus, pdev->dev.of_node);
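devm_platform_ioremap_resource() treats a missing resource as an error, which is wrong when the resource is genuinely optional (here it exists only on some chipsets). The pattern used in both MDIO drivers is to look the resource up first and map it only when present:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		dev->phy_regs = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(dev->phy_regs))
			return PTR_ERR(dev->phy_regs);
	}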
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index d127eb6e9257..aaa628f859fd 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -321,7 +321,7 @@ static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
/* Start MHI channels */
err = mhi_prepare_for_transfer(mhi_dev);
if (err)
- goto out_err;
+ return err;
/* Number of transfer descriptors determines size of the queue */
mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
@@ -331,10 +331,6 @@ static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
return err;
return 0;
-
-out_err:
- free_netdev(ndev);
- return err;
}
static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
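The deleted out_err label was a latent double free: ndev is allocated by the caller of mhi_net_newlink(), and that caller already frees it when newlink fails, so freeing it here as well released the same netdev twice. A sketch of the fixed error path, where ownership of ndev stays with the caller:

	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		return err;	/* no free_netdev(ndev) here */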
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 2a4892402ed8..86ec5aae4289 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -748,8 +748,7 @@ struct failover *net_failover_create(struct net_device *standby_dev)
failover_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
failover_dev->features |= failover_dev->hw_features;
- memcpy(failover_dev->dev_addr, standby_dev->dev_addr,
- failover_dev->addr_len);
+ dev_addr_set(failover_dev, standby_dev->dev_addr);
failover_dev->min_mtu = standby_dev->min_mtu;
failover_dev->max_mtu = standby_dev->max_mtu;
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index b2214bc9efe2..9661aca35703 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -1470,7 +1470,6 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
if (err)
goto err_devlink_free;
- devlink_register(devlink);
err = devlink_params_register(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
if (err)
@@ -1511,9 +1510,9 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
if (err)
goto err_psample_exit;
- devlink_params_publish(devlink);
- devlink_reload_enable(devlink);
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
return 0;
err_psample_exit:
@@ -1534,7 +1533,6 @@ err_params_unregister:
devlink_params_unregister(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
err_dl_unregister:
- devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
err_devlink_free:
devlink_free(devlink);
@@ -1568,15 +1566,13 @@ void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
struct devlink *devlink = priv_to_devlink(nsim_dev);
- devlink_reload_disable(devlink);
-
+ devlink_unregister(devlink);
nsim_dev_reload_destroy(nsim_dev);
nsim_bpf_dev_exit(nsim_dev);
nsim_dev_debugfs_exit(nsim_dev);
devlink_params_unregister(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
- devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
devlink_free(devlink);
}
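The netdevsim reshuffle tracks the reworked devlink lifecycle: devlink_register() is now the final step of probe, after all params, resources, and ports exist, so userspace can never observe a half-initialized instance, and devlink_set_features(devlink, DEVLINK_F_RELOAD) replaces the old reload_enable/disable pair. The intended ordering, sketched with the driver-specific calls elided:

	/* probe */
	/* ... devlink_alloc(), params, resources, ports ... */
	devlink_set_features(devlink, DEVLINK_F_RELOAD);
	devlink_register(devlink);	/* visible to userspace only now */

	/* remove: the mirror image */
	devlink_unregister(devlink);	/* first, before any teardown */
	/* ... unregister params and resources ... */
	devlink_free(devlink);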
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index a5bab614ff84..98ca6b18415e 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -428,7 +428,7 @@ static int ntb_netdev_probe(struct device *client_dev)
ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);
eth_random_addr(ndev->perm_addr);
- memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
+ dev_addr_set(ndev, ndev->perm_addr);
ndev->netdev_ops = &ntb_netdev_ops;
ndev->ethtool_ops = &ntb_ethtool_ops;
diff --git a/drivers/net/pcs/pcs-xpcs-nxp.c b/drivers/net/pcs/pcs-xpcs-nxp.c
index 984c9f7f16a8..d16fc58cd48d 100644
--- a/drivers/net/pcs/pcs-xpcs-nxp.c
+++ b/drivers/net/pcs/pcs-xpcs-nxp.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2021 NXP Semiconductors
+/* Copyright 2021 NXP
*/
#include <linux/pcs/pcs-xpcs.h>
#include "pcs-xpcs.h"
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index fb0a83dc09ac..7de631f5356f 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -666,6 +666,10 @@ int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
{
int ret;
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0);
+ if (ret < 0)
+ return ret;
+
if (enable) {
/* Enable EEE */
ret = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
@@ -673,9 +677,6 @@ int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
mult_fact_100ns << DW_VR_MII_EEE_MULT_FACT_100NS_SHIFT;
} else {
- ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0);
- if (ret < 0)
- return ret;
ret &= ~(DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
@@ -690,21 +691,28 @@ int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
if (ret < 0)
return ret;
- ret |= DW_VR_MII_EEE_TRN_LPI;
+ if (enable)
+ ret |= DW_VR_MII_EEE_TRN_LPI;
+ else
+ ret &= ~DW_VR_MII_EEE_TRN_LPI;
+
return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1, ret);
}
EXPORT_SYMBOL_GPL(xpcs_config_eee);
static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int mode)
{
- int ret;
+ int ret, mdio_ctrl;
/* For AN for C37 SGMII mode, the settings are :-
- * 1) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN)
- * 2) VR_MII_AN_CTRL Bit(3) [TX_CONFIG] = 0b (MAC side SGMII)
+ * 1) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 0b (Disable SGMII AN in case
+ it is already enabled)
+ * 2) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN)
+ * 3) VR_MII_AN_CTRL Bit(3) [TX_CONFIG] = 0b (MAC side SGMII)
* DW xPCS used with DW EQoS MAC is always MAC side SGMII.
- * 3) VR_MII_DIG_CTRL1 Bit(9) [MAC_AUTO_SW] = 1b (Automatic
+ * 4) VR_MII_DIG_CTRL1 Bit(9) [MAC_AUTO_SW] = 1b (Automatic
* speed/duplex mode change by HW after SGMII AN complete)
+ * 5) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 1b (Enable SGMII AN)
*
* Note: Since it is MAC side SGMII, there is no need to set
* SR_MII_AN_ADV. MAC side SGMII receives AN Tx Config from
@@ -712,6 +720,17 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int mode)
* between PHY and Link Partner. There is also no need to
* trigger AN restart for MAC-side SGMII.
*/
+ mdio_ctrl = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL);
+ if (mdio_ctrl < 0)
+ return mdio_ctrl;
+
+ if (mdio_ctrl & AN_CL37_EN) {
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL,
+ mdio_ctrl & ~AN_CL37_EN);
+ if (ret < 0)
+ return ret;
+ }
+
ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL);
if (ret < 0)
return ret;
@@ -736,7 +755,15 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int mode)
else
ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
- return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret);
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret);
+ if (ret < 0)
+ return ret;
+
+ if (phylink_autoneg_inband(mode))
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL,
+ mdio_ctrl | AN_CL37_EN);
+
+ return ret;
}
static int xpcs_config_2500basex(struct dw_xpcs *xpcs)
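The reworked C37 SGMII sequence disables CL37 AN up front in case it is already running, reprograms the AN and digital control registers, and re-enables AN only for in-band modes, matching the five numbered steps in the comment above. Condensed:

	mdio_ctrl = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL);
	if (mdio_ctrl & AN_CL37_EN)
		xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL,
			   mdio_ctrl & ~AN_CL37_EN);
	/* ... program DW_VR_MII_AN_CTRL and DW_VR_MII_DIG_CTRL1 ... */
	if (phylink_autoneg_inband(mode))
		xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL,
			   mdio_ctrl | AN_CL37_EN);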
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 3feee4d59030..69da011e82c8 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -86,15 +86,22 @@
#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
-#define AT803X_DEBUG_REG_0 0x00
+#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00
+#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2)
+#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2)
#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
-#define AT803X_DEBUG_REG_5 0x05
+#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
+#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
+#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
+#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
+
#define AT803X_DEBUG_REG_3C 0x3C
-#define AT803X_DEBUG_REG_3D 0x3D
+#define AT803X_DEBUG_REG_GREEN 0x3D
+#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6)
#define AT803X_DEBUG_REG_1F 0x1F
#define AT803X_DEBUG_PLL_ON BIT(2)
@@ -153,6 +160,7 @@
#define QCA8327_A_PHY_ID 0x004dd033
#define QCA8327_B_PHY_ID 0x004dd034
#define QCA8337_PHY_ID 0x004dd036
+#define QCA9561_PHY_ID 0x004dd042
#define QCA8K_PHY_ID_MASK 0xffffffff
#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
@@ -277,25 +285,25 @@ static int at803x_read_page(struct phy_device *phydev)
static int at803x_enable_rx_delay(struct phy_device *phydev)
{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0,
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0,
AT803X_DEBUG_RX_CLK_DLY_EN);
}
static int at803x_enable_tx_delay(struct phy_device *phydev)
{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, 0,
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0,
AT803X_DEBUG_TX_CLK_DLY_EN);
}
static int at803x_disable_rx_delay(struct phy_device *phydev)
{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0,
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
AT803X_DEBUG_RX_CLK_DLY_EN, 0);
}
static int at803x_disable_tx_delay(struct phy_device *phydev)
{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5,
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE,
AT803X_DEBUG_TX_CLK_DLY_EN, 0);
}
@@ -1237,7 +1245,8 @@ static int at803x_cable_test_get_status(struct phy_device *phydev,
int pair, ret;
if (phydev->phy_id == ATH9331_PHY_ID ||
- phydev->phy_id == ATH8032_PHY_ID)
+ phydev->phy_id == ATH8032_PHY_ID ||
+ phydev->phy_id == QCA9561_PHY_ID)
pair_mask = 0x3;
else
pair_mask = 0xf;
@@ -1277,7 +1286,8 @@ static int at803x_cable_test_start(struct phy_device *phydev)
phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
if (phydev->phy_id != ATH9331_PHY_ID &&
- phydev->phy_id != ATH8032_PHY_ID)
+ phydev->phy_id != ATH8032_PHY_ID &&
+ phydev->phy_id != QCA9561_PHY_ID)
phy_write(phydev, MII_CTRL1000, 0);
/* we do all the (time consuming) work later */
@@ -1293,9 +1303,9 @@ static int qca83xx_config_init(struct phy_device *phydev)
switch (switch_revision) {
case 1:
/* For 100M waveform */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_0, 0x02ea);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
/* Turn on Gigabit clock */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x68a0);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
break;
case 2:
@@ -1303,12 +1313,95 @@ static int qca83xx_config_init(struct phy_device *phydev)
fallthrough;
case 4:
phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x6860);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_5, 0x2c46);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
break;
}
+ /* QCA8327 requires a +6% DAC amplitude adjustment for 100M links.
+ * Disable it on init and enable it only at 100M speed, following
+ * the original QCA source code.
+ */
+ if (phydev->drv->phy_id == QCA8327_A_PHY_ID ||
+ phydev->drv->phy_id == QCA8327_B_PHY_ID)
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+
+ /* Following the original QCA source code, set the port to prefer master */
+ phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
+
+ return 0;
+}
+
+static void qca83xx_link_change_notify(struct phy_device *phydev)
+{
+ /* QCA8337 doesn't require the DAC amplitude adjustment */
+ if (phydev->drv->phy_id == QCA8337_PHY_ID)
+ return;
+
+ /* Set the DAC amplitude adjustment to +6% while the link runs at 100M */
+ if (phydev->state == PHY_RUNNING) {
+ if (phydev->speed == SPEED_100)
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN,
+ QCA8327_DEBUG_MANU_CTRL_EN);
+ } else {
+ /* Reset DAC Amplitude adjustment */
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+ }
+}
+
+static int qca83xx_resume(struct phy_device *phydev)
+{
+ int ret, val;
+
+ /* Skip reset if not suspended */
+ if (!phydev->suspended)
+ return 0;
+
+ /* Reinit the port to restore the values reset by suspend */
+ qca83xx_config_init(phydev);
+
+ /* Reset the port on port resume */
+ phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
+
+ /* On resume from suspend the switch executes a reset and
+ * restarts auto-negotiation. Wait for the reset to complete.
+ */
+ ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
+ 50000, 600000, true);
+ if (ret)
+ return ret;
+
+ msleep(1);
+
+ return 0;
+}
+
+static int qca83xx_suspend(struct phy_device *phydev)
+{
+ u16 mask = 0;
+
+ /* Only QCA8337 supports actual suspend.
+ * QCA8327 causes port unreliability when PHY suspend
+ * is set.
+ */
+ if (phydev->drv->phy_id == QCA8337_PHY_ID) {
+ genphy_suspend(phydev);
+ } else {
+ mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
+ phy_modify(phydev, MII_BMCR, mask, 0);
+ }
+
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
+ AT803X_DEBUG_GATE_CLK_IN1000, 0);
+
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+ AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
+ AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
+
return 0;
}
@@ -1409,11 +1502,27 @@ static struct phy_driver at803x_driver[] = {
.soft_reset = genphy_soft_reset,
.config_aneg = at803x_config_aneg,
}, {
+ /* Qualcomm Atheros QCA9561 */
+ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
+ .name = "Qualcomm Atheros QCA9561 built-in PHY",
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = &at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at803x_cable_test_get_status,
+ .read_status = at803x_read_status,
+ .soft_reset = genphy_soft_reset,
+ .config_aneg = at803x_config_aneg,
+}, {
/* QCA8337 */
.phy_id = QCA8337_PHY_ID,
.phy_id_mask = QCA8K_PHY_ID_MASK,
.name = "Qualcomm Atheros 8337 internal PHY",
/* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
.probe = at803x_probe,
.flags = PHY_IS_INTERNAL,
.config_init = qca83xx_config_init,
@@ -1421,14 +1530,15 @@ static struct phy_driver at803x_driver[] = {
.get_sset_count = at803x_get_sset_count,
.get_strings = at803x_get_strings,
.get_stats = at803x_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = qca83xx_suspend,
+ .resume = qca83xx_resume,
}, {
/* QCA8327-A from switch QCA8327-AL1A */
.phy_id = QCA8327_A_PHY_ID,
.phy_id_mask = QCA8K_PHY_ID_MASK,
.name = "Qualcomm Atheros 8327-A internal PHY",
/* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
.probe = at803x_probe,
.flags = PHY_IS_INTERNAL,
.config_init = qca83xx_config_init,
@@ -1436,14 +1546,15 @@ static struct phy_driver at803x_driver[] = {
.get_sset_count = at803x_get_sset_count,
.get_strings = at803x_get_strings,
.get_stats = at803x_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = qca83xx_suspend,
+ .resume = qca83xx_resume,
}, {
/* QCA8327-B from switch QCA8327-BL1A */
.phy_id = QCA8327_B_PHY_ID,
.phy_id_mask = QCA8K_PHY_ID_MASK,
.name = "Qualcomm Atheros 8327-B internal PHY",
/* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
.probe = at803x_probe,
.flags = PHY_IS_INTERNAL,
.config_init = qca83xx_config_init,
@@ -1451,8 +1562,8 @@ static struct phy_driver at803x_driver[] = {
.get_sset_count = at803x_get_sset_count,
.get_strings = at803x_get_strings,
.get_stats = at803x_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = qca83xx_suspend,
+ .resume = qca83xx_resume,
}, };
module_phy_driver(at803x_driver);
@@ -1466,6 +1577,7 @@ static struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
{ PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
{ PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 3a29a1493ff1..6ceadd2a0082 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -27,7 +27,12 @@
#define MII_BCM7XXX_SHD_2_ADDR_CTRL 0xe
#define MII_BCM7XXX_SHD_2_CTRL_STAT 0xf
#define MII_BCM7XXX_SHD_2_BIAS_TRIM 0x1a
+#define MII_BCM7XXX_SHD_3_PCS_CTRL 0x0
+#define MII_BCM7XXX_SHD_3_PCS_STATUS 0x1
+#define MII_BCM7XXX_SHD_3_EEE_CAP 0x2
#define MII_BCM7XXX_SHD_3_AN_EEE_ADV 0x3
+#define MII_BCM7XXX_SHD_3_EEE_LP 0x4
+#define MII_BCM7XXX_SHD_3_EEE_WK_ERR 0x5
#define MII_BCM7XXX_SHD_3_PCS_CTRL_2 0x6
#define MII_BCM7XXX_PCS_CTRL_2_DEF 0x4400
#define MII_BCM7XXX_SHD_3_AN_STAT 0xb
@@ -216,25 +221,37 @@ static int bcm7xxx_28nm_resume(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
-static int phy_set_clr_bits(struct phy_device *dev, int location,
- int set_mask, int clr_mask)
+static int __phy_set_clr_bits(struct phy_device *dev, int location,
+ int set_mask, int clr_mask)
{
int v, ret;
- v = phy_read(dev, location);
+ v = __phy_read(dev, location);
if (v < 0)
return v;
v &= ~clr_mask;
v |= set_mask;
- ret = phy_write(dev, location, v);
+ ret = __phy_write(dev, location, v);
if (ret < 0)
return ret;
return v;
}
+static int phy_set_clr_bits(struct phy_device *dev, int location,
+ int set_mask, int clr_mask)
+{
+ int ret;
+
+ mutex_lock(&dev->mdio.bus->mdio_lock);
+ ret = __phy_set_clr_bits(dev, location, set_mask, clr_mask);
+ mutex_unlock(&dev->mdio.bus->mdio_lock);
+
+ return ret;
+}
+
static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)
{
int ret;
@@ -582,6 +599,93 @@ static int bcm7xxx_16nm_ephy_resume(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+#define MII_BCM7XXX_REG_INVALID 0xff
+
+static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum)
+{
+ switch (regnum) {
+ case MDIO_CTRL1:
+ return MII_BCM7XXX_SHD_3_PCS_CTRL;
+ case MDIO_STAT1:
+ return MII_BCM7XXX_SHD_3_PCS_STATUS;
+ case MDIO_PCS_EEE_ABLE:
+ return MII_BCM7XXX_SHD_3_EEE_CAP;
+ case MDIO_AN_EEE_ADV:
+ return MII_BCM7XXX_SHD_3_AN_EEE_ADV;
+ case MDIO_AN_EEE_LPABLE:
+ return MII_BCM7XXX_SHD_3_EEE_LP;
+ case MDIO_PCS_EEE_WK_ERR:
+ return MII_BCM7XXX_SHD_3_EEE_WK_ERR;
+ default:
+ return MII_BCM7XXX_REG_INVALID;
+ }
+}
+
+static bool bcm7xxx_28nm_ephy_dev_valid(int devnum)
+{
+ return devnum == MDIO_MMD_AN || devnum == MDIO_MMD_PCS;
+}
+
+static int bcm7xxx_28nm_ephy_read_mmd(struct phy_device *phydev,
+ int devnum, u16 regnum)
+{
+ u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
+ int ret;
+
+ if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
+ shd == MII_BCM7XXX_REG_INVALID)
+ return -EOPNOTSUPP;
+
+ /* set shadow mode 2 */
+ ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+ MII_BCM7XXX_SHD_MODE_2, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Access the desired shadow register address */
+ ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ ret = __phy_read(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT);
+
+reset_shadow_mode:
+ /* reset shadow mode 2 */
+ __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+ MII_BCM7XXX_SHD_MODE_2);
+ return ret;
+}
+
+static int bcm7xxx_28nm_ephy_write_mmd(struct phy_device *phydev,
+ int devnum, u16 regnum, u16 val)
+{
+ u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
+ int ret;
+
+ if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
+ shd == MII_BCM7XXX_REG_INVALID)
+ return -EOPNOTSUPP;
+
+ /* set shadow mode 2 */
+ ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+ MII_BCM7XXX_SHD_MODE_2, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Access the desired shadow register address */
+ ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ /* Write the desired value in the shadow register */
+ __phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, val);
+
+reset_shadow_mode:
+ /* reset shadow mode 2 */
+ return __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+ MII_BCM7XXX_SHD_MODE_2);
+}
+
static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev)
{
int ret;
@@ -779,6 +883,8 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev)
.get_stats = bcm7xxx_28nm_get_phy_stats, \
.probe = bcm7xxx_28nm_probe, \
.remove = bcm7xxx_28nm_remove, \
+ .read_mmd = bcm7xxx_28nm_ephy_read_mmd, \
+ .write_mmd = bcm7xxx_28nm_ephy_write_mmd, \
}
#define BCM7XXX_40NM_EPHY(_oui, _name) \
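Wiring .read_mmd/.write_mmd into the 28nm EPHY entries lets the generic MMD helpers work on a PHY that only exposes those registers through shadow bank 3; anything outside the mapped AN/PCS registers still returns -EOPNOTSUPP. For example, the stock EEE code path now resolves as:

	/* generic EEE advertisement read, backed by shadow register 0x3 */
	int adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
	if (adv < 0)
		return adv;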
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 6bbc81ad295f..914619f3f0e3 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -619,6 +619,25 @@ static int dp83867_of_init(struct phy_device *phydev)
#else
static int dp83867_of_init(struct phy_device *phydev)
{
+ struct dp83867_private *dp83867 = phydev->priv;
+ u16 delay;
+
+ /* For a non-OF device, the RX and TX ID values are either strapped
+ * or taken from the default value. So, initialize the RX and TX ID
+ * values here so that RGMIIDCTL is configured correctly later in
+ * dp83867_config_init().
+ */
+ delay = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL);
+ dp83867->rx_id_delay = delay & DP83867_RGMII_RX_CLK_DELAY_MAX;
+ dp83867->tx_id_delay = (delay >> DP83867_RGMII_TX_CLK_DELAY_SHIFT) &
+ DP83867_RGMII_TX_CLK_DELAY_MAX;
+
+ /* Per the datasheet, IO impedance defaults to 50 ohm, so set the
+ * same here; otherwise the default '0' would mean the highest IO
+ * impedance, which is wrong.
+ */
+ dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN / 2;
+
return 0;
}
#endif /* CONFIG_OF_MDIO */
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index bd310e8d5e43..b6fea119fe13 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -22,6 +22,7 @@
* If both the fiber and copper ports are connected, the first to gain
* link takes priority and the other port is completely locked out.
*/
+#include <linux/bitfield.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/hwmon.h>
@@ -33,6 +34,8 @@
#define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe
#define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa)
+#define MV_VERSION(a,b,c,d) ((a) << 24 | (b) << 16 | (c) << 8 | (d))
+
enum {
MV_PMA_FW_VER0 = 0xc011,
MV_PMA_FW_VER1 = 0xc012,
@@ -62,6 +65,15 @@ enum {
MV_PCS_CSCR1_MDIX_MDIX = 0x0020,
MV_PCS_CSCR1_MDIX_AUTO = 0x0060,
+ MV_PCS_DSC1 = 0x8003,
+ MV_PCS_DSC1_ENABLE = BIT(9),
+ MV_PCS_DSC1_10GBT = 0x01c0,
+ MV_PCS_DSC1_1GBR = 0x0038,
+ MV_PCS_DSC1_100BTX = 0x0007,
+ MV_PCS_DSC2 = 0x8004,
+ MV_PCS_DSC2_2P5G = 0xf000,
+ MV_PCS_DSC2_5G = 0x0f00,
+
MV_PCS_CSSR1 = 0x8008,
MV_PCS_CSSR1_SPD1_MASK = 0xc000,
MV_PCS_CSSR1_SPD1_SPD2 = 0xc000,
@@ -125,6 +137,7 @@ enum {
};
struct mv3310_chip {
+ bool (*has_downshift)(struct phy_device *phydev);
void (*init_supported_interfaces)(unsigned long *mask);
int (*get_mactype)(struct phy_device *phydev);
int (*init_interface)(struct phy_device *phydev, int mactype);
@@ -138,6 +151,7 @@ struct mv3310_priv {
DECLARE_BITMAP(supported_interfaces, PHY_INTERFACE_MODE_MAX);
u32 firmware_ver;
+ bool has_downshift;
bool rate_match;
phy_interface_t const_interface;
@@ -330,6 +344,71 @@ static int mv3310_reset(struct phy_device *phydev, u32 unit)
5000, 100000, true);
}
+static int mv3310_get_downshift(struct phy_device *phydev, u8 *ds)
+{
+ struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+ int val;
+
+ if (!priv->has_downshift)
+ return -EOPNOTSUPP;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC1);
+ if (val < 0)
+ return val;
+
+ if (val & MV_PCS_DSC1_ENABLE)
+ /* assume that all fields are the same */
+ *ds = 1 + FIELD_GET(MV_PCS_DSC1_10GBT, (u16)val);
+ else
+ *ds = DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int mv3310_set_downshift(struct phy_device *phydev, u8 ds)
+{
+ struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+ u16 val;
+ int err;
+
+ if (!priv->has_downshift)
+ return -EOPNOTSUPP;
+
+ if (ds == DOWNSHIFT_DEV_DISABLE)
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC1,
+ MV_PCS_DSC1_ENABLE);
+
+ /* DOWNSHIFT_DEV_DEFAULT_COUNT is confusing. It looks like it should
+ * set the default settings for the PHY. However, it is used for
+ * "ethtool --set-phy-tunable ethN downshift on". The intention is
+ * to enable downshift at a default number of retries. The default
+ * settings for 88x3310 are for two retries with downshift disabled.
+ * So let's use two retries with downshift enabled.
+ */
+ if (ds == DOWNSHIFT_DEV_DEFAULT_COUNT)
+ ds = 2;
+
+ if (ds > 8)
+ return -E2BIG;
+
+ ds -= 1;
+ val = FIELD_PREP(MV_PCS_DSC2_2P5G, ds);
+ val |= FIELD_PREP(MV_PCS_DSC2_5G, ds);
+ err = phy_modify_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC2,
+ MV_PCS_DSC2_2P5G | MV_PCS_DSC2_5G, val);
+ if (err < 0)
+ return err;
+
+ val = MV_PCS_DSC1_ENABLE;
+ val |= FIELD_PREP(MV_PCS_DSC1_10GBT, ds);
+ val |= FIELD_PREP(MV_PCS_DSC1_1GBR, ds);
+ val |= FIELD_PREP(MV_PCS_DSC1_100BTX, ds);
+
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC1,
+ MV_PCS_DSC1_ENABLE | MV_PCS_DSC1_10GBT |
+ MV_PCS_DSC1_1GBR | MV_PCS_DSC1_100BTX, val);
+}
+
static int mv3310_get_edpd(struct phy_device *phydev, u16 *edpd)
{
int val;
@@ -448,6 +527,9 @@ static int mv3310_probe(struct phy_device *phydev)
priv->firmware_ver >> 24, (priv->firmware_ver >> 16) & 255,
(priv->firmware_ver >> 8) & 255, priv->firmware_ver & 255);
+ if (chip->has_downshift)
+ priv->has_downshift = chip->has_downshift(phydev);
+
/* Powering down the port when not in use saves about 600mW */
ret = mv3310_power_down(phydev);
if (ret)
@@ -616,7 +698,16 @@ static int mv3310_config_init(struct phy_device *phydev)
}
/* Enable EDPD mode - saving 600mW */
- return mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS);
+ err = mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS);
+ if (err)
+ return err;
+
+ /* Allow downshift */
+ err = mv3310_set_downshift(phydev, DOWNSHIFT_DEV_DEFAULT_COUNT);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
+ return 0;
}
static int mv3310_get_features(struct phy_device *phydev)
@@ -886,6 +977,8 @@ static int mv3310_get_tunable(struct phy_device *phydev,
struct ethtool_tunable *tuna, void *data)
{
switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return mv3310_get_downshift(phydev, data);
case ETHTOOL_PHY_EDPD:
return mv3310_get_edpd(phydev, data);
default:
@@ -897,6 +990,8 @@ static int mv3310_set_tunable(struct phy_device *phydev,
struct ethtool_tunable *tuna, const void *data)
{
switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return mv3310_set_downshift(phydev, *(u8 *)data);
case ETHTOOL_PHY_EDPD:
return mv3310_set_edpd(phydev, *(u16 *)data);
default:
@@ -904,6 +999,14 @@ static int mv3310_set_tunable(struct phy_device *phydev,
}
}
+static bool mv3310_has_downshift(struct phy_device *phydev)
+{
+ struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+
+ /* Fails to downshift with firmware older than v0.3.5.0 */
+ return priv->firmware_ver >= MV_VERSION(0,3,5,0);
+}
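+
+/* For reference, MV_VERSION() packs one byte per version component, so
+ * the v0.3.5.0 cut-off above is MV_VERSION(0,3,5,0) == 0x00030500,
+ * directly comparable with the firmware_ver word assembled in
+ * mv3310_probe().
+ */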
+
static void mv3310_init_supported_interfaces(unsigned long *mask)
{
__set_bit(PHY_INTERFACE_MODE_SGMII, mask);
@@ -943,6 +1046,7 @@ static void mv2111_init_supported_interfaces(unsigned long *mask)
}
static const struct mv3310_chip mv3310_type = {
+ .has_downshift = mv3310_has_downshift,
.init_supported_interfaces = mv3310_init_supported_interfaces,
.get_mactype = mv3310_get_mactype,
.init_interface = mv3310_init_interface,
@@ -953,6 +1057,7 @@ static const struct mv3310_chip mv3310_type = {
};
static const struct mv3310_chip mv3340_type = {
+ .has_downshift = mv3310_has_downshift,
.init_supported_interfaces = mv3340_init_supported_interfaces,
.get_mactype = mv3310_get_mactype,
.init_interface = mv3340_init_interface,
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 53f034fc2ef7..c204067f1890 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -525,6 +525,10 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
NULL == bus->read || NULL == bus->write)
return -EINVAL;
+ if (bus->parent && bus->parent->of_node)
+ bus->parent->of_node->fwnode.flags |=
+ FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
+
BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
bus->state != MDIOBUS_UNREGISTERED);
@@ -534,6 +538,13 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
bus->dev.groups = NULL;
dev_set_name(&bus->dev, "%s", bus->id);
+ /* We need to set state to MDIOBUS_UNREGISTERED to correctly release
+ * the device in mdiobus_free()
+ *
+ * State will be updated later in this function in case of success
+ */
+ bus->state = MDIOBUS_UNREGISTERED;
+
err = device_register(&bus->dev);
if (err) {
pr_err("mii_bus %s failed to register\n", bus->id);
@@ -926,6 +937,28 @@ int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set)
EXPORT_SYMBOL_GPL(mdiobus_modify);
/**
+ * mdiobus_modify_changed - Convenience function for modifying a given mdio
+ * device register and returning whether it changed
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ */
+int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set)
+{
+ int err;
+
+ mutex_lock(&bus->mdio_lock);
+ err = __mdiobus_modify_changed(bus, addr, regnum, mask, set);
+ mutex_unlock(&bus->mdio_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mdiobus_modify_changed);
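+
+/* A usage sketch (assumed caller, mirroring the phylink conversion later
+ * in this patch): the locked wrapper keeps the return convention of
+ * __mdiobus_modify_changed() - a negative errno on failure, 0 if the
+ * register already held the requested value, 1 if it was changed:
+ *
+ *	ret = mdiobus_modify_changed(bus, addr, MII_ADVERTISE, 0xffff, adv);
+ *	if (ret < 0)
+ *		return ret;
+ *	changed = ret > 0;
+ */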
+
+/**
* mdio_bus_match - determine if given MDIO driver supports the given
* MDIO device
* @dev: target MDIO device
@@ -938,8 +971,14 @@ EXPORT_SYMBOL_GPL(mdiobus_modify);
*/
static int mdio_bus_match(struct device *dev, struct device_driver *drv)
{
+ struct mdio_driver *mdiodrv = to_mdio_driver(drv);
struct mdio_device *mdio = to_mdio_device(dev);
+ /* Both the driver and device must type-match */
+ if (!(mdiodrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY) !=
+ !(mdio->flags & MDIO_DEVICE_FLAG_PHY))
+ return 0;
+
if (of_driver_match_device(dev, drv))
return 1;
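/* Note: "!x != !y" normalizes both flag tests to booleans and acts as a
 * logical XOR, so the match only proceeds when the driver and the device
 * are either both PHYs or both generic MDIO devices.
 */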
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index c94cb5382dc9..250742ffdfd9 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -179,6 +179,16 @@ static int mdio_remove(struct device *dev)
return 0;
}
+static void mdio_shutdown(struct device *dev)
+{
+ struct mdio_device *mdiodev = to_mdio_device(dev);
+ struct device_driver *drv = mdiodev->dev.driver;
+ struct mdio_driver *mdiodrv = to_mdio_driver(drv);
+
+ if (mdiodrv->shutdown)
+ mdiodrv->shutdown(mdiodev);
+}
+
/**
* mdio_driver_register - register an mdio_driver with the MDIO layer
* @drv: new mdio_driver to register
@@ -193,6 +203,7 @@ int mdio_driver_register(struct mdio_driver *drv)
mdiodrv->driver.bus = &mdio_bus_type;
mdiodrv->driver.probe = mdio_probe;
mdiodrv->driver.remove = mdio_remove;
+ mdiodrv->driver.shutdown = mdio_shutdown;
retval = driver_register(&mdiodrv->driver);
if (retval) {
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 5c928f827173..44a24b99c894 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -863,9 +863,9 @@ static int ksz9031_config_init(struct phy_device *phydev)
MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
tx_data_skews, 4, &update);
- if (update && phydev->interface != PHY_INTERFACE_MODE_RGMII)
+ if (update && !phy_interface_is_rgmii(phydev))
phydev_warn(phydev,
- "*-skew-ps values should be used only with phy-mode = \"rgmii\"\n");
+ "*-skew-ps values should be used only with RGMII PHY modes\n");
/* Silicon Errata Sheet (DS80000691D or DS80000692D):
* When the device links in the 1000BASE-T slave mode only,
@@ -1003,6 +1003,26 @@ static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
txcdll_val);
}
+/* Silicon Errata DS80000693B
+ *
+ * When LEDs are configured in Individual Mode, LED1 is ON in a no-link
+ * condition. The workaround is to set register 0x1e, bit 9; this way LED1
+ * behaves according to the datasheet (off if there is no link).
+ */
+static int ksz9131_led_errata(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read_mmd(phydev, 2, 0);
+ if (reg < 0)
+ return reg;
+
+ if (!(reg & BIT(4)))
+ return 0;
+
+ return phy_set_bits(phydev, 0x1e, BIT(9));
+}
+
static int ksz9131_config_init(struct phy_device *phydev)
{
struct device_node *of_node;
@@ -1058,6 +1078,10 @@ static int ksz9131_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
+ ret = ksz9131_led_errata(phydev);
+ if (ret < 0)
+ return ret;
+
return 0;
}
@@ -1537,6 +1561,65 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
return ret;
}
+#define LAN_EXT_PAGE_ACCESS_CONTROL 0x16
+#define LAN_EXT_PAGE_ACCESS_ADDRESS_DATA 0x17
+#define LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC 0x4000
+
+#define LAN8804_ALIGN_SWAP 0x4a
+#define LAN8804_ALIGN_TX_A_B_SWAP 0x1
+#define LAN8804_ALIGN_TX_A_B_SWAP_MASK GENMASK(2, 0)
+#define LAN8814_CLOCK_MANAGEMENT 0xd
+#define LAN8814_LINK_QUALITY 0x8e
+
+static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
+{
+ u32 data;
+
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
+ (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
+ data = phy_read(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA);
+
+ return data;
+}
+
+static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr,
+ u16 val)
+{
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
+ (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
+
+ val = phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, val);
+ if (val) {
+ phydev_err(phydev, "Error: phy_write has returned error %d\n",
+ val);
+ return val;
+ }
+ return 0;
+}
+
+static int lan8804_config_init(struct phy_device *phydev)
+{
+ int val;
+
+ /* MDI-X setting: swap the TX A/B pair */
+ val = lanphy_read_page_reg(phydev, 2, LAN8804_ALIGN_SWAP);
+ val &= ~LAN8804_ALIGN_TX_A_B_SWAP_MASK;
+ val |= LAN8804_ALIGN_TX_A_B_SWAP;
+ lanphy_write_page_reg(phydev, 2, LAN8804_ALIGN_SWAP, val);
+
+ /* Make sure that the PHY will not stop generating the clock when the
+ * link partner goes down
+ */
+ lanphy_write_page_reg(phydev, 31, LAN8814_CLOCK_MANAGEMENT, 0x27e);
+ lanphy_read_page_reg(phydev, 1, LAN8814_LINK_QUALITY);
+
+ return 0;
+}
+
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
@@ -1593,8 +1676,9 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ /* No suspend/resume callbacks because of errata DS80000700A,
+ * receiver error following software power down.
+ */
}, {
.phy_id = PHY_ID_KSZ8041RNLI,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -1719,6 +1803,20 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = kszphy_resume,
}, {
+ .phy_id = PHY_ID_LAN8804,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Microchip LAN966X Gigabit PHY",
+ .config_init = lan8804_config_init,
+ .driver_data = &ksz9021_type,
+ .probe = kszphy_probe,
+ .soft_reset = genphy_soft_reset,
+ .read_status = ksz9031_read_status,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = kszphy_resume,
+}, {
.phy_id = PHY_ID_KSZ9131,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
@@ -1794,6 +1892,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
+ { PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 6e32da28e138..ebfeeb3c67c1 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -273,12 +273,12 @@ static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count)
static int vsc85xx_wol_set(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
+ const u8 *mac_addr = phydev->attached_dev->dev_addr;
int rc;
u16 reg_val;
u8 i;
u16 pwd[3] = {0, 0, 0};
struct ethtool_wolinfo *wol_conf = wol;
- u8 *mac_addr = phydev->attached_dev->dev_addr;
mutex_lock(&phydev->lock);
rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index 2d5d5081c3b6..5ce1bf03bbd7 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -493,6 +493,25 @@ static int gpy_loopback(struct phy_device *phydev, bool enable)
return ret;
}
+static int gpy115_loopback(struct phy_device *phydev, bool enable)
+{
+ int ret;
+ int fw_minor;
+
+ if (enable)
+ return gpy_loopback(phydev, enable);
+
+ ret = phy_read(phydev, PHY_FWV);
+ if (ret < 0)
+ return ret;
+
+ fw_minor = FIELD_GET(PHY_FWV_MINOR_MASK, ret);
+ if (fw_minor > 0x0076)
+ return gpy_loopback(phydev, 0);
+
+ return genphy_soft_reset(phydev);
+}
+
static struct phy_driver gpy_drivers[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx),
@@ -527,7 +546,7 @@ static struct phy_driver gpy_drivers[] = {
.handle_interrupt = gpy_handle_interrupt,
.set_wol = gpy_set_wol,
.get_wol = gpy_get_wol,
- .set_loopback = gpy_loopback,
+ .set_loopback = gpy115_loopback,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_GPY115C),
@@ -544,7 +563,7 @@ static struct phy_driver gpy_drivers[] = {
.handle_interrupt = gpy_handle_interrupt,
.set_wol = gpy_set_wol,
.get_wol = gpy_get_wol,
- .set_loopback = gpy_loopback,
+ .set_loopback = gpy115_loopback,
},
{
.phy_id = PHY_ID_GPY211B,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ba5ad86ec826..74d8e1dc125f 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -3125,6 +3125,9 @@ static void phy_shutdown(struct device *dev)
{
struct phy_device *phydev = to_phy_device(dev);
+ if (phydev->state == PHY_READY || !phydev->attached_dev)
+ return;
+
phy_disable_interrupts(phydev);
}
@@ -3146,6 +3149,16 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
return -EINVAL;
}
+ /* PHYLIB device drivers must not match using a DT compatible table
+ * as this bypasses our checks that the mdiodev that is being matched
+ * is backed by a struct phy_device. If such a case happens, we will
+ * make out-of-bounds accesses and lock up on phydev->lock.
+ */
+ if (WARN(new_driver->mdiodrv.driver.of_match_table,
+ "%s: driver must not provide a DT match table\n",
+ new_driver->name))
+ return -EINVAL;
+
new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY;
new_driver->mdiodrv.driver.name = new_driver->name;
new_driver->mdiodrv.driver.bus = &mdio_bus_type;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 5a58c77d0002..7e93d81fa5ad 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -132,6 +132,17 @@ void phylink_set_port_modes(unsigned long *mask)
}
EXPORT_SYMBOL_GPL(phylink_set_port_modes);
+void phylink_set_10g_modes(unsigned long *mask)
+{
+ phylink_set(mask, 10000baseT_Full);
+ phylink_set(mask, 10000baseCR_Full);
+ phylink_set(mask, 10000baseSR_Full);
+ phylink_set(mask, 10000baseLR_Full);
+ phylink_set(mask, 10000baseLRM_Full);
+ phylink_set(mask, 10000baseER_Full);
+}
+EXPORT_SYMBOL_GPL(phylink_set_10g_modes);
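+
+/* A minimal usage sketch (assumed caller, not from this patch): a MAC
+ * driver can fill every 10G link mode in its validate() mask in one call:
+ *
+ *	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ *
+ *	phylink_set(mask, Autoneg);
+ *	phylink_set_10g_modes(mask);
+ *	linkmode_and(supported, supported, mask);
+ */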
+
static int phylink_is_empty_linkmode(const unsigned long *linkmode)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = { 0, };
@@ -540,9 +551,15 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
state->an_enabled = pl->link_config.an_enabled;
- state->speed = SPEED_UNKNOWN;
- state->duplex = DUPLEX_UNKNOWN;
- state->pause = MLO_PAUSE_NONE;
+ if (state->an_enabled) {
+ state->speed = SPEED_UNKNOWN;
+ state->duplex = DUPLEX_UNKNOWN;
+ state->pause = MLO_PAUSE_NONE;
+ } else {
+ state->speed = pl->link_config.speed;
+ state->duplex = pl->link_config.duplex;
+ state->pause = pl->link_config.pause;
+ }
state->an_complete = 0;
state->link = 1;
@@ -1601,20 +1618,11 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising,
config.an_enabled);
- /* Validate without changing the current supported mask. */
- linkmode_copy(support, pl->supported);
- if (phylink_validate(pl, support, &config))
- return -EINVAL;
-
- /* If autonegotiation is enabled, we must have an advertisement */
- if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
- return -EINVAL;
-
/* If this link is with an SFP, ensure that changes to advertised modes
* also cause the associated interface to be selected such that the
* link can be configured correctly.
*/
- if (pl->sfp_port && pl->sfp_bus) {
+ if (pl->sfp_bus) {
config.interface = sfp_select_interface(pl->sfp_bus,
config.advertising);
if (config.interface == PHY_INTERFACE_MODE_NA) {
@@ -1634,8 +1642,17 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
__ETHTOOL_LINK_MODE_MASK_NBITS, support);
return -EINVAL;
}
+ } else {
+ /* Validate without changing the current supported mask. */
+ linkmode_copy(support, pl->supported);
+ if (phylink_validate(pl, support, &config))
+ return -EINVAL;
}
+ /* If autonegotiation is enabled, we must have an advertisement */
+ if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
+ return -EINVAL;
+
mutex_lock(&pl->state_mutex);
pl->link_config.speed = config.speed;
pl->link_config.duplex = config.duplex;
@@ -2538,7 +2555,10 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
state->link = !!(bmsr & BMSR_LSTATUS);
state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
- if (!state->link)
+ /* If there is no link or autonegotiation is disabled, the LP advertisement
+ * data is not meaningful, so don't go any further.
+ */
+ if (!state->link || !state->an_enabled)
return;
switch (state->interface) {
@@ -2585,7 +2605,6 @@ int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
{
struct mii_bus *bus = pcs->bus;
int addr = pcs->addr;
- int val, ret;
u16 adv;
switch (interface) {
@@ -2599,32 +2618,12 @@ int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
advertising))
adv |= ADVERTISE_1000XPSE_ASYM;
- val = mdiobus_read(bus, addr, MII_ADVERTISE);
- if (val < 0)
- return val;
-
- if (val == adv)
- return 0;
-
- ret = mdiobus_write(bus, addr, MII_ADVERTISE, adv);
- if (ret < 0)
- return ret;
-
- return 1;
+ return mdiobus_modify_changed(bus, addr, MII_ADVERTISE,
+ 0xffff, adv);
case PHY_INTERFACE_MODE_SGMII:
- val = mdiobus_read(bus, addr, MII_ADVERTISE);
- if (val < 0)
- return val;
-
- if (val == 0x0001)
- return 0;
-
- ret = mdiobus_write(bus, addr, MII_ADVERTISE, 0x0001);
- if (ret < 0)
- return ret;
-
- return 1;
+ return mdiobus_modify_changed(bus, addr, MII_ADVERTISE,
+ 0xffff, 0x0001);
default:
/* Nothing to do for other modes */
@@ -2661,7 +2660,12 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
changed = ret > 0;
/* Ensure ISOLATE bit is disabled */
- bmcr = mode == MLO_AN_INBAND ? BMCR_ANENABLE : 0;
+ if (mode == MLO_AN_INBAND &&
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising))
+ bmcr = BMCR_ANENABLE;
+ else
+ bmcr = 0;
+
ret = mdiobus_modify(pcs->bus, pcs->addr, MII_BMCR,
BMCR_ANENABLE | BMCR_ISOLATE, bmcr);
if (ret < 0)
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 11be60333fa8..a5671ab896b3 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -1023,6 +1023,14 @@ static struct phy_driver realtek_drvs[] = {
.resume = genphy_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc942),
+ .name = "RTL8365MB-VC Gigabit Ethernet",
+ /* Interrupt handling analogous to RTL8366RB */
+ .config_intr = genphy_no_config_intr,
+ .handle_interrupt = genphy_handle_interrupt_no_ack,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
},
};
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 34e90216bd2c..ab77a9f439ef 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -134,7 +134,7 @@ static const char * const sm_state_strings[] = {
[SFP_S_LINK_UP] = "link_up",
[SFP_S_TX_FAULT] = "tx_fault",
[SFP_S_REINIT] = "reinit",
- [SFP_S_TX_DISABLE] = "rx_disable",
+ [SFP_S_TX_DISABLE] = "tx_disable",
};
static const char *sm_state_to_str(unsigned short sm_state)
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 82d609401711..0d491b4d6667 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -284,12 +284,16 @@ static const struct net_device_ops plip_netdev_ops = {
static void
plip_init_netdev(struct net_device *dev)
{
+ static const u8 addr_init[ETH_ALEN] = {
+ 0xfc, 0xfc, 0xfc,
+ 0xfc, 0xfc, 0xfc,
+ };
struct net_local *nl = netdev_priv(dev);
/* Then, override parts of it */
dev->tx_queue_len = 10;
dev->flags = IFF_POINTOPOINT|IFF_NOARP;
- memset(dev->dev_addr, 0xfc, ETH_ALEN);
+ eth_hw_addr_set(dev, addr_init);
dev->netdev_ops = &plip_netdev_ops;
dev->header_ops = &plip_header_ops;
@@ -1109,7 +1113,7 @@ plip_open(struct net_device *dev)
plip_init_dev(). */
const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
if (ifa != NULL) {
- memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
+ dev_addr_mod(dev, 2, &ifa->ifa_local, 4);
}
}
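The conversion above follows a series-wide pattern: netdev->dev_addr is being prepared to become const, so drivers must stop writing it with memcpy()/memset() and instead go through eth_hw_addr_set(), dev_addr_set() or dev_addr_mod(). A minimal sketch of the idiom, with read_mac_from_hw() as a hypothetical driver helper:

	u8 addr[ETH_ALEN];
	int ret;

	ret = read_mac_from_hw(priv, addr);	/* hypothetical helper */
	if (ret)
		return ret;
	eth_hw_addr_set(dev, addr);	/* replaces memcpy(dev->dev_addr, ...) */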
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fb52cd175b45..1180a0e2445f 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1161,7 +1161,7 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
if (!ifname_is_set) {
while (1) {
snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret);
- if (!__dev_get_by_name(ppp->ppp_net, ppp->dev->name))
+ if (!netdev_name_in_use(ppp->ppp_net, ppp->dev->name))
break;
unit_put(&pn->units_idr, ret);
ret = unit_get(&pn->units_idr, ppp, ret + 1);
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 2056d6ad04b5..1a95f3beb784 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -482,6 +482,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
{
int rc = 0;
struct rionet_private *rnet;
+ u8 addr[ETH_ALEN];
u16 device_id;
const size_t rionet_active_bytes = sizeof(void *) *
RIO_MAX_ROUTE_ENTRIES(mport->sys_size);
@@ -501,12 +502,13 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
/* Set the default MAC address */
device_id = rio_local_get_device_id(mport);
- ndev->dev_addr[0] = 0x00;
- ndev->dev_addr[1] = 0x01;
- ndev->dev_addr[2] = 0x00;
- ndev->dev_addr[3] = 0x01;
- ndev->dev_addr[4] = device_id >> 8;
- ndev->dev_addr[5] = device_id & 0xff;
+ addr[0] = 0x00;
+ addr[1] = 0x01;
+ addr[2] = 0x00;
+ addr[3] = 0x01;
+ addr[4] = device_id >> 8;
+ addr[5] = device_id & 0xff;
+ eth_hw_addr_set(ndev, addr);
ndev->netdev_ops = &rionet_netdev_ops;
ndev->mtu = RIONET_MAX_MTU;
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index f01c9db01b16..57a6d598467b 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -149,6 +149,7 @@ sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id)
unsigned short ioaddr[2], irq;
unsigned int serial_number;
int error = -ENODEV;
+ u8 addr[ETH_ALEN];
if (pnp_device_attach(pdev) < 0)
return -ENODEV;
@@ -203,10 +204,13 @@ sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id)
dev->netdev_ops = &sb1000_netdev_ops;
/* hardware address is 0:0:serial_number */
- dev->dev_addr[2] = serial_number >> 24 & 0xff;
- dev->dev_addr[3] = serial_number >> 16 & 0xff;
- dev->dev_addr[4] = serial_number >> 8 & 0xff;
- dev->dev_addr[5] = serial_number >> 0 & 0xff;
+ addr[0] = 0;
+ addr[1] = 0;
+ addr[2] = serial_number >> 24 & 0xff;
+ addr[3] = serial_number >> 16 & 0xff;
+ addr[4] = serial_number >> 8 & 0xff;
+ addr[5] = serial_number >> 0 & 0xff;
+ eth_hw_addr_set(dev, addr);
pnp_set_drvdata(pdev, dev);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index dd7917cab2b1..8b2adc56b92a 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1790,7 +1790,7 @@ static int team_set_mac_address(struct net_device *dev, void *p)
if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ dev_addr_set(dev, addr->sa_data);
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list)
if (team->ops.port_change_dev_addr)
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 4c5d69732a7e..b554054a7560 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -99,6 +99,10 @@ config USB_RTL8150
config USB_RTL8152
tristate "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
select MII
+ select CRC32
+ select CRYPTO
+ select CRYPTO_HASH
+ select CRYPTO_SHA256
help
This option adds support for Realtek RTL8152 based USB 2.0
10/100 Ethernet adapters and RTL8153 based USB 3.0 10/100/1000
@@ -113,6 +117,7 @@ config USB_LAN78XX
select PHYLIB
select MICROCHIP_PHY
select FIXED_PHY
+ select CRC32
help
This option adds support for Microchip LAN78XX based USB 2
& USB 3 10/100/1000 Ethernet adapters.
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 73b97f4cc1ec..ea06d10e1c21 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -119,7 +119,7 @@ static int aqc111_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
}
static int aqc111_write_cmd(struct usbnet *dev, u8 cmd, u16 value,
- u16 index, u16 size, void *data)
+ u16 index, u16 size, const void *data)
{
int ret;
@@ -714,7 +714,7 @@ static int aqc111_bind(struct usbnet *dev, struct usb_interface *intf)
if (ret)
goto out;
- ether_addr_copy(dev->net->dev_addr, dev->net->perm_addr);
+ eth_hw_addr_set(dev->net, dev->net->perm_addr);
/* Set Rx urb size */
dev->rx_urb_size = URB_SIZE;
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 38cda590895c..42ba4af68090 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -791,7 +791,7 @@ int asix_set_mac_address(struct net_device *net, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(net, addr->sa_data);
/* We use the 20 byte dev->data
* for our 6 byte mac buffer
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 30821f6a6d7a..4514d35ef4c4 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -59,7 +59,7 @@ static void asix_status(struct usbnet *dev, struct urb *urb)
static void asix_set_netdev_dev_addr(struct usbnet *dev, u8 *addr)
{
if (is_valid_ether_addr(addr)) {
- memcpy(dev->net->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(dev->net, addr);
} else {
netdev_info(dev->net, "invalid hw address, using random\n");
eth_hw_addr_random(dev->net);
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index d9777d9a7c5d..3777c7e2e6fc 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -176,7 +176,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
ret = -EIO;
goto free;
}
- memcpy(dev->net->dev_addr, buf, ETH_ALEN);
+ eth_hw_addr_set(dev->net, buf);
dev->net->netdev_ops = &ax88172a_netdev_ops;
dev->net->ethtool_ops = &ax88172a_ethtool_ops;
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index f25448a08870..ea8aa8c33241 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -209,7 +209,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
}
static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data, int in_pm)
+ u16 size, const void *data, int in_pm)
{
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
@@ -272,7 +272,7 @@ static int ax88179_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
}
static int ax88179_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
- u16 index, u16 size, void *data)
+ u16 index, u16 size, const void *data)
{
int ret;
@@ -313,7 +313,7 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
}
static int ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data)
+ u16 size, const void *data)
{
int ret;
@@ -463,7 +463,7 @@ static int ax88179_auto_detach(struct usbnet *dev, int in_pm)
u16 tmp16;
u8 tmp8;
int (*fnr)(struct usbnet *, u8, u16, u16, u16, void *);
- int (*fnw)(struct usbnet *, u8, u16, u16, u16, void *);
+ int (*fnw)(struct usbnet *, u8, u16, u16, u16, const void *);
if (!in_pm) {
fnr = ax88179_read_cmd;
@@ -1015,7 +1015,7 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(net, addr->sa_data);
/* Set the MAC address */
ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
@@ -1310,7 +1310,7 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
}
if (is_valid_ether_addr(mac)) {
- memcpy(dev->net->dev_addr, mac, ETH_ALEN);
+ eth_hw_addr_set(dev->net, mac);
} else {
netdev_info(dev->net, "invalid MAC address, using random\n");
eth_hw_addr_random(dev->net);
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 97ba67042d12..24db5768a3c0 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -615,7 +615,7 @@ static void catc_stats_timer(struct timer_list *t)
* Receive modes. Broadcast, Multicast, Promisc.
*/
-static void catc_multicast(unsigned char *addr, u8 *multicast)
+static void catc_multicast(const unsigned char *addr, u8 *multicast)
{
u32 crc;
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index e1da9102a540..ad5121e9cf5d 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -275,6 +275,8 @@ static const struct net_device_ops usbpn_ops = {
static void usbpn_setup(struct net_device *dev)
{
+ const u8 addr = PN_MEDIA_USB;
+
dev->features = 0;
dev->netdev_ops = &usbpn_ops;
dev->header_ops = &phonet_header_ops;
@@ -284,8 +286,8 @@ static void usbpn_setup(struct net_device *dev)
dev->min_mtu = PHONET_MIN_MTU;
dev->max_mtu = PHONET_MAX_MTU;
dev->hard_header_len = 1;
- dev->dev_addr[0] = PN_MEDIA_USB;
dev->addr_len = 1;
+ dev_addr_set(dev, &addr);
dev->tx_queue_len = 3;
dev->needs_free_netdev = true;
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index d7f3b70d5477..f69d9b902da0 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -336,6 +336,7 @@ static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf)
{
int retval = 0;
unsigned char data[2];
+ u8 addr[ETH_ALEN];
retval = usbnet_get_endpoints(dev, intf);
if (retval)
@@ -383,7 +384,8 @@ static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf)
retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_CTRL, data, 0x02,
CONTROL_TIMEOUT_MS);
- retval = get_mac_address(dev, dev->net->dev_addr);
+ retval = get_mac_address(dev, addr);
+ eth_hw_addr_set(dev->net, addr);
return retval;
}
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index c4568a491dc4..79a47e2fd437 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -146,6 +146,7 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
u8 link[3];
int timeout = 50;
struct cx82310_priv *priv;
+ u8 addr[ETH_ALEN];
/* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@@ -202,12 +203,12 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
goto err;
/* get the MAC address */
- ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0,
- dev->net->dev_addr, ETH_ALEN);
+ ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0, addr, ETH_ALEN);
if (ret) {
netdev_err(dev->net, "unable to read MAC address: %d\n", ret);
goto err;
}
+ eth_hw_addr_set(dev->net, addr);
/* start (does not seem to have any effect?) */
ret = cx82310_cmd(dev, CMD_START, false, NULL, 0, NULL, 0);
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 907f98b1eefe..48d7d278631e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -93,7 +93,8 @@ static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
value, reg, NULL, 0);
}
-static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
+static void dm_write_async(struct usbnet *dev, u8 reg, u16 length,
+ const void *data)
{
usbnet_write_cmd_async(dev, DM_WRITE_REGS,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
@@ -331,7 +332,7 @@ static int dm9601_set_mac_address(struct net_device *net, void *p)
return -EINVAL;
}
- memcpy(net->dev_addr, addr->sa_data, net->addr_len);
+ eth_hw_addr_set(net, addr->sa_data);
__dm9601_set_mac_address(dev);
return 0;
@@ -391,7 +392,7 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
* Overwrite the auto-generated address only with a good one.
*/
if (is_valid_ether_addr(mac))
- memcpy(dev->net->dev_addr, mac, ETH_ALEN);
+ eth_hw_addr_set(dev->net, mac);
else {
printk(KERN_WARNING
"dm9601: No valid MAC address in EEPROM, using %pM\n",
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index a57251ba5991..f97813a4e8d1 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2719,14 +2719,14 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
serial = kzalloc(sizeof(*serial), GFP_KERNEL);
if (!serial)
- goto exit;
+ goto err_free_dev;
hso_dev->port_data.dev_serial = serial;
serial->parent = hso_dev;
if (hso_serial_common_create
(serial, 1, CTRL_URB_RX_SIZE, CTRL_URB_TX_SIZE))
- goto exit;
+ goto err_free_serial;
serial->tx_data_length--;
serial->write_data = hso_mux_serial_write_data;
@@ -2742,11 +2742,9 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
/* done, return it */
return hso_dev;
-exit:
- if (serial) {
- tty_unregister_device(tty_drv, serial->minor);
- kfree(serial);
- }
+err_free_serial:
+ kfree(serial);
+err_free_dev:
kfree(hso_dev);
return NULL;
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 06e2181e5810..cd33955df0b6 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -303,7 +303,7 @@ static int ipheth_get_macaddr(struct ipheth_device *dev)
__func__, retval);
retval = -EINVAL;
} else {
- memcpy(net->dev_addr, dev->ctrl_buf, ETH_ALEN);
+ eth_hw_addr_set(net, dev->ctrl_buf);
retval = 0;
}
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index fc5895f85cee..9f2b70ef39aa 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -149,7 +149,7 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
if (status)
return status;
- memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN);
+ eth_hw_addr_set(dev->net, ethernet_addr);
return status;
}
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 144c686b4333..9b2bc1993ece 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1044,8 +1044,7 @@ err_fw:
goto err_all_but_rxbuf;
memcpy(netdev->broadcast, &bcast_addr, sizeof(bcast_addr));
- memcpy(netdev->dev_addr, &kaweth->configuration.hw_addr,
- sizeof(kaweth->configuration.hw_addr));
+ eth_hw_addr_set(netdev, (u8 *)&kaweth->configuration.hw_addr);
netdev->netdev_ops = &kaweth_netdev_ops;
netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 793f8fbe0069..03319fdb5235 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1817,7 +1817,7 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
- ether_addr_copy(dev->net->dev_addr, addr);
+ eth_hw_addr_set(dev->net, addr);
}
/* MDIO read and write wrappers for phylib */
@@ -2416,7 +2416,7 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
addr_lo = netdev->dev_addr[0] |
netdev->dev_addr[1] << 8 |
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 66866bef25df..326cc4e749d8 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -132,7 +132,8 @@ static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr)
return 0;
}
-static int mcs7830_hif_set_mac_address(struct usbnet *dev, unsigned char *addr)
+static int mcs7830_hif_set_mac_address(struct usbnet *dev,
+ const unsigned char *addr)
{
int ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
@@ -159,7 +160,7 @@ static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
return ret;
/* it worked --> adopt it on netdev side */
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
return 0;
}
@@ -472,17 +473,19 @@ static const struct net_device_ops mcs7830_netdev_ops = {
static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
{
struct net_device *net = dev->net;
+ u8 addr[ETH_ALEN];
int ret;
int retry;
/* Initial startup: Gather MAC address setting from EEPROM */
ret = -EINVAL;
for (retry = 0; retry < 5 && ret; retry++)
- ret = mcs7830_hif_get_mac_address(dev, net->dev_addr);
+ ret = mcs7830_hif_get_mac_address(dev, addr);
if (ret) {
dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
goto out;
}
+ eth_hw_addr_set(net, addr);
mcs7830_data_set_multicast(net);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6a92a3fef75e..c4cd40b090fd 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -357,7 +357,7 @@ static void set_ethernet_addr(pegasus_t *pegasus)
goto err;
}
- memcpy(pegasus->net->dev_addr, node_id, sizeof(node_id));
+ eth_hw_addr_set(pegasus->net, node_id);
return;
err:
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 33ada2c59952..86b814e99224 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -835,8 +835,11 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
/* make MAC addr easily distinguishable from an IP header */
if (possibly_iphdr(dev->net->dev_addr)) {
- dev->net->dev_addr[0] |= 0x02; /* set local assignment bit */
- dev->net->dev_addr[0] &= 0xbf; /* clear "IP" bit */
+ u8 addr = dev->net->dev_addr[0];
+
+ addr |= 0x02; /* set local assignment bit */
+ addr &= 0xbf; /* clear "IP" bit */
+ dev_addr_mod(dev->net, 0, &addr, 1);
}
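/* Worked example (illustrative): a first byte of 0x45 would parse as an
 * IPv4 header (version 4, IHL 5); 0x45 | 0x02 = 0x47, and 0x47 & 0xbf =
 * 0x07, whose high nibble is neither 4 nor 6, so the address can no
 * longer be mistaken for the start of an IPv4/IPv6 header.
 */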
dev->net->netdev_ops = &qmi_wwan_netdev_ops;
dev->net->sysfs_groups[0] = &qmi_wwan_sysfs_attr_group;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 60ba9b734055..4a02f33f0643 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -767,6 +767,7 @@ enum rtl8152_flags {
PHY_RESET,
SCHEDULE_TASKLET,
GREEN_ETHERNET,
+ RX_EPROTO,
};
#define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082
@@ -1570,7 +1571,7 @@ static int __rtl8152_set_mac_address(struct net_device *netdev, void *p,
mutex_lock(&tp->control);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data);
@@ -1718,7 +1719,7 @@ static int set_ethernet_addr(struct r8152 *tp, bool in_resume)
return ret;
if (tp->version == RTL_VER_01)
- ether_addr_copy(dev->dev_addr, sa.sa_data);
+ eth_hw_addr_set(dev, sa.sa_data);
else
ret = __rtl8152_set_mac_address(dev, &sa, in_resume);
@@ -1770,6 +1771,14 @@ static void read_bulk_callback(struct urb *urb)
rtl_set_unplug(tp);
netif_device_detach(tp->netdev);
return;
+ case -EPROTO:
+ urb->actual_length = 0;
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ set_bit(RX_EPROTO, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 1);
+ return;
case -ENOENT:
return; /* the urb is in unlink state */
case -ETIME:
@@ -2425,6 +2434,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
if (list_empty(&tp->rx_done))
goto out1;
+ clear_bit(RX_EPROTO, &tp->flags);
INIT_LIST_HEAD(&rx_queue);
spin_lock_irqsave(&tp->rx_lock, flags);
list_splice_init(&tp->rx_done, &rx_queue);
@@ -2441,7 +2451,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
agg = list_entry(cursor, struct rx_agg, list);
urb = agg->urb;
- if (urb->actual_length < ETH_ZLEN)
+ if (urb->status != 0 || urb->actual_length < ETH_ZLEN)
goto submit;
agg_free = rtl_get_free_rx(tp, GFP_ATOMIC);
@@ -6643,6 +6653,10 @@ static void rtl_work_func_t(struct work_struct *work)
netif_carrier_ok(tp->netdev))
tasklet_schedule(&tp->tx_tl);
+ if (test_and_clear_bit(RX_EPROTO, &tp->flags) &&
+ !list_empty(&tp->rx_done))
+ napi_schedule(&tp->napi);
+
mutex_unlock(&tp->control);
out1:
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 85a8b96e39a6..4a84f90e377c 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -421,7 +421,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
if (bp[0] & 0x02)
eth_hw_addr_random(net);
else
- ether_addr_copy(net->dev_addr, bp);
+ eth_hw_addr_set(net, bp);
/* set a nonzero filter to enable data transfers */
memset(u.set, 0, sizeof *u.set);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 4a1b0e0fc3a3..3d2bf2acca94 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -262,7 +262,7 @@ static void set_ethernet_addr(rtl8150_t *dev)
ret = get_registers(dev, IDR, sizeof(node_id), node_id);
if (!ret) {
- ether_addr_copy(dev->netdev->dev_addr, node_id);
+ eth_hw_addr_set(dev->netdev, node_id);
} else {
eth_hw_addr_random(dev->netdev);
netdev_notice(dev->netdev, "Assigned a random MAC address: %pM\n",
@@ -278,7 +278,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
netdev_dbg(netdev, "Setting MAC address to %pM\n", netdev->dev_addr);
/* Set the IDR registers. */
set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 55025202dc4f..bb4cbe8fc846 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -669,6 +669,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
+ u8 mod[2];
dev_dbg(&dev->udev->dev, "%s", __func__);
@@ -698,8 +699,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->netdev_ops = &sierra_net_device_ops;
/* change MAC addr to include ifacenum and to be unique */
- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
- dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
+ mod[0] = atomic_inc_return(&iface_counter);
+ mod[1] = ifacenum;
+ dev_addr_mod(dev->net, ETH_ALEN - 2, mod, 2);
/* prepare shutdown message template */
memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 76f7af161313..95de452ff4da 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -757,9 +757,10 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
static void smsc75xx_init_mac_address(struct usbnet *dev)
{
+ u8 addr[ETH_ALEN];
+
/* maybe the boot loader passed the MAC address in devicetree */
- if (!eth_platform_get_mac_address(&dev->udev->dev,
- dev->net->dev_addr)) {
+ if (!platform_get_ethdev_address(&dev->udev->dev, dev->net)) {
if (is_valid_ether_addr(dev->net->dev_addr)) {
/* device tree values are valid so use them */
netif_dbg(dev, ifup, dev->net, "MAC address read from the device tree\n");
@@ -768,8 +769,8 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
}
/* try reading mac address from EEPROM */
- if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
- dev->net->dev_addr) == 0) {
+ if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, addr) == 0) {
+ eth_hw_addr_set(dev->net, addr);
if (is_valid_ether_addr(dev->net->dev_addr)) {
/* eeprom values are valid so use them */
netif_dbg(dev, ifup, dev->net,
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 7d953974eb9b..20fe4cd8f784 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -755,9 +755,10 @@ static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
static void smsc95xx_init_mac_address(struct usbnet *dev)
{
+ u8 addr[ETH_ALEN];
+
/* maybe the boot loader passed the MAC address in devicetree */
- if (!eth_platform_get_mac_address(&dev->udev->dev,
- dev->net->dev_addr)) {
+ if (!platform_get_ethdev_address(&dev->udev->dev, dev->net)) {
if (is_valid_ether_addr(dev->net->dev_addr)) {
/* device tree values are valid so use them */
netif_dbg(dev, ifup, dev->net, "MAC address read from the device tree\n");
@@ -766,8 +767,8 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
}
/* try reading mac address from EEPROM */
- if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
- dev->net->dev_addr) == 0) {
+ if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, addr) == 0) {
+ eth_hw_addr_set(dev->net, addr);
if (is_valid_ether_addr(dev->net->dev_addr)) {
/* eeprom values are valid so use them */
netif_dbg(dev, ifup, dev->net, "MAC address read from EEPROM\n");
@@ -1178,7 +1179,10 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
static void smsc95xx_handle_link_change(struct net_device *net)
{
+ struct usbnet *dev = netdev_priv(net);
+
phy_print_status(net->phydev);
+ usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
}
static int smsc95xx_start_phy(struct usbnet *dev)
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 6516a37893e2..b658510cc9a4 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -56,7 +56,8 @@ static int sr_write_reg(struct usbnet *dev, u8 reg, u8 value)
value, reg, NULL, 0);
}
-static void sr_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
+static void sr_write_async(struct usbnet *dev, u8 reg, u16 length,
+ const void *data)
{
usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG,
0, reg, data, length);
@@ -296,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p)
return -EINVAL;
}
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
sr_write_async(dev, SR_PAR, 6, netdev->dev_addr);
return 0;
@@ -319,6 +320,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct net_device *netdev;
struct mii_if_info *mii;
+ u8 addr[ETH_ALEN];
int ret;
ret = usbnet_get_endpoints(dev, intf);
@@ -349,11 +351,12 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
* EEPROM automatically to PAR. In case there is no EEPROM externally,
* a default MAC address is stored in PAR so that the chip works properly.
*/
- if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) {
+ if (sr_read(dev, SR_PAR, ETH_ALEN, addr) < 0) {
netdev_err(netdev, "Error reading MAC address\n");
ret = -ENODEV;
goto out;
}
+ eth_hw_addr_set(netdev, addr);
/* power up and reset phy */
sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 576401c8b1be..f5e19f3ef6cd 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -503,7 +503,7 @@ static int sr_set_mac_address(struct net_device *net, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(net, addr->sa_data);
/* We use the 20 byte dev->data
* for our 6 byte mac buffer
@@ -731,6 +731,7 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
struct sr_data *data = (struct sr_data *)&dev->data;
u16 led01_mux, led23_mux;
int ret, embd_phy;
+ u8 addr[ETH_ALEN];
u32 phyid;
u16 rx_ctl;
@@ -754,12 +755,12 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
}
/* Get the MAC address */
- ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN,
- dev->net->dev_addr);
+ ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, addr);
if (ret < 0) {
netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
return ret;
}
+ eth_hw_addr_set(dev->net, addr);
netdev_dbg(dev->net, "mac addr : %pM\n", dev->net->dev_addr);
/* Initialize MII structure */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 840c1c2ab16a..350bae673ed4 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -165,12 +165,13 @@ EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
{
+ u8 addr[ETH_ALEN];
int tmp = -1, ret;
unsigned char buf [13];
ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
if (ret == 12)
- tmp = hex2bin(dev->net->dev_addr, buf, 6);
+ tmp = hex2bin(addr, buf, 6);
if (tmp < 0) {
dev_dbg(&dev->udev->dev,
"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
@@ -178,6 +179,7 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
ret = -EINVAL;
return ret;
}
+ eth_hw_addr_set(dev->net, addr);
return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
@@ -1726,7 +1728,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->net = net;
strscpy(net->name, "usb%d", sizeof(net->name));
- memcpy (net->dev_addr, node_id, sizeof node_id);
+ eth_hw_addr_set(net, node_id);
/* rx and tx sides can use different message sizes;
* bind() should set rx_urb_size in that case.
@@ -1788,6 +1790,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
if (!dev->rx_urb_size)
dev->rx_urb_size = dev->hard_mtu;
dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
+ if (dev->maxpacket == 0) {
+ /* that is a broken device */
+ goto out4;
+ }
/* let userspace know we have a random address */
if (ether_addr_equal(net->dev_addr, node_id))
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e46a715dad2b..c501b5974aee 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -408,7 +408,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
* add_recvbuf_mergeable() + get_mergeable_buf_len()
*/
truesize = headroom ? PAGE_SIZE : truesize;
- tailroom = truesize - len - headroom;
+ tailroom = truesize - len - headroom - (hdr_padded_len - hdr_len);
buf = p - headroom;
len -= hdr_len;
@@ -425,6 +425,10 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
skb_reserve(skb, p - buf);
skb_put(skb, len);
+
+ page = (struct page *)page->private;
+ if (page)
+ give_pages(rq, page);
goto ok;
}
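/* Hypothetical numbers to illustrate the tailroom fix: with truesize =
 * 2048, len = 1500, headroom = 0, hdr_len = 12 and hdr_padded_len = 16,
 * the old formula reported 548 bytes of tailroom, 4 bytes more than are
 * actually free once the padded header is accounted for; the corrected
 * formula yields 544.
 */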
@@ -730,6 +734,12 @@ static struct sk_buff *receive_small(struct net_device *dev,
dev->stats.rx_length_errors++;
goto err_len;
}
+
+ if (likely(!vi->xdp_enabled)) {
+ xdp_prog = NULL;
+ goto skip_xdp;
+ }
+
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
@@ -812,6 +822,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
}
rcu_read_unlock();
+skip_xdp:
skb = build_skb(buf, buflen);
if (!skb) {
put_page(page);
@@ -893,6 +904,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
dev->stats.rx_length_errors++;
goto err_skb;
}
+
+ if (likely(!vi->xdp_enabled)) {
+ xdp_prog = NULL;
+ goto skip_xdp;
+ }
+
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
@@ -1020,6 +1037,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
rcu_read_unlock();
+skip_xdp:
head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
metasize, headroom);
curr_skb = head_skb;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 142f70670f5c..7a205ddf0060 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2824,7 +2824,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
struct sockaddr *addr = p;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ dev_addr_set(netdev, addr->sa_data);
vmxnet3_write_mac_addr(adapter, addr->sa_data);
return 0;
@@ -3638,7 +3638,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
#endif
vmxnet3_read_mac_addr(adapter, mac);
- memcpy(netdev->dev_addr, mac, netdev->addr_len);
+ dev_addr_set(netdev, mac);
netdev->netdev_ops = &vmxnet3_netdev_ops;
vmxnet3_set_ethtool_ops(netdev);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index bf2fac913942..662e26117353 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1360,8 +1360,6 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
bool is_ndisc = ipv6_ndisc_frame(skb);
- nf_reset_ct(skb);
-
/* loopback, multicast & non-ND link-local traffic; do not push through
* packet taps again. Reset pkt_type for upper layers to process skb.
* For strict packets with a source LLA, determine the dst using the
@@ -1424,8 +1422,6 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
skb->skb_iif = vrf_dev->ifindex;
IPCB(skb)->flags |= IPSKB_L3SLAVE;
- nf_reset_ct(skb);
-
if (ipv4_is_multicast(ip_hdr(skb)->daddr))
goto out;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 5a8df5a195cb..141635a35c28 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -4756,12 +4756,12 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
LIST_HEAD(list);
unsigned int h;
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
}
+ rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
vxlan_destroy_tunnels(net, &list);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 7637edce443e..81e72bc1891f 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1093,7 +1093,9 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
eth_hw_addr_random(dev);
} else {
- *(__be16 *)dev->dev_addr = htons(dlci);
+ __be16 addr = htons(dlci);
+
+ dev_addr_set(dev, (u8 *)&addr);
dlci_to_q922(dev->broadcast, dlci);
}
dev->netdev_ops = &pvc_ops;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 89d31adc3809..282192b82404 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -301,7 +301,7 @@ static int lapbeth_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = addr;
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 49cc4b7ed516..0e9bad33fac8 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1772,9 +1772,8 @@ static const struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */
AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect
SMCWUSBT-G2 */
- AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */
+ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1, TEW444UBEU */
AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */
- AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */
AR5523_DEVICE_UG(0x1435, 0x0826), /* Wistronneweb / AR5523_1 */
AR5523_DEVICE_UX(0x1435, 0x0828), /* Wistronneweb / AR5523_2 */
AR5523_DEVICE_UG(0x0cde, 0x0012), /* Zcom / AR5523 */
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 741289e385d5..ca007b800f75 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -44,7 +44,7 @@ config ATH10K_SNOC
tristate "Qualcomm ath10k SNOC support"
depends on ATH10K
depends on ARCH_QCOM || COMPILE_TEST
- depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y
+ select QCOM_SCM
select QCOM_QMI_HELPERS
help
This module adds support for integrated WCN3990 chip connected
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 2f9be182fbfb..c21e05549f61 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -3224,7 +3224,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ath10k_debug_print_board_info(ar);
}
- device_get_mac_address(ar->dev, ar->mac_addr, sizeof(ar->mac_addr));
+ device_get_mac_address(ar->dev, ar->mac_addr);
ret = ath10k_core_init_firmware_features(ar);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index c272b290fa73..7ca68c81d9b6 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -993,8 +993,12 @@ static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
ath10k_mac_vif_beacon_free(arvif);
if (arvif->beacon_buf) {
- dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
- arvif->beacon_buf, arvif->beacon_paddr);
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+ kfree(arvif->beacon_buf);
+ else
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf,
+ arvif->beacon_paddr);
arvif->beacon_buf = NULL;
}
}
@@ -5576,10 +5580,17 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_AP) {
- arvif->beacon_buf = dma_alloc_coherent(ar->dev,
- IEEE80211_MAX_FRAME_LEN,
- &arvif->beacon_paddr,
- GFP_ATOMIC);
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
+ arvif->beacon_buf = kmalloc(IEEE80211_MAX_FRAME_LEN,
+ GFP_KERNEL);
+ arvif->beacon_paddr = (dma_addr_t)arvif->beacon_buf;
+ } else {
+ arvif->beacon_buf =
+ dma_alloc_coherent(ar->dev,
+ IEEE80211_MAX_FRAME_LEN,
+ &arvif->beacon_paddr,
+ GFP_ATOMIC);
+ }
if (!arvif->beacon_buf) {
ret = -ENOMEM;
ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
@@ -5794,8 +5805,12 @@ err_vdev_delete:
err:
if (arvif->beacon_buf) {
- dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
- arvif->beacon_buf, arvif->beacon_paddr);
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+ kfree(arvif->beacon_buf);
+ else
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf,
+ arvif->beacon_paddr);
arvif->beacon_buf = NULL;
}
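
High-latency (SDIO/USB) ath10k targets fetch the beacon over the bus instead of DMA-ing it from host memory, so the buffer only needs to be ordinary kernel memory; dma_alloc_coherent() stays reserved for low-latency (PCI/SNOC) targets. The essential pairing, condensed from the hunks above — the free path must mirror the allocation:

	/* condensed from this patch: allocator keyed off the bus type */
	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
		buf = kmalloc(IEEE80211_MAX_FRAME_LEN, GFP_KERNEL);
		paddr = (dma_addr_t)buf;	/* no device-visible DMA address on HL buses */
	} else {
		buf = dma_alloc_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
					 &paddr, GFP_ATOMIC);
	}
	/* ... later, free with the matching allocator: kfree() vs dma_free_coherent() */
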
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index b746052737e0..eb705214f3f0 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1363,8 +1363,11 @@ static void ath10k_rx_indication_async_work(struct work_struct *work)
ep->ep_ops.ep_rx_complete(ar, skb);
}
- if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
+ if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
+ local_bh_disable();
napi_schedule(&ar->napi);
+ local_bh_enable();
+ }
}
static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
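
napi_schedule() expects to run with bottom halves disabled: it raises NET_RX_SOFTIRQ, and when called from plain process context (here, a workqueue) nothing would otherwise guarantee the softirq executes promptly. The local_bh_enable() is what actually kicks any softirq raised inside the critical section:

	/* scheduling NAPI from process context */
	local_bh_disable();
	napi_schedule(&ar->napi);
	local_bh_enable();	/* runs the raised NET_RX softirq, if any */
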
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index b8a4bbfe10b8..7c1c2658cb5f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2610,6 +2610,10 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
if (ieee80211_is_beacon(hdr->frame_control))
ath10k_mac_handle_beacon(ar, skb);
+ if (ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))
+ status->boottime_ns = ktime_get_boottime_ns();
+
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
skb, skb->len,
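
Stamping rx_status->boottime_ns at reception lets mac80211/cfg80211 report an accurate age for scan entries built from these frames, rather than timestamping them when they finally reach the stack; CLOCK_BOOTTIME is used because, unlike CLOCK_MONOTONIC, it keeps counting across suspend. Condensed, the rule is to stamp any frame that can create or update a BSS entry:

	/* stamp frames that feed scan results */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		status->boottime_ns = ktime_get_boottime_ns();
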
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 969bf1a590d9..2328e594a96a 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -37,7 +37,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fw = {
.dir = "IPQ8074/hw2.0",
.board_size = 256 * 1024,
- .cal_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
@@ -59,7 +59,17 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
.tcl_0_only = false,
- .spectral_fft_sz = 2,
+
+ .spectral = {
+ .fft_sz = 2,
+ /* HW bug: the expected bin size is 2 bytes, but the HW reports it
+ * as 4 bytes, so a 2-byte pad is added to compensate.
+ */
+ .fft_pad_sz = 2,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 16,
+ .max_fft_bins = 512,
+ },
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -78,7 +88,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fw = {
.dir = "IPQ6018/hw1.0",
.board_size = 256 * 1024,
- .cal_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
},
.max_radios = 2,
.bdf_addr = 0x4ABC0000,
@@ -100,7 +110,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
.tcl_0_only = false,
- .spectral_fft_sz = 4,
+
+ .spectral = {
+ .fft_sz = 4,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 16,
+ .max_fft_bins = 512,
+ },
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -119,7 +136,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fw = {
.dir = "QCA6390/hw2.0",
.board_size = 256 * 1024,
- .cal_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
@@ -141,7 +158,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
.tcl_0_only = true,
- .spectral_fft_sz = 0,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ },
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
@@ -159,7 +183,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fw = {
.dir = "QCN9074/hw1.0",
.board_size = 256 * 1024,
- .cal_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
},
.max_radios = 1,
.single_pdev_only = false,
@@ -180,6 +204,15 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
.tcl_0_only = false,
+
+ .spectral = {
+ .fft_sz = 2,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 16,
+ .fft_hdr_len = 24,
+ .max_fft_bins = 1024,
+ },
+
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT),
@@ -197,7 +230,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fw = {
.dir = "WCN6855/hw2.0",
.board_size = 256 * 1024,
- .cal_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
@@ -219,7 +252,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
.tcl_0_only = true,
- .spectral_fft_sz = 0,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ },
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
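
The scalar spectral_fft_sz becomes a per-chip parameter block because QCN9074 differs from IPQ8074/IPQ6018 in header length, summary padding, and maximum bin count. A hypothetical sketch of how such parameters might combine when sizing a report (not code from this patch; the field usage is an assumption):

	/* hypothetical: per-bin size and total FFT report length */
	u32 bin_sz = ab->hw_params.spectral.fft_sz +
		     ab->hw_params.spectral.fft_pad_sz;	/* pad absorbs the 2-vs-4 byte quirk */
	u32 report_len = ab->hw_params.spectral.fft_hdr_len +
			 num_bins * bin_sz;
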
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 018fb2385f2a..31d234a51c79 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -93,6 +93,8 @@ struct ath11k_skb_rxcb {
bool is_first_msdu;
bool is_last_msdu;
bool is_continuation;
+ bool is_mcbc;
+ bool is_eapol;
struct hal_rx_desc *rx_desc;
u8 err_rel_src;
u8 err_code;
@@ -100,6 +102,8 @@ struct ath11k_skb_rxcb {
u8 unmapped;
u8 is_frag;
u8 tid;
+ u16 peer_id;
+ u16 seq_no;
};
enum ath11k_hw_rev {
@@ -193,7 +197,9 @@ enum ath11k_dev_flags {
};
enum ath11k_monitor_flags {
- ATH11K_FLAG_MONITOR_ENABLED,
+ ATH11K_FLAG_MONITOR_CONF_ENABLED,
+ ATH11K_FLAG_MONITOR_STARTED,
+ ATH11K_FLAG_MONITOR_VDEV_CREATED,
};
struct ath11k_vif {
@@ -362,6 +368,7 @@ struct ath11k_sta {
enum hal_pn_type pn_type;
struct work_struct update_wk;
+ struct work_struct set_4addr_wk;
struct rate_info txrate;
struct rate_info last_txrate;
u64 rx_duration;
@@ -374,12 +381,15 @@ struct ath11k_sta {
/* protected by conf_mutex */
bool aggr_mode;
#endif
+
+ bool use_4addr_set;
+ u16 tcl_metadata;
};
#define ATH11K_MIN_5G_FREQ 4150
-#define ATH11K_MIN_6G_FREQ 5945
+#define ATH11K_MIN_6G_FREQ 5925
#define ATH11K_MAX_6G_FREQ 7115
-#define ATH11K_NUM_CHANS 100
+#define ATH11K_NUM_CHANS 101
#define ATH11K_MAX_5G_CHAN 173
enum ath11k_state {
@@ -484,7 +494,6 @@ struct ath11k {
u32 chan_tx_pwr;
u32 num_stations;
u32 max_num_stations;
- bool monitor_present;
/* To synchronize concurrent synchronous mac80211 callback operations,
* concurrent debugfs configuration and concurrent FW statistics events.
*/
@@ -559,6 +568,7 @@ struct ath11k {
struct ath11k_per_peer_tx_stats cached_stats;
u32 last_ppdu_id;
u32 cached_ppdu_id;
+ int monitor_vdev_id;
#ifdef CONFIG_ATH11K_DEBUGFS
struct ath11k_debug debug;
#endif
@@ -591,6 +601,8 @@ struct ath11k_pdev_cap {
u32 tx_chain_mask_shift;
u32 rx_chain_mask_shift;
struct ath11k_band_cap band[NUM_NL80211_BANDS];
+ bool nss_ratio_enabled;
+ u8 nss_ratio_info;
};
struct ath11k_pdev {
@@ -794,12 +806,15 @@ struct ath11k_fw_stats_pdev {
s32 hw_reaped;
/* Num underruns */
s32 underrun;
+ /* Num hw paused */
+ u32 hw_paused;
/* Num PPDUs cleaned up in TX abort */
s32 tx_abort;
/* Num MPDUs requeued by SW */
s32 mpdus_requeued;
/* excessive retries */
u32 tx_ko;
+ u32 tx_xretry;
/* data hw rate code */
u32 data_rc;
/* Scheduler self triggers */
@@ -820,6 +835,30 @@ struct ath11k_fw_stats_pdev {
u32 phy_underrun;
/* MPDU is more than txop limit */
u32 txop_ovf;
+ /* Num sequences posted */
+ u32 seq_posted;
+ /* Num sequences failed in queueing */
+ u32 seq_failed_queueing;
+ /* Num sequences completed */
+ u32 seq_completed;
+ /* Num sequences restarted */
+ u32 seq_restarted;
+ /* Num of MU sequences posted */
+ u32 mu_seq_posted;
+ /* Num MPDUs flushed by SW, HW pause, or SW TX abort
+ * (reset, channel change)
+ */
+ s32 mpdus_sw_flush;
+ /* Num MPDUs filtered by HW, all filter conditions (TTL expired) */
+ s32 mpdus_hw_filter;
+ /* Num MPDUs truncated by PDG (TXOP, TBTT,
+ * PPDU_duration based on rate, dyn_bw)
+ */
+ s32 mpdus_truncated;
+ /* Num MPDUs that were tried but did not receive an ACK or BA */
+ s32 mpdus_ack_failed;
+ /* Num MPDUs that were dropped due to expiry */
+ s32 mpdus_expired;
/* PDEV RX stats */
/* Cnts any change in ring routing mid-ppdu */
@@ -845,6 +884,8 @@ struct ath11k_fw_stats_pdev {
s32 phy_err_drop;
/* Number of mpdu errors - FCS, MIC, ENC etc. */
s32 mpdu_errs;
+ /* Num overflow errors */
+ s32 rx_ovfl_errs;
};
struct ath11k_fw_stats_vdev {
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
index 5e1f5437b418..fd98ba5b1130 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.c
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -8,8 +8,7 @@
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
struct ath11k_dbring *ring,
- struct ath11k_dbring_element *buff,
- gfp_t gfp)
+ struct ath11k_dbring_element *buff)
{
struct ath11k_base *ab = ar->ab;
struct hal_srng *srng;
@@ -35,7 +34,7 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
goto err;
spin_lock_bh(&ring->idr_lock);
- buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp);
+ buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
spin_unlock_bh(&ring->idr_lock);
if (buf_id < 0) {
ret = -ENOBUFS;
@@ -72,8 +71,7 @@ err:
}
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
- struct ath11k_dbring *ring,
- gfp_t gfp)
+ struct ath11k_dbring *ring)
{
struct ath11k_dbring_element *buff;
struct hal_srng *srng;
@@ -92,11 +90,11 @@ static int ath11k_dbring_fill_bufs(struct ath11k *ar,
size = sizeof(*buff) + ring->buf_sz + align - 1;
while (num_remain > 0) {
- buff = kzalloc(size, gfp);
+ buff = kzalloc(size, GFP_ATOMIC);
if (!buff)
break;
- ret = ath11k_dbring_bufs_replenish(ar, ring, buff, gfp);
+ ret = ath11k_dbring_bufs_replenish(ar, ring, buff);
if (ret) {
ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
num_remain, req_entries);
@@ -176,7 +174,7 @@ int ath11k_dbring_buf_setup(struct ath11k *ar,
ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);
- ret = ath11k_dbring_fill_bufs(ar, ring, GFP_KERNEL);
+ ret = ath11k_dbring_fill_bufs(ar, ring);
return ret;
}
@@ -322,7 +320,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
}
memset(buff, 0, size);
- ath11k_dbring_bufs_replenish(ar, ring, buff, GFP_ATOMIC);
+ ath11k_dbring_bufs_replenish(ar, ring, buff);
}
spin_unlock_bh(&srng->lock);
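
Dropping the gfp_t argument removes a false degree of freedom: the idr_alloc() call sits inside a spin_lock_bh() section, where a sleeping GFP_KERNEL allocation is never legal, so GFP_ATOMIC is now hardcoded at both call sites. The constraint in miniature:

	spin_lock_bh(&ring->idr_lock);
	/* under a spinlock: no sleeping, so only GFP_ATOMIC is valid here */
	buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
	spin_unlock_bh(&ring->idr_lock);
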
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 554feaf1ed5c..17f0bbbac7ae 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -902,7 +902,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 rx_filter = 0, ring_id, filter, mode;
u8 buf[128] = {0};
- int i, ret;
+ int i, ret, rx_buf_sz = 0;
ssize_t rc;
mutex_lock(&ar->conf_mutex);
@@ -940,6 +940,17 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
}
}
+ /* Clear rx filter set for monitor mode and rx status */
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ rx_buf_sz, &tlv_filter);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto out;
+ }
+ }
#define HTT_RX_FILTER_TLV_LITE_MODE \
(HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
@@ -955,6 +966,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
HTT_RX_FILTER_TLV_FLAGS_MPDU_END |
HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER |
HTT_RX_FILTER_TLV_FLAGS_ATTENTION;
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
} else if (mode == ATH11K_PKTLOG_MODE_LITE) {
ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
HTT_PPDU_STATS_TAG_PKTLOG);
@@ -964,7 +976,12 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
}
rx_filter = HTT_RX_FILTER_TLV_LITE_MODE;
+ rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
} else {
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+ rx_filter = tlv_filter.rx_filter;
+
ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
HTT_PPDU_STATS_TAG_DEFAULT);
if (ret) {
@@ -988,7 +1005,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
ar->dp.mac_id + i,
HAL_RXDMA_MONITOR_STATUS,
- DP_RX_BUFFER_SIZE, &tlv_filter);
+ rx_buf_sz, &tlv_filter);
if (ret) {
ath11k_warn(ab, "failed to set rx filter for monitor status ring\n");
@@ -996,8 +1013,8 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
}
}
- ath11k_dbg(ab, ATH11K_DBG_WMI, "pktlog filter %d mode %s\n",
- filter, ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
+ ath11k_info(ab, "pktlog mode %s\n",
+ ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
ar->debug.pktlog_filter = filter;
ar->debug.pktlog_mode = mode;
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index e5346af71f24..ec743a015dc7 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -38,6 +38,10 @@ enum ath11k_dbg_htt_ext_stats_type {
ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
ATH11K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
ATH11K_DBG_HTT_EXT_STATS_RING_BACKPRESSURE_STATS = 24,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS = 29,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF_STATS = 31,
+ ATH11K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32,
+ ATH11K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
/* keep this last */
ATH11K_DBG_HTT_NUM_EXT_STATS,
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
index 9e0c90da99d3..4484235bcda4 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -10,23 +10,28 @@
#include "debug.h"
#include "debugfs_htt_stats.h"
-#define HTT_DBG_OUT(buf, len, fmt, ...) \
- scnprintf(buf, len, fmt "\n", ##__VA_ARGS__)
-
-#define HTT_MAX_STRING_LEN 256
#define HTT_MAX_PRINT_CHAR_PER_ELEM 15
#define HTT_TLV_HDR_LEN 4
-#define ARRAY_TO_STRING(out, arr, len) \
+#define PRINT_ARRAY_TO_BUF(out, buflen, arr, str, len, newline) \
do { \
- int index = 0; u8 i; \
+ int index = 0; u8 i; const char *str_val = str; \
+ const char *new_line = newline; \
+ if (str_val) { \
+ index += scnprintf((out + buflen), \
+ (ATH11K_HTT_STATS_BUF_SIZE - buflen), \
+ "%s = ", str_val); \
+ } \
for (i = 0; i < len; i++) { \
- index += scnprintf(out + index, HTT_MAX_STRING_LEN - index, \
- " %u:%u,", i, arr[i]); \
- if (index < 0 || index >= HTT_MAX_STRING_LEN) \
- break; \
+ index += scnprintf((out + buflen) + index, \
+ (ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \
+ " %u:%u,", i, arr[i]); \
} \
+ index += scnprintf((out + buflen) + index, \
+ (ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \
+ "%s", new_line); \
+ buflen += index; \
} while (0)
static inline void htt_print_stats_string_tlv(const void *tag_buf,
@@ -38,22 +43,20 @@ static inline void htt_print_stats_string_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 i;
- u16 index = 0;
- char data[HTT_MAX_STRING_LEN] = {0};
tag_len = tag_len >> 2;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "data = ");
for (i = 0; i < tag_len; i++) {
- index += scnprintf(&data[index],
- HTT_MAX_STRING_LEN - index,
- "%.*s", 4, (char *)&(htt_stats_buf->data[i]));
- if (index >= HTT_MAX_STRING_LEN)
- break;
+ len += scnprintf(buf + len,
+ buf_len - len,
+ "%.*s", 4, (char *)&(htt_stats_buf->data[i]));
}
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "data = %s\n", data);
+ /* Newlines are added for better display */
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -71,107 +74,107 @@ static inline void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_queued = %u",
- htt_stats_buf->hw_queued);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_reaped = %u",
- htt_stats_buf->hw_reaped);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun = %u",
- htt_stats_buf->underrun);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_paused = %u",
- htt_stats_buf->hw_paused);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_flush = %u",
- htt_stats_buf->hw_flush);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_filt = %u",
- htt_stats_buf->hw_filt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u",
- htt_stats_buf->tx_abort);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_requeued = %u",
- htt_stats_buf->mpdu_requeued);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_xretry = %u",
- htt_stats_buf->tx_xretry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "data_rc = %u",
- htt_stats_buf->data_rc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_dropped_xretry = %u",
- htt_stats_buf->mpdu_dropped_xretry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "illegal_rate_phy_err = %u",
- htt_stats_buf->illgl_rate_phy_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cont_xretry = %u",
- htt_stats_buf->cont_xretry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_timeout = %u",
- htt_stats_buf->tx_timeout);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_resets = %u",
- htt_stats_buf->pdev_resets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_underrun = %u",
- htt_stats_buf->phy_underrun);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "txop_ovf = %u",
- htt_stats_buf->txop_ovf);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_posted = %u",
- htt_stats_buf->seq_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_failed_queueing = %u",
- htt_stats_buf->seq_failed_queueing);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_completed = %u",
- htt_stats_buf->seq_completed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_restarted = %u",
- htt_stats_buf->seq_restarted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_seq_posted = %u",
- htt_stats_buf->mu_seq_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_switch_hw_paused = %u",
- htt_stats_buf->seq_switch_hw_paused);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "next_seq_posted_dsr = %u",
- htt_stats_buf->next_seq_posted_dsr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_posted_isr = %u",
- htt_stats_buf->seq_posted_isr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_ctrl_cached = %u",
- htt_stats_buf->seq_ctrl_cached);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_count_tqm = %u",
- htt_stats_buf->mpdu_count_tqm);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_count_tqm = %u",
- htt_stats_buf->msdu_count_tqm);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_removed_tqm = %u",
- htt_stats_buf->mpdu_removed_tqm);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_removed_tqm = %u",
- htt_stats_buf->msdu_removed_tqm);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_sw_flush = %u",
- htt_stats_buf->mpdus_sw_flush);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_hw_filter = %u",
- htt_stats_buf->mpdus_hw_filter);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_truncated = %u",
- htt_stats_buf->mpdus_truncated);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_ack_failed = %u",
- htt_stats_buf->mpdus_ack_failed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_expired = %u",
- htt_stats_buf->mpdus_expired);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u",
- htt_stats_buf->mpdus_seq_hw_retry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
- htt_stats_buf->ack_tlv_proc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u",
- htt_stats_buf->coex_abort_mpdu_cnt_valid);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u",
- htt_stats_buf->coex_abort_mpdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u",
- htt_stats_buf->num_total_ppdus_tried_ota);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u",
- htt_stats_buf->num_data_ppdus_tried_ota);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u",
- htt_stats_buf->local_ctrl_mgmt_enqued);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u",
- htt_stats_buf->local_ctrl_mgmt_freed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_data_enqued = %u",
- htt_stats_buf->local_data_enqued);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_data_freed = %u",
- htt_stats_buf->local_data_freed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_tried = %u",
- htt_stats_buf->mpdu_tried);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "isr_wait_seq_posted = %u",
- htt_stats_buf->isr_wait_seq_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_active_dur_us_low = %u",
- htt_stats_buf->tx_active_dur_us_low);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n",
- htt_stats_buf->tx_active_dur_us_high);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+ htt_stats_buf->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+ htt_stats_buf->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "underrun = %u\n",
+ htt_stats_buf->underrun);
+ len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n",
+ htt_stats_buf->hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n",
+ htt_stats_buf->hw_flush);
+ len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n",
+ htt_stats_buf->hw_filt);
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ htt_stats_buf->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n",
+ htt_stats_buf->mpdu_requeued);
+ len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n",
+ htt_stats_buf->tx_xretry);
+ len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n",
+ htt_stats_buf->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n",
+ htt_stats_buf->mpdu_dropped_xretry);
+ len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n",
+ htt_stats_buf->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n",
+ htt_stats_buf->cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n",
+ htt_stats_buf->tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n",
+ htt_stats_buf->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n",
+ htt_stats_buf->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n",
+ htt_stats_buf->txop_ovf);
+ len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n",
+ htt_stats_buf->seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n",
+ htt_stats_buf->seq_failed_queueing);
+ len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n",
+ htt_stats_buf->seq_completed);
+ len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n",
+ htt_stats_buf->seq_restarted);
+ len += scnprintf(buf + len, buf_len - len, "mu_seq_posted = %u\n",
+ htt_stats_buf->mu_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n",
+ htt_stats_buf->seq_switch_hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n",
+ htt_stats_buf->next_seq_posted_dsr);
+ len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n",
+ htt_stats_buf->seq_posted_isr);
+ len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n",
+ htt_stats_buf->seq_ctrl_cached);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n",
+ htt_stats_buf->mpdu_count_tqm);
+ len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n",
+ htt_stats_buf->msdu_count_tqm);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n",
+ htt_stats_buf->mpdu_removed_tqm);
+ len += scnprintf(buf + len, buf_len - len, "msdu_removed_tqm = %u\n",
+ htt_stats_buf->msdu_removed_tqm);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n",
+ htt_stats_buf->mpdus_sw_flush);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+ htt_stats_buf->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n",
+ htt_stats_buf->mpdus_truncated);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n",
+ htt_stats_buf->mpdus_ack_failed);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n",
+ htt_stats_buf->mpdus_expired);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n",
+ htt_stats_buf->mpdus_seq_hw_retry);
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ htt_stats_buf->ack_tlv_proc);
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n",
+ htt_stats_buf->coex_abort_mpdu_cnt_valid);
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n",
+ htt_stats_buf->coex_abort_mpdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n",
+ htt_stats_buf->num_total_ppdus_tried_ota);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n",
+ htt_stats_buf->num_data_ppdus_tried_ota);
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n",
+ htt_stats_buf->local_ctrl_mgmt_enqued);
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n",
+ htt_stats_buf->local_ctrl_mgmt_freed);
+ len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n",
+ htt_stats_buf->local_data_enqued);
+ len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n",
+ htt_stats_buf->local_data_freed);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n",
+ htt_stats_buf->mpdu_tried);
+ len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n",
+ htt_stats_buf->isr_wait_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n",
+ htt_stats_buf->tx_active_dur_us_low);
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n\n",
+ htt_stats_buf->tx_active_dur_us_high);
if (len >= buf_len)
buf[buf_len - 1] = 0;
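
These conversions replace open-coded mask-and-shift extraction (e.g. mac_id__word & 0xFF) with FIELD_GET() against named masks; since GENMASK() produces an unsigned long, FIELD_GET() yields an unsigned long, which is why the format strings change from %u to %lu. A self-contained sketch (the mask name here is a stand-in; the real HTT_STATS_MAC_ID is defined in the driver headers):

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define EXAMPLE_MAC_ID	GENMASK(7, 0)	/* stand-in mask, bits 7:0 */

	static unsigned long example_get_mac_id(u32 mac_id__word)
	{
		/* masks and shifts in one step; the mask must be a compile-time constant */
		return FIELD_GET(EXAMPLE_MAC_ID, mac_id__word);
	}
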
@@ -190,13 +193,12 @@ htt_print_tx_pdev_stats_urrn_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char urrn_stats[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_URRN_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:\n");
- ARRAY_TO_STRING(urrn_stats, htt_stats_buf->urrn_stats, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "urrn_stats = %s\n", urrn_stats);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->urrn_stats, "urrn_stats",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -215,13 +217,12 @@ htt_print_tx_pdev_stats_flush_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char flush_errs[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_FLUSH_REASON_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:\n");
- ARRAY_TO_STRING(flush_errs, htt_stats_buf->flush_errs, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_errs = %s\n", flush_errs);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->flush_errs, "flush_errs",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -240,14 +241,12 @@ htt_print_tx_pdev_stats_sifs_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sifs_status[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:\n");
- ARRAY_TO_STRING(sifs_status, htt_stats_buf->sifs_status, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sifs_status = %s\n",
- sifs_status);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_status, "sifs_status",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -266,13 +265,12 @@ htt_print_tx_pdev_stats_phy_err_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char phy_errs[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_PHY_ERR_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:\n");
- ARRAY_TO_STRING(phy_errs, htt_stats_buf->phy_errs, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_errs = %s\n", phy_errs);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_errs, "phy_errs",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -291,15 +289,13 @@ htt_print_tx_pdev_stats_sifs_hist_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sifs_hist_status[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:\n");
- ARRAY_TO_STRING(sifs_hist_status, htt_stats_buf->sifs_hist_status, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sifs_hist_status = %s\n",
- sifs_hist_status);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_hist_status,
+ "sifs_hist_status", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -318,23 +314,23 @@ htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:\n");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u",
- htt_stats_buf->num_data_ppdus_legacy_su);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u\n",
+ htt_stats_buf->num_data_ppdus_legacy_su);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u",
- htt_stats_buf->num_data_ppdus_ac_su);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u\n",
+ htt_stats_buf->num_data_ppdus_ac_su);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u",
- htt_stats_buf->num_data_ppdus_ax_su);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u\n",
+ htt_stats_buf->num_data_ppdus_ax_su);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u",
- htt_stats_buf->num_data_ppdus_ac_su_txbf);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u\n",
+ htt_stats_buf->num_data_ppdus_ac_su_txbf);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n",
- htt_stats_buf->num_data_ppdus_ax_su_txbf);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n\n",
+ htt_stats_buf->num_data_ppdus_ax_su_txbf);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -353,25 +349,15 @@ htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char tried_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
u32 num_elements = ((tag_len - sizeof(htt_stats_buf->hist_bin_size)) >> 2);
- u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u",
- htt_stats_buf->hist_bin_size);
-
- if (required_buffer_size < HTT_MAX_STRING_LEN) {
- ARRAY_TO_STRING(tried_mpdu_cnt_hist,
- htt_stats_buf->tried_mpdu_cnt_hist,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tried_mpdu_cnt_hist = %s\n",
- tried_mpdu_cnt_hist);
- } else {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "INSUFFICIENT PRINT BUFFER\n");
- }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n",
+ htt_stats_buf->hist_bin_size);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist,
+ "tried_mpdu_cnt_hist", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -390,14 +376,14 @@ static inline void htt_print_hw_stats_intr_misc_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n");
memcpy(hw_intr_name, &(htt_stats_buf->hw_intr_name[0]),
HTT_STATS_MAX_HW_INTR_NAME_LEN);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_intr_name = %s ", hw_intr_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mask = %u",
- htt_stats_buf->mask);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u\n",
- htt_stats_buf->count);
+ len += scnprintf(buf + len, buf_len - len, "hw_intr_name = %s\n", hw_intr_name);
+ len += scnprintf(buf + len, buf_len - len, "mask = %u\n",
+ htt_stats_buf->mask);
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
+ htt_stats_buf->count);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -417,13 +403,13 @@ htt_print_hw_stats_wd_timeout_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:\n");
memcpy(hw_module_name, &(htt_stats_buf->hw_module_name[0]),
HTT_STATS_MAX_HW_MODULE_NAME_LEN);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_module_name = %s ",
- hw_module_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u",
- htt_stats_buf->count);
+ len += scnprintf(buf + len, buf_len - len, "hw_module_name = %s\n",
+ hw_module_name);
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n",
+ htt_stats_buf->count);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -441,29 +427,29 @@ static inline void htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u",
- htt_stats_buf->tx_abort);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort_fail_count = %u",
- htt_stats_buf->tx_abort_fail_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_abort = %u",
- htt_stats_buf->rx_abort);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_abort_fail_count = %u",
- htt_stats_buf->rx_abort_fail_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "warm_reset = %u",
- htt_stats_buf->warm_reset);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cold_reset = %u",
- htt_stats_buf->cold_reset);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_flush = %u",
- htt_stats_buf->tx_flush);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_glb_reset = %u",
- htt_stats_buf->tx_glb_reset);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_txq_reset = %u",
- htt_stats_buf->tx_txq_reset);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_timeout_reset = %u\n",
- htt_stats_buf->rx_timeout_reset);
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ htt_stats_buf->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n",
+ htt_stats_buf->tx_abort_fail_count);
+ len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n",
+ htt_stats_buf->rx_abort);
+ len += scnprintf(buf + len, buf_len - len, "rx_abort_fail_count = %u\n",
+ htt_stats_buf->rx_abort_fail_count);
+ len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n",
+ htt_stats_buf->warm_reset);
+ len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n",
+ htt_stats_buf->cold_reset);
+ len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n",
+ htt_stats_buf->tx_flush);
+ len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n",
+ htt_stats_buf->tx_glb_reset);
+ len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n",
+ htt_stats_buf->tx_txq_reset);
+ len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n\n",
+ htt_stats_buf->rx_timeout_reset);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -481,35 +467,36 @@ static inline void htt_print_msdu_flow_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_update_timestamp = %u",
- htt_stats_buf->last_update_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_add_timestamp = %u",
- htt_stats_buf->last_add_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_remove_timestamp = %u",
- htt_stats_buf->last_remove_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "total_processed_msdu_count = %u",
- htt_stats_buf->total_processed_msdu_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u",
- htt_stats_buf->cur_msdu_count_in_flowq);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
- htt_stats_buf->sw_peer_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_flow_no = %u",
- htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
- (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xF0000) >>
- 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "drop_rule = %u",
- (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0x100000) >>
- 20);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_enqueue_count = %u",
- htt_stats_buf->last_cycle_enqueue_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_dequeue_count = %u",
- htt_stats_buf->last_cycle_dequeue_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_drop_count = %u",
- htt_stats_buf->last_cycle_drop_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "current_drop_th = %u\n",
- htt_stats_buf->current_drop_th);
+ len += scnprintf(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "last_update_timestamp = %u\n",
+ htt_stats_buf->last_update_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "last_add_timestamp = %u\n",
+ htt_stats_buf->last_add_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "last_remove_timestamp = %u\n",
+ htt_stats_buf->last_remove_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "total_processed_msdu_count = %u\n",
+ htt_stats_buf->total_processed_msdu_count);
+ len += scnprintf(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u\n",
+ htt_stats_buf->cur_msdu_count_in_flowq);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+ htt_stats_buf->sw_peer_id);
+ len += scnprintf(buf + len, buf_len - len, "tx_flow_no = %lu\n",
+ FIELD_GET(HTT_MSDU_FLOW_STATS_TX_FLOW_NO,
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_MSDU_FLOW_STATS_TID_NUM,
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule));
+ len += scnprintf(buf + len, buf_len - len, "drop_rule = %lu\n",
+ FIELD_GET(HTT_MSDU_FLOW_STATS_DROP_RULE,
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule));
+ len += scnprintf(buf + len, buf_len - len, "last_cycle_enqueue_count = %u\n",
+ htt_stats_buf->last_cycle_enqueue_count);
+ len += scnprintf(buf + len, buf_len - len, "last_cycle_dequeue_count = %u\n",
+ htt_stats_buf->last_cycle_dequeue_count);
+ len += scnprintf(buf + len, buf_len - len, "last_cycle_drop_count = %u\n",
+ htt_stats_buf->last_cycle_drop_count);
+ len += scnprintf(buf + len, buf_len - len, "current_drop_th = %u\n\n",
+ htt_stats_buf->current_drop_th);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -528,38 +515,41 @@ static inline void htt_print_tx_tid_stats_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char tid_name[MAX_HTT_TID_NAME + 1] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:\n");
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
- htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
- (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sched_pending = %u",
- htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ppdu_in_hwq = %u",
- (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq &
- 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_flags = 0x%x",
- htt_stats_buf->tid_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_queued = %u",
- htt_stats_buf->hw_queued);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_reaped = %u",
- htt_stats_buf->hw_reaped);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_hw_filter = %u",
- htt_stats_buf->mpdus_hw_filter);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_bytes = %u",
- htt_stats_buf->qdepth_bytes);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_msdu = %u",
- htt_stats_buf->qdepth_num_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_mpdu = %u",
- htt_stats_buf->qdepth_num_mpdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_scheduled_tsmp = %u",
- htt_stats_buf->last_scheduled_tsmp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_module_id = %u",
- htt_stats_buf->pause_module_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "block_module_id = %u\n",
- htt_stats_buf->block_module_id);
+ len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_SW_PEER_ID,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_TID_NUM,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_NUM_SCHED_PENDING,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n",
+ htt_stats_buf->tid_flags);
+ len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+ htt_stats_buf->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+ htt_stats_buf->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+ htt_stats_buf->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n",
+ htt_stats_buf->qdepth_bytes);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n",
+ htt_stats_buf->qdepth_num_msdu);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n",
+ htt_stats_buf->pause_module_id);
+ len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n\n",
+ htt_stats_buf->block_module_id);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -578,42 +568,45 @@ static inline void htt_print_tx_tid_stats_v1_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char tid_name[MAX_HTT_TID_NAME + 1] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:\n");
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
- htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
- (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sched_pending = %u",
- htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ppdu_in_hwq = %u",
- (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq &
- 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_flags = 0x%x",
- htt_stats_buf->tid_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "max_qdepth_bytes = %u",
- htt_stats_buf->max_qdepth_bytes);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "max_qdepth_n_msdus = %u",
- htt_stats_buf->max_qdepth_n_msdus);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rsvd = %u",
- htt_stats_buf->rsvd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_bytes = %u",
- htt_stats_buf->qdepth_bytes);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_msdu = %u",
- htt_stats_buf->qdepth_num_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_mpdu = %u",
- htt_stats_buf->qdepth_num_mpdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_scheduled_tsmp = %u",
- htt_stats_buf->last_scheduled_tsmp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_module_id = %u",
- htt_stats_buf->pause_module_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "block_module_id = %u",
- htt_stats_buf->block_module_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "allow_n_flags = 0x%x",
- htt_stats_buf->allow_n_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sendn_frms_allowed = %u\n",
- htt_stats_buf->sendn_frms_allowed);
+ len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_SW_PEER_ID,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_TID_NUM,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n",
+ htt_stats_buf->tid_flags);
+ len += scnprintf(buf + len, buf_len - len, "max_qdepth_bytes = %u\n",
+ htt_stats_buf->max_qdepth_bytes);
+ len += scnprintf(buf + len, buf_len - len, "max_qdepth_n_msdus = %u\n",
+ htt_stats_buf->max_qdepth_n_msdus);
+ len += scnprintf(buf + len, buf_len - len, "rsvd = %u\n",
+ htt_stats_buf->rsvd);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n",
+ htt_stats_buf->qdepth_bytes);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n",
+ htt_stats_buf->qdepth_num_msdu);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n",
+ htt_stats_buf->pause_module_id);
+ len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n",
+ htt_stats_buf->block_module_id);
+ len += scnprintf(buf + len, buf_len - len, "allow_n_flags = 0x%x\n",
+ htt_stats_buf->allow_n_flags);
+ len += scnprintf(buf + len, buf_len - len, "sendn_frms_allowed = %u\n\n",
+ htt_stats_buf->sendn_frms_allowed);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -632,21 +625,23 @@ static inline void htt_print_rx_tid_stats_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char tid_name[MAX_HTT_TID_NAME + 1] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
- htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
- (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
+ FIELD_GET(HTT_RX_TID_STATS_SW_PEER_ID,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_RX_TID_STATS_TID_NUM,
+ htt_stats_buf->sw_peer_id__tid_num));
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_in_reorder = %u",
- htt_stats_buf->dup_in_reorder);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_past_outside_window = %u",
- htt_stats_buf->dup_past_outside_window);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_past_within_window = %u",
- htt_stats_buf->dup_past_within_window);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n",
- htt_stats_buf->rxdesc_err_decrypt);
+ len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+ len += scnprintf(buf + len, buf_len - len, "dup_in_reorder = %u\n",
+ htt_stats_buf->dup_in_reorder);
+ len += scnprintf(buf + len, buf_len - len, "dup_past_outside_window = %u\n",
+ htt_stats_buf->dup_past_outside_window);
+ len += scnprintf(buf + len, buf_len - len, "dup_past_within_window = %u\n",
+ htt_stats_buf->dup_past_within_window);
+ len += scnprintf(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n\n",
+ htt_stats_buf->rxdesc_err_decrypt);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -663,16 +658,14 @@ static inline void htt_print_counter_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char counter_name[HTT_MAX_STRING_LEN] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_COUNTER_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_COUNTER_TLV:\n");
- ARRAY_TO_STRING(counter_name,
- htt_stats_buf->counter_name,
- HTT_MAX_COUNTER_NAME);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "counter_name = %s ", counter_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u\n",
- htt_stats_buf->count);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->counter_name,
+ "counter_name",
+ HTT_MAX_COUNTER_NAME, "\n");
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
+ htt_stats_buf->count);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -690,35 +683,35 @@ static inline void htt_print_peer_stats_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ppdu_cnt = %u",
- htt_stats_buf->ppdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt = %u",
- htt_stats_buf->mpdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_cnt = %u",
- htt_stats_buf->msdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_bitmap = %u",
- htt_stats_buf->pause_bitmap);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "block_bitmap = %u",
- htt_stats_buf->block_bitmap);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_rssi = %d",
- htt_stats_buf->rssi);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueued_count = %llu",
- htt_stats_buf->peer_enqueued_count_low |
- ((u64)htt_stats_buf->peer_enqueued_count_high << 32));
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dequeued_count = %llu",
- htt_stats_buf->peer_dequeued_count_low |
- ((u64)htt_stats_buf->peer_dequeued_count_high << 32));
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dropped_count = %llu",
- htt_stats_buf->peer_dropped_count_low |
- ((u64)htt_stats_buf->peer_dropped_count_high << 32));
- len += HTT_DBG_OUT(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu",
- htt_stats_buf->ppdu_transmitted_bytes_low |
- ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32));
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ttl_removed_count = %u",
- htt_stats_buf->peer_ttl_removed_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "inactive_time = %u\n",
- htt_stats_buf->inactive_time);
+ len += scnprintf(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ppdu_cnt = %u\n",
+ htt_stats_buf->ppdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt = %u\n",
+ htt_stats_buf->mpdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "msdu_cnt = %u\n",
+ htt_stats_buf->msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "pause_bitmap = %u\n",
+ htt_stats_buf->pause_bitmap);
+ len += scnprintf(buf + len, buf_len - len, "block_bitmap = %u\n",
+ htt_stats_buf->block_bitmap);
+ len += scnprintf(buf + len, buf_len - len, "last_rssi = %d\n",
+ htt_stats_buf->rssi);
+ len += scnprintf(buf + len, buf_len - len, "enqueued_count = %llu\n",
+ htt_stats_buf->peer_enqueued_count_low |
+ ((u64)htt_stats_buf->peer_enqueued_count_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "dequeued_count = %llu\n",
+ htt_stats_buf->peer_dequeued_count_low |
+ ((u64)htt_stats_buf->peer_dequeued_count_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "dropped_count = %llu\n",
+ htt_stats_buf->peer_dropped_count_low |
+ ((u64)htt_stats_buf->peer_dropped_count_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu\n",
+ htt_stats_buf->ppdu_transmitted_bytes_low |
+ ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "ttl_removed_count = %u\n",
+ htt_stats_buf->peer_ttl_removed_count);
+ len += scnprintf(buf + len, buf_len - len, "inactive_time = %u\n\n",
+ htt_stats_buf->inactive_time);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -736,29 +729,38 @@ static inline void htt_print_peer_details_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_type = %u",
- htt_stats_buf->peer_type);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
- htt_stats_buf->sw_peer_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "vdev_id = %u",
- htt_stats_buf->vdev_pdev_ast_idx & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
- (htt_stats_buf->vdev_pdev_ast_idx & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ast_idx = %u",
- (htt_stats_buf->vdev_pdev_ast_idx & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x",
- htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF,
- (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF00) >> 8,
- (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF0000) >> 16,
- (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF000000) >> 24,
- (htt_stats_buf->mac_addr.mac_addr_h16 & 0xFF),
- (htt_stats_buf->mac_addr.mac_addr_h16 & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_flags = 0x%x",
- htt_stats_buf->peer_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qpeer_flags = 0x%x\n",
- htt_stats_buf->qpeer_flags);
+ len += scnprintf(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "peer_type = %u\n",
+ htt_stats_buf->peer_type);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+ htt_stats_buf->sw_peer_id);
+ len += scnprintf(buf + len, buf_len - len, "vdev_id = %lu\n",
+ FIELD_GET(HTT_PEER_DETAILS_VDEV_ID,
+ htt_stats_buf->vdev_pdev_ast_idx));
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %lu\n",
+ FIELD_GET(HTT_PEER_DETAILS_PDEV_ID,
+ htt_stats_buf->vdev_pdev_ast_idx));
+ len += scnprintf(buf + len, buf_len - len, "ast_idx = %lu\n",
+ FIELD_GET(HTT_PEER_DETAILS_AST_IDX,
+ htt_stats_buf->vdev_pdev_ast_idx));
+ len += scnprintf(buf + len, buf_len - len,
+ "mac_addr = %02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n",
+ FIELD_GET(HTT_MAC_ADDR_L32_0,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_1,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_2,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_3,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_H16_0,
+ htt_stats_buf->mac_addr.mac_addr_h16),
+ FIELD_GET(HTT_MAC_ADDR_H16_1,
+ htt_stats_buf->mac_addr.mac_addr_h16));
+ len += scnprintf(buf + len, buf_len - len, "peer_flags = 0x%x\n",
+ htt_stats_buf->peer_flags);
+ len += scnprintf(buf + len, buf_len - len, "qpeer_flags = 0x%x\n\n",
+ htt_stats_buf->qpeer_flags);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -775,74 +777,40 @@ static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char str_buf[HTT_MAX_STRING_LEN] = {0};
- char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
u8 j;
- for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
- tx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!tx_gi[j])
- goto fail;
- }
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_ldpc = %u",
- htt_stats_buf->tx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
- htt_stats_buf->rts_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_rssi = %u",
- htt_stats_buf->ack_rssi);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_su_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_su_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mu_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mu_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf,
- htt_stats_buf->tx_nss,
- HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf,
- htt_stats_buf->tx_bw,
- HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_bw = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_stbc,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_stbc = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_pream,
- HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_pream = %s ", str_buf);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
+ htt_stats_buf->tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
+ htt_stats_buf->ack_rssi);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_su_mcs, "tx_su_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mu_mcs, "tx_mu_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream",
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(tx_gi[j],
- htt_stats_buf->tx_gi[j],
- HTT_TX_PEER_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_gi[%u] = %s ",
- j, tx_gi[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL,
+ HTT_TX_PEER_STATS_NUM_MCS_COUNTERS, "\n");
}
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf,
- htt_stats_buf->tx_dcm,
- HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_dcm = %s\n", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm",
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -850,10 +818,6 @@ static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf,
buf[len] = 0;
stats_req->buf_len = len;
-
-fail:
- for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++)
- kfree(tx_gi[j]);
}
static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
@@ -864,79 +828,48 @@ static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 j;
- char *rssi_chain[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS] = {NULL};
- char *rx_gi[HTT_RX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
- char str_buf[HTT_MAX_STRING_LEN] = {0};
-
- for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
- rssi_chain[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!rssi_chain[j])
- goto fail;
- }
-
- for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
- rx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!rx_gi[j])
- goto fail;
- }
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "nsts = %u",
- htt_stats_buf->nsts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ldpc = %u",
- htt_stats_buf->rx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
- htt_stats_buf->rts_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_mgmt = %u",
- htt_stats_buf->rssi_mgmt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_data = %u",
- htt_stats_buf->rssi_data);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_comb = %u",
- htt_stats_buf->rssi_comb);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_mcs,
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_nss,
- HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_dcm,
- HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_dcm = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_stbc,
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_stbc = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_bw,
- HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_bw = %s ", str_buf);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
+ htt_stats_buf->nsts);
+ len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+ htt_stats_buf->rx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
+ htt_stats_buf->rssi_mgmt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
+ htt_stats_buf->rssi_data);
+ len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
+ htt_stats_buf->rssi_comb);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss",
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm",
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw",
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
- ARRAY_TO_STRING(rssi_chain[j], htt_stats_buf->rssi_chain[j],
- HTT_RX_PEER_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_chain[%u] = %s ",
- j, rssi_chain[j]);
+ len += scnprintf(buf + len, (buf_len - len),
+ "rssi_chain[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL,
+ HTT_RX_PEER_STATS_NUM_BW_COUNTERS, "\n");
}
for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->rx_gi[j],
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_gi[%u] = %s ",
- j, rx_gi[j]);
+ len += scnprintf(buf + len, (buf_len - len),
+ "rx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_pream,
- HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_pream = %s\n", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream",
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -944,13 +877,6 @@ static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
buf[len] = 0;
stats_req->buf_len = len;
-
-fail:
- for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++)
- kfree(rssi_chain[j]);
-
- for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++)
- kfree(rx_gi[j]);
}
static inline void
@@ -962,13 +888,13 @@ htt_print_tx_hwq_mu_mimo_sch_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_posted = %u",
- htt_stats_buf->mu_mimo_sch_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_failed = %u",
- htt_stats_buf->mu_mimo_sch_failed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
- htt_stats_buf->mu_mimo_ppdu_posted);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -987,22 +913,22 @@ htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u",
- htt_stats_buf->mu_mimo_mpdus_queued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u",
- htt_stats_buf->mu_mimo_mpdus_tried_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u",
- htt_stats_buf->mu_mimo_mpdus_failed_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u",
- htt_stats_buf->mu_mimo_mpdus_requeued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u",
- htt_stats_buf->mu_mimo_err_no_ba_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u",
- htt_stats_buf->mu_mimo_mpdu_underrun_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n",
- htt_stats_buf->mu_mimo_ampdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u\n",
+ htt_stats_buf->mu_mimo_err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n\n",
+ htt_stats_buf->mu_mimo_ampdu_underrun_usr);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1021,11 +947,13 @@ htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__hwq_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwq_id = %u\n",
- (htt_stats_buf->mac_id__hwq_id__word & 0xFF00) >> 8);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1044,51 +972,53 @@ htt_print_tx_hwq_stats_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
/* TODO: HKDBG */
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__hwq_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwq_id = %u",
- (htt_stats_buf->mac_id__hwq_id__word & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "xretry = %u",
- htt_stats_buf->xretry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun_cnt = %u",
- htt_stats_buf->underrun_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cnt = %u",
- htt_stats_buf->flush_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "filt_cnt = %u",
- htt_stats_buf->filt_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "null_mpdu_bmap = %u",
- htt_stats_buf->null_mpdu_bmap);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "user_ack_failure = %u",
- htt_stats_buf->user_ack_failure);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
- htt_stats_buf->ack_tlv_proc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_id_proc = %u",
- htt_stats_buf->sched_id_proc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "null_mpdu_tx_count = %u",
- htt_stats_buf->null_mpdu_tx_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u",
- htt_stats_buf->mpdu_bmap_not_recvd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_bar = %u",
- htt_stats_buf->num_bar);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts = %u",
- htt_stats_buf->rts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cts2self = %u",
- htt_stats_buf->cts2self);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_null = %u",
- htt_stats_buf->qos_null);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_tried_cnt = %u",
- htt_stats_buf->mpdu_tried_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queued_cnt = %u",
- htt_stats_buf->mpdu_queued_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u",
- htt_stats_buf->mpdu_ack_fail_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_filt_cnt = %u",
- htt_stats_buf->mpdu_filt_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "false_mpdu_ack_count = %u",
- htt_stats_buf->false_mpdu_ack_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "txq_timeout = %u\n",
- htt_stats_buf->txq_timeout);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "xretry = %u\n",
+ htt_stats_buf->xretry);
+ len += scnprintf(buf + len, buf_len - len, "underrun_cnt = %u\n",
+ htt_stats_buf->underrun_cnt);
+ len += scnprintf(buf + len, buf_len - len, "flush_cnt = %u\n",
+ htt_stats_buf->flush_cnt);
+ len += scnprintf(buf + len, buf_len - len, "filt_cnt = %u\n",
+ htt_stats_buf->filt_cnt);
+ len += scnprintf(buf + len, buf_len - len, "null_mpdu_bmap = %u\n",
+ htt_stats_buf->null_mpdu_bmap);
+ len += scnprintf(buf + len, buf_len - len, "user_ack_failure = %u\n",
+ htt_stats_buf->user_ack_failure);
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ htt_stats_buf->ack_tlv_proc);
+ len += scnprintf(buf + len, buf_len - len, "sched_id_proc = %u\n",
+ htt_stats_buf->sched_id_proc);
+ len += scnprintf(buf + len, buf_len - len, "null_mpdu_tx_count = %u\n",
+ htt_stats_buf->null_mpdu_tx_count);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u\n",
+ htt_stats_buf->mpdu_bmap_not_recvd);
+ len += scnprintf(buf + len, buf_len - len, "num_bar = %u\n",
+ htt_stats_buf->num_bar);
+ len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
+ htt_stats_buf->rts);
+ len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
+ htt_stats_buf->cts2self);
+ len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
+ htt_stats_buf->qos_null);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_tried_cnt = %u\n",
+ htt_stats_buf->mpdu_tried_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queued_cnt = %u\n",
+ htt_stats_buf->mpdu_queued_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u\n",
+ htt_stats_buf->mpdu_ack_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_filt_cnt = %u\n",
+ htt_stats_buf->mpdu_filt_cnt);
+ len += scnprintf(buf + len, buf_len - len, "false_mpdu_ack_count = %u\n",
+ htt_stats_buf->false_mpdu_ack_count);
+ len += scnprintf(buf + len, buf_len - len, "txq_timeout = %u\n\n",
+ htt_stats_buf->txq_timeout);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1108,17 +1038,14 @@ htt_print_tx_hwq_difs_latency_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS);
- char difs_latency_hist[HTT_MAX_STRING_LEN] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hist_intvl = %u",
- htt_stats_buf->hist_intvl);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "hist_intvl = %u\n",
+ htt_stats_buf->hist_intvl);
- ARRAY_TO_STRING(difs_latency_hist, htt_stats_buf->difs_latency_hist,
- data_len);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "difs_latency_hist = %s\n",
- difs_latency_hist);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->difs_latency_hist,
+ "difs_latency_hist", data_len, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1138,16 +1065,14 @@ htt_print_tx_hwq_cmd_result_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 data_len;
- char cmd_result[HTT_MAX_STRING_LEN] = {0};
data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_RESULT_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:");
-
- ARRAY_TO_STRING(cmd_result, htt_stats_buf->cmd_result, data_len);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:\n");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cmd_result = %s\n", cmd_result);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_result, "cmd_result",
+ data_len, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1167,15 +1092,13 @@ htt_print_tx_hwq_cmd_stall_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems;
- char cmd_stall_status[HTT_MAX_STRING_LEN] = {0};
num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_STALL_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:\n");
- ARRAY_TO_STRING(cmd_stall_status, htt_stats_buf->cmd_stall_status, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cmd_stall_status = %s\n",
- cmd_stall_status);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_stall_status,
+ "cmd_stall_status", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1195,15 +1118,14 @@ htt_print_tx_hwq_fes_result_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems;
- char fes_result[HTT_MAX_STRING_LEN] = {0};
num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_FES_RESULT_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:\n");
- ARRAY_TO_STRING(fes_result, htt_stats_buf->fes_result, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fes_result = %s\n", fes_result);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fes_result, "fes_result",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1222,27 +1144,16 @@ htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char tried_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
u32 num_elements = ((tag_len -
sizeof(htt_stats_buf->hist_bin_size)) >> 2);
- u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u",
- htt_stats_buf->hist_bin_size);
-
- if (required_buffer_size < HTT_MAX_STRING_LEN) {
- ARRAY_TO_STRING(tried_mpdu_cnt_hist,
- htt_stats_buf->tried_mpdu_cnt_hist,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "tried_mpdu_cnt_hist = %s\n",
- tried_mpdu_cnt_hist);
- } else {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "INSUFFICIENT PRINT BUFFER ");
- }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n",
+ htt_stats_buf->hist_bin_size);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist,
+ "tried_mpdu_cnt_hist", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1261,23 +1172,14 @@ htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char txop_used_cnt_hist[HTT_MAX_STRING_LEN] = {0};
u32 num_elements = tag_len >> 2;
- u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->txop_used_cnt_hist,
+ "txop_used_cnt_hist", num_elements, "\n\n");
- if (required_buffer_size < HTT_MAX_STRING_LEN) {
- ARRAY_TO_STRING(txop_used_cnt_hist,
- htt_stats_buf->txop_used_cnt_hist,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "txop_used_cnt_hist = %s\n",
- txop_used_cnt_hist);
- } else {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "INSUFFICIENT PRINT BUFFER ");
- }
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
@@ -1300,86 +1202,86 @@ static inline void htt_print_tx_sounding_stats_tlv(const void *tag_buf,
const u32 *cbf_160 = htt_stats_buf->cbf_160;
if (htt_stats_buf->tx_sounding_mode == HTT_TX_AC_SOUNDING_MODE) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "\nHTT_TX_AC_SOUNDING_STATS_TLV:\n");
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u ",
- cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_AC_SOUNDING_STATS_TLV:\n\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u ",
- i,
- htt_stats_buf->sounding[0],
- htt_stats_buf->sounding[1],
- htt_stats_buf->sounding[2],
- htt_stats_buf->sounding[3]);
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n",
+ i,
+ htt_stats_buf->sounding[0],
+ htt_stats_buf->sounding[1],
+ htt_stats_buf->sounding[2],
+ htt_stats_buf->sounding[3]);
}
} else if (htt_stats_buf->tx_sounding_mode == HTT_TX_AX_SOUNDING_MODE) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u ",
- cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u ",
- i,
- htt_stats_buf->sounding[0],
- htt_stats_buf->sounding[1],
- htt_stats_buf->sounding[2],
- htt_stats_buf->sounding[3]);
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n",
+ i,
+ htt_stats_buf->sounding[0],
+ htt_stats_buf->sounding[1],
+ htt_stats_buf->sounding[2],
+ htt_stats_buf->sounding[3]);
}
}
@@ -1400,31 +1302,31 @@ htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "su_bar = %u",
- htt_stats_buf->su_bar);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts = %u",
- htt_stats_buf->rts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cts2self = %u",
- htt_stats_buf->cts2self);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_null = %u",
- htt_stats_buf->qos_null);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_1 = %u",
- htt_stats_buf->delayed_bar_1);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_2 = %u",
- htt_stats_buf->delayed_bar_2);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_3 = %u",
- htt_stats_buf->delayed_bar_3);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_4 = %u",
- htt_stats_buf->delayed_bar_4);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_5 = %u",
- htt_stats_buf->delayed_bar_5);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_6 = %u",
- htt_stats_buf->delayed_bar_6);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_7 = %u\n",
- htt_stats_buf->delayed_bar_7);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "su_bar = %u\n",
+ htt_stats_buf->su_bar);
+ len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
+ htt_stats_buf->rts);
+ len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
+ htt_stats_buf->cts2self);
+ len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
+ htt_stats_buf->qos_null);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_1 = %u\n",
+ htt_stats_buf->delayed_bar_1);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_2 = %u\n",
+ htt_stats_buf->delayed_bar_2);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_3 = %u\n",
+ htt_stats_buf->delayed_bar_3);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_4 = %u\n",
+ htt_stats_buf->delayed_bar_4);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_5 = %u\n",
+ htt_stats_buf->delayed_bar_5);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_6 = %u\n",
+ htt_stats_buf->delayed_bar_6);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_7 = %u\n\n",
+ htt_stats_buf->delayed_bar_7);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1443,21 +1345,21 @@ htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndpa = %u",
- htt_stats_buf->ac_su_ndpa);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndp = %u",
- htt_stats_buf->ac_su_ndp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u",
- htt_stats_buf->ac_mu_mimo_ndpa);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u",
- htt_stats_buf->ac_mu_mimo_ndp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u",
- htt_stats_buf->ac_mu_mimo_brpoll_1);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u",
- htt_stats_buf->ac_mu_mimo_brpoll_2);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u\n",
- htt_stats_buf->ac_mu_mimo_brpoll_3);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa = %u\n",
+ htt_stats_buf->ac_su_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndp = %u\n",
+ htt_stats_buf->ac_su_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_1);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_2);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u\n\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_3);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1476,37 +1378,37 @@ htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndpa = %u",
- htt_stats_buf->ax_su_ndpa);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndp = %u",
- htt_stats_buf->ax_su_ndp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u",
- htt_stats_buf->ax_mu_mimo_ndpa);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u",
- htt_stats_buf->ax_mu_mimo_ndp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_1 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_1);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_2);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_3);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_4);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_5);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_6);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_7);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_basic_trigger = %u",
- htt_stats_buf->ax_basic_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_bsr_trigger = %u",
- htt_stats_buf->ax_bsr_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_bar_trigger = %u",
- htt_stats_buf->ax_mu_bar_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n",
- htt_stats_buf->ax_mu_rts_trigger);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa = %u\n",
+ htt_stats_buf->ax_su_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndp = %u\n",
+ htt_stats_buf->ax_su_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_1 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_1);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_2);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_3);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_4);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_5);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_6);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_7);
+ len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
+ htt_stats_buf->ax_basic_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger = %u\n",
+ htt_stats_buf->ax_bsr_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger = %u\n",
+ htt_stats_buf->ax_mu_bar_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n\n",
+ htt_stats_buf->ax_mu_rts_trigger);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1525,21 +1427,21 @@ htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndp_err = %u",
- htt_stats_buf->ac_su_ndp_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndpa_err = %u",
- htt_stats_buf->ac_su_ndpa_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u",
- htt_stats_buf->ac_mu_mimo_ndpa_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u",
- htt_stats_buf->ac_mu_mimo_ndp_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u",
- htt_stats_buf->ac_mu_mimo_brp1_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u",
- htt_stats_buf->ac_mu_mimo_brp2_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n",
- htt_stats_buf->ac_mu_mimo_brp3_err);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_err = %u\n",
+ htt_stats_buf->ac_su_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_err = %u\n",
+ htt_stats_buf->ac_su_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_brp1_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_brp2_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n\n",
+ htt_stats_buf->ac_mu_mimo_brp3_err);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1558,37 +1460,37 @@ htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndp_err = %u",
- htt_stats_buf->ax_su_ndp_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndpa_err = %u",
- htt_stats_buf->ax_su_ndpa_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u",
- htt_stats_buf->ax_mu_mimo_ndpa_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u",
- htt_stats_buf->ax_mu_mimo_ndp_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u",
- htt_stats_buf->ax_mu_mimo_brp1_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u",
- htt_stats_buf->ax_mu_mimo_brp2_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u",
- htt_stats_buf->ax_mu_mimo_brp3_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u",
- htt_stats_buf->ax_mu_mimo_brp4_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u",
- htt_stats_buf->ax_mu_mimo_brp5_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u",
- htt_stats_buf->ax_mu_mimo_brp6_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u",
- htt_stats_buf->ax_mu_mimo_brp7_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_basic_trigger_err = %u",
- htt_stats_buf->ax_basic_trigger_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_bsr_trigger_err = %u",
- htt_stats_buf->ax_bsr_trigger_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u",
- htt_stats_buf->ax_mu_bar_trigger_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n",
- htt_stats_buf->ax_mu_rts_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_err = %u\n",
+ htt_stats_buf->ax_su_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_err = %u\n",
+ htt_stats_buf->ax_su_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp1_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp2_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp3_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp4_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp5_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp6_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp7_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n",
+ htt_stats_buf->ax_basic_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n",
+ htt_stats_buf->ax_bsr_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n",
+ htt_stats_buf->ax_mu_bar_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n\n",
+ htt_stats_buf->ax_mu_rts_trigger_err);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1608,35 +1510,35 @@ htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 i;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_posted = %u",
- htt_stats_buf->mu_mimo_sch_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_failed = %u",
- htt_stats_buf->mu_mimo_sch_failed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
- htt_stats_buf->mu_mimo_ppdu_posted);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:");
+ len += scnprintf(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_sch_nusers_%u = %u",
- i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:");
+ len += scnprintf(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_sch_nusers_%u = %u",
- i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:");
+ len += scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_ofdma_sch_nusers_%u = %u",
- i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1657,114 +1559,114 @@ htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) {
if (!htt_stats_buf->user_index)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n");
if (htt_stats_buf->user_index <
HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_mpdus_queued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_queued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_mpdus_tried_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_tried_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_mpdus_failed_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_failed_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_mpdus_requeued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_requeued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_err_no_ba_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->err_no_ba_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_mpdu_underrun_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdu_underrun_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n",
- htt_stats_buf->user_index,
- htt_stats_buf->ampdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_queued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_tried_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_failed_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_requeued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_err_no_ba_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
}
}
if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) {
if (!htt_stats_buf->user_index)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n");
if (htt_stats_buf->user_index <
HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_mpdus_queued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_queued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_mpdus_tried_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_tried_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_mpdus_failed_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_failed_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_mpdus_requeued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_requeued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_err_no_ba_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->err_no_ba_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_mpdu_underrun_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdu_underrun_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n",
- htt_stats_buf->user_index,
- htt_stats_buf->ampdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_queued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_tried_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_failed_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_requeued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_err_no_ba_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
}
}
if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) {
if (!htt_stats_buf->user_index)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n");
if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_mpdus_queued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_queued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_mpdus_tried_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_tried_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_mpdus_failed_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_failed_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_mpdus_requeued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_requeued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_err_no_ba_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->err_no_ba_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_mpdu_underrun_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdu_underrun_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n",
- htt_stats_buf->user_index,
- htt_stats_buf->ampdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_queued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_tried_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_failed_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_requeued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_err_no_ba_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
}
}
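
A note on the pattern applied throughout this patch: HTT_DBG_OUT was a local wrapper macro around scnprintf() that appended a trailing newline to every format string, which is why each converted call gains an explicit "\n" (and the last counter of a TLV gains "\n\n" to keep the blank separator line). What matters for correctness is that scnprintf(), unlike snprintf(), returns the number of characters actually stored rather than the length the output would have had, so the running "len +=" accumulation stays bounded by the buffer size. A minimal sketch of the idiom, with the function and parameter names invented for illustration:

	/* Sketch only: the accumulation idiom used by every printer in
	 * this file. scnprintf() stores at most size - 1 characters plus
	 * a NUL and returns what it stored, so len never exceeds
	 * buf_len - 1 and "buf_len - len" cannot underflow.
	 */
	static void example_fill(char *buf, u32 buf_len, u32 a, u32 b)
	{
		u32 len = 0;

		len += scnprintf(buf + len, buf_len - len, "a = %u\n", a);
		len += scnprintf(buf + len, buf_len - len, "b = %u\n", b);
	}

With snprintf() the same sequence could push len past buf_len after one truncated write, turning "buf_len - len" into a huge unsigned size on the next call.
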
@@ -1785,15 +1687,12 @@ htt_print_sched_txq_cmd_posted_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sched_cmd_posted[HTT_MAX_STRING_LEN] = {0};
u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:\n");
- ARRAY_TO_STRING(sched_cmd_posted, htt_stats_buf->sched_cmd_posted,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_posted = %s\n",
- sched_cmd_posted);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_posted,
+ "sched_cmd_posted", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
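
The second half of the conversion replaces the ARRAY_TO_STRING-into-a-stack-temporary-then-"%s" dance with PRINT_ARRAY_TO_BUF, which formats the array elements directly into the stats buffer and advances len in place. The macro itself is defined elsewhere in this file and is not visible in these hunks; the following is a sketch inferred from the call sites (a "name = " prefix, index:value pairs, then a caller-chosen terminator), not a verbatim copy of the driver's definition:

	#define PRINT_ARRAY_TO_BUF(out, len, arr, str, arr_len, newline)	\
	do {									\
		u16 __i;							\
										\
		(len) += scnprintf((out) + (len),				\
				   ATH11K_HTT_STATS_BUF_SIZE - (len),		\
				   "%s = ", (str));				\
		for (__i = 0; __i < (arr_len); __i++)				\
			(len) += scnprintf((out) + (len),			\
					   ATH11K_HTT_STATS_BUF_SIZE - (len),	\
					   " %u:%u,", __i, (arr)[__i]);		\
		(len) += scnprintf((out) + (len),				\
				   ATH11K_HTT_STATS_BUF_SIZE - (len),		\
				   "%s", (newline));				\
	} while (0)

Every write is bounded by the space actually remaining, so neither the intermediate string nor a separate length pre-check is needed.
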
@@ -1812,15 +1711,12 @@ htt_print_sched_txq_cmd_reaped_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sched_cmd_reaped[HTT_MAX_STRING_LEN] = {0};
u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:\n");
- ARRAY_TO_STRING(sched_cmd_reaped, htt_stats_buf->sched_cmd_reaped,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_reaped = %s\n",
- sched_cmd_reaped);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_reaped,
+ "sched_cmd_reaped", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1839,18 +1735,15 @@ htt_print_sched_txq_sched_order_su_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sched_order_su[HTT_MAX_STRING_LEN] = {0};
/* each entry is u32, i.e. 4 bytes */
u32 sched_order_su_num_entries =
min_t(u32, (tag_len >> 2), HTT_TX_PDEV_NUM_SCHED_ORDER_LOG);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:\n");
- ARRAY_TO_STRING(sched_order_su, htt_stats_buf->sched_order_su,
- sched_order_su_num_entries);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_order_su = %s\n",
- sched_order_su);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_order_su, "sched_order_su",
+ sched_order_su_num_entries, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1869,17 +1762,15 @@ htt_print_sched_txq_sched_ineligibility_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sched_ineligibility[HTT_MAX_STRING_LEN] = {0};
/* each entry is u32, i.e. 4 bytes */
u32 sched_ineligibility_num_entries = tag_len >> 2;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_SCHED_TXQ_SCHED_INELIGIBILITY_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_INELIGIBILITY_V:\n");
- ARRAY_TO_STRING(sched_ineligibility, htt_stats_buf->sched_ineligibility,
- sched_ineligibility_num_entries);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_ineligibility = %s\n",
- sched_ineligibility);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_ineligibility,
+ "sched_ineligibility", sched_ineligibility_num_entries,
+ "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1898,54 +1789,56 @@ htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__txq_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "txq_id = %u",
- (htt_stats_buf->mac_id__txq_id__word & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_policy = %u",
- htt_stats_buf->sched_policy);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "last_sched_cmd_posted_timestamp = %u",
- htt_stats_buf->last_sched_cmd_posted_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "last_sched_cmd_compl_timestamp = %u",
- htt_stats_buf->last_sched_cmd_compl_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u",
- htt_stats_buf->sched_2_tac_lwm_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_2_tac_ring_full = %u",
- htt_stats_buf->sched_2_tac_ring_full);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_post_failure = %u",
- htt_stats_buf->sched_cmd_post_failure);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_active_tids = %u",
- htt_stats_buf->num_active_tids);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ps_schedules = %u",
- htt_stats_buf->num_ps_schedules);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmds_pending = %u",
- htt_stats_buf->sched_cmds_pending);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tid_register = %u",
- htt_stats_buf->num_tid_register);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tid_unregister = %u",
- htt_stats_buf->num_tid_unregister);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_qstats_queried = %u",
- htt_stats_buf->num_qstats_queried);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qstats_update_pending = %u",
- htt_stats_buf->qstats_update_pending);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_qstats_query_timestamp = %u",
- htt_stats_buf->last_qstats_query_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tqm_cmdq_full = %u",
- htt_stats_buf->num_tqm_cmdq_full);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u",
- htt_stats_buf->num_de_sched_algo_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u",
- htt_stats_buf->num_rt_sched_algo_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u",
- htt_stats_buf->num_tqm_sched_algo_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_sched = %u\n",
- htt_stats_buf->notify_sched);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dur_based_sendn_term = %u\n",
- htt_stats_buf->dur_based_sendn_term);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID,
+ htt_stats_buf->mac_id__txq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "txq_id = %lu\n",
+ FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID,
+ htt_stats_buf->mac_id__txq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n",
+ htt_stats_buf->sched_policy);
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_posted_timestamp = %u\n",
+ htt_stats_buf->last_sched_cmd_posted_timestamp);
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_compl_timestamp = %u\n",
+ htt_stats_buf->last_sched_cmd_compl_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n",
+ htt_stats_buf->sched_2_tac_lwm_count);
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n",
+ htt_stats_buf->sched_2_tac_ring_full);
+ len += scnprintf(buf + len, buf_len - len, "sched_cmd_post_failure = %u\n",
+ htt_stats_buf->sched_cmd_post_failure);
+ len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n",
+ htt_stats_buf->num_active_tids);
+ len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n",
+ htt_stats_buf->num_ps_schedules);
+ len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n",
+ htt_stats_buf->sched_cmds_pending);
+ len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n",
+ htt_stats_buf->num_tid_register);
+ len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n",
+ htt_stats_buf->num_tid_unregister);
+ len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n",
+ htt_stats_buf->num_qstats_queried);
+ len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n",
+ htt_stats_buf->qstats_update_pending);
+ len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n",
+ htt_stats_buf->last_qstats_query_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n",
+ htt_stats_buf->num_tqm_cmdq_full);
+ len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n",
+ htt_stats_buf->num_de_sched_algo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n",
+ htt_stats_buf->num_rt_sched_algo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n",
+ htt_stats_buf->num_tqm_sched_algo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n\n",
+ htt_stats_buf->notify_sched);
+ len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n\n",
+ htt_stats_buf->dur_based_sendn_term);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1963,11 +1856,11 @@ static inline void htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "current_timestamp = %u\n",
- htt_stats_buf->current_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n",
+ htt_stats_buf->current_timestamp);
if (len >= buf_len)
buf[buf_len - 1] = 0;
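
This hunk also shows the second transformation running through the patch: open-coded mask-and-shift expressions such as "mac_id__word & 0xFF" become FIELD_GET() against named GENMASK()-based definitions (HTT_STATS_MAC_ID and friends live in the driver's header, outside these hunks). FIELD_GET() derives the shift from the mask, so the pair can never drift apart, and because GENMASK() produces an unsigned long the extracted value is unsigned long as well, which is why the format specifiers change from %u to %lu. A self-contained sketch with invented field names:

	#include <linux/bitfield.h>	/* FIELD_GET() */
	#include <linux/bits.h>		/* GENMASK() */
	#include <linux/printk.h>

	/* Hypothetical layout for illustration: id in bits 7:0,
	 * sub-id in bits 15:8 of one packed word.
	 */
	#define EXAMPLE_ID	GENMASK(7, 0)
	#define EXAMPLE_SUB_ID	GENMASK(15, 8)

	static void example_decode(u32 word)
	{
		/* Equivalent to (word & 0xFF) and (word & 0xFF00) >> 8 */
		unsigned long id = FIELD_GET(EXAMPLE_ID, word);
		unsigned long sub_id = FIELD_GET(EXAMPLE_SUB_ID, word);

		pr_debug("id = %lu sub_id = %lu\n", id, sub_id);
	}
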
@@ -1986,16 +1879,13 @@ htt_print_tx_tqm_gen_mpdu_stats_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char gen_mpdu_end_reason[HTT_MAX_STRING_LEN] = {0};
u16 num_elements = min_t(u16, (tag_len >> 2),
HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:\n");
- ARRAY_TO_STRING(gen_mpdu_end_reason, htt_stats_buf->gen_mpdu_end_reason,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_end_reason = %s\n",
- gen_mpdu_end_reason);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->gen_mpdu_end_reason,
+ "gen_mpdu_end_reason", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2014,16 +1904,14 @@ htt_print_tx_tqm_list_mpdu_stats_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char list_mpdu_end_reason[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_end_reason,
+ "list_mpdu_end_reason", num_elems, "\n\n");
- ARRAY_TO_STRING(list_mpdu_end_reason, htt_stats_buf->list_mpdu_end_reason,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_end_reason = %s\n",
- list_mpdu_end_reason);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
@@ -2041,16 +1929,13 @@ htt_print_tx_tqm_list_mpdu_cnt_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char list_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2),
HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n");
- ARRAY_TO_STRING(list_mpdu_cnt_hist, htt_stats_buf->list_mpdu_cnt_hist,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_cnt_hist = %s\n",
- list_mpdu_cnt_hist);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_cnt_hist,
+ "list_mpdu_cnt_hist", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2069,69 +1954,69 @@ htt_print_tx_tqm_pdev_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_count = %u",
- htt_stats_buf->msdu_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_count = %u",
- htt_stats_buf->mpdu_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu = %u",
- htt_stats_buf->remove_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu = %u",
- htt_stats_buf->remove_mpdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_ttl = %u",
- htt_stats_buf->remove_msdu_ttl);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "send_bar = %u",
- htt_stats_buf->send_bar);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "bar_sync = %u",
- htt_stats_buf->bar_sync);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu = %u",
- htt_stats_buf->notify_mpdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sync_cmd = %u",
- htt_stats_buf->sync_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "write_cmd = %u",
- htt_stats_buf->write_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_trigger = %u",
- htt_stats_buf->hwsch_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
- htt_stats_buf->ack_tlv_proc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_cmd = %u",
- htt_stats_buf->gen_mpdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_list_cmd = %u",
- htt_stats_buf->gen_list_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_cmd = %u",
- htt_stats_buf->remove_mpdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u",
- htt_stats_buf->remove_mpdu_tried_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u",
- htt_stats_buf->mpdu_queue_stats_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_head_info_cmd = %u",
- htt_stats_buf->mpdu_head_info_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u",
- htt_stats_buf->msdu_flow_stats_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_cmd = %u",
- htt_stats_buf->remove_msdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u",
- htt_stats_buf->remove_msdu_ttl_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cache_cmd = %u",
- htt_stats_buf->flush_cache_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "update_mpduq_cmd = %u",
- htt_stats_buf->update_mpduq_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueue = %u",
- htt_stats_buf->enqueue);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueue_notify = %u",
- htt_stats_buf->enqueue_notify);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu_at_head = %u",
- htt_stats_buf->notify_mpdu_at_head);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu_state_valid = %u",
- htt_stats_buf->notify_mpdu_state_valid);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_udp_notify1 = %u",
- htt_stats_buf->sched_udp_notify1);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_udp_notify2 = %u",
- htt_stats_buf->sched_udp_notify2);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_nonudp_notify1 = %u",
- htt_stats_buf->sched_nonudp_notify1);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n",
- htt_stats_buf->sched_nonudp_notify2);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n",
+ htt_stats_buf->msdu_count);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count = %u\n",
+ htt_stats_buf->mpdu_count);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n",
+ htt_stats_buf->remove_msdu);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n",
+ htt_stats_buf->remove_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n",
+ htt_stats_buf->remove_msdu_ttl);
+ len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n",
+ htt_stats_buf->send_bar);
+ len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n",
+ htt_stats_buf->bar_sync);
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu = %u\n",
+ htt_stats_buf->notify_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+ htt_stats_buf->sync_cmd);
+ len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+ htt_stats_buf->write_cmd);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n",
+ htt_stats_buf->hwsch_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ htt_stats_buf->ack_tlv_proc);
+ len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n",
+ htt_stats_buf->gen_list_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n",
+ htt_stats_buf->remove_mpdu_tried_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+ htt_stats_buf->remove_msdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n",
+ htt_stats_buf->remove_msdu_ttl_cmd);
+ len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+ htt_stats_buf->flush_cache_cmd);
+ len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+ htt_stats_buf->update_mpduq_cmd);
+ len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n",
+ htt_stats_buf->enqueue);
+ len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n",
+ htt_stats_buf->enqueue_notify);
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n",
+ htt_stats_buf->notify_mpdu_at_head);
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n",
+ htt_stats_buf->notify_mpdu_state_valid);
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n",
+ htt_stats_buf->sched_udp_notify1);
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n",
+ htt_stats_buf->sched_udp_notify2);
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n",
+ htt_stats_buf->sched_nonudp_notify1);
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n\n",
+ htt_stats_buf->sched_nonudp_notify2);
if (len >= buf_len)
buf[buf_len - 1] = 0;
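
Every printer ends with the same epilogue, visible at each hunk boundary above. Since scnprintf() can bring len up to at most buf_len - 1, the "len >= buf_len" branch is a defensive backstop rather than the expected path; the else branch falls outside the context shown in these hunks, and presumably terminates the string at the current position and publishes the new length, along these lines:

	if (len >= buf_len)
		buf[buf_len - 1] = 0;	/* backstop: NUL at the very end */
	else
		buf[len] = 0;		/* normal: terminate after last write */

	stats_req->buf_len = len;	/* assumed from context, not shown above */
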
@@ -2149,23 +2034,23 @@ static inline void htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "max_cmdq_id = %u",
- htt_stats_buf->max_cmdq_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u",
- htt_stats_buf->list_mpdu_cnt_hist_intvl);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "add_msdu = %u",
- htt_stats_buf->add_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "q_empty = %u",
- htt_stats_buf->q_empty);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "q_not_empty = %u",
- htt_stats_buf->q_not_empty);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "drop_notification = %u",
- htt_stats_buf->drop_notification);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "desc_threshold = %u\n",
- htt_stats_buf->desc_threshold);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n",
+ htt_stats_buf->max_cmdq_id);
+ len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n",
+ htt_stats_buf->list_mpdu_cnt_hist_intvl);
+ len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n",
+ htt_stats_buf->add_msdu);
+ len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n",
+ htt_stats_buf->q_empty);
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n",
+ htt_stats_buf->q_not_empty);
+ len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n",
+ htt_stats_buf->drop_notification);
+ len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n\n",
+ htt_stats_buf->desc_threshold);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2183,13 +2068,13 @@ static inline void htt_print_tx_tqm_error_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "q_empty_failure = %u",
- htt_stats_buf->q_empty_failure);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "q_not_empty_failure = %u",
- htt_stats_buf->q_not_empty_failure);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "add_msdu_failure = %u\n",
- htt_stats_buf->add_msdu_failure);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n",
+ htt_stats_buf->q_empty_failure);
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n",
+ htt_stats_buf->q_not_empty_failure);
+ len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n\n",
+ htt_stats_buf->add_msdu_failure);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2207,33 +2092,35 @@ static inline void htt_print_tx_tqm_cmdq_status_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__cmdq_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cmdq_id = %u\n",
- (htt_stats_buf->mac_id__cmdq_id__word & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sync_cmd = %u",
- htt_stats_buf->sync_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "write_cmd = %u",
- htt_stats_buf->write_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_cmd = %u",
- htt_stats_buf->gen_mpdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u",
- htt_stats_buf->mpdu_queue_stats_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_head_info_cmd = %u",
- htt_stats_buf->mpdu_head_info_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u",
- htt_stats_buf->msdu_flow_stats_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_cmd = %u",
- htt_stats_buf->remove_mpdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_cmd = %u",
- htt_stats_buf->remove_msdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cache_cmd = %u",
- htt_stats_buf->flush_cache_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "update_mpduq_cmd = %u",
- htt_stats_buf->update_mpduq_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "update_msduq_cmd = %u\n",
- htt_stats_buf->update_msduq_cmd);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_MAC_ID,
+ htt_stats_buf->mac_id__cmdq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "cmdq_id = %lu\n\n",
+ FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID,
+ htt_stats_buf->mac_id__cmdq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+ htt_stats_buf->sync_cmd);
+ len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+ htt_stats_buf->write_cmd);
+ len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+ htt_stats_buf->remove_msdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+ htt_stats_buf->flush_cache_cmd);
+ len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+ htt_stats_buf->update_mpduq_cmd);
+ len += scnprintf(buf + len, buf_len - len, "update_msduq_cmd = %u\n\n",
+ htt_stats_buf->update_msduq_cmd);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2252,20 +2139,20 @@ htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "m1_packets = %u",
- htt_stats_buf->m1_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "m2_packets = %u",
- htt_stats_buf->m2_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "m3_packets = %u",
- htt_stats_buf->m3_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "m4_packets = %u",
- htt_stats_buf->m4_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "g1_packets = %u",
- htt_stats_buf->g1_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "g2_packets = %u\n",
- htt_stats_buf->g2_packets);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "m1_packets = %u\n",
+ htt_stats_buf->m1_packets);
+ len += scnprintf(buf + len, buf_len - len, "m2_packets = %u\n",
+ htt_stats_buf->m2_packets);
+ len += scnprintf(buf + len, buf_len - len, "m3_packets = %u\n",
+ htt_stats_buf->m3_packets);
+ len += scnprintf(buf + len, buf_len - len, "m4_packets = %u\n",
+ htt_stats_buf->m4_packets);
+ len += scnprintf(buf + len, buf_len - len, "g1_packets = %u\n",
+ htt_stats_buf->g1_packets);
+ len += scnprintf(buf + len, buf_len - len, "g2_packets = %u\n\n",
+ htt_stats_buf->g2_packets);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2284,34 +2171,34 @@ htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ap_bss_peer_not_found = %u",
- htt_stats_buf->ap_bss_peer_not_found);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u",
- htt_stats_buf->ap_bcast_mcast_no_peer);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sta_delete_in_progress = %u",
- htt_stats_buf->sta_delete_in_progress);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ibss_no_bss_peer = %u",
- htt_stats_buf->ibss_no_bss_peer);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_vdev_type = %u",
- htt_stats_buf->invalid_vdev_type);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_ast_peer_entry = %u",
- htt_stats_buf->invalid_ast_peer_entry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_entry_invalid = %u",
- htt_stats_buf->peer_entry_invalid);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ethertype_not_ip = %u",
- htt_stats_buf->ethertype_not_ip);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "eapol_lookup_failed = %u",
- htt_stats_buf->eapol_lookup_failed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qpeer_not_allow_data = %u",
- htt_stats_buf->qpeer_not_allow_data);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_tid_override = %u",
- htt_stats_buf->fse_tid_override);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u",
- htt_stats_buf->ipv6_jumbogram_zero_length);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n",
- htt_stats_buf->qos_to_non_qos_in_prog);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ap_bss_peer_not_found = %u\n",
+ htt_stats_buf->ap_bss_peer_not_found);
+ len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u\n",
+ htt_stats_buf->ap_bcast_mcast_no_peer);
+ len += scnprintf(buf + len, buf_len - len, "sta_delete_in_progress = %u\n",
+ htt_stats_buf->sta_delete_in_progress);
+ len += scnprintf(buf + len, buf_len - len, "ibss_no_bss_peer = %u\n",
+ htt_stats_buf->ibss_no_bss_peer);
+ len += scnprintf(buf + len, buf_len - len, "invalid_vdev_type = %u\n",
+ htt_stats_buf->invalid_vdev_type);
+ len += scnprintf(buf + len, buf_len - len, "invalid_ast_peer_entry = %u\n",
+ htt_stats_buf->invalid_ast_peer_entry);
+ len += scnprintf(buf + len, buf_len - len, "peer_entry_invalid = %u\n",
+ htt_stats_buf->peer_entry_invalid);
+ len += scnprintf(buf + len, buf_len - len, "ethertype_not_ip = %u\n",
+ htt_stats_buf->ethertype_not_ip);
+ len += scnprintf(buf + len, buf_len - len, "eapol_lookup_failed = %u\n",
+ htt_stats_buf->eapol_lookup_failed);
+ len += scnprintf(buf + len, buf_len - len, "qpeer_not_allow_data = %u\n",
+ htt_stats_buf->qpeer_not_allow_data);
+ len += scnprintf(buf + len, buf_len - len, "fse_tid_override = %u\n",
+ htt_stats_buf->fse_tid_override);
+ len += scnprintf(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u\n",
+ htt_stats_buf->ipv6_jumbogram_zero_length);
+ len += scnprintf(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n\n",
+ htt_stats_buf->qos_to_non_qos_in_prog);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2330,73 +2217,73 @@ htt_print_tx_de_classify_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "arp_packets = %u",
- htt_stats_buf->arp_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "igmp_packets = %u",
- htt_stats_buf->igmp_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dhcp_packets = %u",
- htt_stats_buf->dhcp_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "host_inspected = %u",
- htt_stats_buf->host_inspected);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_included = %u",
- htt_stats_buf->htt_included);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_mcs = %u",
- htt_stats_buf->htt_valid_mcs);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_nss = %u",
- htt_stats_buf->htt_valid_nss);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_preamble_type = %u",
- htt_stats_buf->htt_valid_preamble_type);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_chainmask = %u",
- htt_stats_buf->htt_valid_chainmask);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_guard_interval = %u",
- htt_stats_buf->htt_valid_guard_interval);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_retries = %u",
- htt_stats_buf->htt_valid_retries);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_bw_info = %u",
- htt_stats_buf->htt_valid_bw_info);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_power = %u",
- htt_stats_buf->htt_valid_power);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x",
- htt_stats_buf->htt_valid_key_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_no_encryption = %u",
- htt_stats_buf->htt_valid_no_encryption);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_entry_count = %u",
- htt_stats_buf->fse_entry_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_be = %u",
- htt_stats_buf->fse_priority_be);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_high = %u",
- htt_stats_buf->fse_priority_high);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_low = %u",
- htt_stats_buf->fse_priority_low);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u",
- htt_stats_buf->fse_traffic_ptrn_be);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u",
- htt_stats_buf->fse_traffic_ptrn_over_sub);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u",
- htt_stats_buf->fse_traffic_ptrn_bursty);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u",
- htt_stats_buf->fse_traffic_ptrn_interactive);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u",
- htt_stats_buf->fse_traffic_ptrn_periodic);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_alloc = %u",
- htt_stats_buf->fse_hwqueue_alloc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_created = %u",
- htt_stats_buf->fse_hwqueue_created);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u",
- htt_stats_buf->fse_hwqueue_send_to_host);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mcast_entry = %u",
- htt_stats_buf->mcast_entry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "bcast_entry = %u",
- htt_stats_buf->bcast_entry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_update_peer_cache = %u",
- htt_stats_buf->htt_update_peer_cache);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_learning_frame = %u",
- htt_stats_buf->htt_learning_frame);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_invalid_peer = %u",
- htt_stats_buf->fse_invalid_peer);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mec_notify = %u\n",
- htt_stats_buf->mec_notify);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "arp_packets = %u\n",
+ htt_stats_buf->arp_packets);
+ len += scnprintf(buf + len, buf_len - len, "igmp_packets = %u\n",
+ htt_stats_buf->igmp_packets);
+ len += scnprintf(buf + len, buf_len - len, "dhcp_packets = %u\n",
+ htt_stats_buf->dhcp_packets);
+ len += scnprintf(buf + len, buf_len - len, "host_inspected = %u\n",
+ htt_stats_buf->host_inspected);
+ len += scnprintf(buf + len, buf_len - len, "htt_included = %u\n",
+ htt_stats_buf->htt_included);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_mcs = %u\n",
+ htt_stats_buf->htt_valid_mcs);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_nss = %u\n",
+ htt_stats_buf->htt_valid_nss);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_preamble_type = %u\n",
+ htt_stats_buf->htt_valid_preamble_type);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_chainmask = %u\n",
+ htt_stats_buf->htt_valid_chainmask);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_guard_interval = %u\n",
+ htt_stats_buf->htt_valid_guard_interval);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_retries = %u\n",
+ htt_stats_buf->htt_valid_retries);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_bw_info = %u\n",
+ htt_stats_buf->htt_valid_bw_info);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_power = %u\n",
+ htt_stats_buf->htt_valid_power);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x\n",
+ htt_stats_buf->htt_valid_key_flags);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_no_encryption = %u\n",
+ htt_stats_buf->htt_valid_no_encryption);
+ len += scnprintf(buf + len, buf_len - len, "fse_entry_count = %u\n",
+ htt_stats_buf->fse_entry_count);
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_be = %u\n",
+ htt_stats_buf->fse_priority_be);
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_high = %u\n",
+ htt_stats_buf->fse_priority_high);
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_low = %u\n",
+ htt_stats_buf->fse_priority_low);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_be);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_over_sub);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_bursty);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_interactive);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_periodic);
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_alloc = %u\n",
+ htt_stats_buf->fse_hwqueue_alloc);
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_created = %u\n",
+ htt_stats_buf->fse_hwqueue_created);
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u\n",
+ htt_stats_buf->fse_hwqueue_send_to_host);
+ len += scnprintf(buf + len, buf_len - len, "mcast_entry = %u\n",
+ htt_stats_buf->mcast_entry);
+ len += scnprintf(buf + len, buf_len - len, "bcast_entry = %u\n",
+ htt_stats_buf->bcast_entry);
+ len += scnprintf(buf + len, buf_len - len, "htt_update_peer_cache = %u\n",
+ htt_stats_buf->htt_update_peer_cache);
+ len += scnprintf(buf + len, buf_len - len, "htt_learning_frame = %u\n",
+ htt_stats_buf->htt_learning_frame);
+ len += scnprintf(buf + len, buf_len - len, "fse_invalid_peer = %u\n",
+ htt_stats_buf->fse_invalid_peer);
+ len += scnprintf(buf + len, buf_len - len, "mec_notify = %u\n\n",
+ htt_stats_buf->mec_notify);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2415,24 +2302,24 @@ htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "eok = %u",
- htt_stats_buf->eok);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "classify_done = %u",
- htt_stats_buf->classify_done);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "lookup_failed = %u",
- htt_stats_buf->lookup_failed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_dhcp = %u",
- htt_stats_buf->send_host_dhcp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_mcast = %u",
- htt_stats_buf->send_host_mcast);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_unknown_dest = %u",
- htt_stats_buf->send_host_unknown_dest);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host = %u",
- htt_stats_buf->send_host);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "status_invalid = %u\n",
- htt_stats_buf->status_invalid);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "eok = %u\n",
+ htt_stats_buf->eok);
+ len += scnprintf(buf + len, buf_len - len, "classify_done = %u\n",
+ htt_stats_buf->classify_done);
+ len += scnprintf(buf + len, buf_len - len, "lookup_failed = %u\n",
+ htt_stats_buf->lookup_failed);
+ len += scnprintf(buf + len, buf_len - len, "send_host_dhcp = %u\n",
+ htt_stats_buf->send_host_dhcp);
+ len += scnprintf(buf + len, buf_len - len, "send_host_mcast = %u\n",
+ htt_stats_buf->send_host_mcast);
+ len += scnprintf(buf + len, buf_len - len, "send_host_unknown_dest = %u\n",
+ htt_stats_buf->send_host_unknown_dest);
+ len += scnprintf(buf + len, buf_len - len, "send_host = %u\n",
+ htt_stats_buf->send_host);
+ len += scnprintf(buf + len, buf_len - len, "status_invalid = %u\n\n",
+ htt_stats_buf->status_invalid);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2451,14 +2338,14 @@ htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueued_pkts = %u",
- htt_stats_buf->enqueued_pkts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "to_tqm = %u",
- htt_stats_buf->to_tqm);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "to_tqm_bypass = %u\n",
- htt_stats_buf->to_tqm_bypass);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "enqueued_pkts = %u\n",
+ htt_stats_buf->enqueued_pkts);
+ len += scnprintf(buf + len, buf_len - len, "to_tqm = %u\n",
+ htt_stats_buf->to_tqm);
+ len += scnprintf(buf + len, buf_len - len, "to_tqm_bypass = %u\n\n",
+ htt_stats_buf->to_tqm_bypass);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2477,14 +2364,14 @@ htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "discarded_pkts = %u",
- htt_stats_buf->discarded_pkts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_frames = %u",
- htt_stats_buf->local_frames);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "is_ext_msdu = %u\n",
- htt_stats_buf->is_ext_msdu);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "discarded_pkts = %u\n",
+ htt_stats_buf->discarded_pkts);
+ len += scnprintf(buf + len, buf_len - len, "local_frames = %u\n",
+ htt_stats_buf->local_frames);
+ len += scnprintf(buf + len, buf_len - len, "is_ext_msdu = %u\n\n",
+ htt_stats_buf->is_ext_msdu);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2502,17 +2389,17 @@ static inline void htt_print_tx_de_compl_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl_dummy_frame = %u",
- htt_stats_buf->tcl_dummy_frame);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_dummy_frame = %u",
- htt_stats_buf->tqm_dummy_frame);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_notify_frame = %u",
- htt_stats_buf->tqm_notify_frame);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw2wbm_enq = %u",
- htt_stats_buf->fw2wbm_enq);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_bypass_frame = %u\n",
- htt_stats_buf->tqm_bypass_frame);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "tcl_dummy_frame = %u\n",
+ htt_stats_buf->tcl_dummy_frame);
+ len += scnprintf(buf + len, buf_len - len, "tqm_dummy_frame = %u\n",
+ htt_stats_buf->tqm_dummy_frame);
+ len += scnprintf(buf + len, buf_len - len, "tqm_notify_frame = %u\n",
+ htt_stats_buf->tqm_notify_frame);
+ len += scnprintf(buf + len, buf_len - len, "fw2wbm_enq = %u\n",
+ htt_stats_buf->fw2wbm_enq);
+ len += scnprintf(buf + len, buf_len - len, "tqm_bypass_frame = %u\n\n",
+ htt_stats_buf->tqm_bypass_frame);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2531,24 +2418,13 @@ htt_print_tx_de_fw2wbm_ring_full_hist_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char fw2wbm_ring_full_hist[HTT_MAX_STRING_LEN] = {0};
u16 num_elements = tag_len >> 2;
- u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV");
-
- if (required_buffer_size < HTT_MAX_STRING_LEN) {
- ARRAY_TO_STRING(fw2wbm_ring_full_hist,
- htt_stats_buf->fw2wbm_ring_full_hist,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "fw2wbm_ring_full_hist = %s\n",
- fw2wbm_ring_full_hist);
- } else {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "INSUFFICIENT PRINT BUFFER ");
- }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw2wbm_ring_full_hist,
+ "fw2wbm_ring_full_hist", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
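
Besides the mechanical conversion, this hunk deletes the one call site that carried its own capacity pre-check: the old code computed required_buffer_size from HTT_MAX_PRINT_CHAR_PER_ELEM and printed "INSUFFICIENT PRINT BUFFER" instead of the histogram whenever the fixed HTT_MAX_STRING_LEN temporary might overflow. With the array formatted straight into the output buffer, each element is bounded by the space actually remaining, so an oversized array is now truncated safely rather than rejected outright. The per-write bound looks like this in isolation (helper name invented for the sketch):

	static u32 print_hist(char *buf, u32 len, u32 buf_len,
			      const u32 *hist, u16 num_elements)
	{
		u16 i;

		/* buf_len - len shrinks as we go; scnprintf() never
		 * writes past it, so no up-front size check is needed.
		 */
		for (i = 0; i < num_elements; i++)
			len += scnprintf(buf + len, buf_len - len,
					 " %u:%u,", i, hist[i]);
		return len;
	}
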
@@ -2566,21 +2442,21 @@ htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *s
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl2fw_entry_count = %u",
- htt_stats_buf->tcl2fw_entry_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "not_to_fw = %u",
- htt_stats_buf->not_to_fw);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u",
- htt_stats_buf->invalid_pdev_vdev_peer);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u",
- htt_stats_buf->tcl_res_invalid_addrx);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm2fw_entry_count = %u",
- htt_stats_buf->wbm2fw_entry_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_pdev = %u\n",
- htt_stats_buf->invalid_pdev);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "tcl2fw_entry_count = %u\n",
+ htt_stats_buf->tcl2fw_entry_count);
+ len += scnprintf(buf + len, buf_len - len, "not_to_fw = %u\n",
+ htt_stats_buf->not_to_fw);
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u\n",
+ htt_stats_buf->invalid_pdev_vdev_peer);
+ len += scnprintf(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u\n",
+ htt_stats_buf->tcl_res_invalid_addrx);
+ len += scnprintf(buf + len, buf_len - len, "wbm2fw_entry_count = %u\n",
+ htt_stats_buf->wbm2fw_entry_count);
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev = %u\n\n",
+ htt_stats_buf->invalid_pdev);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2597,52 +2473,51 @@ static inline void htt_print_ring_if_stats_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char low_wm_hit_count[HTT_MAX_STRING_LEN] = {0};
- char high_wm_hit_count[HTT_MAX_STRING_LEN] = {0};
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr = %u",
- htt_stats_buf->base_addr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "elem_size = %u",
- htt_stats_buf->elem_size);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_elems = %u",
- htt_stats_buf->num_elems__prefetch_tail_idx & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "prefetch_tail_idx = %u",
- (htt_stats_buf->num_elems__prefetch_tail_idx &
- 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "head_idx = %u",
- htt_stats_buf->head_idx__tail_idx & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tail_idx = %u",
- (htt_stats_buf->head_idx__tail_idx & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "shadow_head_idx = %u",
- htt_stats_buf->shadow_head_idx__shadow_tail_idx & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "shadow_tail_idx = %u",
- (htt_stats_buf->shadow_head_idx__shadow_tail_idx &
- 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tail_incr = %u",
- htt_stats_buf->num_tail_incr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "lwm_thresh = %u",
- htt_stats_buf->lwm_thresh__hwm_thresh & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwm_thresh = %u",
- (htt_stats_buf->lwm_thresh__hwm_thresh & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "overrun_hit_count = %u",
- htt_stats_buf->overrun_hit_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun_hit_count = %u",
- htt_stats_buf->underrun_hit_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "prod_blockwait_count = %u",
- htt_stats_buf->prod_blockwait_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cons_blockwait_count = %u",
- htt_stats_buf->cons_blockwait_count);
-
- ARRAY_TO_STRING(low_wm_hit_count, htt_stats_buf->low_wm_hit_count,
- HTT_STATS_LOW_WM_BINS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "low_wm_hit_count = %s ",
- low_wm_hit_count);
-
- ARRAY_TO_STRING(high_wm_hit_count, htt_stats_buf->high_wm_hit_count,
- HTT_STATS_HIGH_WM_BINS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "high_wm_hit_count = %s\n",
- high_wm_hit_count);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "base_addr = %u\n",
+ htt_stats_buf->base_addr);
+ len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
+ htt_stats_buf->elem_size);
+ len += scnprintf(buf + len, buf_len - len, "num_elems = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_NUM_ELEMS,
+ htt_stats_buf->num_elems__prefetch_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "prefetch_tail_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX,
+ htt_stats_buf->num_elems__prefetch_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "head_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_HEAD_IDX,
+ htt_stats_buf->head_idx__tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "tail_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_TAIL_IDX,
+ htt_stats_buf->head_idx__tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "shadow_head_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_SHADOW_HEAD_IDX,
+ htt_stats_buf->shadow_head_idx__shadow_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "shadow_tail_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_SHADOW_TAIL_IDX,
+ htt_stats_buf->shadow_head_idx__shadow_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "num_tail_incr = %u\n",
+ htt_stats_buf->num_tail_incr);
+ len += scnprintf(buf + len, buf_len - len, "lwm_thresh = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_LWM_THRESH,
+ htt_stats_buf->lwm_thresh__hwm_thresh));
+ len += scnprintf(buf + len, buf_len - len, "hwm_thresh = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_HWM_THRESH,
+ htt_stats_buf->lwm_thresh__hwm_thresh));
+ len += scnprintf(buf + len, buf_len - len, "overrun_hit_count = %u\n",
+ htt_stats_buf->overrun_hit_count);
+ len += scnprintf(buf + len, buf_len - len, "underrun_hit_count = %u\n",
+ htt_stats_buf->underrun_hit_count);
+ len += scnprintf(buf + len, buf_len - len, "prod_blockwait_count = %u\n",
+ htt_stats_buf->prod_blockwait_count);
+ len += scnprintf(buf + len, buf_len - len, "cons_blockwait_count = %u\n",
+ htt_stats_buf->cons_blockwait_count);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->low_wm_hit_count,
+ "low_wm_hit_count", HTT_STATS_LOW_WM_BINS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->high_wm_hit_count,
+ "high_wm_hit_count", HTT_STATS_HIGH_WM_BINS, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
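
This function shows the combined payoff: two HTT_MAX_STRING_LEN stack temporaries disappear at once, along with the zeroing implied by their "= {0}" initializers and the second formatting pass through "%s". In shape, taken from the lines above with old and new forms side by side:

	/* Before: zeroed stack temporary per array, then a copy pass */
	char tmp[HTT_MAX_STRING_LEN] = {0};

	ARRAY_TO_STRING(tmp, htt_stats_buf->low_wm_hit_count,
			HTT_STATS_LOW_WM_BINS);
	len += HTT_DBG_OUT(buf + len, buf_len - len,
			   "low_wm_hit_count = %s ", tmp);

	/* After: one bounded pass straight into buf, no temporary */
	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->low_wm_hit_count,
			   "low_wm_hit_count", HTT_STATS_LOW_WM_BINS, "\n");

On kernels with small fixed stacks, dropping a pair of HTT_MAX_STRING_LEN buffers from a leaf function is a meaningful saving on its own.
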
@@ -2660,11 +2535,11 @@ static inline void htt_print_ring_if_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RING_IF_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
- htt_stats_buf->num_records);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ htt_stats_buf->num_records);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2682,16 +2557,12 @@ static inline void htt_print_sfm_client_user_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char dwords_used_by_user_n[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = tag_len >> 2;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:\n");
- ARRAY_TO_STRING(dwords_used_by_user_n,
- htt_stats_buf->dwords_used_by_user_n,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dwords_used_by_user_n = %s\n",
- dwords_used_by_user_n);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->dwords_used_by_user_n,
+ "dwords_used_by_user_n", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2709,21 +2580,21 @@ static inline void htt_print_sfm_client_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "client_id = %u",
- htt_stats_buf->client_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_min = %u",
- htt_stats_buf->buf_min);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_max = %u",
- htt_stats_buf->buf_max);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_busy = %u",
- htt_stats_buf->buf_busy);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_alloc = %u",
- htt_stats_buf->buf_alloc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_avail = %u",
- htt_stats_buf->buf_avail);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_users = %u\n",
- htt_stats_buf->num_users);
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "client_id = %u\n",
+ htt_stats_buf->client_id);
+ len += scnprintf(buf + len, buf_len - len, "buf_min = %u\n",
+ htt_stats_buf->buf_min);
+ len += scnprintf(buf + len, buf_len - len, "buf_max = %u\n",
+ htt_stats_buf->buf_max);
+ len += scnprintf(buf + len, buf_len - len, "buf_busy = %u\n",
+ htt_stats_buf->buf_busy);
+ len += scnprintf(buf + len, buf_len - len, "buf_alloc = %u\n",
+ htt_stats_buf->buf_alloc);
+ len += scnprintf(buf + len, buf_len - len, "buf_avail = %u\n",
+ htt_stats_buf->buf_avail);
+ len += scnprintf(buf + len, buf_len - len, "num_users = %u\n\n",
+ htt_stats_buf->num_users);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2741,17 +2612,17 @@ static inline void htt_print_sfm_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_total = %u",
- htt_stats_buf->buf_total);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mem_empty = %u",
- htt_stats_buf->mem_empty);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "deallocate_bufs = %u",
- htt_stats_buf->deallocate_bufs);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
- htt_stats_buf->num_records);
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "buf_total = %u\n",
+ htt_stats_buf->buf_total);
+ len += scnprintf(buf + len, buf_len - len, "mem_empty = %u\n",
+ htt_stats_buf->mem_empty);
+ len += scnprintf(buf + len, buf_len - len, "deallocate_bufs = %u\n",
+ htt_stats_buf->deallocate_bufs);
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ htt_stats_buf->num_records);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2769,42 +2640,51 @@ static inline void htt_print_sring_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ring_id = %u",
- (htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "arena = %u",
- (htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ep = %u",
- (htt_stats_buf->mac_id__ring_id__arena__ep & 0x1000000) >> 24);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr_lsb = 0x%x",
- htt_stats_buf->base_addr_lsb);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr_msb = 0x%x",
- htt_stats_buf->base_addr_msb);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ring_size = %u",
- htt_stats_buf->ring_size);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "elem_size = %u",
- htt_stats_buf->elem_size);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_avail_words = %u",
- htt_stats_buf->num_avail_words__num_valid_words & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_valid_words = %u",
- (htt_stats_buf->num_avail_words__num_valid_words &
- 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "head_ptr = %u",
- htt_stats_buf->head_ptr__tail_ptr & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tail_ptr = %u",
- (htt_stats_buf->head_ptr__tail_ptr & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "consumer_empty = %u",
- htt_stats_buf->consumer_empty__producer_full & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "producer_full = %u",
- (htt_stats_buf->consumer_empty__producer_full &
- 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "prefetch_count = %u",
- htt_stats_buf->prefetch_count__internal_tail_ptr & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "internal_tail_ptr = %u\n",
- (htt_stats_buf->prefetch_count__internal_tail_ptr &
- 0xFFFF0000) >> 16);
+ len += scnprintf(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_MAC_ID,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "ring_id = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_RING_ID,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "arena = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_ARENA,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "ep = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_EP,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "base_addr_lsb = 0x%x\n",
+ htt_stats_buf->base_addr_lsb);
+ len += scnprintf(buf + len, buf_len - len, "base_addr_msb = 0x%x\n",
+ htt_stats_buf->base_addr_msb);
+ len += scnprintf(buf + len, buf_len - len, "ring_size = %u\n",
+ htt_stats_buf->ring_size);
+ len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
+ htt_stats_buf->elem_size);
+ len += scnprintf(buf + len, buf_len - len, "num_avail_words = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_NUM_AVAIL_WORDS,
+ htt_stats_buf->num_avail_words__num_valid_words));
+ len += scnprintf(buf + len, buf_len - len, "num_valid_words = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_NUM_VALID_WORDS,
+ htt_stats_buf->num_avail_words__num_valid_words));
+ len += scnprintf(buf + len, buf_len - len, "head_ptr = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_HEAD_PTR,
+ htt_stats_buf->head_ptr__tail_ptr));
+ len += scnprintf(buf + len, buf_len - len, "tail_ptr = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_TAIL_PTR,
+ htt_stats_buf->head_ptr__tail_ptr));
+ len += scnprintf(buf + len, buf_len - len, "consumer_empty = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_CONSUMER_EMPTY,
+ htt_stats_buf->consumer_empty__producer_full));
+ len += scnprintf(buf + len, buf_len - len, "producer_full = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_PRODUCER_FULL,
+ htt_stats_buf->consumer_empty__producer_full));
+ len += scnprintf(buf + len, buf_len - len, "prefetch_count = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_PREFETCH_COUNT,
+ htt_stats_buf->prefetch_count__internal_tail_ptr));
+ len += scnprintf(buf + len, buf_len - len, "internal_tail_ptr = %lu\n\n",
+ FIELD_GET(HTT_SRING_STATS_INTERNAL_TAIL_PTR,
+ htt_stats_buf->prefetch_count__internal_tail_ptr));
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2822,9 +2702,9 @@ static inline void htt_print_sring_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
- htt_stats_buf->num_records);
+ len += scnprintf(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ htt_stats_buf->num_records);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2842,165 +2722,115 @@ static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 j;
- char str_buf[HTT_MAX_STRING_LEN] = {0};
- char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
-
- for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
- tx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!tx_gi[j])
- goto fail;
- }
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_ldpc = %u",
- htt_stats_buf->tx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u",
- htt_stats_buf->ac_mu_mimo_tx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u",
- htt_stats_buf->ax_mu_mimo_tx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_ldpc = %u",
- htt_stats_buf->ofdma_tx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
- htt_stats_buf->rts_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_success = %u",
- htt_stats_buf->rts_success);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_rssi = %u",
- htt_stats_buf->ack_rssi);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u",
- htt_stats_buf->tx_legacy_cck_rate[0],
- htt_stats_buf->tx_legacy_cck_rate[1],
- htt_stats_buf->tx_legacy_cck_rate[2],
- htt_stats_buf->tx_legacy_cck_rate[3]);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
- " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u",
- htt_stats_buf->tx_legacy_ofdm_rate[0],
- htt_stats_buf->tx_legacy_ofdm_rate[1],
- htt_stats_buf->tx_legacy_ofdm_rate[2],
- htt_stats_buf->tx_legacy_ofdm_rate[3],
- htt_stats_buf->tx_legacy_ofdm_rate[4],
- htt_stats_buf->tx_legacy_ofdm_rate[5],
- htt_stats_buf->tx_legacy_ofdm_rate[6],
- htt_stats_buf->tx_legacy_ofdm_rate[7]);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_nss,
- HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_nss,
- HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_nss,
- HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_nss,
- HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_bw,
- HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_bw = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_bw,
- HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_bw = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_bw,
- HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_bw = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_bw,
- HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_bw = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_stbc,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_stbc = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_pream,
- HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_pream = %s ", str_buf);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u",
- htt_stats_buf->tx_he_ltf[1],
- htt_stats_buf->tx_he_ltf[2],
- htt_stats_buf->tx_he_ltf[3]);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
+ htt_stats_buf->tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u\n",
+ htt_stats_buf->ac_mu_mimo_tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u\n",
+ htt_stats_buf->ax_mu_mimo_tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "ofdma_tx_ldpc = %u\n",
+ htt_stats_buf->ofdma_tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rts_success = %u\n",
+ htt_stats_buf->rts_success);
+ len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
+ htt_stats_buf->ack_rssi);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u\n",
+ htt_stats_buf->tx_legacy_cck_rate[0],
+ htt_stats_buf->tx_legacy_cck_rate[1],
+ htt_stats_buf->tx_legacy_cck_rate[2],
+ htt_stats_buf->tx_legacy_cck_rate[3]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
+ " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u\n",
+ htt_stats_buf->tx_legacy_ofdm_rate[0],
+ htt_stats_buf->tx_legacy_ofdm_rate[1],
+ htt_stats_buf->tx_legacy_ofdm_rate[2],
+ htt_stats_buf->tx_legacy_ofdm_rate[3],
+ htt_stats_buf->tx_legacy_ofdm_rate[4],
+ htt_stats_buf->tx_legacy_ofdm_rate[5],
+ htt_stats_buf->tx_legacy_ofdm_rate[6],
+ htt_stats_buf->tx_legacy_ofdm_rate[7]);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_mcs,
+ "ac_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_mcs,
+ "ax_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_mcs, "ofdma_tx_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_nss,
+ "ac_mu_mimo_tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_nss,
+ "ax_mu_mimo_tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_nss, "ofdma_tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_bw,
+ "ac_mu_mimo_tx_bw", HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_bw,
+ "ax_mu_mimo_tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_bw, "ofdma_tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream",
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u\n",
+ htt_stats_buf->tx_he_ltf[1],
+ htt_stats_buf->tx_he_ltf[2],
+ htt_stats_buf->tx_he_ltf[3]);
/* SU GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->tx_gi[j],
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_gi[%u] = %s ",
- j, tx_gi[j]);
+ len += scnprintf(buf + len, (buf_len - len),
+ "tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
/* AC MU-MIMO GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ac_mu_mimo_tx_gi[j],
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_tx_gi[%u] = %s ",
- j, tx_gi[j]);
+ len += scnprintf(buf + len, (buf_len - len),
+ "ac_mu_mimo_tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_gi[j],
+ NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
/* AX MU-MIMO GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ax_mu_mimo_tx_gi[j],
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_tx_gi[%u] = %s ",
- j, tx_gi[j]);
+ len += scnprintf(buf + len, (buf_len - len),
+ "ax_mu_mimo_tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_gi[j],
+ NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
/* DL OFDMA GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ofdma_tx_gi[j],
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_gi[%u] = %s ",
- j, tx_gi[j]);
+ len += scnprintf(buf + len, (buf_len - len),
+ "ofdma_tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_gi[j], NULL,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_dcm,
- HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_dcm = %s\n", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm",
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3008,9 +2838,6 @@ static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf,
buf[len] = 0;
stats_req->buf_len = len;
-fail:
- for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++)
- kfree(tx_gi[j]);
}
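
With every array now formatted straight into the destination buffer, the GFP_ATOMIC string temporaries and the fail: unwind removed above become dead weight. A hypothetical helper (not part of the patch) showing the allocation-free shape:

static u32 htt_print_u8_array(char *buf, u32 len, u32 buf_len,
			      const u8 *arr, u16 n)
{
	u16 i;

	/* No temporary kmalloc() buffer, so no failure path to unwind */
	for (i = 0; i < n; i++)
		len += scnprintf(buf + len, buf_len - len,
				 " %u:%u,", i, arr[i]);

	return len + scnprintf(buf + len, buf_len - len, "\n");
}
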
static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
@@ -3021,226 +2848,168 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 i, j;
- u16 index = 0;
- char *rssi_chain[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL};
- char *rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS] = {NULL};
- char str_buf[HTT_MAX_STRING_LEN] = {0};
- char *rx_pilot_evm_db[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL};
- for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- rssi_chain[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!rssi_chain[j])
- goto fail;
- }
-
- for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- rx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!rx_gi[j])
- goto fail;
- }
-
- for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- rx_pilot_evm_db[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!rx_pilot_evm_db[j])
- goto fail;
- }
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "nsts = %u",
- htt_stats_buf->nsts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ldpc = %u",
- htt_stats_buf->rx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
- htt_stats_buf->rts_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_mgmt = %u",
- htt_stats_buf->rssi_mgmt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_data = %u",
- htt_stats_buf->rssi_data);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_comb = %u",
- htt_stats_buf->rssi_comb);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_in_dbm = %d",
- htt_stats_buf->rssi_in_dbm);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_mcs,
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_nss,
- HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_dcm,
- HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_dcm = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_stbc,
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_stbc = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_bw,
- HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_bw = %s ", str_buf);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_evm_nss_count = %u",
- htt_stats_buf->nss_count);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_evm_pilot_count = %u",
- htt_stats_buf->pilot_count);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
+ htt_stats_buf->nsts);
+ len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+ htt_stats_buf->rx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
+ htt_stats_buf->rssi_mgmt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
+ htt_stats_buf->rssi_data);
+ len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
+ htt_stats_buf->rssi_comb);
+ len += scnprintf(buf + len, buf_len - len, "rssi_in_dbm = %d\n",
+ htt_stats_buf->rssi_in_dbm);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss",
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm",
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw",
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "rx_evm_nss_count = %u\n",
+ htt_stats_buf->nss_count);
+
+ len += scnprintf(buf + len, buf_len - len, "rx_evm_pilot_count = %u\n",
+ htt_stats_buf->pilot_count);
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- index = 0;
-
+ len += scnprintf(buf + len, buf_len - len,
+ "pilot_evm_db[%u] = ", j);
for (i = 0; i < HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS; i++)
- index += scnprintf(&rx_pilot_evm_db[j][index],
- HTT_MAX_STRING_LEN - index,
- " %u:%d,",
- i,
- htt_stats_buf->rx_pilot_evm_db[j][i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB[%u] = %s ",
- j, rx_pilot_evm_db[j]);
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_pilot_evm_db[j][i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
}
- index = 0;
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ len += scnprintf(buf + len, buf_len - len,
+ "pilot_evm_db_mean = ");
for (i = 0; i < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
- index += scnprintf(&str_buf[index],
- HTT_MAX_STRING_LEN - index,
- " %u:%d,", i, htt_stats_buf->rx_pilot_evm_db_mean[i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB_mean = %s ", str_buf);
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,", i,
+ htt_stats_buf->rx_pilot_evm_db_mean[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- ARRAY_TO_STRING(rssi_chain[j], htt_stats_buf->rssi_chain[j],
- HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_chain[%u] = %s ",
- j, rssi_chain[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "rssi_chain[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
}
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->rx_gi[j],
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_gi[%u] = %s ",
- j, rx_gi[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_pream,
- HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_pream = %s", str_buf);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_su_ext = %u",
- htt_stats_buf->rx_11ax_su_ext);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ac_mumimo = %u",
- htt_stats_buf->rx_11ac_mumimo);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_mumimo = %u",
- htt_stats_buf->rx_11ax_mumimo);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_ofdma = %u",
- htt_stats_buf->rx_11ax_ofdma);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "txbf = %u",
- htt_stats_buf->txbf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_legacy_cck_rate,
- HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_legacy_cck_rate = %s ",
- str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_legacy_ofdm_rate,
- HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_legacy_ofdm_rate = %s ",
- str_buf);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_active_dur_us_low = %u",
- htt_stats_buf->rx_active_dur_us_low);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_active_dur_us_high = %u",
- htt_stats_buf->rx_active_dur_us_high);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u",
- htt_stats_buf->rx_11ax_ul_ofdma);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_mcs,
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_mcs = %s ", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream",
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_su_ext = %u\n",
+ htt_stats_buf->rx_11ax_su_ext);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ac_mumimo = %u\n",
+ htt_stats_buf->rx_11ac_mumimo);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_mumimo = %u\n",
+ htt_stats_buf->rx_11ax_mumimo);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ofdma = %u\n",
+ htt_stats_buf->rx_11ax_ofdma);
+ len += scnprintf(buf + len, buf_len - len, "txbf = %u\n",
+ htt_stats_buf->txbf);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_cck_rate,
+ "rx_legacy_cck_rate",
+ HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_ofdm_rate,
+ "rx_legacy_ofdm_rate",
+ HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_low = %u\n",
+ htt_stats_buf->rx_active_dur_us_low);
+ len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_high = %u\n",
+ htt_stats_buf->rx_active_dur_us_high);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
+ htt_stats_buf->rx_11ax_ul_ofdma);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_mcs,
+ "ul_ofdma_rx_mcs",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->ul_ofdma_rx_gi[j],
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_gi[%u] = %s ",
- j, rx_gi[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_ofdma_rx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_gi[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_nss,
- HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_nss = %s ", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_nss,
+ "ul_ofdma_rx_nss",
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_bw,
- HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_bw = %s ", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_bw, "ul_ofdma_rx_bw",
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u",
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n",
htt_stats_buf->ul_ofdma_rx_stbc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u",
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n",
htt_stats_buf->ul_ofdma_rx_ldpc);
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_non_data_ppdu,
- HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_non_data_ppdu = %s ",
- str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_data_ppdu,
- HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_data_ppdu = %s ",
- str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_mpdu_ok,
- HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_mpdu_ok = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_mpdu_fail,
- HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_mpdu_fail = %s",
- str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_ppdu,
+ "rx_ulofdma_non_data_ppdu",
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_ppdu,
+ "rx_ulofdma_data_ppdu", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_ok,
+ "rx_ulofdma_mpdu_ok", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_fail,
+ "rx_ulofdma_mpdu_fail", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- index = 0;
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_fd_rssi: nss[%u] = ", j);
for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
- index += scnprintf(&str_buf[index],
- HTT_MAX_STRING_LEN - index,
- " %u:%d,",
- i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "rx_ul_fd_rssi: nss[%u] = %s", j, str_buf);
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
}
- len += HTT_DBG_OUT(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x",
- htt_stats_buf->per_chain_rssi_pkt_type);
+ len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n",
+ htt_stats_buf->per_chain_rssi_pkt_type);
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- index = 0;
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_per_chain_rssi_in_dbm[%u] = ", j);
for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
- index += scnprintf(&str_buf[index],
- HTT_MAX_STRING_LEN - index,
- " %u:%d,",
- i,
- htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "rx_per_chain_rssi_in_dbm[%u] = %s ", j, str_buf);
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
}
- len += HTT_DBG_OUT(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3248,16 +3017,6 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
buf[len] = 0;
stats_req->buf_len = len;
-
-fail:
- for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
- kfree(rssi_chain[j]);
-
- for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
- kfree(rx_pilot_evm_db[j]);
-
- for (i = 0; i < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; i++)
- kfree(rx_gi[i]);
}
static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf,
@@ -3268,34 +3027,34 @@ static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_SOC_FW_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u",
- htt_stats_buf->fw_reo_ring_data_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u",
- htt_stats_buf->fw_to_host_data_msdu_bcmc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u",
- htt_stats_buf->fw_to_host_data_msdu_uc);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ofld_remote_data_buf_recycle_cnt = %u",
- htt_stats_buf->ofld_remote_data_buf_recycle_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ofld_remote_free_buf_indication_cnt = %u",
- htt_stats_buf->ofld_remote_free_buf_indication_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ofld_buf_to_host_data_msdu_uc = %u",
- htt_stats_buf->ofld_buf_to_host_data_msdu_uc);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "reo_fw_ring_to_host_data_msdu_uc = %u",
- htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_sw_ring_reap = %u",
- htt_stats_buf->wbm_sw_ring_reap);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u",
- htt_stats_buf->wbm_forward_to_host_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u",
- htt_stats_buf->wbm_target_recycle_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "target_refill_ring_recycle_cnt = %u",
- htt_stats_buf->target_refill_ring_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_SOC_FW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u\n",
+ htt_stats_buf->fw_reo_ring_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u\n",
+ htt_stats_buf->fw_to_host_data_msdu_bcmc);
+ len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u\n",
+ htt_stats_buf->fw_to_host_data_msdu_uc);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_remote_data_buf_recycle_cnt = %u\n",
+ htt_stats_buf->ofld_remote_data_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_remote_free_buf_indication_cnt = %u\n",
+ htt_stats_buf->ofld_remote_free_buf_indication_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_buf_to_host_data_msdu_uc = %u\n",
+ htt_stats_buf->ofld_buf_to_host_data_msdu_uc);
+ len += scnprintf(buf + len, buf_len - len,
+ "reo_fw_ring_to_host_data_msdu_uc = %u\n",
+ htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc);
+ len += scnprintf(buf + len, buf_len - len, "wbm_sw_ring_reap = %u\n",
+ htt_stats_buf->wbm_sw_ring_reap);
+ len += scnprintf(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u\n",
+ htt_stats_buf->wbm_forward_to_host_cnt);
+ len += scnprintf(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u\n",
+ htt_stats_buf->wbm_target_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "target_refill_ring_recycle_cnt = %u\n",
+ htt_stats_buf->target_refill_ring_recycle_cnt);
if (len >= buf_len)
buf[buf_len - 1] = 0;
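
The unchecked `buf + len` appends stay in bounds because scnprintf(), unlike snprintf(), returns the number of bytes it actually wrote (excluding the NUL), never the length the full output would have needed. A standalone illustration, not taken from the patch:

char buf[8];
u32 len = 0;

len += scnprintf(buf + len, sizeof(buf) - len, "%s", "hello ");
len += scnprintf(buf + len, sizeof(buf) - len, "%s", "world");
/* len == 7, buf == "hello w": truncated, but still NUL-terminated
 * and in bounds, so the trailing buf[buf_len - 1] = 0 guard is
 * purely defensive.
 */
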
@@ -3314,17 +3073,13 @@ htt_print_rx_soc_fw_refill_ring_empty_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char refill_ring_empty_cnt[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:\n");
- ARRAY_TO_STRING(refill_ring_empty_cnt,
- htt_stats_buf->refill_ring_empty_cnt,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "refill_ring_empty_cnt = %s\n",
- refill_ring_empty_cnt);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_empty_cnt,
+ "refill_ring_empty_cnt", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3344,17 +3099,13 @@ htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char rxdma_err_cnt[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_RXDMA_MAX_ERR_CODE);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:\n");
- ARRAY_TO_STRING(rxdma_err_cnt,
- htt_stats_buf->rxdma_err,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rxdma_err = %s\n",
- rxdma_err_cnt);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rxdma_err, "rxdma_err",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3373,17 +3124,13 @@ htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char reo_err_cnt[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_REO_MAX_ERR_CODE);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:\n");
- ARRAY_TO_STRING(reo_err_cnt,
- htt_stats_buf->reo_err,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "reo_err = %s\n",
- reo_err_cnt);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reo_err, "reo_err",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3402,27 +3149,27 @@ htt_print_rx_reo_debug_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sample_id = %u",
- htt_stats_buf->sample_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "total_max = %u",
- htt_stats_buf->total_max);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "total_avg = %u",
- htt_stats_buf->total_avg);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "total_sample = %u",
- htt_stats_buf->total_sample);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "non_zeros_avg = %u",
- htt_stats_buf->non_zeros_avg);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "non_zeros_sample = %u",
- htt_stats_buf->non_zeros_sample);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_max = %u",
- htt_stats_buf->last_non_zeros_max);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_min %u",
- htt_stats_buf->last_non_zeros_min);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_avg %u",
- htt_stats_buf->last_non_zeros_avg);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_sample %u\n",
- htt_stats_buf->last_non_zeros_sample);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "sample_id = %u\n",
+ htt_stats_buf->sample_id);
+ len += scnprintf(buf + len, buf_len - len, "total_max = %u\n",
+ htt_stats_buf->total_max);
+ len += scnprintf(buf + len, buf_len - len, "total_avg = %u\n",
+ htt_stats_buf->total_avg);
+ len += scnprintf(buf + len, buf_len - len, "total_sample = %u\n",
+ htt_stats_buf->total_sample);
+ len += scnprintf(buf + len, buf_len - len, "non_zeros_avg = %u\n",
+ htt_stats_buf->non_zeros_avg);
+ len += scnprintf(buf + len, buf_len - len, "non_zeros_sample = %u\n",
+ htt_stats_buf->non_zeros_sample);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_max = %u\n",
+ htt_stats_buf->last_non_zeros_max);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_min %u\n",
+ htt_stats_buf->last_non_zeros_min);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_avg %u\n",
+ htt_stats_buf->last_non_zeros_avg);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_sample %u\n\n",
+ htt_stats_buf->last_non_zeros_sample);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3441,17 +3188,13 @@ htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char refill_ring_num_refill[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:\n");
- ARRAY_TO_STRING(refill_ring_num_refill,
- htt_stats_buf->refill_ring_num_refill,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "refill_ring_num_refill = %s\n",
- refill_ring_num_refill);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_num_refill,
+ "refill_ring_num_refill", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3468,113 +3211,106 @@ static inline void htt_print_rx_pdev_fw_stats_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char fw_ring_mgmt_subtype[HTT_MAX_STRING_LEN] = {0};
- char fw_ring_ctrl_subtype[HTT_MAX_STRING_LEN] = {0};
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ppdu_recvd = %u",
- htt_stats_buf->ppdu_recvd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u",
- htt_stats_buf->mpdu_cnt_fcs_ok);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u",
- htt_stats_buf->mpdu_cnt_fcs_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tcp_msdu_cnt = %u",
- htt_stats_buf->tcp_msdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u",
- htt_stats_buf->tcp_ack_msdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "udp_msdu_cnt = %u",
- htt_stats_buf->udp_msdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "other_msdu_cnt = %u",
- htt_stats_buf->other_msdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u",
- htt_stats_buf->fw_ring_mpdu_ind);
-
- ARRAY_TO_STRING(fw_ring_mgmt_subtype,
- htt_stats_buf->fw_ring_mgmt_subtype,
- HTT_STATS_SUBTYPE_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mgmt_subtype = %s ",
- fw_ring_mgmt_subtype);
-
- ARRAY_TO_STRING(fw_ring_ctrl_subtype,
- htt_stats_buf->fw_ring_ctrl_subtype,
- HTT_STATS_SUBTYPE_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_ctrl_subtype = %s ",
- fw_ring_ctrl_subtype);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u",
- htt_stats_buf->fw_ring_mcast_data_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u",
- htt_stats_buf->fw_ring_bcast_data_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u",
- htt_stats_buf->fw_ring_ucast_data_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u",
- htt_stats_buf->fw_ring_null_data_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u",
- htt_stats_buf->fw_ring_mpdu_drop);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u",
- htt_stats_buf->ofld_local_data_ind_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ofld_local_data_buf_recycle_cnt = %u",
- htt_stats_buf->ofld_local_data_buf_recycle_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u",
- htt_stats_buf->drx_local_data_ind_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "drx_local_data_buf_recycle_cnt = %u",
- htt_stats_buf->drx_local_data_buf_recycle_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_nondata_ind_cnt = %u",
- htt_stats_buf->local_nondata_ind_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u",
- htt_stats_buf->local_nondata_buf_recycle_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u",
- htt_stats_buf->fw_status_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u",
- htt_stats_buf->fw_status_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u",
- htt_stats_buf->fw_pkt_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u",
- htt_stats_buf->fw_pkt_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u",
- htt_stats_buf->fw_link_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u",
- htt_stats_buf->fw_link_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u",
- htt_stats_buf->host_pkt_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u",
- htt_stats_buf->host_pkt_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u",
- htt_stats_buf->mon_pkt_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u",
- htt_stats_buf->mon_pkt_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "mon_status_buf_ring_refill_cnt = %u",
- htt_stats_buf->mon_status_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u",
- htt_stats_buf->mon_status_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u",
- htt_stats_buf->mon_desc_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u",
- htt_stats_buf->mon_desc_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u",
- htt_stats_buf->mon_dest_ring_update_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u",
- htt_stats_buf->mon_dest_ring_full_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_suspend_cnt = %u",
- htt_stats_buf->rx_suspend_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u",
- htt_stats_buf->rx_suspend_fail_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_resume_cnt = %u",
- htt_stats_buf->rx_resume_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_resume_fail_cnt = %u",
- htt_stats_buf->rx_resume_fail_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ring_switch_cnt = %u",
- htt_stats_buf->rx_ring_switch_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ring_restore_cnt = %u",
- htt_stats_buf->rx_ring_restore_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_flush_cnt = %u",
- htt_stats_buf->rx_flush_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n",
- htt_stats_buf->rx_recovery_reset_cnt);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "ppdu_recvd = %u\n",
+ htt_stats_buf->ppdu_recvd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u\n",
+ htt_stats_buf->mpdu_cnt_fcs_ok);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u\n",
+ htt_stats_buf->mpdu_cnt_fcs_err);
+ len += scnprintf(buf + len, buf_len - len, "tcp_msdu_cnt = %u\n",
+ htt_stats_buf->tcp_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u\n",
+ htt_stats_buf->tcp_ack_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "udp_msdu_cnt = %u\n",
+ htt_stats_buf->udp_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "other_msdu_cnt = %u\n",
+ htt_stats_buf->other_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u\n",
+ htt_stats_buf->fw_ring_mpdu_ind);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mgmt_subtype,
+ "fw_ring_mgmt_subtype", HTT_STATS_SUBTYPE_MAX, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_ctrl_subtype,
+ "fw_ring_ctrl_subtype", HTT_STATS_SUBTYPE_MAX, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_mcast_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_bcast_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_ucast_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_null_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u\n",
+ htt_stats_buf->fw_ring_mpdu_drop);
+ len += scnprintf(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u\n",
+ htt_stats_buf->ofld_local_data_ind_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_local_data_buf_recycle_cnt = %u\n",
+ htt_stats_buf->ofld_local_data_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u\n",
+ htt_stats_buf->drx_local_data_ind_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "drx_local_data_buf_recycle_cnt = %u\n",
+ htt_stats_buf->drx_local_data_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "local_nondata_ind_cnt = %u\n",
+ htt_stats_buf->local_nondata_ind_cnt);
+ len += scnprintf(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u\n",
+ htt_stats_buf->local_nondata_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->fw_status_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->fw_status_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->fw_pkt_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->fw_pkt_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->fw_link_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->fw_link_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->host_pkt_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->host_pkt_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->mon_pkt_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->mon_pkt_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "mon_status_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->mon_status_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->mon_status_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->mon_desc_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->mon_desc_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u\n",
+ htt_stats_buf->mon_dest_ring_update_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u\n",
+ htt_stats_buf->mon_dest_ring_full_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_suspend_cnt = %u\n",
+ htt_stats_buf->rx_suspend_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u\n",
+ htt_stats_buf->rx_suspend_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_resume_cnt = %u\n",
+ htt_stats_buf->rx_resume_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_resume_fail_cnt = %u\n",
+ htt_stats_buf->rx_resume_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_ring_switch_cnt = %u\n",
+ htt_stats_buf->rx_ring_switch_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_ring_restore_cnt = %u\n",
+ htt_stats_buf->rx_ring_restore_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n",
+ htt_stats_buf->rx_flush_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n\n",
+ htt_stats_buf->rx_recovery_reset_cnt);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3592,16 +3328,12 @@ htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char fw_ring_mpdu_err[HTT_MAX_STRING_LEN] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:\n");
- ARRAY_TO_STRING(fw_ring_mpdu_err,
- htt_stats_buf->fw_ring_mpdu_err,
- HTT_RX_STATS_RXDMA_MAX_ERR);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_err = %s\n",
- fw_ring_mpdu_err);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mpdu_err,
+ "fw_ring_mpdu_err", HTT_RX_STATS_RXDMA_MAX_ERR, "\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3620,15 +3352,12 @@ htt_print_rx_pdev_fw_mpdu_drop_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char fw_mpdu_drop[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_FW_DROP_REASON_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:\n");
- ARRAY_TO_STRING(fw_mpdu_drop,
- htt_stats_buf->fw_mpdu_drop,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_mpdu_drop = %s\n", fw_mpdu_drop);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_mpdu_drop, "fw_mpdu_drop",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3646,18 +3375,15 @@ htt_print_rx_pdev_fw_stats_phy_err_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char phy_errs[HTT_MAX_STRING_LEN] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id__word = %u",
- htt_stats_buf->mac_id__word);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "total_phy_err_nct = %u",
- htt_stats_buf->total_phy_err_cnt);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id__word = %u\n",
+ htt_stats_buf->mac_id__word);
+ len += scnprintf(buf + len, buf_len - len, "total_phy_err_nct = %u\n",
+ htt_stats_buf->total_phy_err_cnt);
- ARRAY_TO_STRING(phy_errs,
- htt_stats_buf->phy_err,
- HTT_STATS_PHY_ERR_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_errs = %s\n", phy_errs);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_err, "phy_errs",
+ HTT_STATS_PHY_ERR_MAX, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3676,20 +3402,20 @@ htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "\nHTT_PDEV_CCA_STATS_HIST_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "chan_num = %u",
- htt_stats_buf->chan_num);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u",
- htt_stats_buf->num_records);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x",
- htt_stats_buf->valid_cca_counters_bitmap);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "collection_interval = %u\n",
- htt_stats_buf->collection_interval);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)");
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|");
+ len += scnprintf(buf + len, buf_len - len, "\nHTT_PDEV_CCA_STATS_HIST_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
+ htt_stats_buf->chan_num);
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n",
+ htt_stats_buf->num_records);
+ len += scnprintf(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x\n",
+ htt_stats_buf->valid_cca_counters_bitmap);
+ len += scnprintf(buf + len, buf_len - len, "collection_interval = %u\n\n",
+ htt_stats_buf->collection_interval);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3708,16 +3434,16 @@ htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|",
- htt_stats_buf->tx_frame_usec,
- htt_stats_buf->rx_frame_usec,
- htt_stats_buf->rx_clear_usec,
- htt_stats_buf->my_rx_frame_usec,
- htt_stats_buf->usec_cnt,
- htt_stats_buf->med_rx_idle_usec,
- htt_stats_buf->med_tx_idle_global_usec,
- htt_stats_buf->cca_obss_usec);
+ len += scnprintf(buf + len, buf_len - len,
+ "|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|\n",
+ htt_stats_buf->tx_frame_usec,
+ htt_stats_buf->rx_frame_usec,
+ htt_stats_buf->rx_clear_usec,
+ htt_stats_buf->my_rx_frame_usec,
+ htt_stats_buf->usec_cnt,
+ htt_stats_buf->med_rx_idle_usec,
+ htt_stats_buf->med_tx_idle_global_usec,
+ htt_stats_buf->cca_obss_usec);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3735,32 +3461,32 @@ static inline void htt_print_hw_stats_whal_tx_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_unpause_ppdu_id = %u",
- htt_stats_buf->last_unpause_ppdu_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u",
- htt_stats_buf->hwsch_unpause_wait_tqm_write);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u",
- htt_stats_buf->hwsch_dummy_tlv_skipped);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "hwsch_misaligned_offset_received = %u",
- htt_stats_buf->hwsch_misaligned_offset_received);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_reset_count = %u",
- htt_stats_buf->hwsch_reset_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_dev_reset_war = %u",
- htt_stats_buf->hwsch_dev_reset_war);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_delayed_pause = %u",
- htt_stats_buf->hwsch_delayed_pause);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u",
- htt_stats_buf->hwsch_long_delayed_pause);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u",
- htt_stats_buf->sch_rx_ppdu_no_response);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_selfgen_response = %u",
- htt_stats_buf->sch_selfgen_response);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n",
- htt_stats_buf->sch_rx_sifs_resp_trigger);
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "last_unpause_ppdu_id = %u\n",
+ htt_stats_buf->last_unpause_ppdu_id);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u\n",
+ htt_stats_buf->hwsch_unpause_wait_tqm_write);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u\n",
+ htt_stats_buf->hwsch_dummy_tlv_skipped);
+ len += scnprintf(buf + len, buf_len - len,
+ "hwsch_misaligned_offset_received = %u\n",
+ htt_stats_buf->hwsch_misaligned_offset_received);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n",
+ htt_stats_buf->hwsch_reset_count);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n",
+ htt_stats_buf->hwsch_dev_reset_war);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_delayed_pause = %u\n",
+ htt_stats_buf->hwsch_delayed_pause);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u\n",
+ htt_stats_buf->hwsch_long_delayed_pause);
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u\n",
+ htt_stats_buf->sch_rx_ppdu_no_response);
+ len += scnprintf(buf + len, buf_len - len, "sch_selfgen_response = %u\n",
+ htt_stats_buf->sch_selfgen_response);
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n\n",
+ htt_stats_buf->sch_rx_sifs_resp_trigger);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3779,11 +3505,11 @@ htt_print_pdev_stats_twt_sessions_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
- htt_stats_buf->pdev_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sessions = %u\n",
- htt_stats_buf->num_sessions);
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ htt_stats_buf->pdev_id);
+ len += scnprintf(buf + len, buf_len - len, "num_sessions = %u\n\n",
+ htt_stats_buf->num_sessions);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3802,27 +3528,33 @@ htt_print_pdev_stats_twt_session_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "vdev_id = %u",
- htt_stats_buf->vdev_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "peer_mac = %02x:%02x:%02x:%02x:%02x:%02x",
- htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF,
- (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF00) >> 8,
- (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF0000) >> 16,
- (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF000000) >> 24,
- (htt_stats_buf->peer_mac.mac_addr_h16 & 0xFF),
- (htt_stats_buf->peer_mac.mac_addr_h16 & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "flow_id_flags = %u",
- htt_stats_buf->flow_id_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dialog_id = %u",
- htt_stats_buf->dialog_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wake_dura_us = %u",
- htt_stats_buf->wake_dura_us);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wake_intvl_us = %u",
- htt_stats_buf->wake_intvl_us);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sp_offset_us = %u\n",
- htt_stats_buf->sp_offset_us);
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n",
+ htt_stats_buf->vdev_id);
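+ /* peer MAC octets are packed LSB-first into mac_addr_l32/mac_addr_h16 */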
+ len += scnprintf(buf + len, buf_len - len,
+ "peer_mac = %02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n",
+ FIELD_GET(HTT_MAC_ADDR_L32_0,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_1,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_2,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_3,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_H16_0,
+ htt_stats_buf->peer_mac.mac_addr_h16),
+ FIELD_GET(HTT_MAC_ADDR_H16_1,
+ htt_stats_buf->peer_mac.mac_addr_h16));
+ len += scnprintf(buf + len, buf_len - len, "flow_id_flags = %u\n",
+ htt_stats_buf->flow_id_flags);
+ len += scnprintf(buf + len, buf_len - len, "dialog_id = %u\n",
+ htt_stats_buf->dialog_id);
+ len += scnprintf(buf + len, buf_len - len, "wake_dura_us = %u\n",
+ htt_stats_buf->wake_dura_us);
+ len += scnprintf(buf + len, buf_len - len, "wake_intvl_us = %u\n",
+ htt_stats_buf->wake_intvl_us);
+ len += scnprintf(buf + len, buf_len - len, "sp_offset_us = %u\n\n",
+ htt_stats_buf->sp_offset_us);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3841,21 +3573,21 @@ htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx success PPDU = %u",
+ len += scnprintf(buf + len, buf_len - len, "OBSS Tx success PPDU = %u\n",
htt_stats_buf->num_obss_tx_ppdu_success);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n",
htt_stats_buf->num_obss_tx_ppdu_failure);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG Opportunities = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "Non-SRG Opportunities = %u\n",
htt_stats_buf->num_non_srg_opportunities);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG tried PPDU = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "Non-SRG tried PPDU = %u\n",
htt_stats_buf->num_non_srg_ppdu_tried);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n",
htt_stats_buf->num_non_srg_ppdu_success);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG Opportunities = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "SRG Opportunities = %u\n",
htt_stats_buf->num_srg_opportunities);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG tried PPDU = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "SRG tried PPDU = %u\n",
htt_stats_buf->num_srg_ppdu_tried);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG success PPDU = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "SRG success PPDU = %u\n\n",
htt_stats_buf->num_srg_ppdu_success);
if (len >= buf_len)
@@ -3878,25 +3610,25 @@ static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
- htt_stats_buf->pdev_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "current_head_idx = %u",
- htt_stats_buf->current_head_idx);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "current_tail_idx = %u",
- htt_stats_buf->current_tail_idx);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_htt_msgs_sent = %u",
- htt_stats_buf->num_htt_msgs_sent);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "backpressure_time_ms = %u",
- htt_stats_buf->backpressure_time_ms);
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ htt_stats_buf->pdev_id);
+ len += scnprintf(buf + len, buf_len - len, "current_head_idx = %u\n",
+ htt_stats_buf->current_head_idx);
+ len += scnprintf(buf + len, buf_len - len, "current_tail_idx = %u\n",
+ htt_stats_buf->current_tail_idx);
+ len += scnprintf(buf + len, buf_len - len, "num_htt_msgs_sent = %u\n",
+ htt_stats_buf->num_htt_msgs_sent);
+ len += scnprintf(buf + len, buf_len - len,
+ "backpressure_time_ms = %u\n",
+ htt_stats_buf->backpressure_time_ms);
for (i = 0; i < 5; i++)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "backpressure_hist_%u = %u",
- i + 1, htt_stats_buf->backpressure_hist[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "backpressure_hist_%u = %u\n",
+ i + 1, htt_stats_buf->backpressure_hist[i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "============================");
+ len += scnprintf(buf + len, buf_len - len,
+ "============================\n");
if (len >= buf_len) {
buf[buf_len - 1] = 0;
@@ -3907,6 +3639,334 @@ static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf,
}
}
+static inline
+void htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n");
+
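+ /* each rate histogram below is printed as comma-separated "index:count" pairs */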
+ len += scnprintf(buf + len, buf_len - len, "tx_ol_mcs = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ol_mcs[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_mcs = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ibf_mcs[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_mcs =");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_txbf_mcs[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ol_nss = ");
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ol_nss[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_nss = ");
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ibf_nss[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_nss = ");
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_txbf_nss[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ol_bw = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ol_bw[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_bw = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ibf_bw[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_bw = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_txbf_bw[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_ndpa_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_ndpa_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_NDPA_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_queued_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_queued[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_tried_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_tried[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_flushed_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_flushed[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_err_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_err[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_ndp_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_ndp_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_NDP_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_queued_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_queued[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_tried_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_tried[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_flushed_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_flushed[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_err_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_err[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_brp_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_brp_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_BRP_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brpoll_queued_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brpoll_queued[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brpoll_tried_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brpoll_tried[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brpoll_flushed_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brpoll_flushed[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brp_err_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brp_err[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brp_err_num_cbf_rcvd_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brp_err_num_cbf_rcvd[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_steer_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_steer_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_STEER_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_ppdu_steer_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_ppdu_steer[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_ppdu_ol_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_ppdu_ol[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_usrs_prefetch_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_usrs_prefetch[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_usrs_sound_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_usrs_sound[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_usrs_force_sound_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_usrs_force_sound[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_phy_counters_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_phy_counters_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n",
+ htt_stats_buf->rx_ofdma_timing_err_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n",
+ htt_stats_buf->rx_cck_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n",
+ htt_stats_buf->mactx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n",
+ htt_stats_buf->macrx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n",
+ htt_stats_buf->phytx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n",
+ htt_stats_buf->phyrx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n",
+ htt_stats_buf->phyrx_defer_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n",
+ htt_stats_buf->rx_gain_adj_lstf_event_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n",
+ htt_stats_buf->rx_gain_adj_non_legacy_cnt);
+
+ for (i = 0; i < HTT_MAX_RX_PKT_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len, "rx_pkt_cnt[%d] = %u\n",
+ i, htt_stats_buf->rx_pkt_cnt[i]);
+
+ for (i = 0; i < HTT_MAX_RX_PKT_CRC_PASS_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_pkt_crc_pass_cnt[%d] = %u\n",
+ i, htt_stats_buf->rx_pkt_crc_pass_cnt[i]);
+
+ for (i = 0; i < HTT_MAX_PER_BLK_ERR_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "per_blk_err_cnt[%d] = %u\n",
+ i, htt_stats_buf->per_blk_err_cnt[i]);
+
+ for (i = 0; i < HTT_MAX_RX_OTA_ERR_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ota_err_cnt[%d] = %u\n",
+ i, htt_stats_buf->rx_ota_err_cnt[i]);
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_phy_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_phy_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_STATS_MAX_CHAINS; i++)
+ len += scnprintf(buf + len, buf_len - len, "nf_chain[%d] = %d\n",
+ i, htt_stats_buf->nf_chain[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u\n",
+ htt_stats_buf->false_radar_cnt);
+ len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n",
+ htt_stats_buf->radar_cs_cnt);
+ len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n",
+ htt_stats_buf->ani_level);
+ len += scnprintf(buf + len, buf_len - len, "fw_run_time = %u\n",
+ htt_stats_buf->fw_run_time);
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_peer_ctrl_path_txrx_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_peer_ctrl_path_txrx_stats_tlv *htt_stat_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
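+ /* IEEE 802.11 management frame subtype names, indexed by subtype value */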
+ const char *mgmt_frm_type[ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1] = {
+ "assoc_req", "assoc_resp",
+ "reassoc_req", "reassoc_resp",
+ "probe_req", "probe_resp",
+ "timing_advertisement", "reserved",
+ "beacon", "atim", "disassoc",
+ "auth", "deauth", "action", "action_no_ack"};
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "peer_mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ htt_stat_buf->peer_mac_addr[0], htt_stat_buf->peer_mac_addr[1],
+ htt_stat_buf->peer_mac_addr[2], htt_stat_buf->peer_mac_addr[3],
+ htt_stat_buf->peer_mac_addr[4], htt_stat_buf->peer_mac_addr[5]);
+
+ len += scnprintf(buf + len, buf_len - len, "peer_tx_mgmt_subtype:\n");
+ for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++)
+ len += scnprintf(buf + len, buf_len - len, "%s:%u\n",
+ mgmt_frm_type[i],
+ htt_stat_buf->peer_tx_mgmt_subtype[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "peer_rx_mgmt_subtype:\n");
+ for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++)
+ len += scnprintf(buf + len, buf_len - len, "%s:%u\n",
+ mgmt_frm_type[i],
+ htt_stat_buf->peer_rx_mgmt_subtype[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
@@ -4258,6 +4318,30 @@ static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
case HTT_STATS_RING_BACKPRESSURE_STATS_TAG:
htt_print_backpressure_stats_tlv_v(tag_buf, user_data);
break;
+ case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG:
+ htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG:
+ htt_print_txbf_ofdma_ndpa_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG:
+ htt_print_txbf_ofdma_ndp_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG:
+ htt_print_txbf_ofdma_brp_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG:
+ htt_print_txbf_ofdma_steer_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_PHY_COUNTERS_TAG:
+ htt_print_phy_counters_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_PHY_STATS_TAG:
+ htt_print_phy_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:
+ htt_print_peer_ctrl_path_txrx_stats_tlv(tag_buf, stats_req);
+ break;
default:
break;
}
@@ -4345,8 +4429,7 @@ static ssize_t ath11k_write_htt_stats_type(struct file *file,
if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS)
return -E2BIG;
- if (type == ATH11K_DBG_HTT_EXT_STATS_RESET ||
- type == ATH11K_DBG_HTT_EXT_STATS_PEER_INFO)
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET)
return -EPERM;
ar->debug.htt_stats.type = type;
@@ -4407,6 +4490,15 @@ static int ath11k_prep_htt_stats_cfg_params(struct ath11k *ar, u8 type,
case ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO:
cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS;
break;
+ case ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS:
+ cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
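+ /* pack the peer MAC address into cfg1 (octets 0-3) and cfg2 (octets 4-5) */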
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]);
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]);
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]);
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]);
+ break;
default:
break;
}
@@ -4464,7 +4556,9 @@ static int ath11k_open_htt_stats(struct inode *inode, struct file *file)
u8 type = ar->debug.htt_stats.type;
int ret;
- if (type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET ||
+ type == ATH11K_DBG_HTT_EXT_STATS_PEER_INFO ||
+ type == ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS)
return -EPERM;
mutex_lock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
index d428f52003a4..dc210c54d131 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
@@ -102,6 +102,14 @@ enum htt_tlv_tag_t {
HTT_STATS_PDEV_OBSS_PD_TAG = 88,
HTT_STATS_HW_WAR_TAG = 89,
HTT_STATS_RING_BACKPRESSURE_STATS_TAG = 90,
+ HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG = 101,
+ HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108,
+ HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG = 113,
+ HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG = 114,
+ HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG = 115,
+ HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG = 116,
+ HTT_STATS_PHY_COUNTERS_TAG = 121,
+ HTT_STATS_PHY_STATS_TAG = 122,
HTT_STATS_MAX_TAG,
};
@@ -137,6 +145,8 @@ struct htt_stats_string_tlv {
u32 data[0]; /* Can be variable length */
} __packed;
+#define HTT_STATS_MAC_ID GENMASK(7, 0)
+
/* == TX PDEV STATS == */
struct htt_tx_pdev_stats_cmn_tlv {
u32 mac_id__word;
@@ -290,6 +300,10 @@ struct htt_hw_stats_whal_tx_tlv {
};
/* ============ PEER STATS ============ */
+#define HTT_MSDU_FLOW_STATS_TX_FLOW_NO GENMASK(15, 0)
+#define HTT_MSDU_FLOW_STATS_TID_NUM GENMASK(19, 16)
+#define HTT_MSDU_FLOW_STATS_DROP_RULE BIT(20)
+
struct htt_msdu_flow_stats_tlv {
u32 last_update_timestamp;
u32 last_add_timestamp;
@@ -306,6 +320,11 @@ struct htt_msdu_flow_stats_tlv {
#define MAX_HTT_TID_NAME 8
+#define HTT_TX_TID_STATS_SW_PEER_ID GENMASK(15, 0)
+#define HTT_TX_TID_STATS_TID_NUM GENMASK(31, 16)
+#define HTT_TX_TID_STATS_NUM_SCHED_PENDING GENMASK(7, 0)
+#define HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ GENMASK(15, 8)
+
/* Tidq stats */
struct htt_tx_tid_stats_tlv {
/* Stored as little endian */
@@ -326,6 +345,11 @@ struct htt_tx_tid_stats_tlv {
u32 tid_tx_airtime;
};
+#define HTT_TX_TID_STATS_V1_SW_PEER_ID GENMASK(15, 0)
+#define HTT_TX_TID_STATS_V1_TID_NUM GENMASK(31, 16)
+#define HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING GENMASK(7, 0)
+#define HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ GENMASK(15, 8)
+
/* Tidq stats */
struct htt_tx_tid_stats_v1_tlv {
/* Stored as little endian */
@@ -348,6 +372,9 @@ struct htt_tx_tid_stats_v1_tlv {
u32 sendn_frms_allowed;
};
+#define HTT_RX_TID_STATS_SW_PEER_ID GENMASK(15, 0)
+#define HTT_RX_TID_STATS_TID_NUM GENMASK(31, 16)
+
struct htt_rx_tid_stats_tlv {
u32 sw_peer_id__tid_num;
u8 tid_name[MAX_HTT_TID_NAME];
@@ -386,6 +413,10 @@ struct htt_peer_stats_cmn_tlv {
u32 inactive_time;
};
+#define HTT_PEER_DETAILS_VDEV_ID GENMASK(7, 0)
+#define HTT_PEER_DETAILS_PDEV_ID GENMASK(15, 8)
+#define HTT_PEER_DETAILS_AST_IDX GENMASK(31, 16)
+
struct htt_peer_details_tlv {
u32 peer_type;
u32 sw_peer_id;
@@ -510,6 +541,9 @@ struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv {
u32 mu_mimo_ampdu_underrun_usr;
};
+#define HTT_TX_HWQ_STATS_MAC_ID GENMASK(7, 0)
+#define HTT_TX_HWQ_STATS_HWQ_ID GENMASK(15, 8)
+
struct htt_tx_hwq_mu_mimo_cmn_stats_tlv {
u32 mac_id__hwq_id__word;
};
@@ -789,6 +823,9 @@ struct htt_sched_txq_sched_ineligibility_tlv_v {
u32 sched_ineligibility[0];
};
+#define HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0)
+#define HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8)
+
struct htt_tx_pdev_stats_sched_per_txq_tlv {
u32 mac_id__txq_id__word;
u32 sched_policy;
@@ -910,6 +947,9 @@ struct htt_tx_tqm_error_stats_tlv {
};
/* == TQM CMDQ stats == */
+#define HTT_TX_TQM_CMDQ_STATUS_MAC_ID GENMASK(7, 0)
+#define HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID GENMASK(15, 8)
+
struct htt_tx_tqm_cmdq_status_tlv {
u32 mac_id__cmdq_id__word;
u32 sync_cmd;
@@ -1055,6 +1095,15 @@ struct htt_tx_de_cmn_stats_tlv {
#define HTT_STATS_LOW_WM_BINS 5
#define HTT_STATS_HIGH_WM_BINS 5
+#define HTT_RING_IF_STATS_NUM_ELEMS GENMASK(15, 0)
+#define HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX GENMASK(31, 16)
+#define HTT_RING_IF_STATS_HEAD_IDX GENMASK(15, 0)
+#define HTT_RING_IF_STATS_TAIL_IDX GENMASK(31, 16)
+#define HTT_RING_IF_STATS_SHADOW_HEAD_IDX GENMASK(15, 0)
+#define HTT_RING_IF_STATS_SHADOW_TAIL_IDX GENMASK(31, 16)
+#define HTT_RING_IF_STATS_LWM_THRESH GENMASK(15, 0)
+#define HTT_RING_IF_STATS_HWM_THRESH GENMASK(31, 16)
+
struct htt_ring_if_stats_tlv {
u32 base_addr; /* DWORD aligned base memory address of the ring */
u32 elem_size;
@@ -1117,6 +1166,19 @@ struct htt_sfm_cmn_tlv {
};
/* == SRNG STATS == */
+#define HTT_SRING_STATS_MAC_ID GENMASK(7, 0)
+#define HTT_SRING_STATS_RING_ID GENMASK(15, 8)
+#define HTT_SRING_STATS_ARENA GENMASK(23, 16)
+#define HTT_SRING_STATS_EP BIT(24)
+#define HTT_SRING_STATS_NUM_AVAIL_WORDS GENMASK(15, 0)
+#define HTT_SRING_STATS_NUM_VALID_WORDS GENMASK(31, 16)
+#define HTT_SRING_STATS_HEAD_PTR GENMASK(15, 0)
+#define HTT_SRING_STATS_TAIL_PTR GENMASK(31, 16)
+#define HTT_SRING_STATS_CONSUMER_EMPTY GENMASK(15, 0)
+#define HTT_SRING_STATS_PRODUCER_FULL GENMASK(31, 16)
+#define HTT_SRING_STATS_PREFETCH_COUNT GENMASK(15, 0)
+#define HTT_SRING_STATS_INTERNAL_TAIL_PTR GENMASK(31, 16)
+
struct htt_sring_stats_tlv {
u32 mac_id__ring_id__arena__ep;
u32 base_addr_lsb; /* DWORD aligned base memory address of the ring */
@@ -1696,6 +1758,170 @@ struct htt_ring_backpressure_stats_tlv {
u32 backpressure_hist[5];
};
+#define HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS 14
+#define HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS 5
+#define HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+
+struct htt_pdev_txrate_txbf_stats_tlv {
+ /* SU TxBF TX MCS stats */
+ u32 tx_su_txbf_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+ /* Implicit BF TX MCS stats */
+ u32 tx_su_ibf_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+ /* Open loop TX MCS stats */
+ u32 tx_su_ol_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+ /* SU TxBF TX NSS stats */
+ u32 tx_su_txbf_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* Implicit BF TX NSS stats */
+ u32 tx_su_ibf_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* Open loop TX NSS stats */
+ u32 tx_su_ol_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* SU TxBF TX BW stats */
+ u32 tx_su_txbf_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+ /* Implicit BF TX BW stats */
+ u32 tx_su_ibf_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+ /* Open loop TX BW stats */
+ u32 tx_su_ol_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+};
+
+struct htt_txbf_ofdma_ndpa_stats_tlv {
+ /* 11AX HE OFDMA NDPA frame queued to the HW */
+ u32 ax_ofdma_ndpa_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDPA frame sent over the air */
+ u32 ax_ofdma_ndpa_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDPA frame flushed by HW */
+ u32 ax_ofdma_ndpa_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDPA frame completed with error(s) */
+ u32 ax_ofdma_ndpa_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_txbf_ofdma_ndp_stats_tlv {
+ /* 11AX HE OFDMA NDP frame queued to the HW */
+ u32 ax_ofdma_ndp_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDP frame sent over the air */
+ u32 ax_ofdma_ndp_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDP frame flushed by HW */
+ u32 ax_ofdma_ndp_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDP frame completed with error(s) */
+ u32 ax_ofdma_ndp_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_txbf_ofdma_brp_stats_tlv {
+ /* 11AX HE OFDMA MU BRPOLL frame queued to the HW */
+ u32 ax_ofdma_brpoll_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA MU BRPOLL frame sent over the air */
+ u32 ax_ofdma_brpoll_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA MU BRPOLL frame flushed by HW */
+ u32 ax_ofdma_brpoll_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA MU BRPOLL frame completed with error(s) */
+ u32 ax_ofdma_brp_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* Number of CBF(s) received when 11AX HE OFDMA MU BRPOLL frame
+ * completed with error(s).
+ */
+ u32 ax_ofdma_brp_err_num_cbf_rcvd[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS + 1];
+};
+
+struct htt_txbf_ofdma_steer_stats_tlv {
+ /* 11AX HE OFDMA PPDUs that were sent over the air with steering (TXBF + OFDMA) */
+ u32 ax_ofdma_num_ppdu_steer[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA PPDUs that were sent over the air in open loop */
+ u32 ax_ofdma_num_ppdu_ol[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA number of users for which CBF prefetch was
+ * initiated to PHY HW during TX.
+ */
+ u32 ax_ofdma_num_usrs_prefetch[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA number of users for which sounding was initiated during TX */
+ u32 ax_ofdma_num_usrs_sound[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA number of users for which sounding was forced during TX */
+ u32 ax_ofdma_num_usrs_force_sound[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+#define HTT_MAX_RX_PKT_CNT 8
+#define HTT_MAX_RX_PKT_CRC_PASS_CNT 8
+#define HTT_MAX_PER_BLK_ERR_CNT 20
+#define HTT_MAX_RX_OTA_ERR_CNT 14
+#define HTT_STATS_MAX_CHAINS 8
+#define ATH11K_STATS_MGMT_FRM_TYPE_MAX 16
+
+struct htt_phy_counters_tlv {
+ /* number of RXTD OFDMA OTA errors, excluding power surge and drop */
+ u32 rx_ofdma_timing_err_cnt;
+ /* rx_cck_fail_cnt:
+  * number of cck errors due to rx reception failures caused by
+  * cck timing errors
+  */
+ u32 rx_cck_fail_cnt;
+ /* number of times tx abort initiated by mac */
+ u32 mactx_abort_cnt;
+ /* number of times rx abort initiated by mac */
+ u32 macrx_abort_cnt;
+ /* number of times tx abort initiated by phy */
+ u32 phytx_abort_cnt;
+ /* number of times rx abort initiated by phy */
+ u32 phyrx_abort_cnt;
+ /* number of rx deferred aborts initiated by phy */
+ u32 phyrx_defer_abort_cnt;
+ /* number of sizing events generated at LSTF */
+ u32 rx_gain_adj_lstf_event_cnt;
+ /* number of sizing events generated at non-legacy LTF */
+ u32 rx_gain_adj_non_legacy_cnt;
+ /* rx_pkt_cnt -
+ * Received EOP (end-of-packet) count per packet type;
+ * [0] = 11a; [1] = 11b; [2] = 11n; [3] = 11ac; [4] = 11ax; [5] = GF
+ * [6-7]=RSVD
+ */
+ u32 rx_pkt_cnt[HTT_MAX_RX_PKT_CNT];
+ /* rx_pkt_crc_pass_cnt -
+ * Received EOP (end-of-packet) count per packet type;
+ * [0] = 11a; [1] = 11b; [2] = 11n; [3] = 11ac; [4] = 11ax; [5] = GF
+ * [6-7]=RSVD
+ */
+ u32 rx_pkt_crc_pass_cnt[HTT_MAX_RX_PKT_CRC_PASS_CNT];
+ /* per_blk_err_cnt -
+ * Error count per error source;
+ * [0] = unknown; [1] = LSIG; [2] = HTSIG; [3] = VHTSIG; [4] = HESIG;
+ * [5] = RXTD_OTA; [6] = RXTD_FATAL; [7] = DEMF; [8] = ROBE;
+ * [9] = PMI; [10] = TXFD; [11] = TXTD; [12] = PHYRF
+ * [13-19]=RSVD
+ */
+ u32 per_blk_err_cnt[HTT_MAX_PER_BLK_ERR_CNT];
+ /* rx_ota_err_cnt -
+ * RXTD OTA (over-the-air) error count per error reason;
+ * [0] = voting fail; [1] = weak det fail; [2] = strong sig fail;
+ * [3] = cck fail; [4] = power surge; [5] = power drop;
+ * [6] = btcf timing timeout error; [7] = btcf packet detect error;
+ * [8] = coarse timing timeout error
+ * [9-13]=RSVD
+ */
+ u32 rx_ota_err_cnt[HTT_MAX_RX_OTA_ERR_CNT];
+};
+
+struct htt_phy_stats_tlv {
+ /* per chain hw noise floor values in dBm */
+ s32 nf_chain[HTT_STATS_MAX_CHAINS];
+ /* number of false radars detected */
+ u32 false_radar_cnt;
+ /* number of channel switches happened due to radar detection */
+ u32 radar_cs_cnt;
+ /* ani_level -
+  * ANI (noise immunity) level for the current channel;
+  * desense levels range from -5 to 15 dB, with higher
+  * values indicating more noise interference.
+  */
+ s32 ani_level;
+ /* running time in minutes since FW boot */
+ u32 fw_run_time;
+};
+
+struct htt_peer_ctrl_path_txrx_stats_tlv {
+ /* peer mac address */
+ u8 peer_mac_addr[ETH_ALEN];
+ u8 rsvd[2];
+ /* Num of tx mgmt frames with subtype on peer level */
+ u32 peer_tx_mgmt_subtype[ATH11K_STATS_MGMT_FRM_TYPE_MAX];
+ /* Num of rx mgmt frames with subtype on peer level */
+ u32 peer_rx_mgmt_subtype[ATH11K_STATS_MGMT_FRM_TYPE_MAX];
+};
+
#ifdef CONFIG_ATH11K_DEBUGFS
void ath11k_debugfs_htt_stats_init(struct ath11k *ar);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index 270c0edbb10f..fecd9718f5ce 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -419,15 +419,21 @@ ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
struct debug_htt_stats_req *stats_req;
+ int type = ar->debug.htt_stats.type;
int ret;
+ if ((type != ATH11K_DBG_HTT_EXT_STATS_PEER_INFO &&
+ type != ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS) ||
+ type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ return -EPERM;
+
stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
if (!stats_req)
return -ENOMEM;
mutex_lock(&ar->conf_mutex);
ar->debug.htt_stats.stats_req = stats_req;
- stats_req->type = ATH11K_DBG_HTT_EXT_STATS_PEER_INFO;
+ stats_req->type = type;
memcpy(stats_req->peer_addr, sta->addr, ETH_ALEN);
ret = ath11k_debugfs_htt_stats_req(ar);
mutex_unlock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index ee768ccce46e..d6267bfa0264 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -195,6 +195,7 @@ struct ath11k_pdev_dp {
#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
#define DP_RX_BUFFER_SIZE 2048
+#define DP_RX_BUFFER_SIZE_LITE 1024
#define DP_RX_BUFFER_ALIGN_SIZE 128
#define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0)
@@ -1592,6 +1593,13 @@ struct ath11k_htt_extd_stats_msg {
u8 data[0];
} __packed;
+#define HTT_MAC_ADDR_L32_0 GENMASK(7, 0)
+#define HTT_MAC_ADDR_L32_1 GENMASK(15, 8)
+#define HTT_MAC_ADDR_L32_2 GENMASK(23, 16)
+#define HTT_MAC_ADDR_L32_3 GENMASK(31, 24)
+#define HTT_MAC_ADDR_H16_0 GENMASK(7, 0)
+#define HTT_MAC_ADDR_H16_1 GENMASK(15, 8)
+
struct htt_mac_addr {
u32 mac_addr_l32;
u32 mac_addr_h16;
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 9a224817630a..75f6d55dca46 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -142,6 +142,18 @@ static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
return errmap;
}
+static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ struct rx_attention *rx_attention;
+ u32 errmap;
+
+ rx_attention = ath11k_dp_rx_get_attention(ab, desc);
+ errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
+
+ return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
+}
+
static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
@@ -270,6 +282,18 @@ static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
__le32_to_cpu(attn->info1)));
}
+static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
+}
+
+static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
+}
+
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
@@ -2156,6 +2180,7 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
{
u8 *first_hdr;
u8 decap;
+ struct ethhdr *ehdr;
first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
@@ -2170,9 +2195,22 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
decrypted);
break;
case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
- /* TODO undecap support for middle/last msdu's of amsdu */
- ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
- enctype, status);
+ ehdr = (struct ethhdr *)msdu->data;
+
+ /* mac80211 allows fast path only for authorized STA */
+ if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
+ ATH11K_SKB_RXCB(msdu)->is_eapol = true;
+ ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
+ enctype, status);
+ break;
+ }
+
+ /* PN for mcast packets will be validated in mac80211;
+ * remove eth header and add 802.11 header.
+ */
+ if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
+ ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
+ enctype, status);
break;
case DP_RX_DECAP_TYPE_8023:
/* TODO: Handle undecap for these formats */
@@ -2180,35 +2218,62 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
}
}
+static struct ath11k_peer *
+ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct hal_rx_desc *rx_desc = rxcb->rx_desc;
+ struct ath11k_peer *peer = NULL;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (rxcb->peer_id)
+ peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);
+
+ if (peer)
+ return peer;
+
+ if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
+ return NULL;
+
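+ /* peer_id lookup failed; fall back to the transmitter address (addr2) */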
+ peer = ath11k_peer_find_by_addr(ab,
+ ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));
+ return peer;
+}
+
static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status)
{
- bool fill_crypto_hdr, mcast;
+ bool fill_crypto_hdr;
enum hal_encrypt_type enctype;
bool is_decrypted = false;
+ struct ath11k_skb_rxcb *rxcb;
struct ieee80211_hdr *hdr;
struct ath11k_peer *peer;
struct rx_attention *rx_attention;
u32 err_bitmap;
- hdr = (struct ieee80211_hdr *)msdu->data;
-
/* PN for multicast packets will be checked in mac80211 */
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
+ rxcb->is_mcbc = fill_crypto_hdr;
- mcast = is_multicast_ether_addr(hdr->addr1);
- fill_crypto_hdr = mcast;
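+ /* cache peer_id and seq_no from the rx descriptor for mcast/bcast frames */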
+ if (rxcb->is_mcbc) {
+ rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
+ rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
+ }
spin_lock_bh(&ar->ab->base_lock);
- peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2);
+ peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
if (peer) {
- if (mcast)
+ if (rxcb->is_mcbc)
enctype = peer->sec_type_grp;
else
enctype = peer->sec_type;
} else {
- enctype = HAL_ENCRYPT_TYPE_OPEN;
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
}
spin_unlock_bh(&ar->ab->base_lock);
@@ -2247,8 +2312,11 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
if (!is_decrypted || fill_crypto_hdr)
return;
- hdr = (void *)msdu->data;
- hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
+ DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
+ hdr = (void *)msdu->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
}
static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
@@ -2337,8 +2405,10 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
channel_num = meta_data;
center_freq = meta_data >> 16;
- if (center_freq >= 5935 && center_freq <= 7105) {
+ if (center_freq >= ATH11K_MIN_6G_FREQ &&
+ center_freq <= ATH11K_MAX_6G_FREQ) {
rx_status->band = NL80211_BAND_6GHZ;
+ rx_status->freq = center_freq;
} else if (channel_num >= 1 && channel_num <= 14) {
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 173) {
@@ -2356,57 +2426,56 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
rx_desc, sizeof(struct hal_rx_desc));
}
- rx_status->freq = ieee80211_channel_to_frequency(channel_num,
- rx_status->band);
+ if (rx_status->band != NL80211_BAND_6GHZ)
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
}
-static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
- size_t size)
-{
- u8 *qc;
- int tid;
-
- if (!ieee80211_is_data_qos(hdr->frame_control))
- return "";
-
- qc = ieee80211_get_qos_ctl(hdr);
- tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
- snprintf(out, size, "tid %d", tid);
-
- return out;
-}
-
static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
- struct sk_buff *msdu)
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
{
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
};
- struct ieee80211_rx_status *status;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+ struct ieee80211_rx_status *rx_status;
struct ieee80211_radiotap_he *he = NULL;
- char tid[32];
+ struct ieee80211_sta *pubsta = NULL;
+ struct ath11k_peer *peer;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ u8 decap = DP_RX_DECAP_TYPE_RAW;
+ bool is_mcbc = rxcb->is_mcbc;
+ bool is_eapol = rxcb->is_eapol;
- status = IEEE80211_SKB_RXCB(msdu);
- if (status->encoding == RX_ENC_HE) {
+ if (status->encoding == RX_ENC_HE &&
+ !(status->flag & RX_FLAG_RADIOTAP_HE) &&
+ !(status->flag & RX_FLAG_SKIP_MONITOR)) {
he = skb_push(msdu, sizeof(known));
memcpy(he, &known, sizeof(known));
status->flag |= RX_FLAG_RADIOTAP_HE;
}
+ if (!(status->flag & RX_FLAG_ONLY_MONITOR))
+ decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);
+
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
+ if (peer && peer->sta)
+ pubsta = peer->sta;
+ spin_unlock_bh(&ar->ab->base_lock);
+
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
- "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
- ieee80211_get_SA(hdr),
- ath11k_print_get_tid(hdr, tid, sizeof(tid)),
- is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
- "mcast" : "ucast",
- (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
+ peer ? peer->addr : NULL,
+ rxcb->tid,
+ is_mcbc ? "mcast" : "ucast",
+ rxcb->seq_no,
(status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
(status->encoding == RX_ENC_HT) ? "ht" : "",
(status->encoding == RX_ENC_VHT) ? "vht" : "",
@@ -2426,22 +2495,32 @@ static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *nap
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
msdu->data, msdu->len);
+ rx_status = IEEE80211_SKB_RXCB(msdu);
+ *rx_status = *status;
+
/* TODO: trace rx packet */
- ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
+ /* PN for multicast packets is not validated in HW,
+  * so skip the 802.3 rx path for them.
+  * Also, fast_rx expects the STA to be authorized, hence
+  * EAPOL packets are sent via the slow path.
+  */
+ if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
+ !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
+ rx_status->flag |= RX_FLAG_8023;
+
+ ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
}
static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
struct sk_buff *msdu,
- struct sk_buff_head *msdu_list)
+ struct sk_buff_head *msdu_list,
+ struct ieee80211_rx_status *rx_status)
{
struct ath11k_base *ab = ar->ab;
struct hal_rx_desc *rx_desc, *lrx_desc;
struct rx_attention *rx_attention;
- struct ieee80211_rx_status rx_status = {0};
- struct ieee80211_rx_status *status;
struct ath11k_skb_rxcb *rxcb;
- struct ieee80211_hdr *hdr;
struct sk_buff *last_buf;
u8 l3_pad_bytes;
u8 *hdr_status;
@@ -2458,6 +2537,12 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
}
rx_desc = (struct hal_rx_desc *)msdu->data;
+ if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
+ ath11k_warn(ar->ab, "msdu len not valid\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
lrx_desc = (struct hal_rx_desc *)last_buf->data;
rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
@@ -2497,19 +2582,11 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
}
}
- hdr = (struct ieee80211_hdr *)msdu->data;
-
- /* Process only data frames */
- if (!ieee80211_is_data(hdr->frame_control))
- return -EINVAL;
-
- ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status);
- ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status);
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
+ ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
- rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
+ rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
- status = IEEE80211_SKB_RXCB(msdu);
- *status = rx_status;
return 0;
free_out:
@@ -2524,6 +2601,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
struct ath11k_skb_rxcb *rxcb;
struct sk_buff *msdu;
struct ath11k *ar;
+ struct ieee80211_rx_status rx_status = {0};
u8 mac_id;
int ret;
@@ -2546,7 +2624,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
continue;
}
- ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list);
+ ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_DATA,
"Unable to process msdu %d", ret);
@@ -2554,7 +2632,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
continue;
}
- ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
(*quota)--;
}
@@ -2636,10 +2714,14 @@ try_again:
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
rxcb->is_continuation = !!(desc.rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
- rxcb->mac_id = mac_id;
+ rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
+ desc.rx_mpdu_info.meta_data);
+ rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
+ desc.rx_mpdu_info.info0);
rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
desc.info0);
+ rxcb->mac_id = mac_id;
__skb_queue_tail(&msdu_list, msdu);
if (total_msdu_reaped >= quota && !rxcb->is_continuation) {
@@ -2969,6 +3051,8 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
int num_buffs_reaped = 0;
+ u32 rx_buf_sz;
+ u16 log_type = 0;
__skb_queue_head_init(&skb_list);
@@ -2981,8 +3065,16 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
memset(&ppdu_info, 0, sizeof(ppdu_info));
ppdu_info.peer_id = HAL_INVALID_PEERID;
- if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar))
- trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
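+ /* pktlog lite mode logs a smaller rx buffer than full rx-stats mode */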
+ if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
+ rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
+ } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ }
+
+ if (log_type)
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
@@ -3010,7 +3102,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
- trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
@@ -3310,7 +3402,7 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti
paddr = dma_map_single(ab->dev, defrag_skb->data,
defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, paddr))
return -ENOMEM;
@@ -3375,7 +3467,7 @@ err_free_idr:
spin_unlock_bh(&rx_refill_ring->idr_lock);
err_unmap_dma:
dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
return ret;
}
@@ -3941,7 +4033,6 @@ static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
struct ieee80211_rx_status rxs = {0};
- struct ieee80211_rx_status *status;
bool drop = true;
switch (rxcb->err_rel_src) {
@@ -3961,10 +4052,7 @@ static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
return;
}
- status = IEEE80211_SKB_RXCB(msdu);
- *status = rxs;
-
- ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
}
int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
@@ -4848,7 +4936,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct sk_buff *mon_skb, *skb_next, *header;
- struct ieee80211_rx_status *rxs = &dp->rx_status, *status;
+ struct ieee80211_rx_status *rxs = &dp->rx_status;
mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
tail_msdu, rxs);
@@ -4874,10 +4962,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
}
rxs->flag |= RX_FLAG_ONLY_MONITOR;
- status = IEEE80211_SKB_RXCB(mon_skb);
- *status = *rxs;
-
- ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
+ ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
mon_skb = skb_next;
} while (mon_skb);
rxs->flag = 0;
@@ -5029,7 +5114,7 @@ int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
int ret = 0;
- if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
else
ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 8bba5234f81f..70d2cf010a68 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -78,7 +78,7 @@ enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
}
int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
- struct sk_buff *skb)
+ struct ath11k_sta *arsta, struct sk_buff *skb)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_dp *dp = &ab->dp;
@@ -145,7 +145,15 @@ tcl_ring_sel:
FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
- ti.meta_data_flags = arvif->tcl_metadata;
+
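+ /* multicast on a 4-addr (WDS) link: use the per-STA TCL metadata and let the FW route it */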
+ if (ieee80211_has_a4(hdr->frame_control) &&
+ is_multicast_ether_addr(hdr->addr3) && arsta &&
+ arsta->use_4addr_set) {
+ ti.meta_data_flags = arsta->tcl_metadata;
+ ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1);
+ } else {
+ ti.meta_data_flags = arvif->tcl_metadata;
+ }
if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
@@ -614,6 +622,9 @@ int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
struct hal_srng *cmd_ring;
int cmd_num;
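+ /* don't queue REO commands once a crash flush is in progress */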
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ return -ESHUTDOWN;
+
cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
@@ -1068,12 +1079,16 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
- if (!reset)
+ if (!reset) {
tlv_filter.rx_filter =
HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
- else
+ } else {
tlv_filter = ath11k_mac_mon_status_filter_default;
+ if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
+ tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
+ }
+
ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
dp->mac_id + i,
HAL_RXDMA_MONITOR_STATUS,
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h
index f8a9f9c8e444..698b907b878d 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.h
@@ -17,7 +17,7 @@ struct ath11k_dp_htt_wbm_tx_status {
int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab);
int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
- struct sk_buff *skb);
+ struct ath11k_sta *arsta, struct sk_buff *skb);
void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id);
int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
enum hal_reo_cmd_type type,
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
index d54ec6aa6281..00b595b84939 100644
--- a/drivers/net/wireless/ath/ath11k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -496,6 +496,8 @@ struct hal_tlv_hdr {
#define RX_MPDU_DESC_INFO0_DA_IDX_TIMEOUT BIT(29)
#define RX_MPDU_DESC_INFO0_RAW_MPDU BIT(30)
+#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0)
+
struct rx_mpdu_desc {
u32 info0; /* %RX_MPDU_DESC_INFO */
u32 meta_data;
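[Editorial note, not part of the patch] A minimal sketch of how the new peer-id mask is meant to be applied; the FIELD_GET() pattern and the mpdu_desc variable are assumed for illustration:

	/* recover the peer id firmware stores in the low 16 bits */
	u16 peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
				mpdu_desc->meta_data);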
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index d9596903b0a5..7a343db1dde8 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -97,6 +97,7 @@ static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
config->num_multicast_filter_entries = 0x20;
config->num_wow_filters = 0x16;
config->num_keep_alive_pattern = 0;
+ config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
}
static void ath11k_hw_ipq8074_reo_setup(struct ath11k_base *ab)
@@ -197,6 +198,7 @@ static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab,
config->peer_map_unmap_v2_support = 1;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
+ config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
}
static int ath11k_hw_mac_id_to_pdev_id_ipq8074(struct ath11k_hw_params *hw,
@@ -372,6 +374,17 @@ static void ath11k_hw_ipq8074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16
desc->u.ipq8074.msdu_start.info1 = __cpu_to_le32(info);
}
+static bool ath11k_hw_ipq8074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
+}
+
+static u8 *ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.ipq8074.mpdu_start.addr2;
+}
+
static
struct rx_attention *ath11k_hw_ipq8074_rx_desc_get_attention(struct hal_rx_desc *desc)
{
@@ -543,6 +556,17 @@ static u8 *ath11k_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
return &desc->u.qcn9074.msdu_payload[0];
}
+static bool ath11k_hw_ipq9074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11) &
+ RX_MPDU_START_INFO11_MAC_ADDR2_VALID;
+}
+
+static u8 *ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.qcn9074.mpdu_start.addr2;
+}
+
static bool ath11k_hw_wcn6855_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855,
@@ -703,6 +727,17 @@ static u8 *ath11k_hw_wcn6855_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
return &desc->u.wcn6855.msdu_payload[0];
}
+static bool ath11k_hw_wcn6855_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
+}
+
+static u8 *ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.wcn6855.mpdu_start.addr2;
+}
+
static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab)
{
u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
@@ -799,6 +834,8 @@ const struct ath11k_hw_ops ipq8074_ops = {
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
};
const struct ath11k_hw_ops ipq6018_ops = {
@@ -835,6 +872,8 @@ const struct ath11k_hw_ops ipq6018_ops = {
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
};
const struct ath11k_hw_ops qca6390_ops = {
@@ -871,6 +910,8 @@ const struct ath11k_hw_ops qca6390_ops = {
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
};
const struct ath11k_hw_ops qcn9074_ops = {
@@ -907,6 +948,8 @@ const struct ath11k_hw_ops qcn9074_ops = {
.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
};
const struct ath11k_hw_ops wcn6855_ops = {
@@ -943,6 +986,8 @@ const struct ath11k_hw_ops wcn6855_ops = {
.rx_desc_get_msdu_payload = ath11k_hw_wcn6855_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_wcn6855_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_wcn6855_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2,
};
#define ATH11K_TX_RING_MASK_0 0x1
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 62f5978b3005..1535075eed03 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -128,7 +128,7 @@ struct ath11k_hw_params {
struct {
const char *dir;
size_t board_size;
- size_t cal_size;
+ size_t cal_offset;
} fw;
const struct ath11k_hw_ops *hw_ops;
@@ -153,7 +153,14 @@ struct ath11k_hw_params {
bool vdev_start_delay;
bool htt_peer_map_v2;
bool tcl_0_only;
- u8 spectral_fft_sz;
+
+ struct {
+ u8 fft_sz;
+ u8 fft_pad_sz;
+ u8 summary_pad_sz;
+ u8 fft_hdr_len;
+ u16 max_fft_bins;
+ } spectral;
u16 interface_modes;
bool supports_monitor;
@@ -202,6 +209,8 @@ struct ath11k_hw_ops {
u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
void (*reo_setup)(struct ath11k_base *ab);
u16 (*mpdu_info_get_peerid)(u8 *tlv_data);
+ bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
+ u8* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
};
extern const struct ath11k_hw_ops ipq8074_ops;
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index e9b3689331ec..da850f4b2919 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -150,6 +150,9 @@ static const struct ieee80211_channel ath11k_6ghz_channels[] = {
CHAN6G(225, 7075, 0),
CHAN6G(229, 7095, 0),
CHAN6G(233, 7115, 0),
+
+ /* new addition in IEEE Std 802.11ax-2021 */
+ CHAN6G(2, 5935, 0),
};
static struct ieee80211_rate ath11k_legacy_rates[] = {
@@ -354,6 +357,18 @@ ath11k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
return 1;
}
+static u32
+ath11k_mac_max_he_nss(const u16 he_mcs_mask[NL80211_HE_NSS_MAX])
+{
+ int nss;
+
+ for (nss = NL80211_HE_NSS_MAX - 1; nss >= 0; nss--)
+ if (he_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
static u8 ath11k_parse_mpdudensity(u8 mpdudensity)
{
/* 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
@@ -488,7 +503,8 @@ struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
- if (pdev && pdev->ar) {
+ if (pdev && pdev->ar &&
+ (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) {
arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id);
if (arvif)
return arvif;
@@ -715,32 +731,386 @@ void ath11k_mac_peer_cleanup_all(struct ath11k *ar)
ar->num_stations = 0;
}
-static int ath11k_monitor_vdev_up(struct ath11k *ar, int vdev_id)
+static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar)
{
- int ret = 0;
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ if (!wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH11K_VDEV_SETUP_TIMEOUT_HZ))
+ return -ETIMEDOUT;
+
+ return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
+}
+
+static void
+ath11k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def **def = data;
+
+ *def = &conf->def;
+}
+
+static int ath11k_mac_monitor_vdev_start(struct ath11k *ar, int vdev_id,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_channel *channel;
+ struct wmi_vdev_start_req_arg arg = {};
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ channel = chandef->chan;
+
+ arg.vdev_id = vdev_id;
+ arg.channel.freq = channel->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
+
+ arg.channel.mode = ath11k_phymodes[chandef->chan->band][chandef->width];
+ arg.channel.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR);
+
+ arg.channel.min_power = 0;
+ arg.channel.max_power = channel->max_power * 2;
+ arg.channel.max_reg_power = channel->max_reg_power * 2;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
+
+ arg.pref_tx_streams = ar->num_tx_chains;
+ arg.pref_rx_streams = ar->num_rx_chains;
+
+ arg.channel.passive = !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+ reinit_completion(&ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath11k_wmi_vdev_start(ar, &arg, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
ret = ath11k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
if (ret) {
ath11k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
- return ret;
+ goto vdev_stop;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %i started\n",
vdev_id);
+
return 0;
+
+vdev_stop:
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath11k_wmi_vdev_stop(ar, vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i stop: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ return -EIO;
}
-static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+static int ath11k_mac_monitor_vdev_stop(struct ath11k *ar)
{
- /* mac80211 requires this op to be present and that's why
- * there's an empty function, this can be extended when
- * required.
- */
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath11k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %i stopped\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+}
+
+static int ath11k_mac_monitor_vdev_create(struct ath11k *ar)
+{
+ struct ath11k_pdev *pdev = ar->pdev;
+ struct vdev_create_params param = {};
+ int bit, ret;
+ u8 tmp_addr[6] = {0};
+ u16 nss;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags))
+ return 0;
+
+ if (ar->ab->free_vdev_map == 0) {
+ ath11k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n");
+ return -ENOMEM;
+ }
+
+ bit = __ffs64(ar->ab->free_vdev_map);
+
+ ar->monitor_vdev_id = bit;
+
+ param.if_id = ar->monitor_vdev_id;
+ param.type = WMI_VDEV_TYPE_MONITOR;
+ param.subtype = WMI_VDEV_SUBTYPE_NONE;
+ param.pdev_id = pdev->pdev_id;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ param.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
+ param.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ param.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
+ param.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
+ }
+
+ ret = ath11k_wmi_vdev_create(ar, tmp_addr, &param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n",
+ ar->monitor_vdev_id, ret);
+ ar->monitor_vdev_id = -1;
+ return ret;
+ }
+
+ nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id,
+ WMI_VDEV_PARAM_NSS, nss);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
+ ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret);
+ goto err_vdev_del;
+ }
+
+ ret = ath11k_mac_txpower_recalc(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to recalc txpower for monitor vdev %d: %d\n",
+ ar->monitor_vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id;
+ ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ar->num_created_vdevs++;
+ set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %d created\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+
+err_vdev_del:
+ ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ ar->monitor_vdev_id = -1;
+ return ret;
+}
+
+static int ath11k_mac_monitor_vdev_delete(struct ath11k *ar)
+{
+ int ret;
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags))
+ return 0;
+
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH11K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %d deleted\n",
+ ar->monitor_vdev_id);
+
+ ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id);
+ ar->num_created_vdevs--;
+ ar->monitor_vdev_id = -1;
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ }
+
+ return ret;
+}
+
+static int ath11k_mac_monitor_start(struct ath11k *ar)
+{
+ struct cfg80211_chan_def *chandef = NULL;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
+ return 0;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath11k_mac_get_any_chandef_iter,
+ &chandef);
+ if (!chandef)
+ return 0;
+
+ ret = ath11k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret);
+ ath11k_mac_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ set_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+
+ ar->num_started_vdevs++;
+ ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during start: %d",
+ ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor started\n");
return 0;
}
+static int ath11k_mac_monitor_stop(struct ath11k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
+ return 0;
+
+ ret = ath11k_mac_monitor_vdev_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+ ar->num_started_vdevs--;
+
+ ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, true);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during stop: %d",
+ ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor stopped ret %d\n", ret);
+
+ return 0;
+}
+
+static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ set_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED,
+ &ar->monitor_flags))
+ goto out;
+
+ ret = ath11k_mac_monitor_vdev_create(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to create monitor vdev: %d",
+ ret);
+ goto out;
+ }
+
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor: %d",
+ ret);
+ goto err_mon_del;
+ }
+ } else {
+ clear_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags);
+
+ if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED,
+ &ar->monitor_flags))
+ goto out;
+
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor: %d",
+ ret);
+ goto out;
+ }
+
+ ret = ath11k_mac_monitor_vdev_delete(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to delete monitor vdev: %d",
+ ret);
+ goto out;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+
+err_mon_del:
+ ath11k_mac_monitor_vdev_delete(ar);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
{
struct ath11k *ar = arvif->ar;
@@ -1035,7 +1405,7 @@ ath11k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
}
static bool
-ath11k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+ath11k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[])
{
int nss;
@@ -1093,6 +1463,14 @@ static void ath11k_peer_assoc_h_ht(struct ath11k *ar,
arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
}
+ /* Since firmware handles these two flags (IEEE80211_HT_CAP_SGI_20
+ * and IEEE80211_HT_CAP_SGI_40) when enabling SGI, reset both
+ * flags if the guard interval is the default GI
+ */
+ if (arvif->bitrate_mask.control[band].gi == NL80211_TXRATE_DEFAULT_GI)
+ arg->peer_ht_caps &= ~(IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40);
+
if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40))
@@ -1207,6 +1585,34 @@ ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
return tx_mcs_set;
}
+static u8 ath11k_get_nss_160mhz(struct ath11k *ar,
+ u8 max_nss)
+{
+ u8 nss_ratio_info = ar->pdev->cap.nss_ratio_info;
+ u8 max_sup_nss = 0;
+
+ switch (nss_ratio_info) {
+ case WMI_NSS_RATIO_1BY2_NSS:
+ max_sup_nss = max_nss >> 1;
+ break;
+ case WMI_NSS_RATIO_3BY4_NSS:
+ ath11k_warn(ar->ab, "WMI_NSS_RATIO_3BY4_NSS not supported\n");
+ break;
+ case WMI_NSS_RATIO_1_NSS:
+ max_sup_nss = max_nss;
+ break;
+ case WMI_NSS_RATIO_2_NSS:
+ ath11k_warn(ar->ab, "WMI_NSS_RATIO_2_NSS not supported\n");
+ break;
+ default:
+ ath11k_warn(ar->ab, "invalid nss ratio received from firmware: %d\n",
+ nss_ratio_info);
+ break;
+ }
+
+ return max_sup_nss;
+}
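[Editorial note, not part of the patch] A worked example of the ratio mapping above, with assumed values:

	u8 max_nss = 4;			/* chains on the radio */
	/* WMI_NSS_RATIO_1BY2_NSS: half the chains usable at 160 MHz */
	u8 nss_160 = max_nss >> 1;	/* ath11k_get_nss_160mhz() = 2 */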
+
static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -1216,10 +1622,12 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
enum nl80211_band band;
- const u16 *vht_mcs_mask;
+ u16 *vht_mcs_mask;
u8 ampdu_factor;
u8 max_nss, vht_mcs;
- int i;
+ int i, vht_nss, nss_idx;
+ bool user_rate_valid = true;
+ u32 rx_nss, tx_nss, nss_160;
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return;
@@ -1262,6 +1670,24 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
+ vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask);
+
+ if (vht_nss > sta->rx_nss) {
+ user_rate_valid = false;
+ for (nss_idx = sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ if (vht_mcs_mask[nss_idx]) {
+ user_rate_valid = true;
+ break;
+ }
+ }
+ }
+
+ if (!user_rate_valid) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac setting vht range mcs value to peer supported nss %d for peer %pM\n",
+ sta->rx_nss, sta->addr);
+ vht_mcs_mask[sta->rx_nss - 1] = vht_mcs_mask[vht_nss - 1];
+ }
+
/* Calculate peer NSS capability from VHT capabilities if STA
* supports VHT.
*/
@@ -1294,10 +1720,95 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
/* TODO: Check */
arg->tx_max_mcs_nss = 0xFF;
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
- sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+ if (arg->peer_phymode == MODE_11AC_VHT160 ||
+ arg->peer_phymode == MODE_11AC_VHT80_80) {
+ tx_nss = ath11k_get_nss_160mhz(ar, max_nss);
+ rx_nss = min(arg->peer_nss, tx_nss);
+ arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE;
+
+ if (!rx_nss) {
+ ath11k_warn(ar->ab, "invalid max_nss\n");
+ return;
+ }
+
+ if (arg->peer_phymode == MODE_11AC_VHT160)
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1);
+ else
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1);
+
+ arg->peer_bw_rxnss_override |= nss_160;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac vht peer %pM max_mpdu %d flags 0x%x nss_override 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags,
+ arg->peer_bw_rxnss_override);
+}
+
+static int ath11k_mac_get_max_he_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_HE_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_11: return BIT(12) - 1;
+ }
+ return 0;
+}
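[Editorial note, not part of the patch] A worked example of the 2-bit field decoding above, with an assumed MCS map:

	/* mcs_map = 0xFFFA: the fields for nss 0 and 1 are 2 (MCS 0-11),
	 * all higher fields are 3 (not supported), so the helper
	 * returns BIT(12) - 1 = 0xFFF for nss 0/1 and 0 otherwise.
	 */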
+
+static u16 ath11k_peer_assoc_h_he_limit(u16 tx_mcs_set,
+ const u16 he_mcs_limit[NL80211_HE_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) {
+ mcs_map = ath11k_mac_get_max_he_mcs_map(tx_mcs_set, nss) &
+ he_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0 ... 7:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ case 9:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_9;
+ break;
+ case 10:
+ case 11:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_11;
+ break;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case -1:
+ mcs = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ break;
+ }
- /* TODO: rxnss_override */
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
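[Editorial note, not part of the patch] The same example carried through the capping loop above:

	/* tx_mcs_set = 0xFFFA (MCS 0-11 on two streams) limited by
	 * he_mcs_limit = { 0xFF, 0x3FF, 0, ... } becomes 0xFFF4:
	 * MCS 0-7 on nss 1, MCS 0-9 on nss 2, the rest unsupported.
	 */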
+
+static bool
+ath11k_peer_assoc_h_he_masked(const u16 he_mcs_mask[NL80211_HE_NSS_MAX])
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++)
+ if (he_mcs_mask[nss])
+ return false;
+
+ return true;
}
static void ath11k_peer_assoc_h_he(struct ath11k *ar,
@@ -1305,13 +1816,30 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
u8 ampdu_factor;
- u16 v;
+ enum nl80211_band band;
+ u16 *he_mcs_mask;
+ u8 max_nss, he_mcs;
+ u16 he_tx_mcs = 0, v = 0;
+ int i, he_nss, nss_idx;
+ bool user_rate_valid = true;
+ u32 rx_nss, tx_nss, nss_160;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
if (!he_cap->has_he)
return;
+ band = def.chan->band;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+
+ if (ath11k_peer_assoc_h_he_masked(he_mcs_mask))
+ return;
+
arg->he_flag = true;
memcpy_and_pad(&arg->peer_he_cap_macinfo,
@@ -1388,25 +1916,48 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
arg->twt_requester = true;
+ he_nss = ath11k_mac_max_he_nss(he_mcs_mask);
+
+ if (he_nss > sta->rx_nss) {
+ user_rate_valid = false;
+ for (nss_idx = sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ if (he_mcs_mask[nss_idx]) {
+ user_rate_valid = true;
+ break;
+ }
+ }
+ }
+
+ if (!user_rate_valid) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac setting he range mcs value to peer supported nss %d for peer %pM\n",
+ sta->rx_nss, sta->addr);
+ he_mcs_mask[sta->rx_nss - 1] = he_mcs_mask[he_nss - 1];
+ }
+
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
arg->peer_he_mcs_count++;
+ he_tx_mcs = v;
}
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
arg->peer_he_mcs_count++;
+ if (!he_tx_mcs)
+ he_tx_mcs = v;
fallthrough;
default:
@@ -1414,11 +1965,102 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
arg->peer_he_mcs_count++;
+ if (!he_tx_mcs)
+ he_tx_mcs = v;
break;
}
+
+ /* Calculate peer NSS capability from HE capabilities if STA
+ * supports HE.
+ */
+ for (i = 0, max_nss = 0, he_mcs = 0; i < NL80211_HE_NSS_MAX; i++) {
+ he_mcs = he_tx_mcs >> (2 * i) & 3;
+
+ /* With fixed rates, the MCS range in he_tx_mcs may mark an nss
+ * as unsupported even though he_mcs_mask enables it, so check
+ * either of them to find the nss.
+ */
+ if (he_mcs != IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ he_mcs_mask[i])
+ max_nss = i + 1;
+ }
+ arg->peer_nss = min(sta->rx_nss, max_nss);
+
+ if (arg->peer_phymode == MODE_11AX_HE160 ||
+ arg->peer_phymode == MODE_11AX_HE80_80) {
+ tx_nss = ath11k_get_nss_160mhz(ar, max_nss);
+ rx_nss = min(arg->peer_nss, tx_nss);
+ arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE;
+
+ if (!rx_nss) {
+ ath11k_warn(ar->ab, "invalid max_nss\n");
+ return;
+ }
+
+ if (arg->peer_phymode == MODE_11AX_HE160)
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1);
+ else
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1);
+
+ arg->peer_bw_rxnss_override |= nss_160;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac he peer %pM nss %d mcs cnt %d nss_override 0x%x\n",
+ sta->addr, arg->peer_nss,
+ arg->peer_he_mcs_count,
+ arg->peer_bw_rxnss_override);
+}
+
+static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 ampdu_factor;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+
+ if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->he_6ghz_capa.capa)
+ return;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ arg->peer_he_caps_6ghz = le16_to_cpu(sta->he_6ghz_capa.capa);
+ arg->peer_mpdu_density =
+ ath11k_parse_mpdudensity(FIELD_GET(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START,
+ arg->peer_he_caps_6ghz));
+
+ /* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of
+ * receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the value
+ * indicated by the Maximum A-MPDU Length Exponent Extension field in the HE
+ * Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz
+ * Band Capabilities element in the 6 GHz band.
+ *
+ * Here the Maximum A-MPDU Length Exponent Extension is taken from
+ * the HE caps and added to the Maximum A-MPDU Length Exponent from
+ * the HE 6 GHz Band Capabilities element.
+ */
+ ampdu_factor = FIELD_GET(IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK,
+ he_cap->he_cap_elem.mac_cap_info[3]) +
+ FIELD_GET(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP,
+ arg->peer_he_caps_6ghz);
+
+ arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
}
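[Editorial note, not part of the patch] A worked example of the A-MPDU arithmetic above, assuming the mainline definition IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR == 13:

	/* Exponent Extension 1 (HE MAC caps) + Maximum A-MPDU Length
	 * Exponent 2 (HE 6 GHz band caps) -> ampdu_factor = 3, so
	 * peer_max_mpdu = (1 << (13 + 3)) - 1 = 65535 octets.
	 */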
static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta,
@@ -1427,11 +2069,16 @@ static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta,
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
int smps;
- if (!ht_cap->ht_supported)
+ if (!ht_cap->ht_supported && !sta->he_6ghz_capa.capa)
return;
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ if (ht_cap->ht_supported) {
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ } else {
+ smps = le16_get_bits(sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
+ }
switch (smps) {
case WLAN_HT_CAP_SM_PS_STATIC:
@@ -1621,6 +2268,7 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
enum wmi_phy_mode phymode = MODE_UNKNOWN;
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
@@ -1629,10 +2277,12 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
band = def.chan->band;
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
switch (band) {
case NL80211_BAND_2GHZ:
- if (sta->he_cap.has_he) {
+ if (sta->he_cap.has_he &&
+ !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AX_HE80_2G;
else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
@@ -1660,7 +2310,8 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
/* Check HE first */
- if (sta->he_cap.has_he) {
+ if (sta->he_cap.has_he &&
+ !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
phymode = ath11k_mac_get_phymode_he(ar, sta);
} else if (sta->vht_cap.vht_supported &&
!ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
@@ -1702,11 +2353,12 @@ static void ath11k_peer_assoc_prepare(struct ath11k *ar,
ath11k_peer_assoc_h_basic(ar, vif, sta, arg);
ath11k_peer_assoc_h_crypto(ar, vif, sta, arg);
ath11k_peer_assoc_h_rates(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_phymode(ar, vif, sta, arg);
ath11k_peer_assoc_h_ht(ar, vif, sta, arg);
ath11k_peer_assoc_h_vht(ar, vif, sta, arg);
ath11k_peer_assoc_h_he(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_he_6ghz(ar, vif, sta, arg);
ath11k_peer_assoc_h_qos(ar, vif, sta, arg);
- ath11k_peer_assoc_h_phymode(ar, vif, sta, arg);
ath11k_peer_assoc_h_smps(sta, arg);
/* TODO: amsdu_disable req? */
@@ -1714,15 +2366,20 @@ static void ath11k_peer_assoc_prepare(struct ath11k *ar,
static int ath11k_setup_peer_smps(struct ath11k *ar, struct ath11k_vif *arvif,
const u8 *addr,
- const struct ieee80211_sta_ht_cap *ht_cap)
+ const struct ieee80211_sta_ht_cap *ht_cap,
+ u16 he_6ghz_capa)
{
int smps;
- if (!ht_cap->ht_supported)
+ if (!ht_cap->ht_supported && !he_6ghz_capa)
return 0;
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ if (ht_cap->ht_supported) {
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ } else {
+ smps = FIELD_GET(IEEE80211_HE_6GHZ_CAP_SM_PS, he_6ghz_capa);
+ }
if (smps >= ARRAY_SIZE(ath11k_smps_map))
return -EINVAL;
@@ -1775,7 +2432,8 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
}
ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid,
- &ap_sta->ht_cap);
+ &ap_sta->ht_cap,
+ le16_to_cpu(ap_sta->he_6ghz_capa.capa));
if (ret) {
ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -1956,7 +2614,7 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar,
/* Set and enable SRG/non-SRG OBSS PD Threshold */
param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD;
- if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags)) {
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
ret = ath11k_wmi_pdev_set_param(ar, param_id, 0, pdev_id);
if (ret)
ath11k_warn(ar->ab,
@@ -2383,18 +3041,21 @@ void __ath11k_mac_scan_finish(struct ath11k *ar)
break;
case ATH11K_SCAN_RUNNING:
case ATH11K_SCAN_ABORTING:
+ if (ar->scan.is_roc && ar->scan.roc_notify)
+ ieee80211_remain_on_channel_expired(ar->hw);
+ fallthrough;
+ case ATH11K_SCAN_STARTING:
if (!ar->scan.is_roc) {
struct cfg80211_scan_info info = {
- .aborted = (ar->scan.state ==
- ATH11K_SCAN_ABORTING),
+ .aborted = ((ar->scan.state ==
+ ATH11K_SCAN_ABORTING) ||
+ (ar->scan.state ==
+ ATH11K_SCAN_STARTING)),
};
ieee80211_scan_completed(ar->hw, &info);
- } else if (ar->scan.roc_notify) {
- ieee80211_remain_on_channel_expired(ar->hw);
}
- fallthrough;
- case ATH11K_SCAN_STARTING:
+
ar->scan.state = ATH11K_SCAN_IDLE;
ar->scan_channel = NULL;
ar->scan.roc_freq = 0;
@@ -2887,6 +3548,20 @@ ath11k_mac_bitrate_mask_num_vht_rates(struct ath11k *ar,
}
static int
+ath11k_mac_bitrate_mask_num_he_rates(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++)
+ num_rates += hweight16(mask->control[band].he_mcs[i]);
+
+ return num_rates;
+}
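[Editorial note, not part of the patch] The helper counts set bits across the per-NSS HE MCS masks; the single-rate case it detects, for example:

	/* he_mcs = { BIT(7), 0, ... } -> num_rates = 1; callers treat
	 * this as a fixed-rate request (HE MCS 7, nss 1).
	 */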
+
+static int
ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
struct ieee80211_sta *sta,
const struct cfg80211_bitrate_mask *mask,
@@ -2914,6 +3589,10 @@ ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
return -EINVAL;
}
+ /* Avoid updating invalid nss as fixed rate */
+ if (nss > sta->rx_nss)
+ return -EINVAL;
+
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
sta->addr);
@@ -2932,6 +3611,57 @@ ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
return ret;
}
+static int
+ath11k_mac_set_peer_he_fixed_rate(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta,
+ const struct cfg80211_bitrate_mask *mask,
+ enum nl80211_band band)
+{
+ struct ath11k *ar = arvif->ar;
+ u8 he_rate, nss;
+ u32 rate_code;
+ int ret, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ nss = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+ if (hweight16(mask->control[band].he_mcs[i]) == 1) {
+ nss = i + 1;
+ he_rate = ffs(mask->control[band].he_mcs[i]) - 1;
+ }
+ }
+
+ if (!nss) {
+ ath11k_warn(ar->ab, "No single he fixed rate found to set for %pM",
+ sta->addr);
+ return -EINVAL;
+ }
+
+ /* Avoid updating invalid nss as fixed rate */
+ if (nss > sta->rx_nss)
+ return -EINVAL;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac setting fixed he rate for peer %pM, device will not switch to any other selected rates",
+ sta->addr);
+
+ rate_code = ATH11K_HW_RATE_CODE(he_rate, nss - 1,
+ WMI_RATE_PREAMBLE_HE);
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ rate_code);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to update sta %pM fixed rate %d: %d\n",
+ sta->addr, rate_code, ret);
+
+ return ret;
+}
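[Editorial note, not part of the patch] An example of how the nss/he_rate pair falls out of the selection loop above:

	/* he_mcs = { 0, BIT(7), 0, ... }: hweight16(BIT(7)) == 1, so
	 * nss = 2 and he_rate = ffs(0x80) - 1 = 7; the rate code then
	 * encodes HE MCS 7 at nss 2 with the HE preamble.
	 */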
+
static int ath11k_station_assoc(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -2943,7 +3673,7 @@ static int ath11k_station_assoc(struct ath11k *ar,
struct cfg80211_chan_def def;
enum nl80211_band band;
struct cfg80211_bitrate_mask *mask;
- u8 num_vht_rates;
+ u8 num_vht_rates, num_he_rates;
lockdep_assert_held(&ar->conf_mutex);
@@ -2969,9 +3699,10 @@ static int ath11k_station_assoc(struct ath11k *ar,
}
num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
+ num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask);
- /* If single VHT rate is configured (by set_bitrate_mask()),
- * peer_assoc will disable VHT. This is now enabled by a peer specific
+ /* If single VHT/HE rate is configured (by set_bitrate_mask()),
+ * peer_assoc will disable VHT/HE. This is now enabled by a peer-specific
* fixed param.
* Note that all other rates and NSS will be disabled for this peer.
*/
@@ -2980,6 +3711,11 @@ static int ath11k_station_assoc(struct ath11k *ar,
band);
if (ret)
return ret;
+ } else if (sta->he_cap.has_he && num_he_rates == 1) {
+ ret = ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
+ band);
+ if (ret)
+ return ret;
}
/* Re-assoc is run only to update supported rates for given station. It
@@ -2989,7 +3725,7 @@ static int ath11k_station_assoc(struct ath11k *ar,
return 0;
ret = ath11k_setup_peer_smps(ar, arvif, sta->addr,
- &sta->ht_cap);
+ &sta->ht_cap, le16_to_cpu(sta->he_6ghz_capa.capa));
if (ret) {
ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -3050,8 +3786,9 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
u32 changed, bw, nss, smps;
- int err, num_vht_rates;
+ int err, num_vht_rates, num_he_rates;
const struct cfg80211_bitrate_mask *mask;
struct peer_assoc_params peer_arg;
@@ -3066,6 +3803,7 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
band = def.chan->band;
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
spin_lock_bh(&ar->data_lock);
@@ -3081,8 +3819,9 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
mutex_lock(&ar->conf_mutex);
nss = max_t(u32, 1, nss);
- nss = min(nss, max(ath11k_mac_max_ht_nss(ht_mcs_mask),
- ath11k_mac_max_vht_nss(vht_mcs_mask)));
+ nss = min(nss, max(max(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask)),
+ ath11k_mac_max_he_nss(he_mcs_mask)));
if (changed & IEEE80211_RC_BW_CHANGED) {
err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
@@ -3118,6 +3857,8 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
mask = &arvif->bitrate_mask;
num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band,
mask);
+ num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band,
+ mask);
/* Peer_assoc_prepare will reject vht rates in
* bitrate_mask if its not available in range format and
@@ -3133,11 +3874,25 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
if (sta->vht_cap.vht_supported && num_vht_rates == 1) {
ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
band);
+ } else if (sta->he_cap.has_he && num_he_rates == 1) {
+ ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
+ band);
} else {
- /* If the peer is non-VHT or no fixed VHT rate
+ /* If the peer is non-VHT/HE or no fixed VHT/HE rate
* is provided in the new bitrate mask we set the
- * other rates using peer_assoc command.
+ * other rates using the peer_assoc command. Also clear
+ * the peer fixed rate settings, as they have higher priority
+ * than peer_assoc
*/
+ err = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ WMI_FIXED_RATE_NONE);
+ if (err)
+ ath11k_warn(ar->ab,
+ "failed to disable peer fixed rate for sta %pM: %d\n",
+ sta->addr, err);
+
ath11k_peer_assoc_prepare(ar, arvif->vif, sta,
&peer_arg, true);
@@ -3155,6 +3910,31 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
mutex_unlock(&ar->conf_mutex);
}
+static void ath11k_sta_set_4addr_wk(struct work_struct *wk)
+{
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ int ret = 0;
+
+ arsta = container_of(wk, struct ath11k_sta, set_4addr_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "setting USE_4ADDR for peer %pM\n", sta->addr);
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set peer %pM 4addr capability: %d\n",
+ sta->addr, ret);
+}
+
static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif,
struct ieee80211_sta *sta)
{
@@ -3234,11 +4014,13 @@ static int ath11k_mac_station_add(struct ath11k *ar,
}
if (ieee80211_vif_is_mesh(vif)) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "setting USE_4ADDR for mesh STA %pM\n", sta->addr);
ret = ath11k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_USE_4ADDR, 1);
if (ret) {
- ath11k_warn(ab, "failed to STA %pM 4addr capability: %d\n",
+ ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n",
sta->addr, ret);
goto free_tx_stats;
}
@@ -3291,8 +4073,10 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
/* cancel must be done outside the mutex to avoid deadlock */
if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST))
+ new_state == IEEE80211_STA_NOTEXIST)) {
cancel_work_sync(&arsta->update_wk);
+ cancel_work_sync(&arsta->set_4addr_wk);
+ }
mutex_lock(&ar->conf_mutex);
@@ -3301,6 +4085,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
+ INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
ret = ath11k_mac_station_add(ar, vif, sta);
if (ret)
@@ -3395,6 +4180,19 @@ out:
return ret;
}
+static void ath11k_mac_op_sta_set_4addr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enabled)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ if (enabled && !arsta->use_4addr_set) {
+ ieee80211_queue_work(ar->hw, &arsta->set_4addr_wk);
+ arsta->use_4addr_set = true;
+ }
+}
+
static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -3765,11 +4563,6 @@ ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask,
ath11k_set_vht_txbf_cap(ar, &vht_cap.cap);
- /* TODO: Enable back VHT160 mode once association issues are fixed */
- /* Disabling VHT160 and VHT80+80 modes */
- vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
- vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
-
rxmcs_map = 0;
txmcs_map = 0;
for (i = 0; i < 8; i++) {
@@ -3814,7 +4607,9 @@ static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar,
rate_cap_rx_chainmask);
}
- if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && !ar->supports_6ghz) {
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ (ar->ab->hw_params.single_pdev_only ||
+ !ar->supports_6ghz)) {
band = &ar->mac.sbands[NL80211_BAND_5GHZ];
ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info;
if (ht_cap_info)
@@ -4313,6 +5108,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ath11k_sta *arsta = NULL;
u32 info_flags = info->flags;
bool is_prb_rsp;
int ret;
@@ -4338,7 +5134,10 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
return;
}
- ret = ath11k_dp_tx(ar, arvif, skb);
+ if (control->sta)
+ arsta = (struct ath11k_sta *)control->sta->drv_priv;
+
+ ret = ath11k_dp_tx(ar, arvif, arsta, skb);
if (ret) {
ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret);
ieee80211_free_txskb(ar->hw, skb);
@@ -4639,7 +5438,8 @@ static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
if (ath11k_frame_mode != ATH11K_HW_TXRX_ETHERNET ||
(vif->type != NL80211_IFTYPE_STATION &&
vif->type != NL80211_IFTYPE_AP))
- vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED |
+ IEEE80211_OFFLOAD_DECAP_ENABLED);
if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
param_value = ATH11K_HW_TXRX_ETHERNET;
@@ -4655,6 +5455,22 @@ static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
}
+
+ param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
+ if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED)
+ param_value = ATH11K_HW_TXRX_ETHERNET;
+ else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ param_value = ATH11K_HW_TXRX_RAW;
+ else
+ param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d rx decap mode: %d\n",
+ arvif->vdev_id, ret);
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED;
+ }
}
static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
@@ -4683,8 +5499,8 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
}
if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
- ath11k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
- TARGET_NUM_VDEVS);
+ ath11k_warn(ab, "failed to create vdev %u, reached max vdev limit %d\n",
+ ar->num_created_vdevs, TARGET_NUM_VDEVS);
ret = -EBUSY;
goto err;
}
@@ -4700,10 +5516,13 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ arvif->bitrate_mask.control[i].gi = NL80211_TXRATE_FORCE_SGI;
memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
sizeof(arvif->bitrate_mask.control[i].ht_mcs));
memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ memset(arvif->bitrate_mask.control[i].he_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].he_mcs));
}
bit = __ffs64(ab->free_vdev_map);
@@ -4724,6 +5543,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+ ar->monitor_vdev_id = bit;
break;
default:
WARN_ON(1);
@@ -4825,6 +5645,9 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err_peer_del;
}
break;
+ case WMI_VDEV_TYPE_MONITOR:
+ set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ break;
default:
break;
}
@@ -4845,6 +5668,16 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
ath11k_dp_vdev_tx_attach(ar, arvif);
+ if (vif->type != NL80211_IFTYPE_MONITOR &&
+ test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_vdev_create(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to create monitor vdev during add interface: %d",
+ ret);
+ goto err_peer_del;
+ }
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -4942,6 +5775,18 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
vif->addr, arvif->vdev_id);
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ ar->monitor_vdev_id = -1;
+ } else if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags) &&
+ !test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_vdev_delete(ar);
+ if (ret)
+ /* continue even if there's an error */
+ ath11k_warn(ar->ab, "failed to delete vdev monitor during remove interface: %d",
+ ret);
+ }
+
err_vdev_del:
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
@@ -4961,7 +5806,6 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
- clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
/* TODO: recalc traffic pause state based on the available vdevs */
@@ -4984,8 +5828,6 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct ath11k *ar = hw->priv;
- bool reset_flag = false;
- int ret = 0;
mutex_lock(&ar->conf_mutex);
@@ -4993,23 +5835,6 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
*total_flags &= SUPPORTED_FILTERS;
ar->filter_flags = *total_flags;
- /* For monitor mode */
- reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
-
- ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
- if (!ret) {
- if (!reset_flag)
- set_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- else
- clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- } else {
- ath11k_warn(ar->ab,
- "fail to set monitor filter: %d\n", ret);
- }
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
- "changed_flags:0x%x, total_flags:0x%x, reset_flag:%d\n",
- changed_flags, *total_flags, reset_flag);
-
mutex_unlock(&ar->conf_mutex);
}
@@ -5118,20 +5943,6 @@ static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
-static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar)
-{
- lockdep_assert_held(&ar->conf_mutex);
-
- if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
- return -ESHUTDOWN;
-
- if (!wait_for_completion_timeout(&ar->vdev_setup_done,
- ATH11K_VDEV_SETUP_TIMEOUT_HZ))
- return -ETIMEDOUT;
-
- return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
-}
-
static int
ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
const struct cfg80211_chan_def *chandef,
@@ -5214,7 +6025,9 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
return ret;
}
- ar->num_started_vdevs++;
+ if (!restart)
+ ar->num_started_vdevs++;
+
ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
arvif->vif->addr, arvif->vdev_id);
@@ -5342,12 +6155,16 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
struct ath11k_vif *arvif;
int ret;
int i;
+ bool monitor_vif = false;
lockdep_assert_held(&ar->conf_mutex);
for (i = 0; i < n_vifs; i++) {
arvif = (void *)vifs[i].vif->drv_priv;
+ if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR)
+ monitor_vif = true;
+
ath11k_dbg(ab, ATH11K_DBG_MAC,
"mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
arvif->vdev_id,
@@ -5368,6 +6185,8 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
arvif->vdev_id, ret);
continue;
}
+
+ ar->num_started_vdevs--;
}
/* All relevant vdevs are downed and associated channel resources
@@ -5405,6 +6224,24 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
continue;
}
}
+
+ /* Restart the internal monitor vdev on new channel */
+ if (!monitor_vif &&
+ test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor during vif channel update: %d",
+ ret);
+ return;
+ }
+
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor during vif channel update: %d",
+ ret);
+ return;
+ }
+ }
}
static void
@@ -5484,7 +6321,7 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
}
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
- ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id);
+ ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, 0, ar->mac_addr);
if (ret) {
ath11k_warn(ab, "failed put monitor up: %d\n", ret);
return ret;
@@ -5544,6 +6381,18 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
}
}
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d",
+ ret);
+ goto out;
+ }
+
+ arvif->is_started = true;
+ goto out;
+ }
+
ret = ath11k_mac_vdev_start(arvif, &ctx->def);
if (ret) {
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
@@ -5551,14 +6400,19 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
ctx->def.chan->center_freq, ret);
goto out;
}
- if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
- ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id);
- if (ret)
- goto out;
- }
arvif->is_started = true;
+ if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d",
+ ret);
+ goto out;
+ }
+ }
+
/* TODO: Setup ps and cts/rts protection */
ret = 0;
@@ -5592,6 +6446,20 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
ath11k_peer_find_by_addr(ab, ar->mac_addr))
ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d",
+ ret);
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+
+ arvif->is_started = false;
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+
ret = ath11k_mac_vdev_stop(arvif);
if (ret)
ath11k_warn(ab, "failed to stop vdev %i: %d\n",
@@ -5603,6 +6471,16 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ ar->num_started_vdevs == 1 &&
+ test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret)
+ /* continue even if there's an error */
+ ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d",
+ ret);
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -5720,9 +6598,26 @@ ath11k_mac_has_single_legacy_rate(struct ath11k *ar,
if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
return false;
+ if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask))
+ return false;
+
return num_rates == 1;
}
+static __le16
+ath11k_mac_get_tx_mcs_map(const struct ieee80211_sta_he_cap *he_cap)
+{
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ return he_cap->he_mcs_nss_supp.tx_mcs_80p80;
+
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ return he_cap->he_mcs_nss_supp.tx_mcs_160;
+
+ return he_cap->he_mcs_nss_supp.tx_mcs_80;
+}
+
static bool
ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar,
enum nl80211_band band,
@@ -5731,8 +6626,10 @@ ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar,
{
struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u16 he_mcs_map = 0;
u8 ht_nss_mask = 0;
u8 vht_nss_mask = 0;
+ u8 he_nss_mask = 0;
int i;
/* No need to consider legacy here. Basic rates are always present
@@ -5759,7 +6656,20 @@ ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar,
return false;
}
- if (ht_nss_mask != vht_nss_mask)
+ he_mcs_map = le16_to_cpu(ath11k_mac_get_tx_mcs_map(&sband->iftype_data->he_cap));
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+ if (mask->control[band].he_mcs[i] == 0)
+ continue;
+
+ if (mask->control[band].he_mcs[i] ==
+ ath11k_mac_get_max_he_mcs_map(he_mcs_map, i))
+ he_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ if (ht_nss_mask != vht_nss_mask || ht_nss_mask != he_nss_mask)
return false;
if (ht_nss_mask == 0)
@@ -5806,42 +6716,125 @@ ath11k_mac_get_single_legacy_rate(struct ath11k *ar,
return 0;
}
-static int ath11k_mac_set_fixed_rate_params(struct ath11k_vif *arvif,
- u32 rate, u8 nss, u8 sgi, u8 ldpc)
+static int
+ath11k_mac_set_fixed_rate_gi_ltf(struct ath11k_vif *arvif, u8 he_gi, u8 he_ltf)
{
struct ath11k *ar = arvif->ar;
- u32 vdev_param;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ /* WMI encoding: 0.8 us = 0, 1.6 us = 2 and 3.2 us = 3. */
+ if (he_gi && he_gi != 0xFF)
+ he_gi += 1;
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
- arvif->vdev_id, rate, nss, sgi);
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_SGI, he_gi);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set he gi %d: %d\n",
+ he_gi, ret);
+ return ret;
+ }
+ /* WMI HE LTF values start from 1 */
+ if (he_ltf != 0xFF)
+ he_ltf += 1;
- vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- vdev_param, rate);
+ WMI_VDEV_PARAM_HE_LTF, he_ltf);
if (ret) {
- ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
- rate, ret);
+ ath11k_warn(ar->ab, "failed to set he ltf %d: %d\n",
+ he_ltf, ret);
return ret;
}
- vdev_param = WMI_VDEV_PARAM_NSS;
+ return 0;
+}
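[Editorial note, not part of the patch] The resulting fixed-rate encodings, spelled out:

	/* nl80211 HE GI 0 (0.8 us) -> WMI 0, 1 (1.6 us) -> WMI 2,
	 * 2 (3.2 us) -> WMI 3; nl80211 HE LTF 0/1/2 -> WMI 1/2/3;
	 * 0xFF ("unset") is passed through unchanged.
	 */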
+
+static int
+ath11k_mac_set_auto_rate_gi_ltf(struct ath11k_vif *arvif, u16 he_gi, u8 he_ltf)
+{
+ struct ath11k *ar = arvif->ar;
+ int ret;
+ u32 he_ar_gi_ltf;
+
+ if (he_gi != 0xFF) {
+ switch (he_gi) {
+ case NL80211_RATE_INFO_HE_GI_0_8:
+ he_gi = WMI_AUTORATE_800NS_GI;
+ break;
+ case NL80211_RATE_INFO_HE_GI_1_6:
+ he_gi = WMI_AUTORATE_1600NS_GI;
+ break;
+ case NL80211_RATE_INFO_HE_GI_3_2:
+ he_gi = WMI_AUTORATE_3200NS_GI;
+ break;
+ default:
+ ath11k_warn(ar->ab, "invalid he gi: %d\n", he_gi);
+ return -EINVAL;
+ }
+ }
+
+ if (he_ltf != 0xFF) {
+ switch (he_ltf) {
+ case NL80211_RATE_INFO_HE_1XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_1X;
+ break;
+ case NL80211_RATE_INFO_HE_2XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_2X;
+ break;
+ case NL80211_RATE_INFO_HE_4XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_4X;
+ break;
+ default:
+ ath11k_warn(ar->ab, "invalid he ltf: %d\n", he_ltf);
+ return -EINVAL;
+ }
+ }
+
+ he_ar_gi_ltf = he_gi | he_ltf;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- vdev_param, nss);
+ WMI_VDEV_PARAM_AUTORATE_MISC_CFG,
+ he_ar_gi_ltf);
if (ret) {
- ath11k_warn(ar->ab, "failed to set nss param %d: %d\n",
- nss, ret);
+ ath11k_warn(ar->ab,
+ "failed to set he autorate gi %u ltf %u: %d\n",
+ he_gi, he_ltf, ret);
return ret;
}
- vdev_param = WMI_VDEV_PARAM_SGI;
+ return 0;
+}
+
+static int ath11k_mac_set_rate_params(struct ath11k_vif *arvif,
+ u32 rate, u8 nss, u8 sgi, u8 ldpc,
+ u8 he_gi, u8 he_ltf, bool he_fixed_rate)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac set rate params vdev %i rate 0x%02x nss 0x%02x sgi 0x%02x ldpc 0x%02x he_gi 0x%02x he_ltf 0x%02x he_fixed_rate %d\n",
+ arvif->vdev_id, rate, nss, sgi, ldpc, he_gi,
+ he_ltf, he_fixed_rate);
+
+ if (!arvif->vif->bss_conf.he_support) {
+ vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
+ rate, ret);
+ return ret;
+ }
+ }
+
+ vdev_param = WMI_VDEV_PARAM_NSS;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- vdev_param, sgi);
+ vdev_param, nss);
if (ret) {
- ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n",
- sgi, ret);
+ ath11k_warn(ar->ab, "failed to set nss param %d: %d\n",
+ nss, ret);
return ret;
}
@@ -5854,6 +6847,35 @@ static int ath11k_mac_set_fixed_rate_params(struct ath11k_vif *arvif,
return ret;
}
+ if (arvif->vif->bss_conf.he_support) {
+ if (he_fixed_rate) {
+ ret = ath11k_mac_set_fixed_rate_gi_ltf(arvif, he_gi,
+ he_ltf);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate gi ltf: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ ret = ath11k_mac_set_auto_rate_gi_ltf(arvif, he_gi,
+ he_ltf);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set auto rate gi ltf: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ } else {
+ vdev_param = WMI_VDEV_PARAM_SGI;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, sgi);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n",
+ sgi, ret);
+ return ret;
+ }
+ }
+
return 0;
}
@@ -5882,6 +6904,31 @@ ath11k_mac_vht_mcs_range_present(struct ath11k *ar,
return true;
}
+static bool
+ath11k_mac_he_mcs_range_present(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 he_mcs;
+
+ for (i = 0; i < NL80211_HE_NSS_MAX; i++) {
+ he_mcs = mask->control[band].he_mcs[i];
+
+ switch (he_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(10) - 1:
+ case BIT(12) - 1:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
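Spelled out, the switch above accepts only the contiguous per-NSS MCS ranges HE can signal (a reading aid, not part of the patch):

/* Accepted he_mcs masks per NSS index:
 *   0              - this NSS disabled
 *   BIT(8)  - 1 == 0x0ff - MCS 0-7
 *   BIT(10) - 1 == 0x3ff - MCS 0-9
 *   BIT(12) - 1 == 0xfff - MCS 0-11
 * Anything else (e.g. 0x0f0) is rejected, since a non-contiguous
 * per-NSS MCS range cannot be programmed through this path.
 */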
static void ath11k_mac_set_bitrate_mask_iter(void *data,
struct ieee80211_sta *sta)
{
@@ -5913,6 +6960,54 @@ static void ath11k_mac_disable_peer_fixed_rate(void *data,
sta->addr, ret);
}
+static bool
+ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ bool he_fixed_rate = false, vht_fixed_rate = false;
+ struct ath11k_peer *peer, *tmp;
+ const u16 *vht_mcs_mask, *he_mcs_mask;
+ u8 vht_nss, he_nss;
+ bool ret = true;
+
+ vht_mcs_mask = mask->control[band].vht_mcs;
+ he_mcs_mask = mask->control[band].he_mcs;
+
+ if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask) == 1)
+ vht_fixed_rate = true;
+
+ if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask) == 1)
+ he_fixed_rate = true;
+
+ if (!vht_fixed_rate && !he_fixed_rate)
+ return true;
+
+ vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask);
+ he_nss = ath11k_mac_max_he_nss(he_mcs_mask);
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ar->ab->peers, list) {
+ if (peer->sta) {
+ if (vht_fixed_rate && (!peer->sta->vht_cap.vht_supported ||
+ peer->sta->rx_nss < vht_nss)) {
+ ret = false;
+ goto out;
+ }
+ if (he_fixed_rate && (!peer->sta->he_cap.has_he ||
+ peer->sta->rx_nss < he_nss)) {
+ ret = false;
+ goto out;
+ }
+ }
+ }
+
+out:
+ spin_unlock_bh(&ar->ab->base_lock);
+ rcu_read_unlock();
+ return ret;
+}
+
static int
ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -5924,6 +7019,9 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
+ u8 he_ltf = 0;
+ u8 he_gi = 0;
u32 rate;
u8 nss;
u8 sgi;
@@ -5931,6 +7029,7 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
int single_nss;
int ret;
int num_rates;
+ bool he_fixed_rate = false;
if (ath11k_mac_vif_chan(vif, &def))
return -EPERM;
@@ -5938,12 +7037,16 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
band = def.chan->band;
ht_mcs_mask = mask->control[band].ht_mcs;
vht_mcs_mask = mask->control[band].vht_mcs;
+ he_mcs_mask = mask->control[band].he_mcs;
ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
sgi = mask->control[band].gi;
if (sgi == NL80211_TXRATE_FORCE_LGI)
return -EINVAL;
+ he_gi = mask->control[band].he_gi;
+ he_ltf = mask->control[band].he_ltf;
+
/* mac80211 doesn't support sending a fixed HT/VHT MCS alone; rather it
 * requires passing at least one of the used basic rates along with them.
 * Fixed rate setting across different preambles (legacy, HT, VHT) is
@@ -5967,11 +7070,22 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
&single_nss)) {
rate = WMI_FIXED_RATE_NONE;
nss = single_nss;
+ mutex_lock(&ar->conf_mutex);
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_set_bitrate_mask_iter,
+ arvif);
+ mutex_unlock(&ar->conf_mutex);
} else {
rate = WMI_FIXED_RATE_NONE;
+
+ if (!ath11k_mac_validate_vht_he_fixed_rate_settings(ar, band, mask))
+ ath11k_warn(ar->ab,
+ "could not update fixed rate settings to all peers due to mcs/nss incompaitiblity\n");
nss = min_t(u32, ar->num_tx_chains,
- max(ath11k_mac_max_ht_nss(ht_mcs_mask),
- ath11k_mac_max_vht_nss(vht_mcs_mask)));
+ max(max(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask)),
+ ath11k_mac_max_he_nss(he_mcs_mask)));
/* If multiple rates across different preambles are given
* we can reconfigure this info with all peers using PEER_ASSOC
@@ -6002,16 +7116,28 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
* RATEMASK CMD
*/
ath11k_warn(ar->ab,
- "Setting more than one MCS Value in bitrate mask not supported\n");
+ "setting %d mcs values in bitrate mask not supported\n",
+ num_rates);
return -EINVAL;
}
+ num_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band,
+ mask);
+ if (num_rates == 1)
+ he_fixed_rate = true;
+
+ if (!ath11k_mac_he_mcs_range_present(ar, band, mask) &&
+ num_rates > 1) {
+ ath11k_warn(ar->ab,
+ "Setting more than one HE MCS Value in bitrate mask not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ar->conf_mutex);
ieee80211_iterate_stations_atomic(ar->hw,
ath11k_mac_disable_peer_fixed_rate,
arvif);
- mutex_lock(&ar->conf_mutex);
-
arvif->bitrate_mask = *mask;
ieee80211_iterate_stations_atomic(ar->hw,
ath11k_mac_set_bitrate_mask_iter,
@@ -6022,9 +7148,10 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
- ret = ath11k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
+ ret = ath11k_mac_set_rate_params(arvif, rate, nss, sgi, ldpc, he_gi,
+ he_ltf, he_fixed_rate);
if (ret) {
- ath11k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n",
+ ath11k_warn(ar->ab, "failed to set rate params on vdev %i: %d\n",
arvif->vdev_id, ret);
}
@@ -6109,7 +7236,13 @@ static int ath11k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
if (!sband)
sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_6GHZ];
if (!sband || idx >= sband->n_channels) {
ret = -ENOENT;
goto exit;
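To illustrate the flat survey index handled above (channel counts are invented for illustration):

/* idx walks 2 GHz, then 5 GHz, then 6 GHz. With, say, 14 channels on
 * 2 GHz and 28 on 5 GHz, idx 45 resolves to 6 GHz channel
 * 45 - 14 - 28 = 3; once all bands are exhausted the lookup
 * returns -ENOENT.
 */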
@@ -6180,6 +7313,7 @@ static const struct ieee80211_ops ath11k_ops = {
.cancel_hw_scan = ath11k_mac_op_cancel_hw_scan,
.set_key = ath11k_mac_op_set_key,
.sta_state = ath11k_mac_op_sta_state,
+ .sta_set_4addr = ath11k_mac_op_sta_set_4addr,
.sta_set_txpwr = ath11k_mac_op_sta_set_txpwr,
.sta_rc_update = ath11k_mac_op_sta_rc_update,
.conf_tx = ath11k_mac_op_conf_tx,
@@ -6240,7 +7374,7 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
u32 supported_bands)
{
struct ieee80211_supported_band *band;
- struct ath11k_hal_reg_capabilities_ext *reg_cap;
+ struct ath11k_hal_reg_capabilities_ext *reg_cap, *temp_reg_cap;
void *channels;
u32 phy_id;
@@ -6250,6 +7384,7 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
ATH11K_NUM_CHANS);
reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
+ temp_reg_cap = reg_cap;
if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
channels = kmemdup(ath11k_2ghz_channels,
@@ -6268,11 +7403,11 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
if (ar->ab->hw_params.single_pdev_only) {
phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
- reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
}
ath11k_mac_update_ch_list(ar, band,
- reg_cap->low_2ghz_chan,
- reg_cap->high_2ghz_chan);
+ temp_reg_cap->low_2ghz_chan,
+ temp_reg_cap->high_2ghz_chan);
}
if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
@@ -6292,9 +7427,15 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
band->n_bitrates = ath11k_a_rates_size;
band->bitrates = ath11k_a_rates;
ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
+
+ if (ar->ab->hw_params.single_pdev_only) {
+ phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
+ temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
+
ath11k_mac_update_ch_list(ar, band,
- reg_cap->low_5ghz_chan,
- reg_cap->high_5ghz_chan);
+ temp_reg_cap->low_5ghz_chan,
+ temp_reg_cap->high_5ghz_chan);
}
if (reg_cap->low_5ghz_chan < ATH11K_MIN_6G_FREQ) {
@@ -6317,12 +7458,12 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
if (ar->ab->hw_params.single_pdev_only) {
phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
- reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
}
ath11k_mac_update_ch_list(ar, band,
- reg_cap->low_5ghz_chan,
- reg_cap->high_5ghz_chan);
+ temp_reg_cap->low_5ghz_chan,
+ temp_reg_cap->high_5ghz_chan);
}
}
@@ -6367,7 +7508,9 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80);
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160);
ar->hw->wiphy->iface_combinations = combinations;
ar->hw->wiphy->n_iface_combinations = 1;
@@ -6505,8 +7648,16 @@ static int __ath11k_mac_register(struct ath11k *ar)
ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
- ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
- if (ht_cap & WMI_HT_CAP_ENABLED) {
+
+ if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET) {
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+ ieee80211_hw_set(ar->hw, SUPPORTS_RX_DECAP_OFFLOAD);
+ }
+
+ if (cap->nss_ratio_enabled)
+ ieee80211_hw_set(ar->hw, SUPPORTS_VHT_EXT_NSS_BW);
+
+ if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) {
ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
@@ -6521,7 +7672,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
* for each band for a dual band capable radio. It will be tricky to
* handle it when the ht capability is different for each band.
*/
- if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS)
+ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || ar->supports_6ghz)
ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
@@ -6590,7 +7741,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
/* Apply the regd received during initialization */
- ret = ath11k_regd_update(ar, true);
+ ret = ath11k_regd_update(ar);
if (ret) {
ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
goto err_unregister_hw;
@@ -6631,6 +7782,10 @@ int ath11k_mac_register(struct ath11k_base *ab)
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
+ /* Initialize channel counters frequency value in hertz */
+ ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
@@ -6641,18 +7796,14 @@ int ath11k_mac_register(struct ath11k_base *ab)
ar->mac_addr[4] += i;
}
+ idr_init(&ar->txmgmt_idr);
+ spin_lock_init(&ar->txmgmt_idr_lock);
+
ret = __ath11k_mac_register(ar);
if (ret)
goto err_cleanup;
-
- idr_init(&ar->txmgmt_idr);
- spin_lock_init(&ar->txmgmt_idr_lock);
}
- /* Initialize channel counters frequency value in hertz */
- ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
- ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
-
return 0;
err_cleanup:
@@ -6723,7 +7874,11 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
- clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+
+ clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+
+ ar->monitor_vdev_id = -1;
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
}
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 4bc59bdaf244..254ca4acc8e8 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -115,6 +115,9 @@ struct ath11k_generic_iter {
#define WMI_MAX_SPATIAL_STREAM 3
#define ATH11K_CHAN_WIDTH_NUM 8
+#define ATH11K_BW_NSS_MAP_ENABLE BIT(31)
+#define ATH11K_PEER_RX_NSS_160MHZ GENMASK(2, 0)
+#define ATH11K_PEER_RX_NSS_80_80MHZ GENMASK(5, 3)
#define ATH11K_OBSS_PD_MAX_THRESHOLD -82
#define ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD -62
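A usage sketch for the new bandwidth/NSS map fields above — an enable bit plus two 3-bit NSS fields. The field values below are invented for illustration and the exact encoding follows the firmware interface; FIELD_PREP() comes from <linux/bitfield.h>:

u32 rxnss_override = ATH11K_BW_NSS_MAP_ENABLE |
		     FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, 1) |
		     FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, 1);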
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 5abb38cc3b55..7b3bce0ba76e 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -430,6 +430,8 @@ static void ath11k_pci_force_wake(struct ath11k_base *ab)
static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
{
+ mdelay(100);
+
if (power_on) {
ath11k_pci_enable_ltssm(ab);
ath11k_pci_clear_all_intrs(ab);
@@ -439,9 +441,9 @@ static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
}
ath11k_mhi_clear_vector(ab);
+ ath11k_pci_clear_dbg_registers(ab);
ath11k_pci_soc_global_reset(ab);
ath11k_mhi_set_mhictrl_reset(ab);
- ath11k_pci_clear_dbg_registers(ab);
}
int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector)
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index f49abefa9618..85471f8b3563 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -251,6 +251,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
struct ieee80211_sta *sta, struct peer_create_params *param)
{
struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -319,6 +320,16 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
+ if (sta) {
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+ arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
+ FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
+ peer->peer_id);
+
+ /* set HTT extension valid bit to 0 by default */
+ arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+ }
+
ar->num_peers++;
spin_unlock_bh(&ar->ab->base_lock);
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index b5e34d670715..8c615bc788ca 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -951,6 +951,78 @@ static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
num_macs),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ voltage_mv_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ voltage_mv),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ time_freq_hz_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ time_freq_hz),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ otp_version_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ otp_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ eeprom_read_timeout_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ eeprom_read_timeout),
+ },
+ {
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -1770,7 +1842,7 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
chunk->vaddr = dma_alloc_coherent(ab->dev,
chunk->size,
&chunk->paddr,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!chunk->vaddr) {
if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
@@ -1846,8 +1918,8 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
- ret = qmi_txn_init(&ab->qmi.handle, &txn,
- qmi_wlanfw_cap_resp_msg_v01_ei, &resp);
+ ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_cap_resp_msg_v01_ei,
+ &resp);
if (ret < 0)
goto out;
@@ -1900,6 +1972,12 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
strlcpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
sizeof(ab->qmi.target.fw_build_id));
+ if (resp.eeprom_read_timeout_valid) {
+ ab->qmi.target.eeprom_caldata =
+ resp.eeprom_read_timeout;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi cal data supported from eeprom\n");
+ }
+
ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
ab->qmi.target.chip_id, ab->qmi.target.chip_family,
ab->qmi.target.board_id, ab->qmi.target.soc_id);
@@ -1917,98 +1995,73 @@ out:
return ret;
}
-static int
-ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type,
- struct qmi_wlanfw_bdf_download_req_msg_v01 *req,
- void __iomem *bdf_addr)
-{
- const struct firmware *fw_entry;
- struct ath11k_board_data bd;
- u32 fw_size;
- int ret;
-
- switch (type) {
- case ATH11K_QMI_FILE_TYPE_BDF_GOLDEN:
- memset(&bd, 0, sizeof(bd));
-
- ret = ath11k_core_fetch_bdf(ab, &bd);
- if (ret) {
- ath11k_warn(ab, "failed to load board file: %d\n", ret);
- return ret;
- }
-
- fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len);
- memcpy_toio(bdf_addr, bd.data, fw_size);
- ath11k_core_free_bdf(ab, &bd);
- break;
- case ATH11K_QMI_FILE_TYPE_CALDATA:
- fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE);
- if (IS_ERR(fw_entry)) {
- ret = PTR_ERR(fw_entry);
- ath11k_warn(ab, "failed to load %s: %d\n",
- ATH11K_DEFAULT_CAL_FILE, ret);
- return ret;
- }
-
- fw_size = min_t(u32, ab->hw_params.fw.board_size,
- fw_entry->size);
-
- memcpy_toio(bdf_addr + ATH11K_QMI_CALDATA_OFFSET,
- fw_entry->data, fw_size);
-
- release_firmware(fw_entry);
- break;
- default:
- return -EINVAL;
- }
-
- req->total_size = fw_size;
- return 0;
-}
-
-static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab)
+static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
+ const u8 *data, u32 len, u8 type)
{
struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
struct qmi_txn txn = {};
+ const u8 *temp = data;
void __iomem *bdf_addr = NULL;
- int type, ret;
+ int ret;
+ u32 remaining = len;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
+
memset(&resp, 0, sizeof(resp));
- bdf_addr = ioremap(ab->hw_params.bdf_addr, ATH11K_QMI_BDF_MAX_SIZE);
- if (!bdf_addr) {
- ath11k_warn(ab, "failed ioremap for board file\n");
- ret = -EIO;
- goto out;
+ if (ab->bus_params.fixed_bdf_addr) {
+ bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size);
+ if (!bdf_addr) {
+ ath11k_warn(ab, "qmi ioremap error for bdf_addr\n");
+ ret = -EIO;
+ goto err_free_req;
+ }
}
- for (type = 0; type < ATH11K_QMI_MAX_FILE_TYPE; type++) {
+ while (remaining) {
req->valid = 1;
req->file_id_valid = 1;
req->file_id = ab->qmi.target.board_id;
req->total_size_valid = 1;
+ req->total_size = remaining;
req->seg_id_valid = 1;
- req->seg_id = type;
- req->data_valid = 0;
- req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
- req->bdf_type = 0;
- req->bdf_type_valid = 0;
+ req->data_valid = 1;
+ req->bdf_type = type;
+ req->bdf_type_valid = 1;
req->end_valid = 1;
- req->end = 1;
+ req->end = 0;
- ret = ath11k_qmi_prepare_bdf_download(ab, type, req, bdf_addr);
- if (ret < 0)
- goto out_qmi_bdf;
+ if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ if (ab->bus_params.fixed_bdf_addr ||
+ type == ATH11K_QMI_FILE_TYPE_EEPROM) {
+ req->data_valid = 0;
+ req->end = 1;
+ req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ } else {
+ memcpy(req->data, temp, req->data_len);
+ }
+
+ if (ab->bus_params.fixed_bdf_addr) {
+ if (type == ATH11K_QMI_FILE_TYPE_CALDATA)
+ bdf_addr += ab->hw_params.fw.cal_offset;
+
+ memcpy_toio(bdf_addr, temp, len);
+ }
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_bdf_download_resp_msg_v01_ei,
&resp);
if (ret < 0)
- goto out_qmi_bdf;
+ goto err_iounmap;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download req fixed addr type %d\n",
type);
@@ -2019,54 +2072,62 @@ static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab)
qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
if (ret < 0) {
qmi_txn_cancel(&txn);
- goto out_qmi_bdf;
+ goto err_iounmap;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
- if (ret < 0)
- goto out_qmi_bdf;
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait board file download request: %d\n",
+ ret);
+ goto err_iounmap;
+ }
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath11k_warn(ab, "board file download request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
- goto out_qmi_bdf;
+ goto err_iounmap;
+ }
+
+ if (ab->bus_params.fixed_bdf_addr ||
+ type == ATH11K_QMI_FILE_TYPE_EEPROM) {
+ remaining = 0;
+ } else {
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download request remaining %i\n",
+ remaining);
}
}
-out_qmi_bdf:
- iounmap(bdf_addr);
-out:
+err_iounmap:
+ if (ab->bus_params.fixed_bdf_addr)
+ iounmap(bdf_addr);
+
+err_free_req:
kfree(req);
+
return ret;
}
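The loop above streams a file to firmware in QMI_WLANFW_MAX_DATA_SIZE_V01 sized segments, bumping seg_id per request and setting end only on the final one. A minimal sketch of just that segmentation (hypothetical helper; the QMI request plumbing is elided):

static void ath11k_qmi_segment_walk(const u8 *data, u32 len)
{
	u32 remaining = len, seg_id = 0, seg_len;

	while (remaining) {
		seg_len = min_t(u32, remaining, QMI_WLANFW_MAX_DATA_SIZE_V01);
		/* send one bdf download request here:
		 * seg_id, seg_len bytes, end = (remaining == seg_len)
		 */
		data += seg_len;
		remaining -= seg_len;
		seg_id++;
	}
}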
static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
{
- struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
- struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
+ struct device *dev = ab->dev;
+ char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE];
+ const struct firmware *fw_entry;
struct ath11k_board_data bd;
- unsigned int remaining;
- struct qmi_txn txn = {};
- int ret;
- const u8 *temp;
- int bdf_type;
-
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return -ENOMEM;
- memset(&resp, 0, sizeof(resp));
+ u32 fw_size, file_type;
+ int ret = 0, bdf_type;
+ const u8 *tmp;
memset(&bd, 0, sizeof(bd));
ret = ath11k_core_fetch_bdf(ab, &bd);
if (ret) {
- ath11k_warn(ab, "failed to fetch board file: %d\n", ret);
+ ath11k_warn(ab, "qmi failed to fetch board file: %d\n", ret);
goto out;
}
- temp = bd.data;
- remaining = bd.len;
-
if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0)
bdf_type = ATH11K_QMI_BDF_TYPE_ELF;
else
@@ -2074,67 +2135,61 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf_type %d\n", bdf_type);
- while (remaining) {
- req->valid = 1;
- req->file_id_valid = 1;
- req->file_id = ab->qmi.target.board_id;
- req->total_size_valid = 1;
- req->total_size = bd.len;
- req->seg_id_valid = 1;
- req->data_valid = 1;
- req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
- req->bdf_type = bdf_type;
- req->bdf_type_valid = 1;
- req->end_valid = 1;
- req->end = 0;
+ fw_size = bd.len;
+ fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len);
- if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
- req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
- } else {
- req->data_len = remaining;
- req->end = 1;
- }
+ ret = ath11k_qmi_load_file_target_mem(ab, bd.data, fw_size, bdf_type);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to load bdf file\n");
+ goto out;
+ }
- memcpy(req->data, temp, req->data_len);
+ /* QCA6390 does not support cal data, skip it */
+ if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF)
+ goto out;
- ret = qmi_txn_init(&ab->qmi.handle, &txn,
- qmi_wlanfw_bdf_download_resp_msg_v01_ei,
- &resp);
- if (ret < 0)
- goto out_qmi_bdf;
+ if (ab->qmi.target.eeprom_caldata) {
+ file_type = ATH11K_QMI_FILE_TYPE_EEPROM;
+ tmp = filename;
+ fw_size = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ } else {
+ file_type = ATH11K_QMI_FILE_TYPE_CALDATA;
- ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download request remaining %i\n",
- remaining);
+ /* cal-<bus>-<id>.bin */
+ snprintf(filename, sizeof(filename), "cal-%s-%s.bin",
+ ath11k_bus_str(ab->hif.bus), dev_name(dev));
+ fw_entry = ath11k_core_firmware_request(ab, filename);
+ if (!IS_ERR(fw_entry))
+ goto success;
- ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
- QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
- QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
- qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
- if (ret < 0) {
- qmi_txn_cancel(&txn);
- goto out_qmi_bdf;
+ fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE);
+ if (IS_ERR(fw_entry)) {
+ ret = PTR_ERR(fw_entry);
+ ath11k_warn(ab,
+ "qmi failed to load CAL data file:%s\n",
+ filename);
+ goto out;
}
+success:
+ fw_size = min_t(u32, ab->hw_params.fw.board_size, fw_entry->size);
+ tmp = fw_entry->data;
+ }
- ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
- if (ret < 0)
- goto out_qmi_bdf;
-
- if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- ath11k_warn(ab, "bdf download request failed: %d %d\n",
- resp.resp.result, resp.resp.error);
- ret = resp.resp.result;
- goto out_qmi_bdf;
- }
- remaining -= req->data_len;
- temp += req->data_len;
- req->seg_id++;
+ ret = ath11k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to load caldata\n");
+ goto out_qmi_cal;
}
-out_qmi_bdf:
- ath11k_core_free_bdf(ab, &bd);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi caldata type: %u\n", file_type);
+out_qmi_cal:
+ if (!ab->qmi.target.eeprom_caldata)
+ release_firmware(fw_entry);
out:
- kfree(req);
+ ath11k_core_free_bdf(ab, &bd);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi BDF download sequence completed\n");
+
return ret;
}
@@ -2519,10 +2574,7 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
return ret;
}
- if (ab->bus_params.fixed_bdf_addr)
- ret = ath11k_qmi_load_bdf_fixed_addr(ab);
- else
- ret = ath11k_qmi_load_bdf_qmi(ab);
+ ret = ath11k_qmi_load_bdf_qmi(ab);
if (ret < 0) {
ath11k_warn(ab, "failed to load board data file: %d\n", ret);
return ret;
@@ -2707,8 +2759,10 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
list_del(&event->list);
spin_unlock(&qmi->event_lock);
- if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags))
+ if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)) {
+ kfree(event);
return;
+ }
switch (event->type) {
case ATH11K_QMI_EVENT_SERVER_ARRIVE:
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 3d5930330703..3bb0f9ef7996 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -10,11 +10,9 @@
#include <linux/soc/qcom/qmi.h>
#define ATH11K_HOST_VERSION_STRING "WIN"
-#define ATH11K_QMI_WLANFW_TIMEOUT_MS 5000
+#define ATH11K_QMI_WLANFW_TIMEOUT_MS 10000
#define ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE 64
#define ATH11K_QMI_CALDB_ADDRESS 0x4BA00000
-#define ATH11K_QMI_BDF_MAX_SIZE (256 * 1024)
-#define ATH11K_QMI_CALDATA_OFFSET (128 * 1024)
#define ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
#define ATH11K_QMI_WLFW_SERVICE_ID_V01 0x45
#define ATH11K_QMI_WLFW_SERVICE_VERS_V01 0x01
@@ -44,6 +42,7 @@ struct ath11k_base;
enum ath11k_qmi_file_type {
ATH11K_QMI_FILE_TYPE_BDF_GOLDEN,
ATH11K_QMI_FILE_TYPE_CALDATA,
+ ATH11K_QMI_FILE_TYPE_EEPROM,
ATH11K_QMI_MAX_FILE_TYPE,
};
@@ -104,6 +103,7 @@ struct target_info {
u32 board_id;
u32 soc_id;
u32 fw_version;
+ u32 eeprom_caldata;
char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
char bdf_ext[ATH11K_QMI_BDF_EXT_STR_LENGTH];
@@ -135,7 +135,7 @@ struct ath11k_qmi {
wait_queue_head_t cold_boot_waitq;
};
-#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 189
+#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 261
#define QMI_WLANFW_HOST_CAP_REQ_V01 0x0034
#define QMI_WLANFW_HOST_CAP_RESP_MSG_V01_MAX_LEN 7
#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
@@ -285,7 +285,7 @@ struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 {
};
#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
-#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 207
+#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235
#define QMI_WLANFW_CAP_REQ_V01 0x0024
#define QMI_WLANFW_CAP_RESP_V01 0x0024
@@ -366,6 +366,14 @@ struct qmi_wlanfw_cap_resp_msg_v01 {
char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
u8 num_macs_valid;
u8 num_macs;
+ u8 voltage_mv_valid;
+ u32 voltage_mv;
+ u8 time_freq_hz_valid;
+ u32 time_freq_hz;
+ u8 otp_version_valid;
+ u32 otp_version;
+ u8 eeprom_read_timeout_valid;
+ u32 eeprom_read_timeout;
};
struct qmi_wlanfw_cap_req_msg_v01 {
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index e1a1df169034..a66b5bdd2167 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -97,7 +97,6 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
struct channel_param *ch;
enum nl80211_band band;
int num_channels = 0;
- int params_len;
int i, ret;
bands = hw->wiphy->bands;
@@ -117,10 +116,8 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
if (WARN_ON(!num_channels))
return -EINVAL;
- params_len = sizeof(struct scan_chan_list_params) +
- num_channels * sizeof(struct channel_param);
- params = kzalloc(params_len, GFP_KERNEL);
-
+ params = kzalloc(struct_size(params, ch_param, num_channels),
+ GFP_KERNEL);
if (!params)
return -ENOMEM;
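A note on the struct_size() conversion above (general kernel idiom, not specific to this driver): for a struct ending in a flexible array it expands to sizeof(*params) + num_channels * sizeof(params->ch_param[0]) and saturates to SIZE_MAX on overflow, so an absurd channel count makes the allocation fail instead of silently under-allocating.

/* equivalent, but unchecked and thus discouraged: */
params = kzalloc(sizeof(*params) +
		 num_channels * sizeof(struct channel_param), GFP_KERNEL);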
@@ -198,7 +195,7 @@ static void ath11k_copy_regd(struct ieee80211_regdomain *regd_orig,
sizeof(struct ieee80211_reg_rule));
}
-int ath11k_regd_update(struct ath11k *ar, bool init)
+int ath11k_regd_update(struct ath11k *ar)
{
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
@@ -209,7 +206,10 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
spin_lock_bh(&ab->base_lock);
- if (init) {
+ /* Prefer the latest regd update over default if it's available */
+ if (ab->new_regd[pdev_id]) {
+ regd = ab->new_regd[pdev_id];
+ } else {
/* Apply the regd received during init through
* WMI_REG_CHAN_LIST_CC event. In case of failure to
* receive the regd, initialize with a default world
@@ -222,8 +222,6 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
"failed to receive default regd during init\n");
regd = (struct ieee80211_regdomain *)&ath11k_world_regd;
}
- } else {
- regd = ab->new_regd[pdev_id];
}
if (!regd) {
@@ -683,7 +681,7 @@ void ath11k_regd_update_work(struct work_struct *work)
regd_update_work);
int ret;
- ret = ath11k_regd_update(ar, false);
+ ret = ath11k_regd_update(ar);
if (ret) {
/* Firmware has already moved to the new regd. We need
* to maintain channel consistency across FW, Host driver
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index 65d56d44796f..5fb9dc03a74e 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -31,6 +31,6 @@ void ath11k_regd_update_work(struct work_struct *work);
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info, bool intersect);
-int ath11k_regd_update(struct ath11k *ar, bool init);
+int ath11k_regd_update(struct ath11k *ar);
int ath11k_reg_update_chan_list(struct ath11k *ar);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index 1afe67759659..ac4da99b5577 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -11,22 +11,20 @@
#define ATH11K_SPECTRAL_EVENT_TIMEOUT_MS 1
#define ATH11K_SPECTRAL_DWORD_SIZE 4
-/* HW bug, expected BIN size is 2 bytes but HW report as 4 bytes */
-#define ATH11K_SPECTRAL_BIN_SIZE 4
-#define ATH11K_SPECTRAL_ATH11K_MIN_BINS 64
-#define ATH11K_SPECTRAL_ATH11K_MIN_IB_BINS 32
-#define ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS 256
+#define ATH11K_SPECTRAL_MIN_BINS 32
+#define ATH11K_SPECTRAL_MIN_IB_BINS (ATH11K_SPECTRAL_MIN_BINS >> 1)
+#define ATH11K_SPECTRAL_MAX_IB_BINS(x) ((x)->hw_params.spectral.max_fft_bins >> 1)
#define ATH11K_SPECTRAL_SCAN_COUNT_MAX 4095
/* Max channel computed by sum of 2g and 5g band channels */
#define ATH11K_SPECTRAL_TOTAL_CHANNEL 41
#define ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL 70
-#define ATH11K_SPECTRAL_PER_SAMPLE_SIZE (sizeof(struct fft_sample_ath11k) + \
- ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS)
+#define ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x) (sizeof(struct fft_sample_ath11k) + \
+ ATH11K_SPECTRAL_MAX_IB_BINS(x))
#define ATH11K_SPECTRAL_TOTAL_SAMPLE (ATH11K_SPECTRAL_TOTAL_CHANNEL * \
ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL)
-#define ATH11K_SPECTRAL_SUB_BUFF_SIZE ATH11K_SPECTRAL_PER_SAMPLE_SIZE
+#define ATH11K_SPECTRAL_SUB_BUFF_SIZE(x) ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x)
#define ATH11K_SPECTRAL_NUM_SUB_BUF ATH11K_SPECTRAL_TOTAL_SAMPLE
#define ATH11K_SPECTRAL_20MHZ 20
@@ -444,8 +442,8 @@ static ssize_t ath11k_write_file_spectral_bins(struct file *file,
if (kstrtoul(buf, 0, &val))
return -EINVAL;
- if (val < ATH11K_SPECTRAL_ATH11K_MIN_BINS ||
- val > SPECTRAL_ATH11K_MAX_NUM_BINS)
+ if (val < ATH11K_SPECTRAL_MIN_BINS ||
+ val > ar->ab->hw_params.spectral.max_fft_bins)
return -EINVAL;
if (!is_power_of_2(val))
@@ -581,12 +579,12 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
struct spectral_tlv *tlv;
int tlv_len, bin_len, num_bins;
u16 length, freq;
- u8 chan_width_mhz;
+ u8 chan_width_mhz, bin_sz;
int ret;
lockdep_assert_held(&ar->spectral.lock);
- if (!ab->hw_params.spectral_fft_sz) {
+ if (!ab->hw_params.spectral.fft_sz) {
ath11k_warn(ab, "invalid bin size type for hw rev %d\n",
ab->hw_rev);
return -EINVAL;
@@ -596,7 +594,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header));
/* convert Dword into bytes */
tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE;
- bin_len = tlv_len - (sizeof(*fft_report) - sizeof(*tlv));
+ bin_len = tlv_len - ab->hw_params.spectral.fft_hdr_len;
if (data_len < (bin_len + sizeof(*fft_report))) {
ath11k_warn(ab, "mismatch in expected bin len %d and data len %d\n",
@@ -604,12 +602,13 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
return -EINVAL;
}
- num_bins = bin_len / ATH11K_SPECTRAL_BIN_SIZE;
+ bin_sz = ab->hw_params.spectral.fft_sz + ab->hw_params.spectral.fft_pad_sz;
+ num_bins = bin_len / bin_sz;
/* Only In-band bins are useful to user for visualize */
num_bins >>= 1;
- if (num_bins < ATH11K_SPECTRAL_ATH11K_MIN_IB_BINS ||
- num_bins > ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS ||
+ if (num_bins < ATH11K_SPECTRAL_MIN_IB_BINS ||
+ num_bins > ATH11K_SPECTRAL_MAX_IB_BINS(ab) ||
!is_power_of_2(num_bins)) {
ath11k_warn(ab, "Invalid num of bins %d\n", num_bins);
return -EINVAL;
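A worked example of the bin accounting above (parameter values are illustrative; the per-chip numbers live in hw_params): with fft_sz = 2 and fft_pad_sz = 2 — the old fixed 4-byte bin case — a 256-byte bin blob gives:

bin_sz   = 2 + 2;		/* 4 bytes per reported bin */
num_bins = 256 / bin_sz;	/* 64 bins */
num_bins >>= 1;			/* 32 in-band bins kept for the user */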
@@ -654,7 +653,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
fft_sample->freq2 = __cpu_to_be16(freq);
ath11k_spectral_parse_fft(fft_sample->data, fft_report->bins, num_bins,
- ab->hw_params.spectral_fft_sz);
+ ab->hw_params.spectral.fft_sz);
fft_sample->max_exp = ath11k_spectral_get_max_exp(fft_sample->max_index,
search.peak_mag,
@@ -690,7 +689,7 @@ static int ath11k_spectral_process_data(struct ath11k *ar,
goto unlock;
}
- sample_sz = sizeof(*fft_sample) + ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS;
+ sample_sz = sizeof(*fft_sample) + ATH11K_SPECTRAL_MAX_IB_BINS(ab);
fft_sample = kmalloc(sample_sz, GFP_ATOMIC);
if (!fft_sample) {
ret = -ENOBUFS;
@@ -738,7 +737,8 @@ static int ath11k_spectral_process_data(struct ath11k *ar,
* is 4 DWORD size (16 bytes).
* Need to remove this workaround once the HW bug is fixed
*/
- tlv_len = sizeof(*summary) - sizeof(*tlv);
+ tlv_len = sizeof(*summary) - sizeof(*tlv) +
+ ab->hw_params.spectral.summary_pad_sz;
if (tlv_len < (sizeof(*summary) - sizeof(*tlv))) {
ath11k_warn(ab, "failed to parse spectral summary at bytes %d tlv_len:%d\n",
@@ -901,7 +901,7 @@ static inline int ath11k_spectral_debug_register(struct ath11k *ar)
ar->spectral.rfs_scan = relay_open("spectral_scan",
ar->debug.debugfs_pdev,
- ATH11K_SPECTRAL_SUB_BUFF_SIZE,
+ ATH11K_SPECTRAL_SUB_BUFF_SIZE(ar->ab),
ATH11K_SPECTRAL_NUM_SUB_BUF,
&rfs_scan_cb, NULL);
if (!ar->spectral.rfs_scan) {
@@ -962,7 +962,7 @@ int ath11k_spectral_init(struct ath11k_base *ab)
ab->wmi_ab.svc_map))
return 0;
- if (!ab->hw_params.spectral_fft_sz)
+ if (!ab->hw_params.spectral.fft_sz)
return 0;
for (i = 0; i < ab->num_radios; i++) {
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index d2d2a3cb0826..25d18e9d5b0b 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -79,14 +79,15 @@ TRACE_EVENT(ath11k_htt_ppdu_stats,
);
TRACE_EVENT(ath11k_htt_rxdesc,
- TP_PROTO(struct ath11k *ar, const void *data, size_t len),
+ TP_PROTO(struct ath11k *ar, const void *data, size_t log_type, size_t len),
- TP_ARGS(ar, data, len),
+ TP_ARGS(ar, data, log_type, len),
TP_STRUCT__entry(
__string(device, dev_name(ar->ab->dev))
__string(driver, dev_driver_string(ar->ab->dev))
__field(u16, len)
+ __field(u16, log_type)
__dynamic_array(u8, rxdesc, len)
),
@@ -94,14 +95,16 @@ TRACE_EVENT(ath11k_htt_rxdesc,
__assign_str(device, dev_name(ar->ab->dev));
__assign_str(driver, dev_driver_string(ar->ab->dev));
__entry->len = len;
+ __entry->log_type = log_type;
memcpy(__get_dynamic_array(rxdesc), data, len);
),
TP_printk(
- "%s %s rxdesc len %d",
+ "%s %s rxdesc len %d type %d",
__get_str(driver),
__get_str(device),
- __entry->len
+ __entry->len,
+ __entry->log_type
)
);
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 6c253eae9d06..2d0acfb748cf 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -360,6 +360,10 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
+ pdev_cap->nss_ratio_enabled =
+ WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
+ pdev_cap->nss_ratio_info =
+ WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
} else {
return -EINVAL;
}
@@ -403,18 +407,18 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
sizeof(struct ath11k_ppe_threshold));
- }
- cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
- cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
- cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
- cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
- cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
- cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
- memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
- sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
- memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
- sizeof(struct ath11k_ppe_threshold));
+ cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
+ sizeof(struct ath11k_ppe_threshold));
+ }
return 0;
}
@@ -783,14 +787,26 @@ int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id)
static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan,
struct wmi_vdev_start_req_arg *arg)
{
+ u32 center_freq1 = arg->channel.band_center_freq1;
+
memset(chan, 0, sizeof(*chan));
chan->mhz = arg->channel.freq;
chan->band_center_freq1 = arg->channel.band_center_freq1;
- if (arg->channel.mode == MODE_11AC_VHT80_80)
+
+ if (arg->channel.mode == MODE_11AX_HE160) {
+ if (arg->channel.freq > arg->channel.band_center_freq1)
+ chan->band_center_freq1 = center_freq1 + 40;
+ else
+ chan->band_center_freq1 = center_freq1 - 40;
+
+ chan->band_center_freq2 = arg->channel.band_center_freq1;
+
+ } else if (arg->channel.mode == MODE_11AC_VHT80_80) {
chan->band_center_freq2 = arg->channel.band_center_freq2;
- else
+ } else {
chan->band_center_freq2 = 0;
+ }
chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
if (arg->channel.passive)
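A worked example of the HE160 fixup above: a 160 MHz channel whose primary is at 5180 MHz (channel 36) has its 160 MHz centre at 5250 MHz. Since freq (5180) is below band_center_freq1 (5250), firmware is given:

/* band_center_freq1 = 5250 - 40 = 5210  (centre of the primary 80 MHz)
 * band_center_freq2 = 5250              (centre of the full 160 MHz)
 */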
@@ -868,6 +884,8 @@ int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
}
cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
+ if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED;
ptr = skb->data + sizeof(*cmd);
chan = ptr;
@@ -1339,6 +1357,7 @@ int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->req_type = type;
+ cmd->pdev_id = ar->pdev->pdev_id;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI bss chan info req type %d\n", type);
@@ -1903,8 +1922,8 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
FIELD_PREP(WMI_TLV_LEN,
sizeof(*he_mcs) - TLV_HDR_SIZE);
- he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i];
- he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i];
+ he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i];
+ he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i];
ptr += sizeof(*he_mcs);
}
@@ -2285,7 +2304,7 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
u32 *reg1, *reg2;
- tchan_info = &chan_list->ch_param[0];
+ tchan_info = chan_list->ch_param;
while (chan_list->nallchans) {
len = sizeof(*cmd) + TLV_HDR_SIZE;
max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
@@ -3495,7 +3514,7 @@ ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
- wmi_cfg->flag1 = tg_cfg->atf_config;
+ wmi_cfg->flag1 = tg_cfg->flag1;
wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
wmi_cfg->sched_params = tg_cfg->sched_params;
wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
@@ -5234,9 +5253,11 @@ ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
dst->hw_queued = src->hw_queued;
dst->hw_reaped = src->hw_reaped;
dst->underrun = src->underrun;
+ dst->hw_paused = src->hw_paused;
dst->tx_abort = src->tx_abort;
dst->mpdus_requeued = src->mpdus_requeued;
dst->tx_ko = src->tx_ko;
+ dst->tx_xretry = src->tx_xretry;
dst->data_rc = src->data_rc;
dst->self_triggers = src->self_triggers;
dst->sw_retry_failure = src->sw_retry_failure;
@@ -5247,6 +5268,16 @@ ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure;
dst->phy_underrun = src->phy_underrun;
dst->txop_ovf = src->txop_ovf;
+ dst->seq_posted = src->seq_posted;
+ dst->seq_failed_queueing = src->seq_failed_queueing;
+ dst->seq_completed = src->seq_completed;
+ dst->seq_restarted = src->seq_restarted;
+ dst->mu_seq_posted = src->mu_seq_posted;
+ dst->mpdus_sw_flush = src->mpdus_sw_flush;
+ dst->mpdus_hw_filter = src->mpdus_hw_filter;
+ dst->mpdus_truncated = src->mpdus_truncated;
+ dst->mpdus_ack_failed = src->mpdus_ack_failed;
+ dst->mpdus_expired = src->mpdus_expired;
}
static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
@@ -5266,6 +5297,7 @@ static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
dst->phy_errs = src->phy_errs;
dst->phy_err_drop = src->phy_err_drop;
dst->mpdu_errs = src->mpdu_errs;
+ dst->rx_ovfl_errs = src->rx_ovfl_errs;
}
static void
@@ -5503,11 +5535,15 @@ ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Num underruns", pdev->underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num HW Paused", pdev->hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PPDUs cleaned", pdev->tx_abort);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs requeued", pdev->mpdus_requeued);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "Excessive retries", pdev->tx_ko);
+ "PPDU OK", pdev->tx_ko);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Excessive retries", pdev->tx_xretry);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"HW rate", pdev->data_rc);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
@@ -5531,6 +5567,26 @@ ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
"PHY underrun", pdev->phy_underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"MPDU is more than txop limit", pdev->txop_ovf);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num sequences posted", pdev->seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num seq failed queueing ", pdev->seq_failed_queueing);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num sequences completed ", pdev->seq_completed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num sequences restarted ", pdev->seq_restarted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MU sequences posted ", pdev->mu_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS SW flushed ", pdev->mpdus_sw_flush);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS HW filtered ", pdev->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS truncated ", pdev->mpdus_truncated);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS ACK failed ", pdev->mpdus_ack_failed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS expired ", pdev->mpdus_expired);
*length = len;
}
@@ -5575,6 +5631,8 @@ ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
"PHY errors drops", pdev->phy_err_drop);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Overflow errors", pdev->rx_ovfl_errs);
*length = len;
}
@@ -5792,6 +5850,17 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
pdev_idx = reg_info->phy_id;
+ /* Avoid default reg rule updates sent during FW recovery if
+ * the default regd is already available
+ */
+ spin_lock(&ab->base_lock);
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
+ ab->default_regd[pdev_idx]) {
+ spin_unlock(&ab->base_lock);
+ goto mem_free;
+ }
+ spin_unlock(&ab->base_lock);
+
if (pdev_idx >= ab->num_radios) {
/* Process the event for phy0 only if single_pdev_only
* is true. If pdev_idx is valid but not 0, discard the
@@ -5829,10 +5898,10 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
}
spin_lock(&ab->base_lock);
- if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
- /* Once mac is registered, ar is valid and all CC events from
- * fw is considered to be received due to user requests
- * currently.
+ if (ab->default_regd[pdev_idx]) {
+ /* The initial rules from FW after WMI Init are used to build
+ * the default regd. From then on, any rules updated for
+ * the pdev could be due to user reg changes.
* Free previously built regd before assigning the newly
* generated regd to ar. NULL pointer handling will be
* taken care of by kfree itself.
@@ -5842,13 +5911,9 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
ab->new_regd[pdev_idx] = regd;
ieee80211_queue_work(ar->hw, &ar->regd_update_work);
} else {
- /* Multiple events for the same *ar is not expected. But we
- * can still clear any previously stored default_regd if we
- * are receiving this event for the same radio by mistake.
- * NULL pointer handling will be taken care by kfree itself.
+ /* This regd would be applied during mac registration and is
+ * held constant throughout for regd intersection purposes
*/
- kfree(ab->default_regd[pdev_idx]);
- /* This regd would be applied during mac registration */
ab->default_regd[pdev_idx] = regd;
}
ab->dfs_region = reg_info->dfs_region;
@@ -6119,8 +6184,10 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
- if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ) {
+ if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ &&
+ rx_ev.chan_freq <= ATH11K_MAX_6G_FREQ) {
status->band = NL80211_BAND_6GHZ;
+ status->freq = rx_ev.chan_freq;
} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
status->band = NL80211_BAND_2GHZ;
} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) {
@@ -6141,8 +6208,10 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
sband = &ar->mac.sbands[status->band];
- status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
- status->band);
+ if (status->band != NL80211_BAND_6GHZ)
+ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+ status->band);
+
status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
@@ -6220,8 +6289,9 @@ exit:
rcu_read_unlock();
}
-static struct ath11k *ath11k_get_ar_on_scan_abort(struct ath11k_base *ab,
- u32 vdev_id)
+static struct ath11k *ath11k_get_ar_on_scan_state(struct ath11k_base *ab,
+ u32 vdev_id,
+ enum ath11k_scan_state state)
{
int i;
struct ath11k_pdev *pdev;
@@ -6233,7 +6303,7 @@ static struct ath11k *ath11k_get_ar_on_scan_abort(struct ath11k_base *ab,
ar = pdev->ar;
spin_lock_bh(&ar->data_lock);
- if (ar->scan.state == ATH11K_SCAN_ABORTING &&
+ if (ar->scan.state == state &&
ar->scan.vdev_id == vdev_id) {
spin_unlock_bh(&ar->data_lock);
return ar;
@@ -6263,10 +6333,15 @@ static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
* aborting scan's vdev id matches this event info.
*/
if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED &&
- scan_ev.reason == WMI_SCAN_REASON_CANCELLED)
- ar = ath11k_get_ar_on_scan_abort(ab, scan_ev.vdev_id);
- else
+ scan_ev.reason == WMI_SCAN_REASON_CANCELLED) {
+ ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
+ ATH11K_SCAN_ABORTING);
+ if (!ar)
+ ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
+ ATH11K_SCAN_RUNNING);
+ } else {
ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id);
+ }
if (!ar) {
ath11k_warn(ab, "Received scan event for unknown vdev");
@@ -6301,6 +6376,8 @@ static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
ath11k_wmi_event_scan_start_failed(ar);
break;
case WMI_SCAN_EVENT_DEQUEUED:
+ __ath11k_mac_scan_finish(ar);
+ break;
case WMI_SCAN_EVENT_PREEMPTED:
case WMI_SCAN_EVENT_RESTARTED:
case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
@@ -7065,6 +7142,7 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_TWT_ENABLE_EVENTID:
case WMI_TWT_DISABLE_EVENTID:
case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
+ case WMI_PEER_CREATE_CONF_EVENTID:
ath11k_dbg(ab, ATH11K_DBG_WMI,
"ignoring unsupported event 0x%x\n", id);
break;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index d35c47e0b19d..0584e68e7593 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -119,6 +119,22 @@ enum {
WMI_HOST_WLAN_2G_5G_CAP = 0x3,
};
+/* Parameters used for WMI_VDEV_PARAM_AUTORATE_MISC_CFG command.
+ * Used only for HE auto rate mode.
+ */
+enum {
+ /* HE LTF related configuration */
+ WMI_HE_AUTORATE_LTF_1X = BIT(0),
+ WMI_HE_AUTORATE_LTF_2X = BIT(1),
+ WMI_HE_AUTORATE_LTF_4X = BIT(2),
+
+ /* HE GI related configuration */
+ WMI_AUTORATE_400NS_GI = BIT(8),
+ WMI_AUTORATE_800NS_GI = BIT(9),
+ WMI_AUTORATE_1600NS_GI = BIT(10),
+ WMI_AUTORATE_3200NS_GI = BIT(11),
+};
+
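Usage sketch: in auto-rate mode the selected GI and LTF bits are OR'ed into a single word and written through WMI_VDEV_PARAM_AUTORATE_MISC_CFG, as ath11k_mac_set_auto_rate_gi_ltf() does above — e.g. 1.6 us GI with 2x LTF:

u32 he_ar_gi_ltf = WMI_AUTORATE_1600NS_GI | WMI_HE_AUTORATE_LTF_2X;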
/*
* wmi command groups.
*/
@@ -647,6 +663,9 @@ enum wmi_tlv_event_id {
WMI_PEER_RESERVED9_EVENTID,
WMI_PEER_RESERVED10_EVENTID,
WMI_PEER_OPER_MODE_CHANGE_EVENTID,
+ WMI_PEER_TX_PN_RESPONSE_EVENTID,
+ WMI_PEER_CFR_CAPTURE_EVENTID,
+ WMI_PEER_CREATE_CONF_EVENTID,
WMI_MGMT_RX_EVENTID = WMI_TLV_CMD(WMI_GRP_MGMT),
WMI_HOST_SWBA_EVENTID,
WMI_TBTTOFFSET_UPDATE_EVENTID,
@@ -1044,7 +1063,9 @@ enum wmi_tlv_vdev_param {
WMI_VDEV_PARAM_HE_RANGE_EXT,
WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
+ WMI_VDEV_PARAM_HE_LTF = 0x74,
WMI_VDEV_PARAM_BA_MODE = 0x7e,
+ WMI_VDEV_PARAM_AUTORATE_MISC_CFG = 0x80,
WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99,
WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
@@ -2128,6 +2149,24 @@ enum wmi_direct_buffer_module {
WMI_DIRECT_BUF_MAX
};
+/* enum wmi_nss_ratio - NSS ratio received from FW during the service ready
+ * ext event
+ * WMI_NSS_RATIO_1BY2_NSS - Max NSS at 160 MHz equals half of the max NSS
+ * at 80 MHz
+ * WMI_NSS_RATIO_3BY4_NSS - Max NSS at 160 MHz equals 3/4 of the max NSS
+ * at 80 MHz
+ * WMI_NSS_RATIO_1_NSS - Max NSS at 160 MHz equals the max NSS at 80 MHz
+ * WMI_NSS_RATIO_2_NSS - Max NSS at 160 MHz equals twice the max NSS at
+ * 80 MHz
+ */
+
+enum wmi_nss_ratio {
+ WMI_NSS_RATIO_1BY2_NSS = 0x0,
+ WMI_NSS_RATIO_3BY4_NSS = 0x1,
+ WMI_NSS_RATIO_1_NSS = 0x2,
+ WMI_NSS_RATIO_2_NSS = 0x3,
+};
+
struct wmi_host_pdev_band_to_mac {
u32 pdev_id;
u32 start_freq;
@@ -2244,6 +2283,8 @@ struct wmi_init_cmd {
u32 num_host_mem_chunks;
} __packed;
+#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5)
+
struct wmi_resource_config {
u32 tlv_header;
u32 num_vdevs;
@@ -2370,6 +2411,12 @@ struct wmi_hw_mode_capabilities {
} __packed;
#define WMI_MAX_HECAP_PHY_SIZE (3)
+#define WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS BIT(0)
+#define WMI_NSS_RATIO_ENABLE_DISABLE_GET(_val) \
+ FIELD_GET(WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS, _val)
+#define WMI_NSS_RATIO_INFO_BITPOS GENMASK(4, 1)
+#define WMI_NSS_RATIO_INFO_GET(_val) \
+ FIELD_GET(WMI_NSS_RATIO_INFO_BITPOS, _val)
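Decoding sketch for the accessors above (the raw word is invented for illustration):

u32 nss_ratio = 0x5;	/* enable bit set, info field = 2 */
bool enabled  = WMI_NSS_RATIO_ENABLE_DISABLE_GET(nss_ratio);	/* true */
u32  info     = WMI_NSS_RATIO_INFO_GET(nss_ratio);	/* WMI_NSS_RATIO_1_NSS */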
struct wmi_mac_phy_capabilities {
u32 hw_mode_id;
@@ -2403,6 +2450,12 @@ struct wmi_mac_phy_capabilities {
u32 he_cap_info_2g_ext;
u32 he_cap_info_5g_ext;
u32 he_cap_info_internal;
+ u32 wireless_modes;
+ u32 low_2ghz_chan_freq;
+ u32 high_2ghz_chan_freq;
+ u32 low_5ghz_chan_freq;
+ u32 high_5ghz_chan_freq;
+ u32 nss_ratio;
} __packed;
struct wmi_hal_reg_capabilities_ext {
@@ -2527,6 +2580,7 @@ struct wmi_vdev_down_cmd {
#define WMI_VDEV_START_HIDDEN_SSID BIT(0)
#define WMI_VDEV_START_PMF_ENABLED BIT(1)
#define WMI_VDEV_START_LDPC_RX_ENABLED BIT(3)
+#define WMI_VDEV_START_HW_ENCRYPTION_DISABLED BIT(4)
struct wmi_ssid {
u32 ssid_len;
@@ -2960,6 +3014,7 @@ struct wmi_pdev_bss_chan_info_req_cmd {
u32 tlv_header;
/* ref wmi_bss_chan_info_req_type */
u32 req_type;
+ u32 pdev_id;
} __packed;
struct wmi_ap_ps_peer_cmd {
@@ -3608,7 +3663,7 @@ struct wmi_stop_scan_cmd {
struct scan_chan_list_params {
u32 pdev_id;
u16 nallchans;
- struct channel_param ch_param[1];
+ struct channel_param ch_param[];
};
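With ch_param[] converted from a one-element array to a real flexible
array member, callers size the allocation with struct_size() instead of
open-coded arithmetic. A minimal allocation sketch, assuming
<linux/overflow.h> (variable names illustrative):

    struct scan_chan_list_params *params;

    params = kzalloc(struct_size(params, ch_param, nallchans), GFP_KERNEL);
    if (!params)
        return -ENOMEM;
    params->nallchans = nallchans;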
struct wmi_scan_chan_list_cmd {
@@ -3917,7 +3972,11 @@ struct wmi_vht_rate_set {
struct wmi_he_rate_set {
u32 tlv_header;
+
+ /* MCS at which the peer can receive */
u32 rx_mcs_set;
+
+ /* MCS at which the peer can transmit */
u32 tx_mcs_set;
} __packed;
@@ -4056,7 +4115,6 @@ struct wmi_vdev_stopped_event {
} __packed;
struct wmi_pdev_bss_chan_info_event {
- u32 pdev_id;
u32 freq; /* Units in MHz */
u32 noise_floor; /* units are dBm */
/* rx clear - how often the channel was unused */
@@ -4074,6 +4132,7 @@ struct wmi_pdev_bss_chan_info_event {
/*rx_cycle cnt for my bss in 64bits format */
u32 rx_bss_cycle_count_low;
u32 rx_bss_cycle_count_high;
+ u32 pdev_id;
} __packed;
#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
@@ -4168,6 +4227,9 @@ struct wmi_pdev_stats_tx {
/* Num underruns */
s32 underrun;
+ /* Num hw paused */
+ u32 hw_paused;
+
/* Num PPDUs cleaned up in TX abort */
s32 tx_abort;
@@ -4177,6 +4239,8 @@ struct wmi_pdev_stats_tx {
/* excessive retries */
u32 tx_ko;
+ u32 tx_xretry;
+
/* data hw rate code */
u32 data_rc;
@@ -4206,6 +4270,40 @@ struct wmi_pdev_stats_tx {
/* MPDU is more than txop limit */
u32 txop_ovf;
+
+ /* Num sequences posted */
+ u32 seq_posted;
+
+ /* Num sequences failed in queueing */
+ u32 seq_failed_queueing;
+
+ /* Num sequences completed */
+ u32 seq_completed;
+
+ /* Num sequences restarted */
+ u32 seq_restarted;
+
+ /* Num of MU sequences posted */
+ u32 mu_seq_posted;
+
+ /* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT
+ * (Reset, channel change)
+ */
+ s32 mpdus_sw_flush;
+
+ /* Num MPDUs filtered by HW, all filter conditions (TTL expired) */
+ s32 mpdus_hw_filter;
+
+ /* Num MPDUs truncated by PDG (TXOP, TBTT,
+ * PPDU_duration based on rate, dyn_bw)
+ */
+ s32 mpdus_truncated;
+
+ /* Num MPDUs that were tried but didn't receive an ACK or BA */
+ s32 mpdus_ack_failed;
+
+ /* Num MPDUs that were dropped due to expiry */
+ s32 mpdus_expired;
} __packed;
struct wmi_pdev_stats_rx {
@@ -4240,6 +4338,9 @@ struct wmi_pdev_stats_rx {
/* Number of mpdu errors - FCS, MIC, ENC etc. */
s32 mpdu_errs;
+
+ /* Num overflow errors */
+ s32 rx_ovfl_errs;
} __packed;
struct wmi_pdev_stats {
@@ -5014,7 +5115,7 @@ struct target_resource_config {
u32 vo_minfree;
u32 rx_batchmode;
u32 tt_support;
- u32 atf_config;
+ u32 flag1;
u32 iphdr_pad_config;
u32 qwrap_config:16,
alloc_frag_desc_for_data_pkt:16;
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index f35cd8de228e..6914b37bb0fb 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -3,9 +3,7 @@ config ATH5K
tristate "Atheros 5xxx wireless cards support"
depends on (PCI || ATH25) && MAC80211
select ATH_COMMON
- select MAC80211_LEDS
- select LEDS_CLASS
- select NEW_LEDS
+ select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
select ATH5K_AHB if ATH25
select ATH5K_PCI if !ATH25
help
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 6a2a16856763..33e9928af363 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -89,7 +89,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
void ath5k_led_enable(struct ath5k_hw *ah)
{
- if (test_bit(ATH_STAT_LEDSOFT, ah->status)) {
+ if (IS_ENABLED(CONFIG_MAC80211_LEDS) &&
+ test_bit(ATH_STAT_LEDSOFT, ah->status)) {
ath5k_hw_set_gpio_output(ah, ah->led_pin);
ath5k_led_off(ah);
}
@@ -104,7 +105,8 @@ static void ath5k_led_on(struct ath5k_hw *ah)
void ath5k_led_off(struct ath5k_hw *ah)
{
- if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
+ if (!IS_ENABLED(CONFIG_MAC80211_LEDS) ||
+ !test_bit(ATH_STAT_LEDSOFT, ah->status))
return;
ath5k_hw_set_gpio(ah, ah->led_pin, !ah->led_on);
}
@@ -146,7 +148,7 @@ ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led,
static void
ath5k_unregister_led(struct ath5k_led *led)
{
- if (!led->ah)
+ if (!IS_ENABLED(CONFIG_MAC80211_LEDS) || !led->ah)
return;
led_classdev_unregister(&led->led_dev);
ath5k_led_off(led->ah);
@@ -169,7 +171,7 @@ int ath5k_init_leds(struct ath5k_hw *ah)
char name[ATH5K_LED_MAX_NAME_LEN + 1];
const struct pci_device_id *match;
- if (!ah->pdev)
+ if (!IS_ENABLED(CONFIG_MAC80211_LEDS) || !ah->pdev)
return 0;
#ifdef CONFIG_ATH5K_AHB
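The IS_ENABLED(CONFIG_MAC80211_LEDS) guards used throughout this file
evaluate to a compile-time 0 or 1, so the compiler drops the LED paths
entirely when the option is off. A userspace sketch of the preprocessor
trick behind IS_ENABLED() (the macro names mirror include/linux/kconfig.h):

    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define __is_defined(x) ___is_defined(x)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

    #define CONFIG_FOO 1
    /* __is_defined(CONFIG_FOO) expands to 1; __is_defined(CONFIG_BAR),
     * which is undefined, expands to 0 -- both are plain integer
     * constants, so `if (__is_defined(CONFIG_FOO))` dead-codes the
     * other branch. */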
diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
index 56d1a7764b9f..708c8969b503 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
+++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
@@ -19,9 +19,14 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/ath9k_platform.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/workqueue.h>
struct owl_ctx {
+ struct pci_dev *pdev;
struct completion eeprom_load;
+ struct work_struct work;
+ struct nvmem_cell *cell;
};
#define EEPROM_FILENAME_LEN 100
@@ -42,6 +47,12 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data,
u32 bar0;
bool swap_needed = false;
+ /* also note that we are doing u16 operations on the data */
+ if (cal_len > 4096 || cal_len < 0x200 || (cal_len & 1) == 1) {
+ dev_err(&pdev->dev, "eeprom has an invalid size.\n");
+ return -EINVAL;
+ }
+
if (*cal_data != AR5416_EEPROM_MAGIC) {
if (*cal_data != swab16(AR5416_EEPROM_MAGIC)) {
dev_err(&pdev->dev, "invalid calibration data\n");
@@ -99,38 +110,31 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data,
return 0;
}
-static void owl_fw_cb(const struct firmware *fw, void *context)
+static void owl_rescan(struct pci_dev *pdev)
{
- struct pci_dev *pdev = (struct pci_dev *)context;
- struct owl_ctx *ctx = (struct owl_ctx *)pci_get_drvdata(pdev);
- struct pci_bus *bus;
-
- complete(&ctx->eeprom_load);
-
- if (!fw) {
- dev_err(&pdev->dev, "no eeprom data received.\n");
- goto release;
- }
-
- /* also note that we are doing *u16 operations on the file */
- if (fw->size > 4096 || fw->size < 0x200 || (fw->size & 1) == 1) {
- dev_err(&pdev->dev, "eeprom file has an invalid size.\n");
- goto release;
- }
-
- if (ath9k_pci_fixup(pdev, (const u16 *)fw->data, fw->size))
- goto release;
+ struct pci_bus *bus = pdev->bus;
pci_lock_rescan_remove();
- bus = pdev->bus;
pci_stop_and_remove_bus_device(pdev);
/* the device should come back with the proper
* ProductId. But we have to initiate a rescan.
*/
pci_rescan_bus(bus);
pci_unlock_rescan_remove();
+}
+
+static void owl_fw_cb(const struct firmware *fw, void *context)
+{
+ struct owl_ctx *ctx = (struct owl_ctx *)context;
+
+ complete(&ctx->eeprom_load);
-release:
+ if (fw) {
+ ath9k_pci_fixup(ctx->pdev, (const u16 *)fw->data, fw->size);
+ owl_rescan(ctx->pdev);
+ } else {
+ dev_err(&ctx->pdev->dev, "no eeprom data received.\n");
+ }
release_firmware(fw);
}
@@ -152,6 +156,43 @@ static const char *owl_get_eeprom_name(struct pci_dev *pdev)
return eeprom_name;
}
+static void owl_nvmem_work(struct work_struct *work)
+{
+ struct owl_ctx *ctx = container_of(work, struct owl_ctx, work);
+ void *buf;
+ size_t len;
+
+ complete(&ctx->eeprom_load);
+
+ buf = nvmem_cell_read(ctx->cell, &len);
+ if (!IS_ERR(buf)) {
+ ath9k_pci_fixup(ctx->pdev, buf, len);
+ kfree(buf);
+ owl_rescan(ctx->pdev);
+ } else {
+ dev_err(&ctx->pdev->dev, "no nvmem data received.\n");
+ }
+}
+
+static int owl_nvmem_probe(struct owl_ctx *ctx)
+{
+ int err;
+
+ ctx->cell = devm_nvmem_cell_get(&ctx->pdev->dev, "calibration");
+ if (IS_ERR(ctx->cell)) {
+ err = PTR_ERR(ctx->cell);
+ if (err == -ENOENT || err == -EOPNOTSUPP)
+ return 1; /* not present, try firmware_request */
+
+ return err;
+ }
+
+ INIT_WORK(&ctx->work, owl_nvmem_work);
+ schedule_work(&ctx->work);
+
+ return 0;
+}
+
static int owl_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -164,21 +205,27 @@ static int owl_probe(struct pci_dev *pdev,
pcim_pin_device(pdev);
- eeprom_name = owl_get_eeprom_name(pdev);
- if (!eeprom_name) {
- dev_err(&pdev->dev, "no eeprom filename found.\n");
- return -ENODEV;
- }
-
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
init_completion(&ctx->eeprom_load);
+ ctx->pdev = pdev;
pci_set_drvdata(pdev, ctx);
+
+ err = owl_nvmem_probe(ctx);
+ if (err <= 0)
+ return err;
+
+ eeprom_name = owl_get_eeprom_name(pdev);
+ if (!eeprom_name) {
+ dev_err(&pdev->dev, "no eeprom filename found.\n");
+ return -ENODEV;
+ }
+
err = request_firmware_nowait(THIS_MODULE, true, eeprom_name,
- &pdev->dev, GFP_KERNEL, pdev, owl_fw_cb);
+ &pdev->dev, GFP_KERNEL, ctx, owl_fw_cb);
if (err)
dev_err(&pdev->dev, "failed to request caldata (%d).\n", err);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index c22d457dbc54..e6b3cd49ea18 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -135,13 +135,23 @@ static bool ath9k_hw_nvram_read_firmware(const struct firmware *eeprom_blob,
offset, data);
}
+static bool ath9k_hw_nvram_read_nvmem(struct ath_hw *ah, off_t offset,
+ u16 *data)
+{
+ return ath9k_hw_nvram_read_array(ah->nvmem_blob,
+ ah->nvmem_blob_len / sizeof(u16),
+ offset, data);
+}
+
bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_platform_data *pdata = ah->dev->platform_data;
bool ret;
- if (ah->eeprom_blob)
+ if (ah->nvmem_blob)
+ ret = ath9k_hw_nvram_read_nvmem(ah, off, data);
+ else if (ah->eeprom_blob)
ret = ath9k_hw_nvram_read_firmware(ah->eeprom_blob, off, data);
else if (pdata && !pdata->use_eeprom)
ret = ath9k_hw_nvram_read_pdata(pdata, off, data);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index b7b65b1c90e8..096a206f49ed 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -977,6 +977,8 @@ struct ath_hw {
bool disable_5ghz;
const struct firmware *eeprom_blob;
+ u16 *nvmem_blob; /* devres managed */
+ size_t nvmem_blob_len;
struct ath_dynack dynack;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index e9a36dd7144f..1568730fc01e 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/nvmem-consumer.h>
#include <linux/relay.h>
#include <linux/dmi.h>
#include <net/ieee80211_radiotap.h>
@@ -568,6 +569,57 @@ static void ath9k_eeprom_release(struct ath_softc *sc)
release_firmware(sc->sc_ah->eeprom_blob);
}
+static int ath9k_nvmem_request_eeprom(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct nvmem_cell *cell;
+ void *buf;
+ size_t len;
+ int err;
+
+ cell = devm_nvmem_cell_get(sc->dev, "calibration");
+ if (IS_ERR(cell)) {
+ err = PTR_ERR(cell);
+
+ /* nvmem cell might not be defined, or the nvmem
+ * subsystem isn't included. In this case, follow
+ * the established "just return 0;" convention of
+ * ath9k_init_platform to say:
+ * "All good. Nothing to see here. Please go on."
+ */
+ if (err == -ENOENT || err == -EOPNOTSUPP)
+ return 0;
+
+ return err;
+ }
+
+ buf = nvmem_cell_read(cell, &len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ /* Run basic sanity checks on the returned nvmem cell length:
+ * it has to be a multiple of sizeof(u16), larger than 512 bytes,
+ * and smaller than AR9300_EEPROM_SIZE (16 KiB).
+ */
+ if ((len & 1) == 1 || len < 512 || len >= AR9300_EEPROM_SIZE) {
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ /* devres manages the calibration values release on shutdown */
+ ah->nvmem_blob = (u16 *)devm_kmemdup(sc->dev, buf, len, GFP_KERNEL);
+ kfree(buf);
+ if (!ah->nvmem_blob)
+ return -ENOMEM;
+
+ ah->nvmem_blob_len = len;
+ ah->ah_flags &= ~AH_USE_EEPROM;
+ ah->ah_flags |= AH_NO_EEP_SWAP;
+
+ return 0;
+}
+
static int ath9k_init_platform(struct ath_softc *sc)
{
struct ath9k_platform_data *pdata = sc->dev->platform_data;
@@ -704,6 +756,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
if (ret)
return ret;
+ ret = ath9k_nvmem_request_eeprom(sc);
+ if (ret)
+ return ret;
+
if (ath9k_led_active_high != -1)
ah->config.led_active_high = ath9k_led_active_high == 1;
diff --git a/drivers/net/wireless/ath/spectral_common.h b/drivers/net/wireless/ath/spectral_common.h
index 9c2e5458e425..e14f374f97d4 100644
--- a/drivers/net/wireless/ath/spectral_common.h
+++ b/drivers/net/wireless/ath/spectral_common.h
@@ -24,7 +24,6 @@
* could be acquired so far.
*/
#define SPECTRAL_ATH10K_MAX_NUM_BINS 256
-#define SPECTRAL_ATH11K_MAX_NUM_BINS 512
/* FFT sample format given to userspace via debugfs.
*
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
index 389b5e7129a6..6af306ae41ad 100644
--- a/drivers/net/wireless/ath/wcn36xx/debug.c
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -120,7 +120,7 @@ static ssize_t write_file_dump(struct file *file,
if (begin == NULL)
break;
- if (kstrtou32(begin, 0, &arg[i]) != 0)
+ if (kstrtos32(begin, 0, &arg[i]) != 0)
break;
}
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 455143c4164e..5f1f2480459a 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -3384,11 +3384,11 @@ struct tl_hal_flush_ac_rsp_msg {
struct wcn36xx_hal_enter_imps_req_msg {
struct wcn36xx_hal_msg_header header;
-};
+} __packed;
-struct wcn36xx_hal_exit_imps_req {
+struct wcn36xx_hal_exit_imps_req_msg {
struct wcn36xx_hal_msg_header header;
-};
+} __packed;
struct wcn36xx_hal_enter_bmps_req_msg {
struct wcn36xx_hal_msg_header header;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index ec913ec991f3..263af65a889a 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -432,6 +432,13 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_PS)
wcn36xx_change_ps(wcn, hw->conf.flags & IEEE80211_CONF_PS);
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ if (hw->conf.flags & IEEE80211_CONF_IDLE)
+ wcn36xx_smd_enter_imps(wcn);
+ else
+ wcn36xx_smd_exit_imps(wcn);
+ }
+
mutex_unlock(&wcn->conf_mutex);
return 0;
@@ -569,12 +576,14 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags) {
sta_priv->is_data_encrypted = true;
/* Reconfigure bss with encrypt_type */
- if (NL80211_IFTYPE_STATION == vif->type)
+ if (NL80211_IFTYPE_STATION == vif->type) {
wcn36xx_smd_config_bss(wcn,
vif,
sta,
sta->addr,
true);
+ wcn36xx_smd_config_sta(wcn, vif, sta);
+ }
wcn36xx_smd_set_stakey(wcn,
vif_priv->encrypt_type,
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 57fa857b290b..3979171c92dd 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -2184,6 +2184,59 @@ out:
return ret;
}
+int wcn36xx_smd_enter_imps(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_enter_imps_req_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_IMPS_REQ);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_enter_imps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_enter_imps response failed err=%d\n", ret);
+ goto out;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "Entered idle mode\n");
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_exit_imps(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_exit_imps_req_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_IMPS_REQ);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_exit_imps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_exit_imps response failed err=%d\n", ret);
+ goto out;
+ }
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "Exited idle mode\n");
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
{
struct wcn36xx_hal_set_power_params_req_msg msg_body;
@@ -2623,30 +2676,52 @@ static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
size_t len)
{
struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
- struct wcn36xx_vif *tmp;
+ struct wcn36xx_vif *vif_priv;
+ struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *bss_conf;
struct ieee80211_sta *sta;
+ bool found = false;
if (len != sizeof(*rsp)) {
wcn36xx_warn("Corrupted delete sta indication\n");
return -EIO;
}
- wcn36xx_dbg(WCN36XX_DBG_HAL, "delete station indication %pM index %d\n",
- rsp->addr2, rsp->sta_id);
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "delete station indication %pM index %d reason %d\n",
+ rsp->addr2, rsp->sta_id, rsp->reason_code);
- list_for_each_entry(tmp, &wcn->vif_list, list) {
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
rcu_read_lock();
- sta = ieee80211_find_sta(wcn36xx_priv_to_vif(tmp), rsp->addr2);
- if (sta)
- ieee80211_report_low_ack(sta, 0);
+ vif = wcn36xx_priv_to_vif(vif_priv);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ /* We could call ieee80211_find_sta too, but checking
+ * bss_conf is clearer.
+ */
+ bss_conf = &vif->bss_conf;
+ if (vif_priv->sta_assoc &&
+ !memcmp(bss_conf->bssid, rsp->addr2, ETH_ALEN)) {
+ found = true;
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "connection loss bss_index %d\n",
+ vif_priv->bss_index);
+ ieee80211_connection_loss(vif);
+ }
+ } else {
+ sta = ieee80211_find_sta(vif, rsp->addr2);
+ if (sta) {
+ found = true;
+ ieee80211_report_low_ack(sta, 0);
+ }
+ }
+
rcu_read_unlock();
- if (sta)
+ if (found)
return 0;
}
- wcn36xx_warn("STA with addr %pM and index %d not found\n",
- rsp->addr2,
- rsp->sta_id);
+ wcn36xx_warn("BSS or STA with addr %pM not found\n", rsp->addr2);
return -ENOENT;
}
@@ -3060,6 +3135,8 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_GTK_OFFLOAD_RSP:
case WCN36XX_HAL_GTK_OFFLOAD_GETINFO_RSP:
case WCN36XX_HAL_HOST_RESUME_RSP:
+ case WCN36XX_HAL_ENTER_IMPS_RSP:
+ case WCN36XX_HAL_EXIT_IMPS_RSP:
memcpy(wcn->hal_buf, buf, len);
wcn->hal_rsp_len = len;
complete(&wcn->hal_rsp_compl);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index d8bded03945d..5f98c1d01ae4 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -163,4 +163,7 @@ int wcn36xx_smd_wlan_host_suspend_ind(struct wcn36xx *wcn);
int wcn36xx_smd_host_resume(struct wcn36xx *wcn);
+int wcn36xx_smd_enter_imps(struct wcn36xx *wcn);
+int wcn36xx_smd_exit_imps(struct wcn36xx *wcn);
+
#endif /* _SMD_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index f7b96cd69242..fb727778312c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1783,8 +1783,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
val = WPA_AUTH_PSK;
break;
default:
- bphy_err(drvr, "invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
+ bphy_err(drvr, "invalid akm suite (%d)\n",
+ sme->crypto.akm_suites[0]);
return -EINVAL;
}
} else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
@@ -1816,8 +1816,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
profile->is_ft = true;
break;
default:
- bphy_err(drvr, "invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
+ bphy_err(drvr, "invalid akm suite (%d)\n",
+ sme->crypto.akm_suites[0]);
return -EINVAL;
}
} else if (val & WPA3_AUTH_SAE_PSK) {
@@ -1838,8 +1838,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
}
break;
default:
- bphy_err(drvr, "invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
+ bphy_err(drvr, "invalid akm suite (%d)\n",
+ sme->crypto.akm_suites[0]);
return -EINVAL;
}
}
@@ -7463,23 +7463,18 @@ static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2],
s32 found_index;
int i;
+ country_codes = drvr->settings->country_codes;
+ if (!country_codes) {
+ brcmf_dbg(TRACE, "No country codes configured for device\n");
+ return -EINVAL;
+ }
+
if ((alpha2[0] == ccreq->country_abbrev[0]) &&
(alpha2[1] == ccreq->country_abbrev[1])) {
brcmf_dbg(TRACE, "Country code already set\n");
return -EAGAIN;
}
- country_codes = drvr->settings->country_codes;
- if (!country_codes) {
- brcmf_dbg(TRACE, "No country codes configured for device, using ISO3166 code and 0 rev\n");
- memset(ccreq, 0, sizeof(*ccreq));
- ccreq->country_abbrev[0] = alpha2[0];
- ccreq->country_abbrev[1] = alpha2[1];
- ccreq->ccode[0] = alpha2[0];
- ccreq->ccode[1] = alpha2[1];
- return 0;
- }
-
found_index = -1;
for (i = 0; i < country_codes->table_size; i++) {
cc = &country_codes->table[i];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 6d5188b78f2d..0af452dca766 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -76,6 +76,16 @@ static const struct dmi_system_id dmi_platform_data[] = {
.driver_data = (void *)&acepc_t8_data,
},
{
+ /* Cyberbook T116 rugged tablet */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "20170531"),
+ },
+ /* The factory image nvram file is identical to the ACEPC T8 one */
+ .driver_data = (void *)&acepc_t8_data,
+ },
+ {
/* Match for the GPDwin which unfortunately uses somewhat
* generic dmi strings, which is why we test for 4 strings.
* Comparing against 23 other byt/cht boards, board_vendor
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index 2f7bc3a70c65..513c7e6421b2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -29,7 +29,7 @@ static int brcmf_of_get_country_codes(struct device *dev,
return (count == -EINVAL) ? 0 : count;
}
- cc = devm_kzalloc(dev, sizeof(*cc) + count * sizeof(*cce), GFP_KERNEL);
+ cc = devm_kzalloc(dev, struct_size(cc, table, count), GFP_KERNEL);
if (!cc)
return -ENOMEM;
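struct_size() is preferred over the open-coded form because it saturates
at SIZE_MAX on overflow, so the allocation fails instead of silently
under-allocating. A userspace sketch of the failure mode it closes
(the struct names are stand-ins, not the driver's):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct entry { char cc[4]; int rev; };
    struct table { int n; struct entry e[]; };

    int main(void)
    {
        size_t count = SIZE_MAX / sizeof(struct entry) + 1; /* hostile count */
        size_t open_coded = sizeof(struct table) + count * sizeof(struct entry);

        /* the multiplication wraps, so the "size" comes out tiny */
        printf("count=%zu open-coded size=%zu (wrapped)\n", count, open_coded);

        /* struct_size(t, e, count) would return SIZE_MAX here, and a
         * kzalloc(SIZE_MAX, ...) fails cleanly */
        return 0;
    }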
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index ada6ce32c1f1..9a99f482c84a 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -3777,7 +3777,7 @@ static int ipw_queue_tx_init(struct ipw_priv *priv,
dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count,
&q->q.dma_addr, GFP_KERNEL);
if (!q->bd) {
- IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
+ IPW_ERROR("dma_alloc_coherent(%zd) failed\n",
sizeof(q->bd[0]) * count);
kfree(q->txb);
q->txb = NULL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 0e97d5e6c644..9f706fffb592 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -160,6 +160,7 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
mvm->ptk_icvlen = key->icv_len;
mvm->gtk_ivlen = key->iv_len;
mvm->gtk_icvlen = key->icv_len;
+ mutex_unlock(&mvm->mutex);
/* don't upload key again */
return;
@@ -360,11 +361,11 @@ static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw,
if (sta) {
rsc = data->rsc->ucast_rsc;
} else {
- if (WARN_ON(data->gtks > ARRAY_SIZE(data->gtk_ids)))
+ if (WARN_ON(data->gtks >= ARRAY_SIZE(data->gtk_ids)))
return;
data->gtk_ids[data->gtks] = key->keyidx;
rsc = data->rsc->mcast_rsc[data->gtks % 2];
- if (WARN_ON(key->keyidx >
+ if (WARN_ON(key->keyidx >=
ARRAY_SIZE(data->rsc->mcast_key_id_map)))
return;
data->rsc->mcast_key_id_map[key->keyidx] = data->gtks % 2;
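Both WARN_ON() bounds checks above were off by one: `>` still admits an
index equal to the array size, one slot past the end. A tiny standalone
demonstration of why the guard must be `>=`:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        int map[4];

        for (unsigned int idx = 0; idx <= ARRAY_SIZE(map); idx++) {
            if (idx >= ARRAY_SIZE(map)) { /* correct guard */
                printf("rejected idx %u\n", idx);
                continue;
            }
            map[idx] = 0; /* always in bounds */
        }
        return 0;
    }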
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 25af88a3edce..e91f8e889df7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -662,12 +662,13 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
u32 *uid)
{
u32 id;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+ struct iwl_mvm_vif *mvmvif;
enum nl80211_iftype iftype;
if (!te_data->vif)
return false;
+ mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
iftype = te_data->vif->type;
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 61b2797a34a8..e3996ff99bad 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -547,6 +547,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name),
+ IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name),
IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index dba10d5a9bfd..23219f3747f8 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1867,8 +1867,8 @@ mac80211_hwsim_beacon(struct hrtimer *timer)
bcn_int -= data->bcn_delta;
data->bcn_delta = 0;
}
- hrtimer_forward(&data->beacon_timer, hrtimer_get_expires(timer),
- ns_to_ktime(bcn_int * NSEC_PER_USEC));
+ hrtimer_forward_now(&data->beacon_timer,
+ ns_to_ktime(bcn_int * NSEC_PER_USEC));
return HRTIMER_RESTART;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 0961f4a5e415..d62a20de3ada 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -908,16 +908,20 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
switch (type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
- priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_type = MWIFIEX_BSS_TYPE_STA;
break;
case NL80211_IFTYPE_P2P_CLIENT:
- priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
break;
case NL80211_IFTYPE_P2P_GO:
- priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
break;
case NL80211_IFTYPE_AP:
priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
break;
default:
mwifiex_dbg(adapter, ERROR,
@@ -939,6 +943,117 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
return 0;
}
+static bool
+is_vif_type_change_allowed(struct mwifiex_adapter *adapter,
+ enum nl80211_iftype old_iftype,
+ enum nl80211_iftype new_iftype)
+{
+ switch (old_iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ switch (new_iftype) {
+ case NL80211_IFTYPE_STATION:
+ return true;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return adapter->curr_iface_comb.p2p_intf !=
+ adapter->iface_limit.p2p_intf;
+ case NL80211_IFTYPE_AP:
+ return adapter->curr_iface_comb.uap_intf !=
+ adapter->iface_limit.uap_intf;
+ default:
+ return false;
+ }
+
+ case NL80211_IFTYPE_STATION:
+ switch (new_iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ return true;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return adapter->curr_iface_comb.p2p_intf !=
+ adapter->iface_limit.p2p_intf;
+ case NL80211_IFTYPE_AP:
+ return adapter->curr_iface_comb.uap_intf !=
+ adapter->iface_limit.uap_intf;
+ default:
+ return false;
+ }
+
+ case NL80211_IFTYPE_AP:
+ switch (new_iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ return adapter->curr_iface_comb.sta_intf !=
+ adapter->iface_limit.sta_intf;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return adapter->curr_iface_comb.p2p_intf !=
+ adapter->iface_limit.p2p_intf;
+ default:
+ return false;
+ }
+
+ case NL80211_IFTYPE_P2P_CLIENT:
+ switch (new_iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ return true;
+ case NL80211_IFTYPE_P2P_GO:
+ return true;
+ case NL80211_IFTYPE_AP:
+ return adapter->curr_iface_comb.uap_intf !=
+ adapter->iface_limit.uap_intf;
+ default:
+ return false;
+ }
+
+ case NL80211_IFTYPE_P2P_GO:
+ switch (new_iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ return true;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ return true;
+ case NL80211_IFTYPE_AP:
+ return adapter->curr_iface_comb.uap_intf !=
+ adapter->iface_limit.uap_intf;
+ default:
+ return false;
+ }
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static void
+update_vif_type_counter(struct mwifiex_adapter *adapter,
+ enum nl80211_iftype iftype,
+ int change)
+{
+ switch (iftype) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ adapter->curr_iface_comb.sta_intf += change;
+ break;
+ case NL80211_IFTYPE_AP:
+ adapter->curr_iface_comb.uap_intf += change;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ adapter->curr_iface_comb.p2p_intf += change;
+ break;
+ default:
+ mwifiex_dbg(adapter, ERROR,
+ "%s: Unsupported iftype passed: %d\n",
+ __func__, iftype);
+ break;
+ }
+}
+
static int
mwifiex_change_vif_to_p2p(struct net_device *dev,
enum nl80211_iftype curr_iftype,
@@ -955,13 +1070,6 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
adapter = priv->adapter;
- if (adapter->curr_iface_comb.p2p_intf ==
- adapter->iface_limit.p2p_intf) {
- mwifiex_dbg(adapter, ERROR,
- "cannot create multiple P2P ifaces\n");
- return -1;
- }
-
mwifiex_dbg(adapter, INFO,
"%s: changing role to p2p\n", dev->name);
@@ -970,6 +1078,10 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
if (mwifiex_init_new_priv_params(priv, dev, type))
return -1;
+ update_vif_type_counter(adapter, curr_iftype, -1);
+ update_vif_type_counter(adapter, type, +1);
+ dev->ieee80211_ptr->iftype = type;
+
switch (type) {
case NL80211_IFTYPE_P2P_CLIENT:
if (mwifiex_cfg80211_init_p2p_client(priv))
@@ -993,21 +1105,6 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
if (mwifiex_sta_init_cmd(priv, false, false))
return -1;
- switch (curr_iftype) {
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- adapter->curr_iface_comb.sta_intf--;
- break;
- case NL80211_IFTYPE_AP:
- adapter->curr_iface_comb.uap_intf--;
- break;
- default:
- break;
- }
-
- adapter->curr_iface_comb.p2p_intf++;
- dev->ieee80211_ptr->iftype = type;
-
return 0;
}
@@ -1027,15 +1124,6 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
adapter = priv->adapter;
- if ((curr_iftype != NL80211_IFTYPE_P2P_CLIENT &&
- curr_iftype != NL80211_IFTYPE_P2P_GO) &&
- (adapter->curr_iface_comb.sta_intf ==
- adapter->iface_limit.sta_intf)) {
- mwifiex_dbg(adapter, ERROR,
- "cannot create multiple station/adhoc ifaces\n");
- return -1;
- }
-
if (type == NL80211_IFTYPE_STATION)
mwifiex_dbg(adapter, INFO,
"%s: changing role to station\n", dev->name);
@@ -1047,26 +1135,17 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
return -1;
if (mwifiex_init_new_priv_params(priv, dev, type))
return -1;
+
+ update_vif_type_counter(adapter, curr_iftype, -1);
+ update_vif_type_counter(adapter, type, +1);
+ dev->ieee80211_ptr->iftype = type;
+
if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
HostCmd_ACT_GEN_SET, 0, NULL, true))
return -1;
if (mwifiex_sta_init_cmd(priv, false, false))
return -1;
- switch (curr_iftype) {
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
- adapter->curr_iface_comb.p2p_intf--;
- break;
- case NL80211_IFTYPE_AP:
- adapter->curr_iface_comb.uap_intf--;
- break;
- default:
- break;
- }
-
- adapter->curr_iface_comb.sta_intf++;
- dev->ieee80211_ptr->iftype = type;
return 0;
}
@@ -1086,13 +1165,6 @@ mwifiex_change_vif_to_ap(struct net_device *dev,
adapter = priv->adapter;
- if (adapter->curr_iface_comb.uap_intf ==
- adapter->iface_limit.uap_intf) {
- mwifiex_dbg(adapter, ERROR,
- "cannot create multiple AP ifaces\n");
- return -1;
- }
-
mwifiex_dbg(adapter, INFO,
"%s: changing role to AP\n", dev->name);
@@ -1100,27 +1172,17 @@ mwifiex_change_vif_to_ap(struct net_device *dev,
return -1;
if (mwifiex_init_new_priv_params(priv, dev, type))
return -1;
+
+ update_vif_type_counter(adapter, curr_iftype, -1);
+ update_vif_type_counter(adapter, type, +1);
+ dev->ieee80211_ptr->iftype = type;
+
if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
HostCmd_ACT_GEN_SET, 0, NULL, true))
return -1;
if (mwifiex_sta_init_cmd(priv, false, false))
return -1;
- switch (curr_iftype) {
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
- adapter->curr_iface_comb.p2p_intf--;
- break;
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- adapter->curr_iface_comb.sta_intf--;
- break;
- default:
- break;
- }
-
- adapter->curr_iface_comb.uap_intf++;
- dev->ieee80211_ptr->iftype = type;
return 0;
}
/*
@@ -1141,6 +1203,27 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
return -EBUSY;
}
+ if (type == NL80211_IFTYPE_UNSPECIFIED) {
+ mwifiex_dbg(priv->adapter, INFO,
+ "%s: no new type specified, keeping old type %d\n",
+ dev->name, curr_iftype);
+ return 0;
+ }
+
+ if (curr_iftype == type) {
+ mwifiex_dbg(priv->adapter, INFO,
+ "%s: interface already is of type %d\n",
+ dev->name, curr_iftype);
+ return 0;
+ }
+
+ if (!is_vif_type_change_allowed(priv->adapter, curr_iftype, type)) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: change from type %d to %d is not allowed\n",
+ dev->name, curr_iftype, type);
+ return -EOPNOTSUPP;
+ }
+
switch (curr_iftype) {
case NL80211_IFTYPE_ADHOC:
switch (type) {
@@ -1160,19 +1243,10 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_AP:
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
params);
- case NL80211_IFTYPE_UNSPECIFIED:
- mwifiex_dbg(priv->adapter, INFO,
- "%s: kept type as IBSS\n", dev->name);
- fallthrough;
- case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */
- return 0;
default:
- mwifiex_dbg(priv->adapter, ERROR,
- "%s: changing to %d not supported\n",
- dev->name, type);
- return -EOPNOTSUPP;
+ goto errnotsupp;
}
- break;
+
case NL80211_IFTYPE_STATION:
switch (type) {
case NL80211_IFTYPE_ADHOC:
@@ -1191,22 +1265,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_AP:
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
params);
- case NL80211_IFTYPE_UNSPECIFIED:
- mwifiex_dbg(priv->adapter, INFO,
- "%s: kept type as STA\n", dev->name);
- fallthrough;
- case NL80211_IFTYPE_STATION: /* This shouldn't happen */
- return 0;
default:
- mwifiex_dbg(priv->adapter, ERROR,
- "%s: changing to %d not supported\n",
- dev->name, type);
- return -EOPNOTSUPP;
+ goto errnotsupp;
}
- break;
+
case NL80211_IFTYPE_AP:
switch (type) {
case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
type, params);
break;
@@ -1214,69 +1280,60 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_GO:
return mwifiex_change_vif_to_p2p(dev, curr_iftype,
type, params);
- case NL80211_IFTYPE_UNSPECIFIED:
- mwifiex_dbg(priv->adapter, INFO,
- "%s: kept type as AP\n", dev->name);
- fallthrough;
- case NL80211_IFTYPE_AP: /* This shouldn't happen */
- return 0;
default:
- mwifiex_dbg(priv->adapter, ERROR,
- "%s: changing to %d not supported\n",
- dev->name, type);
- return -EOPNOTSUPP;
+ goto errnotsupp;
}
- break;
+
case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
+ if (mwifiex_cfg80211_deinit_p2p(priv))
+ return -EFAULT;
+
switch (type) {
- case NL80211_IFTYPE_STATION:
- if (mwifiex_cfg80211_deinit_p2p(priv))
- return -EFAULT;
- priv->adapter->curr_iface_comb.p2p_intf--;
- priv->adapter->curr_iface_comb.sta_intf++;
- dev->ieee80211_ptr->iftype = type;
- if (mwifiex_deinit_priv_params(priv))
- return -1;
- if (mwifiex_init_new_priv_params(priv, dev, type))
- return -1;
- if (mwifiex_sta_init_cmd(priv, false, false))
- return -1;
- break;
case NL80211_IFTYPE_ADHOC:
- if (mwifiex_cfg80211_deinit_p2p(priv))
- return -EFAULT;
+ case NL80211_IFTYPE_STATION:
return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
type, params);
- break;
+ case NL80211_IFTYPE_P2P_GO:
+ return mwifiex_change_vif_to_p2p(dev, curr_iftype,
+ type, params);
case NL80211_IFTYPE_AP:
- if (mwifiex_cfg80211_deinit_p2p(priv))
- return -EFAULT;
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
params);
- case NL80211_IFTYPE_UNSPECIFIED:
- mwifiex_dbg(priv->adapter, INFO,
- "%s: kept type as P2P\n", dev->name);
- fallthrough;
+ default:
+ goto errnotsupp;
+ }
+
+ case NL80211_IFTYPE_P2P_GO:
+ if (mwifiex_cfg80211_deinit_p2p(priv))
+ return -EFAULT;
+
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
+ type, params);
case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
- return 0;
+ return mwifiex_change_vif_to_p2p(dev, curr_iftype,
+ type, params);
+ case NL80211_IFTYPE_AP:
+ return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
+ params);
default:
- mwifiex_dbg(priv->adapter, ERROR,
- "%s: changing to %d not supported\n",
- dev->name, type);
- return -EOPNOTSUPP;
+ goto errnotsupp;
}
- break;
+
default:
- mwifiex_dbg(priv->adapter, ERROR,
- "%s: unknown iftype: %d\n",
- dev->name, dev->ieee80211_ptr->iftype);
- return -EOPNOTSUPP;
+ goto errnotsupp;
}
return 0;
+
+errnotsupp:
+ mwifiex_dbg(priv->adapter, ERROR,
+ "unsupported interface type transition: %d to %d\n",
+ curr_iftype, type);
+ return -EOPNOTSUPP;
}
static void
@@ -2997,7 +3054,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
- priv->bss_priority = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_priority = 0;
priv->bss_role = MWIFIEX_BSS_ROLE_STA;
priv->bss_started = 0;
@@ -3108,23 +3165,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
mwifiex_dev_debugfs_init(priv);
#endif
- switch (type) {
- case NL80211_IFTYPE_UNSPECIFIED:
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- adapter->curr_iface_comb.sta_intf++;
- break;
- case NL80211_IFTYPE_AP:
- adapter->curr_iface_comb.uap_intf++;
- break;
- case NL80211_IFTYPE_P2P_CLIENT:
- adapter->curr_iface_comb.p2p_intf++;
- break;
- default:
- /* This should be dead code; checked above */
- mwifiex_dbg(adapter, ERROR, "type not supported\n");
- return ERR_PTR(-EINVAL);
- }
+ update_vif_type_counter(adapter, type, +1);
return &priv->wdev;
@@ -3190,24 +3231,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
/* Clear the priv in adapter */
priv->netdev = NULL;
- switch (priv->bss_mode) {
- case NL80211_IFTYPE_UNSPECIFIED:
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- adapter->curr_iface_comb.sta_intf--;
- break;
- case NL80211_IFTYPE_AP:
- adapter->curr_iface_comb.uap_intf--;
- break;
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
- adapter->curr_iface_comb.p2p_intf--;
- break;
- default:
- mwifiex_dbg(adapter, ERROR,
- "del_virtual_intf: type not supported\n");
- break;
- }
+ update_vif_type_counter(adapter, priv->bss_mode, -1);
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
index 241305377e20..a9b5eb992220 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
@@ -62,8 +62,8 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
- pad = ((void *)skb->data - (sizeof(*local_tx_pd) + hroom)-
- NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1);
+ pad = ((uintptr_t)skb->data - (sizeof(*local_tx_pd) + hroom)) &
+ (MWIFIEX_DMA_ALIGN_SZ - 1);
skb_push(skb, sizeof(*local_tx_pd) + pad);
local_tx_pd = (struct txpd *) skb->data;
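The rewritten pad computation (here and in the identical uap_txrx.c hunk
below) replaces the old `(void *)skb->data - ... - NULL` idiom, which is
undefined pointer arithmetic, with a plain uintptr_t cast. A standalone
sketch of the alignment math (DMA_ALIGN_SZ stands in for
MWIFIEX_DMA_ALIGN_SZ):

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_ALIGN_SZ 8 /* stand-in for MWIFIEX_DMA_ALIGN_SZ */

    int main(void)
    {
        uint8_t buf[64];
        uint8_t *data = buf + 13; /* arbitrary unaligned payload start */
        size_t hdr = 24;          /* descriptor pushed in front */

        /* pad so that (data - hdr - pad) lands on an aligned address */
        size_t pad = ((uintptr_t)data - hdr) & (DMA_ALIGN_SZ - 1);

        printf("pad=%zu aligned=%d\n", pad,
               (((uintptr_t)data - hdr - pad) % DMA_ALIGN_SZ) == 0);
        return 0;
    }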
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 9bbdb8dfce62..245ff644f81e 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -475,8 +475,8 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
- pad = ((void *)skb->data - (sizeof(*txpd) + hroom) - NULL) &
- (MWIFIEX_DMA_ALIGN_SZ - 1);
+ pad = ((uintptr_t)skb->data - (sizeof(*txpd) + hroom)) &
+ (MWIFIEX_DMA_ALIGN_SZ - 1);
skb_push(skb, sizeof(*txpd) + pad);
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index 96973ec7bd9a..dc4bfe7be378 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -129,8 +129,7 @@ static void cfg_scan_result(enum scan_event scan_event,
info->frame_len,
(s32)info->rssi * 100,
GFP_KERNEL);
- if (!bss)
- cfg80211_put_bss(wiphy, bss);
+ cfg80211_put_bss(wiphy, bss);
} else if (scan_event == SCAN_EVENT_DONE) {
mutex_lock(&priv->scan_req_lock);
@@ -729,6 +728,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
{
struct wilc_vif *vif = netdev_priv(dev);
struct wilc_priv *priv = &vif->priv;
+ struct wilc *wilc = vif->wilc;
u32 i = 0;
u32 associatedsta = ~0;
u32 inactive_time = 0;
@@ -755,6 +755,9 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
} else if (vif->iftype == WILC_STATION_MODE) {
struct rf_info stats;
+ if (!wilc->initialized)
+ return -EBUSY;
+
wilc_get_statistics(vif, &stats);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL) |
@@ -1581,6 +1584,7 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled)
}
netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled);
+ wilc_set_wowlan_trigger(vif, enabled);
srcu_read_unlock(&wl->srcu, srcu_idx);
}
@@ -1683,6 +1687,7 @@ static void wlan_init_locks(struct wilc *wl)
mutex_init(&wl->rxq_cs);
mutex_init(&wl->cfg_cmd_lock);
mutex_init(&wl->vif_mutex);
+ mutex_init(&wl->deinit_lock);
spin_lock_init(&wl->txq_spinlock);
mutex_init(&wl->txq_add_to_head_cs);
@@ -1701,6 +1706,7 @@ void wlan_deinit_locks(struct wilc *wilc)
mutex_destroy(&wilc->cfg_cmd_lock);
mutex_destroy(&wilc->txq_add_to_head_cs);
mutex_destroy(&wilc->vif_mutex);
+ mutex_destroy(&wilc->deinit_lock);
cleanup_srcu_struct(&wilc->srcu);
}
@@ -1724,7 +1730,6 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
*wilc = wl;
wl->io_type = io_type;
wl->hif_func = ops;
- wl->chip_ps_state = WILC_CHIP_WAKEDUP;
for (i = 0; i < NQUEUES; i++)
INIT_LIST_HEAD(&wl->txq[i].txq_head.list);
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index a133736a7821..e69b9c7f3d31 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -23,6 +23,10 @@ struct wilc_set_multicast {
u8 *mc_list;
};
+struct host_if_wowlan_trigger {
+ u8 wowlan_trigger;
+};
+
struct wilc_del_all_sta {
u8 assoc_sta;
u8 mac[WILC_MAX_NUM_STA][ETH_ALEN];
@@ -34,6 +38,7 @@ union wilc_message_body {
struct wilc_set_multicast mc_info;
struct wilc_remain_ch remain_on_ch;
char *data;
+ struct host_if_wowlan_trigger wow_trigger;
};
struct host_if_msg {
@@ -962,6 +967,25 @@ error:
kfree(msg);
}
+void wilc_set_wowlan_trigger(struct wilc_vif *vif, bool enabled)
+{
+ int ret;
+ struct wid wid;
+ u8 wowlan_trigger = 0;
+
+ if (enabled)
+ wowlan_trigger = 1;
+
+ wid.id = WID_WOWLAN_TRIGGER;
+ wid.type = WID_CHAR;
+ wid.val = &wowlan_trigger;
+ wid.size = sizeof(char);
+
+ ret = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
+ if (ret)
+ pr_err("Failed to send wowlan trigger config packet\n");
+}
+
static void handle_scan_timer(struct work_struct *work)
{
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
@@ -1494,7 +1518,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
{
struct host_if_drv *hif_drv;
struct wilc_vif *vif = netdev_priv(dev);
- struct wilc *wilc = vif->wilc;
hif_drv = kzalloc(sizeof(*hif_drv), GFP_KERNEL);
if (!hif_drv)
@@ -1504,9 +1527,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
vif->hif_drv = hif_drv;
- if (wilc->clients_count == 0)
- mutex_init(&wilc->deinit_lock);
-
timer_setup(&vif->periodic_rssi, get_periodic_rssi, 0);
mod_timer(&vif->periodic_rssi, jiffies + msecs_to_jiffies(5000));
@@ -1518,8 +1538,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
hif_drv->p2p_timeout = 0;
- wilc->clients_count++;
-
return 0;
}
@@ -1550,7 +1568,6 @@ int wilc_deinit(struct wilc_vif *vif)
kfree(hif_drv);
vif->hif_drv = NULL;
- vif->wilc->clients_count--;
mutex_unlock(&vif->wilc->deinit_lock);
return result;
}
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h
index 58811911213b..cccd54ed0518 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.h
+++ b/drivers/net/wireless/microchip/wilc1000/hif.h
@@ -207,6 +207,7 @@ int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats);
int wilc_get_vif_idx(struct wilc_vif *vif);
int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power);
int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power);
+void wilc_set_wowlan_trigger(struct wilc_vif *vif, bool enabled);
void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length);
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index 86209b391a3d..79f73a72da57 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -264,9 +264,7 @@ struct wilc {
struct device *dev;
bool suspend_event;
- int clients_count;
struct workqueue_struct *hif_workqueue;
- enum chip_ps_states chip_ps_state;
struct wilc_cfg cfg;
void *bus_data;
struct net_device *monitor_dev;
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 42e03a701ae1..26ebf6664342 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -978,6 +978,7 @@ static const struct wilc_hif_func wilc_hif_sdio = {
.hif_sync_ext = wilc_sdio_sync_ext,
.enable_interrupt = wilc_sdio_enable_interrupt,
.disable_interrupt = wilc_sdio_disable_interrupt,
+ .hif_reset = wilc_sdio_reset,
};
static int wilc_sdio_resume(struct device *dev)
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index dd481dc0b5ce..640850f989dd 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -47,6 +47,8 @@ struct wilc_spi {
static const struct wilc_hif_func wilc_hif_spi;
+static int wilc_spi_reset(struct wilc *wilc);
+
/********************************************
*
* Spi protocol Function
@@ -144,6 +146,12 @@ struct wilc_spi_rsp_data {
u8 data[];
} __packed;
+struct wilc_spi_special_cmd_rsp {
+ u8 skip_byte;
+ u8 rsp_cmd_type;
+ u8 status;
+} __packed;
+
static int wilc_bus_probe(struct spi_device *spi)
{
int ret;
@@ -466,7 +474,7 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b,
}
r = (struct wilc_spi_rsp_data *)&rb[cmd_len];
- if (r->rsp_cmd_type != cmd) {
+ if (r->rsp_cmd_type != cmd && !clockless) {
if (!spi_priv->probing_crc)
dev_err(&spi->dev,
"Failed cmd, cmd (%02x), resp (%02x)\n",
@@ -474,7 +482,7 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b,
return -EINVAL;
}
- if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) {
+ if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS && !clockless) {
dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
r->status);
return -EINVAL;
@@ -563,14 +571,18 @@ static int wilc_spi_write_cmd(struct wilc *wilc, u8 cmd, u32 adr, u32 data,
}
r = (struct wilc_spi_rsp_data *)&rb[cmd_len];
- if (r->rsp_cmd_type != cmd) {
+ /*
+ * Clockless register operations might return unexpected responses,
+ * even when successful.
+ */
+ if (r->rsp_cmd_type != cmd && !clockless) {
dev_err(&spi->dev,
"Failed cmd response, cmd (%02x), resp (%02x)\n",
cmd, r->rsp_cmd_type);
return -EINVAL;
}
- if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) {
+ if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS && !clockless) {
dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
r->status);
return -EINVAL;
@@ -709,6 +721,61 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz)
return 0;
}
+static int wilc_spi_special_cmd(struct wilc *wilc, u8 cmd)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ struct wilc_spi *spi_priv = wilc->bus_data;
+ u8 wb[32], rb[32];
+ int cmd_len, resp_len = 0;
+ struct wilc_spi_cmd *c;
+ struct wilc_spi_special_cmd_rsp *r;
+
+ if (cmd != CMD_TERMINATE && cmd != CMD_REPEAT && cmd != CMD_RESET)
+ return -EINVAL;
+
+ memset(wb, 0x0, sizeof(wb));
+ memset(rb, 0x0, sizeof(rb));
+ c = (struct wilc_spi_cmd *)wb;
+ c->cmd_type = cmd;
+
+ if (cmd == CMD_RESET)
+ memset(c->u.simple_cmd.addr, 0xFF, 3);
+
+ cmd_len = offsetof(struct wilc_spi_cmd, u.simple_cmd.crc);
+ resp_len = sizeof(*r);
+
+ if (spi_priv->crc7_enabled) {
+ c->u.simple_cmd.crc[0] = wilc_get_crc7(wb, cmd_len);
+ cmd_len += 1;
+ }
+ if (cmd_len + resp_len > ARRAY_SIZE(wb)) {
+ dev_err(&spi->dev, "spi buffer size too small (%d) (%d) (%zu)\n",
+ cmd_len, resp_len, ARRAY_SIZE(wb));
+ return -EINVAL;
+ }
+
+ if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) {
+ dev_err(&spi->dev, "Failed cmd write, bus error...\n");
+ return -EINVAL;
+ }
+
+ r = (struct wilc_spi_special_cmd_rsp *)&rb[cmd_len];
+ if (r->rsp_cmd_type != cmd) {
+ if (!spi_priv->probing_crc)
+ dev_err(&spi->dev,
+ "Failed cmd response, cmd (%02x), resp (%02x)\n",
+ cmd, r->rsp_cmd_type);
+ return -EINVAL;
+ }
+
+ if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) {
+ dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
+ r->status);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data)
{
struct spi_device *spi = to_spi_device(wilc->dev);
@@ -895,6 +962,19 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
*
********************************************/
+static int wilc_spi_reset(struct wilc *wilc)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ struct wilc_spi *spi_priv = wilc->bus_data;
+ int result;
+
+ result = wilc_spi_special_cmd(wilc, CMD_RESET);
+ if (result && !spi_priv->probing_crc)
+ dev_err(&spi->dev, "Failed cmd reset\n");
+
+ return result;
+}
+
static int wilc_spi_deinit(struct wilc *wilc)
{
/*
@@ -1087,7 +1167,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
for (i = 0; (i < 3) && (nint > 0); i++, nint--)
reg |= BIT(i);
- ret = wilc_spi_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
+ ret = wilc_spi_write_reg(wilc, WILC_INTR2_ENABLE, reg);
if (ret) {
dev_err(&spi->dev, "Failed write reg (%08x)...\n",
WILC_INTR2_ENABLE);
@@ -1112,4 +1192,5 @@ static const struct wilc_hif_func wilc_hif_spi = {
.hif_block_tx_ext = wilc_spi_write,
.hif_block_rx_ext = wilc_spi_read,
.hif_sync_ext = wilc_spi_sync_ext,
+ .hif_reset = wilc_spi_reset,
};
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 200a103a0a85..ea81ef120fd1 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -10,6 +10,8 @@
#include "cfg80211.h"
#include "wlan_cfg.h"
+#define WAKE_UP_TRIAL_RETRY 10000
+
static inline bool is_wilc1000(u32 id)
{
return (id & (~WILC_CHIP_REV_FIELD)) == WILC_1000_BASE_ID;
@@ -425,6 +427,11 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev,
return 0;
}
+ if (!wilc->initialized) {
+ tx_complete_fn(tx_data, 0);
+ return 0;
+ }
+
tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC);
if (!tqe) {
@@ -474,6 +481,10 @@ int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
return 0;
}
+ if (!wilc->initialized) {
+ tx_complete_fn(priv, 0);
+ return 0;
+ }
tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC);
if (!tqe) {
@@ -611,60 +622,67 @@ EXPORT_SYMBOL_GPL(chip_allow_sleep);
void chip_wakeup(struct wilc *wilc)
{
- u32 reg, clk_status_reg;
- const struct wilc_hif_func *h = wilc->hif_func;
-
- if (wilc->io_type == WILC_HIF_SPI) {
- do {
- h->hif_read_reg(wilc, WILC_SPI_WAKEUP_REG, &reg);
- h->hif_write_reg(wilc, WILC_SPI_WAKEUP_REG,
- reg | WILC_SPI_WAKEUP_BIT);
- h->hif_write_reg(wilc, WILC_SPI_WAKEUP_REG,
- reg & ~WILC_SPI_WAKEUP_BIT);
-
- do {
- usleep_range(2000, 2500);
- wilc_get_chipid(wilc, true);
- } while (wilc_get_chipid(wilc, true) == 0);
- } while (wilc_get_chipid(wilc, true) == 0);
- } else if (wilc->io_type == WILC_HIF_SDIO) {
- h->hif_write_reg(wilc, WILC_SDIO_HOST_TO_FW_REG,
- WILC_SDIO_HOST_TO_FW_BIT);
- usleep_range(200, 400);
- h->hif_read_reg(wilc, WILC_SDIO_WAKEUP_REG, &reg);
- do {
- h->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG,
- reg | WILC_SDIO_WAKEUP_BIT);
- h->hif_read_reg(wilc, WILC_SDIO_CLK_STATUS_REG,
- &clk_status_reg);
-
- while (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT)) {
- usleep_range(2000, 2500);
+ u32 ret = 0;
+ u32 clk_status_val = 0, trials = 0;
+ u32 wakeup_reg, wakeup_bit;
+ u32 clk_status_reg, clk_status_bit;
+ u32 to_host_from_fw_reg, to_host_from_fw_bit;
+ u32 from_host_to_fw_reg, from_host_to_fw_bit;
+ const struct wilc_hif_func *hif_func = wilc->hif_func;
- h->hif_read_reg(wilc, WILC_SDIO_CLK_STATUS_REG,
- &clk_status_reg);
- }
- if (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT)) {
- h->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG,
- reg & ~WILC_SDIO_WAKEUP_BIT);
- }
- } while (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT));
+ if (wilc->io_type == WILC_HIF_SDIO) {
+ wakeup_reg = WILC_SDIO_WAKEUP_REG;
+ wakeup_bit = WILC_SDIO_WAKEUP_BIT;
+ clk_status_reg = WILC_SDIO_CLK_STATUS_REG;
+ clk_status_bit = WILC_SDIO_CLK_STATUS_BIT;
+ from_host_to_fw_reg = WILC_SDIO_HOST_TO_FW_REG;
+ from_host_to_fw_bit = WILC_SDIO_HOST_TO_FW_BIT;
+ to_host_from_fw_reg = WILC_SDIO_FW_TO_HOST_REG;
+ to_host_from_fw_bit = WILC_SDIO_FW_TO_HOST_BIT;
+ } else {
+ wakeup_reg = WILC_SPI_WAKEUP_REG;
+ wakeup_bit = WILC_SPI_WAKEUP_BIT;
+ clk_status_reg = WILC_SPI_CLK_STATUS_REG;
+ clk_status_bit = WILC_SPI_CLK_STATUS_BIT;
+ from_host_to_fw_reg = WILC_SPI_HOST_TO_FW_REG;
+ from_host_to_fw_bit = WILC_SPI_HOST_TO_FW_BIT;
+ to_host_from_fw_reg = WILC_SPI_FW_TO_HOST_REG;
+ to_host_from_fw_bit = WILC_SPI_FW_TO_HOST_BIT;
}
- if (wilc->chip_ps_state == WILC_CHIP_SLEEPING_MANUAL) {
- if (wilc_get_chipid(wilc, false) < WILC_1000_BASE_ID_2B) {
- u32 val32;
+ /* indicate host wakeup */
+ ret = hif_func->hif_write_reg(wilc, from_host_to_fw_reg,
+ from_host_to_fw_bit);
+ if (ret)
+ return;
- h->hif_read_reg(wilc, WILC_REG_4_TO_1_RX, &val32);
- val32 |= BIT(6);
- h->hif_write_reg(wilc, WILC_REG_4_TO_1_RX, val32);
+ /* Set wake-up bit */
+ ret = hif_func->hif_write_reg(wilc, wakeup_reg,
+ wakeup_bit);
+ if (ret)
+ return;
- h->hif_read_reg(wilc, WILC_REG_4_TO_1_TX_BANK0, &val32);
- val32 |= BIT(6);
- h->hif_write_reg(wilc, WILC_REG_4_TO_1_TX_BANK0, val32);
+ while (trials < WAKE_UP_TRIAL_RETRY) {
+ ret = hif_func->hif_read_reg(wilc, clk_status_reg,
+ &clk_status_val);
+ if (ret) {
+ pr_err("Bus error %d %x\n", ret, clk_status_val);
+ return;
}
+ if (clk_status_val & clk_status_bit)
+ break;
+
+ trials++;
}
- wilc->chip_ps_state = WILC_CHIP_WAKEDUP;
+ if (trials >= WAKE_UP_TRIAL_RETRY) {
+ pr_err("Failed to wake-up the chip\n");
+ return;
+ }
+ /* Sometimes SPI fails to read the clock registers after
+ * reading/writing the clockless registers
+ */
+ if (wilc->io_type == WILC_HIF_SPI)
+ wilc->hif_func->hif_reset(wilc);
}
EXPORT_SYMBOL_GPL(chip_wakeup);
@@ -1071,6 +1089,7 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
u32 addr, size, size2, blksz;
u8 *dma_buffer;
int ret = 0;
+ u32 reg = 0;
blksz = BIT(12);
@@ -1079,10 +1098,22 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
return -EIO;
offset = 0;
+ pr_debug("%s: Downloading firmware size = %d\n", __func__, buffer_size);
+
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
+
+ wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
+ reg &= ~BIT(10);
+ ret = wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, reg);
+ wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
+ if (reg & BIT(10))
+ pr_err("%s: Failed to reset\n", __func__);
+
+ release_bus(wilc, WILC_BUS_RELEASE_ONLY);
do {
addr = get_unaligned_le32(&buffer[offset]);
size = get_unaligned_le32(&buffer[offset + 4]);
- acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
offset += 8;
while (((int)size) && (offset < buffer_size)) {
if (size <= blksz)
@@ -1100,10 +1131,13 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
offset += size2;
size -= size2;
}
- release_bus(wilc, WILC_BUS_RELEASE_ONLY);
+ release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- if (ret)
+ if (ret) {
+ pr_err("%s Bus error\n", __func__);
goto fail;
+ }
+ pr_debug("%s Offset = %d\n", __func__, offset);
} while (offset < buffer_size);
fail:
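
For context, the download loop treats the firmware blob as a sequence of
sections, each prefixed by a little-endian 32-bit load address and a 32-bit
byte count, which is why offset advances by 8 before each chunked write. A
standalone sketch of that framing (the 8-byte header layout is taken from the
loop; the sample blob is made up):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t le32(const uint8_t *p)
	{
		return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
	}

	/* walk "addr(4) size(4) payload(size)" records, as the loop above does */
	static int walk_sections(const uint8_t *buf, size_t len)
	{
		size_t off = 0;

		while (off + 8 <= len) {
			uint32_t addr = le32(buf + off);
			uint32_t size = le32(buf + off + 4);

			off += 8;
			if (size > len - off)
				return -1;              /* truncated section */
			printf("section @0x%08x, %u bytes\n", addr, size);
			off += size;
		}
		return 0;
	}

	int main(void)
	{
		/* one 2-byte section loaded at 0x1000 */
		const uint8_t blob[] = { 0x00, 0x10, 0x00, 0x00,
					 0x02, 0x00, 0x00, 0x00, 0xaa, 0xbb };

		return walk_sections(blob, sizeof(blob));
	}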
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
index 771c25fa849b..13fde636aa0e 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
@@ -97,6 +97,8 @@
#define WILC_SPI_WAKEUP_REG 0x1
#define WILC_SPI_WAKEUP_BIT BIT(1)
+#define WILC_SPI_CLK_STATUS_REG 0x0f
+#define WILC_SPI_CLK_STATUS_BIT BIT(2)
#define WILC_SPI_HOST_TO_FW_REG 0x0b
#define WILC_SPI_HOST_TO_FW_BIT BIT(0)
@@ -300,7 +302,7 @@
#define ENABLE_RX_VMM (SEL_VMM_TBL1 | EN_VMM)
#define ENABLE_TX_VMM (SEL_VMM_TBL0 | EN_VMM)
/* time for expiring the completion of cfg packets */
-#define WILC_CFG_PKTS_TIMEOUT msecs_to_jiffies(2000)
+#define WILC_CFG_PKTS_TIMEOUT msecs_to_jiffies(3000)
#define IS_MANAGMEMENT 0x100
#define IS_MANAGMEMENT_CALLBACK 0x080
@@ -371,6 +373,7 @@ struct wilc_hif_func {
int (*hif_sync_ext)(struct wilc *wilc, int nint);
int (*enable_interrupt)(struct wilc *nic);
void (*disable_interrupt)(struct wilc *nic);
+ int (*hif_reset)(struct wilc *wilc);
};
#define WILC_MAX_CFG_FRAME_SIZE 1468
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
index fe2a7ed8e5cd..dba301378b7f 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
@@ -22,6 +22,7 @@ static const struct wilc_cfg_byte g_cfg_byte[] = {
{WID_STATUS, 0},
{WID_RSSI, 0},
{WID_LINKSPEED, 0},
+ {WID_WOWLAN_TRIGGER, 0},
{WID_NIL, 0}
};
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_if.h b/drivers/net/wireless/microchip/wilc1000/wlan_if.h
index f85fd575136d..6eb7eb4ac294 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_if.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_if.h
@@ -48,12 +48,6 @@ enum {
WILC_FW_MAX_PSPOLL_PS = 4
};
-enum chip_ps_states {
- WILC_CHIP_WAKEDUP = 0,
- WILC_CHIP_SLEEPING_AUTO = 1,
- WILC_CHIP_SLEEPING_MANUAL = 2
-};
-
enum bus_acquire {
WILC_BUS_ACQUIRE_ONLY = 0,
WILC_BUS_ACQUIRE_AND_WAKEUP = 1,
@@ -662,6 +656,7 @@ enum {
WID_LOG_TERMINAL_SWITCH = 0x00CD,
WID_TX_POWER = 0x00CE,
+ WID_WOWLAN_TRIGGER = 0x00CF,
/* EMAC Short WID list */
/* RTS Threshold */
/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index b5c67f656cfd..a3ffd1b0c9bc 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -1101,7 +1101,6 @@ static const struct usb_device_id rt2800usb_device_table[] = {
#ifdef CONFIG_RT2800USB_RT53XX
/* Arcadyan */
{ USB_DEVICE(0x043e, 0x7a12) },
- { USB_DEVICE(0x043e, 0x7a32) },
/* ASUS */
{ USB_DEVICE(0x0b05, 0x17e8) },
/* Azurewave */
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 774341b0005a..a42e2081b75f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -4460,13 +4460,17 @@ void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
{
+ struct ieee80211_hw *hw = priv->hw;
u32 val32;
u8 rate_idx = 0;
rate_cfg &= RESPONSE_RATE_BITMAP_ALL;
val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
- val32 &= ~RESPONSE_RATE_BITMAP_ALL;
+ if (hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
+ val32 &= RESPONSE_RATE_RRSR_INIT_5G;
+ else
+ val32 &= RESPONSE_RATE_RRSR_INIT_2G;
val32 |= rate_cfg;
rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index a2a31f374a82..438b65ba9640 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -516,6 +516,8 @@
#define REG_RESPONSE_RATE_SET 0x0440
#define RESPONSE_RATE_BITMAP_ALL 0xfffff
#define RESPONSE_RATE_RRSR_CCK_ONLY_1M 0xffff1
+#define RESPONSE_RATE_RRSR_INIT_2G 0x15f
+#define RESPONSE_RATE_RRSR_INIT_5G 0x150
#define RSR_1M BIT(0)
#define RSR_2M BIT(1)
#define RSR_5_5M BIT(2)
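
The two new init masks differ only in the low (CCK) bits: assuming the usual
RSR_6M..RSR_24M definitions at BIT(4)..BIT(8) that follow in this header,
0x15f keeps the 1/2/5.5/11M CCK rates plus 6/12/24M OFDM for 2.4 GHz, while
0x150 keeps only the 6/12/24M OFDM basic rates, since CCK does not exist on
5 GHz. A quick decode:

	#include <stdio.h>

	#define BIT(n) (1u << (n))

	/* subset of the RSR_* bits defined above */
	static const struct { unsigned bit; const char *rate; } rsr[] = {
		{ BIT(0), "1M" },  { BIT(1), "2M" },  { BIT(2), "5.5M" },
		{ BIT(3), "11M" }, { BIT(4), "6M" },  { BIT(5), "9M" },
		{ BIT(6), "12M" }, { BIT(7), "18M" }, { BIT(8), "24M" },
	};

	static void dump(const char *name, unsigned mask)
	{
		unsigned i;

		printf("%s:", name);
		for (i = 0; i < sizeof(rsr) / sizeof(rsr[0]); i++)
			if (mask & rsr[i].bit)
				printf(" %s", rsr[i].rate);
		printf("\n");
	}

	int main(void)
	{
		dump("RRSR_INIT_2G (0x15f)", 0x15f); /* 1M 2M 5.5M 11M 6M 12M 24M */
		dump("RRSR_INIT_5G (0x150)", 0x150); /* 6M 12M 24M: no CCK on 5 GHz */
		return 0;
	}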
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index dfd52cff5d02..682b23502e6e 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -12,6 +12,7 @@
#include "phy.h"
#include "reg.h"
#include "ps.h"
+#include "regd.h"
#ifdef CONFIG_RTW88_DEBUGFS
@@ -587,7 +588,7 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
struct rtw_power_params pwr_param = {0};
u8 bw = hal->current_band_width;
u8 ch = hal->current_channel;
- u8 regd = rtwdev->regd.txpwr_regd;
+ u8 regd = rtw_regd_get(rtwdev);
seq_printf(m, "regulatory: %s\n", rtw_get_regd_string(regd));
seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s %-4s) %-4s\n",
@@ -828,6 +829,38 @@ static int rtw_debugfs_get_coex_enable(struct seq_file *m, void *v)
return 0;
}
+static ssize_t rtw_debugfs_set_edcca_enable(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ bool input;
+ int err;
+
+ err = kstrtobool_from_user(buffer, count, &input);
+ if (err)
+ return err;
+
+ rtw_edcca_enabled = input;
+ rtw_phy_adaptivity_set_mode(rtwdev);
+
+ return count;
+}
+
+static int rtw_debugfs_get_edcca_enable(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ seq_printf(m, "EDCCA %s: EDCCA mode %d\n",
+ rtw_edcca_enabled ? "enabled" : "disabled",
+ dm_info->edcca_mode);
+ return 0;
+}
+
static ssize_t rtw_debugfs_set_fw_crash(struct file *filp,
const char __user *buffer,
size_t count, loff_t *loff)
@@ -853,6 +886,7 @@ static ssize_t rtw_debugfs_set_fw_crash(struct file *filp,
mutex_lock(&rtwdev->mutex);
rtw_leave_lps_deep(rtwdev);
+ set_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags);
rtw_write8(rtwdev, REG_HRCV_MSG, 1);
mutex_unlock(&rtwdev->mutex);
@@ -864,7 +898,9 @@ static int rtw_debugfs_get_fw_crash(struct seq_file *m, void *v)
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
- seq_printf(m, "%d\n", test_bit(RTW_FLAG_RESTARTING, rtwdev->flags));
+ seq_printf(m, "%d\n",
+ test_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags) ||
+ test_bit(RTW_FLAG_RESTARTING, rtwdev->flags));
return 0;
}
@@ -1048,6 +1084,11 @@ static struct rtw_debugfs_priv rtw_debug_priv_coex_info = {
.cb_read = rtw_debugfs_get_coex_info,
};
+static struct rtw_debugfs_priv rtw_debug_priv_edcca_enable = {
+ .cb_write = rtw_debugfs_set_edcca_enable,
+ .cb_read = rtw_debugfs_get_edcca_enable,
+};
+
static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = {
.cb_write = rtw_debugfs_set_fw_crash,
.cb_read = rtw_debugfs_get_fw_crash,
@@ -1131,6 +1172,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
}
rtw_debugfs_add_r(rf_dump);
rtw_debugfs_add_r(tx_pwr_tbl);
+ rtw_debugfs_add_rw(edcca_enable);
rtw_debugfs_add_rw(fw_crash);
rtw_debugfs_add_rw(dm_cap);
}
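
The new edcca_enable entry is a read/write debugfs file: reads report the
global flag and current mode, writes go through kstrtobool_from_user() and
re-evaluate the mode. A minimal userspace toggle (the debugfs path is an
assumption based on rtw_debugfs_init() creating an "rtw88" directory under
the phy's ieee80211 debugfs node):

	#include <stdio.h>

	/* assumed location; adjust phy0 to the actual phy index */
	#define EDCCA_PATH "/sys/kernel/debug/ieee80211/phy0/rtw88/edcca_enable"

	int main(void)
	{
		FILE *f = fopen(EDCCA_PATH, "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fputs("0\n", f);        /* kstrtobool accepts 0/1, y/n, on/off */
		return fclose(f) ? 1 : 0;
	}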
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index 0dd3f9a88c8d..47c57f395f52 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -21,6 +21,7 @@ enum rtw_debug_mask {
RTW_DBG_WOW = 0x00001000,
RTW_DBG_CFO = 0x00002000,
RTW_DBG_PATH_DIV = 0x00004000,
+ RTW_DBG_ADAPTIVITY = 0x00008000,
RTW_DBG_ALL = 0xffffffff
};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index e6399519584b..0c4f2a2f2d7f 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -183,6 +183,28 @@ static void rtw_fw_scan_result(struct rtw_dev *rtwdev, u8 *payload,
dm_info->scan_density);
}
+static void rtw_fw_adaptivity_result(struct rtw_dev *rtwdev, u8 *payload,
+ u8 length)
+{
+ struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th;
+ struct rtw_c2h_adaptivity *result = (struct rtw_c2h_adaptivity *)payload;
+
+ rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
+ "Adaptivity: density %x igi %x l2h_th_init %x l2h %x h2l %x option %x\n",
+ result->density, result->igi, result->l2h_th_init, result->l2h,
+ result->h2l, result->option);
+
+ rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "Reg Setting: L2H %x H2L %x\n",
+ rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr,
+ edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask),
+ rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr,
+ edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask));
+
+ rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "EDCCA Flag %s\n",
+ rtw_read32_mask(rtwdev, REG_EDCCA_REPORT, BIT_EDCCA_FLAG) ?
+ "Set" : "Unset");
+}
+
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
struct rtw_c2h_cmd *c2h;
@@ -252,6 +274,10 @@ void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
rtw_fw_scan_result(rtwdev, c2h->payload, len);
dev_kfree_skb_any(skb);
break;
+ case C2H_ADAPTIVITY:
+ rtw_fw_adaptivity_result(rtwdev, c2h->payload, len);
+ dev_kfree_skb_any(skb);
+ break;
default:
/* pass offset for further operation */
*((u32 *)skb->cb) = pkt_offset;
@@ -1556,12 +1582,10 @@ static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
u32 i;
u16 idx = 0;
u16 ctl;
- u8 rcr;
- rcr = rtw_read8(rtwdev, REG_RCR + 2);
ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;
/* disable rx clock gate */
- rtw_write8(rtwdev, REG_RCR, rcr | BIT(3));
+ rtw_write32_set(rtwdev, REG_RCR, BIT_DISGCLK);
do {
rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl);
@@ -1580,7 +1604,8 @@ static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
out:
rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
- rtw_write8(rtwdev, REG_RCR + 2, rcr);
+ /* restore rx clock gate */
+ rtw_write32_clr(rtwdev, REG_RCR, BIT_DISGCLK);
}
static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel,
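
Switching from the byte-wide save/restore of REG_RCR + 2 to named-bit
set/clear calls avoids clobbering neighbouring RCR bits if they change in
between. A sketch of the helper semantics assumed here (the real
rtw_write32_set()/rtw_write32_clr() live in the driver's hci.h):

	/* sketch of the read-modify-write helper semantics assumed above */
	static inline void write32_set(struct rtw_dev *rtwdev, u32 addr, u32 bit)
	{
		rtw_write32(rtwdev, addr, rtw_read32(rtwdev, addr) | bit);
	}

	static inline void write32_clr(struct rtw_dev *rtwdev, u32 addr, u32 bit)
	{
		rtw_write32(rtwdev, addr, rtw_read32(rtwdev, addr) & ~bit);
	}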
@@ -1722,6 +1747,27 @@ void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
+void rtw_fw_adaptivity(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ if (!rtw_edcca_enabled) {
+ dm_info->edcca_mode = RTW_EDCCA_NORMAL;
+ rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
+ "EDCCA disabled by debugfs\n");
+ }
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_ADAPTIVITY);
+ SET_ADAPTIVITY_MODE(h2c_pkt, dm_info->edcca_mode);
+ SET_ADAPTIVITY_OPTION(h2c_pkt, 2);
+ SET_ADAPTIVITY_IGI(h2c_pkt, dm_info->igi_history[0]);
+ SET_ADAPTIVITY_L2H(h2c_pkt, dm_info->l2h_th_ini);
+ SET_ADAPTIVITY_DENSITY(h2c_pkt, dm_info->scan_density);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 64dcde35a021..09c7afb99e63 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -41,6 +41,7 @@ enum rtw_c2h_cmd_id {
C2H_WLAN_INFO = 0x27,
C2H_WLAN_RFON = 0x32,
C2H_BCN_FILTER_NOTIFY = 0x36,
+ C2H_ADAPTIVITY = 0x37,
C2H_SCAN_RESULT = 0x38,
C2H_HW_FEATURE_DUMP = 0xfd,
C2H_HALMAC = 0xff,
@@ -56,6 +57,15 @@ struct rtw_c2h_cmd {
u8 payload[];
} __packed;
+struct rtw_c2h_adaptivity {
+ u8 density;
+ u8 igi;
+ u8 l2h_th_init;
+ u8 l2h;
+ u8 h2l;
+ u8 option;
+} __packed;
+
enum rtw_rsvd_packet_type {
RSVD_BEACON,
RSVD_DUMMY,
@@ -90,6 +100,7 @@ enum rtw_fw_feature {
FW_FEATURE_PG = BIT(3),
FW_FEATURE_BCN_FILTER = BIT(5),
FW_FEATURE_NOTIFY_SCAN = BIT(6),
+ FW_FEATURE_ADAPTIVITY = BIT(7),
FW_FEATURE_MAX = BIT(31),
};
@@ -375,6 +386,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_BCN_FILTER_OFFLOAD_P1 0x57
#define H2C_CMD_WL_PHY_INFO 0x58
#define H2C_CMD_SCAN 0x59
+#define H2C_CMD_ADAPTIVITY 0x5A
#define H2C_CMD_COEX_TDMA_TYPE 0x60
#define H2C_CMD_QUERY_BT_INFO 0x61
@@ -428,6 +440,17 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define SET_SCAN_START(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_ADAPTIVITY_MODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(11, 8))
+#define SET_ADAPTIVITY_OPTION(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 12))
+#define SET_ADAPTIVITY_IGI(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_ADAPTIVITY_L2H(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_ADAPTIVITY_DENSITY(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0))
+
#define SET_PWR_MODE_SET_MODE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(14, 8))
#define SET_PWR_MODE_SET_RLBM(h2c_pkt, value) \
@@ -662,4 +685,5 @@ void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev);
int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
u32 *buffer);
void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
+void rtw_fw_adaptivity(struct rtw_dev *rtwdev);
#endif
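
The SET_ADAPTIVITY_* macros pack each parameter into a GENMASK()-sized field
of the little-endian H2C buffer via le32p_replace_bits(). A standalone
illustration of the same packing on a little-endian host (plain-C stand-ins
for the kernel helpers; the parameter values are made up):

	#include <stdint.h>
	#include <stdio.h>

	/* minimal stand-ins for GENMASK()/le32p_replace_bits() on a LE host */
	#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

	static void put_field(uint32_t *dw, uint32_t mask, uint32_t val)
	{
		unsigned shift = __builtin_ctz(mask);   /* GCC/Clang builtin */

		*dw = (*dw & ~mask) | ((val << shift) & mask);
	}

	int main(void)
	{
		uint32_t h2c[2] = { 0x5a /* H2C_CMD_ADAPTIVITY in bits 7:0 */, 0 };

		put_field(&h2c[0], GENMASK(11, 8),  1);    /* mode: ADAPTIVITY */
		put_field(&h2c[0], GENMASK(15, 12), 2);    /* option */
		put_field(&h2c[0], GENMASK(23, 16), 0x32); /* igi */
		put_field(&h2c[0], GENMASK(31, 24), (uint8_t)-9); /* l2h */
		put_field(&h2c[1], GENMASK(7, 0),   4);    /* scan density */

		printf("h2c = %08x %08x\n", h2c[0], h2c[1]);
		return 0;
	}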
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 6bb55e663fc3..a0d4d6e31fb4 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -23,6 +23,14 @@ EXPORT_SYMBOL(rtw_disable_lps_deep_mode);
bool rtw_bf_support = true;
unsigned int rtw_debug_mask;
EXPORT_SYMBOL(rtw_debug_mask);
+/* EDCCA is enabled during normal operation. For debugging in a noisy
+ * environment, it can be disabled via the edcca debugfs entry. Because
+ * a noisy environment will likely affect all rtw88 devices at once,
+ * rtw_edcca_enabled is declared per driver rather than per device, so
+ * turning it off takes effect for every rtw88 device until there is a
+ * compelling reason to track the setting per device.
+ */
+bool rtw_edcca_enabled = true;
module_param_named(disable_lps_deep, rtw_disable_lps_deep_mode, bool, 0644);
module_param_named(support_bf, rtw_bf_support, bool, 0644);
@@ -556,6 +564,7 @@ static void __fw_recovery_work(struct rtw_dev *rtwdev)
int ret = 0;
set_bit(RTW_FLAG_RESTARTING, rtwdev->flags);
+ clear_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags);
ret = rtw_fwcd_prep(rtwdev);
if (ret)
@@ -1964,7 +1973,11 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
rtw_set_supported_band(hw, rtwdev->chip);
SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr);
- rtw_regd_init(rtwdev, rtw_regd_notifier);
+ ret = rtw_regd_init(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to init regd\n");
+ return ret;
+ }
ret = ieee80211_register_hw(hw);
if (ret) {
@@ -1972,8 +1985,11 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
return ret;
}
- if (regulatory_hint(hw->wiphy, rtwdev->regd.alpha2))
- rtw_err(rtwdev, "regulatory_hint fail\n");
+ ret = rtw_regd_hint(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to hint regd\n");
+ return ret;
+ }
rtw_debugfs_init(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 56812127a053..bbdd535b64e7 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -41,6 +41,7 @@
extern bool rtw_bf_support;
extern bool rtw_disable_lps_deep_mode;
extern unsigned int rtw_debug_mask;
+extern bool rtw_edcca_enabled;
extern const struct ieee80211_ops rtw_ops;
#define RTW_MAX_CHANNEL_NUM_2G 14
@@ -362,6 +363,7 @@ enum rtw_flags {
RTW_FLAG_BUSY_TRAFFIC,
RTW_FLAG_WOWLAN,
RTW_FLAG_RESTARTING,
+ RTW_FLAG_RESTART_TRIGGERING,
NUM_OF_RTW_FLAGS,
};
@@ -545,6 +547,11 @@ struct rtw_rf_sipi_addr {
u32 lssi_read_pi;
};
+struct rtw_hw_reg_offset {
+ struct rtw_hw_reg hw_reg;
+ u8 offset;
+};
+
struct rtw_backup_info {
u8 len;
u32 reg;
@@ -800,8 +807,22 @@ struct rtw_vif {
struct rtw_regulatory {
char alpha2[2];
- u8 chplan;
- u8 txpwr_regd;
+ u8 txpwr_regd_2g;
+ u8 txpwr_regd_5g;
+};
+
+enum rtw_regd_state {
+ RTW_REGD_STATE_WORLDWIDE,
+ RTW_REGD_STATE_PROGRAMMED,
+ RTW_REGD_STATE_SETTING,
+
+ RTW_REGD_STATE_NR,
+};
+
+struct rtw_regd {
+ enum rtw_regd_state state;
+ const struct rtw_regulatory *regulatory;
+ enum nl80211_dfs_regions dfs_region;
};
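
The new rtw_regd state captures where the regulatory setting came from:
WORLDWIDE when the efuse carries no usable country code, PROGRAMMED when it
does, and SETTING while a notifier-driven update is in flight (the last
reading is inferred from the naming; the transitions live in regd.c). A
compact summary:

	/* sketch: intended meaning of each state (interpretation, see regd.c) */
	static const char *regd_state_str(enum rtw_regd_state st)
	{
		switch (st) {
		case RTW_REGD_STATE_WORLDWIDE:
			return "no efuse country code; worldwide rules";
		case RTW_REGD_STATE_PROGRAMMED:
			return "country code programmed in efuse";
		case RTW_REGD_STATE_SETTING:
			return "regulatory update in progress";
		default:
			return "unknown";
		}
	}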
struct rtw_chip_ops {
@@ -839,6 +860,8 @@ struct rtw_chip_ops {
struct ieee80211_bss_conf *conf);
void (*cfg_csi_rate)(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
u8 fixrate_en, u8 *new_rate);
+ void (*adaptivity_init)(struct rtw_dev *rtwdev);
+ void (*adaptivity)(struct rtw_dev *rtwdev);
void (*cfo_init)(struct rtw_dev *rtwdev);
void (*cfo_track)(struct rtw_dev *rtwdev);
void (*config_tx_path)(struct rtw_dev *rtwdev, u8 tx_path,
@@ -1194,6 +1217,10 @@ struct rtw_chip_info {
u8 bfer_su_max_num;
u8 bfer_mu_max_num;
+ struct rtw_hw_reg_offset *edcca_th;
+ s8 l2h_th_ini_cs;
+ s8 l2h_th_ini_ad;
+
const char *wow_fw_name;
const struct wiphy_wowlan_support *wowlan_stub;
const u8 max_sched_scan_ssids;
@@ -1542,6 +1569,20 @@ struct rtw_gapk_info {
u8 channel;
};
+#define EDCCA_TH_L2H_IDX 0
+#define EDCCA_TH_H2L_IDX 1
+#define EDCCA_TH_L2H_LB 48
+#define EDCCA_ADC_BACKOFF 12
+#define EDCCA_IGI_BASE 50
+#define EDCCA_IGI_L2H_DIFF 8
+#define EDCCA_L2H_H2L_DIFF 7
+#define EDCCA_L2H_H2L_DIFF_NORMAL 8
+
+enum rtw_edcca_mode {
+ RTW_EDCCA_NORMAL = 0,
+ RTW_EDCCA_ADAPTIVITY = 1,
+};
+
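
These constants feed the per-chip adaptivity ops added elsewhere in this
series. One plausible shape for deriving the L2H/H2L pair from the current
IGI, using the bounds above (illustrative only, not any specific chip's
implementation):

	/* sketch: clamp L2H to its lower bound, keep H2L a fixed gap below it */
	static void derive_edcca_th(s8 igi, s8 l2h_ini, bool adaptivity,
				    s8 *l2h, s8 *h2l)
	{
		if (adaptivity) {
			*l2h = min_t(s8, igi, l2h_ini);
			*l2h = max_t(s8, *l2h, EDCCA_TH_L2H_LB);
			*h2l = *l2h - EDCCA_L2H_H2L_DIFF;
		} else {
			*l2h = max_t(s8, igi + EDCCA_IGI_L2H_DIFF,
				     EDCCA_TH_L2H_LB);
			*h2l = *l2h - EDCCA_L2H_H2L_DIFF_NORMAL;
		}
	}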
struct rtw_cfo_track {
bool is_adjust;
u8 crystal_cap;
@@ -1633,6 +1674,8 @@ struct rtw_dm_info {
struct rtw_gapk_info gapk;
bool is_bt_iqk_timeout;
+ s8 l2h_th_ini;
+ enum rtw_edcca_mode edcca_mode;
u8 scan_density;
};
@@ -1833,7 +1876,7 @@ struct rtw_dev {
struct rtw_efuse efuse;
struct rtw_sec_desc sec;
struct rtw_traffic_stats stats;
- struct rtw_regulatory regd;
+ struct rtw_regd regd;
struct rtw_bf_info bf_info;
struct rtw_dm_info dm_info;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 569dd3cfde35..bfddfcbe63f5 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -9,6 +9,7 @@
#include "fw.h"
#include "phy.h"
#include "debug.h"
+#include "regd.h"
struct phy_cfg_pair {
u32 addr;
@@ -119,6 +120,63 @@ static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev)
dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
}
+void rtw_phy_set_edcca_th(struct rtw_dev *rtwdev, u8 l2h, u8 h2l)
+{
+ struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th;
+
+ rtw_write32_mask(rtwdev,
+ edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr,
+ edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask,
+ l2h + edcca_th[EDCCA_TH_L2H_IDX].offset);
+ rtw_write32_mask(rtwdev,
+ edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr,
+ edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask,
+ h2l + edcca_th[EDCCA_TH_H2L_IDX].offset);
+}
+EXPORT_SYMBOL(rtw_phy_set_edcca_th);
+
+void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ /* may be turned off via debugfs for debugging */
+ if (!rtw_edcca_enabled) {
+ dm_info->edcca_mode = RTW_EDCCA_NORMAL;
+ rtw_dbg(rtwdev, RTW_DBG_PHY, "EDCCA disabled, cannot be set\n");
+ return;
+ }
+
+ switch (rtwdev->regd.dfs_region) {
+ case NL80211_DFS_ETSI:
+ dm_info->edcca_mode = RTW_EDCCA_ADAPTIVITY;
+ dm_info->l2h_th_ini = chip->l2h_th_ini_ad;
+ break;
+ case NL80211_DFS_JP:
+ dm_info->edcca_mode = RTW_EDCCA_ADAPTIVITY;
+ dm_info->l2h_th_ini = chip->l2h_th_ini_cs;
+ break;
+ default:
+ dm_info->edcca_mode = RTW_EDCCA_NORMAL;
+ break;
+ }
+}
+
+static void rtw_phy_adaptivity_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ rtw_phy_adaptivity_set_mode(rtwdev);
+ if (chip->ops->adaptivity_init)
+ chip->ops->adaptivity_init(rtwdev);
+}
+
+static void rtw_phy_adaptivity(struct rtw_dev *rtwdev)
+{
+ if (rtwdev->chip->ops->adaptivity)
+ rtwdev->chip->ops->adaptivity(rtwdev);
+}
+
static void rtw_phy_cfo_init(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -159,6 +217,7 @@ void rtw_phy_init(struct rtw_dev *rtwdev)
rtw_phy_cck_pd_init(rtwdev);
dm_info->iqk.done = false;
+ rtw_phy_adaptivity_init(rtwdev);
rtw_phy_cfo_init(rtwdev);
rtw_phy_tx_path_div_init(rtwdev);
}
@@ -711,6 +770,11 @@ void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
rtw_phy_cfo_track(rtwdev);
rtw_phy_dpk_track(rtwdev);
rtw_phy_pwr_track(rtwdev);
+
+ if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_ADAPTIVITY))
+ rtw_fw_adaptivity(rtwdev);
+ else
+ rtw_phy_adaptivity(rtwdev);
}
#define FRAC_BITS 3
@@ -1564,17 +1628,70 @@ static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev)
rtw_xref_txpwr_lmt_by_bw(rtwdev, regd);
}
+static void
+__cfg_txpwr_lmt_by_alt(struct rtw_hal *hal, u8 regd, u8 regd_alt, u8 bw, u8 rs)
+{
+ u8 ch;
+
+ for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
+ hal->tx_pwr_limit_2g[regd][bw][rs][ch] =
+ hal->tx_pwr_limit_2g[regd_alt][bw][rs][ch];
+
+ for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
+ hal->tx_pwr_limit_5g[regd][bw][rs][ch] =
+ hal->tx_pwr_limit_5g[regd_alt][bw][rs][ch];
+}
+
+static void
+rtw_cfg_txpwr_lmt_by_alt(struct rtw_dev *rtwdev, u8 regd, u8 regd_alt)
+{
+ u8 bw, rs;
+
+ for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
+ for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+ __cfg_txpwr_lmt_by_alt(&rtwdev->hal, regd, regd_alt,
+ bw, rs);
+}
+
void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
const struct rtw_table *tbl)
{
const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;
const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;
+ u32 regd_cfg_flag = 0;
+ u8 regd_alt;
+ u8 i;
for (; p < end; p++) {
+ regd_cfg_flag |= BIT(p->regd);
rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band,
p->bw, p->rs, p->ch, p->txpwr_lmt);
}
+ for (i = 0; i < RTW_REGD_MAX; i++) {
+ if (i == RTW_REGD_WW)
+ continue;
+
+ if (regd_cfg_flag & BIT(i))
+ continue;
+
+ rtw_dbg(rtwdev, RTW_DBG_REGD,
+ "txpwr regd %d does not be configured\n", i);
+
+ if (rtw_regd_has_alt(i, &regd_alt) &&
+ regd_cfg_flag & BIT(regd_alt)) {
+ rtw_dbg(rtwdev, RTW_DBG_REGD,
+ "cfg txpwr regd %d by regd %d as alternative\n",
+ i, regd_alt);
+
+ rtw_cfg_txpwr_lmt_by_alt(rtwdev, i, regd_alt);
+ continue;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_REGD, "cfg txpwr regd %d by WW\n", i);
+ rtw_cfg_txpwr_lmt_by_alt(rtwdev, i, RTW_REGD_WW);
+ }
+
rtw_xref_txpwr_lmt(rtwdev);
}
EXPORT_SYMBOL(rtw_parse_tbl_txpwr_lmt);
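
rtw_regd_has_alt() is introduced in regd.c in this same series; the fallback
it enables boils down to a small lookup from a txpwr regd without its own
limit table to a substitute one, roughly as follows (the pairings shown are
hypothetical):

	struct regd_alt { unsigned char set; unsigned char alt; };

	/* hypothetical pairings for illustration; the real table is in regd.c */
	static const struct regd_alt alt_map[RTW_REGD_MAX] = {
		[RTW_REGD_IC]   = { 1, RTW_REGD_FCC },
		[RTW_REGD_ACMA] = { 1, RTW_REGD_ETSI },
	};

	static int regd_has_alt(unsigned char regd, unsigned char *alt)
	{
		if (!alt_map[regd].set)
			return 0;       /* caller falls back to worldwide */
		*alt = alt_map[regd].alt;
		return 1;
	}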
@@ -2014,7 +2131,7 @@ static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev,
u8 ch, u8 path, u8 rs)
{
struct rtw_hal *hal = &rtwdev->hal;
- u8 regd = rtwdev->regd.txpwr_regd;
+ u8 regd = rtw_regd_get(rtwdev);
u8 *rates;
u8 size;
u8 rate;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index 112ed125970a..02d1ec47ffb1 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -59,6 +59,8 @@ bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev);
bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev);
void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
struct rtw_swing_table *swing_table);
+void rtw_phy_set_edcca_th(struct rtw_dev *rtwdev, u8 l2h, u8 h2l);
+void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev);
void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev,
struct rtw_rx_pkt_stat *pkt_stat);
void rtw_phy_tx_path_diversity(struct rtw_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index f5ce75095e90..84ba9ec489c3 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -361,10 +361,12 @@
#define REG_AGGR_BREAK_TIME 0x051A
#define REG_SLOT 0x051B
#define REG_TX_PTCL_CTRL 0x0520
+#define BIT_DIS_EDCCA BIT(15)
#define BIT_SIFS_BK_EN BIT(12)
#define REG_TXPAUSE 0x0522
#define BIT_AC_QUEUE GENMASK(7, 0)
#define REG_RD_CTRL 0x0524
+#define BIT_EDCCA_MSK_CNTDOWN_EN BIT(11)
#define BIT_DIS_TXOP_CFE BIT(10)
#define BIT_DIS_LSIG_CFE BIT(9)
#define BIT_DIS_STBC_CFE BIT(8)
@@ -406,6 +408,7 @@
#define BIT_MFBEN BIT(22)
#define BIT_DISCHKPPDLLEN BIT(21)
#define BIT_PKTCTL_DLEN BIT(20)
+#define BIT_DISGCLK BIT(19)
#define BIT_TIM_PARSER_EN BIT(18)
#define BIT_BC_MD_EN BIT(17)
#define BIT_UC_MD_EN BIT(16)
@@ -640,6 +643,9 @@
#define REG_HRCV_MSG 0x1cf
+#define REG_EDCCA_REPORT 0x2d38
+#define BIT_EDCCA_FLAG BIT(24)
+
#define REG_IGN_GNTBT4 0x4160
#define RF_MODE 0x00
diff --git a/drivers/net/wireless/realtek/rtw88/regd.c b/drivers/net/wireless/realtek/rtw88/regd.c
index 69744dd65968..315c2b193e92 100644
--- a/drivers/net/wireless/realtek/rtw88/regd.c
+++ b/drivers/net/wireless/realtek/rtw88/regd.c
@@ -7,288 +7,274 @@
#include "debug.h"
#include "phy.h"
-#define COUNTRY_CHPLAN_ENT(_alpha2, _chplan, _txpwr_regd) \
+#define COUNTRY_REGD_ENT(_alpha2, _regd_2g, _regd_5g) \
{.alpha2 = (_alpha2), \
- .chplan = (_chplan), \
- .txpwr_regd = (_txpwr_regd) \
+ .txpwr_regd_2g = (_regd_2g), \
+ .txpwr_regd_5g = (_regd_5g), \
}
+#define rtw_dbg_regd_dump(_dev, _msg, _args...) \
+do { \
+ struct rtw_dev *__d = (_dev); \
+ const struct rtw_regd *__r = &__d->regd; \
+ rtw_dbg(__d, RTW_DBG_REGD, _msg \
+ "apply alpha2 %c%c, regd {%d, %d}, dfs_region %d\n",\
+ ##_args, \
+ __r->regulatory->alpha2[0], \
+ __r->regulatory->alpha2[1], \
+ __r->regulatory->txpwr_regd_2g, \
+ __r->regulatory->txpwr_regd_5g, \
+ __r->dfs_region); \
+} while (0)
+
/* If the country code is not correctly defined in the efuse,
* use the worldwide country code and txpwr regd.
*/
-static const struct rtw_regulatory rtw_defined_chplan =
- COUNTRY_CHPLAN_ENT("00", RTW_CHPLAN_REALTEK_DEFINE, RTW_REGD_WW);
-
-static const struct rtw_regulatory all_chplan_map[] = {
- COUNTRY_CHPLAN_ENT("AD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AE", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AF", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AG", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("AI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AN", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("AO", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AR", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("AS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("AT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("AW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("AZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BB", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BH", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BO", RTW_CHPLAN_WORLD_FCC7, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BR", RTW_CHPLAN_FCC2_FCC1, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BW", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BZ", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("CA", RTW_CHPLAN_IC1_IC2, RTW_REGD_IC),
- COUNTRY_CHPLAN_ENT("CC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CI", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CL", RTW_CHPLAN_WORLD_CHILE1, RTW_REGD_CHILE),
- COUNTRY_CHPLAN_ENT("CM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CO", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("CR", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("CV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CX", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("CY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("DE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("DJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("DK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("DM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("DO", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("DZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("EC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("EE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("EG", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("EH", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ER", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ES", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ET", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("FI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("FJ", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("FK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("FM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("FO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("FR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GD", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("GE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GP", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GT", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("GU", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("GW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GY", RTW_CHPLAN_FCC1_NCC3, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("HK", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("HM", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("HN", RTW_CHPLAN_WORLD_FCC5, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("HR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("HT", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("HU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ID", RTW_CHPLAN_ETSI1_ETSI12, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IL", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("JE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("JM", RTW_CHPLAN_WORLD_FCC5, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("JO", RTW_CHPLAN_WORLD_ETSI8, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("JP", RTW_CHPLAN_MKK1_MKK1, RTW_REGD_MKK),
- COUNTRY_CHPLAN_ENT("KE", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KN", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("KR", RTW_CHPLAN_KCC1_KCC3, RTW_REGD_KCC),
- COUNTRY_CHPLAN_ENT("KW", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("KZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("LI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MA", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ME", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MF", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("MG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MH", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("MK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ML", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MO", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MP", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("MQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MV", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MX", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("MY", RTW_CHPLAN_WORLD_ETSI15, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NF", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("NG", RTW_CHPLAN_WORLD_ETSI20, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("NL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NP", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("NZ", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("OM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PA", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("PE", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("PF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PG", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PH", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PK", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PR", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("PT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("PY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("QA", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("RE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("RO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("RS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("RU", RTW_CHPLAN_WORLD_ETSI14, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("RW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SA", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("SE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SG", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SR", RTW_CHPLAN_FCC2_FCC17, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("ST", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("SV", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("SX", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("SZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TH", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TK", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("TM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TT", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("TV", RTW_CHPLAN_ETSI1_NULL, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("TZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("UA", RTW_CHPLAN_WORLD_ETSI3, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("UG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("US", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("UY", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("UZ", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("VA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("VC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("VE", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("VG", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("VI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("VN", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("VU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("WF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("WS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("YE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("YT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ZA", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ZM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ZW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+static const struct rtw_regulatory rtw_reg_ww =
+ COUNTRY_REGD_ENT("00", RTW_REGD_WW, RTW_REGD_WW);
+
+static const struct rtw_regulatory rtw_reg_map[] = {
+ COUNTRY_REGD_ENT("AD", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AG", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("AI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AN", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("AO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AQ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AR", RTW_REGD_MEXICO, RTW_REGD_MEXICO),
+ COUNTRY_REGD_ENT("AS", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("AT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AU", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("AW", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("AZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BB", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("BD", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BJ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BM", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("BN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BO", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("BR", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("BS", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("BT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BV", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BW", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BY", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BZ", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("CA", RTW_REGD_IC, RTW_REGD_IC),
+ COUNTRY_REGD_ENT("CC", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CD", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CL", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("CM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CO", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("CR", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("CV", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CX", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("CY", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("DE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("DJ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("DK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("DM", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("DO", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("DZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("EC", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("EE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("EG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("EH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ER", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ES", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ET", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("FI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("FJ", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("FK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("FM", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("FO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("FR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GB", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GD", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("GE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GP", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GQ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GT", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("GU", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("GW", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GY", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("HK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("HM", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("HN", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("HR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("HT", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("HU", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ID", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IQ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("JE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("JM", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("JO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("JP", RTW_REGD_MKK, RTW_REGD_MKK),
+ COUNTRY_REGD_ENT("KE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KN", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("KR", RTW_REGD_KCC, RTW_REGD_KCC),
+ COUNTRY_REGD_ENT("KW", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KY", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("KZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LB", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LC", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("LI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LU", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LV", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LY", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MC", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MD", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ME", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MF", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("MG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MH", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("MK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ML", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MP", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("MQ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MU", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MV", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MW", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MX", RTW_REGD_MEXICO, RTW_REGD_MEXICO),
+ COUNTRY_REGD_ENT("MY", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NC", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NF", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("NG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NI", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("NL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NP", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NU", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("NZ", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("OM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PA", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("PE", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("PF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PR", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("PS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PW", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("PY", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("QA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("RE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("RO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("RS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("RU", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("RW", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SB", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SC", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("SE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SJ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SR", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("ST", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("SV", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("SX", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("SZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TC", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TD", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TJ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TK", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("TM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TT", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("TV", RTW_REGD_ETSI, RTW_REGD_WW),
+ COUNTRY_REGD_ENT("TW", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("TZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("UA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("UG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("US", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("UY", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("UZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("VA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("VC", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("VE", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("VG", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("VI", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("VN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("VU", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("WF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("WS", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("XK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("YE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("YT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ZA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ZM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ZW", RTW_REGD_ETSI, RTW_REGD_ETSI),
};
-static void rtw_regd_apply_beaconing_flags(struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator)
-{
- enum nl80211_band band;
- struct ieee80211_supported_band *sband;
- const struct ieee80211_reg_rule *reg_rule;
- struct ieee80211_channel *ch;
- unsigned int i;
-
- for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!wiphy->bands[band])
- continue;
-
- sband = wiphy->bands[band];
- for (i = 0; i < sband->n_channels; i++) {
- ch = &sband->channels[i];
-
- reg_rule = freq_reg_info(wiphy,
- MHZ_TO_KHZ(ch->center_freq));
- if (IS_ERR(reg_rule))
- continue;
-
- ch->flags &= ~IEEE80211_CHAN_DISABLED;
-
- if (!(reg_rule->flags & NL80211_RRF_NO_IR))
- ch->flags &= ~IEEE80211_CHAN_NO_IR;
- }
- }
-}
-
static void rtw_regd_apply_hw_cap_flags(struct wiphy *wiphy)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
@@ -321,78 +307,223 @@ out_5g:
}
}
-static void rtw_regd_apply_world_flags(struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator)
+static bool rtw_reg_is_ww(const struct rtw_regulatory *reg)
{
- rtw_regd_apply_beaconing_flags(wiphy, initiator);
+ return reg == &rtw_reg_ww;
}
-static struct rtw_regulatory rtw_regd_find_reg_by_name(char *alpha2)
+static bool rtw_reg_match(const struct rtw_regulatory *reg, const char *alpha2)
+{
+ return memcmp(reg->alpha2, alpha2, 2) == 0;
+}
+
+static const struct rtw_regulatory *rtw_reg_find_by_name(const char *alpha2)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(all_chplan_map); i++) {
- if (!memcmp(all_chplan_map[i].alpha2, alpha2, 2))
- return all_chplan_map[i];
+ for (i = 0; i < ARRAY_SIZE(rtw_reg_map); i++) {
+ if (rtw_reg_match(&rtw_reg_map[i], alpha2))
+ return &rtw_reg_map[i];
}
- return rtw_defined_chplan;
+ return &rtw_reg_ww;
}
-static int rtw_regd_notifier_apply(struct rtw_dev *rtwdev,
- struct wiphy *wiphy,
- struct regulatory_request *request)
+static
+void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+
+/* call this before ieee80211_register_hw() */
+int rtw_regd_init(struct rtw_dev *rtwdev)
{
- if (request->initiator == NL80211_REGDOM_SET_BY_USER)
- return 0;
- rtwdev->regd = rtw_regd_find_reg_by_name(request->alpha2);
- rtw_regd_apply_world_flags(wiphy, request->initiator);
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+ const struct rtw_regulatory *chip_reg;
- return 0;
-}
+ if (!wiphy)
+ return -EINVAL;
-static int
-rtw_regd_init_wiphy(struct rtw_regulatory *reg, struct wiphy *wiphy,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
-{
- wiphy->reg_notifier = reg_notifier;
+ wiphy->reg_notifier = rtw_regd_notifier;
- wiphy->regulatory_flags &= ~REGULATORY_CUSTOM_REG;
- wiphy->regulatory_flags &= ~REGULATORY_STRICT_REG;
- wiphy->regulatory_flags &= ~REGULATORY_DISABLE_BEACON_HINTS;
+ chip_reg = rtw_reg_find_by_name(rtwdev->efuse.country_code);
+ if (!rtw_reg_is_ww(chip_reg)) {
+ rtwdev->regd.state = RTW_REGD_STATE_PROGRAMMED;
- rtw_regd_apply_hw_cap_flags(wiphy);
+ /* Set REGULATORY_STRICT_REG before ieee80211_register_hw();
+ * the stack will then wait for regulatory_hint() and treat
+ * the hinted domain as the superset of our regulatory rules.
+ */
+ wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
+ wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE;
+ } else {
+ rtwdev->regd.state = RTW_REGD_STATE_WORLDWIDE;
+ }
+ rtwdev->regd.regulatory = &rtw_reg_ww;
+ rtwdev->regd.dfs_region = NL80211_DFS_UNSET;
+ rtw_dbg_regd_dump(rtwdev, "regd init state %d: ", rtwdev->regd.state);
+
+ rtw_regd_apply_hw_cap_flags(wiphy);
return 0;
}
-int rtw_regd_init(struct rtw_dev *rtwdev,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+/* call this after ieee80211_register_hw() */
+int rtw_regd_hint(struct rtw_dev *rtwdev)
{
struct wiphy *wiphy = rtwdev->hw->wiphy;
+ int ret;
if (!wiphy)
return -EINVAL;
- rtwdev->regd = rtw_regd_find_reg_by_name(rtwdev->efuse.country_code);
- rtw_regd_init_wiphy(&rtwdev->regd, wiphy, reg_notifier);
+ if (rtwdev->regd.state == RTW_REGD_STATE_PROGRAMMED) {
+ rtw_dbg(rtwdev, RTW_DBG_REGD,
+ "country domain %c%c is PGed on efuse",
+ rtwdev->efuse.country_code[0],
+ rtwdev->efuse.country_code[1]);
+
+ ret = regulatory_hint(wiphy, rtwdev->efuse.country_code);
+ if (ret) {
+ rtw_warn(rtwdev,
+ "failed to hint regulatory: %d\n", ret);
+ return ret;
+ }
+ }
return 0;
}
+static bool rtw_regd_mgmt_worldwide(struct rtw_dev *rtwdev,
+ struct rtw_regd *next_regd,
+ struct regulatory_request *request)
+{
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+
+ next_regd->state = RTW_REGD_STATE_WORLDWIDE;
+
+ if (request->initiator == NL80211_REGDOM_SET_BY_USER &&
+ !rtw_reg_is_ww(next_regd->regulatory)) {
+ next_regd->state = RTW_REGD_STATE_SETTING;
+ wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE;
+ }
+
+ return true;
+}
+
+static bool rtw_regd_mgmt_programmed(struct rtw_dev *rtwdev,
+ struct rtw_regd *next_regd,
+ struct regulatory_request *request)
+{
+ if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
+ rtw_reg_match(next_regd->regulatory, rtwdev->efuse.country_code)) {
+ next_regd->state = RTW_REGD_STATE_PROGRAMMED;
+ return true;
+ }
+
+ return false;
+}
+
+static bool rtw_regd_mgmt_setting(struct rtw_dev *rtwdev,
+ struct rtw_regd *next_regd,
+ struct regulatory_request *request)
+{
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+
+ if (request->initiator != NL80211_REGDOM_SET_BY_USER)
+ return false;
+
+ next_regd->state = RTW_REGD_STATE_SETTING;
+
+ if (rtw_reg_is_ww(next_regd->regulatory)) {
+ next_regd->state = RTW_REGD_STATE_WORLDWIDE;
+ wiphy->regulatory_flags &= ~REGULATORY_COUNTRY_IE_IGNORE;
+ }
+
+ return true;
+}
+
+static bool (*const rtw_regd_handler[RTW_REGD_STATE_NR])
+ (struct rtw_dev *, struct rtw_regd *, struct regulatory_request *) = {
+ [RTW_REGD_STATE_WORLDWIDE] = rtw_regd_mgmt_worldwide,
+ [RTW_REGD_STATE_PROGRAMMED] = rtw_regd_mgmt_programmed,
+ [RTW_REGD_STATE_SETTING] = rtw_regd_mgmt_setting,
+};
+
+static bool rtw_regd_state_hdl(struct rtw_dev *rtwdev,
+ struct rtw_regd *next_regd,
+ struct regulatory_request *request)
+{
+ next_regd->regulatory = rtw_reg_find_by_name(request->alpha2);
+ next_regd->dfs_region = request->dfs_region;
+ return rtw_regd_handler[rtwdev->regd.state](rtwdev, next_regd, request);
+}
+
+static
void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct rtw_dev *rtwdev = hw->priv;
struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_regd next_regd = {0};
+ bool hdl;
+
+ hdl = rtw_regd_state_hdl(rtwdev, &next_regd, request);
+ if (!hdl) {
+ rtw_dbg(rtwdev, RTW_DBG_REGD,
+ "regd state %d: ignore request %c%c of initiator %d\n",
+ rtwdev->regd.state,
+ request->alpha2[0],
+ request->alpha2[1],
+ request->initiator);
+ return;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_REGD, "regd state: %d -> %d\n",
+ rtwdev->regd.state, next_regd.state);
- rtw_regd_notifier_apply(rtwdev, wiphy, request);
- rtw_dbg(rtwdev, RTW_DBG_REGD,
- "get alpha2 %c%c from initiator %d, mapping to chplan 0x%x, txregd %d\n",
- request->alpha2[0], request->alpha2[1], request->initiator,
- rtwdev->regd.chplan, rtwdev->regd.txpwr_regd);
+ rtwdev->regd = next_regd;
+ rtw_dbg_regd_dump(rtwdev, "get alpha2 %c%c from initiator %d: ",
+ request->alpha2[0],
+ request->alpha2[1],
+ request->initiator);
+ rtw_phy_adaptivity_set_mode(rtwdev);
rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
}
+
+u8 rtw_regd_get(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 band = hal->current_band_type;
+
+ return band == RTW_BAND_2G ?
+ rtwdev->regd.regulatory->txpwr_regd_2g :
+ rtwdev->regd.regulatory->txpwr_regd_5g;
+}
+EXPORT_SYMBOL(rtw_regd_get);
+
+struct rtw_regd_alternative_t {
+ bool set;
+ u8 alt;
+};
+
+#define DECL_REGD_ALT(_regd, _regd_alt) \
+ [(_regd)] = {.set = true, .alt = (_regd_alt)}
+
+static const struct rtw_regd_alternative_t
+rtw_regd_alt[RTW_REGD_MAX] = {
+ DECL_REGD_ALT(RTW_REGD_IC, RTW_REGD_FCC),
+ DECL_REGD_ALT(RTW_REGD_KCC, RTW_REGD_ETSI),
+ DECL_REGD_ALT(RTW_REGD_ACMA, RTW_REGD_ETSI),
+ DECL_REGD_ALT(RTW_REGD_CHILE, RTW_REGD_FCC),
+ DECL_REGD_ALT(RTW_REGD_UKRAINE, RTW_REGD_ETSI),
+ DECL_REGD_ALT(RTW_REGD_MEXICO, RTW_REGD_FCC),
+ DECL_REGD_ALT(RTW_REGD_CN, RTW_REGD_ETSI),
+};
+
+bool rtw_regd_has_alt(u8 regd, u8 *regd_alt)
+{
+ if (!rtw_regd_alt[regd].set)
+ return false;
+
+ *regd_alt = rtw_regd_alt[regd].alt;
+ return true;
+}
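The regulatory rework above replaces the old ad-hoc notifier with a table of per-state handlers indexed by the current state. A stand-alone, user-space sketch of that dispatch pattern (illustrative names only, not the driver code):

#include <stdbool.h>
#include <stdio.h>

enum regd_state { ST_WORLDWIDE, ST_PROGRAMMED, ST_SETTING, ST_NR };

struct request { const char *alpha2; int initiator; };

static bool hdl_worldwide(struct request *r)  { return true; }
static bool hdl_programmed(struct request *r) { return r->initiator == 1; }
static bool hdl_setting(struct request *r)    { return r->initiator == 0; }

/* one handler per state, indexed by the current state */
static bool (*const handler[ST_NR])(struct request *) = {
	[ST_WORLDWIDE]  = hdl_worldwide,
	[ST_PROGRAMMED] = hdl_programmed,
	[ST_SETTING]    = hdl_setting,
};

int main(void)
{
	struct request req = { "US", 0 };
	enum regd_state state = ST_WORLDWIDE;

	printf("accepted: %d\n", handler[state](&req));
	return 0;
}

Designated initializers keep the table in step with the enum, so a state added without a handler fails loudly (NULL call) instead of silently routing to the wrong function.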
diff --git a/drivers/net/wireless/realtek/rtw88/regd.h b/drivers/net/wireless/realtek/rtw88/regd.h
index 5d4578331788..34cb13d0cd9e 100644
--- a/drivers/net/wireless/realtek/rtw88/regd.h
+++ b/drivers/net/wireless/realtek/rtw88/regd.h
@@ -64,8 +64,8 @@ enum country_code_type {
COUNTRY_CODE_MAX
};
-int rtw_regd_init(struct rtw_dev *rtwdev,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request));
-void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+int rtw_regd_init(struct rtw_dev *rtwdev);
+int rtw_regd_hint(struct rtw_dev *rtwdev);
+u8 rtw_regd_get(struct rtw_dev *rtwdev);
+bool rtw_regd_has_alt(u8 regd, u8 *regd_alt);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 785b8181513f..80a6f4da6acd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -14,6 +14,7 @@
#include "reg.h"
#include "debug.h"
#include "bf.h"
+#include "regd.h"
static const s8 lna_gain_table_0[8] = {22, 8, -6, -22, -31, -40, -46, -52};
static const s8 lna_gain_table_1[16] = {10, 6, 2, -2, -6, -10, -14, -17,
@@ -60,6 +61,9 @@ static int rtw8821c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
for (i = 0; i < 4; i++)
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
+ if (rtwdev->efuse.rfe_option == 2 || rtwdev->efuse.rfe_option == 4)
+ efuse->txpwr_idx_table[0].pwr_idx_2g = map->txpwr_idx_table[1].pwr_idx_2g;
+
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
rtw8821ce_efuse_parsing(efuse, map);
@@ -304,7 +308,8 @@ static void rtw8821c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
if (channel <= 14) {
if (rtwdev->efuse.rfe_option == 0)
rtw8821c_switch_rf_set(rtwdev, SWITCH_TO_WLG);
- else if (rtwdev->efuse.rfe_option == 2)
+ else if (rtwdev->efuse.rfe_option == 2 ||
+ rtwdev->efuse.rfe_option == 4)
rtw8821c_switch_rf_set(rtwdev, SWITCH_TO_BTG);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(6), 0x1);
rtw_write_rf(rtwdev, RF_PATH_A, 0x64, 0xf, 0xf);
@@ -773,6 +778,15 @@ static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
if (switch_status == coex_dm->cur_switch_status)
return;
+ if (coex_rfe->wlg_at_btg) {
+ ctrl_type = COEX_SWITCH_CTRL_BY_BBSW;
+
+ if (coex_rfe->ant_switch_polarity)
+ pos_type = COEX_SWITCH_TO_WLA;
+ else
+ pos_type = COEX_SWITCH_TO_WLG_BT;
+ }
+
coex_dm->cur_switch_status = switch_status;
if (coex_rfe->ant_switch_diversity &&
@@ -993,7 +1007,7 @@ static void rtw8821c_pwrtrack_set(struct rtw_dev *rtwdev)
s8 pwr_idx_offset_lower;
u8 channel = rtwdev->hal.current_channel;
u8 band_width = rtwdev->hal.current_band_width;
- u8 regd = rtwdev->regd.txpwr_regd;
+ u8 regd = rtw_regd_get(rtwdev);
u8 tx_rate = dm_info->tx_rate;
u8 max_pwr_idx = rtwdev->chip->max_power_index;
@@ -1498,6 +1512,7 @@ static const struct rtw_intf_phy_para_table phy_para_table_8821c = {
static const struct rtw_rfe_def rtw8821c_rfe_defs[] = {
[0] = RTW_DEF_RFE(8821c, 0, 0),
[2] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
+ [4] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
};
static struct rtw_hw_reg rtw8821c_dig[] = {
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index f1789155e901..c409c8c29ec8 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -15,6 +15,7 @@
#include "reg.h"
#include "debug.h"
#include "bf.h"
+#include "regd.h"
static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path);
@@ -1436,7 +1437,7 @@ static void rtw8822b_pwrtrack_set(struct rtw_dev *rtwdev, u8 path)
u8 pwr_idx_offset, tx_pwr_idx;
u8 channel = rtwdev->hal.current_channel;
u8 band_width = rtwdev->hal.current_band_width;
- u8 regd = rtwdev->regd.txpwr_regd;
+ u8 regd = rtw_regd_get(rtwdev);
u8 tx_rate = dm_info->tx_rate;
u8 max_pwr_idx = rtwdev->chip->max_power_index;
@@ -1552,6 +1553,39 @@ static void rtw8822b_bf_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif,
rtw_warn(rtwdev, "wrong bfee role\n");
}
+static void rtw8822b_adaptivity_init(struct rtw_dev *rtwdev)
+{
+ rtw_phy_set_edcca_th(rtwdev, RTW8822B_EDCCA_MAX, RTW8822B_EDCCA_MAX);
+
+ /* mac edcca state setting */
+ rtw_write32_clr(rtwdev, REG_TX_PTCL_CTRL, BIT_DIS_EDCCA);
+ rtw_write32_set(rtwdev, REG_RD_CTRL, BIT_EDCCA_MSK_CNTDOWN_EN);
+ rtw_write32_mask(rtwdev, REG_EDCCA_SOURCE, BIT_SOURCE_OPTION,
+ RTW8822B_EDCCA_SRC_DEF);
+ rtw_write32_mask(rtwdev, REG_EDCCA_POW_MA, BIT_MA_LEVEL, 0);
+
+ /* edcca decision opt */
+ rtw_write32_set(rtwdev, REG_EDCCA_DECISION, BIT_EDCCA_OPTION);
+}
+
+static void rtw8822b_adaptivity(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s8 l2h, h2l;
+ u8 igi;
+
+ igi = dm_info->igi_history[0];
+ if (dm_info->edcca_mode == RTW_EDCCA_NORMAL) {
+ l2h = max_t(s8, igi + EDCCA_IGI_L2H_DIFF, EDCCA_TH_L2H_LB);
+ h2l = l2h - EDCCA_L2H_H2L_DIFF_NORMAL;
+ } else {
+ l2h = min_t(s8, igi, dm_info->l2h_th_ini);
+ h2l = l2h - EDCCA_L2H_H2L_DIFF;
+ }
+
+ rtw_phy_set_edcca_th(rtwdev, l2h, h2l);
+}
+
static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -2125,6 +2159,8 @@ static struct rtw_chip_ops rtw8822b_ops = {
.config_bfee = rtw8822b_bf_config_bfee,
.set_gid_table = rtw_bf_set_gid_table,
.cfg_csi_rate = rtw_bf_cfg_csi_rate,
+ .adaptivity_init = rtw8822b_adaptivity_init,
+ .adaptivity = rtw8822b_adaptivity,
.coex_set_init = rtw8822b_coex_cfg_init,
.coex_set_ant_switch = rtw8822b_coex_cfg_ant_switch,
@@ -2454,6 +2490,11 @@ static const struct rtw_reg_domain coex_info_hw_regs_8822b[] = {
{0xc50, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
};
+static struct rtw_hw_reg_offset rtw8822b_edcca_th[] = {
+ [EDCCA_TH_L2H_IDX] = {{.addr = 0x8a4, .mask = MASKBYTE0}, .offset = 0},
+ [EDCCA_TH_H2L_IDX] = {{.addr = 0x8a4, .mask = MASKBYTE1}, .offset = 0},
+};
+
struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
@@ -2502,6 +2543,9 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.bfer_su_max_num = 2,
.bfer_mu_max_num = 1,
.rx_ldpc = true,
+ .edcca_th = rtw8822b_edcca_th,
+ .l2h_th_ini_cs = 10 + EDCCA_IGI_BASE,
+ .l2h_th_ini_ad = -14 + EDCCA_IGI_BASE,
.coex_para_ver = 0x20070206,
.bt_desired_ver = 0x6,
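rtw8822b_adaptivity() above derives the EDCCA level-to-high/high-to-low threshold pair from the current IGI. A user-space sketch of the same arithmetic; the *_DIFF and lower-bound constants here are placeholders, not the values from rtw88's headers:

#include <stdio.h>

#define IGI_L2H_DIFF        8
#define TH_L2H_LB           14
#define L2H_H2L_DIFF        15
#define L2H_H2L_DIFF_NORMAL 25

static void edcca_th(int igi, int normal_mode, int l2h_ini)
{
	int l2h, h2l;

	if (normal_mode) {
		l2h = igi + IGI_L2H_DIFF;
		if (l2h < TH_L2H_LB)
			l2h = TH_L2H_LB;	/* clamp to lower bound */
		h2l = l2h - L2H_H2L_DIFF_NORMAL;
	} else {
		l2h = igi < l2h_ini ? igi : l2h_ini;	/* min(igi, ini) */
		h2l = l2h - L2H_H2L_DIFF;
	}
	printf("igi=%d -> l2h=%d h2l=%d\n", igi, l2h, h2l);
}

int main(void)
{
	edcca_th(40, 1, 50);	/* normal mode */
	edcca_th(40, 0, 50);	/* adaptivity mode */
	return 0;
}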
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
index 6211f4b547b9..3fff8b881854 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
@@ -140,6 +140,8 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
#define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8))
+#define RTW8822B_EDCCA_MAX 0x7f
+#define RTW8822B_EDCCA_SRC_DEF 1
#define REG_HTSTFWT 0x800
#define REG_RXPSEL 0x808
#define BIT_RX_PSEL_RST (BIT(28) | BIT(29))
@@ -152,11 +154,17 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
#define REG_L1PKWT 0x840
#define REG_MRC 0x850
#define REG_CLKTRK 0x860
+#define REG_EDCCA_POW_MA 0x8a0
+#define BIT_MA_LEVEL GENMASK(1, 0)
#define REG_ADCCLK 0x8ac
#define REG_ADC160 0x8c4
#define REG_ADC40 0x8c8
+#define REG_EDCCA_DECISION 0x8dc
+#define BIT_EDCCA_OPTION BIT(5)
#define REG_CDDTXP 0x93c
#define REG_TXPSEL1 0x940
+#define REG_EDCCA_SOURCE 0x944
+#define BIT_SOURCE_OPTION GENMASK(29, 28)
#define REG_ACBB0 0x948
#define REG_ACBBRXFIR 0x94c
#define REG_ACGG2TBL 0x958
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index f3ad079967a6..46b881e8e4fe 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -4497,6 +4497,39 @@ static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
dm_info->pwr_trk_triggered = false;
}
+static void rtw8822c_adaptivity_init(struct rtw_dev *rtwdev)
+{
+ rtw_phy_set_edcca_th(rtwdev, RTW8822C_EDCCA_MAX, RTW8822C_EDCCA_MAX);
+
+ /* mac edcca state setting */
+ rtw_write32_clr(rtwdev, REG_TX_PTCL_CTRL, BIT_DIS_EDCCA);
+ rtw_write32_set(rtwdev, REG_RD_CTRL, BIT_EDCCA_MSK_CNTDOWN_EN);
+
+ /* edcca decision opt */
+ rtw_write32_clr(rtwdev, REG_EDCCA_DECISION, BIT_EDCCA_OPTION);
+}
+
+static void rtw8822c_adaptivity(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s8 l2h, h2l;
+ u8 igi;
+
+ igi = dm_info->igi_history[0];
+ if (dm_info->edcca_mode == RTW_EDCCA_NORMAL) {
+ l2h = max_t(s8, igi + EDCCA_IGI_L2H_DIFF, EDCCA_TH_L2H_LB);
+ h2l = l2h - EDCCA_L2H_H2L_DIFF_NORMAL;
+ } else {
+ if (igi < dm_info->l2h_th_ini - EDCCA_ADC_BACKOFF)
+ l2h = igi + EDCCA_ADC_BACKOFF;
+ else
+ l2h = dm_info->l2h_th_ini;
+ h2l = l2h - EDCCA_L2H_H2L_DIFF;
+ }
+
+ rtw_phy_set_edcca_th(rtwdev, l2h, h2l);
+}
+
static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -4912,6 +4945,8 @@ static struct rtw_chip_ops rtw8822c_ops = {
.config_bfee = rtw8822c_bf_config_bfee,
.set_gid_table = rtw_bf_set_gid_table,
.cfg_csi_rate = rtw_bf_cfg_csi_rate,
+ .adaptivity_init = rtw8822c_adaptivity_init,
+ .adaptivity = rtw8822c_adaptivity,
.cfo_init = rtw8822c_cfo_init,
.cfo_track = rtw8822c_cfo_track,
.config_tx_path = rtw8822c_config_tx_path,
@@ -5197,6 +5232,15 @@ static const struct rtw_pwr_track_tbl rtw8822c_rtw_pwr_track_tbl = {
.pwrtrk_2g_ccka_p = rtw8822c_pwrtrk_2g_cck_a_p,
};
+static struct rtw_hw_reg_offset rtw8822c_edcca_th[] = {
+ [EDCCA_TH_L2H_IDX] = {
+ {.addr = 0x84c, .mask = MASKBYTE2}, .offset = 0x80
+ },
+ [EDCCA_TH_H2L_IDX] = {
+ {.addr = 0x84c, .mask = MASKBYTE3}, .offset = 0x80
+ },
+};
+
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8822c = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_GTK_REKEY_FAILURE |
@@ -5289,6 +5333,9 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.bfer_mu_max_num = 1,
.rx_ldpc = true,
.tx_stbc = true,
+ .edcca_th = rtw8822c_edcca_th,
+ .l2h_th_ini_cs = 60,
+ .l2h_th_ini_ad = 45,
#ifdef CONFIG_PM
.wow_fw_name = "rtw88/rtw8822c_wow_fw.bin",
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index 364afc6d851b..3df627419d81 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -162,6 +162,7 @@ const struct rtw_table name ## _tbl = { \
#define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8))
+#define RTW8822C_EDCCA_MAX 0x7f
#define REG_ANAPARLDO_POW_MAC 0x0029
#define BIT_LDOE25_PON BIT(0)
#define XCAP_MASK GENMASK(6, 0)
@@ -174,6 +175,8 @@ const struct rtw_table name ## _tbl = { \
#define REG_ANTMAP0 0x820
#define BIT_ANT_PATH GENMASK(1, 0)
#define REG_ANTMAP 0x824
+#define REG_EDCCA_DECISION 0x844
+#define BIT_EDCCA_OPTION GENMASK(30, 29)
#define REG_DYMPRITH 0x86c
#define REG_DYMENTH0 0x870
#define REG_DYMENTH 0x874
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index a48e616e0fb9..6bfaab48b507 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -399,6 +399,8 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
info = IEEE80211_SKB_CB(skb);
tx_params = (struct skb_info *)info->driver_data;
+ /* info->driver_data and info->control are part of a union, so make a copy */
+ tx_params->have_key = !!info->control.hw_key;
wh = (struct ieee80211_hdr *)&skb->data[0];
tx_params->sta_id = 0;
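The comment above is the whole story: in struct ieee80211_tx_info, driver_data overlays control in a union, so control.hw_key must be latched before the driver starts writing driver_data. A minimal user-space model of the hazard and the fix:

#include <stdbool.h>
#include <stdio.h>

struct tx_info {
	union {
		struct { const void *hw_key; } control;
		struct { bool have_key; char scratch[15]; } driver;
	};
};

int main(void)
{
	struct tx_info info = { .control = { .hw_key = &info } };

	/* Latch the flag before the union member is repurposed;
	 * reading control.hw_key after writing driver would be bogus. */
	bool have_key = info.control.hw_key != NULL;

	info.driver.have_key = have_key;
	printf("have_key=%d\n", info.driver.have_key);
	return 0;
}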
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index f4a26f16f00f..dca81a4bbdd7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -203,7 +203,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
wh->frame_control |= cpu_to_le16(RSI_SET_PS_ENABLE);
if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) &&
- info->control.hw_key) {
+ tx_params->have_key) {
if (rsi_is_cipher_wep(common))
ieee80211_size += 4;
else
@@ -214,15 +214,17 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
RSI_WIFI_DATA_Q);
data_desc->header_len = ieee80211_size;
- if (common->min_rate != RSI_RATE_AUTO) {
+ if (common->rate_config[common->band].fixed_enabled) {
/* Send fixed rate */
+ u16 fixed_rate = common->rate_config[common->band].fixed_hw_rate;
+
data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE);
- data_desc->rate_info = cpu_to_le16(common->min_rate);
+ data_desc->rate_info = cpu_to_le16(fixed_rate);
if (conf_is_ht40(&common->priv->hw->conf))
data_desc->bbp_info = cpu_to_le16(FULL40M_ENABLE);
- if ((common->vif_info[0].sgi) && (common->min_rate & 0x100)) {
+ if (common->vif_info[0].sgi && (fixed_rate & 0x100)) {
/* Only MCS rates */
data_desc->rate_info |=
cpu_to_le16(ENABLE_SHORTGI_RATE);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index b66975f54567..e70c1c7fdf59 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -510,7 +510,6 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw,
if ((vif->type == NL80211_IFTYPE_AP) ||
(vif->type == NL80211_IFTYPE_P2P_GO)) {
rsi_send_rx_filter_frame(common, DISALLOW_BEACONS);
- common->min_rate = RSI_RATE_AUTO;
for (i = 0; i < common->max_stations; i++)
common->stations[i].sta = NULL;
}
@@ -1228,20 +1227,32 @@ static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
{
+ const unsigned int mcs_offset = ARRAY_SIZE(rsi_rates);
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
- enum nl80211_band band = hw->conf.chandef.chan->band;
+ int i;
mutex_lock(&common->mutex);
- common->fixedrate_mask[band] = 0;
- if (mask->control[band].legacy == 0xfff) {
- common->fixedrate_mask[band] =
- (mask->control[band].ht_mcs[0] << 12);
- } else {
- common->fixedrate_mask[band] =
- mask->control[band].legacy;
+ for (i = 0; i < ARRAY_SIZE(common->rate_config); i++) {
+ struct rsi_rate_config *cfg = &common->rate_config[i];
+ u32 bm;
+
+ bm = mask->control[i].legacy | (mask->control[i].ht_mcs[0] << mcs_offset);
+ if (hweight32(bm) == 1) { /* single rate */
+ int rate_index = ffs(bm) - 1;
+
+ if (rate_index < mcs_offset)
+ cfg->fixed_hw_rate = rsi_rates[rate_index].hw_value;
+ else
+ cfg->fixed_hw_rate = rsi_mcsrates[rate_index - mcs_offset];
+ cfg->fixed_enabled = true;
+ } else {
+ cfg->configured_mask = bm;
+ cfg->fixed_enabled = false;
+ }
}
+
mutex_unlock(&common->mutex);
return 0;
@@ -1378,46 +1389,6 @@ void rsi_indicate_pkt_to_os(struct rsi_common *common,
ieee80211_rx_irqsafe(hw, skb);
}
-static void rsi_set_min_rate(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- struct rsi_common *common)
-{
- u8 band = hw->conf.chandef.chan->band;
- u8 ii;
- u32 rate_bitmap;
- bool matched = false;
-
- common->bitrate_mask[band] = sta->supp_rates[band];
-
- rate_bitmap = (common->fixedrate_mask[band] & sta->supp_rates[band]);
-
- if (rate_bitmap & 0xfff) {
- /* Find out the min rate */
- for (ii = 0; ii < ARRAY_SIZE(rsi_rates); ii++) {
- if (rate_bitmap & BIT(ii)) {
- common->min_rate = rsi_rates[ii].hw_value;
- matched = true;
- break;
- }
- }
- }
-
- common->vif_info[0].is_ht = sta->ht_cap.ht_supported;
-
- if ((common->vif_info[0].is_ht) && (rate_bitmap >> 12)) {
- for (ii = 0; ii < ARRAY_SIZE(rsi_mcsrates); ii++) {
- if ((rate_bitmap >> 12) & BIT(ii)) {
- common->min_rate = rsi_mcsrates[ii];
- matched = true;
- break;
- }
- }
- }
-
- if (!matched)
- common->min_rate = 0xffff;
-}
-
/**
* rsi_mac80211_sta_add() - This function notifies driver about a peer getting
* connected.
@@ -1516,9 +1487,9 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw,
if ((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_P2P_CLIENT)) {
- rsi_set_min_rate(hw, sta, common);
+ common->bitrate_mask[common->band] = sta->supp_rates[common->band];
+ common->vif_info[0].is_ht = sta->ht_cap.ht_supported;
if (sta->ht_cap.ht_supported) {
- common->vif_info[0].is_ht = true;
common->bitrate_mask[NL80211_BAND_2GHZ] =
sta->supp_rates[NL80211_BAND_2GHZ];
if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ||
@@ -1592,7 +1563,6 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
bss->qos = sta->wme;
common->bitrate_mask[NL80211_BAND_2GHZ] = 0;
common->bitrate_mask[NL80211_BAND_5GHZ] = 0;
- common->min_rate = 0xffff;
common->vif_info[0].is_ht = false;
common->vif_info[0].sgi = false;
common->vif_info[0].seq_start = 0;
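rsi_mac80211_set_rate_mask() above folds the legacy and HT masks into one bitmap and treats a single set bit as a fixed-rate request. A stand-alone sketch of that bit logic, with N_LEGACY standing in for ARRAY_SIZE(rsi_rates):

#include <stdint.h>
#include <stdio.h>

#define N_LEGACY 12	/* assumed legacy-rate table size, demo only */

int main(void)
{
	uint32_t legacy = 0, ht_mcs = 0x01;		/* MCS0 only */
	uint32_t bm = legacy | (ht_mcs << N_LEGACY);

	if (__builtin_popcount(bm) == 1) {		/* single fixed rate */
		int idx = __builtin_ffs(bm) - 1;

		if (idx < N_LEGACY)
			printf("fixed legacy rate index %d\n", idx);
		else
			printf("fixed MCS index %d\n", idx - N_LEGACY);
	} else {
		printf("mask 0x%x -> auto-rate\n", (unsigned)bm);
	}
	return 0;
}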
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index d98483298555..143224a3802b 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -211,9 +211,10 @@ int rsi_read_pkt(struct rsi_common *common, u8 *rx_pkt, s32 rcv_pkt_len)
bt_pkt_type = frame_desc[offset + BT_RX_PKT_TYPE_OFST];
if (bt_pkt_type == BT_CARD_READY_IND) {
rsi_dbg(INFO_ZONE, "BT Card ready recvd\n");
- if (rsi_bt_ops.attach(common, &g_proto_ops))
- rsi_dbg(ERR_ZONE,
- "Failed to attach BT module\n");
+ if (common->fsm_state == FSM_MAC_INIT_DONE)
+ rsi_attach_bt(common);
+ else
+ common->bt_defer_attach = true;
} else {
if (common->bt_adapter)
rsi_bt_ops.recv_pkt(common->bt_adapter,
@@ -278,6 +279,15 @@ void rsi_set_bt_context(void *priv, void *bt_context)
}
#endif
+void rsi_attach_bt(struct rsi_common *common)
+{
+#ifdef CONFIG_RSI_COEX
+ if (rsi_bt_ops.attach(common, &g_proto_ops))
+ rsi_dbg(ERR_ZONE,
+ "Failed to attach BT module\n");
+#endif
+}
+
/**
* rsi_91x_init() - This function initializes os interface operations.
* @oper_mode: One of DEV_OPMODE_*.
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 891fd5f0fa76..0848f7a7e76c 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -276,7 +276,7 @@ static void rsi_set_default_parameters(struct rsi_common *common)
common->channel_width = BW_20MHZ;
common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
common->channel = 1;
- common->min_rate = 0xffff;
+ memset(&common->rate_config, 0, sizeof(common->rate_config));
common->fsm_state = FSM_CARD_NOT_READY;
common->iface_down = true;
common->endpoint = EP_2GHZ_20MHZ;
@@ -1314,7 +1314,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common,
u8 band = hw->conf.chandef.chan->band;
u8 num_supported_rates = 0;
u8 rate_table_offset, rate_offset = 0;
- u32 rate_bitmap;
+ u32 rate_bitmap, configured_rates;
u16 *selected_rates, min_rate;
bool is_ht = false, is_sgi = false;
u16 frame_len = sizeof(struct rsi_auto_rate);
@@ -1364,6 +1364,10 @@ static int rsi_send_auto_rate_request(struct rsi_common *common,
is_sgi = true;
}
+ /* Limit to any rates administratively configured by cfg80211 */
+ configured_rates = common->rate_config[band].configured_mask ?: 0xffffffff;
+ rate_bitmap &= configured_rates;
+
if (band == NL80211_BAND_2GHZ) {
if ((rate_bitmap == 0) && (is_ht))
min_rate = RSI_RATE_MCS0;
@@ -1389,10 +1393,13 @@ static int rsi_send_auto_rate_request(struct rsi_common *common,
num_supported_rates = jj;
if (is_ht) {
- for (ii = 0; ii < ARRAY_SIZE(mcs); ii++)
- selected_rates[jj++] = mcs[ii];
- num_supported_rates += ARRAY_SIZE(mcs);
- rate_offset += ARRAY_SIZE(mcs);
+ for (ii = 0; ii < ARRAY_SIZE(mcs); ii++) {
+ if (configured_rates & BIT(ii + ARRAY_SIZE(rsi_rates))) {
+ selected_rates[jj++] = mcs[ii];
+ num_supported_rates++;
+ rate_offset++;
+ }
+ }
}
sort(selected_rates, jj, sizeof(u16), &rsi_compare, NULL);
@@ -1482,7 +1489,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
qos_enable,
aid, sta_id,
vif);
- if (common->min_rate == 0xffff)
+ if (!common->rate_config[common->band].fixed_enabled)
rsi_send_auto_rate_request(common, sta, sta_id, vif);
if (opmode == RSI_OPMODE_STA &&
!(assoc_cap & WLAN_CAPABILITY_PRIVACY) &&
@@ -2071,6 +2078,9 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common,
if (common->reinit_hw) {
complete(&common->wlan_init_completion);
} else {
+ if (common->bt_defer_attach)
+ rsi_attach_bt(common);
+
return rsi_mac80211_attach(common);
}
}
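The BT attach path is now split across two events: if the card-ready indication arrives before the wlan FSM finishes init, a flag defers the attach, and the TA-confirm handler replays it once MAC init completes. A small user-space model of that defer-and-replay pattern:

#include <stdbool.h>
#include <stdio.h>

static bool fsm_ready, bt_defer_attach;

static void attach_bt(void) { puts("BT attached"); }

static void on_bt_card_ready(void)
{
	if (fsm_ready)
		attach_bt();
	else
		bt_defer_attach = true;	/* too early: remember for later */
}

static void on_fsm_init_done(void)
{
	fsm_ready = true;
	if (bt_defer_attach)
		attach_bt();		/* replay the deferred event */
}

int main(void)
{
	on_bt_card_ready();	/* arrives before init completes */
	on_fsm_init_done();
	return 0;
}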
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index e0c502bc4270..9f16128e4ffa 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -24,10 +24,7 @@
/* Default operating mode is wlan STA + BT */
static u16 dev_oper_mode = DEV_OPMODE_STA_BT_DUAL;
module_param(dev_oper_mode, ushort, 0444);
-MODULE_PARM_DESC(dev_oper_mode,
- "1[Wi-Fi], 4[BT], 8[BT LE], 5[Wi-Fi STA + BT classic]\n"
- "9[Wi-Fi STA + BT LE], 13[Wi-Fi STA + BT classic + BT LE]\n"
- "6[AP + BT classic], 14[AP + BT classic + BT LE]");
+MODULE_PARM_DESC(dev_oper_mode, DEV_OPMODE_PARAM_DESC);
/**
* rsi_sdio_set_cmd52_arg() - This function prepares cmd 52 read/write arg.
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 416976f09888..6a120211800d 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -25,10 +25,7 @@
/* Default operating mode is wlan STA + BT */
static u16 dev_oper_mode = DEV_OPMODE_STA_BT_DUAL;
module_param(dev_oper_mode, ushort, 0444);
-MODULE_PARM_DESC(dev_oper_mode,
- "1[Wi-Fi], 4[BT], 8[BT LE], 5[Wi-Fi STA + BT classic]\n"
- "9[Wi-Fi STA + BT LE], 13[Wi-Fi STA + BT classic + BT LE]\n"
- "6[AP + BT classic], 14[AP + BT classic + BT LE]");
+MODULE_PARM_DESC(dev_oper_mode, DEV_OPMODE_PARAM_DESC);
static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t flags);
diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h
index d044a440fa08..5b07262a9740 100644
--- a/drivers/net/wireless/rsi/rsi_hal.h
+++ b/drivers/net/wireless/rsi/rsi_hal.h
@@ -28,6 +28,17 @@
#define DEV_OPMODE_AP_BT 6
#define DEV_OPMODE_AP_BT_DUAL 14
+#define DEV_OPMODE_PARAM_DESC \
+ __stringify(DEV_OPMODE_WIFI_ALONE) "[Wi-Fi alone], " \
+ __stringify(DEV_OPMODE_BT_ALONE) "[BT classic alone], " \
+ __stringify(DEV_OPMODE_BT_LE_ALONE) "[BT LE alone], " \
+ __stringify(DEV_OPMODE_BT_DUAL) "[BT classic + BT LE alone], " \
+ __stringify(DEV_OPMODE_STA_BT) "[Wi-Fi STA + BT classic], " \
+ __stringify(DEV_OPMODE_STA_BT_LE) "[Wi-Fi STA + BT LE], " \
+ __stringify(DEV_OPMODE_STA_BT_DUAL) "[Wi-Fi STA + BT classic + BT LE], " \
+ __stringify(DEV_OPMODE_AP_BT) "[Wi-Fi AP + BT classic], " \
+ __stringify(DEV_OPMODE_AP_BT_DUAL) "[Wi-Fi AP + BT classic + BT LE]"
+
#define FLASH_WRITE_CHUNK_SIZE (4 * 1024)
#define FLASH_SECTOR_SIZE (4 * 1024)
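DEV_OPMODE_PARAM_DESC builds the module-parameter help string from the opmode constants at preprocessing time, so the text can never drift from the values. The kernel's __stringify() needs two macro levels so its argument is expanded before being quoted; a compilable demo with two of the constants:

#include <stdio.h>

/* Same two-level trick as the kernel's __stringify(). */
#define __stringify_1(x) #x
#define __stringify(x)   __stringify_1(x)

#define DEV_OPMODE_WIFI_ALONE 1
#define DEV_OPMODE_BT_ALONE   4

#define PARAM_DESC \
	__stringify(DEV_OPMODE_WIFI_ALONE) "[Wi-Fi alone], " \
	__stringify(DEV_OPMODE_BT_ALONE) "[BT classic alone]"

int main(void)
{
	puts(PARAM_DESC);	/* -> "1[Wi-Fi alone], 4[BT classic alone]" */
	return 0;
}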
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index 0f535850a383..dcf8fb40698b 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -61,6 +61,7 @@ enum RSI_FSM_STATES {
extern u32 rsi_zone_enabled;
extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...);
+#define RSI_MAX_BANDS 2
#define RSI_MAX_VIFS 3
#define NUM_EDCA_QUEUES 4
#define IEEE80211_ADDR_LEN 6
@@ -139,6 +140,7 @@ struct skb_info {
u8 internal_hdr_size;
struct ieee80211_vif *vif;
u8 vap_id;
+ bool have_key;
};
enum edca_queue {
@@ -229,6 +231,12 @@ struct rsi_9116_features {
u32 ps_options;
};
+struct rsi_rate_config {
+ u32 configured_mask; /* set by mac80211; bits 0-11 legacy, bits 12+ MCS */
+ u16 fixed_hw_rate;
+ bool fixed_enabled;
+};
+
struct rsi_common {
struct rsi_hw *priv;
struct vif_priv vif_info[RSI_MAX_VIFS];
@@ -254,8 +262,8 @@ struct rsi_common {
u8 channel_width;
u16 rts_threshold;
- u16 bitrate_mask[2];
- u32 fixedrate_mask[2];
+ u32 bitrate_mask[RSI_MAX_BANDS];
+ struct rsi_rate_config rate_config[RSI_MAX_BANDS];
u8 rf_reset;
struct transmit_q_stats tx_stats;
@@ -276,7 +284,6 @@ struct rsi_common {
u8 mac_id;
u8 radio_id;
u16 rate_pwr[20];
- u16 min_rate;
/* WMM algo related */
u8 selected_qnum;
@@ -320,6 +327,7 @@ struct rsi_common {
struct ieee80211_vif *roc_vif;
bool eapol4_confirm;
+ bool bt_defer_attach;
void *bt_adapter;
struct cfg80211_scan_request *hwscan;
@@ -401,5 +409,6 @@ struct rsi_host_intf_ops {
enum rsi_host_intf rsi_get_host_intf(void *priv);
void rsi_set_bt_context(void *priv, void *bt_context);
+void rsi_attach_bt(struct rsi_common *common);
#endif
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index a7ceef10bf6a..850c26bc9524 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -65,7 +65,6 @@ static const struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x0586, 0x3412), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
- { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0xe501), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
index 42dbe7fe663c..17da85a8f337 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
@@ -23,31 +23,11 @@ static int ipc_devlink_get_param(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct iosm_devlink *ipc_devlink = devlink_priv(dl);
- int rc = 0;
- switch (id) {
- case IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH:
+ if (id == IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH)
ctx->val.vu8 = ipc_devlink->param.erase_full_flash;
- break;
-
- case IOSM_DEVLINK_PARAM_ID_DOWNLOAD_REGION:
- ctx->val.vu8 = ipc_devlink->param.download_region;
- break;
-
- case IOSM_DEVLINK_PARAM_ID_ADDRESS:
- ctx->val.vu32 = ipc_devlink->param.address;
- break;
- case IOSM_DEVLINK_PARAM_ID_REGION_COUNT:
- ctx->val.vu8 = ipc_devlink->param.region_count;
- break;
-
- default:
- rc = -EOPNOTSUPP;
- break;
- }
-
- return rc;
+ return 0;
}
/* Set the param values for the specific param ID's */
@@ -55,31 +35,11 @@ static int ipc_devlink_set_param(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct iosm_devlink *ipc_devlink = devlink_priv(dl);
- int rc = 0;
- switch (id) {
- case IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH:
+ if (id == IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH)
ipc_devlink->param.erase_full_flash = ctx->val.vu8;
- break;
-
- case IOSM_DEVLINK_PARAM_ID_DOWNLOAD_REGION:
- ipc_devlink->param.download_region = ctx->val.vu8;
- break;
-
- case IOSM_DEVLINK_PARAM_ID_ADDRESS:
- ipc_devlink->param.address = ctx->val.vu32;
- break;
-
- case IOSM_DEVLINK_PARAM_ID_REGION_COUNT:
- ipc_devlink->param.region_count = ctx->val.vu8;
- break;
-
- default:
- rc = -EOPNOTSUPP;
- break;
- }
- return rc;
+ return 0;
}
/* Devlink param structure array */
@@ -89,21 +49,6 @@ static const struct devlink_param iosm_devlink_params[] = {
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
ipc_devlink_get_param, ipc_devlink_set_param,
NULL),
- DEVLINK_PARAM_DRIVER(IOSM_DEVLINK_PARAM_ID_DOWNLOAD_REGION,
- "download_region", DEVLINK_PARAM_TYPE_BOOL,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- ipc_devlink_get_param, ipc_devlink_set_param,
- NULL),
- DEVLINK_PARAM_DRIVER(IOSM_DEVLINK_PARAM_ID_ADDRESS,
- "address", DEVLINK_PARAM_TYPE_U32,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- ipc_devlink_get_param, ipc_devlink_set_param,
- NULL),
- DEVLINK_PARAM_DRIVER(IOSM_DEVLINK_PARAM_ID_REGION_COUNT,
- "region_count", DEVLINK_PARAM_TYPE_U8,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- ipc_devlink_get_param, ipc_devlink_set_param,
- NULL),
};
/* Get devlink flash component type */
@@ -134,18 +79,23 @@ static int ipc_devlink_flash_update(struct devlink *devlink,
{
struct iosm_devlink *ipc_devlink = devlink_priv(devlink);
enum iosm_flash_comp_type fls_type;
+ struct iosm_devlink_image *header;
int rc = -EINVAL;
u8 *mdm_rsp;
- if (!params->component)
+ header = (struct iosm_devlink_image *)params->fw->data;
+
+ if (!header || params->fw->size <= IOSM_DEVLINK_HDR_SIZE ||
+ (memcmp(header->magic_header, IOSM_DEVLINK_MAGIC_HEADER,
+ IOSM_DEVLINK_MAGIC_HEADER_LEN) != 0))
return -EINVAL;
mdm_rsp = kzalloc(IOSM_EBL_DW_PACK_SIZE, GFP_KERNEL);
if (!mdm_rsp)
return -ENOMEM;
- fls_type = ipc_devlink_get_flash_comp_type(params->component,
- strlen(params->component));
+ fls_type = ipc_devlink_get_flash_comp_type(header->image_type,
+ IOSM_DEVLINK_MAX_IMG_LEN);
switch (fls_type) {
case FLASH_COMP_TYPE_PSI:
@@ -165,16 +115,16 @@ static int ipc_devlink_flash_update(struct devlink *devlink,
break;
default:
devlink_flash_update_status_notify(devlink, "Invalid component",
- params->component, 0, 0);
+ NULL, 0, 0);
break;
}
if (!rc)
devlink_flash_update_status_notify(devlink, "Flashing success",
- params->component, 0, 0);
+ header->image_type, 0, 0);
else
devlink_flash_update_status_notify(devlink, "Flashing failed",
- params->component, 0, 0);
+ header->image_type, 0, 0);
kfree(mdm_rsp);
return rc;
@@ -182,7 +132,6 @@ static int ipc_devlink_flash_update(struct devlink *devlink,
/* Call back function for devlink ops */
static const struct devlink_ops devlink_flash_ops = {
- .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT,
.flash_update = ipc_devlink_flash_update,
};
@@ -305,7 +254,6 @@ struct iosm_devlink *ipc_devlink_init(struct iosm_imem *ipc_imem)
ipc_devlink->devlink_ctx = devlink_ctx;
ipc_devlink->pcie = ipc_imem->pcie;
ipc_devlink->dev = ipc_imem->dev;
- devlink_register(devlink_ctx);
rc = devlink_params_register(devlink_ctx, iosm_devlink_params,
ARRAY_SIZE(iosm_devlink_params));
@@ -315,7 +263,6 @@ struct iosm_devlink *ipc_devlink_init(struct iosm_imem *ipc_imem)
goto param_reg_fail;
}
- devlink_params_publish(devlink_ctx);
ipc_devlink->cd_file_info = list;
rc = ipc_devlink_create_region(ipc_devlink);
@@ -334,6 +281,7 @@ struct iosm_devlink *ipc_devlink_init(struct iosm_imem *ipc_imem)
init_completion(&ipc_devlink->devlink_sio.read_sem);
skb_queue_head_init(&ipc_devlink->devlink_sio.rx_list);
+ devlink_register(devlink_ctx);
dev_dbg(ipc_devlink->dev, "iosm devlink register success");
return ipc_devlink;
@@ -341,7 +289,6 @@ struct iosm_devlink *ipc_devlink_init(struct iosm_imem *ipc_imem)
chnl_get_fail:
ipc_devlink_destroy_region(ipc_devlink);
region_create_fail:
- devlink_params_unpublish(devlink_ctx);
devlink_params_unregister(devlink_ctx, iosm_devlink_params,
ARRAY_SIZE(iosm_devlink_params));
param_reg_fail:
@@ -358,8 +305,8 @@ void ipc_devlink_deinit(struct iosm_devlink *ipc_devlink)
{
struct devlink *devlink_ctx = ipc_devlink->devlink_ctx;
+ devlink_unregister(devlink_ctx);
ipc_devlink_destroy_region(ipc_devlink);
- devlink_params_unpublish(devlink_ctx);
devlink_params_unregister(devlink_ctx, iosm_devlink_params,
ARRAY_SIZE(iosm_devlink_params));
if (ipc_devlink->devlink_sio.devlink_read_pend) {
@@ -370,6 +317,5 @@ void ipc_devlink_deinit(struct iosm_devlink *ipc_devlink)
skb_queue_purge(&ipc_devlink->devlink_sio.rx_list);
ipc_imem_sys_devlink_close(ipc_devlink);
- devlink_unregister(devlink_ctx);
devlink_free(devlink_ctx);
}
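Note the ordering change above: devlink_register() moves to the very end of init and devlink_unregister() to the start of deinit, so userspace never sees a device whose params and regions are not yet set up. The general shape, sketched in plain C with illustrative step names:

#include <stdio.h>

static int setup_params(void)  { puts("params");  return 0; }
static int setup_regions(void) { puts("regions"); return 0; }
static void publish(void)      { puts("visible to userspace"); }

int main(void)
{
	if (setup_params())
		return 1;
	if (setup_regions())
		goto undo_params;

	publish();	/* last step; nothing can fail after this */
	return 0;

undo_params:
	puts("unwind params");	/* teardown mirrors init in reverse */
	return 1;
}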
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.h b/drivers/net/wwan/iosm/iosm_ipc_devlink.h
index fa2b388a2f8a..35c2d013b9cc 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_devlink.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.h
@@ -12,6 +12,18 @@
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_pcie.h"
+/* Image extension max length */
+#define IOSM_DEVLINK_MAX_IMG_LEN 3
+/* Magic Header */
+#define IOSM_DEVLINK_MAGIC_HEADER "IOSM_DEVLINK_HEADER"
+/* Magic Header len */
+#define IOSM_DEVLINK_MAGIC_HEADER_LEN 20
+/* Devlink image type */
+#define IOSM_DEVLINK_IMG_TYPE 4
+/* Reserved field size */
+#define IOSM_DEVLINK_RESERVED 34
+/* Devlink Image Header size */
+#define IOSM_DEVLINK_HDR_SIZE sizeof(struct iosm_devlink_image)
/* MAX file name length */
#define IOSM_MAX_FILENAME_LEN 32
/* EBL response size */
@@ -32,19 +44,11 @@
* enum iosm_devlink_param_id - Enum type to different devlink params
* @IOSM_DEVLINK_PARAM_ID_BASE: Devlink param base ID
* @IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH: Set if full erase required
- * @IOSM_DEVLINK_PARAM_ID_DOWNLOAD_REGION: Set if fls file to be
- * flashed is Loadmap/region file
- * @IOSM_DEVLINK_PARAM_ID_ADDRESS: Address of the region to be
- * flashed
- * @IOSM_DEVLINK_PARAM_ID_REGION_COUNT: Max region count
*/
enum iosm_devlink_param_id {
IOSM_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH,
- IOSM_DEVLINK_PARAM_ID_DOWNLOAD_REGION,
- IOSM_DEVLINK_PARAM_ID_ADDRESS,
- IOSM_DEVLINK_PARAM_ID_REGION_COUNT,
};
/**
@@ -94,23 +98,35 @@ struct iosm_devlink_sio {
/**
* struct iosm_flash_params - List of flash params required for flashing
- * @address: Address of the region file to be flashed
- * @region_count: Maximum no of regions for each fls file
- * @download_region: To be set if region is being flashed
* @erase_full_flash: To set the flashing mode
* erase_full_flash = 1; full erase
* erase_full_flash = 0; no erase
* @erase_full_flash_done: Flag to check if it is a full erase
*/
struct iosm_flash_params {
- u32 address;
- u8 region_count;
- u8 download_region;
u8 erase_full_flash;
u8 erase_full_flash_done;
};
/**
+ * struct iosm_devlink_image - Structure with Fls file header info
+ * @magic_header: Header of the firmware image
+ * @image_type: Firmware image type
+ * @region_address: Address of the region to be flashed
+ * @download_region: Field to identify if it is a region
+ * @last_region: Field to identify if it is last region
+ * @reserved: Reserved field
+ */
+struct iosm_devlink_image {
+ char magic_header[IOSM_DEVLINK_MAGIC_HEADER_LEN];
+ char image_type[IOSM_DEVLINK_IMG_TYPE];
+ __le32 region_address;
+ u8 download_region;
+ u8 last_region;
+ u8 reserved[IOSM_DEVLINK_RESERVED];
+} __packed;
+
+/**
* struct iosm_ebl_ctx_data - EBL ctx data used during flashing
* @ebl_sw_info_version: SWID version info obtained from EBL
* @m_ebl_resp: Buffer used to read and write the ebl data
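With the devlink params gone, everything the flash path needs now rides in this packed header at the front of the firmware blob, validated by magic before use. A user-space sketch of that layout and check, reusing the sizes defined above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAGIC      "IOSM_DEVLINK_HEADER"
#define MAGIC_LEN  20
#define IMG_TYPE   4
#define RESERVED   34

struct img_hdr {
	char magic_header[MAGIC_LEN];
	char image_type[IMG_TYPE];
	uint32_t region_address;	/* little-endian on the wire */
	uint8_t download_region;
	uint8_t last_region;
	uint8_t reserved[RESERVED];
} __attribute__((packed));

static int check_image(const uint8_t *data, size_t size)
{
	const struct img_hdr *hdr = (const void *)data;

	if (size <= sizeof(*hdr) ||
	    memcmp(hdr->magic_header, MAGIC, MAGIC_LEN) != 0)
		return -1;	/* reject, as the patch does */
	return 0;
}

int main(void)
{
	uint8_t buf[sizeof(struct img_hdr) + 8] = { 0 };

	memcpy(buf, MAGIC, sizeof(MAGIC));	/* includes the NUL */
	printf("valid=%d\n", check_image(buf, sizeof(buf)) == 0);
	return 0;
}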
diff --git a/drivers/net/wwan/iosm/iosm_ipc_flash.c b/drivers/net/wwan/iosm/iosm_ipc_flash.c
index ebceedf7c9f5..d890914aa349 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_flash.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_flash.c
@@ -330,18 +330,20 @@ ipc_flash_erase_err:
static int ipc_flash_download_region(struct iosm_devlink *ipc_devlink,
const struct firmware *fw, u8 *mdm_rsp)
{
+ u32 raw_len, rest_len = fw->size - IOSM_DEVLINK_HDR_SIZE;
+ struct iosm_devlink_image *fls_data;
__le32 reg_info[2]; /* 0th position region address, 1st position size */
+ u32 nand_address;
char *file_ptr;
- u32 rest_len;
- u32 raw_len;
int ret;
- file_ptr = (char *)fw->data;
- reg_info[0] = cpu_to_le32(ipc_devlink->param.address);
+ fls_data = (struct iosm_devlink_image *)fw->data;
+ file_ptr = (void *)(fls_data + 1);
+ nand_address = le32_to_cpu(fls_data->region_address);
+ reg_info[0] = cpu_to_le32(nand_address);
if (!ipc_devlink->param.erase_full_flash_done) {
- reg_info[1] = cpu_to_le32(ipc_devlink->param.address +
- fw->size - 2);
+ reg_info[1] = cpu_to_le32(nand_address + rest_len - 2);
ret = ipc_flash_send_receive(ipc_devlink, FLASH_ERASE_START,
(u8 *)reg_info, IOSM_MDM_SEND_8,
mdm_rsp);
@@ -359,8 +361,6 @@ static int ipc_flash_download_region(struct iosm_devlink *ipc_devlink,
if (ret)
goto dl_region_fail;
- rest_len = fw->size;
-
/* Request Flash Write Raw Image */
ret = ipc_flash_send_data(ipc_devlink, IOSM_EBL_DW_PACK_SIZE,
FLASH_WRITE_IMAGE_RAW, (u8 *)&rest_len,
@@ -399,9 +399,12 @@ dl_region_fail:
int ipc_flash_send_fls(struct iosm_devlink *ipc_devlink,
const struct firmware *fw, u8 *mdm_rsp)
{
+ u32 fw_size = fw->size - IOSM_DEVLINK_HDR_SIZE;
+ struct iosm_devlink_image *fls_data;
u16 flash_cmd;
int ret;
+ fls_data = (struct iosm_devlink_image *)fw->data;
if (ipc_devlink->param.erase_full_flash) {
ipc_devlink->param.erase_full_flash = false;
ret = ipc_flash_full_erase(ipc_devlink, mdm_rsp);
@@ -410,19 +413,20 @@ int ipc_flash_send_fls(struct iosm_devlink *ipc_devlink,
}
/* Request Sec Start */
- if (!ipc_devlink->param.download_region) {
+ if (!fls_data->download_region) {
ret = ipc_flash_send_receive(ipc_devlink, FLASH_SEC_START,
- (u8 *)fw->data, fw->size, mdm_rsp);
+ (u8 *)fw->data +
+ IOSM_DEVLINK_HDR_SIZE, fw_size,
+ mdm_rsp);
if (ret)
goto ipc_flash_err;
} else {
/* Download regions */
- ipc_devlink->param.region_count -= IOSM_SET_FLAG;
ret = ipc_flash_download_region(ipc_devlink, fw, mdm_rsp);
if (ret)
goto ipc_flash_err;
- if (!ipc_devlink->param.region_count) {
+ if (fls_data->last_region) {
/* Request Sec End */
flash_cmd = IOSM_MDM_SEND_DATA;
ret = ipc_flash_send_receive(ipc_devlink, FLASH_SEC_END,
@@ -445,17 +449,18 @@ ipc_flash_err:
int ipc_flash_boot_psi(struct iosm_devlink *ipc_devlink,
const struct firmware *fw)
{
+ u32 bytes_read, psi_size = fw->size - IOSM_DEVLINK_HDR_SIZE;
u8 psi_ack_byte[IOSM_PSI_ACK], read_data[2];
- u32 bytes_read;
u8 *psi_code;
int ret;
dev_dbg(ipc_devlink->dev, "Boot transfer PSI");
- psi_code = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ psi_code = kmemdup(fw->data + IOSM_DEVLINK_HDR_SIZE, psi_size,
+ GFP_KERNEL);
if (!psi_code)
return -ENOMEM;
- ret = ipc_imem_sys_devlink_write(ipc_devlink, psi_code, fw->size);
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, psi_code, psi_size);
if (ret) {
dev_err(ipc_devlink->dev, "RPSI Image write failed");
goto ipc_flash_psi_free;
@@ -501,7 +506,7 @@ ipc_flash_psi_free:
int ipc_flash_boot_ebl(struct iosm_devlink *ipc_devlink,
const struct firmware *fw)
{
- u32 ebl_size = fw->size;
+ u32 ebl_size = fw->size - IOSM_DEVLINK_HDR_SIZE;
u8 read_data[2];
u32 bytes_read;
int ret;
@@ -553,8 +558,9 @@ int ipc_flash_boot_ebl(struct iosm_devlink *ipc_devlink,
goto ipc_flash_ebl_err;
}
- ret = ipc_imem_sys_devlink_write(ipc_devlink, (unsigned char *)fw->data,
- fw->size);
+ ret = ipc_imem_sys_devlink_write(ipc_devlink,
+ (u8 *)fw->data + IOSM_DEVLINK_HDR_SIZE,
+ ebl_size);
if (ret) {
dev_err(ipc_devlink->dev, "EBL data transfer failed");
goto ipc_flash_ebl_err;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index 1cf49e9959f4..cff3b43ca4d7 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -476,8 +476,8 @@ static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
container_of(hr_timer, struct iosm_imem, startup_timer);
if (ktime_to_ns(ipc_imem->hrtimer_period)) {
- hrtimer_forward(&ipc_imem->startup_timer, ktime_get(),
- ipc_imem->hrtimer_period);
+ hrtimer_forward_now(&ipc_imem->startup_timer,
+ ipc_imem->hrtimer_period);
result = HRTIMER_RESTART;
}
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index c58996c1e230..fe8e21ad8ed9 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -494,6 +494,9 @@ static const struct net_device_ops xenvif_netdev_ops = {
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
unsigned int handle)
{
+ static const u8 dummy_addr[ETH_ALEN] = {
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff,
+ };
int err;
struct net_device *dev;
struct xenvif *vif;
@@ -551,8 +554,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
* stolen by an Ethernet bridge for STP purposes.
* (FE:FF:FF:FF:FF:FF)
*/
- eth_broadcast_addr(dev->dev_addr);
- dev->dev_addr[0] &= ~0x01;
+ eth_hw_addr_set(dev, dummy_addr);
netif_carrier_off(dev);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 39a01c2a3058..0f7fd159f0f2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -499,7 +499,7 @@ check_frags:
* the header's copy failed, and they are
* sharing a slot, send an error
*/
- if (i == 0 && sharedslot)
+ if (i == 0 && !first_shinfo && sharedslot)
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_ERROR);
else
@@ -1474,7 +1474,7 @@ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
RING_IDX rsp_prod, req_prod;
- int err = -ENOMEM;
+ int err;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
&tx_ring_ref, 1, &addr);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e31b98403f31..57437e4b8a94 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -2157,6 +2157,7 @@ static int talk_to_netback(struct xenbus_device *dev,
unsigned int max_queues = 0;
struct netfront_queue *queue = NULL;
unsigned int num_queues = 1;
+ u8 addr[ETH_ALEN];
info->netdev->irq = 0;
@@ -2170,11 +2171,12 @@ static int talk_to_netback(struct xenbus_device *dev,
"feature-split-event-channels", 0);
/* Read mac addr. */
- err = xen_net_read_mac(dev, info->netdev->dev_addr);
+ err = xen_net_read_mac(dev, addr);
if (err) {
xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
goto out_unlocked;
}
+ eth_hw_addr_set(info->netdev, addr);
info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
"feature-xdp-headroom", 0);
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 86f593c73ed6..067295124eb9 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -237,8 +237,6 @@ static int microread_i2c_probe(struct i2c_client *client,
struct microread_i2c_phy *phy;
int r;
- dev_dbg(&client->dev, "client %p\n", client);
-
phy = devm_kzalloc(&client->dev, sizeof(struct microread_i2c_phy),
GFP_KERNEL);
if (!phy)
@@ -262,8 +260,6 @@ static int microread_i2c_probe(struct i2c_client *client,
if (r < 0)
goto err_irq;
- nfc_info(&client->dev, "Probed\n");
-
return 0;
err_irq:
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index 00689e18dc46..e2a77a5fc887 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -23,8 +23,6 @@ static int microread_mei_probe(struct mei_cl_device *cldev,
struct nfc_mei_phy *phy;
int r;
- pr_info("Probing NFC microread\n");
-
phy = nfc_mei_phy_alloc(cldev);
if (!phy)
return -ENOMEM;
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index f5610b6b9894..673eb5e9b887 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -156,7 +156,7 @@ static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data)
return IRQ_HANDLED;
}
-static struct pn533_phy_ops i2c_phy_ops = {
+static const struct pn533_phy_ops i2c_phy_ops = {
.send_frame = pn533_i2c_send_frame,
.send_ack = pn533_i2c_send_ack,
.abort_cmd = pn533_i2c_abort_cmd,
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index da180335422c..787bcbd290f7 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -2733,7 +2733,7 @@ EXPORT_SYMBOL_GPL(pn533_finalize_setup);
struct pn533 *pn53x_common_init(u32 device_type,
enum pn533_protocol_type protocol_type,
void *phy,
- struct pn533_phy_ops *phy_ops,
+ const struct pn533_phy_ops *phy_ops,
struct pn533_frame_ops *fops,
struct device *dev)
{
diff --git a/drivers/nfc/pn533/pn533.h b/drivers/nfc/pn533/pn533.h
index 5f94f38a2a08..09e35b8693f5 100644
--- a/drivers/nfc/pn533/pn533.h
+++ b/drivers/nfc/pn533/pn533.h
@@ -177,7 +177,7 @@ struct pn533 {
struct device *dev;
void *phy;
- struct pn533_phy_ops *phy_ops;
+ const struct pn533_phy_ops *phy_ops;
};
typedef int (*pn533_send_async_complete_t) (struct pn533 *dev, void *arg,
@@ -232,7 +232,7 @@ struct pn533_phy_ops {
struct pn533 *pn53x_common_init(u32 device_type,
enum pn533_protocol_type protocol_type,
void *phy,
- struct pn533_phy_ops *phy_ops,
+ const struct pn533_phy_ops *phy_ops,
struct pn533_frame_ops *fops,
struct device *dev);
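Constifying pn533_phy_ops lets every ops table live in .rodata; the core only ever reads through the pointer. The pattern in miniature:

#include <stdio.h>

struct phy_ops { int (*send)(const char *); };

static int usb_send(const char *f) { printf("send %s\n", f); return 0; }

/* const: the table cannot be re-pointed or modified at runtime */
static const struct phy_ops usb_phy_ops = { .send = usb_send };

int main(void)
{
	const struct phy_ops *ops = &usb_phy_ops;

	return ops->send("frame");
}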
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index 7bdaf8263070..2caf997f9bc9 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -123,7 +123,7 @@ static int pn532_dev_down(struct pn533 *dev)
return 0;
}
-static struct pn533_phy_ops uart_phy_ops = {
+static const struct pn533_phy_ops uart_phy_ops = {
.send_frame = pn532_uart_send_frame,
.send_ack = pn532_uart_send_ack,
.abort_cmd = pn532_uart_abort_cmd,
@@ -224,7 +224,7 @@ static int pn532_receive_buf(struct serdev_device *serdev,
return i;
}
-static struct serdev_device_ops pn532_serdev_ops = {
+static const struct serdev_device_ops pn532_serdev_ops = {
.receive_buf = pn532_receive_buf,
.write_wakeup = serdev_device_write_wakeup,
};
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index bd7f7478d189..6f71ac72012e 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -429,7 +429,7 @@ static void pn533_send_complete(struct urb *urb)
}
}
-static struct pn533_phy_ops usb_phy_ops = {
+static const struct pn533_phy_ops usb_phy_ops = {
.send_frame = pn533_usb_send_frame,
.send_ack = pn533_usb_send_ack,
.abort_cmd = pn533_usb_abort_cmd,
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index 1af7a1e632cf..c20fdbac51c5 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -357,6 +357,7 @@ s3fwrn5_fw_is_custom(const struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo)
int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info)
{
+ struct device *dev = &fw_info->ndev->nfc_dev->dev;
struct s3fwrn5_fw_cmd_get_bootinfo_rsp bootinfo;
int ret;
@@ -364,8 +365,7 @@ int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info)
ret = s3fwrn5_fw_get_bootinfo(fw_info, &bootinfo);
if (ret < 0) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Failed to get bootinfo, ret=%02x\n", ret);
+ dev_err(dev, "Failed to get bootinfo, ret=%02x\n", ret);
goto err;
}
@@ -373,8 +373,7 @@ int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info)
ret = s3fwrn5_fw_get_base_addr(&bootinfo, &fw_info->base_addr);
if (ret < 0) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Unknown hardware version\n");
+ dev_err(dev, "Unknown hardware version\n");
goto err;
}
@@ -409,6 +408,7 @@ bool s3fwrn5_fw_check_version(const struct s3fwrn5_fw_info *fw_info, u32 version
int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
{
+ struct device *dev = &fw_info->ndev->nfc_dev->dev;
struct s3fwrn5_fw_image *fw = &fw_info->fw;
u8 hash_data[SHA1_DIGEST_SIZE];
struct crypto_shash *tfm;
@@ -421,8 +421,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
tfm = crypto_alloc_shash("sha1", 0, 0);
if (IS_ERR(tfm)) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Cannot allocate shash (code=%pe)\n", tfm);
+ dev_err(dev, "Cannot allocate shash (code=%pe)\n", tfm);
return PTR_ERR(tfm);
}
@@ -430,21 +429,18 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
crypto_free_shash(tfm);
if (ret) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Cannot compute hash (code=%d)\n", ret);
+ dev_err(dev, "Cannot compute hash (code=%d)\n", ret);
return ret;
}
/* Firmware update process */
- dev_info(&fw_info->ndev->nfc_dev->dev,
- "Firmware update: %s\n", fw_info->fw_name);
+ dev_info(dev, "Firmware update: %s\n", fw_info->fw_name);
ret = s3fwrn5_fw_enter_update_mode(fw_info, hash_data,
SHA1_DIGEST_SIZE, fw_info->sig, fw_info->sig_size);
if (ret < 0) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Unable to enter update mode\n");
+ dev_err(dev, "Unable to enter update mode\n");
return ret;
}
@@ -452,21 +448,18 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
ret = s3fwrn5_fw_update_sector(fw_info,
fw_info->base_addr + off, fw->image + off);
if (ret < 0) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Firmware update error (code=%d)\n", ret);
+ dev_err(dev, "Firmware update error (code=%d)\n", ret);
return ret;
}
}
ret = s3fwrn5_fw_complete_update_mode(fw_info);
if (ret < 0) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Unable to complete update mode\n");
+ dev_err(dev, "Unable to complete update mode\n");
return ret;
}
- dev_info(&fw_info->ndev->nfc_dev->dev,
- "Firmware update: success\n");
+ dev_info(dev, "Firmware update: success\n");
return ret;
}
diff --git a/drivers/nfc/s3fwrn5/nci.c b/drivers/nfc/s3fwrn5/nci.c
index e374e670b36b..ca6828f55ba0 100644
--- a/drivers/nfc/s3fwrn5/nci.c
+++ b/drivers/nfc/s3fwrn5/nci.c
@@ -47,6 +47,7 @@ const struct nci_driver_ops s3fwrn5_nci_prop_ops[4] = {
int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
{
+ struct device *dev = &info->ndev->nfc_dev->dev;
const struct firmware *fw;
struct nci_prop_fw_cfg_cmd fw_cfg;
struct nci_prop_set_rfreg_cmd set_rfreg;
@@ -55,7 +56,7 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
int i, len;
int ret;
- ret = request_firmware(&fw, fw_name, &info->ndev->nfc_dev->dev);
+ ret = request_firmware(&fw, fw_name, dev);
if (ret < 0)
return ret;
@@ -77,13 +78,11 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
/* Start rfreg configuration */
- dev_info(&info->ndev->nfc_dev->dev,
- "rfreg configuration update: %s\n", fw_name);
+ dev_info(dev, "rfreg configuration update: %s\n", fw_name);
ret = nci_prop_cmd(info->ndev, NCI_PROP_START_RFREG, 0, NULL);
if (ret < 0) {
- dev_err(&info->ndev->nfc_dev->dev,
- "Unable to start rfreg update\n");
+ dev_err(dev, "Unable to start rfreg update\n");
goto out;
}
@@ -97,8 +96,7 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
ret = nci_prop_cmd(info->ndev, NCI_PROP_SET_RFREG,
len+1, (__u8 *)&set_rfreg);
if (ret < 0) {
- dev_err(&info->ndev->nfc_dev->dev,
- "rfreg update error (code=%d)\n", ret);
+ dev_err(dev, "rfreg update error (code=%d)\n", ret);
goto out;
}
set_rfreg.index++;
@@ -110,13 +108,11 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
ret = nci_prop_cmd(info->ndev, NCI_PROP_STOP_RFREG,
sizeof(stop_rfreg), (__u8 *)&stop_rfreg);
if (ret < 0) {
- dev_err(&info->ndev->nfc_dev->dev,
- "Unable to stop rfreg update\n");
+ dev_err(dev, "Unable to stop rfreg update\n");
goto out;
}
- dev_info(&info->ndev->nfc_dev->dev,
- "rfreg configuration update: success\n");
+ dev_info(dev, "rfreg configuration update: success\n");
out:
release_firmware(fw);
return ret;
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index ccf6152ebb9f..cbd968f013c7 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -157,7 +157,6 @@ static int st_nci_i2c_read(struct st_nci_i2c_phy *phy,
static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
{
struct st_nci_i2c_phy *phy = phy_id;
- struct i2c_client *client;
struct sk_buff *skb = NULL;
int r;
@@ -166,9 +165,6 @@ static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
return IRQ_NONE;
}
- client = phy->i2c_dev;
- dev_dbg(&client->dev, "IRQ\n");
-
if (phy->ndlc->hard_fault)
return IRQ_HANDLED;
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index e9dc313b333e..755460a73c0d 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -239,8 +239,6 @@ static void ndlc_t1_timeout(struct timer_list *t)
{
struct llt_ndlc *ndlc = from_timer(ndlc, t, t1_timer);
- pr_debug("\n");
-
schedule_work(&ndlc->sm_work);
}
@@ -248,8 +246,6 @@ static void ndlc_t2_timeout(struct timer_list *t)
{
struct llt_ndlc *ndlc = from_timer(ndlc, t, t2_timer);
- pr_debug("\n");
-
schedule_work(&ndlc->sm_work);
}
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index 5fd89f72969d..7764b1a4c3cf 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -638,8 +638,6 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
{
struct st_nci_info *info = nci_get_drvdata(ndev);
- pr_debug("\n");
-
switch (se_idx) {
case ST_NCI_ESE_HOST_ID:
info->se_info.cb = cb;
@@ -671,8 +669,6 @@ static void st_nci_se_wt_timeout(struct timer_list *t)
u8 param = 0x01;
struct st_nci_info *info = from_timer(info, t, se_info.bwi_timer);
- pr_debug("\n");
-
info->se_info.bwi_active = false;
if (!info->se_info.xch_error) {
@@ -692,8 +688,6 @@ static void st_nci_se_activation_timeout(struct timer_list *t)
struct st_nci_info *info = from_timer(info, t,
se_info.se_active_timer);
- pr_debug("\n");
-
info->se_info.se_active = false;
complete(&info->se_info.req_completion);
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index a620c34790e6..4e723992e74c 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -169,7 +169,6 @@ static int st_nci_spi_read(struct st_nci_spi_phy *phy,
static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
{
struct st_nci_spi_phy *phy = phy_id;
- struct spi_device *dev;
struct sk_buff *skb = NULL;
int r;
@@ -178,9 +177,6 @@ static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
return IRQ_NONE;
}
- dev = phy->spi_dev;
- dev_dbg(&dev->dev, "IRQ\n");
-
if (phy->ndlc->hard_fault)
return IRQ_HANDLED;
@@ -278,6 +274,7 @@ static int st_nci_spi_remove(struct spi_device *dev)
static struct spi_device_id st_nci_spi_id_table[] = {
{ST_NCI_SPI_DRIVER_NAME, 0},
+ {"st21nfcb-spi", 0},
{}
};
MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table);
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 279d88128b2e..f126ce96a7df 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -421,7 +421,6 @@ static int st21nfca_hci_i2c_read(struct st21nfca_i2c_phy *phy,
static irqreturn_t st21nfca_hci_irq_thread_fn(int irq, void *phy_id)
{
struct st21nfca_i2c_phy *phy = phy_id;
- struct i2c_client *client;
int r;
@@ -430,9 +429,6 @@ static irqreturn_t st21nfca_hci_irq_thread_fn(int irq, void *phy_id)
return IRQ_NONE;
}
- client = phy->i2c_dev;
- dev_dbg(&client->dev, "IRQ\n");
-
if (phy->hard_fault != 0)
return IRQ_HANDLED;
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index c8bdf078d111..a43fc4117fa5 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -257,8 +257,6 @@ static void st21nfca_se_wt_timeout(struct timer_list *t)
struct st21nfca_hci_info *info = from_timer(info, t,
se_info.bwi_timer);
- pr_debug("\n");
-
info->se_info.bwi_active = false;
if (!info->se_info.xch_error) {
@@ -278,8 +276,6 @@ static void st21nfca_se_activation_timeout(struct timer_list *t)
struct st21nfca_hci_info *info = from_timer(info, t,
se_info.se_active_timer);
- pr_debug("\n");
-
info->se_info.se_active = false;
complete(&info->se_info.req_completion);
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index d16cf3ff644e..b23f47936473 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -1226,11 +1226,9 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
&reset_cmd,
ST95HF_RESET_CMD_LEN,
ASYNC);
- if (result) {
+ if (result)
dev_err(&spictx->spidev->dev,
"ST95HF reset failed in remove() err = %d\n", result);
- return result;
- }
/* wait for 3 ms to complete the controller reset process */
usleep_range(3000, 4000);
@@ -1239,7 +1237,7 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
if (stcontext->st95hf_supply)
regulator_disable(stcontext->st95hf_supply);
- return result;
+ return 0;
}
/* Register as SPI protocol driver */
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 8890fcd59c39..29ca9c328df2 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -2170,8 +2170,6 @@ static int trf7970a_suspend(struct device *dev)
struct spi_device *spi = to_spi_device(dev);
struct trf7970a *trf = spi_get_drvdata(spi);
- dev_dbg(dev, "Suspend\n");
-
mutex_lock(&trf->lock);
trf7970a_shutdown(trf);
@@ -2187,8 +2185,6 @@ static int trf7970a_resume(struct device *dev)
struct trf7970a *trf = spi_get_drvdata(spi);
int ret;
- dev_dbg(dev, "Resume\n");
-
mutex_lock(&trf->lock);
ret = trf7970a_startup(trf);
@@ -2206,8 +2202,6 @@ static int trf7970a_pm_runtime_suspend(struct device *dev)
struct trf7970a *trf = spi_get_drvdata(spi);
int ret;
- dev_dbg(dev, "Runtime suspend\n");
-
mutex_lock(&trf->lock);
ret = trf7970a_power_down(trf);
@@ -2223,8 +2217,6 @@ static int trf7970a_pm_runtime_resume(struct device *dev)
struct trf7970a *trf = spi_get_drvdata(spi);
int ret;
- dev_dbg(dev, "Runtime resume\n");
-
ret = trf7970a_power_up(trf);
if (!ret)
pm_runtime_mark_last_busy(dev);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 72de88ff0d30..ef4950f80832 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -380,7 +380,6 @@ static int pmem_attach_disk(struct device *dev,
struct nd_pfn_sb *pfn_sb;
struct pmem_device *pmem;
struct request_queue *q;
- struct device *gendev;
struct gendisk *disk;
void *addr;
int rc;
@@ -489,10 +488,8 @@ static int pmem_attach_disk(struct device *dev,
}
dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
pmem->dax_dev = dax_dev;
- gendev = disk_to_dev(disk);
- gendev->groups = pmem_attribute_groups;
- device_add_disk(dev, disk, NULL);
+ device_add_disk(dev, disk, pmem_attribute_groups);
if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
return -ENOMEM;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 7efb31b87f37..f8dd664b2eda 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
-#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
@@ -979,6 +978,7 @@ EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{
struct nvme_command *cmd = nvme_req(req)->cmd;
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
blk_status_t ret = BLK_STS_OK;
if (!(req->rq_flags & RQF_DONTPREP)) {
@@ -1027,7 +1027,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
return BLK_STS_IOERR;
}
- nvme_req(req)->genctr++;
+ if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
+ nvme_req(req)->genctr++;
cmd->common.command_id = nvme_cid(req);
trace_nvme_setup_cmd(req, cmd);
return ret;
@@ -3524,7 +3525,9 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
lockdep_assert_held(&subsys->lock);
list_for_each_entry(h, &subsys->nsheads, entry) {
- if (h->ns_id == nsid && nvme_tryget_ns_head(h))
+ if (h->ns_id != nsid)
+ continue;
+ if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
return h;
}
@@ -3547,10 +3550,15 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
return 0;
}
+static void nvme_cdev_rel(struct device *dev)
+{
+ ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
+}
+
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
{
cdev_device_del(cdev, cdev_device);
- ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(cdev_device->devt));
+ put_device(cdev_device);
}
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
@@ -3563,14 +3571,14 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
return minor;
cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
cdev_device->class = nvme_ns_chr_class;
+ cdev_device->release = nvme_cdev_rel;
device_initialize(cdev_device);
cdev_init(cdev, fops);
cdev->owner = owner;
ret = cdev_device_add(cdev, cdev_device);
- if (ret) {
+ if (ret)
put_device(cdev_device);
- ida_simple_remove(&nvme_ns_chr_minor_ida, minor);
- }
+
return ret;
}
@@ -3602,11 +3610,9 @@ static int nvme_add_ns_cdev(struct nvme_ns *ns)
ns->ctrl->instance, ns->head->instance);
if (ret)
return ret;
- ret = nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
- ns->ctrl->ops->module);
- if (ret)
- kfree_const(ns->cdev_device.kobj.name);
- return ret;
+
+ return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
+ ns->ctrl->ops->module);
}
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
@@ -3714,15 +3720,6 @@ out_unlock:
return ret;
}
-static int ns_cmp(void *priv, const struct list_head *a,
- const struct list_head *b)
-{
- struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
- struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
-
- return nsa->head->ns_id - nsb->head->ns_id;
-}
-
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns, *ret = NULL;
@@ -3743,6 +3740,22 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
}
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
+/*
+ * Add the namespace to the controller list while keeping the list ordered.
+ */
+static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
+{
+ struct nvme_ns *tmp;
+
+ list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
+ if (tmp->head->ns_id < ns->head->ns_id) {
+ list_add(&ns->list, &tmp->list);
+ return;
+ }
+ }
+ list_add(&ns->list, &ns->ctrl->namespaces);
+}
+
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
struct nvme_ns_ids *ids)
{
@@ -3793,9 +3806,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
goto out_unlink_ns;
down_write(&ctrl->namespaces_rwsem);
- list_add_tail(&ns->list, &ctrl->namespaces);
+ nvme_ns_add_to_ctrl_list(ns);
up_write(&ctrl->namespaces_rwsem);
-
nvme_get_ctrl(ctrl);
if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
@@ -3843,6 +3855,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
mutex_lock(&ns->ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
+ if (list_empty(&ns->head->list)) {
+ list_del_init(&ns->head->entry);
+ last_path = true;
+ }
mutex_unlock(&ns->ctrl->subsys->lock);
/* guarantee not available in head->list */
@@ -3856,20 +3872,11 @@ static void nvme_ns_remove(struct nvme_ns *ns)
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
- if (blk_get_integrity(ns->disk))
- blk_integrity_unregister(ns->disk);
down_write(&ns->ctrl->namespaces_rwsem);
list_del_init(&ns->list);
up_write(&ns->ctrl->namespaces_rwsem);
- /* Synchronize with nvme_init_ns_head() */
- mutex_lock(&ns->head->subsys->lock);
- if (list_empty(&ns->head->list)) {
- list_del_init(&ns->head->entry);
- last_path = true;
- }
- mutex_unlock(&ns->head->subsys->lock);
if (last_path)
nvme_mpath_shutdown_disk(ns->head);
nvme_put_ns(ns);
@@ -4083,10 +4090,6 @@ static void nvme_scan_work(struct work_struct *work)
if (nvme_scan_ns_list(ctrl) != 0)
nvme_scan_ns_sequential(ctrl);
mutex_unlock(&ctrl->scan_lock);
-
- down_write(&ctrl->namespaces_rwsem);
- list_sort(NULL, &ctrl->namespaces, ns_cmp);
- up_write(&ctrl->namespaces_rwsem);
}
/*
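A hedged, standalone sketch (toy list types, not the kernel's <linux/list.h> API) of the ordered insert that lets this patch drop ns_cmp() and the per-scan list_sort(): every insertion keeps the namespace list sorted, and walking from the tail makes the common case (a new, highest ns_id) terminate immediately.

#include <stdio.h>

struct ns {
	unsigned int ns_id;
	struct ns *prev, *next;	/* circular list with a dummy head */
};

static void ns_add_sorted(struct ns *head, struct ns *new)
{
	struct ns *tmp;

	/* Scan backwards; stop at the first entry with a smaller id. */
	for (tmp = head->prev; tmp != head; tmp = tmp->prev)
		if (tmp->ns_id < new->ns_id)
			break;

	new->prev = tmp;
	new->next = tmp->next;
	tmp->next->prev = new;
	tmp->next = new;
}

int main(void)
{
	struct ns head = { 0, &head, &head };
	struct ns a = { 3 }, b = { 1 }, c = { 2 }, *it;

	ns_add_sorted(&head, &a);
	ns_add_sorted(&head, &b);
	ns_add_sorted(&head, &c);
	for (it = head.next; it != &head; it = it->next)
		printf("%u ", it->ns_id);	/* prints: 1 2 3 */
	printf("\n");
	return 0;
}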
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b08a61ca283f..aa14ad963d91 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2487,6 +2487,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
*/
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
blk_mq_tagset_busy_iter(&ctrl->tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
@@ -2510,6 +2511,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
* clean up the admin queue. Same thing as above.
*/
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ blk_sync_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
@@ -2951,6 +2953,13 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
if (ctrl->ctrl.queue_count == 1)
return 0;
+ if (prior_ioq_cnt != nr_io_queues) {
+ dev_info(ctrl->ctrl.device,
+ "reconnect: revising io queue count from %d to %d\n",
+ prior_ioq_cnt, nr_io_queues);
+ blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
+ }
+
ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
goto out_free_io_queues;
@@ -2959,15 +2968,6 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_delete_hw_queues;
- if (prior_ioq_cnt != nr_io_queues) {
- dev_info(ctrl->ctrl.device,
- "reconnect: revising io queue count from %d to %d\n",
- prior_ioq_cnt, nr_io_queues);
- nvme_wait_freeze(&ctrl->ctrl);
- blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
- nvme_unfreeze(&ctrl->ctrl);
- }
-
return 0;
out_delete_hw_queues:
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5d7bc58a27bd..fba06618c6c2 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -431,8 +431,6 @@ static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
return ret;
ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
&nvme_ns_head_chr_fops, THIS_MODULE);
- if (ret)
- kfree_const(head->cdev_device.kobj.name);
return ret;
}
@@ -600,14 +598,17 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- unsigned nsid = le32_to_cpu(desc->nsids[n]);
-
+ unsigned nsid;
+again:
+ nsid = le32_to_cpu(desc->nsids[n]);
if (ns->head->ns_id < nsid)
continue;
if (ns->head->ns_id == nsid)
nvme_update_ns_ana_state(desc, ns);
if (++n == nr_nsids)
break;
+ if (ns->head->ns_id > nsid)
+ goto again;
}
up_read(&ctrl->namespaces_rwsem);
return 0;
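A hedged sketch of the two-cursor merge the new `again:` label implements: both the controller's namespace list and the ANA descriptor's nsid array are sorted, but either side may contain entries absent from the other, so whichever cursor is behind must advance without consuming the other. Plain arrays stand in for the kernel structures.

#include <stdio.h>

static void match_sorted(const unsigned int *ns, int n_ns,
			 const unsigned int *nsid, int n_nsid)
{
	int i = 0, j = 0;

	while (i < n_ns && j < n_nsid) {
		if (ns[i] < nsid[j])
			i++;		/* local ns not in the descriptor */
		else if (ns[i] > nsid[j])
			j++;		/* descriptor nsid has no local ns */
		else {
			printf("update ana state of ns %u\n", ns[i]);
			i++;
			j++;
		}
	}
}

int main(void)
{
	const unsigned int ns[] = { 1, 3, 4 };
	const unsigned int nsid[] = { 2, 3, 4 };

	match_sorted(ns, 3, nsid, 3);	/* updates ns 3 and ns 4 only */
	return 0;
}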
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9871c0c9374c..ed79a6c7e804 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -138,6 +138,12 @@ enum nvme_quirks {
* 48 bits.
*/
NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),
+
+ /*
+ * The controller requires the command_id value to be limited, so skip
+ * encoding the generation sequence number.
+ */
+ NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
};
/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b82492cd7503..149ecf73df38 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1330,7 +1330,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
iod->aborted = 1;
cmd.abort.opcode = nvme_admin_abort_cmd;
- cmd.abort.cid = req->tag;
+ cmd.abort.cid = nvme_cid(req);
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
dev_warn(nvmeq->dev->ctrl.device,
@@ -3369,7 +3369,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
.driver_data = NVME_QUIRK_SINGLE_VECTOR |
NVME_QUIRK_128_BYTES_SQES |
- NVME_QUIRK_SHARED_TAGS },
+ NVME_QUIRK_SHARED_TAGS |
+ NVME_QUIRK_SKIP_CID_GEN },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ 0, }
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a68704e39084..042c594bc57e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -656,8 +656,8 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
return;
- nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
+ nvme_rdma_destroy_queue_ib(queue);
mutex_destroy(&queue->queue_lock);
}
@@ -1815,14 +1815,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
for (i = 0; i < queue->queue_size; i++) {
ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
if (ret)
- goto out_destroy_queue_ib;
+ return ret;
}
return 0;
-
-out_destroy_queue_ib:
- nvme_rdma_destroy_queue_ib(queue);
- return ret;
}
static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
@@ -1916,14 +1912,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
if (ret) {
dev_err(ctrl->ctrl.device,
"rdma_connect_locked failed (%d).\n", ret);
- goto out_destroy_queue_ib;
+ return ret;
}
return 0;
-
-out_destroy_queue_ib:
- nvme_rdma_destroy_queue_ib(queue);
- return ret;
}
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
@@ -1954,8 +1946,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
- nvme_rdma_destroy_queue_ib(queue);
- fallthrough;
case RDMA_CM_EVENT_ADDR_ERROR:
dev_dbg(queue->ctrl->ctrl.device,
"CM error event %d\n", ev->event);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index e2ab12f3f51c..3c1c29dd3020 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -274,6 +274,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
} while (ret > 0);
}
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+ return !list_empty(&queue->send_list) ||
+ !llist_empty(&queue->req_list) || queue->more_requests;
+}
+
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
bool sync, bool last)
{
@@ -294,9 +300,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
nvme_tcp_send_all(queue);
queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
- } else if (last) {
- queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
+
+ if (last && nvme_tcp_queue_more(queue))
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
@@ -613,7 +620,7 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
data->ttag = pdu->ttag;
data->command_id = nvme_cid(rq);
- data->data_offset = cpu_to_le32(req->data_sent);
+ data->data_offset = pdu->r2t_offset;
data->data_length = cpu_to_le32(req->pdu_len);
return 0;
}
@@ -906,12 +913,6 @@ done:
read_unlock_bh(&sk->sk_callback_lock);
}
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
-{
- return !list_empty(&queue->send_list) ||
- !llist_empty(&queue->req_list) || queue->more_requests;
-}
-
static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
queue->request = NULL;
@@ -952,7 +953,15 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
nvme_tcp_ddgst_update(queue->snd_hash, page,
offset, ret);
- /* fully successful last write*/
+ /*
+ * Update the request iterator, except for the last payload send
+ * in the request: there we must leave it untouched, as we may be
+ * racing with the RX path completing the request.
+ */
+ if (req->data_sent + ret < req->data_len)
+ nvme_tcp_advance_req(req, ret);
+
+ /* fully successful last send in the current PDU */
if (last && ret == len) {
if (queue->data_digest) {
nvme_tcp_ddgst_final(queue->snd_hash,
@@ -964,7 +973,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
}
return 1;
}
- nvme_tcp_advance_req(req, ret);
}
return -EAGAIN;
}
@@ -1145,8 +1153,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
pending = true;
else if (unlikely(result < 0))
break;
- } else
- pending = !llist_empty(&queue->req_list);
+ }
result = nvme_tcp_try_recv(queue);
if (result > 0)
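A hedged sketch of the reworked wakeup logic above: the io_work kick moves out of the else branch and fires only when the queue actually holds more work after the batch, so an inline send no longer schedules a pointless worker run. Toy types, not the nvme-tcp structures.

#include <stdbool.h>
#include <stdio.h>

struct queue {
	int pending;		/* stand-in for send_list + req_list */
	bool more_requests;
};

static bool queue_more(const struct queue *q)
{
	return q->pending > 0 || q->more_requests;
}

static void kick_worker(struct queue *q)
{
	printf("worker scheduled, %d pending\n", q->pending);
}

static void queue_request(struct queue *q, bool can_send_inline, bool last)
{
	q->pending++;

	if (can_send_inline) {		/* the trylock fast path */
		q->pending = 0;		/* batch drained directly */
		q->more_requests = false;
	}

	if (last && queue_more(q))	/* kick only if work remains */
		kick_worker(q);
}

int main(void)
{
	struct queue q = { 0, false };

	queue_request(&q, true, true);	/* inline send: no wakeup */
	queue_request(&q, false, true);	/* queued: worker scheduled */
	return 0;
}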
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index d784f3c200b4..be5d82421e3a 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1067,7 +1067,7 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
{
struct nvmet_subsys *subsys = to_subsys(item);
- return snprintf(page, PAGE_SIZE, "%*s\n",
+ return snprintf(page, PAGE_SIZE, "%.*s\n",
NVMET_SN_MAX_SIZE, subsys->serial);
}
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 39854d43758b..da414617a54d 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -109,6 +109,7 @@ config MTK_EFUSE
config NVMEM_NINTENDO_OTP
tristate "Nintendo Wii and Wii U OTP Support"
+ depends on WII || COMPILE_TEST
help
This is a driver exposing the OTP of a Nintendo Wii or Wii U console.
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 3d87fadaa160..8976da38b375 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -1383,7 +1383,8 @@ static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
*p-- = 0;
/* clear msb bits if any leftover in the last byte */
- *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
+ if (cell->nbits % BITS_PER_BYTE)
+ *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
static int __nvmem_cell_read(struct nvmem_device *nvmem,
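A hedged userspace illustration of why the guard matters (the GENMASK macro is re-derived here; it is not the kernel's <linux/bits.h>): for a byte-aligned cell, nbits % BITS_PER_BYTE is 0, so the unguarded code would expand to GENMASK(-1, 0) and shift by a negative count, which is undefined behaviour. With the guard, an aligned cell has no leftover msb bits and skips the masking entirely.

#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

static unsigned char mask_last_byte(unsigned char b, unsigned int nbits)
{
	if (nbits % 8)			/* only mask a partial last byte */
		b &= GENMASK((nbits % 8) - 1, 0);
	return b;
}

int main(void)
{
	printf("0x%02x\n", mask_last_byte(0xff, 13));	/* 13 % 8 = 5 -> 0x1f */
	printf("0x%02x\n", mask_last_byte(0xff, 16));	/* aligned -> 0xff */
	return 0;
}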
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 3dfeae8912df..80b5fd44ab1c 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -70,10 +70,6 @@ config OF_IRQ
def_bool y
depends on !SPARC && IRQ_DOMAIN
-config OF_NET
- depends on NETDEVICES
- def_bool y
-
config OF_RESERVED_MEM
def_bool OF_EARLY_FLATTREE
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index c13b982084a3..e0360a44306e 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o
obj-$(CONFIG_OF_PROMTREE) += pdt.o
obj-$(CONFIG_OF_ADDRESS) += address.o
obj-$(CONFIG_OF_IRQ) += irq.o
-obj-$(CONFIG_OF_NET) += of_net.o
obj-$(CONFIG_OF_UNITTEST) += unittest.o
obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
obj-$(CONFIG_OF_RESOLVE) += resolver.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index f720c0d246f2..0ac17256258d 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -36,6 +36,7 @@ LIST_HEAD(aliases_lookup);
struct device_node *of_root;
EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;
+EXPORT_SYMBOL(of_chosen);
struct device_node *of_aliases;
struct device_node *of_stdout;
static const char *of_stdout_options;
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 5b043ee30824..b0800c260f64 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -85,7 +85,11 @@ of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
break;
}
- if (i != count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
+ /*
+ * Attempt to initialize a restricted-dma-pool region if one was found.
+ * Note that count can hold a negative error code.
+ */
+ if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
}
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 59c1390cdf42..9da8835ba5a5 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -21,6 +21,7 @@
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
+#include <linux/kmemleak.h>
#include "of_private.h"
@@ -46,6 +47,7 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
err = memblock_mark_nomap(base, size);
if (err)
memblock_free(base, size);
+ kmemleak_ignore_phys(base);
}
return err;
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 3fd74bb34819..a3483484a5a2 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1291,7 +1291,6 @@ DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
DEFINE_SIMPLE_PROP(leds, "leds", NULL)
DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
-DEFINE_SIMPLE_PROP(phy_handle, "phy-handle", NULL)
DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
@@ -1380,7 +1379,6 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_resets, },
{ .parse_prop = parse_leds, },
{ .parse_prop = parse_backlight, },
- { .parse_prop = parse_phy_handle, },
{ .parse_prop = parse_gpio_compat, },
{ .parse_prop = parse_interrupts, },
{ .parse_prop = parse_regulators, },
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 0c473d75e625..43e615aa12ff 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -110,7 +110,7 @@ config PCI_PF_STUB
config XEN_PCIDEV_FRONTEND
tristate "Xen PCI Frontend"
- depends on X86 && XEN
+ depends on XEN_PV
select PCI_XEN
select XEN_XENBUS_FRONTEND
default y
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index eaec915ffe62..67c46e52c0dc 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -3301,9 +3301,17 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
return 0;
if (!keep_devs) {
- /* Delete any children which might still exist. */
+ struct list_head removed;
+
+ /* Move all present children to a list on the stack */
+ INIT_LIST_HEAD(&removed);
spin_lock_irqsave(&hbus->device_list_lock, flags);
- list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry) {
+ list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
+ list_move_tail(&hpdev->list_entry, &removed);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ /* Remove all children in the list */
+ list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
list_del(&hpdev->list_entry);
if (hpdev->pci_slot)
pci_destroy_slot(hpdev->pci_slot);
@@ -3311,7 +3319,6 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
put_pcichild(hpdev);
put_pcichild(hpdev);
}
- spin_unlock_irqrestore(&hbus->device_list_lock, flags);
}
ret = hv_send_resources_released(hdev);
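A hedged sketch of the splice-then-teardown pattern adopted above: detach every child onto a private list while holding the lock, drop the lock, then run the (potentially heavyweight) destruction outside it. A pthread mutex and a toy singly-linked list stand in for the spinlock and list_head.

#include <pthread.h>
#include <stdlib.h>

struct child {
	struct child *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct child *children;

static void destroy_child(struct child *c)
{
	free(c);	/* may sleep or take other locks in a real driver */
}

static void remove_all_children(void)
{
	struct child *removed, *c;

	pthread_mutex_lock(&list_lock);
	removed = children;		/* move the whole list onto the stack */
	children = NULL;
	pthread_mutex_unlock(&list_lock);

	while ((c = removed) != NULL) {	/* teardown without the lock held */
		removed = c->next;
		destroy_child(c);
	}
}

int main(void)
{
	remove_all_children();		/* trivially safe on an empty list */
	return 0;
}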
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 014868752cd4..dcefdb42ac46 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -62,14 +62,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot);
- switch (zdev->state) {
- case ZPCI_FN_STATE_STANDBY:
- *value = 0;
- break;
- default:
- *value = 1;
- break;
- }
+ *value = zpci_is_device_configured(zdev) ? 1 : 0;
return 0;
}
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 0099a00af361..4b4792940e86 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -535,6 +535,7 @@ static int msi_verify_entries(struct pci_dev *dev)
static int msi_capability_init(struct pci_dev *dev, int nvec,
struct irq_affinity *affd)
{
+ const struct attribute_group **groups;
struct msi_desc *entry;
int ret;
@@ -558,12 +559,14 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
if (ret)
goto err;
- dev->msi_irq_groups = msi_populate_sysfs(&dev->dev);
- if (IS_ERR(dev->msi_irq_groups)) {
- ret = PTR_ERR(dev->msi_irq_groups);
+ groups = msi_populate_sysfs(&dev->dev);
+ if (IS_ERR(groups)) {
+ ret = PTR_ERR(groups);
goto err;
}
+ dev->msi_irq_groups = groups;
+
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
pci_msi_set_enable(dev, 1);
@@ -691,6 +694,7 @@ static void msix_mask_all(void __iomem *base, int tsize)
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
int nvec, struct irq_affinity *affd)
{
+ const struct attribute_group **groups;
void __iomem *base;
int ret, tsize;
u16 control;
@@ -730,12 +734,14 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
msix_update_entries(dev, entries);
- dev->msi_irq_groups = msi_populate_sysfs(&dev->dev);
- if (IS_ERR(dev->msi_irq_groups)) {
- ret = PTR_ERR(dev->msi_irq_groups);
+ groups = msi_populate_sysfs(&dev->dev);
+ if (IS_ERR(groups)) {
+ ret = PTR_ERR(groups);
goto out_free;
}
+ dev->msi_irq_groups = groups;
+
/* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
dev->msix_enabled = 1;
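A hedged sketch of the fix applied to both the MSI and MSI-X paths: populate into a local and publish the pointer into the long-lived structure only on success, so dev->msi_irq_groups can never be left holding an ERR_PTR for later code to dereference. IS_ERR/PTR_ERR are re-derived here purely for illustration.

#include <stdio.h>

#define MAX_ERRNO 4095UL
#define IS_ERR(p) ((unsigned long)(p) >= -MAX_ERRNO)
#define PTR_ERR(p) ((long)(p))

struct dev {
	const char **msi_irq_groups;
};

/* Stub standing in for msi_populate_sysfs(); fails with -ENOMEM. */
static const char **populate_groups(struct dev *d)
{
	(void)d;
	return (const char **)-12L;
}

static int capability_init(struct dev *d)
{
	const char **groups = populate_groups(d);

	if (IS_ERR(groups))
		return (int)PTR_ERR(groups);	/* d stays untouched */

	d->msi_irq_groups = groups;		/* publish valid pointers only */
	return 0;
}

int main(void)
{
	struct dev d = { 0 };

	printf("ret=%d groups=%p\n", capability_init(&d),
	       (void *)d.msi_irq_groups);
	return 0;
}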
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index a1b1e2a01632..260a06fb78a6 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -937,7 +937,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev);
void pci_set_acpi_fwnode(struct pci_dev *dev)
{
- if (!ACPI_COMPANION(&dev->dev) && !pci_dev_is_added(dev))
+ if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
ACPI_COMPANION_SET(&dev->dev,
acpi_pci_find_companion(&dev->dev));
}
@@ -1249,6 +1249,9 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev)
bool check_children;
u64 addr;
+ if (!dev->parent)
+ return NULL;
+
down_read(&pci_acpi_companion_lookup_sem);
adev = pci_acpi_find_companion_hook ?
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e5089af8ad90..4537d1ea14fd 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5435,7 +5435,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
/*
- * Create device link for NVIDIA GPU with integrated USB xHCI Host
+ * Create device link for GPUs with integrated USB xHCI Host
* controller to VGA.
*/
static void quirk_gpu_usb(struct pci_dev *usb)
@@ -5444,9 +5444,11 @@ static void quirk_gpu_usb(struct pci_dev *usb)
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
/*
- * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
+ * Create device link for GPUs with integrated Type-C UCSI controller
* to VGA. Currently there is no class code defined for UCSI device over PCI
* so using UNKNOWN class for now and it will be updated when UCSI
* over PCI gets a class code.
@@ -5459,6 +5461,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_SERIAL_UNKNOWN, 8,
quirk_gpu_usb_typec_ucsi);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_UNKNOWN, 8,
+ quirk_gpu_usb_typec_ucsi);
/*
* Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 25557b272a4f..4be24890132e 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -99,6 +99,24 @@ error:
return off ?: PCI_VPD_SZ_INVALID;
}
+static bool pci_vpd_available(struct pci_dev *dev)
+{
+ struct pci_vpd *vpd = &dev->vpd;
+
+ if (!vpd->cap)
+ return false;
+
+ if (vpd->len == 0) {
+ vpd->len = pci_vpd_size(dev);
+ if (vpd->len == PCI_VPD_SZ_INVALID) {
+ vpd->cap = 0;
+ return false;
+ }
+ }
+
+ return true;
+}
+
/*
* Wait for last operation to complete.
* This code has to spin since there is no other notification from the PCI
@@ -145,7 +163,7 @@ static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
loff_t end = pos + count;
u8 *buf = arg;
- if (!vpd->cap)
+ if (!pci_vpd_available(dev))
return -ENODEV;
if (pos < 0)
@@ -206,7 +224,7 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
loff_t end = pos + count;
int ret = 0;
- if (!vpd->cap)
+ if (!pci_vpd_available(dev))
return -ENODEV;
if (pos < 0 || (pos & 3) || (count & 3))
@@ -242,14 +260,11 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
void pci_vpd_init(struct pci_dev *dev)
{
+ if (dev->vpd.len == PCI_VPD_SZ_INVALID)
+ return;
+
dev->vpd.cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
mutex_init(&dev->vpd.lock);
-
- if (!dev->vpd.len)
- dev->vpd.len = pci_vpd_size(dev);
-
- if (dev->vpd.len == PCI_VPD_SZ_INVALID)
- dev->vpd.cap = 0;
}
static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
@@ -294,13 +309,14 @@ const struct attribute_group pci_dev_vpd_attr_group = {
void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size)
{
- unsigned int len = dev->vpd.len;
+ unsigned int len;
void *buf;
int cnt;
- if (!dev->vpd.cap)
+ if (!pci_vpd_available(dev))
return ERR_PTR(-ENODEV);
+ len = dev->vpd.len;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
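A hedged sketch of the lazy-sizing scheme the VPD code moves to: vpd.len stays 0 ("not sized yet") until the first read, write or alloc, the expensive size probe runs on first use, and a failed probe permanently clears the capability so a broken device is never probed again. Toy types below, not the PCI core's.

#include <stdbool.h>
#include <stddef.h>

#define SZ_INVALID ((size_t)-1)

struct res {
	bool cap;	/* capability advertised? */
	size_t len;	/* 0 = unknown */
};

/* Stub for the fragile size probe (pci_vpd_size() in the driver). */
static size_t probe_size(struct res *r)
{
	(void)r;
	return 64;
}

static bool res_available(struct res *r)
{
	if (!r->cap)
		return false;

	if (r->len == 0) {		/* first use: size it now */
		r->len = probe_size(r);
		if (r->len == SZ_INVALID) {
			r->cap = false;	/* latch the failure */
			return false;
		}
	}
	return true;
}

int main(void)
{
	struct res r = { true, 0 };

	return res_available(&r) ? 0 : 1;
}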
diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c
index d2d0ed4b27c8..f650e19a315c 100644
--- a/drivers/pcmcia/pcmcia_cis.c
+++ b/drivers/pcmcia/pcmcia_cis.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/cistpl.h>
@@ -398,7 +399,6 @@ static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
void *priv)
{
struct net_device *dev = priv;
- int i;
if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
return -EINVAL;
@@ -412,8 +412,7 @@ static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
dev_warn(&p_dev->dev, "Invalid header for LAN_NODE_ID\n");
return -EINVAL;
}
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = tuple->TupleData[i+2];
+ eth_hw_addr_set(dev, &tuple->TupleData[2]);
return 0;
}
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 3cbc3baf087f..295cc7952d0e 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -952,6 +952,8 @@ int armpmu_register(struct arm_pmu *pmu)
pmu->name, pmu->num_events,
has_nmi ? ", using NMIs" : "");
+ kvm_host_pmu_init(pmu);
+
return 0;
out_destroy:
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index a4ac87c8b4f8..5082102d7d0d 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -2306,7 +2306,7 @@ EXPORT_SYMBOL_GPL(devm_pinctrl_register_and_init);
/**
* devm_pinctrl_unregister() - Resource managed version of pinctrl_unregister().
- * @dev: device for which which resource was allocated
+ * @dev: device for which resource was allocated
* @pctldev: the pinctrl device to unregister.
*/
void devm_pinctrl_unregister(struct device *dev, struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index c001f2ed20f8..8d0f88e9ca88 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -445,6 +445,7 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3);
+ int err;
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
@@ -457,6 +458,15 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ if (on)
+ err = enable_irq_wake(gpio_dev->irq);
+ else
+ err = disable_irq_wake(gpio_dev->irq);
+
+ if (err)
+ dev_err(&gpio_dev->pdev->dev, "failed to %s wake-up interrupt\n",
+ on ? "enable" : "disable");
+
return 0;
}
@@ -902,7 +912,6 @@ static struct pinctrl_desc amd_pinctrl_desc = {
static int amd_gpio_probe(struct platform_device *pdev)
{
int ret = 0;
- int irq_base;
struct resource *res;
struct amd_gpio *gpio_dev;
struct gpio_irq_chip *girq;
@@ -925,9 +934,9 @@ static int amd_gpio_probe(struct platform_device *pdev)
if (!gpio_dev->base)
return -ENOMEM;
- irq_base = platform_get_irq(pdev, 0);
- if (irq_base < 0)
- return irq_base;
+ gpio_dev->irq = platform_get_irq(pdev, 0);
+ if (gpio_dev->irq < 0)
+ return gpio_dev->irq;
#ifdef CONFIG_PM_SLEEP
gpio_dev->saved_regs = devm_kcalloc(&pdev->dev, amd_pinctrl_desc.npins,
@@ -987,7 +996,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
goto out2;
}
- ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler,
+ ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, gpio_dev);
if (ret)
goto out2;
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 95e763424042..1d4317073654 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -98,6 +98,7 @@ struct amd_gpio {
struct resource *res;
struct platform_device *pdev;
u32 *saved_regs;
+ int irq;
};
/* KERNCZ configuration*/
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index ae33e376695f..5ce260f152ce 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -2092,6 +2092,23 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
return false;
}
+static int rockchip_pinconf_defer_output(struct rockchip_pin_bank *bank,
+ unsigned int pin, u32 arg)
+{
+ struct rockchip_pin_output_deferred *cfg;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ cfg->pin = pin;
+ cfg->arg = arg;
+
+ list_add_tail(&cfg->head, &bank->deferred_output);
+
+ return 0;
+}
+
/* set the pin config settings for a specified pin */
static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, unsigned num_configs)
@@ -2136,6 +2153,22 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (rc != RK_FUNC_GPIO)
return -EINVAL;
+ /*
+ * Check whether the gpio driver has probed yet. The lock
+ * guarantees we observe the gpio driver either fully probed
+ * or not probed at all, never half-initialized.
+ */
+ mutex_lock(&bank->deferred_lock);
+ if (!gpio || !gpio->direction_output) {
+ rc = rockchip_pinconf_defer_output(bank, pin - bank->pin_base, arg);
+ mutex_unlock(&bank->deferred_lock);
+ if (rc)
+ return rc;
+
+ break;
+ }
+ mutex_unlock(&bank->deferred_lock);
+
rc = gpio->direction_output(gpio, pin - bank->pin_base,
arg);
if (rc)
@@ -2204,6 +2237,11 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
if (rc != RK_FUNC_GPIO)
return -EINVAL;
+ if (!gpio || !gpio->get) {
+ arg = 0;
+ break;
+ }
+
rc = gpio->get(gpio, pin - bank->pin_base);
if (rc < 0)
return rc;
@@ -2450,6 +2488,9 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
pin_bank->name, pin);
pdesc++;
}
+
+ INIT_LIST_HEAD(&pin_bank->deferred_output);
+ mutex_init(&pin_bank->deferred_lock);
}
ret = rockchip_pinctrl_parse_dt(pdev, info);
@@ -2716,6 +2757,31 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
return 0;
}
+static int rockchip_pinctrl_remove(struct platform_device *pdev)
+{
+ struct rockchip_pinctrl *info = platform_get_drvdata(pdev);
+ struct rockchip_pin_bank *bank;
+ struct rockchip_pin_output_deferred *cfg;
+ int i;
+
+ of_platform_depopulate(&pdev->dev);
+
+ for (i = 0; i < info->ctrl->nr_banks; i++) {
+ bank = &info->ctrl->pin_banks[i];
+
+ mutex_lock(&bank->deferred_lock);
+ while (!list_empty(&bank->deferred_output)) {
+ cfg = list_first_entry(&bank->deferred_output,
+ struct rockchip_pin_output_deferred, head);
+ list_del(&cfg->head);
+ kfree(cfg);
+ }
+ mutex_unlock(&bank->deferred_lock);
+ }
+
+ return 0;
+}
+
static struct rockchip_pin_bank px30_pin_banks[] = {
PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU,
IOMUX_SOURCE_PMU,
@@ -3175,6 +3241,7 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
static struct platform_driver rockchip_pinctrl_driver = {
.probe = rockchip_pinctrl_probe,
+ .remove = rockchip_pinctrl_remove,
.driver = {
.name = "rockchip-pinctrl",
.pm = &rockchip_pinctrl_dev_pm_ops,
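A hedged sketch of the defer-until-probe scheme this patch introduces: output settings that arrive before the gpio half has probed are queued on a mutex-protected list, the gpio driver drains that list once ready, and remove() frees whatever was never applied. Names here are illustrative, not the rockchip driver's internals.

#include <pthread.h>
#include <stdlib.h>

struct deferred_cfg {
	struct deferred_cfg *next;
	unsigned int pin;
	unsigned int arg;
};

struct bank {
	pthread_mutex_t lock;
	struct deferred_cfg *head;	/* LIFO for brevity */
	int (*apply)(unsigned int pin, unsigned int arg); /* NULL until probed */
};

static int bank_set_output(struct bank *b, unsigned int pin, unsigned int arg)
{
	struct deferred_cfg *c;

	pthread_mutex_lock(&b->lock);
	if (!b->apply) {		/* gpio not probed yet: defer */
		c = calloc(1, sizeof(*c));
		if (!c) {
			pthread_mutex_unlock(&b->lock);
			return -1;
		}
		c->pin = pin;
		c->arg = arg;
		c->next = b->head;
		b->head = c;
		pthread_mutex_unlock(&b->lock);
		return 0;
	}
	pthread_mutex_unlock(&b->lock);

	return b->apply(pin, arg);	/* gpio ready: apply directly */
}

static void bank_remove(struct bank *b)
{
	struct deferred_cfg *c;

	pthread_mutex_lock(&b->lock);
	while ((c = b->head) != NULL) {	/* free never-applied settings */
		b->head = c->next;
		free(c);
	}
	pthread_mutex_unlock(&b->lock);
}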
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
index 589d4d2a98c9..91f10279d084 100644
--- a/drivers/pinctrl/pinctrl-rockchip.h
+++ b/drivers/pinctrl/pinctrl-rockchip.h
@@ -141,6 +141,8 @@ struct rockchip_drv {
* @toggle_edge_mode: bit mask to toggle (falling/rising) edge mode
 * @recalced_mask: bit mask to indicate a need to recalculate the mask
* @route_mask: bits describing the routing pins of per bank
+ * @deferred_output: gpio output settings to be applied after the gpio bank probes
+ * @deferred_lock: mutex protecting deferred_output, shared between gpio and pinctrl
*/
struct rockchip_pin_bank {
struct device *dev;
@@ -169,6 +171,8 @@ struct rockchip_pin_bank {
u32 toggle_edge_mode;
u32 recalced_mask;
u32 route_mask;
+ struct list_head deferred_output;
+ struct mutex deferred_lock;
};
/**
@@ -243,6 +247,12 @@ struct rockchip_pin_config {
unsigned int nconfigs;
};
+struct rockchip_pin_output_deferred {
+ struct list_head head;
+ unsigned int pin;
+ u32 arg;
+};
+
/**
* struct rockchip_pin_group: represent group of pins of a pinmux function.
* @name: name of the pin group, used to lookup the group.
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 32ea2a8ec02b..5ff4207df66e 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -3,7 +3,8 @@ if (ARCH_QCOM || COMPILE_TEST)
config PINCTRL_MSM
tristate "Qualcomm core pin controller driver"
- depends on GPIOLIB && (QCOM_SCM || !QCOM_SCM) #if QCOM_SCM=m this can't be =y
+ depends on GPIOLIB
+ select QCOM_SCM
select PINMUX
select PINCONF
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
index afddf6d60dbe..9017ede409c9 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
@@ -1496,6 +1496,7 @@ static const struct of_device_id sc7280_pinctrl_of_match[] = {
static struct platform_driver sc7280_pinctrl_driver = {
.driver = {
.name = "sc7280-pinctrl",
+ .pm = &msm_pinctrl_dev_pm_ops,
.of_match_table = sc7280_pinctrl_of_match,
},
.probe = sc7280_pinctrl_probe,
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 98bf0e2a2a8d..b2562e893139 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2016-2021 The Linux Foundation. All rights reserved.
*/
#include <linux/gpio/driver.h>
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/spmi.h>
#include <linux/types.h>
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
@@ -171,6 +172,8 @@ struct pmic_gpio_state {
struct pinctrl_dev *ctrl;
struct gpio_chip chip;
struct irq_chip irq;
+ u8 usid;
+ u8 pid_base;
};
static const struct pinconf_generic_params pmic_gpio_bindings[] = {
@@ -949,12 +952,36 @@ static int pmic_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
unsigned int *parent_hwirq,
unsigned int *parent_type)
{
- *parent_hwirq = child_hwirq + 0xc0;
+ struct pmic_gpio_state *state = gpiochip_get_data(chip);
+
+ *parent_hwirq = child_hwirq + state->pid_base;
*parent_type = child_type;
return 0;
}
+static void *pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ struct pmic_gpio_state *state = gpiochip_get_data(chip);
+ struct irq_fwspec *fwspec;
+
+ fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
+
+ fwspec->param_count = 4;
+ fwspec->param[0] = state->usid;
+ fwspec->param[1] = parent_hwirq;
+ /* param[2] must be left as 0 */
+ fwspec->param[3] = parent_type;
+
+ return fwspec;
+}
+
static int pmic_gpio_probe(struct platform_device *pdev)
{
struct irq_domain *parent_domain;
@@ -965,6 +992,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
struct pmic_gpio_pad *pad, *pads;
struct pmic_gpio_state *state;
struct gpio_irq_chip *girq;
+ const struct spmi_device *parent_spmi_dev;
int ret, npins, i;
u32 reg;
@@ -984,6 +1012,9 @@ static int pmic_gpio_probe(struct platform_device *pdev)
state->dev = &pdev->dev;
state->map = dev_get_regmap(dev->parent, NULL);
+ parent_spmi_dev = to_spmi_device(dev->parent);
+ state->usid = parent_spmi_dev->usid;
+ state->pid_base = reg >> 8;
pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
if (!pindesc)
@@ -1059,7 +1090,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
girq->fwnode = of_node_to_fwnode(state->dev->of_node);
girq->parent_domain = parent_domain;
girq->child_to_parent_hwirq = pmic_gpio_child_to_parent_hwirq;
- girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
+ girq->populate_parent_alloc_arg = pmic_gpio_populate_parent_fwspec;
girq->child_offset_to_irq = pmic_gpio_child_offset_to_irq;
girq->child_irq_domain_ops.translate = pmic_gpio_domain_translate;
diff --git a/drivers/platform/mellanox/mlxreg-io.c b/drivers/platform/mellanox/mlxreg-io.c
index 7646708d57e4..a916cd89cbbe 100644
--- a/drivers/platform/mellanox/mlxreg-io.c
+++ b/drivers/platform/mellanox/mlxreg-io.c
@@ -98,7 +98,7 @@ mlxreg_io_get_reg(void *regmap, struct mlxreg_core_data *data, u32 in_val,
if (ret)
goto access_error;
- *regval |= rol32(val, regsize * i);
+ *regval |= rol32(val, regsize * i * 8);
}
}
@@ -141,7 +141,7 @@ mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
/* Convert buffer to input value. */
- ret = kstrtou32(buf, len, &input_val);
+ ret = kstrtou32(buf, 0, &input_val);
if (ret)
return ret;
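A hedged sketch of the rotation fix: rol32() rotates by a bit count, so packing a 32-bit value out of regsize-byte register reads needs a per-iteration offset of regsize * i * 8 bits, not regsize * i bytes. The rotate helper is re-implemented here; in the kernel it comes from <linux/bitops.h>.

#include <stdint.h>
#include <stdio.h>

static uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((32 - shift) & 31));
}

int main(void)
{
	uint32_t regs[2] = { 0x34, 0x12 };	/* two 1-byte reads */
	uint32_t regval = 0;
	unsigned int regsize = 1, i;

	for (i = 0; i < 2; i++)
		regval |= rol32(regs[i], regsize * i * 8);

	/* 0x1234; the old byte-count shift produced 0x34 | 0x24 = 0x34 */
	printf("0x%04x\n", regval);
	return 0;
}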
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index 3481479a2942..fc95620101e8 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -71,7 +71,7 @@
#define AMD_CPU_ID_YC 0x14B5
#define PMC_MSG_DELAY_MIN_US 100
-#define RESPONSE_REGISTER_LOOP_MAX 200
+#define RESPONSE_REGISTER_LOOP_MAX 20000
#define SOC_SUBSYSTEM_IP_MAX 12
#define DELAY_MIN_US 2000
@@ -476,6 +476,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
{"AMDI0006", 0},
{"AMDI0007", 0},
{"AMD0004", 0},
+ {"AMD0005", 0},
{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmc_acpi_ids);
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index 821aba31821c..2fffa57e596e 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -166,8 +166,8 @@ config DELL_WMI
config DELL_WMI_PRIVACY
bool "Dell WMI Hardware Privacy Support"
+ depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
depends on DELL_WMI
- depends on LEDS_TRIGGER_AUDIO
help
This option adds integration with the "Dell Hardware Privacy"
feature of Dell laptops to the dell-wmi driver.
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 7f3a03f937f6..658bab4b7964 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -141,9 +141,11 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index a33a5826e81a..08598942a6d7 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -118,12 +118,30 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = {
{ }
};
+/*
+ * Some devices, even non-convertible ones, can send incorrect SW_TABLET_MODE
+ * reports. Accept such reports only from devices in this list.
+ */
+static const struct dmi_system_id dmi_auto_add_switch[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */),
+ },
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */),
+ },
+ },
+ {} /* Array terminator */
+};
+
struct intel_hid_priv {
struct input_dev *input_dev;
struct input_dev *array;
struct input_dev *switches;
bool wakeup_mode;
- bool dual_accel;
+ bool auto_add_switch;
};
#define HID_EVENT_FILTER_UUID "eeec56b3-4442-408f-a792-4edd4d758054"
@@ -452,10 +470,8 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
* Some convertible have unreliable VGBS return which could cause incorrect
* SW_TABLET_MODE report, in these cases we enable support when receiving
* the first event instead of during driver setup.
- *
- * See dual_accel_detect.h for more info on the dual_accel check.
*/
- if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) {
+ if (!priv->switches && priv->auto_add_switch && (event == 0xcc || event == 0xcd)) {
dev_info(&device->dev, "switch event received, enable switches supports\n");
err = intel_hid_switches_setup(device);
if (err)
@@ -596,7 +612,8 @@ static int intel_hid_probe(struct platform_device *device)
return -ENOMEM;
dev_set_drvdata(&device->dev, priv);
- priv->dual_accel = dual_accel_detect();
+ /* See dual_accel_detect.h for more info on the dual_accel check. */
+ priv->auto_add_switch = dmi_check_system(dmi_auto_add_switch) && !dual_accel_detect();
err = intel_hid_input_setup(device);
if (err) {
diff --git a/drivers/platform/x86/intel/int1092/intel_sar.c b/drivers/platform/x86/intel/int1092/intel_sar.c
index 379560fe5df9..e03943e6380a 100644
--- a/drivers/platform/x86/intel/int1092/intel_sar.c
+++ b/drivers/platform/x86/intel/int1092/intel_sar.c
@@ -42,12 +42,20 @@ static void update_sar_data(struct wwan_sar_context *context)
if (config->device_mode_info &&
context->sar_data.device_mode < config->total_dev_mode) {
- struct wwan_device_mode_info *dev_mode =
- &config->device_mode_info[context->sar_data.device_mode];
-
- context->sar_data.antennatable_index = dev_mode->antennatable_index;
- context->sar_data.bandtable_index = dev_mode->bandtable_index;
- context->sar_data.sartable_index = dev_mode->sartable_index;
+ int itr = 0;
+
+ for (itr = 0; itr < config->total_dev_mode; itr++) {
+ if (context->sar_data.device_mode ==
+ config->device_mode_info[itr].device_mode) {
+ struct wwan_device_mode_info *dev_mode =
+ &config->device_mode_info[itr];
+
+ context->sar_data.antennatable_index = dev_mode->antennatable_index;
+ context->sar_data.bandtable_index = dev_mode->bandtable_index;
+ context->sar_data.sartable_index = dev_mode->sartable_index;
+ break;
+ }
+ }
}
}
@@ -305,7 +313,6 @@ static struct platform_driver sar_driver = {
.remove = sar_remove,
.driver = {
.name = DRVNAME,
- .owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(sar_device_ids)
}
};
@@ -313,4 +320,4 @@ module_platform_driver(sar_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Platform device driver for INTEL MODEM BIOS SAR");
-MODULE_AUTHOR("Shravan S <[email protected]>");
+MODULE_AUTHOR("Shravan Sudhakar <[email protected]>");
diff --git a/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c b/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c
index 9fe0a2527e1c..e59d79c7e82f 100644
--- a/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c
+++ b/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c
@@ -401,7 +401,7 @@ int skl_int3472_discrete_remove(struct platform_device *pdev)
gpiod_remove_lookup_table(&int3472->gpios);
- if (int3472->clock.ena_gpio)
+ if (int3472->clock.cl)
skl_int3472_unregister_clock(int3472);
gpiod_put(int3472->clock.ena_gpio);
diff --git a/drivers/platform/x86/intel/punit_ipc.c b/drivers/platform/x86/intel/punit_ipc.c
index f58b8543f6ac..66bb39fd0ef9 100644
--- a/drivers/platform/x86/intel/punit_ipc.c
+++ b/drivers/platform/x86/intel/punit_ipc.c
@@ -8,7 +8,6 @@
* which provide mailbox interface for power management usage.
*/
-#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -319,7 +318,7 @@ static struct platform_driver intel_punit_ipc_driver = {
.remove = intel_punit_ipc_remove,
.driver = {
.name = "intel_punit_ipc",
- .acpi_match_table = ACPI_PTR(punit_ipc_acpi_ids),
+ .acpi_match_table = punit_ipc_acpi_ids,
},
};
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index bfa0cc20750d..7cc9089d1e14 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -75,7 +75,7 @@ struct intel_scu_ipc_dev {
#define IPC_READ_BUFFER 0x90
/* Timeout in jiffies */
-#define IPC_TIMEOUT (5 * HZ)
+#define IPC_TIMEOUT (10 * HZ)
static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
@@ -232,7 +232,7 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
/* Wait till scu status is busy */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
- unsigned long end = jiffies + msecs_to_jiffies(IPC_TIMEOUT);
+ unsigned long end = jiffies + IPC_TIMEOUT;
do {
u32 status;
@@ -247,7 +247,7 @@ static inline int busy_loop(struct intel_scu_ipc_dev *scu)
return -ETIMEDOUT;
}
-/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */
+/* Wait till the ipc ioc interrupt is received or the 10 second timeout expires */
static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
int status;
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index 3e520d5bca07..88b551caeaaf 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -655,7 +655,7 @@ static int acpi_add(struct acpi_device *device)
goto out_platform_registered;
}
product = dmi_get_system_info(DMI_PRODUCT_NAME);
- if (strlen(product) > 4)
+ if (product && strlen(product) > 4)
switch (product[4]) {
case '5':
case '6':
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 0e1451b1d9c6..033f797861d8 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -100,10 +100,10 @@ static const struct ts_dmi_data chuwi_hi10_air_data = {
};
static const struct property_entry chuwi_hi10_plus_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-min-x", 0),
- PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1914),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1283),
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 12),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1908),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
@@ -111,6 +111,15 @@ static const struct property_entry chuwi_hi10_plus_props[] = {
};
static const struct ts_dmi_data chuwi_hi10_plus_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-chuwi-hi10plus.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 34056,
+ .sha256 = { 0xfd, 0x0a, 0x08, 0x08, 0x3c, 0xa6, 0x34, 0x4e,
+ 0x2c, 0x49, 0x9c, 0xcd, 0x7d, 0x44, 0x9d, 0x38,
+ 0x10, 0x68, 0xb5, 0xbd, 0xb7, 0x2a, 0x63, 0xb5,
+ 0x67, 0x0b, 0x96, 0xbd, 0x89, 0x67, 0x85, 0x09 },
+ },
.acpi_name = "MSSL0017:00",
.properties = chuwi_hi10_plus_props,
};
@@ -141,6 +150,33 @@ static const struct ts_dmi_data chuwi_hi10_pro_data = {
.properties = chuwi_hi10_pro_props,
};
+static const struct property_entry chuwi_hibook_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 30),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 4),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1892),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1276),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hibook.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hibook_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-chuwi-hibook.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 40392,
+ .sha256 = { 0xf7, 0xc0, 0xe8, 0x5a, 0x6c, 0xf2, 0xeb, 0x8d,
+ 0x12, 0xc4, 0x45, 0xbf, 0x55, 0x13, 0x4c, 0x1a,
+ 0x13, 0x04, 0x31, 0x08, 0x65, 0x73, 0xf7, 0xa8,
+ 0x1b, 0x7d, 0x59, 0xc9, 0xe6, 0x97, 0xf7, 0x38 },
+ },
+ .acpi_name = "MSSL0017:00",
+ .properties = chuwi_hibook_props,
+};
+
static const struct property_entry chuwi_vi8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
@@ -980,6 +1016,16 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi HiBook (CWI514) */
+ .driver_data = (void *)&chuwi_hibook_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
+ },
+ },
+ {
/* Chuwi Vi8 (CWI506) */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
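
The .embedded_fw descriptors added above give the Silead firmware loader enough information to carve a known-good image out of the vendor's EFI firmware: an 8-byte prefix to locate it, a byte count, and a SHA-256 digest to verify it. A hedged sketch of that matching logic (illustrative only; efw_match() is not the kernel's API, and OpenSSL's SHA256() stands in for the kernel's hashing):

```c
/* Sketch of how an embedded_fw descriptor like the one above could
 * be matched inside a vendor firmware blob: check the 8-byte prefix,
 * then hash 'length' bytes and compare digests. */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <openssl/sha.h>

struct efw_desc {
	unsigned char prefix[8];
	size_t length;
	unsigned char sha256[32];
};

static bool efw_match(const struct efw_desc *d,
		      const unsigned char *blob, size_t blob_len)
{
	unsigned char digest[32];

	if (blob_len < d->length)
		return false;
	if (memcmp(blob, d->prefix, sizeof(d->prefix)) != 0)
		return false;			/* not this image */
	SHA256(blob, d->length, digest);
	return memcmp(digest, d->sha256, sizeof(digest)) == 0;
}
```
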
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index f02bedf41264..458218f88c5e 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -174,6 +174,7 @@ config PTP_1588_CLOCK_OCP
depends on I2C && MTD
depends on SERIAL_8250
depends on !S390
+ depends on COMMON_CLK
select NET_DEVLINK
help
This driver adds support for an OpenCompute time card.
diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h
deleted file mode 100644
index 1c5210187110..000000000000
--- a/drivers/ptp/idt8a340_reg.h
+++ /dev/null
@@ -1,783 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/* idt8a340_reg.h
- *
- * Originally generated by regen.tcl on Thu Feb 14 19:23:44 PST 2019
- * https://github.com/richardcochran/regen
- *
- * Hand modified to include some HW registers.
- * Based on 5.2.0, Family Programming Guide (Sept 30, 2020)
- */
-#ifndef HAVE_IDT8A340_REG
-#define HAVE_IDT8A340_REG
-
-#define PAGE_ADDR_BASE 0x0000
-#define PAGE_ADDR 0x00fc
-
-#define HW_REVISION 0x8180
-#define REV_ID 0x007a
-
-#define HW_DPLL_0 (0x8a00)
-#define HW_DPLL_1 (0x8b00)
-#define HW_DPLL_2 (0x8c00)
-#define HW_DPLL_3 (0x8d00)
-#define HW_DPLL_4 (0x8e00)
-#define HW_DPLL_5 (0x8f00)
-#define HW_DPLL_6 (0x9000)
-#define HW_DPLL_7 (0x9100)
-
-#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080)
-#define HW_DPLL_TOD_CTRL_1 (0x089)
-#define HW_DPLL_TOD_CTRL_2 (0x08A)
-#define HW_DPLL_TOD_OVR__0 (0x098)
-#define HW_DPLL_TOD_OUT_0__0 (0x0B0)
-
-#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740)
-#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741)
-#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742)
-#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743)
-#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744)
-#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745)
-#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746)
-#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747)
-#define HW_Q8_CH_SYNC_CTRL_0 (0xa748)
-#define HW_Q8_CH_SYNC_CTRL_1 (0xa749)
-#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a)
-#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b)
-#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c)
-#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d)
-#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e)
-#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f)
-
-#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14
-#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15
-#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16
-#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17
-
-#define SYNCTRL1_MASTER_SYNC_RST BIT(7)
-#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5)
-#define SYNCTRL1_TOD_SYNC_TRIG BIT(4)
-#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3)
-#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2)
-#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1)
-#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0)
-
-#define HW_Q8_CTRL_SPARE (0xa7d4)
-#define HW_Q11_CTRL_SPARE (0xa7ec)
-
-/**
- * Select FOD5 as sync_trigger for Q8 divider.
- * Transition from logic zero to one
- * sets trigger to sync Q8 divider.
- *
- * Unused when FOD4 is driving Q8 divider (normal operation).
- */
-#define Q9_TO_Q8_SYNC_TRIG BIT(1)
-
-/**
- * Enable FOD5 as driver for clock and sync for Q8 divider.
- * Enable fanout buffer for FOD5.
- *
- * Unused when FOD4 is driving Q8 divider (normal operation).
- */
-#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
-
-/**
- * Select FOD6 as sync_trigger for Q11 divider.
- * Transition from logic zero to one
- * sets trigger to sync Q11 divider.
- *
- * Unused when FOD7 is driving Q11 divider (normal operation).
- */
-#define Q10_TO_Q11_SYNC_TRIG BIT(1)
-
-/**
- * Enable FOD6 as driver for clock and sync for Q11 divider.
- * Enable fanout buffer for FOD6.
- *
- * Unused when FOD7 is driving Q11 divider (normal operation).
- */
-#define Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
-
-#define RESET_CTRL 0xc000
-#define SM_RESET 0x0012
-#define SM_RESET_V520 0x0013
-#define SM_RESET_CMD 0x5A
-
-#define GENERAL_STATUS 0xc014
-#define BOOT_STATUS 0x0000
-#define HW_REV_ID 0x000A
-#define BOND_ID 0x000B
-#define HW_CSR_ID 0x000C
-#define HW_IRQ_ID 0x000E
-
-#define MAJ_REL 0x0010
-#define MIN_REL 0x0011
-#define HOTFIX_REL 0x0012
-
-#define PIPELINE_ID 0x0014
-#define BUILD_ID 0x0018
-
-#define JTAG_DEVICE_ID 0x001c
-#define PRODUCT_ID 0x001e
-
-#define OTP_SCSR_CONFIG_SELECT 0x0022
-
-#define STATUS 0xc03c
-#define DPLL_SYS_STATUS 0x0020
-#define DPLL_SYS_APLL_STATUS 0x0021
-#define USER_GPIO0_TO_7_STATUS 0x008a
-#define USER_GPIO8_TO_15_STATUS 0x008b
-
-#define GPIO_USER_CONTROL 0xc160
-#define GPIO0_TO_7_OUT 0x0000
-#define GPIO8_TO_15_OUT 0x0001
-#define GPIO0_TO_7_OUT_V520 0x0002
-#define GPIO8_TO_15_OUT_V520 0x0003
-
-#define STICKY_STATUS_CLEAR 0xc164
-
-#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c
-
-#define ALERT_CFG 0xc188
-
-#define SYS_DPLL_XO 0xc194
-
-#define SYS_APLL 0xc19c
-
-#define INPUT_0 0xc1b0
-
-#define INPUT_1 0xc1c0
-
-#define INPUT_2 0xc1d0
-
-#define INPUT_3 0xc200
-
-#define INPUT_4 0xc210
-
-#define INPUT_5 0xc220
-
-#define INPUT_6 0xc230
-
-#define INPUT_7 0xc240
-
-#define INPUT_8 0xc250
-
-#define INPUT_9 0xc260
-
-#define INPUT_10 0xc280
-
-#define INPUT_11 0xc290
-
-#define INPUT_12 0xc2a0
-
-#define INPUT_13 0xc2b0
-
-#define INPUT_14 0xc2c0
-
-#define INPUT_15 0xc2d0
-
-#define REF_MON_0 0xc2e0
-
-#define REF_MON_1 0xc2ec
-
-#define REF_MON_2 0xc300
-
-#define REF_MON_3 0xc30c
-
-#define REF_MON_4 0xc318
-
-#define REF_MON_5 0xc324
-
-#define REF_MON_6 0xc330
-
-#define REF_MON_7 0xc33c
-
-#define REF_MON_8 0xc348
-
-#define REF_MON_9 0xc354
-
-#define REF_MON_10 0xc360
-
-#define REF_MON_11 0xc36c
-
-#define REF_MON_12 0xc380
-
-#define REF_MON_13 0xc38c
-
-#define REF_MON_14 0xc398
-
-#define REF_MON_15 0xc3a4
-
-#define DPLL_0 0xc3b0
-#define DPLL_CTRL_REG_0 0x0002
-#define DPLL_CTRL_REG_1 0x0003
-#define DPLL_CTRL_REG_2 0x0004
-#define DPLL_TOD_SYNC_CFG 0x0031
-#define DPLL_COMBO_SLAVE_CFG_0 0x0032
-#define DPLL_COMBO_SLAVE_CFG_1 0x0033
-#define DPLL_SLAVE_REF_CFG 0x0034
-#define DPLL_REF_MODE 0x0035
-#define DPLL_PHASE_MEASUREMENT_CFG 0x0036
-#define DPLL_MODE 0x0037
-#define DPLL_MODE_V520 0x003B
-
-#define DPLL_1 0xc400
-
-#define DPLL_2 0xc438
-#define DPLL_2_V520 0xc43c
-
-#define DPLL_3 0xc480
-
-#define DPLL_4 0xc4b8
-#define DPLL_4_V520 0xc4bc
-
-#define DPLL_5 0xc500
-
-#define DPLL_6 0xc538
-#define DPLL_6_V520 0xc53c
-
-#define DPLL_7 0xc580
-
-#define SYS_DPLL 0xc5b8
-#define SYS_DPLL_V520 0xc5bc
-
-#define DPLL_CTRL_0 0xc600
-#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001
-#define DPLL_CTRL_COMBO_MASTER_CFG 0x003a
-
-#define DPLL_CTRL_1 0xc63c
-
-#define DPLL_CTRL_2 0xc680
-
-#define DPLL_CTRL_3 0xc6bc
-
-#define DPLL_CTRL_4 0xc700
-
-#define DPLL_CTRL_5 0xc73c
-
-#define DPLL_CTRL_6 0xc780
-
-#define DPLL_CTRL_7 0xc7bc
-
-#define SYS_DPLL_CTRL 0xc800
-
-#define DPLL_PHASE_0 0xc818
-
-/* Signed 42-bit FFO in units of 2^(-53) */
-#define DPLL_WR_PHASE 0x0000
-
-#define DPLL_PHASE_1 0xc81c
-
-#define DPLL_PHASE_2 0xc820
-
-#define DPLL_PHASE_3 0xc824
-
-#define DPLL_PHASE_4 0xc828
-
-#define DPLL_PHASE_5 0xc82c
-
-#define DPLL_PHASE_6 0xc830
-
-#define DPLL_PHASE_7 0xc834
-
-#define DPLL_FREQ_0 0xc838
-
-/* Signed 42-bit FFO in units of 2^(-53) */
-#define DPLL_WR_FREQ 0x0000
-
-#define DPLL_FREQ_1 0xc840
-
-#define DPLL_FREQ_2 0xc848
-
-#define DPLL_FREQ_3 0xc850
-
-#define DPLL_FREQ_4 0xc858
-
-#define DPLL_FREQ_5 0xc860
-
-#define DPLL_FREQ_6 0xc868
-
-#define DPLL_FREQ_7 0xc870
-
-#define DPLL_PHASE_PULL_IN_0 0xc880
-#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */
-#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */
-#define PULL_IN_CTRL 0x0007
-
-#define DPLL_PHASE_PULL_IN_1 0xc888
-
-#define DPLL_PHASE_PULL_IN_2 0xc890
-
-#define DPLL_PHASE_PULL_IN_3 0xc898
-
-#define DPLL_PHASE_PULL_IN_4 0xc8a0
-
-#define DPLL_PHASE_PULL_IN_5 0xc8a8
-
-#define DPLL_PHASE_PULL_IN_6 0xc8b0
-
-#define DPLL_PHASE_PULL_IN_7 0xc8b8
-
-#define GPIO_CFG 0xc8c0
-#define GPIO_CFG_GBL 0x0000
-
-#define GPIO_0 0xc8c2
-#define GPIO_DCO_INC_DEC 0x0000
-#define GPIO_OUT_CTRL_0 0x0001
-#define GPIO_OUT_CTRL_1 0x0002
-#define GPIO_TOD_TRIG 0x0003
-#define GPIO_DPLL_INDICATOR 0x0004
-#define GPIO_LOS_INDICATOR 0x0005
-#define GPIO_REF_INPUT_DSQ_0 0x0006
-#define GPIO_REF_INPUT_DSQ_1 0x0007
-#define GPIO_REF_INPUT_DSQ_2 0x0008
-#define GPIO_REF_INPUT_DSQ_3 0x0009
-#define GPIO_MAN_CLK_SEL_0 0x000a
-#define GPIO_MAN_CLK_SEL_1 0x000b
-#define GPIO_MAN_CLK_SEL_2 0x000c
-#define GPIO_SLAVE 0x000d
-#define GPIO_ALERT_OUT_CFG 0x000e
-#define GPIO_TOD_NOTIFICATION_CFG 0x000f
-#define GPIO_CTRL 0x0010
-#define GPIO_CTRL_V520 0x0011
-
-#define GPIO_1 0xc8d4
-
-#define GPIO_2 0xc8e6
-
-#define GPIO_3 0xc900
-
-#define GPIO_4 0xc912
-
-#define GPIO_5 0xc924
-
-#define GPIO_6 0xc936
-
-#define GPIO_7 0xc948
-
-#define GPIO_8 0xc95a
-
-#define GPIO_9 0xc980
-
-#define GPIO_10 0xc992
-
-#define GPIO_11 0xc9a4
-
-#define GPIO_12 0xc9b6
-
-#define GPIO_13 0xc9c8
-
-#define GPIO_14 0xc9da
-
-#define GPIO_15 0xca00
-
-#define OUT_DIV_MUX 0xca12
-
-#define OUTPUT_0 0xca14
-#define OUTPUT_0_V520 0xca20
-/* FOD frequency output divider value */
-#define OUT_DIV 0x0000
-#define OUT_DUTY_CYCLE_HIGH 0x0004
-#define OUT_CTRL_0 0x0008
-#define OUT_CTRL_1 0x0009
-/* Phase adjustment in FOD cycles */
-#define OUT_PHASE_ADJ 0x000c
-
-#define OUTPUT_1 0xca24
-#define OUTPUT_1_V520 0xca30
-
-#define OUTPUT_2 0xca34
-#define OUTPUT_2_V520 0xca40
-
-#define OUTPUT_3 0xca44
-#define OUTPUT_3_V520 0xca50
-
-#define OUTPUT_4 0xca54
-#define OUTPUT_4_V520 0xca60
-
-#define OUTPUT_5 0xca64
-#define OUTPUT_5_V520 0xca80
-
-#define OUTPUT_6 0xca80
-#define OUTPUT_6_V520 0xca90
-
-#define OUTPUT_7 0xca90
-#define OUTPUT_7_V520 0xcaa0
-
-#define OUTPUT_8 0xcaa0
-#define OUTPUT_8_V520 0xcab0
-
-#define OUTPUT_9 0xcab0
-#define OUTPUT_9_V520 0xcac0
-
-#define OUTPUT_10 0xcac0
-#define OUTPUT_10_V520 0xcad0
-
-#define OUTPUT_11 0xcad0
-#define OUTPUT_11_V520 0xcae0
-
-#define SERIAL 0xcae0
-#define SERIAL_V520 0xcaf0
-
-#define PWM_ENCODER_0 0xcb00
-
-#define PWM_ENCODER_1 0xcb08
-
-#define PWM_ENCODER_2 0xcb10
-
-#define PWM_ENCODER_3 0xcb18
-
-#define PWM_ENCODER_4 0xcb20
-
-#define PWM_ENCODER_5 0xcb28
-
-#define PWM_ENCODER_6 0xcb30
-
-#define PWM_ENCODER_7 0xcb38
-
-#define PWM_DECODER_0 0xcb40
-
-#define PWM_DECODER_1 0xcb48
-#define PWM_DECODER_1_V520 0xcb4a
-
-#define PWM_DECODER_2 0xcb50
-#define PWM_DECODER_2_V520 0xcb54
-
-#define PWM_DECODER_3 0xcb58
-#define PWM_DECODER_3_V520 0xcb5e
-
-#define PWM_DECODER_4 0xcb60
-#define PWM_DECODER_4_V520 0xcb68
-
-#define PWM_DECODER_5 0xcb68
-#define PWM_DECODER_5_V520 0xcb80
-
-#define PWM_DECODER_6 0xcb70
-#define PWM_DECODER_6_V520 0xcb8a
-
-#define PWM_DECODER_7 0xcb80
-#define PWM_DECODER_7_V520 0xcb94
-
-#define PWM_DECODER_8 0xcb88
-#define PWM_DECODER_8_V520 0xcb9e
-
-#define PWM_DECODER_9 0xcb90
-#define PWM_DECODER_9_V520 0xcba8
-
-#define PWM_DECODER_10 0xcb98
-#define PWM_DECODER_10_V520 0xcbb2
-
-#define PWM_DECODER_11 0xcba0
-#define PWM_DECODER_11_V520 0xcbbc
-
-#define PWM_DECODER_12 0xcba8
-#define PWM_DECODER_12_V520 0xcbc6
-
-#define PWM_DECODER_13 0xcbb0
-#define PWM_DECODER_13_V520 0xcbd0
-
-#define PWM_DECODER_14 0xcbb8
-#define PWM_DECODER_14_V520 0xcbda
-
-#define PWM_DECODER_15 0xcbc0
-#define PWM_DECODER_15_V520 0xcbe4
-
-#define PWM_USER_DATA 0xcbc8
-#define PWM_USER_DATA_V520 0xcbf0
-
-#define TOD_0 0xcbcc
-#define TOD_0_V520 0xcc00
-
-/* Enable TOD counter, output channel sync and even-PPS mode */
-#define TOD_CFG 0x0000
-#define TOD_CFG_V520 0x0001
-
-#define TOD_1 0xcbce
-#define TOD_1_V520 0xcc02
-
-#define TOD_2 0xcbd0
-#define TOD_2_V520 0xcc04
-
-#define TOD_3 0xcbd2
-#define TOD_3_V520 0xcc06
-
-
-#define TOD_WRITE_0 0xcc00
-#define TOD_WRITE_0_V520 0xcc10
-/* 8-bit subns, 32-bit ns, 48-bit seconds */
-#define TOD_WRITE 0x0000
-/* Counter increments after TOD write is completed */
-#define TOD_WRITE_COUNTER 0x000c
-/* TOD write trigger configuration */
-#define TOD_WRITE_SELECT_CFG_0 0x000d
-/* TOD write trigger selection */
-#define TOD_WRITE_CMD 0x000f
-
-#define TOD_WRITE_1 0xcc10
-#define TOD_WRITE_1_V520 0xcc20
-
-#define TOD_WRITE_2 0xcc20
-#define TOD_WRITE_2_V520 0xcc30
-
-#define TOD_WRITE_3 0xcc30
-#define TOD_WRITE_3_V520 0xcc40
-
-#define TOD_READ_PRIMARY_0 0xcc40
-#define TOD_READ_PRIMARY_0_V520 0xcc50
-/* 8-bit subns, 32-bit ns, 48-bit seconds */
-#define TOD_READ_PRIMARY 0x0000
-/* Counter increments after TOD write is completed */
-#define TOD_READ_PRIMARY_COUNTER 0x000b
-/* Read trigger configuration */
-#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c
-/* Read trigger selection */
-#define TOD_READ_PRIMARY_CMD 0x000e
-#define TOD_READ_PRIMARY_CMD_V520 0x000f
-
-#define TOD_READ_PRIMARY_1 0xcc50
-#define TOD_READ_PRIMARY_1_V520 0xcc60
-
-#define TOD_READ_PRIMARY_2 0xcc60
-#define TOD_READ_PRIMARY_2_V520 0xcc80
-
-#define TOD_READ_PRIMARY_3 0xcc80
-#define TOD_READ_PRIMARY_3_V520 0xcc90
-
-#define TOD_READ_SECONDARY_0 0xcc90
-#define TOD_READ_SECONDARY_0_V520 0xcca0
-
-#define TOD_READ_SECONDARY_1 0xcca0
-#define TOD_READ_SECONDARY_1_V520 0xccb0
-
-#define TOD_READ_SECONDARY_2 0xccb0
-#define TOD_READ_SECONDARY_2_V520 0xccc0
-
-#define TOD_READ_SECONDARY_3 0xccc0
-#define TOD_READ_SECONDARY_3_V520 0xccd0
-
-#define OUTPUT_TDC_CFG 0xccd0
-#define OUTPUT_TDC_CFG_V520 0xcce0
-
-#define OUTPUT_TDC_0 0xcd00
-
-#define OUTPUT_TDC_1 0xcd08
-
-#define OUTPUT_TDC_2 0xcd10
-
-#define OUTPUT_TDC_3 0xcd18
-
-#define INPUT_TDC 0xcd20
-
-#define SCRATCH 0xcf50
-#define SCRATCH_V520 0xcf4c
-
-#define EEPROM 0xcf68
-#define EEPROM_V520 0xcf64
-
-#define OTP 0xcf70
-
-#define BYTE 0xcf80
-
-/* Bit definitions for the MAJ_REL register */
-#define MAJOR_SHIFT (1)
-#define MAJOR_MASK (0x7f)
-#define PR_BUILD BIT(0)
-
-/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */
-#define GPIO0_LEVEL BIT(0)
-#define GPIO1_LEVEL BIT(1)
-#define GPIO2_LEVEL BIT(2)
-#define GPIO3_LEVEL BIT(3)
-#define GPIO4_LEVEL BIT(4)
-#define GPIO5_LEVEL BIT(5)
-#define GPIO6_LEVEL BIT(6)
-#define GPIO7_LEVEL BIT(7)
-
-/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */
-#define GPIO8_LEVEL BIT(0)
-#define GPIO9_LEVEL BIT(1)
-#define GPIO10_LEVEL BIT(2)
-#define GPIO11_LEVEL BIT(3)
-#define GPIO12_LEVEL BIT(4)
-#define GPIO13_LEVEL BIT(5)
-#define GPIO14_LEVEL BIT(6)
-#define GPIO15_LEVEL BIT(7)
-
-/* Bit definitions for the GPIO0_TO_7_OUT register */
-#define GPIO0_DRIVE_LEVEL BIT(0)
-#define GPIO1_DRIVE_LEVEL BIT(1)
-#define GPIO2_DRIVE_LEVEL BIT(2)
-#define GPIO3_DRIVE_LEVEL BIT(3)
-#define GPIO4_DRIVE_LEVEL BIT(4)
-#define GPIO5_DRIVE_LEVEL BIT(5)
-#define GPIO6_DRIVE_LEVEL BIT(6)
-#define GPIO7_DRIVE_LEVEL BIT(7)
-
-/* Bit definitions for the GPIO8_TO_15_OUT register */
-#define GPIO8_DRIVE_LEVEL BIT(0)
-#define GPIO9_DRIVE_LEVEL BIT(1)
-#define GPIO10_DRIVE_LEVEL BIT(2)
-#define GPIO11_DRIVE_LEVEL BIT(3)
-#define GPIO12_DRIVE_LEVEL BIT(4)
-#define GPIO13_DRIVE_LEVEL BIT(5)
-#define GPIO14_DRIVE_LEVEL BIT(6)
-#define GPIO15_DRIVE_LEVEL BIT(7)
-
-/* Bit definitions for the DPLL_TOD_SYNC_CFG register */
-#define TOD_SYNC_SOURCE_SHIFT (1)
-#define TOD_SYNC_SOURCE_MASK (0x3)
-#define TOD_SYNC_EN BIT(0)
-
-/* Bit definitions for the DPLL_MODE register */
-#define WRITE_TIMER_MODE BIT(6)
-#define PLL_MODE_SHIFT (3)
-#define PLL_MODE_MASK (0x7)
-#define STATE_MODE_SHIFT (0)
-#define STATE_MODE_MASK (0x7)
-
-/* Bit definitions for the DPLL_MANU_REF_CFG register */
-#define MANUAL_REFERENCE_SHIFT (0)
-#define MANUAL_REFERENCE_MASK (0x1f)
-
-/* Bit definitions for the GPIO_CFG_GBL register */
-#define SUPPLY_MODE_SHIFT (0)
-#define SUPPLY_MODE_MASK (0x3)
-
-/* Bit definitions for the GPIO_DCO_INC_DEC register */
-#define INCDEC_DPLL_INDEX_SHIFT (0)
-#define INCDEC_DPLL_INDEX_MASK (0x7)
-
-/* Bit definitions for the GPIO_OUT_CTRL_0 register */
-#define CTRL_OUT_0 BIT(0)
-#define CTRL_OUT_1 BIT(1)
-#define CTRL_OUT_2 BIT(2)
-#define CTRL_OUT_3 BIT(3)
-#define CTRL_OUT_4 BIT(4)
-#define CTRL_OUT_5 BIT(5)
-#define CTRL_OUT_6 BIT(6)
-#define CTRL_OUT_7 BIT(7)
-
-/* Bit definitions for the GPIO_OUT_CTRL_1 register */
-#define CTRL_OUT_8 BIT(0)
-#define CTRL_OUT_9 BIT(1)
-#define CTRL_OUT_10 BIT(2)
-#define CTRL_OUT_11 BIT(3)
-#define CTRL_OUT_12 BIT(4)
-#define CTRL_OUT_13 BIT(5)
-#define CTRL_OUT_14 BIT(6)
-#define CTRL_OUT_15 BIT(7)
-
-/* Bit definitions for the GPIO_TOD_TRIG register */
-#define TOD_TRIG_0 BIT(0)
-#define TOD_TRIG_1 BIT(1)
-#define TOD_TRIG_2 BIT(2)
-#define TOD_TRIG_3 BIT(3)
-
-/* Bit definitions for the GPIO_DPLL_INDICATOR register */
-#define IND_DPLL_INDEX_SHIFT (0)
-#define IND_DPLL_INDEX_MASK (0x7)
-
-/* Bit definitions for the GPIO_LOS_INDICATOR register */
-#define REFMON_INDEX_SHIFT (0)
-#define REFMON_INDEX_MASK (0xf)
-/* Active level of LOS indicator, 0=low 1=high */
-#define ACTIVE_LEVEL BIT(4)
-
-/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */
-#define DSQ_INP_0 BIT(0)
-#define DSQ_INP_1 BIT(1)
-#define DSQ_INP_2 BIT(2)
-#define DSQ_INP_3 BIT(3)
-#define DSQ_INP_4 BIT(4)
-#define DSQ_INP_5 BIT(5)
-#define DSQ_INP_6 BIT(6)
-#define DSQ_INP_7 BIT(7)
-
-/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */
-#define DSQ_INP_8 BIT(0)
-#define DSQ_INP_9 BIT(1)
-#define DSQ_INP_10 BIT(2)
-#define DSQ_INP_11 BIT(3)
-#define DSQ_INP_12 BIT(4)
-#define DSQ_INP_13 BIT(5)
-#define DSQ_INP_14 BIT(6)
-#define DSQ_INP_15 BIT(7)
-
-/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */
-#define DSQ_DPLL_0 BIT(0)
-#define DSQ_DPLL_1 BIT(1)
-#define DSQ_DPLL_2 BIT(2)
-#define DSQ_DPLL_3 BIT(3)
-#define DSQ_DPLL_4 BIT(4)
-#define DSQ_DPLL_5 BIT(5)
-#define DSQ_DPLL_6 BIT(6)
-#define DSQ_DPLL_7 BIT(7)
-
-/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */
-#define DSQ_DPLL_SYS BIT(0)
-#define GPIO_DSQ_LEVEL BIT(1)
-
-/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */
-#define DPLL_TOD_SHIFT (0)
-#define DPLL_TOD_MASK (0x3)
-#define TOD_READ_SECONDARY BIT(2)
-#define GPIO_ASSERT_LEVEL BIT(3)
-
-/* Bit definitions for the GPIO_CTRL register */
-#define GPIO_FUNCTION_EN BIT(0)
-#define GPIO_CMOS_OD_MODE BIT(1)
-#define GPIO_CONTROL_DIR BIT(2)
-#define GPIO_PU_PD_MODE BIT(3)
-#define GPIO_FUNCTION_SHIFT (4)
-#define GPIO_FUNCTION_MASK (0xf)
-
-/* Bit definitions for the OUT_CTRL_1 register */
-#define OUT_SYNC_DISABLE BIT(7)
-#define SQUELCH_VALUE BIT(6)
-#define SQUELCH_DISABLE BIT(5)
-#define PAD_VDDO_SHIFT (2)
-#define PAD_VDDO_MASK (0x7)
-#define PAD_CMOSDRV_SHIFT (0)
-#define PAD_CMOSDRV_MASK (0x3)
-
-/* Bit definitions for the TOD_CFG register */
-#define TOD_EVEN_PPS_MODE BIT(2)
-#define TOD_OUT_SYNC_ENABLE BIT(1)
-#define TOD_ENABLE BIT(0)
-
-/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */
-#define WR_PWM_DECODER_INDEX_SHIFT (4)
-#define WR_PWM_DECODER_INDEX_MASK (0xf)
-#define WR_REF_INDEX_SHIFT (0)
-#define WR_REF_INDEX_MASK (0xf)
-
-/* Bit definitions for the TOD_WRITE_CMD register */
-#define TOD_WRITE_SELECTION_SHIFT (0)
-#define TOD_WRITE_SELECTION_MASK (0xf)
-/* 4.8.7 */
-#define TOD_WRITE_TYPE_SHIFT (4)
-#define TOD_WRITE_TYPE_MASK (0x3)
-
-/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */
-#define RD_PWM_DECODER_INDEX_SHIFT (4)
-#define RD_PWM_DECODER_INDEX_MASK (0xf)
-#define RD_REF_INDEX_SHIFT (0)
-#define RD_REF_INDEX_MASK (0xf)
-
-/* Bit definitions for the TOD_READ_PRIMARY_CMD register */
-#define TOD_READ_TRIGGER_MODE BIT(4)
-#define TOD_READ_TRIGGER_SHIFT (0)
-#define TOD_READ_TRIGGER_MASK (0xf)
-
-/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */
-#define COMBO_MASTER_HOLD BIT(0)
-
-/* Bit definitions for DPLL_SYS_STATUS register */
-#define DPLL_SYS_STATE_MASK (0xf)
-
-/* Bit definitions for SYS_APLL_STATUS register */
-#define SYS_APLL_LOSS_LOCK_LIVE_MASK BIT(0)
-#define SYS_APLL_LOSS_LOCK_LIVE_LOCKED 0
-#define SYS_APLL_LOSS_LOCK_LIVE_UNLOCKED 1
-
-#endif
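
Most multi-bit fields in the header above come as a _SHIFT/_MASK pair with the mask specified unshifted, matching how the driver applies them later in this diff (shift first, then mask). A worked example with the DPLL_MODE field:

```c
/* Field access sketch using the DPLL_MODE defines above: the mask
 * is applied after the shift, so PLL_MODE_MASK is unshifted. */
#include <stdio.h>

#define PLL_MODE_SHIFT	(3)
#define PLL_MODE_MASK	(0x7)

int main(void)
{
	unsigned char dpll_mode = 0x1a;		/* example register value */
	unsigned char pll_mode = (dpll_mode >> PLL_MODE_SHIFT) & PLL_MODE_MASK;

	printf("PLL_MODE = %u\n", pll_mode);	/* 0x1a >> 3 = 3, & 0x7 -> 3 */
	return 0;
}
```
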
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 4dfc52e06704..f9b2d66b0443 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -170,6 +170,7 @@ static void ptp_clock_release(struct device *dev)
struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
ptp_cleanup_pin_groups(ptp);
+ kfree(ptp->vclock_index);
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
mutex_destroy(&ptp->n_vclocks_mux);
@@ -283,15 +284,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
/* Create a posix clock and link it to the device. */
err = posix_clock_register(&ptp->clock, &ptp->dev);
if (err) {
+ if (ptp->pps_source)
+ pps_unregister_source(ptp->pps_source);
+
+ if (ptp->kworker)
+ kthread_destroy_worker(ptp->kworker);
+
+ put_device(&ptp->dev);
+
pr_err("failed to create posix clock\n");
- goto no_clock;
+ return ERR_PTR(err);
}
return ptp;
-no_clock:
- if (ptp->pps_source)
- pps_unregister_source(ptp->pps_source);
no_pps:
ptp_cleanup_pin_groups(ptp);
no_pin_groups:
@@ -321,8 +327,6 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
ptp->defunct = 1;
wake_up_interruptible(&ptp->tsev_wq);
- kfree(ptp->vclock_index);
-
if (ptp->kworker) {
kthread_cancel_delayed_work_sync(&ptp->aux_work);
kthread_destroy_worker(ptp->kworker);
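
The two ptp_clock.c hunks above are one fix: vclock_index is now freed in ptp_clock_release(), and the posix_clock_register() failure path drops the device reference with put_device() instead of jumping into manual cleanup, so the release callback runs exactly once. A userspace toy of that ownership rule (names illustrative):

```c
/* Toy model: once an object with a ->release() callback exists,
 * error paths drop the reference instead of freeing fields by hand,
 * so vclock_index is freed exactly once, in release(). */
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
	int *vclock_index;		/* owned by release() */
	void (*release)(struct obj *);
};

static void obj_release(struct obj *o)
{
	puts("release(): freeing vclock_index");
	free(o->vclock_index);		/* mirrors ptp_clock_release() */
	free(o);
}

static void obj_put(struct obj *o)	/* models put_device() */
{
	if (--o->refcount == 0)
		o->release(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refcount = 1;
	o->release = obj_release;
	o->vclock_index = calloc(4, sizeof(int));

	/* "posix_clock_register() failed": drop the reference */
	obj_put(o);
	return 0;
}
```
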
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index 1a2e3c2e4328..6bc5791a7ec5 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -6,7 +6,7 @@
* Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
*/
#include <linux/firmware.h>
-#include <linux/i2c.h>
+#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/delay.h>
@@ -14,6 +14,10 @@
#include <linux/kernel.h>
#include <linux/timekeeping.h>
#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/mfd/rsmu.h>
+#include <linux/mfd/idt8a340_reg.h>
+#include <asm/unaligned.h>
#include "ptp_private.h"
#include "ptp_clockmatrix.h"
@@ -32,9 +36,28 @@ static char *firmware;
module_param(firmware, charp, 0);
#define SETTIME_CORRECTION (0)
+#define EXTTS_PERIOD_MS (95)
static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm);
+static inline int idtcm_read(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return regmap_bulk_read(idtcm->regmap, module + regaddr, buf, count);
+}
+
+static inline int idtcm_write(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return regmap_bulk_write(idtcm->regmap, module + regaddr, buf, count);
+}
+
static int contains_full_configuration(struct idtcm *idtcm,
const struct firmware *fw)
{
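
With the rsmu MFD parent supplying a regmap, the new idtcm_read()/idtcm_write() wrappers address the chip's paged 16-bit register space flatly: a module base plus a register offset is the whole address, and the PAGE_ADDR bookkeeping removed in the next hunk disappears. A worked example using defines from the register header above (the parent's regmap is assumed to hide the paging):

```c
/* Flat addressing sketch: module base + offset = regmap address. */
#include <stdio.h>

#define STATUS			0xc03c
#define DPLL_SYS_APLL_STATUS	0x0021

int main(void)
{
	unsigned int reg = STATUS + DPLL_SYS_APLL_STATUS;

	/* regmap_bulk_read(idtcm->regmap, reg, &status, 1) would
	 * target this address directly */
	printf("regmap address = 0x%04x\n", reg);	/* 0xc05d */
	return 0;
}
```
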
@@ -173,134 +196,6 @@ static enum fw_version idtcm_fw_version(const char *version)
return ver;
}
-static int idtcm_xfer_read(struct idtcm *idtcm,
- u8 regaddr,
- u8 *buf,
- u16 count)
-{
- struct i2c_client *client = idtcm->client;
- struct i2c_msg msg[2];
- int cnt;
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = &regaddr;
-
- msg[1].addr = client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = count;
- msg[1].buf = buf;
-
- cnt = i2c_transfer(client->adapter, msg, 2);
-
- if (cnt < 0) {
- dev_err(&client->dev,
- "i2c_transfer failed at %d in %s, at addr: %04x!",
- __LINE__, __func__, regaddr);
- return cnt;
- } else if (cnt != 2) {
- dev_err(&client->dev,
- "i2c_transfer sent only %d of %d messages", cnt, 2);
- return -EIO;
- }
-
- return 0;
-}
-
-static int idtcm_xfer_write(struct idtcm *idtcm,
- u8 regaddr,
- u8 *buf,
- u16 count)
-{
- struct i2c_client *client = idtcm->client;
- /* we add 1 byte for device register */
- u8 msg[IDTCM_MAX_WRITE_COUNT + 1];
- int cnt;
-
- if (count > IDTCM_MAX_WRITE_COUNT)
- return -EINVAL;
-
- msg[0] = regaddr;
- memcpy(&msg[1], buf, count);
-
- cnt = i2c_master_send(client, msg, count + 1);
-
- if (cnt < 0) {
- dev_err(&client->dev,
- "i2c_master_send failed at %d in %s, at addr: %04x!",
- __LINE__, __func__, regaddr);
- return cnt;
- }
-
- return 0;
-}
-
-static int idtcm_page_offset(struct idtcm *idtcm, u8 val)
-{
- u8 buf[4];
- int err;
-
- if (idtcm->page_offset == val)
- return 0;
-
- buf[0] = 0x0;
- buf[1] = val;
- buf[2] = 0x10;
- buf[3] = 0x20;
-
- err = idtcm_xfer_write(idtcm, PAGE_ADDR, buf, sizeof(buf));
- if (err) {
- idtcm->page_offset = 0xff;
- dev_err(&idtcm->client->dev, "failed to set page offset");
- } else {
- idtcm->page_offset = val;
- }
-
- return err;
-}
-
-static int _idtcm_rdwr(struct idtcm *idtcm,
- u16 regaddr,
- u8 *buf,
- u16 count,
- bool write)
-{
- u8 hi;
- u8 lo;
- int err;
-
- hi = (regaddr >> 8) & 0xff;
- lo = regaddr & 0xff;
-
- err = idtcm_page_offset(idtcm, hi);
- if (err)
- return err;
-
- if (write)
- return idtcm_xfer_write(idtcm, lo, buf, count);
-
- return idtcm_xfer_read(idtcm, lo, buf, count);
-}
-
-static int idtcm_read(struct idtcm *idtcm,
- u16 module,
- u16 regaddr,
- u8 *buf,
- u16 count)
-{
- return _idtcm_rdwr(idtcm, module + regaddr, buf, count, false);
-}
-
-static int idtcm_write(struct idtcm *idtcm,
- u16 module,
- u16 regaddr,
- u8 *buf,
- u16 count)
-{
- return _idtcm_rdwr(idtcm, module + regaddr, buf, count, true);
-}
-
static int clear_boot_status(struct idtcm *idtcm)
{
u8 buf[4] = {0};
@@ -339,11 +234,82 @@ static int wait_for_boot_status_ready(struct idtcm *idtcm)
} while (i);
- dev_warn(&idtcm->client->dev, "%s timed out", __func__);
+ dev_warn(idtcm->dev, "%s timed out", __func__);
return -EBUSY;
}
+static int _idtcm_set_scsr_read_trig(struct idtcm_channel *channel,
+ enum scsr_read_trig_sel trig, u8 ref)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_PRIMARY_CMD);
+ u8 val;
+ int err;
+
+ if (trig == SCSR_TOD_READ_TRIG_SEL_REFCLK) {
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_SEL_CFG_0, &val, sizeof(val));
+ if (err)
+ return err;
+
+ val &= ~(WR_REF_INDEX_MASK << WR_REF_INDEX_SHIFT);
+ val |= (ref << WR_REF_INDEX_SHIFT);
+
+ err = idtcm_write(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_SEL_CFG_0, &val, sizeof(val));
+ if (err)
+ return err;
+ }
+
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ tod_read_cmd, &val, sizeof(val));
+ if (err)
+ return err;
+
+ val &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT);
+ val |= (trig << TOD_READ_TRIGGER_SHIFT);
+ val &= ~TOD_READ_TRIGGER_MODE; /* single shot */
+
+ err = idtcm_write(idtcm, channel->tod_read_primary,
+ tod_read_cmd, &val, sizeof(val));
+ return err;
+}
+
+static int idtcm_enable_extts(struct idtcm_channel *channel, u8 todn, u8 ref,
+ bool enable)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 old_mask = idtcm->extts_mask;
+ u8 mask = 1 << todn;
+ int err = 0;
+
+ if (todn >= MAX_TOD)
+ return -EINVAL;
+
+ if (enable) {
+ if (ref > 0xF) /* E_REF_CLK15 */
+ return -EINVAL;
+ if (idtcm->extts_mask & mask)
+ return 0;
+ err = _idtcm_set_scsr_read_trig(&idtcm->channel[todn],
+ SCSR_TOD_READ_TRIG_SEL_REFCLK,
+ ref);
+ if (err == 0) {
+ idtcm->extts_mask |= mask;
+ idtcm->event_channel[todn] = channel;
+ idtcm->channel[todn].refn = ref;
+ }
+	} else {
+		idtcm->extts_mask &= ~mask;
+	}
+
+ if (old_mask == 0 && idtcm->extts_mask)
+ schedule_delayed_work(&idtcm->extts_work,
+ msecs_to_jiffies(EXTTS_PERIOD_MS));
+
+ return err;
+}
+
static int read_sys_apll_status(struct idtcm *idtcm, u8 *status)
{
return idtcm_read(idtcm, STATUS, DPLL_SYS_APLL_STATUS, status,
@@ -380,7 +346,7 @@ static int wait_for_sys_apll_dpll_lock(struct idtcm *idtcm)
} else if (dpll == DPLL_STATE_FREERUN ||
dpll == DPLL_STATE_HOLDOVER ||
dpll == DPLL_STATE_OPEN_LOOP) {
- dev_warn(&idtcm->client->dev,
+ dev_warn(idtcm->dev,
"No wait state: DPLL_SYS_STATE %d", dpll);
return -EPERM;
}
@@ -388,7 +354,7 @@ static int wait_for_sys_apll_dpll_lock(struct idtcm *idtcm)
msleep(LOCK_POLL_INTERVAL_MS);
} while (time_is_after_jiffies(timeout));
- dev_warn(&idtcm->client->dev,
+ dev_warn(idtcm->dev,
"%d ms lock timeout: SYS APLL Loss Lock %d SYS DPLL state %d",
LOCK_TIMEOUT_MS, apll, dpll);
@@ -398,39 +364,27 @@ static int wait_for_sys_apll_dpll_lock(struct idtcm *idtcm)
static void wait_for_chip_ready(struct idtcm *idtcm)
{
if (wait_for_boot_status_ready(idtcm))
- dev_warn(&idtcm->client->dev, "BOOT_STATUS != 0xA0");
+ dev_warn(idtcm->dev, "BOOT_STATUS != 0xA0");
if (wait_for_sys_apll_dpll_lock(idtcm))
- dev_warn(&idtcm->client->dev,
+ dev_warn(idtcm->dev,
"Continuing while SYS APLL/DPLL is not locked");
}
static int _idtcm_gettime(struct idtcm_channel *channel,
- struct timespec64 *ts)
+ struct timespec64 *ts, u8 timeout)
{
struct idtcm *idtcm = channel->idtcm;
u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_PRIMARY_CMD);
u8 buf[TOD_BYTE_COUNT];
- u8 timeout = 10;
u8 trigger;
int err;
- err = idtcm_read(idtcm, channel->tod_read_primary,
- tod_read_cmd, &trigger, sizeof(trigger));
- if (err)
- return err;
-
- trigger &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT);
- trigger |= (1 << TOD_READ_TRIGGER_SHIFT);
- trigger &= ~TOD_READ_TRIGGER_MODE; /* single shot */
-
- err = idtcm_write(idtcm, channel->tod_read_primary,
- tod_read_cmd, &trigger, sizeof(trigger));
- if (err)
- return err;
-
/* wait for the trigger to clear to 0 */
- while (trigger & TOD_READ_TRIGGER_MASK) {
+ do {
+ if (timeout-- == 0)
+ return -EIO;
+
if (idtcm->calculate_overhead_flag)
idtcm->start_time = ktime_get_raw();
@@ -439,10 +393,7 @@ static int _idtcm_gettime(struct idtcm_channel *channel,
sizeof(trigger));
if (err)
return err;
-
- if (--timeout == 0)
- return -EIO;
- }
+ } while (trigger & TOD_READ_TRIGGER_MASK);
err = idtcm_read(idtcm, channel->tod_read_primary,
TOD_READ_PRIMARY, buf, sizeof(buf));
@@ -454,6 +405,79 @@ static int _idtcm_gettime(struct idtcm_channel *channel,
return err;
}
+static int idtcm_extts_check_channel(struct idtcm *idtcm, u8 todn)
+{
+ struct idtcm_channel *ptp_channel, *extts_channel;
+ struct ptp_clock_event event;
+ struct timespec64 ts;
+ u32 dco_delay = 0;
+ int err;
+
+ extts_channel = &idtcm->channel[todn];
+ ptp_channel = idtcm->event_channel[todn];
+ if (extts_channel == ptp_channel)
+ dco_delay = ptp_channel->dco_delay;
+
+ err = _idtcm_gettime(extts_channel, &ts, 1);
+ if (err == 0) {
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = todn;
+ event.timestamp = timespec64_to_ns(&ts) - dco_delay;
+ ptp_clock_event(ptp_channel->ptp_clock, &event);
+ }
+ return err;
+}
+
+static u8 idtcm_enable_extts_mask(struct idtcm_channel *channel,
+ u8 extts_mask, bool enable)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int i, err;
+
+ for (i = 0; i < MAX_TOD; i++) {
+ u8 mask = 1 << i;
+ u8 refn = idtcm->channel[i].refn;
+
+ if (extts_mask & mask) {
+ /* check extts before disabling it */
+ if (enable == false) {
+ err = idtcm_extts_check_channel(idtcm, i);
+ /* trigger happened so we won't re-enable it */
+ if (err == 0)
+ extts_mask &= ~mask;
+ }
+ (void)idtcm_enable_extts(channel, i, refn, enable);
+ }
+ }
+
+ return extts_mask;
+}
+
+static int _idtcm_gettime_immediate(struct idtcm_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 extts_mask = 0;
+ int err;
+
+ /* Disable extts */
+ if (idtcm->extts_mask) {
+ extts_mask = idtcm_enable_extts_mask(channel, idtcm->extts_mask,
+ false);
+ }
+
+ err = _idtcm_set_scsr_read_trig(channel,
+ SCSR_TOD_READ_TRIG_SEL_IMMEDIATE, 0);
+ if (err == 0)
+ err = _idtcm_gettime(channel, ts, 10);
+
+ /* Re-enable extts */
+ if (extts_mask)
+ idtcm_enable_extts_mask(channel, extts_mask, true);
+
+ return err;
+}
+
static int _sync_pll_output(struct idtcm *idtcm,
u8 pll,
u8 sync_src,
@@ -777,7 +801,7 @@ static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel,
break;
if (++count > 20) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Timed out waiting for the write counter");
return -EIO;
}
@@ -842,7 +866,7 @@ static int _idtcm_settime_deprecated(struct idtcm_channel *channel,
err = _idtcm_set_dpll_hw_tod(channel, ts, HW_TOD_WR_TRIG_SEL_MSB);
if (err) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"%s: Set HW ToD failed", __func__);
return err;
}
@@ -1001,7 +1025,7 @@ static int _idtcm_adjtime_deprecated(struct idtcm_channel *channel, s64 delta)
if (err)
return err;
- err = _idtcm_gettime(channel, &ts);
+ err = _idtcm_gettime_immediate(channel, &ts);
if (err)
return err;
@@ -1035,14 +1059,14 @@ static int idtcm_state_machine_reset(struct idtcm *idtcm)
read_boot_status(idtcm, &status);
if (status == 0xA0) {
- dev_dbg(&idtcm->client->dev,
+ dev_dbg(idtcm->dev,
"SM_RESET completed in %d ms", i * 100);
break;
}
}
if (!status)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Timed out waiting for CM_RESET to complete");
}
@@ -1139,12 +1163,12 @@ static int set_pll_output_mask(struct idtcm *idtcm, u16 addr, u8 val)
static int set_tod_ptp_pll(struct idtcm *idtcm, u8 index, u8 pll)
{
if (index >= MAX_TOD) {
- dev_err(&idtcm->client->dev, "ToD%d not supported", index);
+ dev_err(idtcm->dev, "ToD%d not supported", index);
return -EINVAL;
}
if (pll >= MAX_PLL) {
- dev_err(&idtcm->client->dev, "Pll%d not supported", pll);
+ dev_err(idtcm->dev, "Pll%d not supported", pll);
return -EINVAL;
}
@@ -1162,7 +1186,7 @@ static int check_and_set_masks(struct idtcm *idtcm,
switch (regaddr) {
case TOD_MASK_ADDR:
if ((val & 0xf0) || !(val & 0x0f)) {
- dev_err(&idtcm->client->dev, "Invalid TOD mask 0x%02x", val);
+ dev_err(idtcm->dev, "Invalid TOD mask 0x%02x", val);
err = -EINVAL;
} else {
idtcm->tod_mask = val;
@@ -1193,13 +1217,13 @@ static void display_pll_and_masks(struct idtcm *idtcm)
u8 i;
u8 mask;
- dev_dbg(&idtcm->client->dev, "tod_mask = 0x%02x", idtcm->tod_mask);
+ dev_dbg(idtcm->dev, "tod_mask = 0x%02x", idtcm->tod_mask);
for (i = 0; i < MAX_TOD; i++) {
mask = 1 << i;
if (mask & idtcm->tod_mask)
- dev_dbg(&idtcm->client->dev,
+ dev_dbg(idtcm->dev,
"TOD%d pll = %d output_mask = 0x%04x",
i, idtcm->channel[i].pll,
idtcm->channel[i].output_mask);
@@ -1222,16 +1246,16 @@ static int idtcm_load_firmware(struct idtcm *idtcm,
if (firmware) /* module parameter */
snprintf(fname, sizeof(fname), "%s", firmware);
- dev_info(&idtcm->client->dev, "firmware '%s'", fname);
+ dev_info(idtcm->dev, "requesting firmware '%s'", fname);
err = request_firmware(&fw, fname, dev);
if (err) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
return err;
}
- dev_dbg(&idtcm->client->dev, "firmware size %zu bytes", fw->size);
+ dev_dbg(idtcm->dev, "firmware size %zu bytes", fw->size);
rec = (struct idtcm_fwrc *) fw->data;
@@ -1240,7 +1264,7 @@ static int idtcm_load_firmware(struct idtcm *idtcm,
for (len = fw->size; len > 0; len -= sizeof(*rec)) {
if (rec->reserved) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"bad firmware, reserved field non-zero");
err = -EINVAL;
} else {
@@ -1291,7 +1315,7 @@ static int idtcm_output_enable(struct idtcm_channel *channel,
base = get_output_base_addr(idtcm->fw_ver, outn);
if (!(base > 0)) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"%s - Unsupported out%d", __func__, outn);
return base;
}
@@ -1333,8 +1357,8 @@ static int idtcm_output_mask_enable(struct idtcm_channel *channel,
}
static int idtcm_perout_enable(struct idtcm_channel *channel,
- bool enable,
- struct ptp_perout_request *perout)
+ struct ptp_perout_request *perout,
+ bool enable)
{
struct idtcm *idtcm = channel->idtcm;
unsigned int flags = perout->flags;
@@ -1347,7 +1371,7 @@ static int idtcm_perout_enable(struct idtcm_channel *channel,
err = idtcm_output_enable(channel, enable, perout->index);
if (err) {
- dev_err(&idtcm->client->dev, "Unable to set output enable");
+ dev_err(idtcm->dev, "Unable to set output enable");
return err;
}
@@ -1448,7 +1472,7 @@ static int configure_dpll_mode_write_frequency(struct idtcm_channel *channel)
err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
if (err)
- dev_err(&idtcm->client->dev, "Failed to set pll mode to write frequency");
+ dev_err(idtcm->dev, "Failed to set pll mode to write frequency");
else
channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
@@ -1463,7 +1487,7 @@ static int configure_dpll_mode_write_phase(struct idtcm_channel *channel)
err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_PHASE);
if (err)
- dev_err(&idtcm->client->dev, "Failed to set pll mode to write phase");
+ dev_err(idtcm->dev, "Failed to set pll mode to write phase");
else
channel->mode = PTP_PLL_MODE_WRITE_PHASE;
@@ -1478,7 +1502,7 @@ static int configure_manual_reference_write_frequency(struct idtcm_channel *chan
err = idtcm_set_manual_reference(channel, MANU_REF_WRITE_FREQUENCY);
if (err)
- dev_err(&idtcm->client->dev, "Failed to set manual reference to write frequency");
+ dev_err(idtcm->dev, "Failed to set manual reference to write frequency");
else
channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
@@ -1493,7 +1517,7 @@ static int configure_manual_reference_write_phase(struct idtcm_channel *channel)
err = idtcm_set_manual_reference(channel, MANU_REF_WRITE_PHASE);
if (err)
- dev_err(&idtcm->client->dev, "Failed to set manual reference to write phase");
+ dev_err(idtcm->dev, "Failed to set manual reference to write phase");
else
channel->mode = PTP_PLL_MODE_WRITE_PHASE;
@@ -1518,11 +1542,11 @@ static long idtcm_work_handler(struct ptp_clock_info *ptp)
struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
struct idtcm *idtcm = channel->idtcm;
- mutex_lock(&idtcm->reg_lock);
+ mutex_lock(idtcm->lock);
(void)idtcm_stop_phase_pull_in(channel);
- mutex_unlock(&idtcm->reg_lock);
+ mutex_unlock(idtcm->lock);
/* Return a negative value here to not reschedule */
return -1;
@@ -1533,8 +1557,8 @@ static s32 phase_pull_in_scaled_ppm(s32 current_ppm, s32 phase_pull_in_ppb)
/* ppb = scaled_ppm * 125 / 2^13 */
/* scaled_ppm = ppb * 2^13 / 125 */
- s64 max_scaled_ppm = (PHASE_PULL_IN_MAX_PPB << 13) / 125;
- s64 scaled_ppm = (phase_pull_in_ppb << 13) / 125;
+ s64 max_scaled_ppm = div_s64((s64)PHASE_PULL_IN_MAX_PPB << 13, 125);
+ s64 scaled_ppm = div_s64((s64)phase_pull_in_ppb << 13, 125);
current_ppm += scaled_ppm;
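
Besides switching to div_s64() (plain 64-by-32 division is not available as an operator on all 32-bit targets), the casts widen the operand before the shift: 2^31 / 2^13 is 262144, so any phase_pull_in_ppb above 262143 would overflow a 32-bit shift. A small demo:

```c
/* Why the cast above matters: scaled_ppm = ppb * 2^13 / 125, and
 * the shift must happen in 64 bits. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t ppb = 400000;	/* > 262143, so (ppb << 13) would
				 * overflow a 32-bit int */
	int64_t scaled_ppm = ((int64_t)ppb << 13) / 125;

	printf("scaled_ppm = %lld\n", (long long)scaled_ppm); /* 26214400 */
	return 0;
}
```
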
@@ -1607,7 +1631,7 @@ static int initialize_operating_mode_with_manual_reference(struct idtcm_channel
channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
break;
default:
- dev_warn(&idtcm->client->dev,
+ dev_warn(idtcm->dev,
"Unsupported MANUAL_REFERENCE: 0x%02x", ref);
}
@@ -1633,7 +1657,7 @@ static int initialize_operating_mode_with_pll_mode(struct idtcm_channel *channel
channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
break;
default:
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Unsupported PLL_MODE: 0x%02x", mode);
err = -EINVAL;
}
@@ -1652,14 +1676,14 @@ static int initialize_dco_operating_mode(struct idtcm_channel *channel)
err = idtcm_get_pll_mode(channel, &mode);
if (err) {
- dev_err(&idtcm->client->dev, "Unable to read pll mode!");
+ dev_err(idtcm->dev, "Unable to read pll mode!");
return err;
}
if (mode == PLL_MODE_PLL) {
err = idtcm_get_manual_reference(channel, &ref);
if (err) {
- dev_err(&idtcm->client->dev, "Unable to read manual reference!");
+ dev_err(idtcm->dev, "Unable to read manual reference!");
return err;
}
err = initialize_operating_mode_with_manual_reference(channel, ref);
@@ -1775,15 +1799,14 @@ static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
+ mutex_lock(idtcm->lock);
+ err = _idtcm_gettime_immediate(channel, ts);
+ mutex_unlock(idtcm->lock);
- err = _idtcm_gettime(channel, ts);
if (err)
- dev_err(&idtcm->client->dev, "Failed at line %d in %s!",
+ dev_err(idtcm->dev, "Failed at line %d in %s!",
__LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1794,15 +1817,14 @@ static int idtcm_settime_deprecated(struct ptp_clock_info *ptp,
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
-
+ mutex_lock(idtcm->lock);
err = _idtcm_settime_deprecated(channel, ts);
+ mutex_unlock(idtcm->lock);
+
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1813,15 +1835,14 @@ static int idtcm_settime(struct ptp_clock_info *ptp,
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
-
+ mutex_lock(idtcm->lock);
err = _idtcm_settime(channel, ts, SCSR_TOD_WR_TYPE_SEL_ABSOLUTE);
+ mutex_unlock(idtcm->lock);
+
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1831,15 +1852,14 @@ static int idtcm_adjtime_deprecated(struct ptp_clock_info *ptp, s64 delta)
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
-
+ mutex_lock(idtcm->lock);
err = _idtcm_adjtime_deprecated(channel, delta);
+ mutex_unlock(idtcm->lock);
+
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1854,13 +1874,10 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
if (channel->phase_pull_in == true)
return 0;
- mutex_lock(&idtcm->reg_lock);
+ mutex_lock(idtcm->lock);
if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) {
err = channel->do_phase_pull_in(channel, delta, 0);
- if (err)
- dev_err(&idtcm->client->dev,
- "Failed at line %d in %s!", __LINE__, __func__);
} else {
if (delta >= 0) {
ts = ns_to_timespec64(delta);
@@ -1870,11 +1887,13 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
type = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS;
}
err = _idtcm_settime(channel, &ts, type);
- if (err)
- dev_err(&idtcm->client->dev,
- "Failed at line %d in %s!", __LINE__, __func__);
}
- mutex_unlock(&idtcm->reg_lock);
+
+ mutex_unlock(idtcm->lock);
+
+ if (err)
+ dev_err(idtcm->dev,
+ "Failed at line %d in %s!", __LINE__, __func__);
return err;
}
@@ -1885,15 +1904,14 @@ static int idtcm_adjphase(struct ptp_clock_info *ptp, s32 delta)
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
-
+ mutex_lock(idtcm->lock);
err = _idtcm_adjphase(channel, delta);
+ mutex_unlock(idtcm->lock);
+
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1909,13 +1927,14 @@ static int idtcm_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
if (scaled_ppm == channel->current_freq_scaled_ppm)
return 0;
- mutex_lock(&idtcm->reg_lock);
-
+ mutex_lock(idtcm->lock);
err = _idtcm_adjfine(channel, scaled_ppm);
+ mutex_unlock(idtcm->lock);
- mutex_unlock(&idtcm->reg_lock);
-
- if (!err)
+ if (err)
+ dev_err(idtcm->dev,
+ "Failed at line %d in %s!", __LINE__, __func__);
+ else
channel->current_freq_scaled_ppm = scaled_ppm;
return err;
@@ -1924,35 +1943,38 @@ static int idtcm_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
static int idtcm_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
- int err;
struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err = -EOPNOTSUPP;
+
+ mutex_lock(idtcm->lock);
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
- if (!on) {
- err = idtcm_perout_enable(channel, false, &rq->perout);
- if (err)
- dev_err(&channel->idtcm->client->dev,
- "Failed at line %d in %s!",
- __LINE__, __func__);
- return err;
- }
-
+ if (!on)
+ err = idtcm_perout_enable(channel, &rq->perout, false);
/* Only accept a 1-PPS aligned to the second. */
- if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
- rq->perout.period.nsec)
- return -ERANGE;
-
- err = idtcm_perout_enable(channel, true, &rq->perout);
- if (err)
- dev_err(&channel->idtcm->client->dev,
- "Failed at line %d in %s!", __LINE__, __func__);
- return err;
+ else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
+ rq->perout.period.nsec)
+ err = -ERANGE;
+ else
+ err = idtcm_perout_enable(channel, &rq->perout, true);
+ break;
+ case PTP_CLK_REQ_EXTTS:
+ err = idtcm_enable_extts(channel, rq->extts.index,
+ rq->extts.rsv[0], on);
+ break;
default:
break;
}
- return -EOPNOTSUPP;
+ mutex_unlock(idtcm->lock);
+
+ if (err)
+ dev_err(channel->idtcm->dev,
+ "Failed in %s with err %d!", __func__, err);
+
+ return err;
}
static int idtcm_enable_tod(struct idtcm_channel *channel)
@@ -2013,7 +2035,7 @@ static void idtcm_set_version_info(struct idtcm *idtcm)
idtcm->fw_ver = idtcm_fw_version(idtcm->version);
- dev_info(&idtcm->client->dev,
+ dev_info(idtcm->dev,
"%d.%d.%d, Id: 0x%04x HW Rev: %d OTP Config Select: %d",
major, minor, hotfix,
product_id, hw_rev_id, config_select);
@@ -2023,6 +2045,7 @@ static const struct ptp_clock_info idtcm_caps = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 12,
+ .n_ext_ts = MAX_TOD,
.adjphase = &idtcm_adjphase,
.adjfine = &idtcm_adjfine,
.adjtime = &idtcm_adjtime,
@@ -2036,6 +2059,7 @@ static const struct ptp_clock_info idtcm_caps_deprecated = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 12,
+ .n_ext_ts = MAX_TOD,
.adjphase = &idtcm_adjphase,
.adjfine = &idtcm_adjfine,
.adjtime = &idtcm_adjtime_deprecated,
@@ -2122,24 +2146,46 @@ static int configure_channel_pll(struct idtcm_channel *channel)
return err;
}
-static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
+/*
+ * Compensate for the PTP DCO input-to-output delay.
+ * This delay is 18 FOD cycles.
+ */
+static u32 idtcm_get_dco_delay(struct idtcm_channel *channel)
{
- enum fw_version fw_ver = idtcm->fw_ver;
- struct idtcm_channel *channel;
+ struct idtcm *idtcm = channel->idtcm;
+ u8 mbuf[8] = {0};
+ u8 nbuf[2] = {0};
+ u32 fodFreq;
int err;
+ u64 m;
+ u16 n;
- if (!(index < MAX_TOD))
- return -EINVAL;
+ err = idtcm_read(idtcm, channel->dpll_ctrl_n,
+ DPLL_CTRL_DPLL_FOD_FREQ, mbuf, 6);
+ if (err)
+ return 0;
- channel = &idtcm->channel[index];
+ err = idtcm_read(idtcm, channel->dpll_ctrl_n,
+ DPLL_CTRL_DPLL_FOD_FREQ + 6, nbuf, 2);
+ if (err)
+ return 0;
- channel->idtcm = idtcm;
- channel->current_freq_scaled_ppm = 0;
+ m = get_unaligned_le64(mbuf);
+ n = get_unaligned_le16(nbuf);
- /* Set pll addresses */
- err = configure_channel_pll(channel);
- if (err)
- return err;
+ if (n == 0)
+ n = 1;
+
+ fodFreq = (u32)div_u64(m, n);
+ if (fodFreq >= 500000000)
+ return 18 * (u32)div_u64(NSEC_PER_SEC, fodFreq);
+
+ return 0;
+}
+
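+/*
+ * Worked example (illustrative M and N, not values read from hardware):
+ * a 1 GHz FOD has a 1 ns period, so the 18-cycle compensation is 18 ns;
+ * outputs below 500 MHz are not compensated at all. As userspace C:
+ *
+ *	uint64_t m = 1000000000;	// assumed numerator
+ *	uint16_t n = 1;			// assumed divider (0 coerced to 1)
+ *	uint32_t fod_freq = (uint32_t)(m / n);
+ *	uint32_t dco_delay = 0;
+ *
+ *	if (fod_freq >= 500000000)
+ *		dco_delay = 18 * (uint32_t)(1000000000ULL / fod_freq);
+ *	// fod_freq = 1000000000 Hz -> dco_delay = 18 ns
+ */
+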
+static int configure_channel_tod(struct idtcm_channel *channel, u32 index)
+{
+ enum fw_version fw_ver = channel->idtcm->fw_ver;
/* Set tod addresses */
switch (index) {
@@ -2171,6 +2217,32 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
return -EINVAL;
}
+ return 0;
+}
+
+static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
+{
+ struct idtcm_channel *channel;
+ int err;
+
+ if (!(index < MAX_TOD))
+ return -EINVAL;
+
+ channel = &idtcm->channel[index];
+
+ channel->idtcm = idtcm;
+ channel->current_freq_scaled_ppm = 0;
+
+ /* Set pll addresses */
+ err = configure_channel_pll(channel);
+ if (err)
+ return err;
+
+ /* Set tod addresses */
+ err = configure_channel_tod(channel, index);
+ if (err)
+ return err;
+
if (idtcm->fw_ver < V487)
channel->caps = idtcm_caps_deprecated;
else
@@ -2185,11 +2257,13 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
err = idtcm_enable_tod(channel);
if (err) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
return err;
}
+ channel->dco_delay = idtcm_get_dco_delay(channel);
+
channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
if (IS_ERR(channel->ptp_clock)) {
@@ -2201,12 +2275,59 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
if (!channel->ptp_clock)
return -ENOTSUPP;
- dev_info(&idtcm->client->dev, "PLL%d registered as ptp%d",
+ dev_info(idtcm->dev, "PLL%d registered as ptp%d",
index, channel->ptp_clock->index);
return 0;
}
+static int idtcm_enable_extts_channel(struct idtcm *idtcm, u32 index)
+{
+ struct idtcm_channel *channel;
+ int err;
+
+ if (!(index < MAX_TOD))
+ return -EINVAL;
+
+ channel = &idtcm->channel[index];
+ channel->idtcm = idtcm;
+
+ /* Set tod addresses */
+ err = configure_channel_tod(channel, index);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void idtcm_extts_check(struct work_struct *work)
+{
+ struct idtcm *idtcm = container_of(work, struct idtcm, extts_work.work);
+ int err, i;
+
+ if (idtcm->extts_mask == 0)
+ return;
+
+ mutex_lock(idtcm->lock);
+ for (i = 0; i < MAX_TOD; i++) {
+ u8 mask = 1 << i;
+
+ if (idtcm->extts_mask & mask) {
+ err = idtcm_extts_check_channel(idtcm, i);
+ /* trigger clears itself, so clear the mask */
+ if (err == 0)
+ idtcm->extts_mask &= ~mask;
+ }
+ }
+
+ if (idtcm->extts_mask)
+ schedule_delayed_work(&idtcm->extts_work,
+ msecs_to_jiffies(EXTTS_PERIOD_MS));
+ mutex_unlock(idtcm->lock);
+}
+
static void ptp_clock_unregister_all(struct idtcm *idtcm)
{
u8 i;
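
idtcm_extts_check() above is a self-rescheduling poll: it re-arms itself every EXTTS_PERIOD_MS (95 ms) while any ToD still has extts armed, and idtcm_remove() later cancels it with cancel_delayed_work_sync(). The bare skeleton of that pattern, as a kernel-style sketch (names illustrative, module context assumed):

```c
/* Self-rescheduling delayed-work skeleton: the handler re-arms
 * itself only while work remains; teardown relies on
 * cancel_delayed_work_sync(), as in idtcm_remove() below. */
#include <linux/workqueue.h>

static struct delayed_work poll_work;
static unsigned long pending_mask;

static void poll_fn(struct work_struct *work)
{
	/* ... service and clear bits in pending_mask ... */
	if (pending_mask)
		schedule_delayed_work(&poll_work, msecs_to_jiffies(95));
}

static void poll_start(void)
{
	INIT_DELAYED_WORK(&poll_work, poll_fn);
	schedule_delayed_work(&poll_work, msecs_to_jiffies(95));
}
```
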
@@ -2222,6 +2343,7 @@ static void ptp_clock_unregister_all(struct idtcm *idtcm)
static void set_default_masks(struct idtcm *idtcm)
{
idtcm->tod_mask = DEFAULT_TOD_MASK;
+ idtcm->extts_mask = 0;
idtcm->channel[0].pll = DEFAULT_TOD0_PTP_PLL;
idtcm->channel[1].pll = DEFAULT_TOD1_PTP_PLL;
@@ -2234,158 +2356,86 @@ static void set_default_masks(struct idtcm *idtcm)
idtcm->channel[3].output_mask = DEFAULT_OUTPUT_MASK_PLL3;
}
-static int idtcm_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int idtcm_probe(struct platform_device *pdev)
{
+ struct rsmu_ddata *ddata = dev_get_drvdata(pdev->dev.parent);
struct idtcm *idtcm;
int err;
u8 i;
- /* Unused for now */
- (void)id;
-
- idtcm = devm_kzalloc(&client->dev, sizeof(struct idtcm), GFP_KERNEL);
+ idtcm = devm_kzalloc(&pdev->dev, sizeof(struct idtcm), GFP_KERNEL);
if (!idtcm)
return -ENOMEM;
- idtcm->client = client;
- idtcm->page_offset = 0xff;
+ idtcm->dev = &pdev->dev;
+ idtcm->mfd = pdev->dev.parent;
+ idtcm->lock = &ddata->lock;
+ idtcm->regmap = ddata->regmap;
idtcm->calculate_overhead_flag = 0;
+ INIT_DELAYED_WORK(&idtcm->extts_work, idtcm_extts_check);
+
set_default_masks(idtcm);
- mutex_init(&idtcm->reg_lock);
- mutex_lock(&idtcm->reg_lock);
+ mutex_lock(idtcm->lock);
idtcm_set_version_info(idtcm);
- err = idtcm_load_firmware(idtcm, &client->dev);
+ err = idtcm_load_firmware(idtcm, &pdev->dev);
+
if (err)
- dev_warn(&idtcm->client->dev, "loading firmware failed with %d", err);
+ dev_warn(idtcm->dev, "loading firmware failed with %d", err);
wait_for_chip_ready(idtcm);
if (idtcm->tod_mask) {
for (i = 0; i < MAX_TOD; i++) {
- if (idtcm->tod_mask & (1 << i)) {
+ if (idtcm->tod_mask & (1 << i))
err = idtcm_enable_channel(idtcm, i);
- if (err) {
- dev_err(&idtcm->client->dev,
- "idtcm_enable_channel %d failed!", i);
- break;
- }
+ else
+ err = idtcm_enable_extts_channel(idtcm, i);
+ if (err) {
+ dev_err(idtcm->dev,
+ "idtcm_enable_channel %d failed!", i);
+ break;
}
}
} else {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"no PLLs flagged as PHCs, nothing to do");
err = -ENODEV;
}
- mutex_unlock(&idtcm->reg_lock);
+ mutex_unlock(idtcm->lock);
if (err) {
ptp_clock_unregister_all(idtcm);
return err;
}
- i2c_set_clientdata(client, idtcm);
+ platform_set_drvdata(pdev, idtcm);
return 0;
}
-static int idtcm_remove(struct i2c_client *client)
+static int idtcm_remove(struct platform_device *pdev)
{
- struct idtcm *idtcm = i2c_get_clientdata(client);
+ struct idtcm *idtcm = platform_get_drvdata(pdev);
ptp_clock_unregister_all(idtcm);
- mutex_destroy(&idtcm->reg_lock);
+ cancel_delayed_work_sync(&idtcm->extts_work);
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id idtcm_dt_id[] = {
- { .compatible = "idt,8a34000" },
- { .compatible = "idt,8a34001" },
- { .compatible = "idt,8a34002" },
- { .compatible = "idt,8a34003" },
- { .compatible = "idt,8a34004" },
- { .compatible = "idt,8a34005" },
- { .compatible = "idt,8a34006" },
- { .compatible = "idt,8a34007" },
- { .compatible = "idt,8a34008" },
- { .compatible = "idt,8a34009" },
- { .compatible = "idt,8a34010" },
- { .compatible = "idt,8a34011" },
- { .compatible = "idt,8a34012" },
- { .compatible = "idt,8a34013" },
- { .compatible = "idt,8a34014" },
- { .compatible = "idt,8a34015" },
- { .compatible = "idt,8a34016" },
- { .compatible = "idt,8a34017" },
- { .compatible = "idt,8a34018" },
- { .compatible = "idt,8a34019" },
- { .compatible = "idt,8a34040" },
- { .compatible = "idt,8a34041" },
- { .compatible = "idt,8a34042" },
- { .compatible = "idt,8a34043" },
- { .compatible = "idt,8a34044" },
- { .compatible = "idt,8a34045" },
- { .compatible = "idt,8a34046" },
- { .compatible = "idt,8a34047" },
- { .compatible = "idt,8a34048" },
- { .compatible = "idt,8a34049" },
- {},
-};
-MODULE_DEVICE_TABLE(of, idtcm_dt_id);
-#endif
-
-static const struct i2c_device_id idtcm_i2c_id[] = {
- { "8a34000" },
- { "8a34001" },
- { "8a34002" },
- { "8a34003" },
- { "8a34004" },
- { "8a34005" },
- { "8a34006" },
- { "8a34007" },
- { "8a34008" },
- { "8a34009" },
- { "8a34010" },
- { "8a34011" },
- { "8a34012" },
- { "8a34013" },
- { "8a34014" },
- { "8a34015" },
- { "8a34016" },
- { "8a34017" },
- { "8a34018" },
- { "8a34019" },
- { "8a34040" },
- { "8a34041" },
- { "8a34042" },
- { "8a34043" },
- { "8a34044" },
- { "8a34045" },
- { "8a34046" },
- { "8a34047" },
- { "8a34048" },
- { "8a34049" },
- {},
-};
-MODULE_DEVICE_TABLE(i2c, idtcm_i2c_id);
-
-static struct i2c_driver idtcm_driver = {
+static struct platform_driver idtcm_driver = {
.driver = {
- .of_match_table = of_match_ptr(idtcm_dt_id),
- .name = "idtcm",
+ .name = "8a3400x-phc",
},
- .probe = idtcm_probe,
- .remove = idtcm_remove,
- .id_table = idtcm_i2c_id,
+ .probe = idtcm_probe,
+ .remove = idtcm_remove,
};
-module_i2c_driver(idtcm_driver);
+module_platform_driver(idtcm_driver);
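
The driver boilerplate change above completes the I2C-to-MFD conversion: device matching moves from the I2C/OF tables to the platform cell name "8a3400x-phc", and probe() borrows the regmap and lock from the rsmu parent via dev_get_drvdata(pdev->dev.parent). The assumed parent-side half of that contract looks roughly like this (not part of this diff):

```c
/* Assumed parent-side counterpart: the rsmu MFD driver registers a
 * cell named to match the platform driver above; each cell borrows
 * the parent's regmap and lock. */
#include <linux/mfd/core.h>

static const struct mfd_cell rsmu_cells[] = {
	{ .name = "8a3400x-phc" },	/* matches idtcm_driver.driver.name */
};

/* in the parent's probe, after creating the shared regmap:
 *	err = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, rsmu_cells,
 *				   ARRAY_SIZE(rsmu_cells), NULL, 0, NULL);
 */
```
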
diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h
index 833e5907c351..0f3059ae1fff 100644
--- a/drivers/ptp/ptp_clockmatrix.h
+++ b/drivers/ptp/ptp_clockmatrix.h
@@ -9,8 +9,8 @@
#define PTP_IDTCLOCKMATRIX_H
#include <linux/ktime.h>
-
-#include "idt8a340_reg.h"
+#include <linux/mfd/idt8a340_reg.h>
+#include <linux/regmap.h>
#define FW_FILENAME "idtcm.bin"
#define MAX_TOD (4)
@@ -44,7 +44,6 @@
#define DEFAULT_TOD2_PTP_PLL (2)
#define DEFAULT_TOD3_PTP_PLL (3)
-#define POST_SM_RESET_DELAY_MS (3000)
#define PHASE_PULL_IN_THRESHOLD_NS_DEPRECATED (150000)
#define PHASE_PULL_IN_THRESHOLD_NS (15000)
#define TOD_WRITE_OVERHEAD_COUNT_MAX (2)
@@ -64,6 +63,11 @@
* Return register address based on passed in firmware version
*/
#define IDTCM_FW_REG(FW, VER, REG) (((FW) < (VER)) ? (REG) : (REG##_##VER))
+enum fw_version {
+ V_DEFAULT = 0,
+ V487 = 1,
+ V520 = 2,
+};
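
The relocated enum fw_version feeds the IDTCM_FW_REG() macro shown in the context above, which token-pastes a versioned register name when the firmware is new enough. A worked example with the TOD_READ_PRIMARY_CMD pair from the register header:

```c
/* How IDTCM_FW_REG resolves: firmware older than the given version
 * uses the base register; otherwise the _V520 variant is pasted in. */
#include <stdio.h>

enum fw_version { V_DEFAULT = 0, V487 = 1, V520 = 2 };

#define TOD_READ_PRIMARY_CMD		0x000e
#define TOD_READ_PRIMARY_CMD_V520	0x000f
#define IDTCM_FW_REG(FW, VER, REG)	(((FW) < (VER)) ? (REG) : (REG##_##VER))

int main(void)
{
	printf("0x%04x\n", IDTCM_FW_REG(V487, V520, TOD_READ_PRIMARY_CMD)); /* 0x000e */
	printf("0x%04x\n", IDTCM_FW_REG(V520, V520, TOD_READ_PRIMARY_CMD)); /* 0x000f */
	return 0;
}
```
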
/* PTP PLL Mode */
enum ptp_pll_mode {
@@ -74,94 +78,6 @@ enum ptp_pll_mode {
PTP_PLL_MODE_MAX = PTP_PLL_MODE_UNSUPPORTED,
};
-/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
-enum pll_mode {
- PLL_MODE_MIN = 0,
- PLL_MODE_PLL = PLL_MODE_MIN,
- PLL_MODE_WRITE_PHASE = 1,
- PLL_MODE_WRITE_FREQUENCY = 2,
- PLL_MODE_GPIO_INC_DEC = 3,
- PLL_MODE_SYNTHESIS = 4,
- PLL_MODE_PHASE_MEASUREMENT = 5,
- PLL_MODE_DISABLED = 6,
- PLL_MODE_MAX = PLL_MODE_DISABLED,
-};
-
-/* Values of DPLL_CTRL_n.DPLL_MANU_REF_CFG.MANUAL_REFERENCE */
-enum manual_reference {
- MANU_REF_MIN = 0,
- MANU_REF_CLK0 = MANU_REF_MIN,
- MANU_REF_CLK1,
- MANU_REF_CLK2,
- MANU_REF_CLK3,
- MANU_REF_CLK4,
- MANU_REF_CLK5,
- MANU_REF_CLK6,
- MANU_REF_CLK7,
- MANU_REF_CLK8,
- MANU_REF_CLK9,
- MANU_REF_CLK10,
- MANU_REF_CLK11,
- MANU_REF_CLK12,
- MANU_REF_CLK13,
- MANU_REF_CLK14,
- MANU_REF_CLK15,
- MANU_REF_WRITE_PHASE,
- MANU_REF_WRITE_FREQUENCY,
- MANU_REF_XO_DPLL,
- MANU_REF_MAX = MANU_REF_XO_DPLL,
-};
-
-enum hw_tod_write_trig_sel {
- HW_TOD_WR_TRIG_SEL_MIN = 0,
- HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
- HW_TOD_WR_TRIG_SEL_RESERVED = 1,
- HW_TOD_WR_TRIG_SEL_TOD_PPS = 2,
- HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3,
- HW_TOD_WR_TRIG_SEL_PWM_PPS = 4,
- HW_TOD_WR_TRIG_SEL_GPIO = 5,
- HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6,
- WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC,
-};
-
-/* 4.8.7 only */
-enum scsr_tod_write_trig_sel {
- SCSR_TOD_WR_TRIG_SEL_DISABLE = 0,
- SCSR_TOD_WR_TRIG_SEL_IMMEDIATE = 1,
- SCSR_TOD_WR_TRIG_SEL_REFCLK = 2,
- SCSR_TOD_WR_TRIG_SEL_PWMPPS = 3,
- SCSR_TOD_WR_TRIG_SEL_TODPPS = 4,
- SCSR_TOD_WR_TRIG_SEL_SYNCFOD = 5,
- SCSR_TOD_WR_TRIG_SEL_GPIO = 6,
- SCSR_TOD_WR_TRIG_SEL_MAX = SCSR_TOD_WR_TRIG_SEL_GPIO,
-};
-
-/* 4.8.7 only */
-enum scsr_tod_write_type_sel {
- SCSR_TOD_WR_TYPE_SEL_ABSOLUTE = 0,
- SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS = 1,
- SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS = 2,
- SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS,
-};
-
-/* Values STATUS.DPLL_SYS_STATUS.DPLL_SYS_STATE */
-enum dpll_state {
- DPLL_STATE_MIN = 0,
- DPLL_STATE_FREERUN = DPLL_STATE_MIN,
- DPLL_STATE_LOCKACQ = 1,
- DPLL_STATE_LOCKREC = 2,
- DPLL_STATE_LOCKED = 3,
- DPLL_STATE_HOLDOVER = 4,
- DPLL_STATE_OPEN_LOOP = 5,
- DPLL_STATE_MAX = DPLL_STATE_OPEN_LOOP,
-};
-
-enum fw_version {
- V_DEFAULT = 0,
- V487 = 1,
- V520 = 2,
-};
-
struct idtcm;
struct idtcm_channel {
@@ -185,25 +101,32 @@ struct idtcm_channel {
s32 offset_ns, u32 max_ffo_ppb);
s32 current_freq_scaled_ppm;
bool phase_pull_in;
+ u32 dco_delay;
+ /* last input trigger for extts */
+ u8 refn;
u8 pll;
u16 output_mask;
};
struct idtcm {
struct idtcm_channel channel[MAX_TOD];
- struct i2c_client *client;
- u8 page_offset;
+ struct device *dev;
u8 tod_mask;
char version[16];
enum fw_version fw_ver;
-
+ /* Polls for external time stamps */
+ u8 extts_mask;
+ struct delayed_work extts_work;
+ /* Remember the ptp channel to report extts */
+ struct idtcm_channel *event_channel[MAX_TOD];
+ /* Mutex to protect operations from being interrupted */
+ struct mutex *lock;
+ struct device *mfd;
+ struct regmap *regmap;
/* Overhead calculation for adjtime */
u8 calculate_overhead_flag;
s64 tod_write_overhead_ns;
ktime_t start_time;
-
- /* Protects I2C read/modify/write registers from concurrent access */
- struct mutex reg_lock;
};
struct idtcm_fwrc {
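
Note: the IDTCM_FW_REG() macro above picks between a base register and its
versioned variant by token pasting REG##_##VER. A hedged usage sketch; the
register values here are placeholders, not the real 8A340xx addresses:

	/* Illustrative only: assumes both macros are defined. */
	#define TOD_WRITE	0xcc00
	#define TOD_WRITE_V520	0xcc20

	/* fw_ver < V520 resolves to TOD_WRITE, otherwise TOD_WRITE_V520. */
	u16 reg = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_WRITE);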
diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c
index 3dd519dfc473..4991054a2135 100644
--- a/drivers/ptp/ptp_kvm_x86.c
+++ b/drivers/ptp/ptp_kvm_x86.c
@@ -15,8 +15,6 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_kvm.h>
-struct pvclock_vsyscall_time_info *hv_clock;
-
static phys_addr_t clock_pair_gpa;
static struct kvm_clock_pairing clock_pair;
@@ -28,16 +26,15 @@ int kvm_arch_ptp_init(void)
return -ENODEV;
clock_pair_gpa = slow_virt_to_phys(&clock_pair);
- hv_clock = pvclock_get_pvti_cpu0_va();
- if (!hv_clock)
+ if (!pvclock_get_pvti_cpu0_va())
return -ENODEV;
ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
KVM_CLOCK_PAIRING_WALLCLOCK);
- if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
+ if (ret == -KVM_ENOSYS)
return -ENODEV;
- return 0;
+ return ret;
}
int kvm_arch_ptp_get_clock(struct timespec64 *ts)
@@ -64,10 +61,8 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
struct pvclock_vcpu_time_info *src;
unsigned int version;
long ret;
- int cpu;
- cpu = smp_processor_id();
- src = &hv_clock[cpu].pvti;
+ src = this_cpu_pvti();
do {
/*
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 4c25467198e3..34f943c8c9fd 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -2455,7 +2455,6 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
}
- devlink_register(devlink);
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_device\n");
@@ -2497,7 +2496,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out;
ptp_ocp_info(bp);
-
+ devlink_register(devlink);
return 0;
out:
@@ -2506,7 +2505,6 @@ out:
out_disable:
pci_disable_device(pdev);
out_unregister:
- devlink_unregister(devlink);
devlink_free(devlink);
return err;
}
@@ -2517,11 +2515,11 @@ ptp_ocp_remove(struct pci_dev *pdev)
struct ptp_ocp *bp = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(bp);
+ devlink_unregister(devlink);
ptp_ocp_detach(bp);
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
- devlink_unregister(devlink);
devlink_free(devlink);
}
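
Note: the devlink hunks reorder the instance's lifetime so userspace never
sees a half-initialized device: publish only after probe can no longer fail,
unpublish first in remove. A sketch of the resulting shape; the
setup/teardown helpers are hypothetical stand-ins for the driver's own steps:

	/* probe(): register last, once every other step has succeeded. */
	err = example_setup(bp);
	if (err)
		goto out_free;		/* nothing published yet */
	devlink_register(devlink);
	return 0;

	/* remove(): unregister first, then tear the rest down. */
	devlink_unregister(devlink);
	example_teardown(bp);
	devlink_free(devlink);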
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index a17e8cc642c5..8070f3fd98f0 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -644,6 +644,7 @@ static const struct pci_device_id pch_ieee1588_pcidev_id[] = {
},
{0}
};
+MODULE_DEVICE_TABLE(pci, pch_ieee1588_pcidev_id);
static SIMPLE_DEV_PM_OPS(pch_pm_ops, pch_suspend, pch_resume);
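
Note: adding MODULE_DEVICE_TABLE(pci, ...) exports the ID list as module
aliases, which is what lets udev/modprobe autoload ptp_pch when a matching
PCI function appears. The general form, with placeholder IDs:

	#include <linux/module.h>
	#include <linux/pci.h>

	static const struct pci_device_id example_ids[] = {
		{ PCI_DEVICE(0x1234, 0x5678) },	/* placeholder vendor/device */
		{ 0 }
	};
	MODULE_DEVICE_TABLE(pci, example_ids);	/* emits pci:v...d... aliases */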
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index 1d78b455cc48..e34face736f4 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -269,5 +269,3 @@ module_exit(max14577_regulator_exit);
MODULE_AUTHOR("Krzysztof Kozlowski <[email protected]>");
MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:max14577-regulator");
-MODULE_ALIAS("platform:max77836-regulator");
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 6cca910a76de..7f458d510483 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -991,7 +991,7 @@ static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"),
RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
- RPMH_VREG("ldo7", "ldo%s6", &pmic5_pldo_lv, "vdd-l7"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7"),
{}
};
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 2f3515fa242a..f3d5c7f4c13d 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -45,13 +45,14 @@ static void __init sclp_early_facilities_detect(void)
sclp.has_gisaf = !!(sccb->fac118 & 0x08);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
sclp.has_kss = !!(sccb->fac98 & 0x01);
- sclp.has_sipl = !!(sccb->cbl & 0x4000);
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
if (sccb->fac91 & 0x40)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
if (sccb->cpuoff > 134)
sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
+ if (sccb->cpuoff > 137)
+ sclp.has_sipl = !!(sccb->cbl & 0x4000);
sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp.rzm <<= 20;
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index f3c656975e05..93695d535380 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -262,10 +262,12 @@ static int blacklist_parse_proc_parameters(char *buf)
if (strcmp("free", parm) == 0) {
rc = blacklist_parse_parameters(buf, free, 0);
- /* There could be subchannels without proper devices connected.
- * evaluate all the entries
+ /*
+ * Evaluate the subchannels without an online device. This way,
+ * no path-verification will be triggered on those subchannels
+ * and it avoids unnecessary delays.
*/
- css_schedule_eval_all();
+ css_schedule_eval_cond(CSS_EVAL_NOT_ONLINE, 0);
} else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0);
else if (strcmp("purge", parm) == 0)
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 2ec741106cb6..f0538609dfe4 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -77,12 +77,13 @@ EXPORT_SYMBOL(ccwgroup_set_online);
/**
* ccwgroup_set_offline() - disable a ccwgroup device
* @gdev: target ccwgroup device
+ * @call_gdrv: Call the registered gdrv set_offline function
*
* This function attempts to put the ccwgroup device into the offline state.
* Returns:
* %0 on success and a negative error value on failure.
*/
-int ccwgroup_set_offline(struct ccwgroup_device *gdev)
+int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv)
{
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
int ret = -EINVAL;
@@ -91,11 +92,16 @@ int ccwgroup_set_offline(struct ccwgroup_device *gdev)
return -EAGAIN;
if (gdev->state == CCWGROUP_OFFLINE)
goto out;
+ if (!call_gdrv) {
+ ret = 0;
+ goto offline;
+ }
if (gdrv->set_offline)
ret = gdrv->set_offline(gdev);
if (ret)
goto out;
+offline:
gdev->state = CCWGROUP_OFFLINE;
out:
atomic_set(&gdev->onoff, 0);
@@ -124,7 +130,7 @@ static ssize_t ccwgroup_online_store(struct device *dev,
if (value == 1)
ret = ccwgroup_set_online(gdev);
else if (value == 0)
- ret = ccwgroup_set_offline(gdev);
+ ret = ccwgroup_set_offline(gdev, true);
else
ret = -EINVAL;
out:
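
Note: ccwgroup_set_offline() gains a call_gdrv flag so a caller that has
already torn the discipline down itself can flip the bus state without
re-entering the driver callback. Both call sites appear in this series:

	ccwgroup_set_offline(gdev, true);	 /* sysfs 'online=0': runs gdrv->set_offline */
	ccwgroup_set_offline(card->gdev, false); /* qeth recovery already did the teardown */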
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 3377097e65de..44461928aab8 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -788,27 +788,49 @@ static int __unset_registered(struct device *dev, void *data)
return 0;
}
-void css_schedule_eval_all_unreg(unsigned long delay)
+static int __unset_online(struct device *dev, void *data)
+{
+ struct idset *set = data;
+ struct subchannel *sch = to_subchannel(dev);
+ struct ccw_device *cdev = sch_get_cdev(sch);
+
+ if (cdev && cdev->online)
+ idset_sch_del(set, sch->schid);
+
+ return 0;
+}
+
+void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
{
unsigned long flags;
- struct idset *unreg_set;
+ struct idset *set;
/* Find unregistered subchannels. */
- unreg_set = idset_sch_new();
- if (!unreg_set) {
+ set = idset_sch_new();
+ if (!set) {
/* Fallback. */
css_schedule_eval_all();
return;
}
- idset_fill(unreg_set);
- bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+ idset_fill(set);
+ switch (cond) {
+ case CSS_EVAL_UNREG:
+ bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
+ break;
+ case CSS_EVAL_NOT_ONLINE:
+ bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
+ break;
+ default:
+ break;
+ }
+
/* Apply to slow_subchannel_set. */
spin_lock_irqsave(&slow_subchannel_lock, flags);
- idset_add_set(slow_subchannel_set, unreg_set);
+ idset_add_set(slow_subchannel_set, set);
atomic_set(&css_eval_scheduled, 1);
queue_delayed_work(cio_work_q, &slow_path_work, delay);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
- idset_free(unreg_set);
+ idset_free(set);
}
void css_wait_for_slow_path(void)
@@ -820,7 +842,7 @@ void css_wait_for_slow_path(void)
void css_schedule_reprobe(void)
{
/* Schedule with a delay to allow merging of subsequent calls. */
- css_schedule_eval_all_unreg(1 * HZ);
+ css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
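
Note: css_schedule_eval_all_unreg() generalizes into css_schedule_eval_cond(),
with the condition selecting which bus_for_each_dev() filter prunes the idset
before it is merged into slow_subchannel_set. Both conditions are exercised
in this series:

	css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);	/* reprobe: unregistered subchannels */
	css_schedule_eval_cond(CSS_EVAL_NOT_ONLINE, 0);	/* blacklist 'free': no online device */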
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index c98522cbe276..ede0b905bc6f 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -34,6 +34,14 @@
#define SNID_STATE3_MULTI_PATH 1
#define SNID_STATE3_SINGLE_PATH 0
+/*
+ * Conditions used to specify which subchannels need evaluation
+ */
+enum css_eval_cond {
+ CSS_EVAL_UNREG, /* unregistered subchannels */
+ CSS_EVAL_NOT_ONLINE /* sch without an online-device */
+};
+
struct path_state {
__u8 state1 : 2; /* path state value 1 */
__u8 state2 : 2; /* path state value 2 */
@@ -136,7 +144,7 @@ static inline struct channel_subsystem *css_by_id(u8 cssid)
/* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void);
-void css_schedule_eval_all_unreg(unsigned long delay);
+void css_schedule_eval_cond(enum css_eval_cond, unsigned long delay);
int css_complete_work(void);
int sch_is_pseudo_sch(struct subchannel *);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f433428057d9..d9b804943d19 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -213,7 +213,6 @@ static inline int ap_fetch_qci_info(struct ap_config_info *info)
* ap_init_qci_info(): Allocate and query qci config info.
* Does also update the static variables ap_max_domain_id
* and ap_max_adapter_id if this info is available.
-
*/
static void __init ap_init_qci_info(void)
{
@@ -439,6 +438,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
/**
* ap_interrupt_handler() - Schedule ap_tasklet on interrupt
* @airq: pointer to adapter interrupt descriptor
+ * @floating: ignored
*/
static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
{
@@ -1786,6 +1786,7 @@ static inline void ap_scan_adapter(int ap)
/**
* ap_scan_bus(): Scan the AP bus for new devices
* Runs periodically, workqueue timer (ap_config_time)
+ * @unused: Unused pointer.
*/
static void ap_scan_bus(struct work_struct *unused)
{
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index d70c4d3d0907..9ea48bf0ee40 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -20,7 +20,7 @@ static void __ap_flush_queue(struct ap_queue *aq);
/**
* ap_queue_enable_irq(): Enable interrupt support on this AP queue.
- * @qid: The AP queue number
+ * @aq: The AP queue
* @ind: the notification indicator byte
*
* Enables interruption on AP queue via ap_aqic(). Based on the return
@@ -311,7 +311,7 @@ static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
/**
* ap_sm_reset(): Reset an AP queue.
- * @qid: The AP queue number
+ * @aq: The AP queue
*
* Submit the Reset command to an AP queue.
*/
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 118939a7729a..623d5269a52c 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -361,6 +361,7 @@ err_list:
mutex_lock(&matrix_dev->lock);
list_del(&matrix_mdev->node);
mutex_unlock(&matrix_dev->lock);
+ vfio_uninit_group_dev(&matrix_mdev->vdev);
kfree(matrix_mdev);
err_dec_available:
atomic_inc(&matrix_dev->available_instances);
@@ -376,9 +377,10 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev)
mutex_lock(&matrix_dev->lock);
vfio_ap_mdev_reset_queues(matrix_mdev);
list_del(&matrix_mdev->node);
+ mutex_unlock(&matrix_dev->lock);
+ vfio_uninit_group_dev(&matrix_mdev->vdev);
kfree(matrix_mdev);
atomic_inc(&matrix_dev->available_instances);
- mutex_unlock(&matrix_dev->lock);
}
static ssize_t name_show(struct mdev_type *mtype,
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index c18fd48e02b6..2a6479740600 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2162,7 +2162,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
card->dev->ml_priv = card;
card->dev->netdev_ops = &lcs_netdev_ops;
card->dev->dev_port = card->portno;
- memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
+ eth_hw_addr_set(card->dev, card->mac);
#ifdef CONFIG_IP_MULTICAST
if (!lcs_check_multicast_support(card))
card->dev->netdev_ops = &lcs_mc_netdev_ops;
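
Note: the direct memcpy() into dev->dev_addr is replaced by eth_hw_addr_set();
netdev->dev_addr is on its way to becoming const, so all writes must go
through the helper. The same substitution recurs in the qeth hunks below.
Sketch:

	#include <linux/etherdevice.h>

	/* Before (invalid once dev_addr is const):
	 *	memcpy(dev->dev_addr, mac, ETH_ALEN);
	 * After: */
	eth_hw_addr_set(dev, mac);	/* mac is a const u8[ETH_ALEN] */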
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 535a60b3946d..a5aa0bdc61d6 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -858,7 +858,6 @@ struct qeth_card {
struct napi_struct napi;
struct qeth_rx rx;
struct delayed_work buffer_reclaim_work;
- struct work_struct close_dev_work;
};
static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 41ca6273b750..15999a816054 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -70,15 +70,6 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
-static void qeth_close_dev_handler(struct work_struct *work)
-{
- struct qeth_card *card;
-
- card = container_of(work, struct qeth_card, close_dev_work);
- QETH_CARD_TEXT(card, 2, "cldevhdl");
- ccwgroup_set_offline(card->gdev);
-}
-
static const char *qeth_get_cardname(struct qeth_card *card)
{
if (IS_VM_NIC(card)) {
@@ -202,6 +193,9 @@ static void qeth_clear_working_pool_list(struct qeth_card *card)
&card->qdio.in_buf_pool.entry_list, list)
list_del(&pool_entry->list);
+ if (!queue)
+ return;
+
for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
queue->bufs[i].pool_entry = NULL;
}
@@ -792,10 +786,12 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
case IPA_CMD_STOPLAN:
if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
dev_err(&card->gdev->dev,
- "Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
+ "Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
netdev_name(card->dev));
- schedule_work(&card->close_dev_work);
+ /* Set offline, then probably fail to set online: */
+ qeth_schedule_recovery(card);
} else {
+ /* stay online for subsequent STARTLAN */
dev_warn(&card->gdev->dev,
"The link for interface %s on CHPID 0x%X failed\n",
netdev_name(card->dev), card->info.chpid);
@@ -1537,7 +1533,6 @@ static void qeth_setup_card(struct qeth_card *card)
INIT_LIST_HEAD(&card->ipato.entries);
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
- INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
hash_init(card->rx_mode_addrs);
hash_init(card->local_addrs4);
hash_init(card->local_addrs6);
@@ -4380,7 +4375,7 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
!(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
return -EADDRNOTAVAIL;
- ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
+ eth_hw_addr_set(card->dev, adp_cmd->data.change_addr.addr);
return 0;
}
@@ -5051,7 +5046,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "badmac");
QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
} else {
- ether_addr_copy(card->dev->dev_addr, response->mac);
+ eth_hw_addr_set(card->dev, response->mac);
}
out:
@@ -5519,7 +5514,8 @@ static int qeth_do_reset(void *data)
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
} else {
- ccwgroup_set_offline(card->gdev);
+ qeth_set_offline(card, disc, true);
+ ccwgroup_set_offline(card->gdev, false);
dev_warn(&card->gdev->dev,
"The qeth device driver failed to recover an error on the device\n");
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 72e84ff9fea5..5b6187f2d9d6 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -71,7 +71,7 @@ static int qeth_l2_send_setdelmac_cb(struct qeth_card *card,
return qeth_l2_setdelmac_makerc(card, cmd->hdr.return_code);
}
-static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
+static int qeth_l2_send_setdelmac(struct qeth_card *card, const __u8 *mac,
enum qeth_ipa_cmds ipacmd)
{
struct qeth_ipa_cmd *cmd;
@@ -88,7 +88,7 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelmac_cb, NULL);
}
-static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
+static int qeth_l2_send_setmac(struct qeth_card *card, const __u8 *mac)
{
int rc;
@@ -377,7 +377,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
if (rc)
return rc;
ether_addr_copy(old_addr, dev->dev_addr);
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
if (card->info.dev_addr_is_registered)
qeth_l2_remove_mac(card, old_addr);
@@ -2307,7 +2307,6 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
if (gdev->state == CCWGROUP_ONLINE)
qeth_set_offline(card, card->discipline, false);
- cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED) {
priv = netdev_priv(card->dev);
if (priv->brport_features & BR_LEARNING_SYNC) {
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 3a523e700a5a..e6e921310211 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -913,8 +913,7 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
if (!is_valid_ether_addr(cmd->data.create_destroy_addr.mac_addr))
return -EADDRNOTAVAIL;
- ether_addr_copy(card->dev->dev_addr,
- cmd->data.create_destroy_addr.mac_addr);
+ eth_hw_addr_set(card->dev, cmd->data.create_destroy_addr.mac_addr);
return 0;
}
@@ -1969,7 +1968,6 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_set_offline(card, card->discipline, false);
- cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
unregister_netdev(card->dev);
diff --git a/drivers/scsi/arm/Kconfig b/drivers/scsi/arm/Kconfig
index f34badc75196..9f64133f976a 100644
--- a/drivers/scsi/arm/Kconfig
+++ b/drivers/scsi/arm/Kconfig
@@ -10,17 +10,6 @@ config SCSI_ACORNSCSI_3
This enables support for the Acorn SCSI card (aka30). If you have an
Acorn system with one of these, say Y. If unsure, say N.
-config SCSI_ACORNSCSI_TAGGED_QUEUE
- bool "Support SCSI 2 Tagged queueing"
- depends on SCSI_ACORNSCSI_3
- help
- Say Y here to enable tagged queuing support on the Acorn SCSI card.
-
- This is a feature of SCSI-2 which improves performance: the host
- adapter can send several SCSI commands to a device's queue even if
- previous commands haven't finished yet. Some SCSI devices don't
- implement this properly, so the safe answer is N.
-
config SCSI_ACORNSCSI_SYNC
bool "Support SCSI 2 Synchronous Transfers"
depends on SCSI_ACORNSCSI_3
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 4a84599ff491..0cc62c1b0825 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -52,12 +52,8 @@
 * You can tell if you have a device that supports tagged queueing by
 * catting (e.g.) /proc/scsi/acornscsi/0 and seeing if the SCSI revision is
 * reported as '2 TAG'.
- *
- * Also note that CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE is normally set in the config
- * scripts, but disabled here. Once debugged, remove the #undef, otherwise to debug,
- * comment out the undef.
*/
-#undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
+
/*
* SCSI-II Synchronous transfer support.
*
@@ -171,7 +167,7 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
unsigned int result);
static int acornscsi_reconnect_finish(AS_Host *host);
static void acornscsi_dma_cleanup(AS_Host *host);
-static void acornscsi_abortcmd(AS_Host *host, unsigned char tag);
+static void acornscsi_abortcmd(AS_Host *host);
/* ====================================================================================
* Miscellaneous
@@ -741,17 +737,6 @@ intr_ret_t acornscsi_kick(AS_Host *host)
#endif
if (from_queue) {
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- /*
- * tagged queueing - allocate a new tag to this command
- */
- if (SCpnt->device->simple_tags) {
- SCpnt->device->current_tag += 1;
- if (SCpnt->device->current_tag == 0)
- SCpnt->device->current_tag = 1;
- SCpnt->tag = SCpnt->device->current_tag;
- } else
-#endif
set_bit(SCpnt->device->id * 8 +
(u8)(SCpnt->device->lun & 0x07), host->busyluns);
@@ -1192,7 +1177,7 @@ void acornscsi_dma_intr(AS_Host *host)
* the device recognises the attention.
*/
if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) {
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
dmac_write(host, DMAC_TXCNTLO, 0);
dmac_write(host, DMAC_TXCNTHI, 0);
@@ -1560,23 +1545,6 @@ void acornscsi_message(AS_Host *host)
acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
switch (host->scsi.last_message) {
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- case HEAD_OF_QUEUE_TAG:
- case ORDERED_QUEUE_TAG:
- case SIMPLE_QUEUE_TAG:
- /*
- * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.17)
- * If a target does not implement tagged queuing and a queue tag
- * message is received, it shall respond with a MESSAGE REJECT
- * message and accept the I/O process as if it were untagged.
- */
- printk(KERN_NOTICE "scsi%d.%c: disabling tagged queueing\n",
- host->host->host_no, acornscsi_target(host));
- host->SCpnt->device->simple_tags = 0;
- set_bit(host->SCpnt->device->id * 8 +
- (u8)(host->SCpnt->device->lun & 0x7), host->busyluns);
- break;
-#endif
case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8):
/*
* Target can't handle synchronous transfers
@@ -1687,24 +1655,11 @@ void acornscsi_buildmessages(AS_Host *host)
#if 0
/* does the device need the current command aborted */
if (cmd_aborted) {
- acornscsi_abortcmd(host->SCpnt->tag);
+ acornscsi_abortcmd(host);
return;
}
#endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- if (host->SCpnt->tag) {
- unsigned int tag_type;
-
- if (host->SCpnt->cmnd[0] == REQUEST_SENSE ||
- host->SCpnt->cmnd[0] == TEST_UNIT_READY ||
- host->SCpnt->cmnd[0] == INQUIRY)
- tag_type = HEAD_OF_QUEUE_TAG;
- else
- tag_type = SIMPLE_QUEUE_TAG;
- msgqueue_addmsg(&host->scsi.msgs, 2, tag_type, host->SCpnt->tag);
- }
-#endif
#ifdef CONFIG_SCSI_ACORNSCSI_SYNC
if (host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) {
@@ -1798,7 +1753,7 @@ int acornscsi_reconnect(AS_Host *host)
"to reconnect with\n",
host->host->host_no, '0' + target);
acornscsi_dumplog(host, target);
- acornscsi_abortcmd(host, 0);
+ acornscsi_abortcmd(host);
if (host->SCpnt) {
queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt);
host->SCpnt = NULL;
@@ -1821,7 +1776,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
host->scsi.disconnectable = 0;
if (host->SCpnt->device->id == host->scsi.reconnected.target &&
host->SCpnt->device->lun == host->scsi.reconnected.lun &&
- host->SCpnt->tag == host->scsi.reconnected.tag) {
+ scsi_cmd_to_rq(host->SCpnt)->tag == host->scsi.reconnected.tag) {
#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
DBG(host->SCpnt, printk("scsi%d.%c: reconnected",
host->host->host_no, acornscsi_target(host)));
@@ -1848,7 +1803,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
}
if (!host->SCpnt)
- acornscsi_abortcmd(host, host->scsi.reconnected.tag);
+ acornscsi_abortcmd(host);
else {
/*
* Restore data pointer from SAVED pointers.
@@ -1889,21 +1844,15 @@ void acornscsi_disconnect_unexpected(AS_Host *host)
 * Function: void acornscsi_abortcmd(AS_Host *host)
* Purpose : abort a currently executing command
* Params : host - host with connected command to abort
- * tag - tag to abort
*/
static
-void acornscsi_abortcmd(AS_Host *host, unsigned char tag)
+void acornscsi_abortcmd(AS_Host *host)
{
host->scsi.phase = PHASE_ABORTED;
sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN);
msgqueue_flush(&host->scsi.msgs);
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- if (tag)
- msgqueue_addmsg(&host->scsi.msgs, 2, ABORT_TAG, tag);
- else
-#endif
- msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
+ msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
}
/* ==========================================================================================
@@ -1993,7 +1942,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTING, SSR %02X?\n",
host->host->host_no, acornscsi_target(host), ssr);
acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
}
return INTR_PROCESSING;
@@ -2029,7 +1978,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTED, SSR %02X?\n",
host->host->host_no, acornscsi_target(host), ssr);
acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
}
return INTR_PROCESSING;
@@ -2075,20 +2024,20 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x18: /* -> PHASE_DATAOUT */
/* COMMAND -> DATA OUT */
if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
acornscsi_dma_setup(host, DMA_OUT);
if (!acornscsi_starttransfer(host))
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAOUT;
return INTR_IDLE;
case 0x19: /* -> PHASE_DATAIN */
/* COMMAND -> DATA IN */
if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
acornscsi_dma_setup(host, DMA_IN);
if (!acornscsi_starttransfer(host))
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAIN;
return INTR_IDLE;
@@ -2156,7 +2105,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
/* MESSAGE IN -> DATA OUT */
acornscsi_dma_setup(host, DMA_OUT);
if (!acornscsi_starttransfer(host))
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAOUT;
return INTR_IDLE;
@@ -2165,7 +2114,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
/* MESSAGE IN -> DATA IN */
acornscsi_dma_setup(host, DMA_IN);
if (!acornscsi_starttransfer(host))
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAIN;
return INTR_IDLE;
@@ -2206,7 +2155,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
switch (ssr) {
case 0x19: /* -> PHASE_DATAIN */
case 0x89: /* -> PHASE_DATAIN */
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
return INTR_IDLE;
case 0x1b: /* -> PHASE_STATUSIN */
@@ -2255,7 +2204,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
switch (ssr) {
case 0x18: /* -> PHASE_DATAOUT */
case 0x88: /* -> PHASE_DATAOUT */
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
return INTR_IDLE;
case 0x1b: /* -> PHASE_STATUSIN */
@@ -2482,7 +2431,6 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
SCpnt->scsi_done = done;
SCpnt->host_scribble = NULL;
SCpnt->result = 0;
- SCpnt->tag = 0;
SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
SCpnt->SCp.sent_command = 0;
SCpnt->SCp.scsi_xferred = 0;
@@ -2581,7 +2529,7 @@ static enum res_abort acornscsi_do_abort(AS_Host *host, struct scsi_cmnd *SCpnt)
break;
default:
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
res = res_snooze;
}
local_irq_restore(flags);
@@ -2747,9 +2695,6 @@ char *acornscsi_info(struct Scsi_Host *host)
#ifdef CONFIG_SCSI_ACORNSCSI_SYNC
" SYNC"
#endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- " TAG"
-#endif
#if (DEBUG & DEBUG_NO_WRITE)
" NOWRITE (" __stringify(NO_WRITE) ")"
#endif
@@ -2770,9 +2715,6 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
#ifdef CONFIG_SCSI_ACORNSCSI_SYNC
" SYNC"
#endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- " TAG"
-#endif
#if (DEBUG & DEBUG_NO_WRITE)
" NOWRITE (" __stringify(NO_WRITE) ")"
#endif
@@ -2827,9 +2769,8 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
seq_printf(m, "Device/Lun TaggedQ Sync\n");
seq_printf(m, " %d/%llu ", scd->id, scd->lun);
if (scd->tagged_supported)
- seq_printf(m, "%3sabled(%3d) ",
- scd->simple_tags ? "en" : "dis",
- scd->current_tag);
+ seq_printf(m, "%3sabled ",
+ scd->simple_tags ? "en" : "dis");
else
seq_printf(m, "unsupported ");
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 9c4458a99025..cf71ef488e36 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -77,7 +77,6 @@
* I was thinking that this was a good chip until I found this restriction ;(
*/
#define SCSI2_SYNC
-#undef SCSI2_TAG
#undef DEBUG_CONNECT
#undef DEBUG_MESSAGES
@@ -990,7 +989,7 @@ fas216_reselected_intr(FAS216_Info *info)
info->scsi.disconnectable = 0;
if (info->SCpnt->device->id == target &&
info->SCpnt->device->lun == lun &&
- info->SCpnt->tag == tag) {
+ scsi_cmd_to_rq(info->SCpnt)->tag == tag) {
fas216_log(info, LOG_CONNECT, "reconnected previously executing command");
} else {
queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt);
@@ -1791,8 +1790,9 @@ static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
/*
* add tag message if required
*/
- if (SCpnt->tag)
- msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG, SCpnt->tag);
+ if (SCpnt->device->simple_tags)
+ msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG,
+ scsi_cmd_to_rq(SCpnt)->tag);
do {
#ifdef SCSI2_SYNC
@@ -1815,20 +1815,8 @@ static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt)
{
-#ifdef SCSI2_TAG
- /*
- * tagged queuing - allocate a new tag to this command
- */
- if (SCpnt->device->simple_tags && SCpnt->cmnd[0] != REQUEST_SENSE &&
- SCpnt->cmnd[0] != INQUIRY) {
- SCpnt->device->current_tag += 1;
- if (SCpnt->device->current_tag == 0)
- SCpnt->device->current_tag = 1;
- SCpnt->tag = SCpnt->device->current_tag;
- } else
-#endif
- set_bit(SCpnt->device->id * 8 +
- (u8)(SCpnt->device->lun & 0x7), info->busyluns);
+ set_bit(SCpnt->device->id * 8 +
+ (u8)(SCpnt->device->lun & 0x7), info->busyluns);
info->stats.removes += 1;
switch (SCpnt->cmnd[0]) {
@@ -2117,7 +2105,6 @@ request_sense:
init_SCp(SCpnt);
SCpnt->SCp.Message = 0;
SCpnt->SCp.Status = 0;
- SCpnt->tag = 0;
SCpnt->host_scribble = (void *)fas216_rq_sns_done;
/*
@@ -2223,7 +2210,6 @@ static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
init_SCp(SCpnt);
info->stats.queues += 1;
- SCpnt->tag = 0;
spin_lock(&info->host_lock);
@@ -3003,9 +2989,8 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
dev = &info->device[scd->id];
seq_printf(m, " %d/%llu ", scd->id, scd->lun);
if (scd->tagged_supported)
- seq_printf(m, "%3sabled(%3d) ",
- scd->simple_tags ? "en" : "dis",
- scd->current_tag);
+ seq_printf(m, "%3sabled ",
+ scd->simple_tags ? "en" : "dis");
else
seq_puts(m, "unsupported ");
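
Note: both Acorn drivers drop their private tag bookkeeping (SCpnt->tag,
device->current_tag). The block layer already assigns a unique tag to every
request, so the drivers read it via scsi_cmd_to_rq(), which returns the
struct request backing the command. As in the fas216 hunk above:

	#include <scsi/scsi_cmnd.h>

	/* The request's tag replaces the driver-allocated SCpnt->tag. */
	if (SCpnt->device->simple_tags)
		msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG,
				scsi_cmd_to_rq(SCpnt)->tag);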
diff --git a/drivers/scsi/arm/queue.c b/drivers/scsi/arm/queue.c
index e5559f27669d..c6f71a7d1b8e 100644
--- a/drivers/scsi/arm/queue.c
+++ b/drivers/scsi/arm/queue.c
@@ -214,7 +214,7 @@ struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, int lun,
list_for_each(l, &queue->head) {
QE_t *q = list_entry(l, QE_t, list);
if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun &&
- q->SCpnt->tag == tag) {
+ scsi_cmd_to_rq(q->SCpnt)->tag == tag) {
SCpnt = __queue_remove(queue, l);
break;
}
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 390b07bf92b9..ccbded3353bd 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1254,3 +1254,4 @@ MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5);
MODULE_FIRMWARE(FW_FNAME_T6);
+MODULE_SOFTDEP("pre: cxgb4");
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
index bb3b460dc0bc..4d73e92909ab 100644
--- a/drivers/scsi/elx/efct/efct_lio.c
+++ b/drivers/scsi/elx/efct/efct_lio.c
@@ -880,11 +880,11 @@ efct_lio_npiv_drop_nport(struct se_wwn *wwn)
struct efct *efct = lio_vport->efct;
unsigned long flags = 0;
- spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
-
if (lio_vport->fc_vport)
fc_vport_terminate(lio_vport->fc_vport);
+ spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+
list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list,
list_entry) {
if (vport->lio_vport == lio_vport) {
diff --git a/drivers/scsi/elx/efct/efct_scsi.c b/drivers/scsi/elx/efct/efct_scsi.c
index 40fb3a724c76..cf2e41dd354c 100644
--- a/drivers/scsi/elx/efct/efct_scsi.c
+++ b/drivers/scsi/elx/efct/efct_scsi.c
@@ -32,7 +32,7 @@ efct_scsi_io_alloc(struct efct_node *node)
struct efct *efct;
struct efct_xport *xport;
struct efct_io *io;
- unsigned long flags = 0;
+ unsigned long flags;
efct = node->efct;
@@ -44,7 +44,6 @@ efct_scsi_io_alloc(struct efct_node *node)
if (!io) {
efc_log_err(efct, "IO alloc Failed\n");
atomic_add_return(1, &xport->io_alloc_failed_count);
- spin_unlock_irqrestore(&node->active_ios_lock, flags);
return NULL;
}
diff --git a/drivers/scsi/elx/libefc/efc_device.c b/drivers/scsi/elx/libefc/efc_device.c
index 725ca2a23fb2..52be01333c6e 100644
--- a/drivers/scsi/elx/libefc/efc_device.c
+++ b/drivers/scsi/elx/libefc/efc_device.c
@@ -928,22 +928,21 @@ __efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
break;
case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: {
- enum efc_nport_topology topology =
- (enum efc_nport_topology)arg;
+ enum efc_nport_topology *topology = arg;
WARN_ON(node->nport->domain->attached);
WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
node_printf(node, "topology notification, topology=%d\n",
- topology);
+ *topology);
/* At the time the PLOGI was received, the topology was unknown,
* so we didn't know which node would perform the domain attach:
* 1. The node from which the PLOGI was sent (p2p) or
* 2. The node to which the FLOGI was sent (fabric).
*/
- if (topology == EFC_NPORT_TOPO_P2P) {
+ if (*topology == EFC_NPORT_TOPO_P2P) {
/* if this is p2p, need to attach to the domain using
* the d_id from the PLOGI received
*/
diff --git a/drivers/scsi/elx/libefc/efc_fabric.c b/drivers/scsi/elx/libefc/efc_fabric.c
index d397220d9e54..3270ce40196c 100644
--- a/drivers/scsi/elx/libefc/efc_fabric.c
+++ b/drivers/scsi/elx/libefc/efc_fabric.c
@@ -107,7 +107,6 @@ void
efc_fabric_notify_topology(struct efc_node *node)
{
struct efc_node *tmp_node;
- enum efc_nport_topology topology = node->nport->topology;
unsigned long index;
/*
@@ -118,7 +117,7 @@ efc_fabric_notify_topology(struct efc_node *node)
if (tmp_node != node) {
efc_node_post_event(tmp_node,
EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
- (void *)topology);
+ &node->nport->topology);
}
}
}
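
Note: the topology notification stops casting the enum value itself to
void * and instead passes the address of nport->topology; the handler
dereferences it, so it reads the field's value at handling time rather than a
snapshot smuggled inside the pointer. Receiving side, as in the efc_device.c
hunk above:

	case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: {
		enum efc_nport_topology *topology = arg;	/* was: (enum ...)arg */

		if (*topology == EFC_NPORT_TOPO_P2P)
			/* attach using the d_id from the received PLOGI */;
		break;
	}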
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 4683c183e9d4..5bc91d34df63 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2281,11 +2281,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
return FAILED;
}
- conn = session->leadconn;
- iscsi_get_conn(conn->cls_conn);
- conn->eh_abort_cnt++;
- age = session->age;
-
spin_lock(&session->back_lock);
task = (struct iscsi_task *)sc->SCp.ptr;
if (!task || !task->sc) {
@@ -2293,8 +2288,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
spin_unlock(&session->back_lock);
- goto success;
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
}
+
+ conn = session->leadconn;
+ iscsi_get_conn(conn->cls_conn);
+ conn->eh_abort_cnt++;
+ age = session->age;
+
ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
__iscsi_get_task(task);
spin_unlock(&session->back_lock);
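
Note: the abort handler is reordered so the task is looked up and validated
under back_lock before any side effects; the connection reference and the
eh_abort_cnt bump now happen only when an abort will actually be sent, and
the already-completed case unwinds all three locks itself. Post-patch
ordering, assembled from the hunk:

	spin_lock(&session->back_lock);
	task = (struct iscsi_task *)sc->SCp.ptr;
	if (!task || !task->sc) {
		/* completed already: unwind and report success, no refs taken */
		spin_unlock(&session->back_lock);
		spin_unlock_bh(&session->frwd_lock);
		mutex_unlock(&session->eh_mutex);
		return SUCCESS;
	}

	conn = session->leadconn;
	iscsi_get_conn(conn->cls_conn);	/* safe: task is known to be in flight */
	conn->eh_abort_cnt++;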
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index b35bf70a8c0d..ebe417921dac 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -285,11 +285,8 @@ buffer_done:
"6312 Catching potential buffer "
"overflow > PAGE_SIZE = %lu bytes\n",
PAGE_SIZE);
- strscpy(buf + PAGE_SIZE - 1 -
- strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1),
- LPFC_INFO_MORE_STR,
- strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1)
- + 1);
+ strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
+ LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1);
}
return len;
}
@@ -6204,7 +6201,8 @@ lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
- len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d SCSI: %d NVME: %d\n",
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "Cfg: %d SCSI: %d NVME: %d\n",
phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
phba->cfg_nvme_seg_cnt);
return len;
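
Note: a recurring sysfs bug is fixed here (and again in lpfc_els.c below):
each appending scnprintf() must be given the space remaining, not the whole
buffer size, or the append can write past the end. The general pattern, with
illustrative values:

	/* scnprintf() returns the number of characters actually written,
	 * so the remaining space is size - len on every subsequent call. */
	len  = scnprintf(buf, PAGE_SIZE, "SGL sz: %d\n", sgl_sz);
	len += scnprintf(buf + len, PAGE_SIZE - len, "Cfg: %d\n", cfg);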
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 1254a575fd47..052c0e5b1119 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4015,11 +4015,11 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
be32_to_cpu(pcgd->desc_tag),
be32_to_cpu(pcgd->desc_len),
be32_to_cpu(pcgd->xmt_signal_capability),
- be32_to_cpu(pcgd->xmt_signal_frequency.count),
- be32_to_cpu(pcgd->xmt_signal_frequency.units),
+ be16_to_cpu(pcgd->xmt_signal_frequency.count),
+ be16_to_cpu(pcgd->xmt_signal_frequency.units),
be32_to_cpu(pcgd->rcv_signal_capability),
- be32_to_cpu(pcgd->rcv_signal_frequency.count),
- be32_to_cpu(pcgd->rcv_signal_frequency.units));
+ be16_to_cpu(pcgd->rcv_signal_frequency.count),
+ be16_to_cpu(pcgd->rcv_signal_frequency.units));
/* Compare driver and Fport capabilities and choose
* least common.
@@ -9387,7 +9387,7 @@ lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt)
/* Extract the next WWPN from the payload */
wwn = *wwnlist++;
wwpn = be64_to_cpu(wwn);
- len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ,
+ len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len,
" %016llx", wwpn);
/* Log a message if we are on the last WWPN
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 79a4872c2edb..7359505e6041 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1167,7 +1167,7 @@ struct lpfc_mbx_read_object { /* Version 0 */
#define lpfc_mbx_rd_object_rlen_MASK 0x00FFFFFF
#define lpfc_mbx_rd_object_rlen_WORD word0
uint32_t rd_object_offset;
- uint32_t rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
+ __le32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
#define LPFC_OBJ_NAME_SZ 104 /* 26 x sizeof(uint32_t) is 104. */
uint32_t rd_object_cnt;
struct lpfc_mbx_host_buf rd_object_hbuf[4];
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 0ec322f0e3cb..195169badb37 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -5518,7 +5518,7 @@ lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
if (phba->cgn_fpin_frequency &&
phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
- cp->cgn_stat_npm = cpu_to_le32(value);
+ cp->cgn_stat_npm = value;
}
value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
LPFC_CGN_CRC32_SEED);
@@ -5547,9 +5547,9 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
uint32_t mbps;
uint32_t dvalue, wvalue, lvalue, avalue;
uint64_t latsum;
- uint16_t *ptr;
- uint32_t *lptr;
- uint16_t *mptr;
+ __le16 *ptr;
+ __le32 *lptr;
+ __le16 *mptr;
/* Make sure we have a congestion info buffer */
if (!phba->cgn_i)
@@ -5570,7 +5570,7 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
if (phba->cgn_fpin_frequency &&
phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
- cp->cgn_stat_npm = cpu_to_le32(value);
+ cp->cgn_stat_npm = value;
}
/* Read and clear the latency counters for this minute */
@@ -5753,7 +5753,7 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
- mbps += le32_to_cpu(cp->cgn_bw_hr[i]);
+ mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
}
if (lvalue) /* Avg of latency averages */
@@ -8277,11 +8277,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
return 0;
out_free_hba_hdwq_info:
- free_percpu(phba->sli4_hba.c_stat);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ free_percpu(phba->sli4_hba.c_stat);
out_free_hba_idle_stat:
- kfree(phba->sli4_hba.idle_stat);
#endif
+ kfree(phba->sli4_hba.idle_stat);
out_free_hba_eq_info:
free_percpu(phba->sli4_hba.eq_info);
out_free_hba_cpu_map:
@@ -13411,8 +13411,8 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba)
/* last used Index initialized to 0xff already */
- cp->cgn_warn_freq = LPFC_FPIN_INIT_FREQ;
- cp->cgn_alarm_freq = LPFC_FPIN_INIT_FREQ;
+ cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
+ cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
cp->cgn_info_crc = cpu_to_le32(crc);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 73a3568ff17e..479b3eed6208 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1489,9 +1489,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_qhandle *lpfc_queue_info;
struct lpfc_nvme_fcpreq_priv *freqpriv;
struct nvme_common_command *sqe;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t start = 0;
-#endif
/* Validate pointers. LLDD fault handling with transport does
* have timing races.
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 0fde1e874c7a..befdf864c43b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1495,7 +1495,6 @@ static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint8_t *txop, uint8_t *rxop)
{
- uint8_t ret = 0;
if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
switch (scsi_get_prot_op(sc)) {
@@ -1548,7 +1547,7 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
}
- return ret;
+ return 0;
}
#endif
@@ -5578,12 +5577,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
int err, idx;
u8 *uuid = NULL;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- uint64_t start = 0L;
+ uint64_t start;
- if (phba->ktime_on)
- start = ktime_get_ns();
-#endif
start = ktime_get_ns();
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index ffd8a140638c..026a1196a54d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -12292,12 +12292,12 @@ void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = NULL;
IOCB_t *irsp = &rspiocb->iocb;
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "0139 Ignoring ELS cmd tag x%x completion Data: "
+ "0139 Ignoring ELS cmd code x%x completion Data: "
"x%x x%x x%x\n",
irsp->ulpIoTag, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout);
@@ -12305,10 +12305,13 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
* if exchange is busy.
*/
- if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
+ if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
+ ndlp = cmdiocb->context_un.ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
- else
+ } else {
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
lpfc_els_free_iocb(phba, cmdiocb);
+ }
lpfc_nlp_put(ndlp);
}
@@ -22090,6 +22093,7 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
struct lpfc_dmabuf *pcmd;
+ u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
/* sanity check on queue memory */
if (!datap)
@@ -22113,10 +22117,10 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
memset((void *)read_object->u.request.rd_object_name, 0,
LPFC_OBJ_NAME_SZ);
- sprintf((uint8_t *)read_object->u.request.rd_object_name, rdobject);
+ scnprintf((char *)rd_object_name, sizeof(rd_object_name), rdobject);
for (j = 0; j < strlen(rdobject); j++)
read_object->u.request.rd_object_name[j] =
- cpu_to_le32(read_object->u.request.rd_object_name[j]);
+ cpu_to_le32(rd_object_name[j]);
pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
if (pcmd)
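
Note: with rd_object_name now declared __le32, the old in-place
cpu_to_le32(read_object->...[j]) double-converted its own output. The fix
formats into a CPU-order scratch array and converts once per word on the way
out. A sketch of the shape; "%s" is used here for clarity, while the driver
passes rdobject directly as the format:

	u32 scratch[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};	/* CPU byte order */
	int j;

	scnprintf((char *)scratch, sizeof(scratch), "%s", rdobject);
	for (j = 0; j < LPFC_MBX_OBJECT_NAME_LEN_DW; j++)
		read_object->u.request.rd_object_name[j] = cpu_to_le32(scratch[j]);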
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index e4298bf4a482..39d8754e63ac 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1916,7 +1916,7 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
raid = MR_LdRaidGet(ld, local_map_ptr);
if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
- blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+ blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
mr_device_priv_data->is_tm_capable =
raid->capability.tmCapable;
@@ -8033,7 +8033,7 @@ skip_firing_dcmds:
if (instance->adapter_type != MFI_SERIES) {
megasas_release_fusion(instance);
- pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
+ pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
(sizeof(struct MR_PD_CFG_SEQ) *
(MAX_PHYSICAL_DEVICES - 1));
for (i = 0; i < 2 ; i++) {
@@ -8773,8 +8773,7 @@ int megasas_update_device_list(struct megasas_instance *instance,
if (event_type & SCAN_VD_CHANNEL) {
if (!instance->requestorId ||
- (instance->requestorId &&
- megasas_get_ld_vf_affiliation(instance, 0))) {
+ megasas_get_ld_vf_affiliation(instance, 0)) {
dcmd_ret = megasas_ld_list_query(instance,
MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
if (dcmd_ret != DCMD_SUCCESS)
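
Note: the requestorId test is a plain boolean simplification. In
!a || (a && b), the right operand is only evaluated when !a is false, at
which point a is already known true, so the inner "a &&" is redundant:

	/* !a || (a && b)  ==  !a || b : b only runs when a is nonzero. */
	if (!instance->requestorId ||
	    megasas_get_ld_vf_affiliation(instance, 0))
		dcmd_ret = megasas_ld_list_query(instance,
						 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);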
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 6c82435bc9cc..27eb652b564f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1582,8 +1582,10 @@ mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
* wait for current poll to complete.
*/
for (qid = 0; qid < iopoll_q_count; qid++) {
- while (atomic_read(&ioc->io_uring_poll_queues[qid].busy))
+ while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
+ cpu_relax();
udelay(500);
+ }
}
}
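
Note: the pause path adds cpu_relax() inside the busy-wait. It hints the CPU
that this is a spin loop (lowering power and, on SMT, yielding pipeline
resources) and acts as a compiler barrier so the re-read is not optimized
away; udelay() then bounds the poll rate. The pattern as applied:

	while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
		cpu_relax();	/* spin-loop hint + compiler barrier */
		udelay(500);	/* bound the poll rate */
	}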
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 770b241d7bb2..1b79f01f03a4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -2178,7 +2178,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
mpt3sas_check_cmd_timeout(ioc,
ioc->ctl_cmds.status, mpi_request,
sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
- *issue_reset = reset_needed;
+ *issue_reset = reset_needed;
rc = -EFAULT;
goto out;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 2f82b1e629af..d383d4a03436 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -10749,8 +10749,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
_scsih_pcie_topology_change_event(ioc, fw_event);
ioc->current_event = NULL;
- return;
- break;
+ return;
}
out:
fw_event_work_put(fw_event);
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 7a4f5d4dd670..2b8c6fa5e775 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -1939,11 +1939,8 @@ static void ncr_start_next_ccb (struct ncb *np, struct lcb * lp, int maxn);
static void ncr_put_start_queue(struct ncb *np, struct ccb *cp);
static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd);
-static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd);
static void process_waiting_list(struct ncb *np, int sts);
-#define remove_from_waiting_list(np, cmd) \
- retrieve_from_waiting_list(1, (np), (cmd))
#define requeue_waiting_list(np) process_waiting_list((np), DID_OK)
#define reset_waiting_list(np) process_waiting_list((np), DID_RESET)
@@ -7997,26 +7994,6 @@ static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd)
}
}
-static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd)
-{
- struct scsi_cmnd **pcmd = &np->waiting_list;
-
- while (*pcmd) {
- if (cmd == *pcmd) {
- if (to_remove) {
- *pcmd = (struct scsi_cmnd *) cmd->next_wcmd;
- cmd->next_wcmd = NULL;
- }
-#ifdef DEBUG_WAITING_LIST
- printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
-#endif
- return cmd;
- }
- pcmd = (struct scsi_cmnd **) &(*pcmd)->next_wcmd;
- }
- return NULL;
-}
-
static void process_waiting_list(struct ncb *np, int sts)
{
struct scsi_cmnd *waiting_list, *wcmd;
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
index 747af96dd15c..e8bc8d9e4583 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -22,9 +22,9 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
u32 task_retry_id,
u8 fcp_cmd_payload[32])
{
- struct e4_fcoe_task_context *ctx = task_params->context;
+ struct fcoe_task_context *ctx = task_params->context;
const u8 val_byte = ctx->ystorm_ag_context.byte0;
- struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+ struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
@@ -115,9 +115,9 @@ int init_initiator_midpath_unsolicited_fcoe_task(
struct scsi_sgl_task_params *rx_sgl_task_params,
u8 fw_to_place_fc_header)
{
- struct e4_fcoe_task_context *ctx = task_params->context;
+ struct fcoe_task_context *ctx = task_params->context;
const u8 val_byte = ctx->ystorm_ag_context.byte0;
- struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+ struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
index 1ee31a5f063b..7125e484bf93 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -10,7 +10,7 @@
struct fcoe_task_params {
/* Output parameter [set/filled by the HSI function] */
- struct e4_fcoe_task_context *context;
+ struct fcoe_task_context *context;
/* Output parameter [set/filled by the HSI function] */
struct fcoe_wqe *sqe;
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index ba94413fe2ea..631a15969d21 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -141,7 +141,7 @@ struct qedf_ioreq {
struct completion tm_done;
struct completion abts_done;
struct completion cleanup_done;
- struct e4_fcoe_task_context *task;
+ struct fcoe_task_context *task;
struct fcoe_task_params *task_params;
struct scsi_sgl_task_params *sgl_task_params;
int idx;
@@ -503,7 +503,7 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
+ struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 625e58ccb8c8..1ff5bc314fc0 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -16,7 +16,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
struct qedf_ioreq *els_req;
struct qedf_mp_req *mp_req;
struct fc_frame_header *fc_hdr;
- struct e4_fcoe_task_context *task;
+ struct fcoe_task_context *task;
int rc = 0;
uint32_t did, sid;
uint16_t xid;
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 3404782988d5..b649f835d436 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -584,7 +584,7 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
}
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
- struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
+ struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
struct fcoe_wqe *sqe)
{
enum fcoe_task_type task_type;
@@ -602,7 +602,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
/* Note init_initiator_rw_fcoe_task memsets the task context */
io_req->task = task_ctx;
- memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
+ memset(task_ctx, 0, sizeof(struct fcoe_task_context));
memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
@@ -674,7 +674,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
}
void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
+ struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_rport *fcport = io_req->fcport;
@@ -692,7 +692,7 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req,
memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
- memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
+ memset(task_ctx, 0, sizeof(struct fcoe_task_context));
memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
/* Setup the task from io_req for easy reference */
@@ -850,7 +850,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
struct Scsi_Host *host = sc_cmd->device->host;
struct fc_lport *lport = shost_priv(host);
struct qedf_ctx *qedf = lport_priv(lport);
- struct e4_fcoe_task_context *task_ctx;
+ struct fcoe_task_context *task_ctx;
u16 xid;
struct fcoe_wqe *sqe;
u16 sqe_idx;
@@ -2293,7 +2293,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
uint8_t tm_flags)
{
struct qedf_ioreq *io_req;
- struct e4_fcoe_task_context *task;
+ struct fcoe_task_context *task;
struct qedf_ctx *qedf = fcport->qedf;
struct fc_lport *lport = qedf->lport;
int rc = 0;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 42d0d941dba5..0da32fd3302e 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2170,7 +2170,7 @@ static bool qedf_fp_has_work(struct qedf_fastpath *fp)
struct qedf_ctx *qedf = fp->qedf;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
u16 prod_idx;
/* Get the pointer to the global CQ this completion is on */
@@ -2197,7 +2197,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
{
struct qedf_ctx *qedf = fp->qedf;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
struct global_queue *que;
u16 prod_idx;
struct fcoe_cqe *cqe;
@@ -2688,12 +2688,12 @@ void qedf_fp_io_handler(struct work_struct *work)
static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
- sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
+ sizeof(struct status_block), &sb_phys, GFP_KERNEL);
if (!sb_virt) {
QEDF_ERR(&qedf->dbg_ctx,
@@ -3416,7 +3416,9 @@ retry_probe:
qedf->devlink = qed_ops->common->devlink_register(qedf->cdev);
if (IS_ERR(qedf->devlink)) {
QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n");
+ rc = PTR_ERR(qedf->devlink);
qedf->devlink = NULL;
+ goto err2;
}
}
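
The devlink hunk above turns a silently swallowed registration failure into a propagated errno. As a minimal standalone sketch of the IS_ERR()/PTR_ERR() convention it applies (the example_* names are hypothetical, not the driver's API):

  #include <linux/err.h>

  struct devlink;                                       /* opaque here */
  struct example_dev { struct devlink *devlink; };
  struct devlink *example_devlink_register(struct example_dev *edev); /* hypothetical */

  static int example_register(struct example_dev *edev)
  {
          edev->devlink = example_devlink_register(edev);
          if (IS_ERR(edev->devlink)) {
                  int rc = PTR_ERR(edev->devlink);      /* recover the errno */

                  edev->devlink = NULL;                 /* never cache an ERR_PTR */
                  return rc;                            /* propagate, don't ignore */
          }
          return 0;
  }

The NULLing matters because later teardown paths typically test the pointer and must not hand an ERR_PTR to an unregister call.
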
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 42f5afb60055..8deb2001dc2f 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -136,7 +136,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
{
struct qedi_fastpath *fp = NULL;
struct qed_sb_info *sb_info = NULL;
- struct status_block_e4 *sb = NULL;
+ struct status_block *sb = NULL;
struct global_queue *que = NULL;
int id;
u16 prod_idx;
@@ -152,7 +152,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
sb_info = fp->sb_info;
sb = sb_info->sb_virt;
prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
- STATUS_BLOCK_E4_PROD_INDEX_MASK);
+ STATUS_BLOCK_PROD_INDEX_MASK);
seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
que = qedi->global_queues[fp->sb_id];
seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index d01cd829ef97..84a4204a2cb4 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -85,7 +85,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct e4_iscsi_task_context *task_ctx;
+ struct iscsi_task_context *task_ctx;
struct iscsi_text_rsp *resp_hdr_ptr;
struct iscsi_text_response_hdr *cqe_text_response;
struct qedi_cmd *cmd;
@@ -261,7 +261,7 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct e4_iscsi_task_context *task_ctx;
+ struct iscsi_task_context *task_ctx;
struct iscsi_login_rsp *resp_hdr_ptr;
struct iscsi_login_response_hdr *cqe_login_response;
struct qedi_cmd *cmd;
@@ -970,7 +970,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_login_req *login_hdr;
struct scsi_sge *resp_sge = NULL;
@@ -990,9 +990,9 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1073,7 +1073,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_logout *logout_hdr = NULL;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct qedi_cmd *qedi_cmd;
@@ -1091,9 +1091,9 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1434,7 +1434,7 @@ static int send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask,
struct iscsi_tmf_request_hdr tmf_pdu_header;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_tm *tmf_hdr;
struct qedi_cmd *qedi_cmd;
struct qedi_cmd *cmd;
@@ -1454,9 +1454,9 @@ static int send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1548,7 +1548,7 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_text *text_hdr;
struct scsi_sge *req_sge = NULL;
@@ -1570,9 +1570,9 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1649,7 +1649,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_nopout *nopout_hdr;
struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd;
@@ -1669,9 +1669,9 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1991,7 +1991,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
struct iscsi_task_params task_params;
struct iscsi_conn_params conn_params;
struct scsi_initiator_cmd_params cmd_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_cls_conn *cls_conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
@@ -2014,9 +2014,9 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
cmd->task_id = tid;
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index 52772904ef5d..642556a1ce1c 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -202,7 +202,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
struct data_hdr *pdu_header,
enum iscsi_task_type task_type)
{
- struct e4_iscsi_task_context *context;
+ struct iscsi_task_context *context;
u32 val;
u16 index;
u8 val_byte;
@@ -224,7 +224,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
cpu_to_le16(task_params->conn_icid);
SET_FIELD(context->ustorm_ag_context.flags1,
- E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
context->ustorm_st_context.task_type = task_type;
context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
@@ -254,7 +254,7 @@ void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
static
void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
- struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+ struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
u32 remaining_recv_len, u32 expected_data_transfer_len,
u8 num_sges, bool tx_dif_conn_err_en)
{
@@ -266,12 +266,12 @@ void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
ustorm_st_cxt->exp_data_transfer_len = val;
SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
SET_FIELD(ustorm_ag_cxt->flags2,
- E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+ USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
tx_dif_conn_err_en ? 1 : 0);
}
static
-void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context,
+void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
struct iscsi_conn_params *conn_params,
enum iscsi_task_type task_type,
u32 task_size,
@@ -470,7 +470,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
}
}
-static void set_local_completion_context(struct e4_iscsi_task_context *context)
+static void set_local_completion_context(struct iscsi_task_context *context)
{
SET_FIELD(context->ystorm_st_context.state.flags,
YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
@@ -487,7 +487,7 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
struct scsi_dif_task_params *dif_task_params)
{
u32 exp_data_transfer_len = conn_params->max_burst_length;
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
bool slow_io = false;
u32 task_size, val;
u8 num_sges = 0;
@@ -615,7 +615,7 @@ int init_initiator_login_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
@@ -657,7 +657,7 @@ int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_sgl_task_params,
struct scsi_sgl_task_params *rx_sgl_task_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
@@ -703,7 +703,7 @@ int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
@@ -758,7 +758,7 @@ int init_initiator_text_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
index 10f19f0af0a3..df2d471a7b51 100644
--- a/drivers/scsi/qedi/qedi_fw_iscsi.h
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -10,7 +10,7 @@
#include "qedi_fw_scsi.h"
struct iscsi_task_params {
- struct e4_iscsi_task_context *context;
+ struct iscsi_task_context *context;
struct iscsi_wqe *sqe;
u32 tx_io_size;
u32 rx_io_size;
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index a31c5de74754..a282860da0aa 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -182,7 +182,7 @@ struct qedi_cmd {
struct scsi_cmnd *scsi_cmd;
struct scatterlist *sg;
struct qedi_io_bdt io_tbl;
- struct e4_iscsi_task_context request;
+ struct iscsi_task_context request;
unsigned char *sense_buffer;
dma_addr_t sense_buffer_dma;
u16 task_id;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e6dc0b495a82..1dec814d8788 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -351,12 +351,12 @@ static int qedi_init_uio(struct qedi_ctx *qedi)
static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
- sizeof(struct status_block_e4), &sb_phys,
+ sizeof(struct status_block), &sb_phys,
GFP_KERNEL);
if (!sb_virt) {
QEDI_ERR(&qedi->dbg_ctx,
@@ -865,7 +865,8 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
- qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000;
+ qedi->pf_params.iscsi_pf_params.two_msl_timer = QED_TWO_MSL_TIMER_DFLT;
+ qedi->pf_params.iscsi_pf_params.tx_sws_timer = QED_TX_SWS_TIMER_DFLT;
qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;
for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
@@ -1259,7 +1260,7 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
{
struct qedi_ctx *qedi = fp->qedi;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
struct qedi_percpu_s *p = NULL;
struct global_queue *que;
u16 prod_idx;
@@ -1315,7 +1316,7 @@ static bool qedi_fp_has_work(struct qedi_fastpath *fp)
struct qedi_ctx *qedi = fp->qedi;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
u16 prod_idx;
barrier();
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1e4e3e83b5c7..5fc7697f0af4 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -7169,7 +7169,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
return 0;
break;
case QLA2XXX_INI_MODE_DUAL:
- if (!qla_dual_mode_enabled(vha))
+ if (!qla_dual_mode_enabled(vha) &&
+ !qla_ini_mode_enabled(vha))
return 0;
break;
case QLA2XXX_INI_MODE_ENABLED:
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ece60267b971..b26f2699adb2 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2634,7 +2634,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
}
if (unlikely(logit))
- ql_log(ql_log_warn, fcport->vha, 0x5060,
+ ql_log(ql_dbg_io, fcport->vha, 0x5060,
"NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
sp->name, sp->handle, comp_status,
fd->transferred_length, le32_to_cpu(sts->residual_len),
@@ -3491,7 +3491,7 @@ check_scsi_status:
out:
if (logit)
- ql_log(ql_log_warn, fcport->vha, 0x3022,
+ ql_log(ql_dbg_io, fcport->vha, 0x3022,
"FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->d_id.b.domain,
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index d8b05d8b5470..922e4c7bd88e 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -441,9 +441,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
struct iscsi_transport *t = iface->transport;
int param = -1;
- if (attr == &dev_attr_iface_enabled.attr)
- param = ISCSI_NET_PARAM_IFACE_ENABLE;
- else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
+ if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
else if (attr == &dev_attr_iface_header_digest.attr)
param = ISCSI_IFACE_PARAM_HDRDGST_EN;
@@ -483,7 +481,9 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
if (param != -1)
return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
- if (attr == &dev_attr_iface_vlan_id.attr)
+ if (attr == &dev_attr_iface_enabled.attr)
+ param = ISCSI_NET_PARAM_IFACE_ENABLE;
+ else if (attr == &dev_attr_iface_vlan_id.attr)
param = ISCSI_NET_PARAM_VLAN_ID;
else if (attr == &dev_attr_iface_vlan_priority.attr)
param = ISCSI_NET_PARAM_VLAN_PRIORITY;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index cbd9999f93a6..523bf2fdc253 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2124,6 +2124,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
retries = 0;
do {
+ bool media_was_present = sdkp->media_present;
+
cmd[0] = TEST_UNIT_READY;
memset((void *) &cmd[1], 0, 9);
@@ -2138,7 +2140,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
* with any more polling.
*/
if (media_not_present(sdkp, &sshdr)) {
- sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
+ if (media_was_present)
+ sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
return;
}
@@ -3401,15 +3404,16 @@ static int sd_probe(struct device *dev)
}
device_initialize(&sdkp->dev);
- sdkp->dev.parent = dev;
+ sdkp->dev.parent = get_device(dev);
sdkp->dev.class = &sd_disk_class;
dev_set_name(&sdkp->dev, "%s", dev_name(dev));
error = device_add(&sdkp->dev);
- if (error)
- goto out_free_index;
+ if (error) {
+ put_device(&sdkp->dev);
+ goto out;
+ }
- get_device(dev);
dev_set_drvdata(dev, sdkp);
gd->major = sd_major((index & 0xf0) >> 4);
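
The sd_probe() hunk above fixes two ordering problems at once: the parent reference is now taken before device_add() can make the child visible, and a failed device_add() is unwound with put_device() so cleanup runs through the release callback. A minimal sketch of that driver-model pattern, assuming the release callback is written to drop the parent reference:

  #include <linux/device.h>

  static int child_create(struct device *parent, struct device *child)
  {
          int err;

          device_initialize(child);
          child->parent = get_device(parent); /* pin parent for the child's lifetime */

          err = device_add(child);
          if (err) {
                  /* drops the initialization reference; cleanup, including
                   * the parent reference, happens in ->release() */
                  put_device(child);
                  return err;
          }
          return 0;
  }
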
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index b9757f24b0d6..ed06798983f8 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -154,8 +154,8 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
/*
* Report zone buffer size should be at most 64B times the number of
- * zones requested plus the 64B reply header, but should be at least
- * SECTOR_SIZE for ATA devices.
+ * zones requested plus the 64B reply header, but should be aligned
+ * to SECTOR_SIZE for ATA devices.
* Make sure that this size does not exceed the hardware capabilities.
* Furthermore, since the report zone command cannot be split, make
* sure that the allocated buffer can always be mapped by limiting the
@@ -174,7 +174,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
*buflen = bufsize;
return buf;
}
- bufsize >>= 1;
+ bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
}
return NULL;
@@ -280,7 +280,7 @@ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
{
struct scsi_disk *sdkp;
unsigned long flags;
- unsigned int zno;
+ sector_t zno;
int ret;
sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
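
The buffer-sizing loop above now keeps the report buffer SECTOR_SIZE aligned while halving it, instead of merely shrinking it. A one-function illustration of the arithmetic (rounddown() lives in linux/math.h):

  #include <linux/blkdev.h> /* SECTOR_SIZE (512) */
  #include <linux/math.h>   /* rounddown() */

  static size_t halve_sector_aligned(size_t bufsize)
  {
          /* e.g. 12800 >> 1 = 6400, rounded down to 6144 (12 * 512) */
          return rounddown(bufsize >> 1, SECTOR_SIZE);
  }
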
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index c2afba2a5414..0a1734f34587 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -87,9 +87,16 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
0
};
unsigned char recv_page_code;
+ unsigned int retries = SES_RETRIES;
+ struct scsi_sense_hdr sshdr;
+
+ do {
+ ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
+ &sshdr, SES_TIMEOUT, 1, NULL);
+ } while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&
+ (sshdr.sense_key == NOT_READY ||
+ (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
- ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
- NULL, SES_TIMEOUT, SES_RETRIES, NULL);
if (unlikely(ret))
return ret;
@@ -111,7 +118,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
static int ses_send_diag(struct scsi_device *sdev, int page_code,
void *buf, int bufflen)
{
- u32 result;
+ int result;
unsigned char cmd[] = {
SEND_DIAGNOSTIC,
@@ -121,9 +128,16 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
bufflen & 0xff,
0
};
+ struct scsi_sense_hdr sshdr;
+ unsigned int retries = SES_RETRIES;
+
+ do {
+ result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
+ &sshdr, SES_TIMEOUT, 1, NULL);
+ } while (result > 0 && --retries && scsi_sense_valid(&sshdr) &&
+ (sshdr.sense_key == NOT_READY ||
+ (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
- result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
- NULL, SES_TIMEOUT, SES_RETRIES, NULL);
if (result)
sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
result);
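
Both ses.c hunks above trade the blanket retry count that scsi_execute_req() applies to every failure for an explicit loop that retries only transient conditions. Restated standalone, with the sense codes spelled out (a sketch under those assumptions, not the exact driver code):

  #include <linux/dma-direction.h>
  #include <scsi/scsi_common.h>
  #include <scsi/scsi_device.h>
  #include <scsi/scsi_proto.h>

  static int exec_with_retries(struct scsi_device *sdev, unsigned char *cmd,
                               void *buf, int len, int timeout,
                               unsigned int retries)
  {
          struct scsi_sense_hdr sshdr;
          int ret;

          do {
                  ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, len,
                                         &sshdr, timeout, 1, NULL);
                  /* loop only on NOT READY and the power-on/reset unit
                   * attention (ASC 0x29); anything else returns at once */
          } while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&
                   (sshdr.sense_key == NOT_READY ||
                    (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));

          return ret;
  }
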
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 79d9aa2df528..ddd00efc4882 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -523,7 +523,7 @@ static int sr_read_sector(Scsi_CD *cd, int lba, int blksize, unsigned char *dest
return rc;
cd->readcd_known = 0;
sr_printk(KERN_INFO, cd,
- "CDROM does'nt support READ CD (0xbe) command\n");
+ "CDROM doesn't support READ CD (0xbe) command\n");
/* fall & retry the other way */
}
/* ... if this fails, we switch the blocksize using MODE SELECT */
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 9d04929f03a1..ae8636d3780b 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3823,6 +3823,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
case CDROM_SEND_PACKET:
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
+ break;
default:
break;
}
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index b3bcc5c882da..149c1aa09103 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -128,6 +128,81 @@ static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
return err;
}
+static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
+{
+ struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
+ int ret;
+
+ pwr_info.lane_rx = lanes;
+ pwr_info.lane_tx = lanes;
+ ret = ufshcd_config_pwr_mode(hba, &pwr_info);
+ if (ret)
+ dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
+ __func__, lanes, ret);
+ return ret;
+}
+
+static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ int err = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ if (ufshcd_is_hs_mode(dev_max_params) &&
+ (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
+ ufs_intel_set_lanes(hba, 2);
+ memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
+ break;
+ case POST_CHANGE:
+ if (ufshcd_is_hs_mode(dev_req_params)) {
+ u32 peer_granularity;
+
+ usleep_range(1000, 1250);
+ err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &peer_granularity);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
+{
+ u32 granularity, peer_granularity;
+ u32 pa_tactivate, peer_pa_tactivate;
+ int ret;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
+ if (ret)
+ goto out;
+
+ if (granularity == peer_granularity) {
+ u32 new_peer_pa_tactivate = pa_tactivate + 2;
+
+ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
+ }
+out:
+ return ret;
+}
+
#define INTEL_ACTIVELTR 0x804
#define INTEL_IDLELTR 0x808
@@ -351,6 +426,7 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba)
struct ufs_host *ufs_host;
int err;
+ hba->nop_out_timeout = 200;
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
hba->caps |= UFSHCD_CAP_CRYPTO;
err = ufs_intel_common_init(hba);
@@ -381,6 +457,8 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
.exit = ufs_intel_common_exit,
.hce_enable_notify = ufs_intel_hce_enable_notify,
.link_startup_notify = ufs_intel_link_startup_notify,
+ .pwr_change_notify = ufs_intel_lkf_pwr_change_notify,
+ .apply_dev_quirks = ufs_intel_lkf_apply_dev_quirks,
.resume = ufs_intel_resume,
.device_reset = ufs_intel_device_reset,
};
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 3841ab49f556..95be7ecdfe10 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -17,8 +17,6 @@
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <scsi/scsi_driver.h>
-#include <scsi/scsi_transport.h>
-#include "../scsi_transport_api.h"
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
@@ -237,6 +235,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
+static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@@ -319,8 +318,7 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
- int off = (int)tag - hba->nutrs;
- struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
+ struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
if (!trace_ufshcd_upiu_enabled())
return;
@@ -2759,8 +2757,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
out:
up_read(&hba->clk_scaling_lock);
- if (ufs_trigger_eh())
- scsi_schedule_eh(hba->host);
+ if (ufs_trigger_eh()) {
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_schedule_eh_work(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
return err;
}
@@ -3919,35 +3922,6 @@ out:
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
-static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
-{
- lockdep_assert_held(hba->host->host_lock);
-
- return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
- (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
-}
-
-static void ufshcd_schedule_eh(struct ufs_hba *hba)
-{
- bool schedule_eh = false;
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- /* handle fatal errors only when link is not in error state */
- if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
- if (hba->force_reset || ufshcd_is_link_broken(hba) ||
- ufshcd_is_saved_err_fatal(hba))
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
- else
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
- schedule_eh = true;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (schedule_eh)
- scsi_schedule_eh(hba->host);
-}
-
/**
* ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
* state) and waits for it to take effect.
@@ -3968,7 +3942,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
DECLARE_COMPLETION_ONSTACK(uic_async_done);
unsigned long flags;
- bool schedule_eh = false;
u8 status;
int ret;
bool reenable_intr = false;
@@ -4038,14 +4011,10 @@ out:
ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
if (ret) {
ufshcd_set_link_broken(hba);
- schedule_eh = true;
+ ufshcd_schedule_eh_work(hba);
}
-
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (schedule_eh)
- ufshcd_schedule_eh(hba);
mutex_unlock(&hba->uic_cmd_mutex);
return ret;
@@ -4776,7 +4745,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
mutex_lock(&hba->dev_cmd.lock);
for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
- NOP_OUT_TIMEOUT);
+ hba->nop_out_timeout);
if (!err || err == -ETIMEDOUT)
break;
@@ -5911,6 +5880,27 @@ out:
return err_handling;
}
+/* host lock must be held before calling this func */
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
+{
+ return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
+ (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
+}
+
+/* host lock must be held before calling this func */
+static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
+{
+ /* handle fatal errors only when link is not in error state */
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+ ufshcd_is_saved_err_fatal(hba))
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
+ else
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
+ queue_work(hba->eh_wq, &hba->eh_work);
+ }
+}
+
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
down_write(&hba->clk_scaling_lock);
@@ -6044,11 +6034,11 @@ static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
/**
* ufshcd_err_handler - handle UFS errors that require s/w attention
- * @host: SCSI host pointer
+ * @work: pointer to work structure
*/
-static void ufshcd_err_handler(struct Scsi_Host *host)
+static void ufshcd_err_handler(struct work_struct *work)
{
- struct ufs_hba *hba = shost_priv(host);
+ struct ufs_hba *hba;
unsigned long flags;
bool err_xfer = false;
bool err_tm = false;
@@ -6056,9 +6046,10 @@ static void ufshcd_err_handler(struct Scsi_Host *host)
int tag;
bool needs_reset = false, needs_restore = false;
+ hba = container_of(work, struct ufs_hba, eh_work);
+
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
- hba->host->host_eh_scheduled = 0;
if (ufshcd_err_handling_should_stop(hba)) {
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
@@ -6371,6 +6362,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
"host_regs: ");
ufshcd_print_pwr_info(hba);
}
+ ufshcd_schedule_eh_work(hba);
retval |= IRQ_HANDLED;
}
/*
@@ -6382,34 +6374,9 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
hba->errors = 0;
hba->uic_error = 0;
spin_unlock(hba->host->host_lock);
-
- if (queue_eh_work)
- ufshcd_schedule_eh(hba);
-
return retval;
}
-struct ctm_info {
- struct ufs_hba *hba;
- unsigned long pending;
- unsigned int ncpl;
-};
-
-static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
-{
- struct ctm_info *const ci = priv;
- struct completion *c;
-
- WARN_ON_ONCE(reserved);
- if (test_bit(req->tag, &ci->pending))
- return true;
- ci->ncpl++;
- c = req->end_io_data;
- if (c)
- complete(c);
- return true;
-}
-
/**
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
@@ -6420,18 +6387,24 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
*/
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
- unsigned long flags;
- struct request_queue *q = hba->tmf_queue;
- struct ctm_info ci = {
- .hba = hba,
- };
+ unsigned long flags, pending, issued;
+ irqreturn_t ret = IRQ_NONE;
+ int tag;
+
+ pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
spin_lock_irqsave(hba->host->host_lock, flags);
- ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
- blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+ issued = hba->outstanding_tasks & ~pending;
+ for_each_set_bit(tag, &issued, hba->nutmrs) {
+ struct request *req = hba->tmf_rqs[tag];
+ struct completion *c = req->end_io_data;
+
+ complete(c);
+ ret = IRQ_HANDLED;
+ }
spin_unlock_irqrestore(hba->host->host_lock, flags);
- return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
+ return ret;
}
/**
@@ -6554,9 +6527,9 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
ufshcd_hold(hba, false);
spin_lock_irqsave(host->host_lock, flags);
- blk_mq_start_request(req);
task_tag = req->tag;
+ hba->tmf_rqs[req->tag] = req;
treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
@@ -6597,6 +6570,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
}
spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->tmf_rqs[req->tag] = NULL;
__clear_bit(task_tag, &hba->outstanding_tasks);
spin_unlock_irqrestore(hba->host->host_lock, flags);
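
The ufshcd hunks around here replace blk_mq_tagset_busy_iter() with a driver-owned table mapping each task-management tag straight to its request, so the interrupt handler can complete exactly the tags the doorbell reports. The bookkeeping, reduced to its essentials (names hypothetical):

  #include <linux/bitops.h>
  #include <linux/blk-mq.h>
  #include <linux/completion.h>

  struct tmf_ctx {
          struct request **tmf_rqs;  /* tag -> request, nutmrs entries */
          unsigned long outstanding; /* bit set while a tag is in flight */
          int nutmrs;
  };

  /* issue side, called under the host lock */
  static void tmf_issue(struct tmf_ctx *c, struct request *req)
  {
          c->tmf_rqs[req->tag] = req;
          __set_bit(req->tag, &c->outstanding);
  }

  /* irq side: a tag is done when it is outstanding but no longer
   * set in the hardware doorbell register */
  static void tmf_complete_done(struct tmf_ctx *c, unsigned long doorbell)
  {
          unsigned long done = c->outstanding & ~doorbell;
          int tag;

          for_each_set_bit(tag, &done, c->nutmrs)
                  complete(c->tmf_rqs[tag]->end_io_data);
  }
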
@@ -6876,7 +6850,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
- __ufshcd_transfer_req_compl(hba, pos, /*retry_requests=*/true);
+ __ufshcd_transfer_req_compl(hba, 1U << pos, false);
}
}
@@ -7048,17 +7022,15 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
* will be to send LU reset which, again, is a spec violation.
* To avoid these unnecessary/illegal steps, first we clean up
* the lrb taken by this cmd and re-set it in outstanding_reqs,
- * then queue the error handler and bail.
+ * then queue the eh_work and bail.
*/
if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
spin_lock_irqsave(host->host_lock, flags);
hba->force_reset = true;
+ ufshcd_schedule_eh_work(hba);
spin_unlock_irqrestore(host->host_lock, flags);
-
- ufshcd_schedule_eh(hba);
-
goto release;
}
@@ -7191,10 +7163,11 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
spin_lock_irqsave(hba->host->host_lock, flags);
hba->force_reset = true;
+ ufshcd_schedule_eh_work(hba);
dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_err_handler(hba->host);
+ flush_work(&hba->eh_work);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
@@ -8604,6 +8577,8 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
if (hba->is_powered) {
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
+ if (hba->eh_wq)
+ destroy_workqueue(hba->eh_wq);
ufs_debugfs_hba_exit(hba);
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
@@ -9448,10 +9423,6 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
-static struct scsi_transport_template ufshcd_transport_template = {
- .eh_strategy_handler = ufshcd_err_handler,
-};
-
/**
* ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
* @dev: pointer to device handle
@@ -9478,11 +9449,11 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
- host->transportt = &ufshcd_transport_template;
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
+ hba->nop_out_timeout = NOP_OUT_TIMEOUT;
INIT_LIST_HEAD(&hba->clk_list_head);
spin_lock_init(&hba->outstanding_lock);
@@ -9517,6 +9488,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
int err;
struct Scsi_Host *host = hba->host;
struct device *dev = hba->dev;
+ char eh_wq_name[sizeof("ufs_eh_wq_00")];
if (!mmio_base) {
dev_err(hba->dev,
@@ -9570,6 +9542,17 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->max_pwr_info.is_valid = false;
+ /* Initialize work queues */
+ snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
+ hba->host->host_no);
+ hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+ if (!hba->eh_wq) {
+ dev_err(hba->dev, "%s: failed to create eh workqueue\n",
+ __func__);
+ err = -ENOMEM;
+ goto out_disable;
+ }
+ INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
sema_init(&hba->host_sem, 1);
@@ -9638,6 +9621,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
err = PTR_ERR(hba->tmf_queue);
goto free_tmf_tag_set;
}
+ hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
+ sizeof(*hba->tmf_rqs), GFP_KERNEL);
+ if (!hba->tmf_rqs) {
+ err = -ENOMEM;
+ goto free_tmf_queue;
+ }
/* Reset the attached device */
ufshcd_device_reset(hba);
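
Taken together, the ufshcd.c changes above move error handling off the SCSI midlayer's EH thread (the removed scsi_transport_template) onto a driver-owned single-threaded workqueue that is scheduled under the host lock. A minimal sketch of that wiring, assuming a simplified hba structure:

  #include <linux/printk.h>
  #include <linux/spinlock.h>
  #include <linux/workqueue.h>

  struct my_hba {
          struct workqueue_struct *eh_wq;
          struct work_struct eh_work;
          spinlock_t lock;
  };

  static void my_err_handler(struct work_struct *work)
  {
          struct my_hba *hba = container_of(work, struct my_hba, eh_work);

          /* process context: free to sleep while recovering the HBA */
          pr_debug("recovering hba %p\n", hba);
  }

  static int my_eh_init(struct my_hba *hba)
  {
          hba->eh_wq = create_singlethread_workqueue("my_eh_wq");
          if (!hba->eh_wq)
                  return -ENOMEM;
          INIT_WORK(&hba->eh_work, my_err_handler);
          return 0;
  }

  static void my_schedule_eh(struct my_hba *hba) /* callers hold hba->lock */
  {
          queue_work(hba->eh_wq, &hba->eh_work);
  }

With this shape, flush_work(&hba->eh_work) gives the synchronous wait that the host-reset handler above now uses in place of calling the error handler directly.
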
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 52ea6f350b18..41f6e06f9185 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -741,6 +741,8 @@ struct ufs_hba_monitor {
* @is_powered: flag to check if HBA is powered
* @shutting_down: flag to check if shutdown has been invoked
* @host_sem: semaphore used to serialize concurrent contexts
+ * @eh_wq: Workqueue that eh_work works on
+ * @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
* @errors: HBA errors
* @uic_error: UFS interconnect layer error status
@@ -826,6 +828,7 @@ struct ufs_hba {
struct blk_mq_tag_set tmf_tag_set;
struct request_queue *tmf_queue;
+ struct request **tmf_rqs;
struct uic_command *active_uic_cmd;
struct mutex uic_cmd_mutex;
@@ -843,6 +846,8 @@ struct ufs_hba {
struct semaphore host_sem;
/* Work Queues */
+ struct workqueue_struct *eh_wq;
+ struct work_struct eh_work;
struct work_struct eeh_work;
/* HBA Errors */
@@ -858,6 +863,7 @@ struct ufs_hba {
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
ktime_t last_dme_cmd_tstamp;
+ int nop_out_timeout;
/* Keeps information of the UFS device connected to this host */
struct ufs_dev_info dev_info;
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index 02fb51ae8b25..589af5f6b940 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -333,9 +333,8 @@ ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
}
static void
-ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb,
- struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn,
- u8 transfer_len, int read_id)
+ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ __be64 ppn, u8 transfer_len, int read_id)
{
unsigned char *cdb = lrbp->cmd->cmnd;
__be64 ppn_tmp = ppn;
@@ -703,8 +702,7 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
}
}
- ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len,
- read_id);
+ ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id);
hpb->stats.hit_cnt++;
return 0;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index c25ce8f0e0af..07d0250f17c3 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -300,7 +300,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
}
break;
default:
- pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
+ pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
}
}
@@ -392,7 +392,7 @@ static void virtscsi_handle_event(struct work_struct *work)
virtscsi_handle_param_change(vscsi, event);
break;
default:
- pr_err("Unsupport virtio scsi event %x\n", event->event);
+ pr_err("Unsupported virtio scsi event %x\n", event->event);
}
virtscsi_kick_event(vscsi, event_node);
}
diff --git a/drivers/soc/canaan/Kconfig b/drivers/soc/canaan/Kconfig
index 8179b69518b4..853096b7e84c 100644
--- a/drivers/soc/canaan/Kconfig
+++ b/drivers/soc/canaan/Kconfig
@@ -5,7 +5,6 @@ config SOC_K210_SYSCTL
depends on RISCV && SOC_CANAAN && OF
default SOC_CANAAN
select PM
- select SIMPLE_PM_BUS
select SYSCON
select MFD_SYSCON
help
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 4df32bc4c7a6..07d52cafbb31 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -24,6 +24,7 @@ config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
depends on FSL_MC_BUS
select SOC_BUS
+ select DIMLIB
help
Driver for the DPAA2 DPIO object. A DPIO provides queue and
buffer management facilities for software to interact with
diff --git a/drivers/soc/fsl/dpio/dpio-cmd.h b/drivers/soc/fsl/dpio/dpio-cmd.h
index e13fd3ac1939..2fbcb78cdaaf 100644
--- a/drivers/soc/fsl/dpio/dpio-cmd.h
+++ b/drivers/soc/fsl/dpio/dpio-cmd.h
@@ -46,6 +46,9 @@ struct dpio_rsp_get_attr {
__le64 qbman_portal_ci_addr;
/* cmd word 3 */
__le32 qbman_version;
+ __le32 pad1;
+ /* cmd word 4 */
+ __le32 clk;
};
struct dpio_stashing_dest {
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
index 7f397b4ad878..dd948889eeab 100644
--- a/drivers/soc/fsl/dpio/dpio-driver.c
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -162,6 +162,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
goto err_get_attr;
}
desc.qman_version = dpio_attrs.qbman_version;
+ desc.qman_clk = dpio_attrs.clk;
err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
if (err) {
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 7351f3030550..3fd0d0840287 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+#include <linux/dim.h>
#include <linux/slab.h>
#include "dpio.h"
@@ -28,6 +29,14 @@ struct dpaa2_io {
spinlock_t lock_notifications;
struct list_head notifications;
struct device *dev;
+
+ /* Net DIM */
+ struct dim rx_dim;
+ /* protect against concurrent Net DIM updates */
+ spinlock_t dim_lock;
+ u16 event_ctr;
+ u64 bytes;
+ u64 frames;
};
struct dpaa2_io_store {
@@ -100,6 +109,17 @@ struct dpaa2_io *dpaa2_io_service_select(int cpu)
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
+static void dpaa2_io_dim_work(struct work_struct *w)
+{
+ struct dim *dim = container_of(w, struct dim, work);
+ struct dim_cq_moder moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ struct dpaa2_io *d = container_of(dim, struct dpaa2_io, rx_dim);
+
+ dpaa2_io_set_irq_coalescing(d, moder.usec);
+ dim->state = DIM_START_MEASURE;
+}
+
/**
* dpaa2_io_create() - create a dpaa2_io object.
* @desc: the dpaa2_io descriptor
@@ -114,6 +134,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
struct device *dev)
{
struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+ u32 qman_256_cycles_per_ns;
if (!obj)
return NULL;
@@ -127,7 +148,15 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
obj->dpio_desc = *desc;
obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
+ obj->swp_desc.qman_clk = obj->dpio_desc.qman_clk;
obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
+
+ /* Compute the length of a 256 QBMAN clock-cycle increment in ns. This is
+ * needed because the interrupt timeout period register must be specified
+ * in QBMAN clock cycles in increments of 256.
+ */
+ qman_256_cycles_per_ns = 256000 / (obj->swp_desc.qman_clk / 1000000);
+ obj->swp_desc.qman_256_cycles_per_ns = qman_256_cycles_per_ns;
obj->swp = qbman_swp_init(&obj->swp_desc);
if (!obj->swp) {
@@ -138,6 +167,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
INIT_LIST_HEAD(&obj->node);
spin_lock_init(&obj->lock_mgmt_cmd);
spin_lock_init(&obj->lock_notifications);
+ spin_lock_init(&obj->dim_lock);
INIT_LIST_HEAD(&obj->notifications);
/* For now only enable DQRR interrupts */
@@ -155,6 +185,12 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
obj->dev = dev;
+ memset(&obj->rx_dim, 0, sizeof(obj->rx_dim));
+ INIT_WORK(&obj->rx_dim.work, dpaa2_io_dim_work);
+ obj->event_ctr = 0;
+ obj->bytes = 0;
+ obj->frames = 0;
+
return obj;
}
@@ -194,6 +230,8 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
struct qbman_swp *swp;
u32 status;
+ obj->event_ctr++;
+
swp = obj->swp;
status = qbman_swp_interrupt_read_status(swp);
if (!status)
@@ -779,3 +817,82 @@ int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
+
+/**
+ * dpaa2_io_set_irq_coalescing() - Set new IRQ coalescing values
+ * @d: the given DPIO object
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff)
+{
+ struct qbman_swp *swp = d->swp;
+
+ return qbman_swp_set_irq_coalescing(swp, swp->dqrr.dqrr_size - 1,
+ irq_holdoff);
+}
+EXPORT_SYMBOL(dpaa2_io_set_irq_coalescing);
+
+/**
+ * dpaa2_io_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @d: the given DPIO object
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ */
+void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff)
+{
+ struct qbman_swp *swp = d->swp;
+
+ qbman_swp_get_irq_coalescing(swp, NULL, irq_holdoff);
+}
+EXPORT_SYMBOL(dpaa2_io_get_irq_coalescing);
+
+/**
+ * dpaa2_io_set_adaptive_coalescing() - Enable/disable adaptive coalescing
+ * @d: the given DPIO object
+ * @use_adaptive_rx_coalesce: adaptive coalescing state
+ */
+void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d,
+ int use_adaptive_rx_coalesce)
+{
+ d->swp->use_adaptive_rx_coalesce = use_adaptive_rx_coalesce;
+}
+EXPORT_SYMBOL(dpaa2_io_set_adaptive_coalescing);
+
+/**
+ * dpaa2_io_get_adaptive_coalescing() - Query adaptive coalescing state
+ * @d: the given DPIO object
+ *
+ * Return 1 when adaptive coalescing is enabled on the DPIO object and 0
+ * otherwise.
+ */
+int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d)
+{
+ return d->swp->use_adaptive_rx_coalesce;
+}
+EXPORT_SYMBOL(dpaa2_io_get_adaptive_coalescing);
+
+/**
+ * dpaa2_io_update_net_dim() - Update Net DIM
+ * @d: the given DPIO object
+ * @frames: how many frames have been dequeued by the user since the last call
+ * @bytes: how many bytes have been dequeued by the user since the last call
+ */
+void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes)
+{
+ struct dim_sample dim_sample = {};
+
+ if (!d->swp->use_adaptive_rx_coalesce)
+ return;
+
+ spin_lock(&d->dim_lock);
+
+ d->bytes += bytes;
+ d->frames += frames;
+
+ dim_update_sample(d->event_ctr, d->frames, d->bytes, &dim_sample);
+ net_dim(&d->rx_dim, dim_sample);
+
+ spin_unlock(&d->dim_lock);
+}
+EXPORT_SYMBOL(dpaa2_io_update_net_dim);
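
The DPIO side of the Net DIM support above follows the standard usage of the kernel's dynamic interrupt moderation library: count events per interrupt, accumulate frames/bytes, feed a sample to net_dim() on every poll, and apply the algorithm's verdict from its work item. A condensed sketch (my_* names hypothetical; the real code protects the sample path with dim_lock):

  #include <linux/dim.h>
  #include <linux/workqueue.h>

  struct my_io {
          struct dim rx_dim;
          u16 event_ctr; /* incremented once per IRQ */
          u64 frames;
          u64 bytes;
  };

  void my_set_irq_holdoff(u32 usec); /* hypothetical hardware hook */

  static void my_dim_work(struct work_struct *w)
  {
          struct dim *dim = container_of(w, struct dim, work);
          struct dim_cq_moder moder =
                  net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

          my_set_irq_holdoff(moder.usec); /* apply the verdict */
          dim->state = DIM_START_MEASURE; /* arm the next window */
  }

  static void my_io_init(struct my_io *io)
  {
          INIT_WORK(&io->rx_dim.work, my_dim_work);
  }

  static void my_rx_poll_done(struct my_io *io, u64 frames, u64 bytes)
  {
          struct dim_sample sample = {};

          io->frames += frames;
          io->bytes += bytes;
          dim_update_sample(io->event_ctr, io->frames, io->bytes, &sample);
          net_dim(&io->rx_dim, sample); /* may queue my_dim_work() */
  }
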
diff --git a/drivers/soc/fsl/dpio/dpio.c b/drivers/soc/fsl/dpio/dpio.c
index af74c597a675..8ed606ffaac5 100644
--- a/drivers/soc/fsl/dpio/dpio.c
+++ b/drivers/soc/fsl/dpio/dpio.c
@@ -162,6 +162,7 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io,
attr->qbman_portal_ci_offset =
le64_to_cpu(dpio_rsp->qbman_portal_ci_addr);
attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version);
+ attr->clk = le32_to_cpu(dpio_rsp->clk);
return 0;
}
diff --git a/drivers/soc/fsl/dpio/dpio.h b/drivers/soc/fsl/dpio/dpio.h
index da06f7258098..7fda44f0d7f4 100644
--- a/drivers/soc/fsl/dpio/dpio.h
+++ b/drivers/soc/fsl/dpio/dpio.h
@@ -59,6 +59,7 @@ int dpio_disable(struct fsl_mc_io *mc_io,
* @num_priorities: Number of priorities for the notification channel (1-8);
* relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
* @qbman_version: QBMAN version
+ * @clk: QBMAN clock frequency value in Hz
*/
struct dpio_attr {
int id;
@@ -68,6 +69,7 @@ struct dpio_attr {
enum dpio_channel_mode channel_mode;
u8 num_priorities;
u32 qbman_version;
+ u32 clk;
};
int dpio_get_attributes(struct fsl_mc_io *mc_io,
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index f13da4d7d1c5..3474bf5f88d5 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -29,6 +29,7 @@
#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
#define QBMAN_CINH_SWP_DQPI 0xa00
+#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
#define QBMAN_CINH_SWP_DCAP 0xac0
#define QBMAN_CINH_SWP_SDQCR 0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
@@ -38,6 +39,7 @@
#define QBMAN_CINH_SWP_IER 0xe40
#define QBMAN_CINH_SWP_ISDR 0xe80
#define QBMAN_CINH_SWP_IIR 0xec0
+#define QBMAN_CINH_SWP_ITPR 0xf40
/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
@@ -355,6 +357,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
& p->eqcr.pi_ci_mask;
p->eqcr.available = p->eqcr.pi_ring_size;
+ /* Initialize the software portal with an IRQ timeout period of 0us */
+ qbman_swp_set_irq_coalescing(p, p->dqrr.dqrr_size - 1, 0);
+
return p;
}
@@ -1796,3 +1801,56 @@ u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
return le32_to_cpu(a->fill);
}
+
+/**
+ * qbman_swp_set_irq_coalescing() - Set new IRQ coalescing values
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
+ u32 irq_holdoff)
+{
+ u32 itp, max_holdoff;
+
+ /* Convert irq_holdoff value from usecs to 256 QBMAN clock cycles
+ * increments. This depends on the QBMAN internal frequency.
+ */
+ itp = (irq_holdoff * 1000) / p->desc->qman_256_cycles_per_ns;
+ if (itp > 4096) {
+ max_holdoff = (p->desc->qman_256_cycles_per_ns * 4096) / 1000;
+ pr_err("irq_holdoff must be <= %uus\n", max_holdoff);
+ return -EINVAL;
+ }
+
+ if (irq_threshold >= p->dqrr.dqrr_size) {
+ pr_err("irq_threshold must be < %u\n", p->dqrr.dqrr_size - 1);
+ return -EINVAL;
+ }
+
+ p->irq_threshold = irq_threshold;
+ p->irq_holdoff = irq_holdoff;
+
+ qbman_write_register(p, QBMAN_CINH_SWP_DQRR_ITR, irq_threshold);
+ qbman_write_register(p, QBMAN_CINH_SWP_ITPR, itp);
+
+ return 0;
+}
+
+/**
+ * qbman_swp_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold (an IRQ is generated when there are more
+ * DQRR entries in the portal than the threshold)
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ */
+void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
+ u32 *irq_holdoff)
+{
+ if (irq_threshold)
+ *irq_threshold = p->irq_threshold;
+ if (irq_holdoff)
+ *irq_holdoff = p->irq_holdoff;
+}
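
For concreteness, the unit conversion used above, worked through under an assumed 500 MHz QBMAN clock (the actual rate arrives via the new clk attribute):

  /*
   * qman_256_cycles_per_ns = 256000 / (500000000 / 1000000) = 512
   *   - despite the field's name, the stored value is the length of one
   *     256-cycle increment in ns (256 cycles at 2 ns each)
   * irq_holdoff = 100 us  ->  itp = (100 * 1000) / 512 = 195 increments
   * largest programmable holdoff = (512 * 4096) / 1000 = 2097 us
   */
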
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
index c7c2225b7d91..b23883dd2725 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.h
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -24,6 +24,8 @@ struct qbman_swp_desc {
void *cena_bar; /* Cache-enabled portal base address */
void __iomem *cinh_bar; /* Cache-inhibited portal base address */
u32 qman_version;
+ u32 qman_clk;
+ u32 qman_256_cycles_per_ns;
};
#define QBMAN_SWP_INTERRUPT_EQRI 0x01
@@ -156,6 +158,11 @@ struct qbman_swp {
} eqcr;
spinlock_t access_spinlock;
+
+ /* Interrupt coalescing */
+ u32 irq_threshold;
+ u32 irq_holdoff;
+ int use_adaptive_rx_coalesce;
};
/* Function pointers */
@@ -648,4 +655,10 @@ static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
return qbman_swp_dqrr_next_ptr(s);
}
+int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
+ u32 irq_holdoff);
+
+void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
+ u32 *irq_holdoff);
+
#endif /* __FSL_QBMAN_PORTAL_H */
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
index bda170d7b4a2..72fc2b539213 100644
--- a/drivers/soc/qcom/mdt_loader.c
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -98,7 +98,7 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len)
if (ehdr->e_phnum < 2)
return ERR_PTR(-EINVAL);
- if (phdrs[0].p_type == PT_LOAD || phdrs[1].p_type == PT_LOAD)
+ if (phdrs[0].p_type == PT_LOAD)
return ERR_PTR(-EINVAL);
if ((phdrs[1].p_flags & QCOM_MDT_TYPE_MASK) != QCOM_MDT_TYPE_HASH)
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 9faf48302f4b..52e581167115 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -628,7 +628,7 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
/* Feed the soc specific unique data into entropy pool */
add_device_randomness(info, item_size);
- platform_set_drvdata(pdev, qs->soc_dev);
+ platform_set_drvdata(pdev, qs);
return 0;
}
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
index ea64e187854e..f32e1cbbe8c5 100644
--- a/drivers/soc/ti/omap_prm.c
+++ b/drivers/soc/ti/omap_prm.c
@@ -825,25 +825,28 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
writel_relaxed(v, reset->prm->base + reset->prm->data->rstctrl);
spin_unlock_irqrestore(&reset->lock, flags);
- if (!has_rstst)
- goto exit;
+ /* wait for the reset bit to clear */
+ ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
+ reset->prm->data->rstctrl,
+ v, !(v & BIT(id)), 1,
+ OMAP_RESET_MAX_WAIT);
+ if (ret)
+ pr_err("%s: timedout waiting for %s:%lu\n", __func__,
+ reset->prm->data->name, id);
/* wait for the status to be set */
- ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
+ if (has_rstst) {
+ ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
reset->prm->data->rstst,
v, v & BIT(st_bit), 1,
OMAP_RESET_MAX_WAIT);
- if (ret)
- pr_err("%s: timedout waiting for %s:%lu\n", __func__,
- reset->prm->data->name, id);
+ if (ret)
+ pr_err("%s: timedout waiting for %s:%lu\n", __func__,
+ reset->prm->data->name, id);
+ }
-exit:
- if (reset->clkdm) {
- /* At least dra7 iva needs a delay before clkdm idle */
- if (has_rstst)
- udelay(1);
+ if (reset->clkdm)
pdata->clkdm_allow_idle(reset->clkdm);
- }
return ret;
}
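
The rework above polls RSTCTRL itself before (conditionally) polling RSTST, both through the atomic iopoll helper. Its usage pattern in isolation (register layout hypothetical):

  #include <linux/bits.h>
  #include <linux/iopoll.h>

  static int wait_reset_bit_clear(void __iomem *rstctrl, unsigned long id,
                                  u32 timeout_us)
  {
          u32 v;

          /* re-reads every 1 us, safe in atomic context; returns
           * -ETIMEDOUT if BIT(id) has not cleared within timeout_us */
          return readl_relaxed_poll_timeout_atomic(rstctrl, v,
                                                   !(v & BIT(id)),
                                                   1, timeout_us);
  }
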
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 788dcdf25f00..f872cf196c2f 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1301,7 +1301,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
* DMA map early, for performance (empties dcache ASAP) and
* better fault reporting.
*/
- if ((!master->cur_msg_mapped)
+ if ((!master->cur_msg->is_dma_mapped)
&& as->use_pdc) {
if (atmel_spi_dma_map_xfer(as, xfer) < 0)
return -ENOMEM;
@@ -1381,7 +1381,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
}
}
- if (!master->cur_msg_mapped
+ if (!master->cur_msg->is_dma_mapped
&& as->use_pdc)
atmel_spi_dma_unmap_xfer(master, xfer);
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index a78e56f566dd..3043677ba222 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1250,10 +1250,14 @@ static void bcm_qspi_hw_init(struct bcm_qspi *qspi)
static void bcm_qspi_hw_uninit(struct bcm_qspi *qspi)
{
+ u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);
+
bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0);
if (has_bspi(qspi))
bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
+ /* clear interrupt */
+ bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status & ~1);
}
static const struct spi_controller_mem_ops bcm_qspi_mem_ops = {
@@ -1397,6 +1401,47 @@ int bcm_qspi_probe(struct platform_device *pdev,
if (!qspi->dev_ids)
return -ENOMEM;
+ /*
+ * Some SoCs integrate spi controller (e.g., its interrupt bits)
+ * in specific ways
+ */
+ if (soc_intc) {
+ qspi->soc_intc = soc_intc;
+ soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
+ } else {
+ qspi->soc_intc = NULL;
+ }
+
+ if (qspi->clk) {
+ ret = clk_prepare_enable(qspi->clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare clock\n");
+ goto qspi_probe_err;
+ }
+ qspi->base_clk = clk_get_rate(qspi->clk);
+ } else {
+ qspi->base_clk = MSPI_BASE_FREQ;
+ }
+
+ if (data->has_mspi_rev) {
+ rev = bcm_qspi_read(qspi, MSPI, MSPI_REV);
+ /* some older revs do not have a MSPI_REV register */
+ if ((rev & 0xff) == 0xff)
+ rev = 0;
+ }
+
+ qspi->mspi_maj_rev = (rev >> 4) & 0xf;
+ qspi->mspi_min_rev = rev & 0xf;
+ qspi->mspi_spcr3_sysclk = data->has_spcr3_sysclk;
+
+ qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);
+
+ /*
+ * On SW resets it is possible to have the mask still enabled
+ * Need to disable the mask and clear the status while we init
+ */
+ bcm_qspi_hw_uninit(qspi);
+
for (val = 0; val < num_irqs; val++) {
irq = -1;
name = qspi_irq_tab[val].irq_name;
@@ -1433,38 +1478,6 @@ int bcm_qspi_probe(struct platform_device *pdev,
goto qspi_probe_err;
}
- /*
- * Some SoCs integrate spi controller (e.g., its interrupt bits)
- * in specific ways
- */
- if (soc_intc) {
- qspi->soc_intc = soc_intc;
- soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
- } else {
- qspi->soc_intc = NULL;
- }
-
- ret = clk_prepare_enable(qspi->clk);
- if (ret) {
- dev_err(dev, "failed to prepare clock\n");
- goto qspi_probe_err;
- }
-
- qspi->base_clk = clk_get_rate(qspi->clk);
-
- if (data->has_mspi_rev) {
- rev = bcm_qspi_read(qspi, MSPI, MSPI_REV);
- /* some older revs do not have a MSPI_REV register */
- if ((rev & 0xff) == 0xff)
- rev = 0;
- }
-
- qspi->mspi_maj_rev = (rev >> 4) & 0xf;
- qspi->mspi_min_rev = rev & 0xf;
- qspi->mspi_spcr3_sysclk = data->has_spcr3_sysclk;
-
- qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);
-
bcm_qspi_hw_init(qspi);
init_completion(&qspi->mspi_done);
init_completion(&qspi->bspi_done);
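
The probe reordering above amounts to one rule: bring the controller to a known masked-and-acked state before any interrupt line is requested, because a stale status bit left over from a soft reset would otherwise fire the handler the moment it is registered. Schematically (register offsets hypothetical):

  #include <linux/device.h>
  #include <linux/interrupt.h>
  #include <linux/io.h>

  #define MY_IRQ_MASK   0x00 /* hypothetical offsets */
  #define MY_IRQ_STATUS 0x04

  static int my_request_irq_quiesced(struct device *dev, void __iomem *base,
                                     int irq, irq_handler_t handler, void *data)
  {
          /* mask everything, then ack whatever is already latched */
          writel(0, base + MY_IRQ_MASK);
          writel(readl(base + MY_IRQ_STATUS), base + MY_IRQ_STATUS);

          return devm_request_irq(dev, irq, handler, 0, dev_name(dev), data);
  }
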
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 386e8c84be0a..a15de10ee286 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -233,36 +233,44 @@ static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
return delay;
inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
- setup = setup ? setup : 1;
- hold = hold ? hold : 1;
- inactive = inactive ? inactive : 1;
-
- reg_val = readl(mdata->base + SPI_CFG0_REG);
- if (mdata->dev_comp->enhance_timing) {
- hold = min_t(u32, hold, 0x10000);
- setup = min_t(u32, setup, 0x10000);
- reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
- reg_val |= (((hold - 1) & 0xffff)
- << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
- reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
- reg_val |= (((setup - 1) & 0xffff)
- << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
- } else {
- hold = min_t(u32, hold, 0x100);
- setup = min_t(u32, setup, 0x100);
- reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
- reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
- reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
- reg_val |= (((setup - 1) & 0xff)
- << SPI_CFG0_CS_SETUP_OFFSET);
+ if (hold || setup) {
+ reg_val = readl(mdata->base + SPI_CFG0_REG);
+ if (mdata->dev_comp->enhance_timing) {
+ if (hold) {
+ hold = min_t(u32, hold, 0x10000);
+ reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+ reg_val |= (((hold - 1) & 0xffff)
+ << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+ }
+ if (setup) {
+ setup = min_t(u32, setup, 0x10000);
+ reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+ reg_val |= (((setup - 1) & 0xffff)
+ << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+ }
+ } else {
+ if (hold) {
+ hold = min_t(u32, hold, 0x100);
+ reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
+ reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
+ }
+ if (setup) {
+ setup = min_t(u32, setup, 0x100);
+ reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
+ reg_val |= (((setup - 1) & 0xff)
+ << SPI_CFG0_CS_SETUP_OFFSET);
+ }
+ }
+ writel(reg_val, mdata->base + SPI_CFG0_REG);
}
- writel(reg_val, mdata->base + SPI_CFG0_REG);
- inactive = min_t(u32, inactive, 0x100);
- reg_val = readl(mdata->base + SPI_CFG1_REG);
- reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
- reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
- writel(reg_val, mdata->base + SPI_CFG1_REG);
+ if (inactive) {
+ inactive = min_t(u32, inactive, 0x100);
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
+ reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
+ reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
+ }
return 0;
}
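The mt65xx rework leaves each chip-select timing field untouched unless the caller actually supplied a value, instead of silently forcing a minimum of 1. A sketch of the per-field pattern it now applies, with hypothetical CS_HOLD_* constants standing in for the driver's offsets:

```c
#include <stdint.h>
#include <stdio.h>

#define CS_HOLD_OFFSET 0     /* hypothetical field position */
#define CS_HOLD_MASK   0xff

/* Program a (value - 1)-encoded field only when the caller supplied one. */
static uint32_t set_cs_hold(uint32_t reg_val, uint32_t hold)
{
	if (!hold)
		return reg_val;          /* leave the field untouched */
	if (hold > 0x100)
		hold = 0x100;            /* clamp to what the field can encode */
	reg_val &= ~(CS_HOLD_MASK << CS_HOLD_OFFSET);
	reg_val |= ((hold - 1) & CS_HOLD_MASK) << CS_HOLD_OFFSET;
	return reg_val;
}

int main(void)
{
	printf("0x%08x\n", set_cs_hold(0xffffffff, 16));
	printf("0x%08x\n", set_cs_hold(0xffffffff, 0));  /* unchanged */
	return 0;
}
```

The same clamp-and-encode shape repeats for the setup and inactive times, just with different field widths and registers.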
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
index 9708b7827ff7..f5d32ec4634e 100644
--- a/drivers/spi/spi-mux.c
+++ b/drivers/spi/spi-mux.c
@@ -137,6 +137,13 @@ static int spi_mux_probe(struct spi_device *spi)
priv = spi_controller_get_devdata(ctlr);
priv->spi = spi;
+ /*
+ * Increase the lockdep subclass as these locks are taken while the
+ * parent bus already holds its own instance's locks.
+ */
+ lockdep_set_subclass(&ctlr->io_mutex, 1);
+ lockdep_set_subclass(&ctlr->add_lock, 1);
+
priv->mux = devm_mux_control_get(&spi->dev, NULL);
if (IS_ERR(priv->mux)) {
ret = dev_err_probe(&spi->dev, PTR_ERR(priv->mux),
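The lockdep_set_subclass() calls above address a false positive: lockdep groups locks by class, so every spi_controller's io_mutex shares one class, and a mux's child controller takes its io_mutex while the parent bus's is already held, which looks like recursive locking. Putting the child's locks in subclass 1 tells the validator the nesting is intentional. A pthread sketch of the nesting itself (userspace has no lockdep; the mutexes are illustrative):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t parent_io = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t child_io = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	pthread_mutex_lock(&parent_io);   /* parent bus transfer in flight */
	pthread_mutex_lock(&child_io);    /* mux forwards to the child bus */
	puts("nested ok: distinct lock instances, consistent order");
	pthread_mutex_unlock(&child_io);
	pthread_mutex_unlock(&parent_io);
	return 0;
}
```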
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index a66fa97046ee..2b0301fc971c 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -33,6 +33,7 @@
#include <linux/acpi.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -315,6 +316,7 @@
#define NXP_FSPI_MIN_IOMAP SZ_4M
#define DCFG_RCWSR1 0x100
+#define SYS_PLL_RAT GENMASK(6, 2)
/* Access flash memory using IP bus only */
#define FSPI_QUIRK_USE_IP_ONLY BIT(0)
@@ -926,9 +928,8 @@ static void erratum_err050568(struct nxp_fspi *f)
{ .family = "QorIQ LS1028A" },
{ /* sentinel */ }
};
- struct device_node *np;
struct regmap *map;
- u32 val = 0, sysclk = 0;
+ u32 val, sys_pll_ratio;
int ret;
/* Check for LS1028A family */
@@ -937,7 +938,6 @@ static void erratum_err050568(struct nxp_fspi *f)
return;
}
- /* Compute system clock frequency multiplier ratio */
map = syscon_regmap_lookup_by_compatible("fsl,ls1028a-dcfg");
if (IS_ERR(map)) {
dev_err(f->dev, "No syscon regmap\n");
@@ -948,23 +948,11 @@ static void erratum_err050568(struct nxp_fspi *f)
if (ret < 0)
goto err;
- /* Strap bits 6:2 define SYS_PLL_RAT i.e frequency multiplier ratio */
- val = (val >> 2) & 0x1F;
- WARN(val == 0, "Strapping is zero: Cannot determine ratio");
+ sys_pll_ratio = FIELD_GET(SYS_PLL_RAT, val);
+ dev_dbg(f->dev, "val: 0x%08x, sys_pll_ratio: %d\n", val, sys_pll_ratio);
- /* Compute system clock frequency */
- np = of_find_node_by_name(NULL, "clock-sysclk");
- if (!np)
- goto err;
-
- if (of_property_read_u32(np, "clock-frequency", &sysclk))
- goto err;
-
- sysclk = (sysclk * val) / 1000000; /* Convert sysclk to Mhz */
- dev_dbg(f->dev, "val: 0x%08x, sysclk: %dMhz\n", val, sysclk);
-
- /* Use IP bus only if PLL is 300MHz */
- if (sysclk == 300)
+ /* Use IP bus only if platform clock is 300MHz */
+ if (sys_pll_ratio == 3)
f->devtype_data->quirks |= FSPI_QUIRK_USE_IP_ONLY;
return;
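The FIELD_GET()/GENMASK() pair above replaces the open-coded `(val >> 2) & 0x1F`. Userspace equivalents, assuming the same bits 6:2 layout for SYS_PLL_RAT and a made-up register value:

```c
#include <stdint.h>
#include <stdio.h>

/* Userspace equivalents of the kernel helpers used above. */
#define GENMASK(h, l)      (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, v) (((v) & (mask)) / ((mask) & ~((mask) - 1)))

#define SYS_PLL_RAT GENMASK(6, 2)   /* strap bits 6:2 */

int main(void)
{
	uint32_t rcwsr1 = 0x0000000c;   /* hypothetical register value */
	printf("ratio = %u\n", FIELD_GET(SYS_PLL_RAT, rcwsr1));  /* -> 3 */
	return 0;
}
```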
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 540861ca2ba3..553b6b9d0222 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -600,6 +600,12 @@ static int rockchip_spi_transfer_one(
int ret;
bool use_dma;
+ /* Zero length transfers won't trigger an interrupt on completion */
+ if (!xfer->len) {
+ spi_finalize_current_transfer(ctlr);
+ return 1;
+ }
+
WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index ebd27f883033..713292b0c71e 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -204,9 +204,6 @@ struct tegra_slink_data {
struct dma_async_tx_descriptor *tx_dma_desc;
};
-static int tegra_slink_runtime_suspend(struct device *dev);
-static int tegra_slink_runtime_resume(struct device *dev);
-
static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi,
unsigned long reg)
{
@@ -1185,7 +1182,7 @@ static int tegra_slink_resume(struct device *dev)
}
#endif
-static int tegra_slink_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 57e2499ec1ed..926b68aa45d3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -58,10 +58,6 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
const struct spi_device *spi = to_spi_device(dev);
int len;
- len = of_device_modalias(dev, buf, PAGE_SIZE);
- if (len != -ENODEV)
- return len;
-
len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
if (len != -ENODEV)
return len;
@@ -367,10 +363,6 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
const struct spi_device *spi = to_spi_device(dev);
int rc;
- rc = of_device_uevent_modalias(dev, env);
- if (rc != -ENODEV)
- return rc;
-
rc = acpi_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
@@ -486,12 +478,6 @@ static LIST_HEAD(spi_controller_list);
*/
static DEFINE_MUTEX(board_lock);
-/*
- * Prevents addition of devices with same chip select and
- * addition of devices below an unregistering controller.
- */
-static DEFINE_MUTEX(spi_add_lock);
-
/**
* spi_alloc_device - Allocate a new SPI device
* @ctlr: Controller to which device is connected
@@ -644,9 +630,9 @@ int spi_add_device(struct spi_device *spi)
* chipselect **BEFORE** we call setup(), else we'll trash
* its configuration. Lock against concurrent add() calls.
*/
- mutex_lock(&spi_add_lock);
+ mutex_lock(&ctlr->add_lock);
status = __spi_add_device(spi);
- mutex_unlock(&spi_add_lock);
+ mutex_unlock(&ctlr->add_lock);
return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
@@ -666,7 +652,7 @@ static int spi_add_device_locked(struct spi_device *spi)
/* Set the bus ID string */
spi_dev_set_name(spi);
- WARN_ON(!mutex_is_locked(&spi_add_lock));
+ WARN_ON(!mutex_is_locked(&ctlr->add_lock));
return __spi_add_device(spi);
}
@@ -2561,6 +2547,12 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
return NULL;
device_initialize(&ctlr->dev);
+ INIT_LIST_HEAD(&ctlr->queue);
+ spin_lock_init(&ctlr->queue_lock);
+ spin_lock_init(&ctlr->bus_lock_spinlock);
+ mutex_init(&ctlr->bus_lock_mutex);
+ mutex_init(&ctlr->io_mutex);
+ mutex_init(&ctlr->add_lock);
ctlr->bus_num = -1;
ctlr->num_chipselect = 1;
ctlr->slave = slave;
@@ -2833,11 +2825,6 @@ int spi_register_controller(struct spi_controller *ctlr)
return id;
ctlr->bus_num = id;
}
- INIT_LIST_HEAD(&ctlr->queue);
- spin_lock_init(&ctlr->queue_lock);
- spin_lock_init(&ctlr->bus_lock_spinlock);
- mutex_init(&ctlr->bus_lock_mutex);
- mutex_init(&ctlr->io_mutex);
ctlr->bus_lock_flag = 0;
init_completion(&ctlr->xfer_completion);
if (!ctlr->max_dma_len)
@@ -2974,7 +2961,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
/* Prevent addition of new devices, unregister existing ones */
if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
- mutex_lock(&spi_add_lock);
+ mutex_lock(&ctlr->add_lock);
device_for_each_child(&ctlr->dev, NULL, __unregister);
@@ -3005,7 +2992,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
mutex_unlock(&board_lock);
if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
- mutex_unlock(&spi_add_lock);
+ mutex_unlock(&ctlr->add_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
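The spi.c changes retire the global spi_add_lock in favour of a per-controller add_lock initialized at allocation time, which is what lets a device on one bus register a child controller without self-deadlocking. A pthread sketch of the shape of that change (names are illustrative):

```c
#include <pthread.h>
#include <stdlib.h>

struct controller {
	pthread_mutex_t add_lock;   /* was: one global spi_add_lock */
};

static struct controller *alloc_controller(void)
{
	struct controller *ctlr = calloc(1, sizeof(*ctlr));

	if (!ctlr)
		return NULL;
	/* Initialize in the allocator so every user sees a ready lock,
	 * mirroring the move from spi_register_controller() to
	 * __spi_alloc_controller() above. */
	pthread_mutex_init(&ctlr->add_lock, NULL);
	return ctlr;
}

static void add_device(struct controller *ctlr)
{
	pthread_mutex_lock(&ctlr->add_lock);
	/* ... check chip-select uniqueness, register the device ... */
	pthread_mutex_unlock(&ctlr->add_lock);
}

int main(void)
{
	struct controller *c = alloc_controller();
	if (c)
		add_device(c);
	free(c);
	return 0;
}
```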
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 6dc29ce3b4bf..1bd73e322b7b 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -673,6 +673,19 @@ static const struct file_operations spidev_fops = {
static struct class *spidev_class;
+static const struct spi_device_id spidev_spi_ids[] = {
+ { .name = "dh2228fv" },
+ { .name = "ltc2488" },
+ { .name = "sx1301" },
+ { .name = "bk4" },
+ { .name = "dhcom-board" },
+ { .name = "m53cpld" },
+ { .name = "spi-petra" },
+ { .name = "spi-authenta" },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, spidev_spi_ids);
+
#ifdef CONFIG_OF
static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "rohm,dh2228fv" },
@@ -818,6 +831,7 @@ static struct spi_driver spidev_spi_driver = {
},
.probe = spidev_probe,
.remove = spidev_remove,
+ .id_table = spidev_spi_ids,
/* NOTE: suspend/resume methods are not necessary here.
* We don't do anything except pass the requests to/from
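The spidev hunk adds a sentinel-terminated spi_device_id table and hooks it up via .id_table, so name-based matching works for these boards; around this time the SPI core also started warning when a driver lacks an id entry for each compatible, which this presumably silences. A sketch of the table-with-sentinel lookup:

```c
#include <stdio.h>
#include <string.h>

struct spi_id { const char *name; };

/* Sentinel-terminated table, mirroring spidev_spi_ids above. */
static const struct spi_id ids[] = {
	{ "dh2228fv" },
	{ "ltc2488" },
	{ NULL },           /* the {} terminator */
};

static int id_match(const char *modalias)
{
	for (const struct spi_id *id = ids; id->name; id++)
		if (!strcmp(id->name, modalias))
			return 1;
	return 0;
}

int main(void)
{
	printf("%d %d\n", id_match("ltc2488"), id_match("unknown"));
	return 0;
}
```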
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index e6d860a9678e..dc4ed0ff1ae2 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -761,6 +761,17 @@ out:
gbphy_runtime_put_autosuspend(gb_tty->gbphy_dev);
}
+static void gb_tty_port_destruct(struct tty_port *port)
+{
+ struct gb_tty *gb_tty = container_of(port, struct gb_tty, port);
+
+ if (gb_tty->minor != GB_NUM_MINORS)
+ release_minor(gb_tty);
+ kfifo_free(&gb_tty->write_fifo);
+ kfree(gb_tty->buffer);
+ kfree(gb_tty);
+}
+
static const struct tty_operations gb_ops = {
.install = gb_tty_install,
.open = gb_tty_open,
@@ -786,6 +797,7 @@ static const struct tty_port_operations gb_port_ops = {
.dtr_rts = gb_tty_dtr_rts,
.activate = gb_tty_port_activate,
.shutdown = gb_tty_port_shutdown,
+ .destruct = gb_tty_port_destruct,
};
static int gb_uart_probe(struct gbphy_device *gbphy_dev,
@@ -798,17 +810,11 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
int retval;
int minor;
- gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
- if (!gb_tty)
- return -ENOMEM;
-
connection = gb_connection_create(gbphy_dev->bundle,
le16_to_cpu(gbphy_dev->cport_desc->id),
gb_uart_request_handler);
- if (IS_ERR(connection)) {
- retval = PTR_ERR(connection);
- goto exit_tty_free;
- }
+ if (IS_ERR(connection))
+ return PTR_ERR(connection);
max_payload = gb_operation_get_payload_size_max(connection);
if (max_payload < sizeof(struct gb_uart_send_data_request)) {
@@ -816,13 +822,23 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
goto exit_connection_destroy;
}
+ gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
+ if (!gb_tty) {
+ retval = -ENOMEM;
+ goto exit_connection_destroy;
+ }
+
+ tty_port_init(&gb_tty->port);
+ gb_tty->port.ops = &gb_port_ops;
+ gb_tty->minor = GB_NUM_MINORS;
+
gb_tty->buffer_payload_max = max_payload -
sizeof(struct gb_uart_send_data_request);
gb_tty->buffer = kzalloc(gb_tty->buffer_payload_max, GFP_KERNEL);
if (!gb_tty->buffer) {
retval = -ENOMEM;
- goto exit_connection_destroy;
+ goto exit_put_port;
}
INIT_WORK(&gb_tty->tx_work, gb_uart_tx_write_work);
@@ -830,7 +846,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
retval = kfifo_alloc(&gb_tty->write_fifo, GB_UART_WRITE_FIFO_SIZE,
GFP_KERNEL);
if (retval)
- goto exit_buf_free;
+ goto exit_put_port;
gb_tty->credits = GB_UART_FIRMWARE_CREDITS;
init_completion(&gb_tty->credits_complete);
@@ -844,7 +860,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
} else {
retval = minor;
}
- goto exit_kfifo_free;
+ goto exit_put_port;
}
gb_tty->minor = minor;
@@ -853,9 +869,6 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
init_waitqueue_head(&gb_tty->wioctl);
mutex_init(&gb_tty->mutex);
- tty_port_init(&gb_tty->port);
- gb_tty->port.ops = &gb_port_ops;
-
gb_tty->connection = connection;
gb_tty->gbphy_dev = gbphy_dev;
gb_connection_set_data(connection, gb_tty);
@@ -863,7 +876,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
retval = gb_connection_enable_tx(connection);
if (retval)
- goto exit_release_minor;
+ goto exit_put_port;
send_control(gb_tty, gb_tty->ctrlout);
@@ -890,16 +903,10 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
exit_connection_disable:
gb_connection_disable(connection);
-exit_release_minor:
- release_minor(gb_tty);
-exit_kfifo_free:
- kfifo_free(&gb_tty->write_fifo);
-exit_buf_free:
- kfree(gb_tty->buffer);
+exit_put_port:
+ tty_port_put(&gb_tty->port);
exit_connection_destroy:
gb_connection_destroy(connection);
-exit_tty_free:
- kfree(gb_tty);
return retval;
}
@@ -930,15 +937,10 @@ static void gb_uart_remove(struct gbphy_device *gbphy_dev)
gb_connection_disable_rx(connection);
tty_unregister_device(gb_tty_driver, gb_tty->minor);
- /* FIXME - free transmit / receive buffers */
-
gb_connection_disable(connection);
- tty_port_destroy(&gb_tty->port);
gb_connection_destroy(connection);
- release_minor(gb_tty);
- kfifo_free(&gb_tty->write_fifo);
- kfree(gb_tty->buffer);
- kfree(gb_tty);
+
+ tty_port_put(&gb_tty->port);
}
static int gb_tty_init(void)
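The greybus rework moves all teardown into a tty_port .destruct callback and makes every path, probe errors included, drop a reference with tty_port_put() instead of freeing members by hand. A minimal refcount sketch of that lifetime model:

```c
#include <stdio.h>
#include <stdlib.h>

struct port {
	int refcount;
	char *buffer;
	void (*destruct)(struct port *);
};

static void port_destruct(struct port *p)
{
	free(p->buffer);   /* all teardown lives in one place */
	free(p);
}

static void port_put(struct port *p)
{
	if (--p->refcount == 0)
		p->destruct(p);
}

int main(void)
{
	struct port *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	p->refcount = 1;
	p->destruct = port_destruct;
	p->buffer = malloc(64);
	port_put(p);       /* last reference: the destructor frees everything */
	return 0;
}
```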
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
index 8e085dda0c18..712e01c37870 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
@@ -1646,6 +1646,8 @@ static input_system_err_t input_system_configure_channel_sensor(
default:
return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
}
+
+ return INPUT_SYSTEM_ERR_NO_ERROR;
}
// Test flags and set structure.
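The atomisp fix adds the missing return after the switch: falling off the end of a non-void function is undefined behaviour, and newer compilers reject it outright. A compact illustration:

```c
#include <stdio.h>

enum err { ERR_NONE, ERR_PARAM };

/* Without the final return, configure(0) would fall off the end of a
 * non-void function, which is undefined behaviour. */
static enum err configure(int fmt)
{
	switch (fmt) {
	case 0:
		/* ... program the channel ... */
		break;
	default:
		return ERR_PARAM;
	}

	return ERR_NONE;   /* the line the patch above adds */
}

int main(void)
{
	printf("%d %d\n", configure(0), configure(7));
	return 0;
}
```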
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index 8a2edd67f2c6..20e508158871 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -919,7 +919,7 @@ static int hantro_probe(struct platform_device *pdev)
if (!vpu->variant->irqs[i].handler)
continue;
- if (vpu->variant->num_clocks > 1) {
+ if (vpu->variant->num_irqs > 1) {
irq_name = vpu->variant->irqs[i].name;
irq = platform_get_irq_byname(vpu->pdev, irq_name);
} else {
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index c589fe9dae70..825af5fd35e0 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -135,7 +135,7 @@ void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
sizeimage = bytesperline * height;
/* Chroma plane size. */
- sizeimage += bytesperline * height / 2;
+ sizeimage += bytesperline * ALIGN(height, 64) / 2;
break;
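The cedrus fix sizes the chroma plane from the 64-aligned height, matching what the decoder actually writes. With a userspace copy of the kernel's ALIGN() helper:

```c
#include <stdio.h>

/* Userspace copy of the kernel ALIGN() macro used above. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int bytesperline = 1024, height = 1080;
	unsigned int luma   = bytesperline * height;
	/* Chroma is computed from the 64-aligned height (1088 here) so
	 * the plane covers what the hardware actually writes. */
	unsigned int chroma = bytesperline * ALIGN(height, 64) / 2;

	printf("luma %u chroma %u\n", luma, chroma);
	return 0;
}
```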
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 33539f6c254d..1dc849378a0f 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -4614,10 +4614,9 @@ static int qlge_probe(struct pci_dev *pdev,
goto netdev_free;
}
- devlink_register(devlink);
err = qlge_health_create_reporters(qdev);
if (err)
- goto devlink_unregister;
+ goto netdev_free;
/* Start up the timer to trigger EEH if
* the bus goes dead
@@ -4628,10 +4627,9 @@ static int qlge_probe(struct pci_dev *pdev,
qlge_display_dev_info(ndev);
atomic_set(&qdev->lb_count, 0);
cards_found++;
+ devlink_register(devlink);
return 0;
-devlink_unregister:
- devlink_unregister(devlink);
netdev_free:
free_netdev(ndev);
devlink_free:
@@ -4656,13 +4654,13 @@ static void qlge_remove(struct pci_dev *pdev)
struct net_device *ndev = qdev->ndev;
struct devlink *devlink = priv_to_devlink(qdev);
+ devlink_unregister(devlink);
del_timer_sync(&qdev->timer);
qlge_cancel_all_work_sync(qdev);
unregister_netdev(ndev);
qlge_release_all(pdev);
pci_disable_device(pdev);
devlink_health_reporter_destroy(qdev->reporter);
- devlink_unregister(devlink);
devlink_free(devlink);
free_netdev(ndev);
}
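The qlge reordering follows the devlink rule that a device should only become visible once probe can no longer fail, and should disappear before remove starts tearing things down. A sketch of that ordering (function names are illustrative):

```c
#include <stdio.h>

/* Publish the management interface only as the last step of probe, and
 * unpublish it first in remove, so userspace never sees a
 * half-initialized or half-torn-down device. */
static void qlge_publish(void)   { puts("devlink_register: device visible"); }
static void qlge_unpublish(void) { puts("devlink_unregister: device hidden"); }

static int qlge_probe_sketch(void)
{
	/* ... allocate netdev, create health reporters, start timers ... */
	qlge_publish();          /* last: everything is ready */
	return 0;
}

static void qlge_remove_sketch(void)
{
	qlge_unpublish();        /* first: stop new userspace access */
	/* ... del_timer_sync, unregister_netdev, free resources ... */
}

int main(void)
{
	qlge_probe_sketch();
	qlge_remove_sketch();
	return 0;
}
```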
diff --git a/drivers/staging/r8188eu/hal/hal_intf.c b/drivers/staging/r8188eu/hal/hal_intf.c
index a6d589e89aeb..f27eba72d646 100644
--- a/drivers/staging/r8188eu/hal/hal_intf.c
+++ b/drivers/staging/r8188eu/hal/hal_intf.c
@@ -248,7 +248,7 @@ void rtw_hal_update_ra_mask(struct adapter *adapt, u32 mac_id, u8 rssi_level)
#ifdef CONFIG_88EU_AP_MODE
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &adapt->stapriv;
- if ((mac_id - 1) > 0)
+ if (mac_id >= 2)
psta = pstapriv->sta_aid[(mac_id - 1) - 1];
if (psta)
add_RATid(adapt, psta, 0);/* todo: based on rssi_level*/
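The r8188eu check fixes an unsigned-underflow bug: for mac_id == 0, (mac_id - 1) wraps to 0xffffffff, so the old `> 0` test passed and sta_aid[] was indexed far out of range. A two-line demonstration:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mac_id = 0;

	/* Buggy form: (mac_id - 1) wraps to 0xffffffff for mac_id == 0,
	 * so the "> 0" test passes and the array index goes wild. */
	if ((mac_id - 1) > 0)
		puts("buggy check taken for mac_id == 0!");

	/* Fixed form: states the intent directly. */
	if (mac_id >= 2)
		puts("fixed check taken");
	else
		puts("fixed check correctly skipped");
	return 0;
}
```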
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
index 81d4255d1785..1fd375076001 100644
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
@@ -5372,8 +5372,8 @@ static int rtw_mp_read_reg(struct net_device *dev,
pnext++;
if (*pnext != '\0') {
- strtout = simple_strtoul(pnext, &ptmp, 16);
- sprintf(extra, "%s %d", extra, strtout);
+ strtout = simple_strtoul(pnext, &ptmp, 16);
+ sprintf(extra + strlen(extra), " %d", strtout);
} else {
break;
}
@@ -5405,7 +5405,7 @@ static int rtw_mp_read_reg(struct net_device *dev,
pnext++;
if (*pnext != '\0') {
strtout = simple_strtoul(pnext, &ptmp, 16);
- sprintf(extra, "%s %d", extra, strtout);
+ sprintf(extra + strlen(extra), " %d", strtout);
} else {
break;
}
@@ -5512,7 +5512,7 @@ static int rtw_mp_read_rf(struct net_device *dev,
pnext++;
if (*pnext != '\0') {
strtou = simple_strtoul(pnext, &ptmp, 16);
- sprintf(extra, "%s %d", extra, strtou);
+ sprintf(extra + strlen(extra), " %d", strtou);
} else {
break;
}
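The sprintf fixes above remove calls where the output buffer is also a "%s" source, which is undefined behaviour (and modern libcs can mangle it). Appending at strlen(buf) sidesteps the overlap:

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	char extra[64] = "regs:";

	/* sprintf(extra, "%s %d", extra, ...) reads and writes the same
	 * buffer, which is undefined behaviour. Appending at the current
	 * end avoids the overlap entirely. */
	for (int v = 1; v <= 3; v++)
		sprintf(extra + strlen(extra), " %d", v);

	puts(extra);   /* "regs: 1 2 3" */
	return 0;
}
```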
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index b25369a13452..967f10b9582a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -182,7 +182,7 @@ create_pagelist(char *buf, char __user *ubuf,
offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
- if (num_pages > (SIZE_MAX - sizeof(struct pagelist) -
+ if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
sizeof(struct vchiq_pagelist_info)) /
(sizeof(u32) + sizeof(pages[0]) +
sizeof(struct scatterlist)))
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 102ec644bc8a..023bd4516a68 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1110,20 +1110,24 @@ static ssize_t alua_support_store(struct config_item *item,
{
struct se_dev_attrib *da = to_attrib(item);
struct se_device *dev = da->da_dev;
- bool flag;
+ bool flag, oldflag;
int ret;
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);
+ if (flag == oldflag)
+ return count;
+
if (!(dev->transport->transport_flags_changeable &
TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
pr_err("dev[%p]: Unable to change SE Device alua_support:"
" alua_support has fixed value\n", dev);
- return -EINVAL;
+ return -ENOSYS;
}
- ret = strtobool(page, &flag);
- if (ret < 0)
- return ret;
-
if (flag)
dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
else
@@ -1145,20 +1149,24 @@ static ssize_t pgr_support_store(struct config_item *item,
{
struct se_dev_attrib *da = to_attrib(item);
struct se_device *dev = da->da_dev;
- bool flag;
+ bool flag, oldflag;
int ret;
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
+ if (flag == oldflag)
+ return count;
+
if (!(dev->transport->transport_flags_changeable &
TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
pr_err("dev[%p]: Unable to change SE Device pgr_support:"
" pgr_support has fixed value\n", dev);
- return -EINVAL;
+ return -ENOSYS;
}
- ret = strtobool(page, &flag);
- if (ret < 0)
- return ret;
-
if (flag)
dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
else
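Both configfs store handlers are reordered to parse the input first, treat writing the current value as a successful no-op, and only then reject devices whose flag is fixed, now with -ENOSYS rather than -EINVAL. A sketch of that control flow (errno values open-coded for the demo):

```c
#include <stdio.h>
#include <string.h>

static int flag_fixed = 1, flag_cur = 1;

/* Parse first, succeed on a no-op write, and only then reject
 * devices whose flag cannot be changed. */
static int store(const char *page)
{
	int want;

	if (!strcmp(page, "1"))
		want = 1;
	else if (!strcmp(page, "0"))
		want = 0;
	else
		return -22;              /* -EINVAL: parse error first */

	if (want == flag_cur)
		return 0;                /* writing the current value succeeds */

	if (flag_fixed)
		return -38;              /* -ENOSYS: value is fixed */

	flag_cur = want;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", store("x"), store("1"), store("0"));
	return 0;
}
```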
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 4b94b085625b..3829b61b56c1 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -269,7 +269,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
spin_lock(&dev->dev_reservation_lock);
if (dev->reservation_holder &&
dev->reservation_holder->se_node_acl != sess->se_node_acl) {
- pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+ pr_err("SCSI-2 RESERVATION CONFLICT for %s fabric\n",
tpg->se_tpg_tfo->fabric_name);
pr_err("Original reserver LUN: %llu %s\n",
cmd->se_lun->unpacked_lun,
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 5ce13b099d7d..5363ebebfc35 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -585,6 +585,9 @@ static int optee_remove(struct platform_device *pdev)
{
struct optee *optee = platform_get_drvdata(pdev);
+ /* Unregister OP-TEE specific client devices on TEE bus */
+ optee_unregister_devices();
+
/*
* Ask OP-TEE to free all cached shared memory objects to decrease
* reference counters and also avoid wild pointers in secure world
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index ec1d24693eba..128a2d2a50a1 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -53,6 +53,13 @@ static int get_devices(struct tee_context *ctx, u32 session,
return 0;
}
+static void optee_release_device(struct device *dev)
+{
+ struct tee_client_device *optee_device = to_tee_client_device(dev);
+
+ kfree(optee_device);
+}
+
static int optee_register_device(const uuid_t *device_uuid)
{
struct tee_client_device *optee_device = NULL;
@@ -63,6 +70,7 @@ static int optee_register_device(const uuid_t *device_uuid)
return -ENOMEM;
optee_device->dev.bus = &tee_bus_type;
+ optee_device->dev.release = optee_release_device;
if (dev_set_name(&optee_device->dev, "optee-ta-%pUb", device_uuid)) {
kfree(optee_device);
return -ENOMEM;
@@ -154,3 +162,17 @@ int optee_enumerate_devices(u32 func)
{
return __optee_enumerate_devices(func);
}
+
+static int __optee_unregister_device(struct device *dev, void *data)
+{
+ if (!strncmp(dev_name(dev), "optee-ta", strlen("optee-ta")))
+ device_unregister(dev);
+
+ return 0;
+}
+
+void optee_unregister_devices(void)
+{
+ bus_for_each_dev(&tee_bus_type, NULL, NULL,
+ __optee_unregister_device);
+}
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index dbdd367be156..f6bb4a763ba9 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -184,6 +184,7 @@ void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
#define PTA_CMD_GET_DEVICES 0x0
#define PTA_CMD_GET_DEVICES_SUPP 0x1
int optee_enumerate_devices(u32 func);
+void optee_unregister_devices(void);
/*
* Small helpers
diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c
index c41a9a501a6e..d167039af519 100644
--- a/drivers/tee/optee/shm_pool.c
+++ b/drivers/tee/optee/shm_pool.c
@@ -35,7 +35,7 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
unsigned int nr_pages = 1 << order, i;
struct page **pages;
- pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL);
+ pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
if (!pages) {
rc = -ENOMEM;
goto err;
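The shm_pool fix is the classic kcalloc sizeof idiom: sizeof(*pages) names the element type directly, so the allocation stays correct if the declaration ever changes, whereas sizeof(pages) merely happened to be the same size because both are pointers. In userspace terms:

```c
#include <stdio.h>
#include <stdlib.h>

struct page { char data[64]; };

int main(void)
{
	unsigned int nr_pages = 8;
	struct page **pages;

	/* sizeof(*pages) is the element type (struct page *); sizeof(pages)
	 * is the type of the variable itself. Here both are pointer-sized,
	 * but only the first form survives a change of element type. */
	pages = calloc(nr_pages, sizeof(*pages));
	if (!pages)
		return 1;
	printf("%zu bytes for %u slots\n", nr_pages * sizeof(*pages), nr_pages);
	free(pages);
	return 0;
}
```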
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 0f0038af2ad4..fb64acfd5e07 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -107,7 +107,7 @@ static int tcc_offset_update(unsigned int tcc)
return 0;
}
-static unsigned int tcc_offset_save;
+static int tcc_offset_save = -1;
static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
struct device_attribute *attr, const char *buf,
@@ -352,7 +352,8 @@ int proc_thermal_resume(struct device *dev)
proc_dev = dev_get_drvdata(dev);
proc_thermal_read_ppcc(proc_dev);
- tcc_offset_update(tcc_offset_save);
+ if (tcc_offset_save >= 0)
+ tcc_offset_update(tcc_offset_save);
return 0;
}
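The int340x fix uses -1 as a "never written" sentinel so resume only reprograms a TCC offset the user actually stored; the old unsigned zero default was indistinguishable from a stored offset of 0. A sketch:

```c
#include <stdio.h>

/* A signed sentinel distinguishes "never written" from a stored 0. */
static int tcc_offset_save = -1;

static void store(int tcc) { tcc_offset_save = tcc; }

static void resume(void)
{
	if (tcc_offset_save >= 0)
		printf("restoring offset %d\n", tcc_offset_save);
	else
		puts("nothing to restore, keeping firmware default");
}

int main(void)
{
	resume();      /* before any store: leave the hardware alone */
	store(10);
	resume();      /* after a store: reprogram the saved value */
	return 0;
}
```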
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 4c7ebd1d3f9c..b1162e566a70 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -417,7 +417,7 @@ static irqreturn_t tsens_critical_irq_thread(int irq, void *data)
const struct tsens_sensor *s = &priv->sensor[i];
u32 hw_id = s->hw_id;
- if (IS_ERR(s->tzd))
+ if (!s->tzd)
continue;
if (!tsens_threshold_violated(priv, hw_id, &d))
continue;
@@ -467,7 +467,7 @@ static irqreturn_t tsens_irq_thread(int irq, void *data)
const struct tsens_sensor *s = &priv->sensor[i];
u32 hw_id = s->hw_id;
- if (IS_ERR(s->tzd))
+ if (!s->tzd)
continue;
if (!tsens_threshold_violated(priv, hw_id, &d))
continue;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 97ef9b040b84..51374f4e1cca 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -222,15 +222,14 @@ int thermal_build_list_of_policies(char *buf)
{
struct thermal_governor *pos;
ssize_t count = 0;
- ssize_t size = PAGE_SIZE;
mutex_lock(&thermal_governor_lock);
list_for_each_entry(pos, &thermal_governor_list, governor_list) {
- size = PAGE_SIZE - count;
- count += scnprintf(buf + count, size, "%s ", pos->name);
+ count += scnprintf(buf + count, PAGE_SIZE - count, "%s ",
+ pos->name);
}
- count += scnprintf(buf + count, size, "\n");
+ count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
mutex_unlock(&thermal_governor_lock);
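The thermal_core fix recomputes the remaining buffer space on every iteration; the old code captured PAGE_SIZE - count once, so the final newline was written with a stale bound. A userspace sketch (snprintf stands in for the kernel's scnprintf, which additionally caps its return value at the space actually used):

```c
#include <stdio.h>

#define BUF_SIZE 64   /* stands in for PAGE_SIZE */

int main(void)
{
	const char *names[] = { "step_wise", "fair_share", "user_space" };
	char buf[BUF_SIZE];
	int count = 0;

	/* Recompute the remaining space on every write, never cache it. */
	for (int i = 0; i < 3; i++)
		count += snprintf(buf + count, BUF_SIZE - count, "%s ", names[i]);
	count += snprintf(buf + count, BUF_SIZE - count, "\n");

	fputs(buf, stdout);
	return 0;
}
```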
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index da19d7987d00..78fd365893c1 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -7,6 +7,7 @@ thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o
thunderbolt-${CONFIG_ACPI} += acpi.o
thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o
+CFLAGS_test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
thunderbolt_dma_test-${CONFIG_USB4_DMA_TEST} += dma_test.o
obj-$(CONFIG_USB4_DMA_TEST) += thunderbolt_dma_test.o
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 8f143c09a169..f0bf01ea069a 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -618,10 +618,8 @@ static int __init xenboot_console_setup(struct console *console, char *string)
{
static struct xencons_info xenboot;
- if (xen_initial_domain())
+ if (xen_initial_domain() || !xen_pv_domain())
return 0;
- if (!xen_pv_domain())
- return -ENODEV;
return xencons_info_pv_init(&xenboot, 0);
}
@@ -632,17 +630,16 @@ static void xenboot_write_console(struct console *console, const char *string,
unsigned int linelen, off = 0;
const char *pos;
+ if (dom0_write_console(0, string, len) >= 0)
+ return;
+
if (!xen_pv_domain()) {
xen_hvm_early_write(0, string, len);
return;
}
- dom0_write_console(0, string, len);
-
- if (xen_initial_domain())
+ if (domU_write_console(0, "(early) ", 8) < 0)
return;
-
- domU_write_console(0, "(early) ", 8);
while (off < len && NULL != (pos = strchr(string+off, '\n'))) {
linelen = pos-string+off;
if (off + linelen > len)
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 891fd8345e25..73e5f1dbd075 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -106,7 +106,7 @@
#define UART_OMAP_EFR2_TIMEOUT_BEHAVE BIT(6)
/* RX FIFO occupancy indicator */
-#define UART_OMAP_RX_LVL 0x64
+#define UART_OMAP_RX_LVL 0x19
struct omap8250_priv {
int line;
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 71ae16de0f90..39fc96dc2531 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -361,9 +361,13 @@ config SERIAL_8250_BCM2835AUX
If unsure, say N.
config SERIAL_8250_FSL
- bool
+ bool "Freescale 16550 UART support" if COMPILE_TEST && !(PPC || ARM || ARM64)
depends on SERIAL_8250_CONSOLE
- default PPC || ARM || ARM64 || COMPILE_TEST
+ default PPC || ARM || ARM64
+ help
+ Selecting this option enables a workaround for a break-detection
+ erratum for Freescale 16550 UARTs in the 8250 driver. It also
+ enables support for ACPI enumeration.
config SERIAL_8250_DW
tristate "Support for Synopsys DesignWare 8250 quirks"
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 231de29a6452..ab226da75f7b 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -163,7 +163,7 @@ static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
st = readl(port->membase + UART_STAT);
spin_unlock_irqrestore(&port->lock, flags);
- return (st & STAT_TX_FIFO_EMP) ? TIOCSER_TEMT : 0;
+ return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
}
static unsigned int mvebu_uart_get_mctrl(struct uart_port *port)
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index a9acd93e85b7..25c558e65ece 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -438,8 +438,8 @@ static void reset_tbufs(struct slgt_info *info);
static void tdma_reset(struct slgt_info *info);
static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
-static void get_signals(struct slgt_info *info);
-static void set_signals(struct slgt_info *info);
+static void get_gtsignals(struct slgt_info *info);
+static void set_gtsignals(struct slgt_info *info);
static void set_rate(struct slgt_info *info, u32 data_rate);
static void bh_transmit(struct slgt_info *info);
@@ -720,7 +720,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
+ set_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -730,7 +730,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
if (!C_CRTSCTS(tty) || !tty_throttled(tty))
info->signals |= SerialSignal_RTS;
spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
+ set_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -1181,7 +1181,7 @@ static inline void line_info(struct seq_file *m, struct slgt_info *info)
/* output current serial signal states */
spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
+ get_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
stat_buf[0] = 0;
@@ -1281,7 +1281,7 @@ static void throttle(struct tty_struct * tty)
if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->signals &= ~SerialSignal_RTS;
- set_signals(info);
+ set_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -1306,7 +1306,7 @@ static void unthrottle(struct tty_struct * tty)
if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->signals |= SerialSignal_RTS;
- set_signals(info);
+ set_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -1477,7 +1477,7 @@ static int hdlcdev_open(struct net_device *dev)
/* inform generic HDLC layer of current DCD status */
spin_lock_irqsave(&info->lock, flags);
- get_signals(info);
+ get_gtsignals(info);
spin_unlock_irqrestore(&info->lock, flags);
if (info->signals & SerialSignal_DCD)
netif_carrier_on(dev);
@@ -2229,7 +2229,7 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
info->signals &= ~SerialSignal_RTS;
info->drop_rts_on_tx_done = false;
- set_signals(info);
+ set_gtsignals(info);
}
#if SYNCLINK_GENERIC_HDLC
@@ -2394,7 +2394,7 @@ static void shutdown(struct slgt_info *info)
if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- set_signals(info);
+ set_gtsignals(info);
}
flush_cond_wait(&info->gpio_wait_q);
@@ -2422,7 +2422,7 @@ static void program_hw(struct slgt_info *info)
else
async_mode(info);
- set_signals(info);
+ set_gtsignals(info);
info->dcd_chkcount = 0;
info->cts_chkcount = 0;
@@ -2430,7 +2430,7 @@ static void program_hw(struct slgt_info *info)
info->dsr_chkcount = 0;
slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
- get_signals(info);
+ get_gtsignals(info);
if (info->netcount ||
(info->port.tty && info->port.tty->termios.c_cflag & CREAD))
@@ -2667,7 +2667,7 @@ static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr)
spin_lock_irqsave(&info->lock,flags);
/* return immediately if state matches requested events */
- get_signals(info);
+ get_gtsignals(info);
s = info->signals;
events = mask &
@@ -3085,7 +3085,7 @@ static int tiocmget(struct tty_struct *tty)
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
+ get_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
@@ -3124,7 +3124,7 @@ static int tiocmset(struct tty_struct *tty,
info->signals &= ~SerialSignal_DTR;
spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
+ set_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
return 0;
}
@@ -3135,7 +3135,7 @@ static int carrier_raised(struct tty_port *port)
struct slgt_info *info = container_of(port, struct slgt_info, port);
spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
+ get_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
return (info->signals & SerialSignal_DCD) ? 1 : 0;
}
@@ -3150,7 +3150,7 @@ static void dtr_rts(struct tty_port *port, int on)
info->signals |= SerialSignal_RTS | SerialSignal_DTR;
else
info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- set_signals(info);
+ set_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -3948,10 +3948,10 @@ static void tx_start(struct slgt_info *info)
if (info->params.mode != MGSL_MODE_ASYNC) {
if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
- get_signals(info);
+ get_gtsignals(info);
if (!(info->signals & SerialSignal_RTS)) {
info->signals |= SerialSignal_RTS;
- set_signals(info);
+ set_gtsignals(info);
info->drop_rts_on_tx_done = true;
}
}
@@ -4005,7 +4005,7 @@ static void reset_port(struct slgt_info *info)
rx_stop(info);
info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- set_signals(info);
+ set_gtsignals(info);
slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
}
@@ -4427,7 +4427,7 @@ static void tx_set_idle(struct slgt_info *info)
/*
* get state of V24 status (input) signals
*/
-static void get_signals(struct slgt_info *info)
+static void get_gtsignals(struct slgt_info *info)
{
unsigned short status = rd_reg16(info, SSR);
@@ -4489,7 +4489,7 @@ static void msc_set_vcr(struct slgt_info *info)
/*
* set state of V24 control (output) signals
*/
-static void set_signals(struct slgt_info *info)
+static void set_gtsignals(struct slgt_info *info)
{
unsigned char val = rd_reg8(info, VCR);
if (info->signals & SerialSignal_DTR)
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 756a4bfa6a69..3e4e0b20b4bb 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -812,7 +812,6 @@ void tty_ldisc_release(struct tty_struct *tty)
tty_ldisc_debug(tty, "released\n");
}
-EXPORT_SYMBOL_GPL(tty_ldisc_release);
/**
* tty_ldisc_init - ldisc setup for new tty
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index 5d8c982019af..1f3b4a142212 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -1100,6 +1100,19 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
return 0;
}
+static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+ if (priv_dev->dev_ver < DEV_VER_V3)
+ return;
+
+ if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) {
+ writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts);
+ writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+ }
+}
+
/**
* cdns3_ep_run_transfer - start transfer on no-default endpoint hardware
* @priv_ep: endpoint object
@@ -1351,6 +1364,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
/* clearing TRBERR and EP_STS_DESCMIS before setting DRDY */
writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+ cdns3_rearm_drdy_if_needed(priv_ep);
trace_cdns3_doorbell_epx(priv_ep->name,
readl(&priv_dev->regs->ep_traddr));
}
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 8b7bc10b6e8b..f1d100671ee6 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -420,11 +420,16 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
if (IS_ERR(data->phy)) {
ret = PTR_ERR(data->phy);
- /* Return -EINVAL if no usbphy is available */
- if (ret == -ENODEV)
- data->phy = NULL;
- else
- goto err_clk;
+ if (ret == -ENODEV) {
+ data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
+ if (IS_ERR(data->phy)) {
+ ret = PTR_ERR(data->phy);
+ if (ret == -ENODEV)
+ data->phy = NULL;
+ else
+ goto err_clk;
+ }
+ }
}
pdata.usb_phy = data->phy;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 8bbd8e29e60d..7b2e2420ecae 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -340,6 +340,9 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
acm->iocount.overrun++;
spin_unlock_irqrestore(&acm->read_lock, flags);
+ if (newctrl & ACM_CTRL_BRK)
+ tty_flip_buffer_push(&acm->port);
+
if (difference)
wake_up_all(&acm->wioctl);
@@ -475,11 +478,16 @@ static int acm_submit_read_urbs(struct acm *acm, gfp_t mem_flags)
static void acm_process_read_urb(struct acm *acm, struct urb *urb)
{
+ unsigned long flags;
+
if (!urb->actual_length)
return;
+ spin_lock_irqsave(&acm->read_lock, flags);
tty_insert_flip_string(&acm->port, urb->transfer_buffer,
urb->actual_length);
+ spin_unlock_irqrestore(&acm->read_lock, flags);
+
tty_flip_buffer_push(&acm->port);
}
@@ -726,7 +734,8 @@ static void acm_port_destruct(struct tty_port *port)
{
struct acm *acm = container_of(port, struct acm, port);
- acm_release_minor(acm);
+ if (acm->minor != ACM_MINOR_INVALID)
+ acm_release_minor(acm);
usb_put_intf(acm->control);
kfree(acm->country_codes);
kfree(acm);
@@ -1323,8 +1332,10 @@ made_compressed_probe:
usb_get_intf(acm->control); /* undone in destruct() */
minor = acm_alloc_minor(acm);
- if (minor < 0)
+ if (minor < 0) {
+ acm->minor = ACM_MINOR_INVALID;
goto err_put_port;
+ }
acm->minor = minor;
acm->dev = usb_dev;
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 8aef5eb769a0..3aa7f0a3ad71 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -22,6 +22,8 @@
#define ACM_TTY_MAJOR 166
#define ACM_TTY_MINORS 256
+#define ACM_MINOR_INVALID ACM_TTY_MINORS
+
/*
* Requests.
*/
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 35d5908b5478..fdf79bcf7eb0 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -824,7 +824,7 @@ static struct usb_class_driver wdm_class = {
};
/* --- WWAN framework integration --- */
-#ifdef CONFIG_WWAN_CORE
+#ifdef CONFIG_WWAN
static int wdm_wwan_port_start(struct wwan_port *port)
{
struct wdm_device *desc = wwan_port_get_drvdata(port);
@@ -963,11 +963,11 @@ static void wdm_wwan_rx(struct wdm_device *desc, int length)
/* inbuf has been copied, it is safe to check for outstanding data */
schedule_work(&desc->service_outs_intr);
}
-#else /* CONFIG_WWAN_CORE */
+#else /* CONFIG_WWAN */
static void wdm_wwan_init(struct wdm_device *desc) {}
static void wdm_wwan_deinit(struct wdm_device *desc) {}
static void wdm_wwan_rx(struct wdm_device *desc, int length) {}
-#endif /* CONFIG_WWAN_CORE */
+#endif /* CONFIG_WWAN */
/* --- error handling --- */
static void wdm_rxwork(struct work_struct *work)
diff --git a/drivers/usb/common/Kconfig b/drivers/usb/common/Kconfig
index 5e8a04e3dd3c..b856622431a7 100644
--- a/drivers/usb/common/Kconfig
+++ b/drivers/usb/common/Kconfig
@@ -6,8 +6,7 @@ config USB_COMMON
config USB_LED_TRIG
bool "USB LED Triggers"
- depends on LEDS_CLASS && LEDS_TRIGGERS
- select USB_COMMON
+ depends on LEDS_CLASS && USB_COMMON && LEDS_TRIGGERS
help
This option adds LED triggers for USB host and/or gadget activity.
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 0f8b7c93310e..7ee6e4cc0d89 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2761,6 +2761,26 @@ static void usb_put_invalidate_rhdev(struct usb_hcd *hcd)
}
/**
+ * usb_stop_hcd - Halt the HCD
+ * @hcd: the usb_hcd that has to be halted
+ *
+ * Stop the root-hub polling timer and invoke the HCD's ->stop callback.
+ */
+static void usb_stop_hcd(struct usb_hcd *hcd)
+{
+ hcd->rh_pollable = 0;
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ del_timer_sync(&hcd->rh_timer);
+
+ hcd->driver->stop(hcd);
+ hcd->state = HC_STATE_HALT;
+
+ /* In case the HCD restarted the timer, stop it again. */
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ del_timer_sync(&hcd->rh_timer);
+}
+
+/**
* usb_add_hcd - finish generic HCD structure initialization and register
* @hcd: the usb_hcd structure to initialize
* @irqnum: Interrupt line to allocate
@@ -2775,6 +2795,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
{
int retval;
struct usb_device *rhdev;
+ struct usb_hcd *shared_hcd;
if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
@@ -2935,24 +2956,31 @@ int usb_add_hcd(struct usb_hcd *hcd,
goto err_hcd_driver_start;
}
+ /* starting here, usbcore will pay attention to the shared HCD roothub */
+ shared_hcd = hcd->shared_hcd;
+ if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
+ retval = register_root_hub(shared_hcd);
+ if (retval != 0)
+ goto err_register_root_hub;
+
+ if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
+ usb_hcd_poll_rh_status(shared_hcd);
+ }
+
/* starting here, usbcore will pay attention to this root hub */
- retval = register_root_hub(hcd);
- if (retval != 0)
- goto err_register_root_hub;
+ if (!HCD_DEFER_RH_REGISTER(hcd)) {
+ retval = register_root_hub(hcd);
+ if (retval != 0)
+ goto err_register_root_hub;
- if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
- usb_hcd_poll_rh_status(hcd);
+ if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
+ usb_hcd_poll_rh_status(hcd);
+ }
return retval;
err_register_root_hub:
- hcd->rh_pollable = 0;
- clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- del_timer_sync(&hcd->rh_timer);
- hcd->driver->stop(hcd);
- hcd->state = HC_STATE_HALT;
- clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- del_timer_sync(&hcd->rh_timer);
+ usb_stop_hcd(hcd);
err_hcd_driver_start:
if (usb_hcd_is_primary_hcd(hcd) && hcd->irq > 0)
free_irq(irqnum, hcd);
@@ -2985,6 +3013,7 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
void usb_remove_hcd(struct usb_hcd *hcd)
{
struct usb_device *rhdev = hcd->self.root_hub;
+ bool rh_registered;
dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
@@ -2995,6 +3024,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
spin_lock_irq (&hcd_root_hub_lock);
+ rh_registered = hcd->rh_registered;
hcd->rh_registered = 0;
spin_unlock_irq (&hcd_root_hub_lock);
@@ -3004,7 +3034,8 @@ void usb_remove_hcd(struct usb_hcd *hcd)
cancel_work_sync(&hcd->died_work);
mutex_lock(&usb_bus_idr_lock);
- usb_disconnect(&rhdev); /* Sets rhdev to NULL */
+ if (rh_registered)
+ usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_idr_lock);
/*
@@ -3022,16 +3053,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
* interrupt occurs), but usb_hcd_poll_rh_status() won't invoke
* the hub_status_data() callback.
*/
- hcd->rh_pollable = 0;
- clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- del_timer_sync(&hcd->rh_timer);
-
- hcd->driver->stop(hcd);
- hcd->state = HC_STATE_HALT;
-
- /* In case the HCD restarted the timer, stop it again. */
- clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- del_timer_sync(&hcd->rh_timer);
+ usb_stop_hcd(hcd);
if (usb_hcd_is_primary_hcd(hcd)) {
if (hcd->irq > 0)
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 837237e4bc96..11d85a6e0b0d 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -115,10 +115,16 @@ static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
*/
static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u16 limit = DSTS_SOFFN_LIMIT;
+
+ if (hsotg->gadget.speed != USB_SPEED_HIGH)
+ limit >>= 3;
+
hs_ep->target_frame += hs_ep->interval;
- if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
+ if (hs_ep->target_frame > limit) {
hs_ep->frame_overrun = true;
- hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
+ hs_ep->target_frame &= limit;
} else {
hs_ep->frame_overrun = false;
}
@@ -136,10 +142,16 @@ static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
*/
static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u16 limit = DSTS_SOFFN_LIMIT;
+
+ if (hsotg->gadget.speed != USB_SPEED_HIGH)
+ limit >>= 3;
+
if (hs_ep->target_frame)
hs_ep->target_frame -= 1;
else
- hs_ep->target_frame = DSTS_SOFFN_LIMIT;
+ hs_ep->target_frame = limit;
}
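Both dwc2 helpers now derive the wrap limit from the bus speed: the DSTS SOFFN field counts 14-bit microframes at high speed, while the full-speed frame counter is three bits narrower, hence the >> 3. A sketch of the wrap arithmetic, assuming the 0x3fff limit:

```c
#include <stdint.h>
#include <stdio.h>

#define SOFFN_LIMIT 0x3fff   /* 14-bit (micro)frame counter, as in DSTS */

/* High speed counts 14-bit microframes; full speed counts 11-bit
 * frames, so the limit is shifted right by 3. */
static uint16_t next_frame(uint16_t frame, uint16_t interval, int high_speed)
{
	uint16_t limit = SOFFN_LIMIT;

	if (!high_speed)
		limit >>= 3;     /* 0x7ff for full speed */

	frame += interval;
	if (frame > limit)
		frame &= limit;  /* wrap, mirroring the frame_overrun handling */
	return frame;
}

int main(void)
{
	printf("hs: 0x%x\n", next_frame(0x3ffe, 4, 1));  /* wraps at 0x3fff */
	printf("fs: 0x%x\n", next_frame(0x7fe, 4, 0));   /* wraps at 0x7ff */
	return 0;
}
```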
/**
@@ -1018,6 +1030,12 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
dwc2_writel(hsotg, ctrl, depctl);
}
+static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep);
+static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep,
+ struct dwc2_hsotg_req *hs_req,
+ int result);
+
/**
* dwc2_hsotg_start_req - start a USB request from an endpoint's queue
* @hsotg: The controller state.
@@ -1170,14 +1188,19 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
}
}
- if (hs_ep->isochronous && hs_ep->interval == 1) {
- hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
- dwc2_gadget_incr_frame_num(hs_ep);
-
- if (hs_ep->target_frame & 0x1)
- ctrl |= DXEPCTL_SETODDFR;
- else
- ctrl |= DXEPCTL_SETEVENFR;
+ if (hs_ep->isochronous) {
+ if (!dwc2_gadget_target_frame_elapsed(hs_ep)) {
+ if (hs_ep->interval == 1) {
+ if (hs_ep->target_frame & 0x1)
+ ctrl |= DXEPCTL_SETODDFR;
+ else
+ ctrl |= DXEPCTL_SETEVENFR;
+ }
+ ctrl |= DXEPCTL_CNAK;
+ } else {
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
+ return;
+ }
}
ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
@@ -1325,12 +1348,16 @@ static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
u32 target_frame = hs_ep->target_frame;
u32 current_frame = hsotg->frame_number;
bool frame_overrun = hs_ep->frame_overrun;
+ u16 limit = DSTS_SOFFN_LIMIT;
+
+ if (hsotg->gadget.speed != USB_SPEED_HIGH)
+ limit >>= 3;
if (!frame_overrun && current_frame >= target_frame)
return true;
if (frame_overrun && current_frame >= target_frame &&
- ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
+ ((current_frame - target_frame) < limit / 2))
return true;
return false;
@@ -1713,11 +1740,9 @@ static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
*/
static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
{
- u32 mask;
struct dwc2_hsotg *hsotg = hs_ep->parent;
int dir_in = hs_ep->dir_in;
struct dwc2_hsotg_req *hs_req;
- u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
if (!list_empty(&hs_ep->queue)) {
hs_req = get_ep_head(hs_ep);
@@ -1733,9 +1758,6 @@ static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
} else {
dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
__func__);
- mask = dwc2_readl(hsotg, epmsk_reg);
- mask |= DOEPMSK_OUTTKNEPDISMSK;
- dwc2_writel(hsotg, mask, epmsk_reg);
}
}
@@ -2306,19 +2328,6 @@ static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
}
-static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
- u32 epctl_reg)
-{
- u32 ctrl;
-
- ctrl = dwc2_readl(hsotg, epctl_reg);
- if (ctrl & DXEPCTL_EOFRNUM)
- ctrl |= DXEPCTL_SETEVENFR;
- else
- ctrl |= DXEPCTL_SETODDFR;
- dwc2_writel(hsotg, ctrl, epctl_reg);
-}
-
/*
* dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
* @hs_ep - The endpoint on which transfer went
@@ -2439,20 +2448,11 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
dwc2_hsotg_ep0_zlp(hsotg, true);
}
- /*
- * Slave mode OUT transfers do not go through XferComplete so
- * adjust the ISOC parity here.
- */
- if (!using_dma(hsotg)) {
- if (hs_ep->isochronous && hs_ep->interval == 1)
- dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum));
- else if (hs_ep->isochronous && hs_ep->interval > 1)
- dwc2_gadget_incr_frame_num(hs_ep);
- }
-
/* Set actual frame number for completed transfers */
- if (!using_desc_dma(hsotg) && hs_ep->isochronous)
- req->frame_number = hsotg->frame_number;
+ if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
+ req->frame_number = hs_ep->target_frame;
+ dwc2_gadget_incr_frame_num(hs_ep);
+ }
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
}
@@ -2766,6 +2766,12 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
return;
}
+ /* Set actual frame number for completed transfers */
+ if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
+ hs_req->req.frame_number = hs_ep->target_frame;
+ dwc2_gadget_incr_frame_num(hs_ep);
+ }
+
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
}
@@ -2826,23 +2832,18 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
- if (hs_ep->isochronous) {
- dwc2_hsotg_complete_in(hsotg, hs_ep);
- return;
- }
-
if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
int dctl = dwc2_readl(hsotg, DCTL);
dctl |= DCTL_CGNPINNAK;
dwc2_writel(hsotg, dctl, DCTL);
}
- return;
- }
+ } else {
- if (dctl & DCTL_GOUTNAKSTS) {
- dctl |= DCTL_CGOUTNAK;
- dwc2_writel(hsotg, dctl, DCTL);
+ if (dctl & DCTL_GOUTNAKSTS) {
+ dctl |= DCTL_CGOUTNAK;
+ dwc2_writel(hsotg, dctl, DCTL);
+ }
}
if (!hs_ep->isochronous)
@@ -2863,8 +2864,6 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
/* Update current frame number value. */
hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
} while (dwc2_gadget_target_frame_elapsed(hs_ep));
-
- dwc2_gadget_start_next_request(hs_ep);
}
/**
@@ -2881,8 +2880,8 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
{
struct dwc2_hsotg *hsotg = ep->parent;
+ struct dwc2_hsotg_req *hs_req;
int dir_in = ep->dir_in;
- u32 doepmsk;
if (dir_in || !ep->isochronous)
return;
@@ -2896,28 +2895,39 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
return;
}
- if (ep->interval > 1 &&
- ep->target_frame == TARGET_FRAME_INITIAL) {
+ if (ep->target_frame == TARGET_FRAME_INITIAL) {
u32 ctrl;
ep->target_frame = hsotg->frame_number;
- dwc2_gadget_incr_frame_num(ep);
+ if (ep->interval > 1) {
+ ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
+ if (ep->target_frame & 0x1)
+ ctrl |= DXEPCTL_SETODDFR;
+ else
+ ctrl |= DXEPCTL_SETEVENFR;
- ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
- if (ep->target_frame & 0x1)
- ctrl |= DXEPCTL_SETODDFR;
- else
- ctrl |= DXEPCTL_SETEVENFR;
+ dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
+ }
+ }
+
+ while (dwc2_gadget_target_frame_elapsed(ep)) {
+ hs_req = get_ep_head(ep);
+ if (hs_req)
+ dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
- dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
+ dwc2_gadget_incr_frame_num(ep);
+ /* Update current frame number value. */
+ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
}
- dwc2_gadget_start_next_request(ep);
- doepmsk = dwc2_readl(hsotg, DOEPMSK);
- doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
- dwc2_writel(hsotg, doepmsk, DOEPMSK);
+ if (!ep->req)
+ dwc2_gadget_start_next_request(ep);
+
}
+static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep);
+
/**
* dwc2_gadget_handle_nak - handle NAK interrupt
* @hs_ep: The endpoint on which interrupt is asserted.
@@ -2935,7 +2945,9 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg_req *hs_req;
int dir_in = hs_ep->dir_in;
+ u32 ctrl;
if (!dir_in || !hs_ep->isochronous)
return;
@@ -2977,13 +2989,29 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
}
-
- dwc2_hsotg_complete_request(hsotg, hs_ep,
- get_ep_head(hs_ep), 0);
}
- if (!using_desc_dma(hsotg))
+ if (using_desc_dma(hsotg))
+ return;
+
+ ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
+ if (ctrl & DXEPCTL_EPENA)
+ dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
+ else
+ dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
+
+ while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
+ hs_req = get_ep_head(hs_ep);
+ if (hs_req)
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
+
dwc2_gadget_incr_frame_num(hs_ep);
+ /* Update current frame number value. */
+ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
+ }
+
+ if (!hs_ep->req)
+ dwc2_gadget_start_next_request(hs_ep);
}
/**
@@ -3039,21 +3067,15 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
/* In DDMA handle isochronous requests separately */
if (using_desc_dma(hsotg) && hs_ep->isochronous) {
- /* XferCompl set along with BNA */
- if (!(ints & DXEPINT_BNAINTR))
- dwc2_gadget_complete_isoc_request_ddma(hs_ep);
+ dwc2_gadget_complete_isoc_request_ddma(hs_ep);
} else if (dir_in) {
/*
* We get OutDone from the FIFO, so we only
* need to look at completing IN requests here
* if operating slave mode
*/
- if (hs_ep->isochronous && hs_ep->interval > 1)
- dwc2_gadget_incr_frame_num(hs_ep);
-
- dwc2_hsotg_complete_in(hsotg, hs_ep);
- if (ints & DXEPINT_NAKINTRPT)
- ints &= ~DXEPINT_NAKINTRPT;
+ if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
+ dwc2_hsotg_complete_in(hsotg, hs_ep);
if (idx == 0 && !hs_ep->req)
dwc2_hsotg_enqueue_setup(hsotg);
@@ -3062,10 +3084,8 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
* We're using DMA, we need to fire an OutDone here
* as we ignore the RXFIFO.
*/
- if (hs_ep->isochronous && hs_ep->interval > 1)
- dwc2_gadget_incr_frame_num(hs_ep);
-
- dwc2_hsotg_handle_outdone(hsotg, idx);
+ if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
+ dwc2_hsotg_handle_outdone(hsotg, idx);
}
}
@@ -4085,6 +4105,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
mask |= DIEPMSK_NAKMSK;
dwc2_writel(hsotg, mask, DIEPMSK);
} else {
+ epctrl |= DXEPCTL_SNAK;
mask = dwc2_readl(hsotg, DOEPMSK);
mask |= DOEPMSK_OUTTKNEPDISMSK;
dwc2_writel(hsotg, mask, DOEPMSK);
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 2a7828971d05..a215ec9e172e 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -5191,6 +5191,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
hcd->has_tt = 1;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ retval = -EINVAL;
+ goto error1;
+ }
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 01866dcb953b..0104a80b185e 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -264,19 +264,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
{
u32 reg;
int retries = 1000;
- int ret;
-
- usb_phy_init(dwc->usb2_phy);
- usb_phy_init(dwc->usb3_phy);
- ret = phy_init(dwc->usb2_generic_phy);
- if (ret < 0)
- return ret;
-
- ret = phy_init(dwc->usb3_generic_phy);
- if (ret < 0) {
- phy_exit(dwc->usb2_generic_phy);
- return ret;
- }
/*
* We're resetting only the device side because, if we're in host mode,
@@ -310,9 +297,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
udelay(1);
} while (--retries);
- phy_exit(dwc->usb3_generic_phy);
- phy_exit(dwc->usb2_generic_phy);
-
return -ETIMEDOUT;
done:
@@ -982,9 +966,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc->phys_ready = true;
}
+ usb_phy_init(dwc->usb2_phy);
+ usb_phy_init(dwc->usb3_phy);
+ ret = phy_init(dwc->usb2_generic_phy);
+ if (ret < 0)
+ goto err0a;
+
+ ret = phy_init(dwc->usb3_generic_phy);
+ if (ret < 0) {
+ phy_exit(dwc->usb2_generic_phy);
+ goto err0a;
+ }
+
ret = dwc3_core_soft_reset(dwc);
if (ret)
- goto err0a;
+ goto err1;
if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 804b50548163..4519d06c9ca2 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -4243,7 +4243,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
}
- usb_initialize_gadget(dwc->sysdev, dwc->gadget, dwc_gadget_release);
+ usb_initialize_gadget(dwc->dev, dwc->gadget, dwc_gadget_release);
dev = &dwc->gadget->dev;
dev->platform_data = dwc;
dwc->gadget->ops = &dwc3_gadget_ops;
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index 0b468f5d55bc..068ed8417e5a 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -267,6 +267,8 @@ static const struct net_device_ops pn_netdev_ops = {
static void pn_net_setup(struct net_device *dev)
{
+ const u8 addr = PN_MEDIA_USB;
+
dev->features = 0;
dev->type = ARPHRD_PHONET;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
@@ -274,8 +276,9 @@ static void pn_net_setup(struct net_device *dev)
dev->min_mtu = PHONET_MIN_MTU;
dev->max_mtu = PHONET_MAX_MTU;
dev->hard_header_len = 1;
- dev->dev_addr[0] = PN_MEDIA_USB;
dev->addr_len = 1;
+ dev_addr_set(dev, &addr);
+
dev->tx_queue_len = 1;
dev->netdev_ops = &pn_netdev_ops;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 3c34995276e7..ef55b8bb5870 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -406,6 +406,14 @@ static struct usb_endpoint_descriptor ss_epin_fback_desc = {
.bInterval = 4,
};
+static struct usb_ss_ep_comp_descriptor ss_epin_fback_desc_comp = {
+ .bLength = sizeof(ss_epin_fback_desc_comp),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ .wBytesPerInterval = cpu_to_le16(4),
+};
+
/* Audio Streaming IN Interface - Alt0 */
static struct usb_interface_descriptor std_as_in_if0_desc = {
@@ -597,6 +605,7 @@ static struct usb_descriptor_header *ss_audio_desc[] = {
(struct usb_descriptor_header *)&ss_epout_desc_comp,
(struct usb_descriptor_header *)&as_iso_out_desc,
(struct usb_descriptor_header *)&ss_epin_fback_desc,
+ (struct usb_descriptor_header *)&ss_epin_fback_desc_comp,
(struct usb_descriptor_header *)&std_as_in_if0_desc,
(struct usb_descriptor_header *)&std_as_in_if1_desc,
@@ -665,11 +674,17 @@ static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
ssize = uac2_opts->c_ssize;
}
- if (!is_playback && (uac2_opts->c_sync == USB_ENDPOINT_SYNC_ASYNC))
+ if (!is_playback && (uac2_opts->c_sync == USB_ENDPOINT_SYNC_ASYNC)) {
+ // Win10 requires max packet size + 1 frame
srate = srate * (1000 + uac2_opts->fb_max) / 1000;
-
- max_size_bw = num_channels(chmask) * ssize *
- DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
+ // the updated srate is always larger, so DIV_ROUND_UP already yields the +1
+ max_size_bw = num_channels(chmask) * ssize *
+ (DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))));
+ } else {
+ // adding 1 frame provision for Win10
+ max_size_bw = num_channels(chmask) * ssize *
+ (DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))) + 1);
+ }
ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
max_size_ep));
@@ -705,6 +720,7 @@ static void setup_headers(struct f_uac2_opts *opts,
{
struct usb_ss_ep_comp_descriptor *epout_desc_comp = NULL;
struct usb_ss_ep_comp_descriptor *epin_desc_comp = NULL;
+ struct usb_ss_ep_comp_descriptor *epin_fback_desc_comp = NULL;
struct usb_endpoint_descriptor *epout_desc;
struct usb_endpoint_descriptor *epin_desc;
struct usb_endpoint_descriptor *epin_fback_desc;
@@ -730,6 +746,7 @@ static void setup_headers(struct f_uac2_opts *opts,
epout_desc_comp = &ss_epout_desc_comp;
epin_desc_comp = &ss_epin_desc_comp;
epin_fback_desc = &ss_epin_fback_desc;
+ epin_fback_desc_comp = &ss_epin_fback_desc_comp;
ep_int_desc = &ss_ep_int_desc;
}
@@ -773,8 +790,11 @@ static void setup_headers(struct f_uac2_opts *opts,
headers[i++] = USBDHDR(&as_iso_out_desc);
- if (EPOUT_FBACK_IN_EN(opts))
+ if (EPOUT_FBACK_IN_EN(opts)) {
headers[i++] = USBDHDR(epin_fback_desc);
+ if (epin_fback_desc_comp)
+ headers[i++] = USBDHDR(epin_fback_desc_comp);
+ }
}
if (EPIN_EN(opts)) {
@@ -1164,6 +1184,9 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
agdev->out_ep_maxpsize = max_t(u16, agdev->out_ep_maxpsize,
le16_to_cpu(ss_epout_desc.wMaxPacketSize));
+ ss_epin_desc_comp.wBytesPerInterval = ss_epin_desc.wMaxPacketSize;
+ ss_epout_desc_comp.wBytesPerInterval = ss_epout_desc.wMaxPacketSize;
+
// HS and SS endpoint addresses are copied from autoconfigured FS descriptors
hs_ep_int_desc.bEndpointAddress = fs_ep_int_desc.bEndpointAddress;
hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 32ef22857083..ad16163b5ff8 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -96,11 +96,13 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
};
static void u_audio_set_fback_frequency(enum usb_device_speed speed,
+ struct usb_ep *out_ep,
unsigned long long freq,
unsigned int pitch,
void *buf)
{
u32 ff = 0;
+ const struct usb_endpoint_descriptor *ep_desc;
/*
* Because the pitch base is 1000000, the final divider here
@@ -128,8 +130,13 @@ static void u_audio_set_fback_frequency(enum usb_device_speed speed,
* byte format (that is Q16.16)
*
* ff = (freq << 16) / 8000
+ *
+ * Win10 and OSX UAC2 drivers require the number of samples per packet
+ * in order to honor the feedback value.
+ * Linux snd-usb-audio detects the applied bit-shift automatically.
*/
- freq <<= 4;
+ ep_desc = out_ep->desc;
+ freq <<= 4 + (ep_desc->bInterval - 1);
}
ff = DIV_ROUND_CLOSEST_ULL((freq * pitch), 1953125);
@@ -267,7 +274,7 @@ static void u_audio_iso_fback_complete(struct usb_ep *ep,
pr_debug("%s: iso_complete status(%d) %d/%d\n",
__func__, status, req->actual, req->length);
- u_audio_set_fback_frequency(audio_dev->gadget->speed,
+ u_audio_set_fback_frequency(audio_dev->gadget->speed, audio_dev->out_ep,
params->c_srate, prm->pitch,
req->buf);
@@ -526,7 +533,7 @@ int u_audio_start_capture(struct g_audio *audio_dev)
* be measured at the start of playback
*/
prm->pitch = 1000000;
- u_audio_set_fback_frequency(audio_dev->gadget->speed,
+ u_audio_set_fback_frequency(audio_dev->gadget->speed, ep,
params->c_srate, prm->pitch,
req_fback->buf);
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 65cae4883454..38e4d6b505a0 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1250,7 +1250,7 @@ static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
do {
tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
udelay(1);
- } while (tmp != CS_IDST || timeout-- > 0);
+ } while (tmp != CS_IDST && timeout-- > 0);
if (tmp == CS_IDST)
r8a66597_bset(r8a66597,
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 337b425dd4b0..2df52f75f6b3 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -406,12 +406,9 @@ static int bcma_hcd_probe(struct bcma_device *core)
return -ENOMEM;
usb_dev->core = core;
- if (core->dev.of_node) {
+ if (core->dev.of_node)
usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
GPIOD_OUT_HIGH);
- if (IS_ERR(usb_dev->gpio_desc))
- return PTR_ERR(usb_dev->gpio_desc);
- }
switch (core->id.id) {
case BCMA_CORE_USB20_HOST:
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6bdc6d6bf74d..1776c05d0a48 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -26,6 +26,7 @@
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
@@ -1278,29 +1279,39 @@ MODULE_LICENSE ("GPL");
#ifdef CONFIG_USB_EHCI_SH
#include "ehci-sh.c"
-#define PLATFORM_DRIVER ehci_hcd_sh_driver
#endif
#ifdef CONFIG_PPC_PS3
#include "ehci-ps3.c"
-#define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver
#endif
#ifdef CONFIG_USB_EHCI_HCD_PPC_OF
#include "ehci-ppc-of.c"
-#define OF_PLATFORM_DRIVER ehci_hcd_ppc_of_driver
#endif
#ifdef CONFIG_XPS_USB_HCD_XILINX
#include "ehci-xilinx-of.c"
-#define XILINX_OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver
#endif
#ifdef CONFIG_SPARC_LEON
#include "ehci-grlib.c"
-#define PLATFORM_DRIVER ehci_grlib_driver
#endif
+static struct platform_driver * const platform_drivers[] = {
+#ifdef CONFIG_USB_EHCI_SH
+ &ehci_hcd_sh_driver,
+#endif
+#ifdef CONFIG_USB_EHCI_HCD_PPC_OF
+ &ehci_hcd_ppc_of_driver,
+#endif
+#ifdef CONFIG_XPS_USB_HCD_XILINX
+ &ehci_hcd_xilinx_of_driver,
+#endif
+#ifdef CONFIG_SPARC_LEON
+ &ehci_grlib_driver,
+#endif
+};
+
static int __init ehci_hcd_init(void)
{
int retval = 0;
@@ -1324,47 +1335,23 @@ static int __init ehci_hcd_init(void)
ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
#endif
-#ifdef PLATFORM_DRIVER
- retval = platform_driver_register(&PLATFORM_DRIVER);
+ retval = platform_register_drivers(platform_drivers, ARRAY_SIZE(platform_drivers));
if (retval < 0)
goto clean0;
-#endif
-
-#ifdef PS3_SYSTEM_BUS_DRIVER
- retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
- if (retval < 0)
- goto clean2;
-#endif
-#ifdef OF_PLATFORM_DRIVER
- retval = platform_driver_register(&OF_PLATFORM_DRIVER);
+#ifdef CONFIG_PPC_PS3
+ retval = ps3_ehci_driver_register(&ps3_ehci_driver);
if (retval < 0)
- goto clean3;
+ goto clean1;
#endif
-#ifdef XILINX_OF_PLATFORM_DRIVER
- retval = platform_driver_register(&XILINX_OF_PLATFORM_DRIVER);
- if (retval < 0)
- goto clean4;
-#endif
- return retval;
+ return 0;
-#ifdef XILINX_OF_PLATFORM_DRIVER
- /* platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); */
-clean4:
-#endif
-#ifdef OF_PLATFORM_DRIVER
- platform_driver_unregister(&OF_PLATFORM_DRIVER);
-clean3:
-#endif
-#ifdef PS3_SYSTEM_BUS_DRIVER
- ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
-clean2:
+#ifdef CONFIG_PPC_PS3
+clean1:
#endif
-#ifdef PLATFORM_DRIVER
- platform_driver_unregister(&PLATFORM_DRIVER);
+ platform_unregister_drivers(platform_drivers, ARRAY_SIZE(platform_drivers));
clean0:
-#endif
#ifdef CONFIG_DYNAMIC_DEBUG
debugfs_remove(ehci_debug_root);
ehci_debug_root = NULL;
@@ -1376,18 +1363,10 @@ module_init(ehci_hcd_init);
static void __exit ehci_hcd_cleanup(void)
{
-#ifdef XILINX_OF_PLATFORM_DRIVER
- platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER);
-#endif
-#ifdef OF_PLATFORM_DRIVER
- platform_driver_unregister(&OF_PLATFORM_DRIVER);
-#endif
-#ifdef PLATFORM_DRIVER
- platform_driver_unregister(&PLATFORM_DRIVER);
-#endif
-#ifdef PS3_SYSTEM_BUS_DRIVER
- ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
+#ifdef CONFIG_PPC_PS3
+ ps3_ehci_driver_unregister(&ps3_ehci_driver);
#endif
+ platform_unregister_drivers(platform_drivers, ARRAY_SIZE(platform_drivers));
#ifdef CONFIG_DYNAMIC_DEBUG
debugfs_remove(ehci_debug_root);
#endif
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 0b3722770760..ded9738392e4 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -40,17 +40,6 @@
#include <mach/usb.h>
-/* OMAP-1510 OHCI has its own MMU for DMA */
-#define OMAP1510_LB_MEMSIZE 32 /* Should be same as SDRAM size */
-#define OMAP1510_LB_CLOCK_DIV 0xfffec10c
-#define OMAP1510_LB_MMU_CTL 0xfffec208
-#define OMAP1510_LB_MMU_LCK 0xfffec224
-#define OMAP1510_LB_MMU_LD_TLB 0xfffec228
-#define OMAP1510_LB_MMU_CAM_H 0xfffec22c
-#define OMAP1510_LB_MMU_CAM_L 0xfffec230
-#define OMAP1510_LB_MMU_RAM_H 0xfffec234
-#define OMAP1510_LB_MMU_RAM_L 0xfffec238
-
#define DRIVER_DESC "OHCI OMAP driver"
struct ohci_omap_priv {
@@ -104,61 +93,6 @@ static int omap_ohci_transceiver_power(struct ohci_omap_priv *priv, int on)
return 0;
}
-#ifdef CONFIG_ARCH_OMAP15XX
-/*
- * OMAP-1510 specific Local Bus clock on/off
- */
-static int omap_1510_local_bus_power(int on)
-{
- if (on) {
- omap_writel((1 << 1) | (1 << 0), OMAP1510_LB_MMU_CTL);
- udelay(200);
- } else {
- omap_writel(0, OMAP1510_LB_MMU_CTL);
- }
-
- return 0;
-}
-
-/*
- * OMAP-1510 specific Local Bus initialization
- * NOTE: This assumes 32MB memory size in OMAP1510LB_MEMSIZE.
- * See also arch/mach-omap/memory.h for __virt_to_dma() and
- * __dma_to_virt() which need to match with the physical
- * Local Bus address below.
- */
-static int omap_1510_local_bus_init(void)
-{
- unsigned int tlb;
- unsigned long lbaddr, physaddr;
-
- omap_writel((omap_readl(OMAP1510_LB_CLOCK_DIV) & 0xfffffff8) | 0x4,
- OMAP1510_LB_CLOCK_DIV);
-
- /* Configure the Local Bus MMU table */
- for (tlb = 0; tlb < OMAP1510_LB_MEMSIZE; tlb++) {
- lbaddr = tlb * 0x00100000 + OMAP1510_LB_OFFSET;
- physaddr = tlb * 0x00100000 + PHYS_OFFSET;
- omap_writel((lbaddr & 0x0fffffff) >> 22, OMAP1510_LB_MMU_CAM_H);
- omap_writel(((lbaddr & 0x003ffc00) >> 6) | 0xc,
- OMAP1510_LB_MMU_CAM_L);
- omap_writel(physaddr >> 16, OMAP1510_LB_MMU_RAM_H);
- omap_writel((physaddr & 0x0000fc00) | 0x300, OMAP1510_LB_MMU_RAM_L);
- omap_writel(tlb << 4, OMAP1510_LB_MMU_LCK);
- omap_writel(0x1, OMAP1510_LB_MMU_LD_TLB);
- }
-
- /* Enable the walking table */
- omap_writel(omap_readl(OMAP1510_LB_MMU_CTL) | (1 << 3), OMAP1510_LB_MMU_CTL);
- udelay(200);
-
- return 0;
-}
-#else
-#define omap_1510_local_bus_power(x) {}
-#define omap_1510_local_bus_init() {}
-#endif
-
#ifdef CONFIG_USB_OTG
static void start_hnp(struct ohci_hcd *ohci)
@@ -229,10 +163,8 @@ static int ohci_omap_reset(struct usb_hcd *hcd)
omap_ohci_clock_power(priv, 1);
- if (cpu_is_omap15xx()) {
- omap_1510_local_bus_power(1);
- omap_1510_local_bus_init();
- }
+ if (config->lb_reset)
+ config->lb_reset();
ret = ohci_setup(hcd);
if (ret < 0)
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index 6e784f2fc26d..eb46e642e87a 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -408,40 +408,38 @@ static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
return -EBUSY;
xhci_dbc_tty_init_port(dbc, port);
- tty_dev = tty_port_register_device(&port->port,
- dbc_tty_driver, 0, NULL);
- if (IS_ERR(tty_dev)) {
- ret = PTR_ERR(tty_dev);
- goto register_fail;
- }
ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
if (ret)
- goto buf_alloc_fail;
+ goto err_exit_port;
ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
dbc_read_complete);
if (ret)
- goto request_fail;
+ goto err_free_fifo;
ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
dbc_write_complete);
if (ret)
- goto request_fail;
+ goto err_free_requests;
+
+ tty_dev = tty_port_register_device(&port->port,
+ dbc_tty_driver, 0, NULL);
+ if (IS_ERR(tty_dev)) {
+ ret = PTR_ERR(tty_dev);
+ goto err_free_requests;
+ }
port->registered = true;
return 0;
-request_fail:
+err_free_requests:
xhci_dbc_free_requests(&port->read_pool);
xhci_dbc_free_requests(&port->write_pool);
+err_free_fifo:
kfifo_free(&port->write_fifo);
-
-buf_alloc_fail:
- tty_unregister_device(dbc_tty_driver, 0);
-
-register_fail:
+err_exit_port:
xhci_dbc_tty_exit_port(port);
dev_err(dbc->dev, "can't register tty port, err %d\n", ret);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 2c9f25ca8edd..2484a9d38ce2 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -30,6 +30,7 @@
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009
+#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 0x1100
#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400
#define PCI_VENDOR_ID_ETRON 0x1b6f
@@ -113,6 +114,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
/* Look for vendor-specific quirks */
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
(pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
+ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 ||
pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
pdev->revision == 0x0) {
@@ -279,8 +281,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == 0x3432)
xhci->quirks |= XHCI_BROKEN_STREAMS;
- if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
+ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) {
xhci->quirks |= XHCI_LPM_SUPPORT;
+ xhci->quirks |= XHCI_EP_CTX_BROKEN_DCS;
+ }
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index e676749f543b..311597bba80e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -366,16 +366,22 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
/* Must be called with xhci->lock held; releases and re-acquires the lock */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
- u64 temp_64;
+ u32 temp_32;
int ret;
xhci_dbg(xhci, "Abort command ring\n");
reinit_completion(&xhci->cmd_ring_stop_completion);
- temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
- xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
- &xhci->op_regs->cmd_ring);
+ /*
+ * The control bits, such as command stop and abort, live in the lower
+ * dword of the command ring control register. Limit the write
+ * to the lower dword to avoid corrupting the command ring pointer
+ * in case the command ring is stopped by the time the upper dword
+ * is written.
+ */
+ temp_32 = readl(&xhci->op_regs->cmd_ring);
+ writel(temp_32 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
* completion of the Command Abort operation. If CRR is not negated in 5
@@ -559,8 +565,11 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring;
struct xhci_command *cmd;
struct xhci_segment *new_seg;
+ struct xhci_segment *halted_seg = NULL;
union xhci_trb *new_deq;
int new_cycle;
+ union xhci_trb *halted_trb;
+ int index = 0;
dma_addr_t addr;
u64 hw_dequeue;
bool cycle_found = false;
@@ -598,7 +607,27 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
new_seg = ep_ring->deq_seg;
new_deq = ep_ring->dequeue;
- new_cycle = hw_dequeue & 0x1;
+
+ /*
+ * Quirk: xHC write-back of the DCS field in the hardware dequeue
+ * pointer is wrong - use the cycle state of the TRB pointed to by
+ * the dequeue pointer.
+ */
+ if (xhci->quirks & XHCI_EP_CTX_BROKEN_DCS &&
+ !(ep->ep_state & EP_HAS_STREAMS))
+ halted_seg = trb_in_td(xhci, td->start_seg,
+ td->first_trb, td->last_trb,
+ hw_dequeue & ~0xf, false);
+ if (halted_seg) {
+ index = ((dma_addr_t)(hw_dequeue & ~0xf) - halted_seg->dma) /
+ sizeof(*halted_trb);
+ halted_trb = &halted_seg->trbs[index];
+ new_cycle = halted_trb->generic.field[3] & 0x1;
+ xhci_dbg(xhci, "Endpoint DCS = %d TRB index = %d cycle = %d\n",
+ (u8)(hw_dequeue & 0x1), index, new_cycle);
+ } else {
+ new_cycle = hw_dequeue & 0x1;
+ }
/*
* We want to find the pointer, segment and cycle state of the new trb
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 575fa89a783f..1bf494b649bd 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1787,7 +1787,6 @@ static int tegra_xusb_remove(struct platform_device *pdev)
return 0;
}
-#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP)
static bool xhci_hub_ports_suspended(struct xhci_hub *hub)
{
struct device *dev = hub->hcd->self.controller;
@@ -2102,7 +2101,7 @@ out:
return err;
}
-static int tegra_xusb_suspend(struct device *dev)
+static __maybe_unused int tegra_xusb_suspend(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
int err;
@@ -2144,7 +2143,7 @@ out:
return err;
}
-static int tegra_xusb_resume(struct device *dev)
+static __maybe_unused int tegra_xusb_resume(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
int err;
@@ -2174,10 +2173,8 @@ static int tegra_xusb_resume(struct device *dev)
return 0;
}
-#endif
-#ifdef CONFIG_PM
-static int tegra_xusb_runtime_suspend(struct device *dev)
+static __maybe_unused int tegra_xusb_runtime_suspend(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
int ret;
@@ -2190,7 +2187,7 @@ static int tegra_xusb_runtime_suspend(struct device *dev)
return ret;
}
-static int tegra_xusb_runtime_resume(struct device *dev)
+static __maybe_unused int tegra_xusb_runtime_resume(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
int err;
@@ -2201,7 +2198,6 @@ static int tegra_xusb_runtime_resume(struct device *dev)
return err;
}
-#endif
static const struct dev_pm_ops tegra_xusb_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_xusb_runtime_suspend,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index f3dabd02382c..541fe4dcc43a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -692,6 +692,7 @@ int xhci_run(struct usb_hcd *hcd)
if (ret)
xhci_free_command(xhci, command);
}
+ set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Finished xhci_run for USB2 roothub");
@@ -3213,10 +3214,13 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
return;
/* Bail out if toggle is already being cleared by a endpoint reset */
+ spin_lock_irqsave(&xhci->lock, flags);
if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
+ spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
+ spin_unlock_irqrestore(&xhci->lock, flags);
/* Only interrupt and bulk endpoints use data toggle, USB2 spec 5.5.4 */
if (usb_endpoint_xfer_control(&host_ep->desc) ||
usb_endpoint_xfer_isoc(&host_ep->desc))
@@ -3302,8 +3306,10 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
xhci_free_command(xhci, cfg_cmd);
cleanup:
xhci_free_command(xhci, stop_cmd);
+ spin_lock_irqsave(&xhci->lock, flags);
if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
+ spin_unlock_irqrestore(&xhci->lock, flags);
}
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index dca6181c33fd..5a75fe563123 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1899,6 +1899,7 @@ struct xhci_hcd {
#define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39)
#define XHCI_NO_SOFT_RETRY BIT_ULL(40)
#define XHCI_BROKEN_D3COLD BIT_ULL(41)
+#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index ce9fc46c9266..b5935834f9d2 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -899,11 +899,13 @@ static int dsps_probe(struct platform_device *pdev)
if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
ret = dsps_setup_optional_vbus_irq(pdev, glue);
if (ret)
- goto err;
+ goto unregister_pdev;
}
return 0;
+unregister_pdev:
+ platform_device_unregister(glue->musb);
err:
pm_runtime_disable(&pdev->dev);
iounmap(glue->usbss_base);
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index c42937692207..c968ecda42aa 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -190,6 +190,7 @@ tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
}
if (len > 0) {
/* Write the rest 1 - 3 bytes to FIFO */
+ val = 0;
memcpy(&val, buf, len);
musb_writel(fifo, 0, val);
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 66a6ac50a4cd..189279869a8b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -233,6 +233,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
{ USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
{ USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
+ { USB_DEVICE(0x2184, 0x0030) }, /* GW Instek GDM-834x Digital Multimeter */
{ USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
@@ -258,6 +259,7 @@ struct cp210x_serial_private {
speed_t max_speed;
bool use_actual_rate;
bool no_flow_control;
+ bool no_event_mode;
};
enum cp210x_event_state {
@@ -1113,12 +1115,16 @@ static void cp210x_change_speed(struct tty_struct *tty,
static void cp210x_enable_event_mode(struct usb_serial_port *port)
{
+ struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
int ret;
if (port_priv->event_mode)
return;
+ if (priv->no_event_mode)
+ return;
+
port_priv->event_state = ES_DATA;
port_priv->event_mode = true;
@@ -2074,6 +2080,33 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
priv->use_actual_rate = use_actual_rate;
}
+static void cp2102_determine_quirks(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return;
+ /*
+ * Some (possibly counterfeit) CP2102 devices do not support event-insertion
+ * mode and respond differently to malformed vendor requests.
+ * Specifically, they return one instead of two bytes when sent a
+ * two-byte part-number request.
+ */
+ ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ CP210X_VENDOR_SPECIFIC, REQTYPE_DEVICE_TO_HOST,
+ CP210X_GET_PARTNUM, 0, buf, 2, USB_CTRL_GET_TIMEOUT);
+ if (ret == 1) {
+ dev_dbg(&serial->interface->dev,
+ "device does not support event-insertion mode\n");
+ priv->no_event_mode = true;
+ }
+
+ kfree(buf);
+}
+
static int cp210x_get_fw_version(struct usb_serial *serial, u16 value)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
@@ -2108,7 +2141,12 @@ static void cp210x_determine_type(struct usb_serial *serial)
return;
}
+ dev_dbg(&serial->interface->dev, "partnum = 0x%02x\n", priv->partnum);
+
switch (priv->partnum) {
+ case CP210X_PARTNUM_CP2102:
+ cp2102_determine_quirks(serial);
+ break;
case CP210X_PARTNUM_CP2105:
case CP210X_PARTNUM_CP2108:
cp210x_get_fw_version(serial, CP210X_GET_FW_VER);
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index d7fe33ca73e4..925067a7978d 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -107,7 +107,6 @@
#define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02
#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
-#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
/* Interrupt Routine Defines */
@@ -186,7 +185,6 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P) },
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4) },
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P) },
- { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4) },
{} /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 29c765cc8495..a484ff5e4ebf 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -246,11 +246,13 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Quectel's vendor ID */
#define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EC25 0x0125
+#define QUECTEL_PRODUCT_EG91 0x0191
#define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
+#define QUECTEL_PRODUCT_EC200S_CN 0x6002
#define QUECTEL_PRODUCT_EC200T 0x6026
#define CMOTECH_VENDOR_ID 0x16d8
@@ -1111,6 +1113,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG91, 0xff, 0xff, 0xff),
+ .driver_info = NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG91, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
@@ -1128,6 +1133,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
.driver_info = ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
@@ -1205,6 +1211,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */
.driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff), /* Telit LN920 (rmnet) */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff), /* Telit LN920 (MBIM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1062, 0xff), /* Telit LN920 (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1219,6 +1233,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff), /* Telit LE910Cx (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1204, 0xff), /* Telit LE910Cx (MBIM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1650,7 +1666,6 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
@@ -2068,6 +2083,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */
+ .driver_info = RSVD(3) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 83da8236e3c8..c18bf8164bc2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -165,6 +165,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */
{DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
{DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
+ {DEVICE_SWI(0x1199, 0x90d2)}, /* Sierra Wireless EM9191 QDL */
{DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index efa972be2ee3..c6b3fcf90180 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -416,9 +416,16 @@ UNUSUAL_DEV( 0x04cb, 0x0100, 0x0000, 0x2210,
USB_SC_UFI, USB_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN),
/*
- * Reported by Ondrej Zary <[email protected]>
+ * Reported by Ondrej Zary <[email protected]>
* The device reports one sector more and breaks when that sector is accessed
+ * Firmware versions older than 2.6c (the latest one, and the only one that
+ * claims Linux support) also have broken tag handling
*/
+UNUSUAL_DEV( 0x04ce, 0x0002, 0x0000, 0x026b,
+ "ScanLogic",
+ "SL11R-IDE",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY | US_FL_BULK_IGNORE_TAG),
UNUSUAL_DEV( 0x04ce, 0x0002, 0x026c, 0x026c,
"ScanLogic",
"SL11R-IDE",
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index c35a6db993f1..4051c8cd0cd8 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -50,7 +50,7 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
"LaCie",
"Rugged USB3-FW",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_IGNORE_UAS),
+ US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
/*
* Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 9858716698df..c15eec9cc460 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -696,7 +696,7 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
tcpm_pd_receive(tcpci->port, &msg);
}
- if (status & TCPC_ALERT_EXTENDED_STATUS) {
+ if (tcpci->data->vbus_vsafe0v && (status & TCPC_ALERT_EXTENDED_STATUS)) {
ret = regmap_read(tcpci->regmap, TCPC_EXTENDED_STATUS, &raw);
if (!ret && (raw & TCPC_EXTENDED_STATUS_VSAFE0V))
tcpm_vbus_change(tcpci->port);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index a4d37205df54..7f2f3ff1b391 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -4876,6 +4876,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
break;
case SRC_ATTACHED:
+ case SRC_STARTUP:
case SRC_SEND_CAPABILITIES:
case SRC_READY:
if (tcpm_port_is_disconnected(port) ||
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 21b3ae25c76d..ea4cc0a6e40c 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -625,10 +625,6 @@ static int tps6598x_probe(struct i2c_client *client)
if (ret < 0)
return ret;
- fwnode = device_get_named_child_node(&client->dev, "connector");
- if (!fwnode)
- return -ENODEV;
-
/*
* This fwnode has a "compatible" property, but is never populated as a
* struct device. Instead we simply parse it to read the properties.
@@ -636,7 +632,9 @@ static int tps6598x_probe(struct i2c_client *client)
* with existing DT files, we work around this by deleting any
* fwnode_links to/from this fwnode.
*/
- fw_devlink_purge_absent_suppliers(fwnode);
+ fwnode = device_get_named_child_node(&client->dev, "connector");
+ if (fwnode)
+ fw_devlink_purge_absent_suppliers(fwnode);
tps->role_sw = fwnode_usb_role_switch_get(fwnode);
if (IS_ERR(tps->role_sw)) {
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 294ba05e6fc9..bd56de7484dc 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1714,6 +1714,9 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
+ if (!mvdev->actual_features)
+ return;
+
if (!is_index_valid(mvdev, idx))
return;
@@ -2145,6 +2148,8 @@ static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
for (i = 0; i < ndev->mvdev.max_vqs; i++)
ndev->vqs[i].ready = false;
+
+ ndev->mvdev.cvq.ready = false;
}
static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 29a38ecba19e..26e3d90d1e7c 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -665,13 +665,11 @@ static void vduse_vdpa_set_config(struct vdpa_device *vdpa, unsigned int offset,
static int vduse_vdpa_reset(struct vdpa_device *vdpa)
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
-
- if (vduse_dev_set_status(dev, 0))
- return -EIO;
+ int ret = vduse_dev_set_status(dev, 0);
vduse_dev_reset(dev);
- return 0;
+ return ret;
}
static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa)
@@ -1593,8 +1591,10 @@ static int vduse_init(void)
vduse_irq_wq = alloc_workqueue("vduse-irq",
WQ_HIGHPRI | WQ_SYSFS | WQ_UNBOUND, 0);
- if (!vduse_irq_wq)
+ if (!vduse_irq_wq) {
+ ret = -ENOMEM;
goto err_wq;
+ }
ret = vduse_domain_init();
if (ret)
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 68198e0f2a63..a03b5a99c2da 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -565,7 +565,7 @@ static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
}
struct vfio_pci_walk_info {
- int (*fn)(struct pci_dev *, void *data);
+ int (*fn)(struct pci_dev *pdev, void *data);
void *data;
struct pci_dev *pdev;
bool slot;
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index f41d081777f5..39039e046117 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -173,6 +173,10 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
return -EINVAL;
+ if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
+ for (i = 0; i < nvqs; i++)
+ vhost_vdpa_unsetup_vq_irq(v, i);
+
if (status == 0) {
ret = ops->reset(vdpa);
if (ret)
@@ -184,10 +188,6 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
for (i = 0; i < nvqs; i++)
vhost_vdpa_setup_vq_irq(v, i);
- if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
- for (i = 0; i < nvqs; i++)
- vhost_vdpa_unsetup_vq_irq(v, i);
-
return 0;
}
@@ -322,7 +322,7 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
struct eventfd_ctx *ctx;
cb.callback = vhost_vdpa_config_cb;
- cb.private = v->vdpa;
+ cb.private = v;
if (copy_from_user(&fd, argp, sizeof(fd)))
return -EFAULT;
@@ -640,7 +640,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
u64 offset, map_size, map_iova = iova;
struct vdpa_map_file *map_file;
struct vm_area_struct *vma;
- int ret;
+ int ret = 0;
mmap_read_lock(dev->mm);
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index d33c5cd684c0..6ed5e608dd04 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -582,7 +582,9 @@ config FB_HP300
config FB_TGA
tristate "TGA/SFB+ framebuffer support"
- depends on FB && (ALPHA || TC)
+ depends on FB
+ depends on PCI || TC
+ depends on ALPHA || TC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -2191,8 +2193,9 @@ config FB_HYPERV
This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
config FB_SIMPLE
- bool "Simple framebuffer support"
- depends on (FB = y) && !DRM_SIMPLEDRM
+ tristate "Simple framebuffer support"
+ depends on FB
+ depends on !DRM_SIMPLEDRM
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index c5b99a4861e8..6b4d5a7f3e15 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -1267,7 +1267,7 @@ static struct platform_device *gbefb_device;
static int __init gbefb_init(void)
{
int ret = platform_driver_register(&gbefb_driver);
- if (!ret) {
+ if (IS_ENABLED(CONFIG_SGI_IP32) && !ret) {
gbefb_device = platform_device_alloc("gbefb", 0);
if (gbefb_device) {
ret = platform_device_add(gbefb_device);
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 588e02fb91d3..236081afe9a2 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -239,6 +239,17 @@ static int virtio_dev_probe(struct device *_d)
driver_features_legacy = driver_features;
}
+ /*
+ * Some devices detect the legacy driver solely via F_VERSION_1. For
+ * these, write F_VERSION_1 to force little-endian config space
+ * accesses before FEATURES_OK when needed.
+ */
+ if (drv->validate && !virtio_legacy_is_little_endian()
+ && device_features & BIT_ULL(VIRTIO_F_VERSION_1)) {
+ dev->features = BIT_ULL(VIRTIO_F_VERSION_1);
+ dev->config->finalize_features(dev);
+ }
+
if (device_features & (1ULL << VIRTIO_F_VERSION_1))
dev->features = driver_features & device_features;
else
@@ -345,8 +356,13 @@ static int virtio_device_of_init(struct virtio_device *dev)
ret = snprintf(compat, sizeof(compat), "virtio,device%x", dev->id.device);
BUG_ON(ret >= sizeof(compat));
+ /*
+ * On powerpc/pseries virtio devices are PCI devices so PCI
+ * vendor/device ids play the role of the "compatible" property.
+ * Simply don't init of_node in this case.
+ */
if (!of_device_is_compatible(np, compat)) {
- ret = -EINVAL;
+ ret = 0;
goto out;
}
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b81fe4f7d434..bf59faeb3de1 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1666,7 +1666,7 @@ config WDT_MTX1
config SIBYTE_WDOG
tristate "Sibyte SoC hardware watchdog"
- depends on CPU_SB1 || (MIPS && COMPILE_TEST)
+ depends on CPU_SB1
help
Watchdog driver for the built-in watchdog hardware in Sibyte
SoC processors. There are apparently two watchdog timers
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 5f1ce59b44b9..1b2c3aca6887 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -177,6 +177,7 @@ config XEN_GRANT_DMA_ALLOC
config SWIOTLB_XEN
def_bool y
+ depends on XEN_PV || ARM || ARM64
select DMA_OPS
select SWIOTLB
@@ -214,7 +215,7 @@ config XEN_PVCALLS_FRONTEND
implements them.
config XEN_PVCALLS_BACKEND
- bool "XEN PV Calls backend driver"
+ tristate "XEN PV Calls backend driver"
depends on INET && XEN && XEN_BACKEND
help
Experimental backend for the Xen PV Calls protocol
@@ -240,7 +241,7 @@ config XEN_PRIVCMD
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
- depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
+ depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
default m
help
This ACPI processor uploads Power Management information to the Xen
@@ -258,7 +259,7 @@ config XEN_ACPI_PROCESSOR
config XEN_MCE_LOG
bool "Xen platform mcelog"
- depends on XEN_DOM0 && X86_MCE
+ depends on XEN_PV_DOM0 && X86_MCE
help
Allow the kernel to fetch MCE errors from the Xen platform and
convert them into Linux mcelog format for mcelog tools
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 671c71245a7b..3a50f097ed3e 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -43,6 +43,8 @@
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/errno.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
@@ -115,7 +117,7 @@ static struct ctl_table xen_root[] = {
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
/*
- * balloon_process() state:
+ * balloon_thread() state:
*
* BP_DONE: done or nothing to do,
* BP_WAIT: wait to be rescheduled,
@@ -130,6 +132,8 @@ enum bp_state {
BP_ECANCELED
};
+/* Main waiting point for xen-balloon thread. */
+static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);
static DEFINE_MUTEX(balloon_mutex);
@@ -144,10 +148,6 @@ static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
static LIST_HEAD(ballooned_pages);
static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
-/* Main work function, always executed in process context. */
-static void balloon_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
-
/* When ballooning out (allocating memory to return to Xen) we don't really
want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
@@ -366,7 +366,7 @@ static void xen_online_page(struct page *page, unsigned int order)
static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
if (val == MEM_ONLINE)
- schedule_delayed_work(&balloon_worker, 0);
+ wake_up(&balloon_thread_wq);
return NOTIFY_OK;
}
@@ -491,18 +491,52 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
}
/*
- * As this is a work item it is guaranteed to run as a single instance only.
+ * Stop waiting either if state is BP_DONE and ballooning action is
+ * needed, or if the credit has changed while state is not BP_DONE.
+ */
+static bool balloon_thread_cond(enum bp_state state, long credit)
+{
+ if (state == BP_DONE)
+ credit = 0;
+
+ return current_credit() != credit || kthread_should_stop();
+}
+
+/*
+ * As this is a kthread it is guaranteed to run as a single instance only.
* We may of course race updates of the target counts (which are protected
* by the balloon lock), or with changes to the Xen hard limit, but we will
* recover from these in time.
*/
-static void balloon_process(struct work_struct *work)
+static int balloon_thread(void *unused)
{
enum bp_state state = BP_DONE;
long credit;
+ unsigned long timeout;
+
+ set_freezable();
+ for (;;) {
+ switch (state) {
+ case BP_DONE:
+ case BP_ECANCELED:
+ timeout = 3600 * HZ;
+ break;
+ case BP_EAGAIN:
+ timeout = balloon_stats.schedule_delay * HZ;
+ break;
+ case BP_WAIT:
+ timeout = HZ;
+ break;
+ }
+
+ credit = current_credit();
+ wait_event_freezable_timeout(balloon_thread_wq,
+ balloon_thread_cond(state, credit), timeout);
+
+ if (kthread_should_stop())
+ return 0;
- do {
mutex_lock(&balloon_mutex);
credit = current_credit();
@@ -529,12 +563,7 @@ static void balloon_process(struct work_struct *work)
mutex_unlock(&balloon_mutex);
cond_resched();
-
- } while (credit && state == BP_DONE);
-
- /* Schedule more work if there is some still to be done. */
- if (state == BP_EAGAIN)
- schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
+ }
}
/* Resets the Xen limit, sets new target, and kicks off processing. */
@@ -542,7 +571,7 @@ void balloon_set_new_target(unsigned long target)
{
/* No need for lock. Not read-modify-write updates. */
balloon_stats.target_pages = target;
- schedule_delayed_work(&balloon_worker, 0);
+ wake_up(&balloon_thread_wq);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);
@@ -647,7 +676,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
/* The balloon may be too large now. Shrink it if needed. */
if (current_credit())
- schedule_delayed_work(&balloon_worker, 0);
+ wake_up(&balloon_thread_wq);
mutex_unlock(&balloon_mutex);
}
@@ -679,6 +708,8 @@ static void __init balloon_add_region(unsigned long start_pfn,
static int __init balloon_init(void)
{
+ struct task_struct *task;
+
if (!xen_domain())
return -ENODEV;
@@ -722,6 +753,12 @@ static int __init balloon_init(void)
}
#endif
+ task = kthread_run(balloon_thread, NULL, "xen-balloon");
+ if (IS_ERR(task)) {
+ pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
+ return PTR_ERR(task);
+ }
+
/* Init the xen-balloon driver. */
xen_balloon_init();
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 1e7f6b1c0c97..fec1b6537166 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -381,6 +381,14 @@ static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
map->unmap_ops[offset+i].handle,
map->unmap_ops[offset+i].status);
map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
+ if (use_ptemod) {
+ if (map->kunmap_ops[offset+i].status)
+ err = -EINVAL;
+ pr_debug("kunmap handle=%u st=%d\n",
+ map->kunmap_ops[offset+i].handle,
+ map->kunmap_ops[offset+i].status);
+ map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
+ }
}
return err;
}
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 720a7b7abd46..3369734108af 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -257,7 +257,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
LIST_HEAD(pagelist);
struct mmap_gfn_state state;
- /* We only support privcmd_ioctl_mmap_batch for auto translated. */
/* We only support privcmd_ioctl_mmap_batch for non-auto-translated domains. */
if (xen_feature(XENFEAT_auto_translated_physmap))
return -ENOSYS;
@@ -420,7 +420,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
int rc;
struct page **pages;
- pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
+ pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
if (pages == NULL)
return -ENOMEM;
@@ -428,7 +428,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
if (rc != 0) {
pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
numpgs, rc);
- kfree(pages);
+ kvfree(pages);
return -ENOMEM;
}
BUG_ON(vma->vm_private_data != NULL);
@@ -803,21 +803,21 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
unsigned int domid =
(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
DOMID_SELF : kdata.dom;
- int num;
+ int num, *errs = (int *)pfns;
+ BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
num = xen_remap_domain_mfn_array(vma,
kdata.addr & PAGE_MASK,
- pfns, kdata.num, (int *)pfns,
+ pfns, kdata.num, errs,
vma->vm_page_prot,
- domid,
- vma->vm_private_data);
+ domid);
if (num < 0)
rc = num;
else if (num != kdata.num) {
unsigned int i;
for (i = 0; i < num; i++) {
- rc = pfns[i];
+ rc = errs[i];
if (rc < 0)
break;
}
@@ -912,7 +912,7 @@ static void privcmd_close(struct vm_area_struct *vma)
else
pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
numpgs, rc);
- kfree(pages);
+ kvfree(pages);
}
static vm_fault_t privcmd_fault(struct vm_fault *vmf)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 643fe440c46e..e56a5faac395 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -106,27 +106,26 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
- int i, rc;
- int dma_bits;
+ int rc;
+ unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+ unsigned int i, dma_bits = order + PAGE_SHIFT;
dma_addr_t dma_handle;
phys_addr_t p = virt_to_phys(buf);
- dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
+ BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
+ BUG_ON(nslabs % IO_TLB_SEGSIZE);
i = 0;
do {
- int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
-
do {
rc = xen_create_contiguous_region(
- p + (i << IO_TLB_SHIFT),
- get_order(slabs << IO_TLB_SHIFT),
+ p + (i << IO_TLB_SHIFT), order,
dma_bits, &dma_handle);
} while (rc && dma_bits++ < MAX_DMA_BITS);
if (rc)
return rc;
- i += slabs;
+ i += IO_TLB_SEGSIZE;
} while (i < nslabs);
return 0;
}
@@ -153,9 +152,7 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
return "";
}
-#define DEFAULT_NSLABS ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
-
-int __ref xen_swiotlb_init(void)
+int xen_swiotlb_init(void)
{
enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
unsigned long bytes = swiotlb_size_or_default();
@@ -185,7 +182,7 @@ retry:
order--;
}
if (!start)
- goto error;
+ goto exit;
if (order != get_order(bytes)) {
pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
(PAGE_SIZE << order) >> 20);
@@ -208,15 +205,15 @@ retry:
swiotlb_set_max_segment(PAGE_SIZE);
return 0;
error:
- if (repeat--) {
+ if (nslabs > 1024 && repeat--) {
/* Min is 2MB */
- nslabs = max(1024UL, (nslabs >> 1));
- pr_info("Lowering to %luMB\n",
- (nslabs << IO_TLB_SHIFT) >> 20);
+ nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
+ bytes = nslabs << IO_TLB_SHIFT;
+ pr_info("Lowering to %luMB\n", bytes >> 20);
goto retry;
}
+exit:
pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
- free_pages((unsigned long)start, order);
return rc;
}
@@ -233,10 +230,11 @@ retry:
/*
* Get IO TLB memory from any location.
*/
- start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
+ start = memblock_alloc(PAGE_ALIGN(bytes),
+ IO_TLB_SEGSIZE << IO_TLB_SHIFT);
if (!start)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
+ panic("%s: Failed to allocate %lu bytes\n",
+ __func__, PAGE_ALIGN(bytes));
/*
* And replace that memory with pages under 4GB.
@@ -244,9 +242,9 @@ retry:
rc = xen_swiotlb_fixup(start, nslabs);
if (rc) {
memblock_free(__pa(start), PAGE_ALIGN(bytes));
- if (repeat--) {
+ if (nslabs > 1024 && repeat--) {
/* Min is 2MB */
- nslabs = max(1024UL, (nslabs >> 1));
+ nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
bytes = nslabs << IO_TLB_SHIFT;
pr_info("Lowering to %luMB\n", bytes >> 20);
goto retry;
@@ -254,7 +252,7 @@ retry:
panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
}
- if (swiotlb_init_with_tbl(start, nslabs, false))
+ if (swiotlb_init_with_tbl(start, nslabs, true))
panic("Cannot allocate SWIOTLB buffer");
swiotlb_set_max_segment(PAGE_SIZE);
}
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index eb2151fb6049..1769a44f4819 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -23,7 +23,7 @@ struct fscache_netfs v9fs_cache_netfs = {
.version = 0,
};
-/**
+/*
* v9fs_random_cachetag - Generate a random tag to be associated
* with a new cache session.
*
@@ -233,7 +233,7 @@ static void v9fs_vfs_readpage_complete(struct page *page, void *data,
unlock_page(page);
}
-/**
+/*
* __v9fs_readpage_from_fscache - read a page from cache
*
* Returns 0 if the pages are in cache and a BIO is submitted,
@@ -268,7 +268,7 @@ int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
}
}
-/**
+/*
* __v9fs_readpages_from_fscache - read multiple pages from cache
*
* Returns 0 if the pages are in cache and a BIO is submitted,
@@ -308,7 +308,7 @@ int __v9fs_readpages_from_fscache(struct inode *inode,
}
}
-/**
+/*
* __v9fs_readpage_to_fscache - write a page to the cache
*
*/
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 9d9de62592be..b8863dd0de5c 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -19,18 +19,18 @@
#include "v9fs_vfs.h"
#include "fid.h"
+static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid)
+{
+ hlist_add_head(&fid->dlist, (struct hlist_head *)&dentry->d_fsdata);
+}
+
+
/**
* v9fs_fid_add - add a fid to a dentry
* @dentry: dentry that the fid is being added to
* @fid: fid to add
*
*/
-
-static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid)
-{
- hlist_add_head(&fid->dlist, (struct hlist_head *)&dentry->d_fsdata);
-}
-
void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid)
{
spin_lock(&dentry->d_lock);
@@ -67,7 +67,7 @@ static struct p9_fid *v9fs_fid_find_inode(struct inode *inode, kuid_t uid)
/**
* v9fs_open_fid_add - add an open fid to an inode
- * @dentry: inode that the fid is being added to
+ * @inode: inode that the fid is being added to
* @fid: fid to add
*
*/
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index cdb99507ef33..2e0fa7c932db 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -155,6 +155,7 @@ int v9fs_show_options(struct seq_file *m, struct dentry *root)
/**
* v9fs_parse_options - parse mount options into session structure
* @v9ses: existing v9fs session information
+ * @opts: The mount option string
*
* Return 0 upon success, -ERRNO upon failure.
*/
@@ -542,12 +543,9 @@ extern int v9fs_error_init(void);
static struct kobject *v9fs_kobj;
#ifdef CONFIG_9P_FSCACHE
-/**
- * caches_show - list caches associated with a session
- *
- * Returns the size of buffer written.
+/*
+ * List caches associated with a session
*/
-
static ssize_t caches_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index cce9ace651a2..1c4f1b39cc95 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -30,8 +30,7 @@
/**
* v9fs_fid_readpage - read an entire page in from 9P
- *
- * @fid: fid being read
+ * @data: Opaque pointer to the fid being read
* @page: structure to page
*
*/
@@ -116,6 +115,8 @@ static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
/**
* v9fs_release_page - release the private state associated with a page
+ * @page: The page to be released
+ * @gfp: The caller's allocation restrictions
*
* Returns 1 if the page can be released, 0 otherwise.
*/
@@ -129,9 +130,9 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
/**
* v9fs_invalidate_page - Invalidate a page completely or partially
- *
- * @page: structure to page
- * @offset: offset in the page
+ * @page: The page to be invalidated
+ * @offset: offset of the invalidated region
+ * @length: length of the invalidated region
*/
static void v9fs_invalidate_page(struct page *page, unsigned int offset,
@@ -199,6 +200,8 @@ static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
/**
* v9fs_launder_page - Writeback a dirty page
+ * @page: The page to be cleaned up
+ *
* Returns 0 on success.
*/
@@ -219,6 +222,7 @@ static int v9fs_launder_page(struct page *page)
/**
* v9fs_direct_IO - 9P address space operation for direct I/O
* @iocb: target I/O control block
+ * @iter: The data/buffer to use
*
* The presence of v9fs_direct_IO() in the address space ops vector
* allows open() O_DIRECT flags which would have failed otherwise.
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index aab5e6538660..246235ebdb70 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -359,14 +359,11 @@ out_err:
}
/**
- * v9fs_file_read - read from a file
- * @filp: file pointer to read
- * @udata: user data buffer to read data into
- * @count: size of buffer
- * @offset: offset at which to read data
+ * v9fs_file_read_iter - read from a file
+ * @iocb: The operation parameters
+ * @to: The buffer to read into
*
*/
-
static ssize_t
v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
@@ -388,11 +385,9 @@ v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
}
/**
- * v9fs_file_write - write to a file
- * @filp: file pointer to write
- * @data: data buffer to write data from
- * @count: size of buffer
- * @offset: offset at which to write data
+ * v9fs_file_write_iter - write to a file
+ * @iocb: The operation parameters
+ * @from: The data to write
*
*/
static ssize_t
@@ -561,11 +556,9 @@ out_unlock:
}
/**
- * v9fs_mmap_file_read - read from a file
- * @filp: file pointer to read
- * @data: user data buffer to read data into
- * @count: size of buffer
- * @offset: offset at which to read data
+ * v9fs_mmap_file_read_iter - read from a file
+ * @iocb: The operation parameters
+ * @to: The buffer to read into
*
*/
static ssize_t
@@ -576,11 +569,9 @@ v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
}
/**
- * v9fs_mmap_file_write - write to a file
- * @filp: file pointer to write
- * @data: data buffer to write data from
- * @count: size of buffer
- * @offset: offset at which to write data
+ * v9fs_mmap_file_write_iter - write to a file
+ * @iocb: The operation parameters
+ * @from: The data to write
*
*/
static ssize_t
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 795706520b5e..08f48b70a741 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -218,7 +218,7 @@ v9fs_blank_wstat(struct p9_wstat *wstat)
/**
* v9fs_alloc_inode - helper function to allocate an inode
- *
+ * @sb: The superblock to allocate the inode from
*/
struct inode *v9fs_alloc_inode(struct super_block *sb)
{
@@ -238,7 +238,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
/**
* v9fs_free_inode - destroy an inode
- *
+ * @inode: The inode to be freed
*/
void v9fs_free_inode(struct inode *inode)
@@ -343,7 +343,7 @@ error:
* v9fs_get_inode - helper function to setup an inode
* @sb: superblock
* @mode: mode to setup inode with
- *
+ * @rdev: The device numbers to set
*/
struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
@@ -369,7 +369,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
}
/**
- * v9fs_clear_inode - release an inode
+ * v9fs_evict_inode - Remove an inode from the inode cache
* @inode: inode to release
*
*/
@@ -665,14 +665,15 @@ error:
/**
* v9fs_vfs_create - VFS hook to create a regular file
+ * @mnt_userns: The user namespace of the mount
+ * @dir: The parent directory
+ * @dentry: The name of file to be created
+ * @mode: The UNIX file mode to set
+ * @excl: True if the file must not yet exist
*
* open(.., O_CREAT) is handled in v9fs_vfs_atomic_open(). This is only called
* for mknod(2).
*
- * @dir: directory inode that is being created
- * @dentry: dentry that is being deleted
- * @mode: create permissions
- *
*/
static int
@@ -696,6 +697,7 @@ v9fs_vfs_create(struct user_namespace *mnt_userns, struct inode *dir,
/**
* v9fs_vfs_mkdir - VFS mkdir hook to create a directory
+ * @mnt_userns: The user namespace of the mount
* @dir: inode that is being unlinked
* @dentry: dentry that is being unlinked
* @mode: mode for new directory
@@ -900,10 +902,12 @@ int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
/**
* v9fs_vfs_rename - VFS hook to rename an inode
+ * @mnt_userns: The user namespace of the mount
* @old_dir: old dir inode
* @old_dentry: old dentry
* @new_dir: new dir inode
* @new_dentry: new dentry
+ * @flags: RENAME_* flags
*
*/
@@ -1009,6 +1013,7 @@ done:
/**
* v9fs_vfs_getattr - retrieve file metadata
+ * @mnt_userns: The user namespace of the mount
* @path: Object to query
* @stat: metadata structure to populate
* @request_mask: Mask of STATX_xxx flags indicating the caller's interests
@@ -1050,6 +1055,7 @@ v9fs_vfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
/**
* v9fs_vfs_setattr - set file metadata
+ * @mnt_userns: The user namespace of the mount
* @dentry: file whose metadata to set
* @iattr: metadata assignment structure
*
@@ -1285,6 +1291,7 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
/**
* v9fs_vfs_symlink - helper function to create symlinks
+ * @mnt_userns: The user namespace of the mount
* @dir: directory inode containing symlink
* @dentry: dentry for symlink
* @symname: symlink data
@@ -1340,6 +1347,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
/**
* v9fs_vfs_mknod - create a special file
+ * @mnt_userns: The user namespace of the mount
* @dir: inode destination for new link
* @dentry: dentry for file
* @mode: mode for creation
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index e1c0240b51c0..01b9e1281a29 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -37,7 +37,10 @@ v9fs_vfs_mknod_dotl(struct user_namespace *mnt_userns, struct inode *dir,
struct dentry *dentry, umode_t omode, dev_t rdev);
/**
- * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
+ * v9fs_get_fsgid_for_create - Helper function to get the gid for a new object
+ * @dir_inode: The directory inode
+ *
+ * Helper function to get the gid for creating a
* new file system object. This checks the S_ISGID to determine the owning
* group of the new file system object.
*/
@@ -211,12 +214,13 @@ int v9fs_open_to_dotl_flags(int flags)
/**
* v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
+ * @mnt_userns: The user namespace of the mount
* @dir: directory inode that is being created
* @dentry: dentry that is being deleted
* @omode: create permissions
+ * @excl: True if the file must not yet exist
*
*/
-
static int
v9fs_vfs_create_dotl(struct user_namespace *mnt_userns, struct inode *dir,
struct dentry *dentry, umode_t omode, bool excl)
@@ -361,6 +365,7 @@ err_clunk_old_fid:
/**
* v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
+ * @mnt_userns: The user namespace of the mount
* @dir: inode that is being unlinked
* @dentry: dentry that is being unlinked
* @omode: mode for new directory
@@ -537,6 +542,7 @@ static int v9fs_mapped_iattr_valid(int iattr_valid)
/**
* v9fs_vfs_setattr_dotl - set file metadata
+ * @mnt_userns: The user namespace of the mount
* @dentry: file whose metadata to set
* @iattr: metadata assignment structure
*
@@ -816,6 +822,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
/**
* v9fs_vfs_mknod_dotl - create a special file
+ * @mnt_userns: The user namespace of the mount
* @dir: inode destination for new link
* @dentry: dentry for file
* @omode: mode for creation
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 7d9b23d981bf..1b4d5809808d 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -21,6 +21,37 @@
#include "internal.h"
/*
+ * Handle invalidation of an mmap'd file. We invalidate all the PTEs referring
+ * to the pages in this file's pagecache, forcing the kernel to go through
+ * ->fault() or ->page_mkwrite() - at which point we can handle invalidation
+ * more fully.
+ */
+void afs_invalidate_mmap_work(struct work_struct *work)
+{
+ struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work);
+
+ unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false);
+}
+
+void afs_server_init_callback_work(struct work_struct *work)
+{
+ struct afs_server *server = container_of(work, struct afs_server, initcb_work);
+ struct afs_vnode *vnode;
+ struct afs_cell *cell = server->cell;
+
+ down_read(&cell->fs_open_mmaps_lock);
+
+ list_for_each_entry(vnode, &cell->fs_open_mmaps, cb_mmap_link) {
+ if (vnode->cb_server == server) {
+ clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ queue_work(system_unbound_wq, &vnode->cb_work);
+ }
+ }
+
+ up_read(&cell->fs_open_mmaps_lock);
+}
+
+/*
* Allow the fileserver to request callback state (re-)initialisation.
* Unfortunately, UUIDs are not guaranteed unique.
*/
@@ -29,8 +60,11 @@ void afs_init_callback_state(struct afs_server *server)
rcu_read_lock();
do {
server->cb_s_break++;
- server = rcu_dereference(server->uuid_next);
- } while (0);
+ atomic_inc(&server->cell->fs_s_break);
+ if (!list_empty(&server->cell->fs_open_mmaps))
+ queue_work(system_unbound_wq, &server->initcb_work);
+
+ } while ((server = rcu_dereference(server->uuid_next)));
rcu_read_unlock();
}
@@ -44,11 +78,17 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas
clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
vnode->cb_break++;
+ vnode->cb_v_break = vnode->volume->cb_v_break;
afs_clear_permits(vnode);
if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
afs_lock_may_be_available(vnode);
+ if (reason != afs_cb_break_for_deleted &&
+ vnode->status.type == AFS_FTYPE_FILE &&
+ atomic_read(&vnode->cb_nr_mmap))
+ queue_work(system_unbound_wq, &vnode->cb_work);
+
trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
} else {
trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false);
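Both new work handlers recover their owning object from the embedded work_struct via container_of(). A self-contained userspace sketch of that pattern, using mock types (only the offsetof arithmetic is the point):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct vnode_mock {
	unsigned long flags;
	struct work_struct cb_work;	/* embedded, like vnode->cb_work */
};

static void invalidate_mmap_work(struct work_struct *work)
{
	/* Step back from the embedded member to the enclosing object. */
	struct vnode_mock *vnode = container_of(work, struct vnode_mock, cb_work);

	printf("invalidate mappings for vnode with flags %lx\n", vnode->flags);
}

int main(void)
{
	struct vnode_mock v = { .flags = 0x10 };

	invalidate_mmap_work(&v.cb_work);	/* what queue_work() leads to */
	return 0;
}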
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 887b673f6223..d88407fb9bc0 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -166,6 +166,8 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
seqlock_init(&cell->volume_lock);
cell->fs_servers = RB_ROOT;
seqlock_init(&cell->fs_lock);
+ INIT_LIST_HEAD(&cell->fs_open_mmaps);
+ init_rwsem(&cell->fs_open_mmaps_lock);
rwlock_init(&cell->vl_servers_lock);
cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index ac829e63c570..4579bbda4634 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -1077,9 +1077,9 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
*/
static int afs_d_revalidate_rcu(struct dentry *dentry)
{
- struct afs_vnode *dvnode, *vnode;
+ struct afs_vnode *dvnode;
struct dentry *parent;
- struct inode *dir, *inode;
+ struct inode *dir;
long dir_version, de_version;
_enter("%p", dentry);
@@ -1109,18 +1109,6 @@ static int afs_d_revalidate_rcu(struct dentry *dentry)
return -ECHILD;
}
- /* Check to see if the vnode referred to by the dentry still
- * has a callback.
- */
- if (d_really_is_positive(dentry)) {
- inode = d_inode_rcu(dentry);
- if (inode) {
- vnode = AFS_FS_I(inode);
- if (!afs_check_validity(vnode))
- return -ECHILD;
- }
- }
-
return 1; /* Still valid */
}
@@ -1156,17 +1144,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (IS_ERR(key))
key = NULL;
- if (d_really_is_positive(dentry)) {
- inode = d_inode(dentry);
- if (inode) {
- vnode = AFS_FS_I(inode);
- afs_validate(vnode, key);
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
- goto out_bad;
- }
- }
-
- /* lock down the parent dentry so we can peer at it */
+ /* Hold the parent dentry so we can peer at it */
parent = dget_parent(dentry);
dir = AFS_FS_I(d_inode(parent));
@@ -1175,7 +1153,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (test_bit(AFS_VNODE_DELETED, &dir->flags)) {
_debug("%pd: parent dir deleted", dentry);
- goto out_bad_parent;
+ goto not_found;
}
/* We only need to invalidate a dentry if the server's copy changed
@@ -1201,12 +1179,12 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
case 0:
/* the filename maps to something */
if (d_really_is_negative(dentry))
- goto out_bad_parent;
+ goto not_found;
inode = d_inode(dentry);
if (is_bad_inode(inode)) {
printk("kAFS: afs_d_revalidate: %pd2 has bad inode\n",
dentry);
- goto out_bad_parent;
+ goto not_found;
}
vnode = AFS_FS_I(inode);
@@ -1228,9 +1206,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
dentry, fid.unique,
vnode->fid.unique,
vnode->vfs_inode.i_generation);
- write_seqlock(&vnode->cb_lock);
- set_bit(AFS_VNODE_DELETED, &vnode->flags);
- write_sequnlock(&vnode->cb_lock);
goto not_found;
}
goto out_valid;
@@ -1245,7 +1220,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
default:
_debug("failed to iterate dir %pd: %d",
parent, ret);
- goto out_bad_parent;
+ goto not_found;
}
out_valid:
@@ -1256,16 +1231,9 @@ out_valid_noupdate:
_leave(" = 1 [valid]");
return 1;
- /* the dirent, if it exists, now points to a different vnode */
not_found:
- spin_lock(&dentry->d_lock);
- dentry->d_flags |= DCACHE_NFSFS_RENAMED;
- spin_unlock(&dentry->d_lock);
-
-out_bad_parent:
_debug("dropping dentry %pd2", dentry);
dput(parent);
-out_bad:
key_put(key);
_leave(" = 0 [bad]");
@@ -1792,6 +1760,10 @@ static int afs_link(struct dentry *from, struct inode *dir,
goto error;
}
+ ret = afs_validate(vnode, op->key);
+ if (ret < 0)
+ goto error_op;
+
afs_op_set_vnode(op, 0, dvnode);
afs_op_set_vnode(op, 1, vnode);
op->file[0].dv_delta = 1;
@@ -1805,6 +1777,8 @@ static int afs_link(struct dentry *from, struct inode *dir,
op->create.reason = afs_edit_dir_for_link;
return afs_do_sync_operation(op);
+error_op:
+ afs_put_operation(op);
error:
d_drop(dentry);
_leave(" = %d", ret);
@@ -1989,6 +1963,11 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
if (IS_ERR(op))
return PTR_ERR(op);
+ ret = afs_validate(vnode, op->key);
+ op->error = ret;
+ if (ret < 0)
+ goto error;
+
afs_op_set_vnode(op, 0, orig_dvnode);
afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
op->file[0].dv_delta = 1;
diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
index f4600c1353ad..540b9fc96824 100644
--- a/fs/afs/dir_edit.c
+++ b/fs/afs/dir_edit.c
@@ -263,7 +263,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
if (b == nr_blocks) {
_debug("init %u", b);
afs_edit_init_block(meta, block, b);
- i_size_write(&vnode->vfs_inode, (b + 1) * AFS_DIR_BLOCK_SIZE);
+ afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE);
}
/* Only lower dir pages have a counter in the header. */
@@ -296,7 +296,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
new_directory:
afs_edit_init_block(meta, meta, 0);
i_size = AFS_DIR_BLOCK_SIZE;
- i_size_write(&vnode->vfs_inode, i_size);
+ afs_set_i_size(vnode, i_size);
slot = AFS_DIR_RESV_BLOCKS0;
page = page0;
block = meta;
diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
index dae9a57d7ec0..45cfd50a9521 100644
--- a/fs/afs/dir_silly.c
+++ b/fs/afs/dir_silly.c
@@ -86,8 +86,8 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
return afs_do_sync_operation(op);
}
-/**
- * afs_sillyrename - Perform a silly-rename of a dentry
+/*
+ * Perform silly-rename of a dentry.
*
* AFS is stateless and the server doesn't know when the client is holding a
* file open. To prevent application problems when a file is unlinked while
diff --git a/fs/afs/file.c b/fs/afs/file.c
index db035ae2a134..e6c447ae91f3 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -24,12 +24,16 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
static int afs_releasepage(struct page *page, gfp_t gfp_flags);
static void afs_readahead(struct readahead_control *ractl);
+static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+static void afs_vm_open(struct vm_area_struct *area);
+static void afs_vm_close(struct vm_area_struct *area);
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff);
const struct file_operations afs_file_operations = {
.open = afs_open,
.release = afs_release,
.llseek = generic_file_llseek,
- .read_iter = generic_file_read_iter,
+ .read_iter = afs_file_read_iter,
.write_iter = afs_file_write,
.mmap = afs_file_mmap,
.splice_read = generic_file_splice_read,
@@ -59,8 +63,10 @@ const struct address_space_operations afs_fs_aops = {
};
static const struct vm_operations_struct afs_vm_ops = {
+ .open = afs_vm_open,
+ .close = afs_vm_close,
.fault = filemap_fault,
- .map_pages = filemap_map_pages,
+ .map_pages = afs_vm_map_pages,
.page_mkwrite = afs_page_mkwrite,
};
@@ -295,7 +301,7 @@ static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
fsreq->subreq = subreq;
fsreq->pos = subreq->start + subreq->transferred;
fsreq->len = subreq->len - subreq->transferred;
- fsreq->key = subreq->rreq->netfs_priv;
+ fsreq->key = key_get(subreq->rreq->netfs_priv);
fsreq->vnode = vnode;
fsreq->iter = &fsreq->def_iter;
@@ -304,6 +310,7 @@ static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
fsreq->pos, fsreq->len);
afs_fetch_data(fsreq->vnode, fsreq);
+ afs_put_read(fsreq);
}
static int afs_symlink_readpage(struct page *page)
@@ -490,15 +497,88 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
return 1;
}
+static void afs_add_open_mmap(struct afs_vnode *vnode)
+{
+ if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
+ down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+ list_add_tail(&vnode->cb_mmap_link,
+ &vnode->volume->cell->fs_open_mmaps);
+
+ up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+ }
+}
+
+static void afs_drop_open_mmap(struct afs_vnode *vnode)
+{
+ if (!atomic_dec_and_test(&vnode->cb_nr_mmap))
+ return;
+
+ down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+ if (atomic_read(&vnode->cb_nr_mmap) == 0)
+ list_del_init(&vnode->cb_mmap_link);
+
+ up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+ flush_work(&vnode->cb_work);
+}
+
/*
* Handle setting up a memory mapping on an AFS file.
*/
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
int ret;
+ afs_add_open_mmap(vnode);
+
ret = generic_file_mmap(file, vma);
if (ret == 0)
vma->vm_ops = &afs_vm_ops;
+ else
+ afs_drop_open_mmap(vnode);
return ret;
}
+
+static void afs_vm_open(struct vm_area_struct *vma)
+{
+ afs_add_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static void afs_vm_close(struct vm_area_struct *vma)
+{
+ afs_drop_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff)
+{
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(vmf->vma->vm_file));
+ struct afs_file *af = vmf->vma->vm_file->private_data;
+
+ switch (afs_validate(vnode, af->key)) {
+ case 0:
+ return filemap_map_pages(vmf, start_pgoff, end_pgoff);
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ case -EINTR:
+ case -ERESTARTSYS:
+ return VM_FAULT_RETRY;
+ case -ESTALE:
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+ struct afs_file *af = iocb->ki_filp->private_data;
+ int ret;
+
+ ret = afs_validate(vnode, af->key);
+ if (ret < 0)
+ return ret;
+
+ return generic_file_read_iter(iocb, iter);
+}
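afs_add_open_mmap() and afs_drop_open_mmap() take the cell-wide semaphore only on the 0-to-1 and 1-to-0 transitions of the per-vnode counter, so steady-state page faults never touch the shared list. A userspace sketch of that first-mapper/last-mapper pattern with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int cb_nr_mmap;

static void add_open_mmap(void)
{
	/* Only the 0 -> 1 transition links the vnode onto the cell list. */
	if (atomic_fetch_add(&cb_nr_mmap, 1) == 0)
		printf("link vnode onto cell->fs_open_mmaps\n");
}

static void drop_open_mmap(void)
{
	/* Only the final unmapper takes it back off. */
	if (atomic_fetch_sub(&cb_nr_mmap, 1) == 1)
		printf("unlink vnode from cell->fs_open_mmaps\n");
}

int main(void)
{
	add_open_mmap();	/* mmap()                   */
	add_open_mmap();	/* vm_ops->open on VMA copy */
	drop_open_mmap();
	drop_open_mmap();	/* last unmap: unlink       */
	return 0;
}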
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index e7e98ad63a91..c0031a3ab42f 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include "afs_fs.h"
#include "internal.h"
+#include "protocol_afs.h"
#include "protocol_yfs.h"
static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ;
@@ -102,7 +103,7 @@ void afs_fileserver_probe_result(struct afs_call *call)
struct afs_addr_list *alist = call->alist;
struct afs_server *server = call->server;
unsigned int index = call->addr_ix;
- unsigned int rtt_us = 0;
+ unsigned int rtt_us = 0, cap0;
int ret = call->error;
_enter("%pU,%u", &server->uuid, index);
@@ -159,6 +160,11 @@ responded:
clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
alist->addrs[index].srx_service = call->service_id;
}
+ cap0 = ntohl(call->tmp);
+ if (cap0 & AFS3_VICED_CAPABILITY_64BITFILES)
+ set_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
+ else
+ clear_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
}
if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
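The responded path now byte-swaps capability word 0 out of call->tmp and mirrors one bit into a server flag. A small userspace sketch of the decode step, with the constant taken from the new protocol_afs.h below:

#include <arpa/inet.h>
#include <stdio.h>

#define AFS3_VICED_CAPABILITY_64BITFILES 0x0002

int main(void)
{
	unsigned int wire = htonl(0x0003);	/* as left in call->tmp */
	unsigned int cap0 = ntohl(wire);

	/* Mirrors the set_bit/clear_bit on AFS_SERVER_FL_HAS_FS64. */
	if (cap0 & AFS3_VICED_CAPABILITY_64BITFILES)
		printf("server supports FetchData64/StoreData64\n");
	else
		printf("server lacks 64-bit file support\n");
	return 0;
}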
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index dd3f45d906d2..4943413d9c5f 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -456,9 +456,7 @@ void afs_fs_fetch_data(struct afs_operation *op)
struct afs_read *req = op->fetch.req;
__be32 *bp;
- if (upper_32_bits(req->pos) ||
- upper_32_bits(req->len) ||
- upper_32_bits(req->pos + req->len))
+ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
return afs_fs_fetch_data64(op);
_enter("");
@@ -1113,9 +1111,7 @@ void afs_fs_store_data(struct afs_operation *op)
(unsigned long long)op->store.pos,
(unsigned long long)op->store.i_size);
- if (upper_32_bits(op->store.pos) ||
- upper_32_bits(op->store.size) ||
- upper_32_bits(op->store.i_size))
+ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
return afs_fs_store_data64(op);
call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData,
@@ -1229,7 +1225,7 @@ static void afs_fs_setattr_size(struct afs_operation *op)
key_serial(op->key), vp->fid.vid, vp->fid.vnode);
ASSERT(attr->ia_valid & ATTR_SIZE);
- if (upper_32_bits(attr->ia_size))
+ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
return afs_fs_setattr_size64(op);
call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData_as_Status,
@@ -1657,20 +1653,33 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
return ret;
count = ntohl(call->tmp);
-
call->count = count;
call->count2 = count;
- afs_extract_discard(call, count * sizeof(__be32));
+ if (count == 0) {
+ call->unmarshall = 4;
+ call->tmp = 0;
+ break;
+ }
+
+ /* Extract the first word of the capabilities to call->tmp */
+ afs_extract_to_tmp(call);
call->unmarshall++;
fallthrough;
- /* Extract capabilities words */
case 2:
ret = afs_extract_data(call, false);
if (ret < 0)
return ret;
- /* TODO: Examine capabilities */
+ afs_extract_discard(call, (count - 1) * sizeof(__be32));
+ call->unmarshall++;
+ fallthrough;
+
+ /* Extract remaining capabilities words */
+ case 3:
+ ret = afs_extract_data(call, false);
+ if (ret < 0)
+ return ret;
call->unmarshall++;
break;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 80b6c8d967d5..8fcffea2daf5 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -54,16 +54,6 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
}
/*
- * Set the file size and block count. Estimate the number of 512 bytes blocks
- * used, rounded up to nearest 1K for consistency with other AFS clients.
- */
-static void afs_set_i_size(struct afs_vnode *vnode, u64 size)
-{
- i_size_write(&vnode->vfs_inode, size);
- vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
-}
-
-/*
* Initialise an inode from the vnode status.
*/
static int afs_inode_init_from_status(struct afs_operation *op,
@@ -587,22 +577,32 @@ static void afs_zap_data(struct afs_vnode *vnode)
}
/*
- * Get the server reinit counter for a vnode's current server.
+ * Check to see if we have a server currently serving this volume and that it
+ * hasn't been reinitialised or dropped from the list.
*/
-static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break)
+static bool afs_check_server_good(struct afs_vnode *vnode)
{
- struct afs_server_list *slist = rcu_dereference(vnode->volume->servers);
+ struct afs_server_list *slist;
struct afs_server *server;
+ bool good;
int i;
+ if (vnode->cb_fs_s_break == atomic_read(&vnode->volume->cell->fs_s_break))
+ return true;
+
+ rcu_read_lock();
+
+ slist = rcu_dereference(vnode->volume->servers);
for (i = 0; i < slist->nr_servers; i++) {
server = slist->servers[i].server;
if (server == vnode->cb_server) {
- *_s_break = READ_ONCE(server->cb_s_break);
- return true;
+ good = (vnode->cb_s_break == server->cb_s_break);
+ rcu_read_unlock();
+ return good;
}
}
+ rcu_read_unlock();
return false;
}
@@ -611,57 +611,46 @@ static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break)
*/
bool afs_check_validity(struct afs_vnode *vnode)
{
- struct afs_volume *volume = vnode->volume;
enum afs_cb_break_reason need_clear = afs_cb_break_no_break;
time64_t now = ktime_get_real_seconds();
- bool valid;
- unsigned int cb_break, cb_s_break, cb_v_break;
+ unsigned int cb_break;
int seq = 0;
do {
read_seqbegin_or_lock(&vnode->cb_lock, &seq);
- cb_v_break = READ_ONCE(volume->cb_v_break);
cb_break = vnode->cb_break;
- if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
- afs_get_s_break_rcu(vnode, &cb_s_break)) {
- if (vnode->cb_s_break != cb_s_break ||
- vnode->cb_v_break != cb_v_break) {
- vnode->cb_s_break = cb_s_break;
- vnode->cb_v_break = cb_v_break;
- need_clear = afs_cb_break_for_vsbreak;
- valid = false;
- } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
+ if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
+ if (vnode->cb_v_break != vnode->volume->cb_v_break)
+ need_clear = afs_cb_break_for_v_break;
+ else if (!afs_check_server_good(vnode))
+ need_clear = afs_cb_break_for_s_reinit;
+ else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
need_clear = afs_cb_break_for_zap;
- valid = false;
- } else if (vnode->cb_expires_at - 10 <= now) {
+ else if (vnode->cb_expires_at - 10 <= now)
need_clear = afs_cb_break_for_lapsed;
- valid = false;
- } else {
- valid = true;
- }
} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
- valid = true;
+ ;
} else {
- vnode->cb_v_break = cb_v_break;
- valid = false;
+ need_clear = afs_cb_break_no_promise;
}
} while (need_seqretry(&vnode->cb_lock, seq));
done_seqretry(&vnode->cb_lock, seq);
- if (need_clear != afs_cb_break_no_break) {
- write_seqlock(&vnode->cb_lock);
- if (cb_break == vnode->cb_break)
- __afs_break_callback(vnode, need_clear);
- else
- trace_afs_cb_miss(&vnode->fid, need_clear);
- write_sequnlock(&vnode->cb_lock);
- valid = false;
- }
+ if (need_clear == afs_cb_break_no_break)
+ return true;
- return valid;
+ write_seqlock(&vnode->cb_lock);
+ if (need_clear == afs_cb_break_no_promise)
+ vnode->cb_v_break = vnode->volume->cb_v_break;
+ else if (cb_break == vnode->cb_break)
+ __afs_break_callback(vnode, need_clear);
+ else
+ trace_afs_cb_miss(&vnode->fid, need_clear);
+ write_sequnlock(&vnode->cb_lock);
+ return false;
}
/*
@@ -675,21 +664,20 @@ bool afs_check_validity(struct afs_vnode *vnode)
*/
int afs_validate(struct afs_vnode *vnode, struct key *key)
{
- bool valid;
int ret;
_enter("{v={%llx:%llu} fl=%lx},%x",
vnode->fid.vid, vnode->fid.vnode, vnode->flags,
key_serial(key));
- rcu_read_lock();
- valid = afs_check_validity(vnode);
- rcu_read_unlock();
-
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
- clear_nlink(&vnode->vfs_inode);
+ if (unlikely(test_bit(AFS_VNODE_DELETED, &vnode->flags))) {
+ if (vnode->vfs_inode.i_nlink)
+ clear_nlink(&vnode->vfs_inode);
+ goto valid;
+ }
- if (valid)
+ if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
+ afs_check_validity(vnode))
goto valid;
down_write(&vnode->validate_lock);
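The rework keeps afs_check_validity() on the read_seqbegin_or_lock() idiom: a lockless pass first, upgrading to the locked side only if a concurrent writer forces a retry. A kernel-style sketch of the pattern, assuming a hypothetical examine_state() helper that is safe to re-run:

/* Sketch only; built on the <linux/seqlock.h> retry API. */
static bool check_state(seqlock_t *lock)
{
	bool ok;
	int seq = 0;	/* even: start with the lockless pass */

	do {
		read_seqbegin_or_lock(lock, &seq);
		ok = examine_state();	/* hypothetical, re-runnable */
	} while (need_seqretry(lock, seq));
	done_seqretry(lock, seq);

	return ok;
}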
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 5ed416f4ff33..0ad97a8fc0d4 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -390,6 +390,9 @@ struct afs_cell {
/* Active fileserver interaction state. */
struct rb_root fs_servers; /* afs_server (by server UUID) */
seqlock_t fs_lock; /* For fs_servers */
+ struct rw_semaphore fs_open_mmaps_lock;
+ struct list_head fs_open_mmaps; /* List of vnodes that are mmapped */
+ atomic_t fs_s_break; /* Counter of CB.InitCallBackState messages */
/* VL server list. */
rwlock_t vl_servers_lock; /* Lock on vl_servers */
@@ -503,6 +506,7 @@ struct afs_server {
struct hlist_node addr4_link; /* Link in net->fs_addresses4 */
struct hlist_node addr6_link; /* Link in net->fs_addresses6 */
struct hlist_node proc_link; /* Link in net->fs_proc */
+ struct work_struct initcb_work; /* Work for CB.InitCallBackState* */
struct afs_server *gc_next; /* Next server in manager's list */
time64_t unuse_time; /* Time at which last unused */
unsigned long flags;
@@ -516,6 +520,7 @@ struct afs_server {
#define AFS_SERVER_FL_IS_YFS 16 /* Server is YFS not AFS */
#define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */
#define AFS_SERVER_FL_NO_RM2 18 /* Fileserver doesn't support YFS.RemoveFile2 */
+#define AFS_SERVER_FL_HAS_FS64 19 /* Fileserver supports FS.{Fetch,Store}Data64 */
atomic_t ref; /* Object refcount */
atomic_t active; /* Active user count */
u32 addr_version; /* Address list version */
@@ -657,7 +662,11 @@ struct afs_vnode {
afs_lock_type_t lock_type : 8;
/* outstanding callback notification on this file */
+ struct work_struct cb_work; /* Work for mmap'd files */
+ struct list_head cb_mmap_link; /* Link in cell->fs_open_mmaps */
void *cb_server; /* Server with callback/filelock */
+ atomic_t cb_nr_mmap; /* Number of mmaps */
+ unsigned int cb_fs_s_break; /* Mass server break counter (cell->fs_s_break) */
unsigned int cb_s_break; /* Mass break counter on ->server */
unsigned int cb_v_break; /* Mass break counter on ->volume */
unsigned int cb_break; /* Break counter on vnode */
@@ -965,6 +974,8 @@ extern struct fscache_cookie_def afs_vnode_cache_index_def;
/*
* callback.c
*/
+extern void afs_invalidate_mmap_work(struct work_struct *);
+extern void afs_server_init_callback_work(struct work_struct *work);
extern void afs_init_callback_state(struct afs_server *);
extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
@@ -1586,6 +1597,16 @@ static inline void afs_update_dentry_version(struct afs_operation *op,
}
/*
+ * Set the file size and block count. Estimate the number of 512-byte blocks
+ * used, rounded up to the nearest 1K for consistency with other AFS clients.
+ */
+static inline void afs_set_i_size(struct afs_vnode *vnode, u64 size)
+{
+ i_size_write(&vnode->vfs_inode, size);
+ vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
+}
+
+/*
* Check for a conflicting operation on a directory that we just unlinked from.
* If someone managed to sneak a link or an unlink in on the file we just
* unlinked, we won't be able to trust nlink on an AFS file (but not YFS).
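The relocated helper derives i_blocks from the byte size by rounding up to whole 1K units and then expressing those as 512-byte blocks, hence the final doubling. A runnable check of the arithmetic:

#include <stdio.h>

/* Same computation as afs_set_i_size() above. */
static unsigned long long afs_blocks(unsigned long long size)
{
	return ((size + 1023) >> 10) << 1;
}

int main(void)
{
	printf("%llu\n", afs_blocks(1));	/* 2: one 1K unit      */
	printf("%llu\n", afs_blocks(1024));	/* 2: still one unit   */
	printf("%llu\n", afs_blocks(1025));	/* 4: rounded up to 2K */
	return 0;
}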
diff --git a/fs/afs/protocol_afs.h b/fs/afs/protocol_afs.h
new file mode 100644
index 000000000000..0c39358c8b70
--- /dev/null
+++ b/fs/afs/protocol_afs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* AFS protocol bits
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells ([email protected])
+ */
+
+
+#define AFSCAPABILITIESMAX 196 /* Maximum number of words in a capability set */
+
+/* AFS3 Fileserver capabilities word 0 */
+#define AFS3_VICED_CAPABILITY_ERRORTRANS 0x0001 /* Uses UAE errors */
+#define AFS3_VICED_CAPABILITY_64BITFILES 0x0002 /* FetchData64 & StoreData64 supported */
+#define AFS3_VICED_CAPABILITY_WRITELOCKACL 0x0004 /* Can lock a file even without lock perm */
+#define AFS3_VICED_CAPABILITY_SANEACLS 0x0008 /* ACLs reviewed for sanity - don't use */
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
index b5bd03b1d3c7..e4cd89c44c46 100644
--- a/fs/afs/protocol_yfs.h
+++ b/fs/afs/protocol_yfs.h
@@ -168,3 +168,9 @@ enum yfs_lock_type {
yfs_LockMandatoryWrite = 0x101,
yfs_LockMandatoryExtend = 0x102,
};
+
+/* RXYFS Viced Capability Flags */
+#define YFS_VICED_CAPABILITY_ERRORTRANS 0x0001 /* Deprecated v0.195 */
+#define YFS_VICED_CAPABILITY_64BITFILES 0x0002 /* Deprecated v0.195 */
+#define YFS_VICED_CAPABILITY_WRITELOCKACL 0x0004 /* Can lock a file even without lock perm */
+#define YFS_VICED_CAPABILITY_SANEACLS 0x0008 /* Deprecated v0.195 */
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index d83f13c44b92..79e1a5f6701b 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -374,6 +374,7 @@ selected_server:
if (vnode->cb_server != server) {
vnode->cb_server = server;
vnode->cb_s_break = server->cb_s_break;
+ vnode->cb_fs_s_break = atomic_read(&server->cell->fs_s_break);
vnode->cb_v_break = vnode->volume->cb_v_break;
clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
}
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 684a2b02b9ff..6e5b9a19b234 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -235,6 +235,7 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
server->addr_version = alist->version;
server->uuid = *uuid;
rwlock_init(&server->fs_lock);
+ INIT_WORK(&server->initcb_work, afs_server_init_callback_work);
init_waitqueue_head(&server->probe_wq);
INIT_LIST_HEAD(&server->probe_link);
spin_lock_init(&server->probe_lock);
@@ -467,6 +468,7 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
afs_give_up_callbacks(net, server);
+ flush_work(&server->initcb_work);
afs_put_server(net, server, afs_server_trace_destroy);
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index e38bb1e7a4d2..d110def8aa8e 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -698,6 +698,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
vnode->lock_state = AFS_VNODE_LOCK_NONE;
init_rwsem(&vnode->rmdir_lock);
+ INIT_WORK(&vnode->cb_work, afs_invalidate_mmap_work);
_leave(" = %p", &vnode->vfs_inode);
return &vnode->vfs_inode;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index c0534697268e..f24370f5c774 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -137,7 +137,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
write_seqlock(&vnode->cb_lock);
i_size = i_size_read(&vnode->vfs_inode);
if (maybe_i_size > i_size)
- i_size_write(&vnode->vfs_inode, maybe_i_size);
+ afs_set_i_size(vnode, maybe_i_size);
write_sequnlock(&vnode->cb_lock);
}
@@ -471,13 +471,18 @@ static void afs_extend_writeback(struct address_space *mapping,
}
/* Has the page moved or been split? */
- if (unlikely(page != xas_reload(&xas)))
+ if (unlikely(page != xas_reload(&xas))) {
+ put_page(page);
break;
+ }
- if (!trylock_page(page))
+ if (!trylock_page(page)) {
+ put_page(page);
break;
+ }
if (!PageDirty(page) || PageWriteback(page)) {
unlock_page(page);
+ put_page(page);
break;
}
@@ -487,6 +492,7 @@ static void afs_extend_writeback(struct address_space *mapping,
t = afs_page_dirty_to(page, priv);
if (f != 0 && !new_content) {
unlock_page(page);
+ put_page(page);
break;
}
@@ -801,6 +807,7 @@ int afs_writepages(struct address_space *mapping,
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+ struct afs_file *af = iocb->ki_filp->private_data;
ssize_t result;
size_t count = iov_iter_count(from);
@@ -816,6 +823,10 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
if (!count)
return 0;
+ result = afs_validate(vnode, af->key);
+ if (result < 0)
+ return result;
+
result = generic_file_write_iter(iocb, from);
_leave(" = %zd", result);
@@ -829,13 +840,18 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
*/
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct inode *inode = file_inode(file);
- struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ struct afs_file *af = file->private_data;
+ int ret;
_enter("{%llx:%llu},{n=%pD},%d",
vnode->fid.vid, vnode->fid.vnode, file,
datasync);
+ ret = afs_validate(vnode, af->key);
+ if (ret < 0)
+ return ret;
+
return file_write_and_wait_range(file, start, end);
}
@@ -849,11 +865,14 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
struct file *file = vmf->vma->vm_file;
struct inode *inode = file_inode(file);
struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_file *af = file->private_data;
unsigned long priv;
vm_fault_t ret = VM_FAULT_RETRY;
_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
+ afs_validate(vnode, af->key);
+
sb_start_pagefault(inode->i_sb);
/* Wait for the page to be written to the cache before we allow it to
@@ -955,8 +974,7 @@ int afs_launder_page(struct page *page)
iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);
trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
- ret = afs_store_data(vnode, &iter, (loff_t)page->index * PAGE_SIZE,
- true);
+ ret = afs_store_data(vnode, &iter, page_offset(page) + f, true);
}
trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
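The afs_extend_writeback() hunks all make the same repair: the XArray walk holds a page reference, so every early break must pair it with put_page(). A condensed kernel-style sketch of that discipline (not the full function; error handling trimmed):

for (;;) {
	struct page *page = xas_next(&xas);

	if (!page)
		break;
	if (!page_cache_get_speculative(page))
		continue;
	if (page != xas_reload(&xas) || !trylock_page(page)) {
		put_page(page);			/* drop the speculative ref */
		break;
	}
	if (!PageDirty(page) || PageWriteback(page)) {
		unlock_page(page);
		put_page(page);			/* likewise on this exit */
		break;
	}
	/* ... add the page to the batch; unlock/put as it is consumed ... */
}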
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 69d900a8473d..a813b70f594e 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -630,7 +630,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
vaddr = eppnt->p_vaddr;
if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
- elf_type |= MAP_FIXED_NOREPLACE;
+ elf_type |= MAP_FIXED;
else if (no_base && interp_elf_ex->e_type == ET_DYN)
load_addr = -vaddr;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index dff2c8a3e059..c0cebcf745ce 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3030,7 +3030,7 @@ struct btrfs_dir_item *
btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 dir,
- u64 objectid, const char *name, int name_len,
+ u64 index, const char *name, int name_len,
int mod);
struct btrfs_dir_item *
btrfs_search_dir_index_item(struct btrfs_root *root,
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index f1274d5c3805..7721ce0c0604 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -190,9 +190,20 @@ static struct btrfs_dir_item *btrfs_lookup_match_dir(
}
/*
- * lookup a directory item based on name. 'dir' is the objectid
- * we're searching in, and 'mod' tells us if you plan on deleting the
- * item (use mod < 0) or changing the options (use mod > 0)
+ * Look up a directory item by name.
+ *
+ * @trans: The transaction handle to use. Can be NULL if @mod is 0.
+ * @root: The root of the target tree.
+ * @path: Path to use for the search.
+ * @dir: The inode number (objectid) of the directory.
+ * @name: The name associated to the directory entry we are looking for.
+ * @name_len: The length of the name.
+ * @mod: Used to indicate if the tree search is meant for a read only
+ * lookup, for a modification lookup or for a deletion lookup, so
+ * its value should be 0, 1 or -1, respectively.
+ *
+ * Returns: NULL if the dir item does not exist, an error pointer if an error
+ * happened, or a pointer to a dir item if a dir item exists for the given name.
*/
struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -273,27 +284,42 @@ out:
}
/*
- * lookup a directory item based on index. 'dir' is the objectid
- * we're searching in, and 'mod' tells us if you plan on deleting the
- * item (use mod < 0) or changing the options (use mod > 0)
+ * Look up a directory index item by name and index number.
*
- * The name is used to make sure the index really points to the name you were
- * looking for.
+ * @trans: The transaction handle to use. Can be NULL if @mod is 0.
+ * @root: The root of the target tree.
+ * @path: Path to use for the search.
+ * @dir: The inode number (objectid) of the directory.
+ * @index: The index number.
+ * @name: The name associated to the directory entry we are looking for.
+ * @name_len: The length of the name.
+ * @mod: Used to indicate if the tree search is meant for a read only
+ * lookup, for a modification lookup or for a deletion lookup, so
+ * its value should be 0, 1 or -1, respectively.
+ *
+ * Returns: NULL if the dir index item does not exist, an error pointer if an
+ * error happened, or a pointer to a dir item if the dir index item exists and
+ * matches the criteria (name and index number).
*/
struct btrfs_dir_item *
btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 dir,
- u64 objectid, const char *name, int name_len,
+ u64 index, const char *name, int name_len,
int mod)
{
+ struct btrfs_dir_item *di;
struct btrfs_key key;
key.objectid = dir;
key.type = BTRFS_DIR_INDEX_KEY;
- key.offset = objectid;
+ key.offset = index;
- return btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+ di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+ if (di == ERR_PTR(-ENOENT))
+ return NULL;
+
+ return di;
}
struct btrfs_dir_item *
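Folding -ENOENT into a NULL return gives callers the clean three-way contract relied on throughout the tree-log changes below. A caller-side sketch (kernel-style; argument names as in the surrounding code):

static int example_check_entry(struct btrfs_root *root, struct btrfs_path *path,
			       u64 dir, u64 index, const char *name, int name_len)
{
	struct btrfs_dir_item *di;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dir,
					 index, name, name_len, 0);
	if (IS_ERR(di))
		return PTR_ERR(di);	/* a real lookup failure */
	if (!di)
		return 0;		/* no such directory entry */
	/* ... entry exists: compare its key against the expected inode ... */
	return 1;
}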
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index fc3da7585fb7..0ab456cb4bf8 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4859,6 +4859,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
out_free_delayed:
btrfs_free_delayed_extent_op(extent_op);
out_free_buf:
+ btrfs_tree_unlock(buf);
free_extent_buffer(buf);
out_free_reserved:
btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 2673c6ba7a4e..0b9401a5afd3 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -665,7 +665,18 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
if (!ordered) {
ordered = btrfs_lookup_ordered_extent(inode, offset);
- BUG_ON(!ordered); /* Logic error */
+ /*
+ * The bio range is not covered by any ordered extent,
+ * so this must be a logic error.
+ */
+ if (unlikely(!ordered)) {
+ WARN(1, KERN_WARNING
+ "no ordered extent for root %llu ino %llu offset %llu\n",
+ inode->root->root_key.objectid,
+ btrfs_ino(inode), offset);
+ kvfree(sums);
+ return BLK_STS_IOERR;
+ }
}
nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 7ff577005d0f..a1762363f61f 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -734,8 +734,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
if (args->start >= inode->disk_i_size && !args->replace_extent)
modify_tree = 0;
- update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
- root == fs_info->tree_root);
+ update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
while (1) {
recow = 0;
ret = btrfs_lookup_file_extent(trans, root, path, ino,
@@ -2704,14 +2703,16 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
drop_args.bytes_found);
if (ret != -ENOSPC) {
/*
- * When cloning we want to avoid transaction aborts when
- * nothing was done and we are attempting to clone parts
- * of inline extents, in such cases -EOPNOTSUPP is
- * returned by __btrfs_drop_extents() without having
- * changed anything in the file.
+ * The only time we don't want to abort is if we are
+ * attempting to clone a partial inline extent, in which
+ * case we'll get EOPNOTSUPP. However if we aren't
+ * cloning we need to abort no matter what, because if we
+ * got EOPNOTSUPP via prealloc then we messed up and
+ * need to abort.
*/
- if (extent_info && !extent_info->is_new_extent &&
- ret && ret != -EOPNOTSUPP)
+ if (ret &&
+ (ret != -EOPNOTSUPP ||
+ (extent_info && extent_info->is_new_extent)))
btrfs_abort_transaction(trans, ret);
break;
}
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 5ada02e0e629..aa5be0b24987 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -414,9 +414,10 @@ static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
{
lockdep_assert_held(&info->lock);
- btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
+ /* The free space could be negative in case of overcommit */
+ btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull",
info->flags,
- info->total_bytes - btrfs_space_info_used(info, true),
+ (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
info->full ? "" : "not ");
btrfs_info(fs_info,
"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f7efc26aa82a..b415c5ec03ea 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -939,9 +939,11 @@ out:
}
/*
- * helper function to see if a given name and sequence number found
- * in an inode back reference are already in a directory and correctly
- * point to this inode
+ * See if a given name and sequence number found in an inode back reference are
+ * already in a directory and correctly point to this inode.
+ *
+ * Returns: < 0 on error, 0 if the directory entry does not exist and 1 if it
+ * exists.
*/
static noinline int inode_in_dir(struct btrfs_root *root,
struct btrfs_path *path,
@@ -950,29 +952,34 @@ static noinline int inode_in_dir(struct btrfs_root *root,
{
struct btrfs_dir_item *di;
struct btrfs_key location;
- int match = 0;
+ int ret = 0;
di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
index, name, name_len, 0);
- if (di && !IS_ERR(di)) {
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ } else if (di) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
if (location.objectid != objectid)
goto out;
- } else
+ } else {
goto out;
- btrfs_release_path(path);
+ }
+ btrfs_release_path(path);
di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
- if (di && !IS_ERR(di)) {
- btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
- if (location.objectid != objectid)
- goto out;
- } else
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
goto out;
- match = 1;
+ } else if (di) {
+ btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
+ if (location.objectid == objectid)
+ ret = 1;
+ }
out:
btrfs_release_path(path);
- return match;
+ return ret;
}
/*
@@ -1182,7 +1189,9 @@ next:
/* look for a conflicting sequence number */
di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
ref_index, name, namelen, 0);
- if (di && !IS_ERR(di)) {
+ if (IS_ERR(di)) {
+ return PTR_ERR(di);
+ } else if (di) {
ret = drop_one_dir_item(trans, root, path, dir, di);
if (ret)
return ret;
@@ -1192,7 +1201,9 @@ next:
/* look for a conflicting name */
di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
name, namelen, 0);
- if (di && !IS_ERR(di)) {
+ if (IS_ERR(di)) {
+ return PTR_ERR(di);
+ } else if (di) {
ret = drop_one_dir_item(trans, root, path, dir, di);
if (ret)
return ret;
@@ -1517,10 +1528,12 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- /* if we already have a perfect match, we're done */
- if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
- btrfs_ino(BTRFS_I(inode)), ref_index,
- name, namelen)) {
+ ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
+ btrfs_ino(BTRFS_I(inode)), ref_index,
+ name, namelen);
+ if (ret < 0) {
+ goto out;
+ } else if (ret == 0) {
/*
* look for a conflicting back reference in the
* metadata. if we find one we have to unlink that name
@@ -1580,6 +1593,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
if (ret)
goto out;
}
+ /* Else, ret == 1, we already have a perfect match, we're done. */
ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
kfree(name);
@@ -1936,8 +1950,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
struct btrfs_key log_key;
struct inode *dir;
u8 log_type;
- int exists;
- int ret = 0;
+ bool exists;
+ int ret;
bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
bool name_added = false;
@@ -1957,12 +1971,12 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
name_len);
btrfs_dir_item_key_to_cpu(eb, di, &log_key);
- exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
- if (exists == 0)
- exists = 1;
- else
- exists = 0;
+ ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
btrfs_release_path(path);
+ if (ret < 0)
+ goto out;
+ exists = (ret == 0);
+ ret = 0;
if (key->type == BTRFS_DIR_ITEM_KEY) {
dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
@@ -1977,7 +1991,11 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
ret = -EINVAL;
goto out;
}
- if (IS_ERR_OR_NULL(dst_di)) {
+
+ if (IS_ERR(dst_di)) {
+ ret = PTR_ERR(dst_di);
+ goto out;
+ } else if (!dst_di) {
/* we need a sequence number to insert, so we only
* do inserts for the BTRFS_DIR_INDEX_KEY types
*/
@@ -2281,7 +2299,7 @@ again:
dir_key->offset,
name, name_len, 0);
}
- if (!log_di || log_di == ERR_PTR(-ENOENT)) {
+ if (!log_di) {
btrfs_dir_item_key_to_cpu(eb, di, &location);
btrfs_release_path(path);
btrfs_release_path(log_path);
@@ -3540,8 +3558,7 @@ out_unlock:
if (err == -ENOSPC) {
btrfs_set_log_full_commit(trans);
err = 0;
- } else if (err < 0 && err != -ENOENT) {
- /* ENOENT can be returned if the entry hasn't been fsynced yet */
+ } else if (err < 0) {
btrfs_abort_transaction(trans, err);
}
diff --git a/fs/btrfs/verity.c b/fs/btrfs/verity.c
index 28d443d3ef93..4968535dfff0 100644
--- a/fs/btrfs/verity.c
+++ b/fs/btrfs/verity.c
@@ -451,7 +451,7 @@ static int del_orphan(struct btrfs_trans_handle *trans, struct btrfs_inode *inod
*/
static int rollback_verity(struct btrfs_inode *inode)
{
- struct btrfs_trans_handle *trans;
+ struct btrfs_trans_handle *trans = NULL;
struct btrfs_root *root = inode->root;
int ret;
@@ -473,6 +473,7 @@ static int rollback_verity(struct btrfs_inode *inode)
trans = btrfs_start_transaction(root, 2);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
+ trans = NULL;
btrfs_handle_fs_error(root->fs_info, ret,
"failed to start transaction in verity rollback %llu",
(u64)inode->vfs_inode.i_ino);
@@ -490,8 +491,9 @@ static int rollback_verity(struct btrfs_inode *inode)
btrfs_abort_transaction(trans, ret);
goto out;
}
- btrfs_end_transaction(trans);
out:
+ if (trans)
+ btrfs_end_transaction(trans);
return ret;
}
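The rollback fix adopts a common error-path shape: start with a NULL handle, clear it again when btrfs_start_transaction() fails, and end it exactly once at the shared exit label. Sketched in isolation (kernel-style pseudocode):

static int example_rollback(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans = NULL;
	int ret;

	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;		/* nothing to end on the way out */
		goto out;
	}
	/* ... steps that may 'goto out' with the transaction still live ... */
	ret = 0;
out:
	if (trans)
		btrfs_end_transaction(trans);
	return ret;
}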
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 464485aa7318..2ec3b8ac8fa3 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1137,6 +1137,19 @@ static void btrfs_close_one_device(struct btrfs_device *device)
atomic_set(&device->dev_stats_ccnt, 0);
extent_io_tree_release(&device->alloc_state);
+ /*
+ * Reset the flush error record. We might have a transient flush error
+ * in this mount, and if so we aborted the current transaction and set
+ * the fs to an error state, guaranteeing no super blocks can be further
+ * committed. However that error might be transient and if we unmount the
+ * filesystem and mount it again, we should allow the mount to succeed
+ * (btrfs_check_rw_degradable() should not fail) - if after mounting the
+ * filesystem again we still get flush errors, then we will again abort
+ * any transaction and set the error state, guaranteeing no commits of
+ * unsafe super blocks.
+ */
+ device->last_flush_error = 0;
+
/* Verify the device is back in a pristine state */
ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
diff --git a/fs/buffer.c b/fs/buffer.c
index ab7573d72dd7..c615387aedca 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1425,12 +1425,16 @@ void invalidate_bh_lrus(void)
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
-void invalidate_bh_lrus_cpu(int cpu)
+/*
+ * It's called from workqueue context so we need a bh_lru_lock to close
+ * the race with preemption/irq.
+ */
+void invalidate_bh_lrus_cpu(void)
{
struct bh_lru *b;
bh_lru_lock();
- b = per_cpu_ptr(&bh_lrus, cpu);
+ b = this_cpu_ptr(&bh_lrus);
__invalidate_bh_lrus(b);
bh_lru_unlock();
}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 6c0e52fd0743..8f537f1d9d1d 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2263,7 +2263,7 @@ retry:
list_for_each_entry(req, &ci->i_unsafe_dirops,
r_unsafe_dir_item) {
s = req->r_session;
- if (unlikely(s->s_mds > max)) {
+ if (unlikely(s->s_mds >= max)) {
spin_unlock(&ci->i_unsafe_lock);
goto retry;
}
@@ -2277,7 +2277,7 @@ retry:
list_for_each_entry(req, &ci->i_unsafe_iops,
r_unsafe_target_item) {
s = req->r_session;
- if (unlikely(s->s_mds > max)) {
+ if (unlikely(s->s_mds >= max)) {
spin_unlock(&ci->i_unsafe_lock);
goto retry;
}
@@ -2330,7 +2330,6 @@ retry:
int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct ceph_file_info *fi = file->private_data;
struct inode *inode = file->f_mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
u64 flush_tid;
@@ -2365,14 +2364,9 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
if (err < 0)
ret = err;
- if (errseq_check(&ci->i_meta_err, READ_ONCE(fi->meta_err))) {
- spin_lock(&file->f_lock);
- err = errseq_check_and_advance(&ci->i_meta_err,
- &fi->meta_err);
- spin_unlock(&file->f_lock);
- if (err < 0)
- ret = err;
- }
+ err = file_check_and_advance_wb_err(file);
+ if (err < 0)
+ ret = err;
out:
dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
return ret;
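With the private errseq_t gone, an ->fsync handler only needs the generic helper: file_check_and_advance_wb_err() reports any writeback error recorded on the mapping since this struct file last looked, then advances the file's cursor. A minimal kernel-style sketch of the resulting shape:

static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	int ret, err;

	ret = file_write_and_wait_range(file, start, end);

	/* Pick up and consume any error the mapping has recorded. */
	err = file_check_and_advance_wb_err(file);
	if (err < 0)
		ret = err;

	return ret;
}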
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index d16fd2d5fd42..e61018d9764e 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -233,7 +233,6 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
spin_lock_init(&fi->rw_contexts_lock);
INIT_LIST_HEAD(&fi->rw_contexts);
- fi->meta_err = errseq_sample(&ci->i_meta_err);
fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
return 0;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 2df1e1284451..1c7574105478 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -541,8 +541,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ceph_fscache_inode_init(ci);
- ci->i_meta_err = 0;
-
return &ci->vfs_inode;
}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 7cad180d6deb..d64413adc0fd 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1493,7 +1493,6 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
{
struct ceph_mds_request *req;
struct rb_node *p;
- struct ceph_inode_info *ci;
dout("cleanup_session_requests mds%d\n", session->s_mds);
mutex_lock(&mdsc->mutex);
@@ -1502,16 +1501,10 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
struct ceph_mds_request, r_unsafe_item);
pr_warn_ratelimited(" dropping unsafe request %llu\n",
req->r_tid);
- if (req->r_target_inode) {
- /* dropping unsafe change of inode's attributes */
- ci = ceph_inode(req->r_target_inode);
- errseq_set(&ci->i_meta_err, -EIO);
- }
- if (req->r_unsafe_dir) {
- /* dropping unsafe directory operation */
- ci = ceph_inode(req->r_unsafe_dir);
- errseq_set(&ci->i_meta_err, -EIO);
- }
+ if (req->r_target_inode)
+ mapping_set_error(req->r_target_inode->i_mapping, -EIO);
+ if (req->r_unsafe_dir)
+ mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
__unregister_request(mdsc, req);
}
/* zero r_attempts, so kick_requests() will re-send requests */
@@ -1678,7 +1671,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
spin_unlock(&mdsc->cap_dirty_lock);
if (dirty_dropped) {
- errseq_set(&ci->i_meta_err, -EIO);
+ mapping_set_error(inode->i_mapping, -EIO);
if (ci->i_wrbuffer_ref_head == 0 &&
ci->i_wr_ref == 0 &&
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 9b1b7f4cfdd4..fd8742bae847 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -1002,16 +1002,16 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
struct ceph_fs_client *new = fc->s_fs_info;
struct ceph_mount_options *fsopt = new->mount_options;
struct ceph_options *opt = new->client->options;
- struct ceph_fs_client *other = ceph_sb_to_client(sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
dout("ceph_compare_super %p\n", sb);
- if (compare_mount_options(fsopt, opt, other)) {
+ if (compare_mount_options(fsopt, opt, fsc)) {
dout("monitor(s)/mount options don't match\n");
return 0;
}
if ((opt->flags & CEPH_OPT_FSID) &&
- ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
+ ceph_fsid_compare(&opt->fsid, &fsc->client->fsid)) {
dout("fsid doesn't match\n");
return 0;
}
@@ -1019,6 +1019,17 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
dout("flags differ\n");
return 0;
}
+
+ if (fsc->blocklisted && !ceph_test_mount_opt(fsc, CLEANRECOVER)) {
+ dout("client is blocklisted (and CLEANRECOVER is not set)\n");
+ return 0;
+ }
+
+ if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
+ dout("client has been forcibly unmounted\n");
+ return 0;
+ }
+
return 1;
}
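
For context, an fs_context compare callback returns 1 to share the existing superblock and 0 to force allocation of a fresh one, so the two added checks simply refuse to hand a blocklisted or force-unmounted instance to a new mount. A schematic version (sb_is_usable() is a made-up predicate):

    static int myfs_compare_super(struct super_block *sb,
                                  struct fs_context *fc)
    {
            if (!sb_is_usable(sb))  /* hypothetical "still healthy?" test */
                    return 0;       /* 0: do not reuse, allocate a new sb */
            return 1;               /* 1: options match, share this sb   */
    }
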
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index a40eb14c282a..14f951cd5b61 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -429,8 +429,6 @@ struct ceph_inode_info {
#ifdef CONFIG_CEPH_FSCACHE
struct fscache_cookie *fscache;
#endif
- errseq_t i_meta_err;
-
struct inode vfs_inode; /* at end */
};
@@ -774,7 +772,6 @@ struct ceph_file_info {
spinlock_t rw_contexts_lock;
struct list_head rw_contexts;
- errseq_t meta_err;
u32 filp_gen;
atomic_t num_locks;
};
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index 8a3b30ec860c..8be57aaedab6 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cache.c - CIFS filesystem cache index structure definitions
+ * CIFS filesystem cache index structure definitions
*
* Copyright (c) 2010 Novell, Inc.
 * Author(s): Suresh Jayaraman <[email protected]>
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 51a824fc926a..de2c12bcfa4b 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs_debug.c
*
* Copyright (C) International Business Machines Corp., 2000,2005
*
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 4fd788586399..f97407520ea1 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifs_fs_sb.h
*
* Copyright (c) International Business Machines Corp., 2002,2004
* Author(s): Steve French ([email protected])
diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
index ef723be358af..b87cbbe6d2d4 100644
--- a/fs/cifs/cifs_ioctl.h
+++ b/fs/cifs/cifs_ioctl.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifs_ioctl.h
*
* Structure definitions for io control for cifs/smb3
*
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 8fa26a8530f8..353bd0dd7026 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifs_spnego.c -- SPNEGO upcall management for CIFS
+ * SPNEGO upcall management for CIFS
*
* Copyright (c) 2007 Red Hat, Inc.
* Author(s): Jeff Layton ([email protected])
diff --git a/fs/cifs/cifs_spnego.h b/fs/cifs/cifs_spnego.h
index 31387d0ea32e..e6a0451877d4 100644
--- a/fs/cifs/cifs_spnego.h
+++ b/fs/cifs/cifs_spnego.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifs_spnego.h -- SPNEGO upcall management for CIFS
+ * SPNEGO upcall management for CIFS
*
* Copyright (c) 2007 Red Hat, Inc.
* Author(s): Jeff Layton ([email protected])
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 171ad8b42107..e7582dd79179 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs/cifs_unicode.c
*
* Copyright (c) International Business Machines Corp., 2000,2009
* Modified by Steve French ([email protected])
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 388eb536cff1..ee3aab3dd4ac 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifsacl.c
*
* Copyright (C) International Business Machines Corp., 2007,2008
* Author(s): Steve French ([email protected])
diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
index f8292bcf8594..ccbfc754bd3c 100644
--- a/fs/cifs/cifsacl.h
+++ b/fs/cifs/cifsacl.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsacl.h
*
* Copyright (c) International Business Machines Corp., 2007
* Author(s): Steve French ([email protected])
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 2e6f40344037..d118282071b3 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifsencrypt.c
*
* Encryption and hashing operations relating to NTLM, NTLMv2. See MS-NLMP
* for more detailed information
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8c20bfa187ac..9fa930dfd78d 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifsfs.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French ([email protected])
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index d25a4099b32e..b50da1901ebd 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsfs.h
*
* Copyright (c) International Business Machines Corp., 2002, 2007
* Author(s): Steve French ([email protected])
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index c068f7d8d879..e916470468ea 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsglob.h
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French ([email protected])
@@ -1400,6 +1399,7 @@ struct cifsInodeInfo {
#define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */
#define CIFS_INO_LOCK (5) /* lock bit for synchronization */
#define CIFS_INO_MODIFIED_ATTR (6) /* Indicate change in mtime/ctime */
+#define CIFS_INO_CLOSE_ON_LOCK (7) /* Not to defer the close when lock is set */
unsigned long flags;
spinlock_t writers_lock;
unsigned int writers; /* Number of writers on this inode */
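
The new CIFS_INO_CLOSE_ON_LOCK bit ties together hunks further down in this diff: cifs_lock() sets it and cifs_close() tests it (see the fs/cifs/file.c hunks below), so a handle that ever held a byte-range lock is closed on the server immediately rather than kept in the deferred-close cache, where it could pin the lock past the application's close(). The coordination pattern, reduced to a sketch (the struct and helpers are illustrative):

    #include <linux/bitops.h>

    #define MY_INO_CLOSE_ON_LOCK 7  /* illustrative bit number */

    static void myfs_close_path(struct my_cinode *cinode,
                                struct my_cfile *cfile)
    {
            if (test_bit(MY_INO_CLOSE_ON_LOCK, &cinode->flags))
                    close_now(cfile);   /* locks were taken: close for real */
            else
                    defer_close(cfile); /* cache the handle briefly instead */
    }
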
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 98e8e5aa0613..d2ff438fd31f 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifspdu.h
*
* Copyright (c) International Business Machines Corp., 2002,2009
* Author(s): Steve French ([email protected])
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f9740c21ca3d..d0f85b666662 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsproto.h
*
* Copyright (c) International Business Machines Corp., 2002,2008
* Author(s): Steve French ([email protected])
@@ -268,6 +267,9 @@ extern void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode);
extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
+extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
+ const char *path);
+
extern struct TCP_Server_Info *cifs_get_tcp_session(struct smb3_fs_context *ctx);
extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
int from_reconnect);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index a8e41c1e80ca..243d17696f06 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifssmb.c
*
* Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French ([email protected])
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 0db344807ef1..c3b94c1e4591 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/connect.c
*
* Copyright (C) International Business Machines Corp., 2002,2011
* Author(s): Steve French ([email protected])
@@ -1090,7 +1089,7 @@ next_pdu:
module_put_and_exit(0);
}
-/**
+/*
* Returns true if srcaddr isn't specified and rhs isn't specified, or
* if srcaddr is specified and matches the IP address of the rhs argument
*/
@@ -1550,6 +1549,9 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
/**
* cifs_setup_ipc - helper to setup the IPC tcon for the session
+ * @ses: smb session to issue the request on
+ * @ctx: the superblock configuration context to use for building the
+ * new tree connection for the IPC (interprocess communication RPC)
*
* A new IPC connection is made and stored in the session
* tcon_ipc. The IPC tcon has the same lifetime as the session.
@@ -1605,6 +1607,7 @@ out:
/**
* cifs_free_ipc - helper to release the session IPC tcon
+ * @ses: smb session to unmount the IPC from
*
 * Needs to be called every time a session is destroyed.
*
@@ -1855,6 +1858,8 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
/**
* cifs_get_smb_ses - get a session matching @ctx data from @server
+ * @server: server to setup the session to
+ * @ctx: superblock configuration context to use to setup the session
*
* This function assumes it is being called from cifs_mount() where we
* already got a server reference (server refcount +1). See
@@ -2065,6 +2070,8 @@ cifs_put_tcon(struct cifs_tcon *tcon)
/**
* cifs_get_tcon - get a tcon matching @ctx data from @ses
+ * @ses: smb session to issue the request on
+ * @ctx: the superblock configuration context to use for building the
+ *	new tree connection
*
* - tcon refcount is the number of mount points using the tcon.
* - ses refcount is the number of tcon using the session.
@@ -2382,9 +2389,10 @@ cifs_match_super(struct super_block *sb, void *data)
spin_lock(&cifs_tcp_ses_lock);
cifs_sb = CIFS_SB(sb);
tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
- if (IS_ERR(tlink)) {
+ if (tlink == NULL) {
+ /* cannot match the superblock if tlink is null */
spin_unlock(&cifs_tcp_ses_lock);
- return rc;
+ return 0;
}
tcon = tlink_tcon(tlink);
ses = tcon->ses;
@@ -3030,7 +3038,7 @@ build_unc_path_to_root(const struct smb3_fs_context *ctx,
return full_path;
}
-/**
+/*
* expand_dfs_referral - Perform a dfs referral query and update the cifs_sb
*
* If a referral is found, cifs_sb->ctx->mount_options will be (re-)allocated
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 5f8a302ffcb2..6e8e7cc26ae2 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/dir.c
*
* vfs operations that deal with dentries
*
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 8c616aaeb7c4..0458d28d71aa 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/dns_resolve.c
*
* Copyright (c) 2007 Igor Mammedov
* Author(s): Igor Mammedov ([email protected])
diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h
index 9fa2807ef79e..afc0df381246 100644
--- a/fs/cifs/dns_resolve.h
+++ b/fs/cifs/dns_resolve.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/dns_resolve.h -- DNS Resolver upcall management for CIFS DFS
- * Handles host name to IP address resolution
+ * DNS Resolver upcall management for CIFS DFS
+ * Handles host name to IP address resolution
*
* Copyright (c) International Business Machines Corp., 2008
* Author(s): Steve French ([email protected])
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index 747a540db954..37c28415df1e 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/export.c
*
* Copyright (C) International Business Machines Corp., 2007
* Author(s): Steve French ([email protected])
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index d0216472f1c6..13f3182cf796 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/file.c
*
* vfs operations that deal with files
*
@@ -883,8 +882,9 @@ int cifs_close(struct inode *inode, struct file *file)
dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
cinode->lease_granted &&
+ !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
dclose) {
- if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
+ if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
inode->i_ctime = inode->i_mtime = current_time(inode);
cifs_fscache_update_inode_cookie(inode);
}
@@ -1865,6 +1865,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
tcon->ses->server);
cifs_sb = CIFS_FILE_SB(file);
+ set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
@@ -3112,7 +3113,7 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb;
struct dentry *dentry = ctx->cfile->dentry;
- int rc;
+ ssize_t rc;
tcon = tlink_tcon(ctx->cfile->tlink);
cifs_sb = CIFS_SB(dentry->d_sb);
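
Two details in the file.c hunks above are easy to miss: test_and_clear_bit() turns the mtime/ctime update into an atomic one-shot (the MODIFIED_ATTR bit is consumed as it is read, so racing closers update the timestamps at most once), and collect_uncached_write_data() now keeps rc in a ssize_t so large byte counts are not truncated through an int. The one-shot idiom, in isolation (types and helper are illustrative):

    #include <linux/bitops.h>

    #define MY_INO_MODIFIED_ATTR 6  /* illustrative bit number */

    static void myfs_maybe_stamp(struct my_cinode *cinode)
    {
            /* returns the old bit value and clears it atomically, so
             * only one of several racing callers takes this branch */
            if (test_and_clear_bit(MY_INO_MODIFIED_ATTR, &cinode->flags))
                    update_timestamps(cinode);  /* hypothetical helper */
    }
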
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index fab47fa7df74..8eedd20c44ab 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/fscache.c - CIFS filesystem cache interface
+ * CIFS filesystem cache interface
*
* Copyright (c) 2010 Novell, Inc.
* Author(s): Suresh Jayaraman <[email protected]>
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index 82e856b9cf89..9baa1d0f22bd 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/fscache.h - CIFS filesystem cache interface definitions
+ * CIFS filesystem cache interface definitions
*
* Copyright (c) 2010 Novell, Inc.
 * Author(s): Suresh Jayaraman <[email protected]>
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 50c01cff4c84..82848412ad85 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/inode.c
*
* Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French ([email protected])
@@ -1625,7 +1624,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
goto unlink_out;
}
- cifs_close_deferred_file(CIFS_I(inode));
+ cifs_close_deferred_file_under_dentry(tcon, full_path);
if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
rc = CIFSPOSIXDelFile(xid, tcon, full_path,
@@ -2114,9 +2113,9 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
goto cifs_rename_exit;
}
- cifs_close_deferred_file(CIFS_I(d_inode(source_dentry)));
+ cifs_close_deferred_file_under_dentry(tcon, from_name);
if (d_inode(target_dentry) != NULL)
- cifs_close_deferred_file(CIFS_I(d_inode(target_dentry)));
+ cifs_close_deferred_file_under_dentry(tcon, to_name);
rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
to_name);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 42c6a0bac6c8..0359b604bdbc 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/ioctl.c
*
* vfs operations that deal with io control
*
@@ -359,7 +358,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
if (pSMBFile == NULL)
break;
tcon = tlink_tcon(pSMBFile->tlink);
- caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+ /* caps = le64_to_cpu(tcon->fsUnixInfo.Capability); */
if (get_user(ExtAttrBits, (int __user *)arg)) {
rc = -EFAULT;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index f0a6d63bc08c..852e54ee82c2 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/link.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French ([email protected])
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 9469f1cf0b46..bb1185fff8cc 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/misc.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French ([email protected])
@@ -265,7 +264,8 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
/* Uid is not converted */
buffer->Uid = treeCon->ses->Suid;
- buffer->Mid = get_next_mid(treeCon->ses->server);
+ if (treeCon->ses->server)
+ buffer->Mid = get_next_mid(treeCon->ses->server);
}
if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
buffer->Flags2 |= SMBFLG2_DFS;
@@ -591,6 +591,7 @@ void cifs_put_writer(struct cifsInodeInfo *cinode)
/**
* cifs_queue_oplock_break - queue the oplock break handler for cfile
+ * @cfile: The file to break the oplock on
*
* This function is called from the demultiplex thread when it
* receives an oplock break for @cfile.
@@ -736,7 +737,7 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
if (cancel_delayed_work(&cfile->deferred)) {
tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
if (tmp_list == NULL)
- continue;
+ break;
tmp_list->cfile = cfile;
list_add_tail(&tmp_list->list, &file_head);
}
@@ -767,7 +768,7 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
if (cancel_delayed_work(&cfile->deferred)) {
tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
if (tmp_list == NULL)
- continue;
+ break;
tmp_list->cfile = cfile;
list_add_tail(&tmp_list->list, &file_head);
}
@@ -781,6 +782,43 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
kfree(tmp_list);
}
}
+void
+cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
+{
+ struct cifsFileInfo *cfile;
+ struct list_head *tmp;
+ struct file_list *tmp_list, *tmp_next_list;
+ struct list_head file_head;
+ void *page;
+ const char *full_path;
+
+ INIT_LIST_HEAD(&file_head);
+ page = alloc_dentry_path();
+ spin_lock(&tcon->open_file_lock);
+ list_for_each(tmp, &tcon->openFileList) {
+ cfile = list_entry(tmp, struct cifsFileInfo, tlist);
+ full_path = build_path_from_dentry(cfile->dentry, page);
+ if (strstr(full_path, path)) {
+ if (delayed_work_pending(&cfile->deferred)) {
+ if (cancel_delayed_work(&cfile->deferred)) {
+ tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ if (tmp_list == NULL)
+ break;
+ tmp_list->cfile = cfile;
+ list_add_tail(&tmp_list->list, &file_head);
+ }
+ }
+ }
+ }
+ spin_unlock(&tcon->open_file_lock);
+
+ list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+ _cifsFileInfo_put(tmp_list->cfile, true, false);
+ list_del(&tmp_list->list);
+ kfree(tmp_list);
+ }
+ free_dentry_path(page);
+}
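
The helper above follows the same shape as the other deferred-close walkers: candidates are collected under tcon->open_file_lock (with GFP_ATOMIC, since the allocation happens inside a spinlock), and _cifsFileInfo_put(), which may block, runs only after the lock is dropped. The sibling walkers above also now break rather than continue when that allocation fails, since further GFP_ATOMIC allocations are unlikely to succeed under the same pressure. One caveat worth noting: strstr() matches path anywhere inside full_path, so "/a/b" would also match "/x/a/bc". A stricter prefix test would look like this (purely illustrative, not part of the patch):

    #include <stdbool.h>
    #include <string.h>

    /* true if full_path is dir itself or lies underneath dir */
    static bool path_is_under(const char *full_path, const char *dir)
    {
            size_t n = strlen(dir);

            if (strncmp(full_path, dir, n) != 0)
                    return false;
            /* accept "dir" and "dir/...", reject "dir2/..." */
            return full_path[n] == '\0' || full_path[n] == '/';
    }
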
/* parses DFS referral V3 structure
* caller is responsible for freeing target_nodes
@@ -1029,6 +1067,9 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
/**
* cifs_alloc_hash - allocate hash and hash context together
+ * @name: The name of the crypto hash algo
+ * @shash: Where to put the pointer to the hash algo
+ * @sdesc: Where to put the pointer to the hash descriptor
*
* The caller has to make sure @sdesc is initialized to either NULL or
* a valid context. Both can be freed via cifs_free_hash().
@@ -1067,6 +1108,8 @@ cifs_alloc_hash(const char *name,
/**
* cifs_free_hash - free hash and hash context together
+ * @shash: Where to find the pointer to the hash algo
+ * @sdesc: Where to find the pointer to the hash descriptor
*
* Freeing a NULL hash or context is safe.
*/
@@ -1082,8 +1125,10 @@ cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
/**
* rqst_page_get_length - obtain the length and offset for a page in smb_rqst
- * Input: rqst - a smb_rqst, page - a page index for rqst
- * Output: *len - the length for this page, *offset - the offset for this page
+ * @rqst: The request descriptor
+ * @page: The index of the page to query
+ * @len: Where to store the length for this page
+ * @offset: Where to store the offset for this page
*/
void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
unsigned int *len, unsigned int *offset)
@@ -1116,6 +1161,8 @@ void extract_unc_hostname(const char *unc, const char **h, size_t *len)
/**
* copy_path_name - copy src path to dst, possibly truncating
+ * @dst: The destination buffer
+ * @src: The source name
*
* returns number of bytes written (including trailing nul)
*/
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 0e728aac67e9..fa9fbd6a819c 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs/netmisc.c
*
* Copyright (c) International Business Machines Corp., 2002,2008
* Author(s): Steve French ([email protected])
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 378133ce8869..25a2b8ef88b9 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/ntlmssp.h
*
* Copyright (c) International Business Machines Corp., 2002,2007
* Author(s): Steve French ([email protected])
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 54d77c99e21c..1929e80c09ee 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/readdir.c
*
* Directory search handling
*
diff --git a/fs/cifs/rfc1002pdu.h b/fs/cifs/rfc1002pdu.h
index 137f7c95afd6..ae1d025da294 100644
--- a/fs/cifs/rfc1002pdu.h
+++ b/fs/cifs/rfc1002pdu.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/rfc1002pdu.h
*
* Protocol Data Unit definitions for RFC 1001/1002 support
*
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 118403fbeda2..23e02db7923f 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/sess.c
*
* SMB/CIFS session setup handling routines
*
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index c9d8a50062b8..f5dcc4940b6d 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2file.c
*
* Copyright (C) International Business Machines Corp., 2002, 2011
* Author(s): Steve French ([email protected]),
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index d0e9f3782bd9..ca692b2283cd 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2glob.h
*
* Definitions for various global variables and structures
*
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 957b2594f02e..8297703492ee 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2inode.c
*
* Copyright (C) International Business Machines Corp., 2002, 2011
* Etersoft, 2012
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 668f77108831..29b5554f6263 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2misc.c
*
* Copyright (C) International Business Machines Corp., 2002,2011
* Etersoft, 2012
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index b6d2e3591927..7829c590eeac 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2pdu.c
*
* Copyright (C) International Business Machines Corp., 2009, 2013
* Etersoft, 2012
@@ -2398,7 +2397,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
/* Skip the ACL for now; we will copy it into buf later. */
aclptr = ptr;
- ptr += sizeof(struct cifs_acl);
+ ptr += sizeof(struct smb3_acl);
/* create one ACE to hold the mode embedded in reserved special SID */
acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);
@@ -2423,7 +2422,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
acl.AclSize = cpu_to_le16(acl_size);
acl.AceCount = cpu_to_le16(ace_count);
- memcpy(aclptr, &acl, sizeof(struct cifs_acl));
+ memcpy(aclptr, &acl, sizeof(struct smb3_acl));
buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
*len = roundup(ptr - (__u8 *)buf, 8);
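
The create_sd_buf() fix above is about size consistency: the cursor was advanced by sizeof(struct cifs_acl) while the header later copied into that slot is a struct smb3_acl, so any size difference between the two shifts every ACE written after the reservation. The reserve-then-backfill idiom only works when both steps use the same size, as in this standalone sketch (struct hdr is illustrative):

    #include <string.h>

    struct hdr { unsigned short size, count; };

    static size_t emit_record(unsigned char *buf, const struct hdr *h,
                              const void *payload, size_t plen)
    {
            unsigned char *p = buf + sizeof(struct hdr); /* reserve hdr */

            memcpy(p, payload, plen);                    /* body after  */
            p += plen;
            memcpy(buf, h, sizeof(struct hdr));          /* backfill:   */
            return p - buf;                              /* same size!  */
    }
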
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index e9cac7970b66..f32c99c9ba13 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2pdu.h
*
* Copyright (c) International Business Machines Corp., 2009, 2013
* Etersoft, 2012
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 263767f644f8..547945443fa7 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2proto.h
*
* Copyright (c) International Business Machines Corp., 2002, 2011
* Etersoft, 2012
diff --git a/fs/cifs/smb2status.h b/fs/cifs/smb2status.h
index 0215ef36e240..a9e958166fc5 100644
--- a/fs/cifs/smb2status.h
+++ b/fs/cifs/smb2status.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2status.h
*
* SMB2 Status code (network error) definitions
* Definitions are from MS-ERREF
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 6f7952ea4941..f59b956f9d25 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2transport.c
*
* Copyright (C) International Business Machines Corp., 2002, 2011
* Etersoft, 2012
diff --git a/fs/cifs/smberr.h b/fs/cifs/smberr.h
index 60189efb3236..aeffdad829e2 100644
--- a/fs/cifs/smberr.h
+++ b/fs/cifs/smberr.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smberr.h
*
* Copyright (c) International Business Machines Corp., 2002,2004
* Author(s): Steve French ([email protected])
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 75a95de320cf..b7379329b741 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/transport.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French ([email protected])
diff --git a/fs/cifs/winucase.c b/fs/cifs/winucase.c
index 59b6c577aa0a..2f075b5b50df 100644
--- a/fs/cifs/winucase.c
+++ b/fs/cifs/winucase.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs/winucase.c
*
* Copyright (c) Jeffrey Layton <[email protected]>, 2013
*
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 9ed481e79ce0..7d8b72d67c80 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/xattr.c
*
* Copyright (c) International Business Machines Corp., 2003, 2007
* Author(s): Steve French ([email protected])
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 8129a430d789..2f117c57160d 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -528,7 +528,7 @@ void debugfs_create_file_size(const char *name, umode_t mode,
{
struct dentry *de = debugfs_create_file(name, mode, parent, data, fops);
- if (de)
+ if (!IS_ERR(de))
d_inode(de)->i_size = file_size;
}
EXPORT_SYMBOL_GPL(debugfs_create_file_size);
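
debugfs_create_file() reports failure as an ERR_PTR-encoded pointer, not as NULL, so the old `if (de)` test happily passed an error pointer to d_inode(); IS_ERR() is the correct guard. The convention in general (consumer names are hypothetical):

    #include <linux/err.h>

    static void my_use_node(struct dentry *de)
    {
            if (IS_ERR(de))     /* failure arrives as ERR_PTR(-ENODEV)  */
                    return;     /* etc.; a plain NULL check misses it   */
            use_dentry(de);     /* hypothetical consumer */
    }
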
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 31ac3a73b390..a552399e211d 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -176,7 +176,7 @@ static struct page *erofs_read_inode(struct inode *inode,
}
if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
- if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_ALL)) {
+ if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
erofs_err(inode->i_sb,
"unsupported chunk format %x of nid %llu",
vi->chunkformat, vi->nid);
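
The old condition only rejected a chunk format when none of the known bits were set, so a future or corrupted format carrying an unknown bit alongside a known one slipped through. Masking against the complement rejects anything outside the supported set. A standalone version of the check:

    #include <stdbool.h>
    #include <stdint.h>

    #define KNOWN_FORMAT_BITS 0x0003u   /* illustrative supported mask */

    static bool format_supported(uint16_t format)
    {
            /* reject if ANY bit outside the known set is present,
             * not merely when no known bit is set */
            return (format & ~KNOWN_FORMAT_BITS) == 0;
    }
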
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 9fb98d85a3ce..7a6df35fdc91 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -369,7 +369,8 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
if (compacted_4b_initial == 32 / 4)
compacted_4b_initial = 0;
- if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B)
+ if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
+ compacted_4b_initial < totalidx)
compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
else
compacted_2b = 0;
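
The added bound check guards an unsigned underflow: when the initial 4-byte run already covers every index, totalidx - compacted_4b_initial wraps around instead of going negative, and rounddown() then produces a huge bogus count. A two-line demonstration of the wraparound:

    #include <stdio.h>

    int main(void)
    {
            unsigned int totalidx = 4, initial = 8;

            /* prints 4294967292, not -4: unsigned arithmetic wraps */
            printf("%u\n", totalidx - initial);
            return 0;
    }
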
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 1f3f4326bf3c..c17ccc19b938 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -48,10 +48,9 @@ struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
struct ext2_sb_info *sbi = EXT2_SB(sb);
if (block_group >= sbi->s_groups_count) {
- ext2_error (sb, "ext2_get_group_desc",
- "block_group >= groups_count - "
- "block_group = %d, groups_count = %lu",
- block_group, sbi->s_groups_count);
+ WARN(1, "block_group >= groups_count - "
+ "block_group = %d, groups_count = %lu",
+ block_group, sbi->s_groups_count);
return NULL;
}
@@ -59,10 +58,9 @@ struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(sb);
offset = block_group & (EXT2_DESC_PER_BLOCK(sb) - 1);
if (!sbi->s_group_desc[group_desc]) {
- ext2_error (sb, "ext2_get_group_desc",
- "Group descriptor not loaded - "
- "block_group = %d, group_desc = %lu, desc = %lu",
- block_group, group_desc, offset);
+ WARN(1, "Group descriptor not loaded - "
+ "block_group = %d, group_desc = %lu, desc = %lu",
+ block_group, group_desc, offset);
return NULL;
}
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index ffb295aa891c..74b172a4adda 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -551,7 +551,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
struct dir_private_info *info = file->private_data;
struct inode *inode = file_inode(file);
struct fname *fname;
- int ret;
+ int ret = 0;
if (!info) {
info = ext4_htree_create_dir_info(file, ctx->pos);
@@ -599,7 +599,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
info->curr_minor_hash,
&info->next_hash);
if (ret < 0)
- return ret;
+ goto finished;
if (ret == 0) {
ctx->pos = ext4_get_htree_eof(file);
break;
@@ -630,7 +630,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
}
finished:
info->last_pos = ctx->pos;
- return 0;
+ return ret < 0 ? ret : 0;
}
static int ext4_release_dir(struct inode *inode, struct file *filp)
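
The three small ext4_dx_readdir() changes above cooperate: ret is initialised, the mid-loop error path jumps to the finished label instead of returning directly (so info->last_pos is still recorded), and the tail returns the saved error rather than an unconditional 0. The shape of the pattern (types are illustrative):

    /* sketch: single exit that always runs the bookkeeping */
    static int my_process(struct my_ctx *c)
    {
            int ret = 0;

            ret = my_step(c);
            if (ret < 0)
                    goto finished;      /* don't skip the bookkeeping */
            /* ... more work that may also set ret ... */
    finished:
            c->last_pos = c->pos;       /* recorded on every path */
            return ret < 0 ? ret : 0;
    }
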
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 90ff5acaf11f..3825195539d7 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -3593,9 +3593,6 @@ extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
unsigned flags,
struct page **pagep,
void **fsdata);
-extern int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
- unsigned len, unsigned copied,
- struct page *page);
extern int ext4_try_add_inline_entry(handle_t *handle,
struct ext4_filename *fname,
struct inode *dir, struct inode *inode);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index c0de30f25185..0e02571f2f82 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5916,7 +5916,7 @@ void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
}
/* Check if *cur is a hole and if it is, skip it */
-static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
+static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
{
int ret;
struct ext4_map_blocks map;
@@ -5925,9 +5925,12 @@ static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret < 0)
+ return ret;
if (ret != 0)
- return;
+ return 0;
*cur = *cur + map.m_len;
+ return 0;
}
/* Count number of blocks used by this inode and update i_blocks */
@@ -5976,7 +5979,9 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
* iblocks by total number of differences found.
*/
cur = 0;
- skip_hole(inode, &cur);
+ ret = skip_hole(inode, &cur);
+ if (ret < 0)
+ goto out;
path = ext4_find_extent(inode, cur, NULL, 0);
if (IS_ERR(path))
goto out;
@@ -5995,8 +6000,12 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
}
cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
ext4_ext_get_actual_len(ex));
- skip_hole(inode, &cur);
-
+ ret = skip_hole(inode, &cur);
+ if (ret < 0) {
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ break;
+ }
path2 = ext4_find_extent(inode, cur, NULL, 0);
if (IS_ERR(path2)) {
ext4_ext_drop_refs(path);
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index 8e610a381862..8ea5a81e6554 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -892,6 +892,12 @@ static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc)
sizeof(lrange), (u8 *)&lrange, crc))
return -ENOSPC;
} else {
+ unsigned int max = (map.m_flags & EXT4_MAP_UNWRITTEN) ?
+ EXT_UNWRITTEN_MAX_LEN : EXT_INIT_MAX_LEN;
+
+ /* Limit the number of blocks in one extent */
+ map.m_len = min(max, map.m_len);
+
fc_ext.fc_ino = cpu_to_le32(inode->i_ino);
ex = (struct ext4_extent *)&fc_ext.fc_ex;
ex->ee_block = cpu_to_le32(map.m_lblk);
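
The clamp above reflects the on-disk extent format: ee_len is 16 bits with the top bit marking an unwritten extent, so a single extent can describe at most EXT_INIT_MAX_LEN (32768) initialized or EXT_UNWRITTEN_MAX_LEN (32767) unwritten blocks, and a fast-commit record must not claim more. A decoding sketch, mirroring ext4_ext_get_actual_len() as I read it:

    #include <stdint.h>

    static unsigned int extent_actual_len(uint16_t ee_len)
    {
            /* 1..0x8000 stored as-is (initialized); unwritten extents
             * store length + 0x8000, so at most 0x7fff blocks */
            return ee_len <= 0x8000u ? ee_len : ee_len - 0x8000u;
    }
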
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 82bf4ff6be28..39a1ab129fdc 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -7,6 +7,7 @@
#include <linux/iomap.h>
#include <linux/fiemap.h>
#include <linux/iversion.h>
+#include <linux/backing-dev.h>
#include "ext4_jbd2.h"
#include "ext4.h"
@@ -733,45 +734,83 @@ convert:
int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
unsigned copied, struct page *page)
{
- int ret, no_expand;
+ handle_t *handle = ext4_journal_current_handle();
+ int no_expand;
void *kaddr;
struct ext4_iloc iloc;
+ int ret = 0, ret2;
+
+ if (unlikely(copied < len) && !PageUptodate(page))
+ copied = 0;
- if (unlikely(copied < len)) {
- if (!PageUptodate(page)) {
- copied = 0;
+ if (likely(copied)) {
+ ret = ext4_get_inode_loc(inode, &iloc);
+ if (ret) {
+ unlock_page(page);
+ put_page(page);
+ ext4_std_error(inode->i_sb, ret);
goto out;
}
- }
+ ext4_write_lock_xattr(inode, &no_expand);
+ BUG_ON(!ext4_has_inline_data(inode));
- ret = ext4_get_inode_loc(inode, &iloc);
- if (ret) {
- ext4_std_error(inode->i_sb, ret);
- copied = 0;
- goto out;
- }
+ /*
+ * ei->i_inline_off may have changed since
+ * ext4_write_begin() called
+ * ext4_try_to_write_inline_data()
+ */
+ (void) ext4_find_inline_data_nolock(inode);
- ext4_write_lock_xattr(inode, &no_expand);
- BUG_ON(!ext4_has_inline_data(inode));
+ kaddr = kmap_atomic(page);
+ ext4_write_inline_data(inode, &iloc, kaddr, pos, copied);
+ kunmap_atomic(kaddr);
+ SetPageUptodate(page);
+ /* clear page dirty so that writepages wouldn't work for us. */
+ ClearPageDirty(page);
- /*
- * ei->i_inline_off may have changed since ext4_write_begin()
- * called ext4_try_to_write_inline_data()
- */
- (void) ext4_find_inline_data_nolock(inode);
+ ext4_write_unlock_xattr(inode, &no_expand);
+ brelse(iloc.bh);
- kaddr = kmap_atomic(page);
- ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
- kunmap_atomic(kaddr);
- SetPageUptodate(page);
- /* clear page dirty so that writepages wouldn't work for us. */
- ClearPageDirty(page);
+ /*
+ * It's important to update i_size while still holding page
+ * lock: page writeout could otherwise come in and zero
+ * beyond i_size.
+ */
+ ext4_update_inode_size(inode, pos + copied);
+ }
+ unlock_page(page);
+ put_page(page);
- ext4_write_unlock_xattr(inode, &no_expand);
- brelse(iloc.bh);
- mark_inode_dirty(inode);
+ /*
+ * Don't mark the inode dirty under page lock. First, it unnecessarily
+ * makes the holding time of page lock longer. Second, it forces lock
+ * ordering of page lock and transaction start for journaling
+ * filesystems.
+ */
+ if (likely(copied))
+ mark_inode_dirty(inode);
out:
- return copied;
+ /*
+ * If we didn't copy as much data as expected, we need to trim back
+ * the size of the xattr containing the inline data.
+ */
+ if (pos + len > inode->i_size && ext4_can_truncate(inode))
+ ext4_orphan_add(handle, inode);
+
+ ret2 = ext4_journal_stop(handle);
+ if (!ret)
+ ret = ret2;
+ if (pos + len > inode->i_size) {
+ ext4_truncate_failed_write(inode);
+ /*
+ * If truncate failed early the inode might still be
+ * on the orphan list; we need to make sure the inode
+ * is removed from the orphan list in that case.
+ */
+ if (inode->i_nlink)
+ ext4_orphan_del(NULL, inode);
+ }
+ return ret ? ret : copied;
}
struct buffer_head *
@@ -953,43 +992,6 @@ out:
return ret;
}
-int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
- unsigned len, unsigned copied,
- struct page *page)
-{
- int ret;
-
- ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
- if (ret < 0) {
- unlock_page(page);
- put_page(page);
- return ret;
- }
- copied = ret;
-
- /*
- * No need to use i_size_read() here, the i_size
- * cannot change under us because we hold i_mutex.
- *
- * But it's important to update i_size while still holding page lock:
- * page writeout could otherwise come in and zero beyond i_size.
- */
- if (pos+copied > inode->i_size)
- i_size_write(inode, pos+copied);
- unlock_page(page);
- put_page(page);
-
- /*
- * Don't mark the inode dirty under page lock. First, it unnecessarily
- * makes the holding time of page lock longer. Second, it forces lock
- * ordering of page lock and transaction start for journaling
- * filesystems.
- */
- mark_inode_dirty(inode);
-
- return copied;
-}
-
#ifdef INLINE_DIR_DEBUG
void ext4_show_inline_dir(struct inode *dir, struct buffer_head *bh,
void *inline_start, int inline_size)
@@ -1917,6 +1919,24 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
EXT4_I(inode)->i_disksize = i_size;
if (i_size < inline_size) {
+ /*
+ * if there's inline data to truncate and this file was
+ * converted to extents after that inline data was written,
+ * the extent status cache must be cleared to avoid leaving
+ * behind stale delayed allocated extent entries
+ */
+ if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+retry:
+ err = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
+ if (err == -ENOMEM) {
+ cond_resched();
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
+ if (err)
+ goto out_error;
+ }
+
/* Clear the content in the xattr space. */
if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) {
if ((err = ext4_xattr_ibody_find(inode, &i, &is)) != 0)
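
After this rework ext4_write_inline_data_end() owns the whole tail of a write: it copies `copied` (not `len`) bytes into the inline area, updates i_size while the page is still locked (so writeout cannot zero past it), dirties the inode only after dropping the page lock, stops the journal handle, and performs the orphan/truncate cleanup for short copies; the now-redundant ext4_da_write_inline_data_end() is deleted. The -ENOMEM handling in the truncate hunk above is a busy-retry with backoff, roughly (try_op() and my_backoff() are illustrative):

    static int my_retrying_op(struct inode *inode)
    {
            int err;

            do {
                    err = try_op(inode);
                    if (err == -ENOMEM) {
                            cond_resched();     /* yield the CPU...       */
                            my_backoff();       /* ...and wait briefly,
                                                 * e.g. congestion_wait() */
                    }
            } while (err == -ENOMEM);
            return err;
    }
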
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index d18852d6029c..0f06305167d5 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1284,22 +1284,14 @@ static int ext4_write_end(struct file *file,
loff_t old_size = inode->i_size;
int ret = 0, ret2;
int i_size_changed = 0;
- int inline_data = ext4_has_inline_data(inode);
bool verity = ext4_verity_in_progress(inode);
trace_ext4_write_end(inode, pos, len, copied);
- if (inline_data) {
- ret = ext4_write_inline_data_end(inode, pos, len,
- copied, page);
- if (ret < 0) {
- unlock_page(page);
- put_page(page);
- goto errout;
- }
- copied = ret;
- } else
- copied = block_write_end(file, mapping, pos,
- len, copied, page, fsdata);
+
+ if (ext4_has_inline_data(inode))
+ return ext4_write_inline_data_end(inode, pos, len, copied, page);
+
+ copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
/*
* it's important to update i_size while still holding page lock:
* page writeout could otherwise come in and zero beyond i_size.
@@ -1320,7 +1312,7 @@ static int ext4_write_end(struct file *file,
* ordering of page lock and transaction start for journaling
* filesystems.
*/
- if (i_size_changed || inline_data)
+ if (i_size_changed)
ret = ext4_mark_inode_dirty(handle, inode);
if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
@@ -1329,7 +1321,7 @@ static int ext4_write_end(struct file *file,
* inode->i_size. So truncate them
*/
ext4_orphan_add(handle, inode);
-errout:
+
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
@@ -1395,7 +1387,6 @@ static int ext4_journalled_write_end(struct file *file,
int partial = 0;
unsigned from, to;
int size_changed = 0;
- int inline_data = ext4_has_inline_data(inode);
bool verity = ext4_verity_in_progress(inode);
trace_ext4_journalled_write_end(inode, pos, len, copied);
@@ -1404,16 +1395,10 @@ static int ext4_journalled_write_end(struct file *file,
BUG_ON(!ext4_handle_valid(handle));
- if (inline_data) {
- ret = ext4_write_inline_data_end(inode, pos, len,
- copied, page);
- if (ret < 0) {
- unlock_page(page);
- put_page(page);
- goto errout;
- }
- copied = ret;
- } else if (unlikely(copied < len) && !PageUptodate(page)) {
+ if (ext4_has_inline_data(inode))
+ return ext4_write_inline_data_end(inode, pos, len, copied, page);
+
+ if (unlikely(copied < len) && !PageUptodate(page)) {
copied = 0;
ext4_journalled_zero_new_buffers(handle, inode, page, from, to);
} else {
@@ -1436,7 +1421,7 @@ static int ext4_journalled_write_end(struct file *file,
if (old_size < pos && !verity)
pagecache_isize_extended(inode, old_size, pos);
- if (size_changed || inline_data) {
+ if (size_changed) {
ret2 = ext4_mark_inode_dirty(handle, inode);
if (!ret)
ret = ret2;
@@ -1449,7 +1434,6 @@ static int ext4_journalled_write_end(struct file *file,
*/
ext4_orphan_add(handle, inode);
-errout:
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
@@ -1644,6 +1628,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int ret;
bool allocated = false;
+ bool reserved = false;
/*
* If the cluster containing lblk is shared with a delayed,
@@ -1660,6 +1645,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
ret = ext4_da_reserve_space(inode);
if (ret != 0) /* ENOSPC */
goto errout;
+ reserved = true;
} else { /* bigalloc */
if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
if (!ext4_es_scan_clu(inode,
@@ -1672,6 +1658,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
ret = ext4_da_reserve_space(inode);
if (ret != 0) /* ENOSPC */
goto errout;
+ reserved = true;
} else {
allocated = true;
}
@@ -1682,6 +1669,8 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
}
ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
+ if (ret && reserved)
+ ext4_da_release_space(inode, 1);
errout:
return ret;
@@ -1722,13 +1711,16 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
}
/*
- * Delayed extent could be allocated by fallocate.
- * So we need to check it.
+ * the buffer head associated with a delayed and not unwritten
+ * block found in the extent status cache must contain an
+ * invalid block number and have its BH_New and BH_Delay bits
+ * set, reflecting the state assigned when the block was
+ * initially delayed allocated
*/
- if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
- map_bh(bh, inode->i_sb, invalid_block);
- set_buffer_new(bh);
- set_buffer_delay(bh);
+ if (ext4_es_is_delonly(&es)) {
+ BUG_ON(bh->b_blocknr != invalid_block);
+ BUG_ON(!buffer_new(bh));
+ BUG_ON(!buffer_delay(bh));
return 0;
}
@@ -2932,19 +2924,6 @@ static int ext4_nonda_switch(struct super_block *sb)
return 0;
}
-/* We always reserve for an inode update; the superblock could be there too */
-static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
-{
- if (likely(ext4_has_feature_large_file(inode->i_sb)))
- return 1;
-
- if (pos + len <= 0x7fffffffULL)
- return 1;
-
- /* We might need to update the superblock to set LARGE_FILE */
- return 2;
-}
-
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -2953,7 +2932,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
struct page *page;
pgoff_t index;
struct inode *inode = mapping->host;
- handle_t *handle;
if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
return -EIO;
@@ -2979,41 +2957,11 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
return 0;
}
- /*
- * grab_cache_page_write_begin() can take a long time if the
- * system is thrashing due to memory pressure, or if the page
- * is being written back. So grab it first before we start
- * the transaction handle. This also allows us to allocate
- * the page (if needed) without using GFP_NOFS.
- */
-retry_grab:
+retry:
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
- unlock_page(page);
- /*
- * With delayed allocation, we don't log the i_disksize update
- * if there is delayed block allocation. But we still need
- * to journalling the i_disksize update if writes to the end
- * of file which has an already mapped buffer.
- */
-retry_journal:
- handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
- ext4_da_write_credits(inode, pos, len));
- if (IS_ERR(handle)) {
- put_page(page);
- return PTR_ERR(handle);
- }
-
- lock_page(page);
- if (page->mapping != mapping) {
- /* The page got truncated from under us */
- unlock_page(page);
- put_page(page);
- ext4_journal_stop(handle);
- goto retry_grab;
- }
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
@@ -3025,20 +2973,18 @@ retry_journal:
#endif
if (ret < 0) {
unlock_page(page);
- ext4_journal_stop(handle);
+ put_page(page);
/*
* block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
- * i_size_read because we hold i_mutex.
+ * i_size_read because we hold inode lock.
*/
if (pos + len > inode->i_size)
ext4_truncate_failed_write(inode);
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
- goto retry_journal;
-
- put_page(page);
+ goto retry;
return ret;
}
@@ -3075,8 +3021,6 @@ static int ext4_da_write_end(struct file *file,
struct page *page, void *fsdata)
{
struct inode *inode = mapping->host;
- int ret = 0, ret2;
- handle_t *handle = ext4_journal_current_handle();
loff_t new_i_size;
unsigned long start, end;
int write_mode = (int)(unsigned long)fsdata;
@@ -3086,44 +3030,36 @@ static int ext4_da_write_end(struct file *file,
len, copied, page, fsdata);
trace_ext4_da_write_end(inode, pos, len, copied);
+
+ if (write_mode != CONVERT_INLINE_DATA &&
+ ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
+ ext4_has_inline_data(inode))
+ return ext4_write_inline_data_end(inode, pos, len, copied, page);
+
start = pos & (PAGE_SIZE - 1);
end = start + copied - 1;
/*
- * generic_write_end() will run mark_inode_dirty() if i_size
- * changes. So let's piggyback the i_disksize mark_inode_dirty
- * into that.
+ * Since we are holding inode lock, we are sure i_disksize <=
+ * i_size. We also know that if i_disksize < i_size, there are
+	 * delalloc writes pending in the range up to i_size. If the end of
+	 * the current write is <= i_size, there's no need to touch
+	 * i_disksize since writeback will push i_disksize up to i_size
+ * eventually. If the end of the current write is > i_size and
+ * inside an allocated block (ext4_da_should_update_i_disksize()
+ * check), we need to update i_disksize here as neither
+	 * ext4_writepage() nor those ext4_writepages() paths that do not
+	 * allocate blocks update i_disksize.
+ *
+ * Note that we defer inode dirtying to generic_write_end() /
+ * ext4_da_write_inline_data_end().
*/
new_i_size = pos + copied;
- if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
- if (ext4_has_inline_data(inode) ||
- ext4_da_should_update_i_disksize(page, end)) {
- ext4_update_i_disksize(inode, new_i_size);
- /* We need to mark inode dirty even if
- * new_i_size is less that inode->i_size
- * bu greater than i_disksize.(hint delalloc)
- */
- ret = ext4_mark_inode_dirty(handle, inode);
- }
- }
+ if (copied && new_i_size > inode->i_size &&
+ ext4_da_should_update_i_disksize(page, end))
+ ext4_update_i_disksize(inode, new_i_size);
- if (write_mode != CONVERT_INLINE_DATA &&
- ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
- ext4_has_inline_data(inode))
- ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
- page);
- else
- ret2 = generic_write_end(file, mapping, pos, len, copied,
- page, fsdata);
-
- copied = ret2;
- if (ret2 < 0)
- ret = ret2;
- ret2 = ext4_journal_stop(handle);
- if (unlikely(ret2 && !ret))
- ret = ret2;
-
- return ret ? ret : copied;
+ return generic_write_end(file, mapping, pos, len, copied, page, fsdata);
}
/*
@@ -4340,6 +4276,12 @@ static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
goto has_buffer;
lock_buffer(bh);
+ if (ext4_buffer_uptodate(bh)) {
+ /* Someone brought it uptodate while we waited */
+ unlock_buffer(bh);
+ goto has_buffer;
+ }
+
/*
* If we have all information of the inode in memory and this
* is the only valid inode in the block, we need not read the
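
The inode.c hunks above are mostly consequences of the inline-data consolidation: both write_end variants return early through ext4_write_inline_data_end(), ext4_da_write_begin() no longer starts a journal handle at all (with delayed allocation the i_disksize update is deferred to writeback, so no handle is needed just to grab and prepare the page), a failed delayed-extent insert now releases the cluster reservation it took, and __ext4_get_inode_loc() rechecks the buffer after lock_buffer() to skip a needless read. That last one is the classic recheck-after-sleeping-lock idiom:

    #include <linux/buffer_head.h>

    /* sketch: re-test state after acquiring a blocking lock, since
     * another task may have completed the work while we slept */
    static void my_fill_buffer(struct buffer_head *bh)
    {
            lock_buffer(bh);
            if (buffer_uptodate(bh)) {  /* populated while we waited */
                    unlock_buffer(bh);
                    return;
            }
            /* ...otherwise issue the read with the buffer locked... */
    }
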
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 0775950ee84e..88d5d274a868 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -658,7 +658,7 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
* constraints, it may not be safe to do it right here so we
* defer superblock flushing to a workqueue.
*/
- if (continue_fs)
+ if (continue_fs && journal)
schedule_work(&EXT4_SB(sb)->s_error_work);
else
ext4_commit_super(sb);
@@ -1350,6 +1350,12 @@ static void ext4_destroy_inode(struct inode *inode)
true);
dump_stack();
}
+
+ if (EXT4_I(inode)->i_reserved_data_blocks)
+ ext4_msg(inode->i_sb, KERN_ERR,
+ "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
+ inode->i_ino, EXT4_I(inode),
+ EXT4_I(inode)->i_reserved_data_blocks);
}
static void init_once(void *foo)
@@ -3021,17 +3027,17 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
*/
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
- loff_t res = EXT4_NDIR_BLOCKS;
+ unsigned long long upper_limit, res = EXT4_NDIR_BLOCKS;
int meta_blocks;
- loff_t upper_limit;
- /* This is calculated to be the largest file size for a dense, block
+
+ /*
+ * This is calculated to be the largest file size for a dense, block
* mapped file such that the file's total number of 512-byte sectors,
* including data and all indirect blocks, does not exceed (2^48 - 1).
*
* __u32 i_blocks_lo and _u16 i_blocks_high represent the total
* number of 512-byte sectors of the file.
*/
-
if (!has_huge_files) {
/*
* !has_huge_files or implies that the inode i_block field
@@ -3074,7 +3080,7 @@ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
if (res > MAX_LFS_FILESIZE)
res = MAX_LFS_FILESIZE;
- return res;
+ return (loff_t)res;
}
static ext4_fsblk_t descriptor_loc(struct super_block *sb,
@@ -5042,12 +5048,15 @@ failed_mount_wq:
sbi->s_ea_block_cache = NULL;
if (sbi->s_journal) {
+ /* flush s_error_work before journal destroy. */
+ flush_work(&sbi->s_error_work);
jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
}
failed_mount3a:
ext4_es_unregister_shrinker(sbi);
failed_mount3:
+ /* flush s_error_work before sbi destroy */
flush_work(&sbi->s_error_work);
del_timer_sync(&sbi->s_err_report);
ext4_stop_mmpd(sbi);
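
The super.c changes harden teardown ordering: s_error_work is flushed before the journal (and before the sbi) is destroyed, so the deferred error worker can never run against freed state, and ext4_handle_error() only defers the superblock flush to that worker when a journal actually exists; ext4_max_bitmap_size() also moves its intermediates to unsigned long long so the shifts cannot overflow a signed loff_t. The general teardown rule, as a sketch (struct myobj is illustrative):

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    /* flush (or cancel) deferred work before freeing anything
     * that work might touch */
    static void myobj_destroy(struct myobj *o)
    {
            flush_work(&o->error_work);   /* worker finished, won't rearm */
            destroy_backend(o);           /* now safe to tear down        */
            kfree(o);
    }
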
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index f346a78f4bd6..6a675652129b 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -77,7 +77,6 @@ static WORK_STATE(INIT_OBJECT, "INIT", fscache_initialise_object);
static WORK_STATE(PARENT_READY, "PRDY", fscache_parent_ready);
static WORK_STATE(ABORT_INIT, "ABRT", fscache_abort_initialisation);
static WORK_STATE(LOOK_UP_OBJECT, "LOOK", fscache_look_up_object);
-static WORK_STATE(CREATE_OBJECT, "CRTO", fscache_look_up_object);
static WORK_STATE(OBJECT_AVAILABLE, "AVBL", fscache_object_available);
static WORK_STATE(JUMPSTART_DEPS, "JUMP", fscache_jumpstart_dependents);
@@ -907,6 +906,7 @@ static void fscache_dequeue_object(struct fscache_object *object)
* @object: The object to ask about
* @data: The auxiliary data for the object
* @datalen: The size of the auxiliary data
+ * @object_size: The size of the object according to the server.
*
* This function consults the netfs about the coherency state of an object.
* The caller must be holding a ref on cookie->n_active (held by
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index 433877107700..e002cdfaf3cc 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -22,7 +22,10 @@ static void fscache_operation_dummy_cancel(struct fscache_operation *op)
/**
* fscache_operation_init - Do basic initialisation of an operation
+ * @cookie: The cookie to operate on
* @op: The operation to initialise
+ * @processor: The function to perform the operation
+ * @cancel: A function to handle operation cancellation
* @release: The release function to assign
*
* Do basic initialisation of an operation. The caller must still set flags,
diff --git a/fs/inode.c b/fs/inode.c
index 37710ca863b5..ed0cab8a32db 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -190,8 +190,10 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
mapping->private_data = NULL;
mapping->writeback_index = 0;
- __init_rwsem(&mapping->invalidate_lock, "mapping.invalidate_lock",
- &sb->s_type->invalidate_lock_key);
+ init_rwsem(&mapping->invalidate_lock);
+ lockdep_set_class_and_name(&mapping->invalidate_lock,
+ &sb->s_type->invalidate_lock_key,
+ "mapping.invalidate_lock");
inode->i_private = NULL;
inode->i_mapping = mapping;
INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 6c55362c1f99..5bf8aa81715e 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -14,6 +14,7 @@
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/tracehook.h>
+#include <uapi/linux/io_uring.h>
#include "io-wq.h"
@@ -176,7 +177,6 @@ static void io_worker_ref_put(struct io_wq *wq)
static void io_worker_exit(struct io_worker *worker)
{
struct io_wqe *wqe = worker->wqe;
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
if (refcount_dec_and_test(&worker->ref))
complete(&worker->ref_done);
@@ -186,7 +186,6 @@ static void io_worker_exit(struct io_worker *worker)
if (worker->flags & IO_WORKER_F_FREE)
hlist_nulls_del_rcu(&worker->nulls_node);
list_del_rcu(&worker->all_list);
- acct->nr_workers--;
preempt_disable();
io_wqe_dec_running(worker);
worker->flags = 0;
@@ -246,8 +245,6 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
*/
static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
- bool do_create = false;
-
/*
* Most likely an attempt to queue unbounded work on an io_wq that
* wasn't setup with any unbounded workers.
@@ -256,18 +253,15 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
pr_warn_once("io-wq is not configured for unbound workers");
raw_spin_lock(&wqe->lock);
- if (acct->nr_workers < acct->max_workers) {
- acct->nr_workers++;
- do_create = true;
+ if (acct->nr_workers == acct->max_workers) {
+ raw_spin_unlock(&wqe->lock);
+ return true;
}
+ acct->nr_workers++;
raw_spin_unlock(&wqe->lock);
- if (do_create) {
- atomic_inc(&acct->nr_running);
- atomic_inc(&wqe->wq->worker_refs);
- return create_io_worker(wqe->wq, wqe, acct->index);
- }
-
- return true;
+ atomic_inc(&acct->nr_running);
+ atomic_inc(&wqe->wq->worker_refs);
+ return create_io_worker(wqe->wq, wqe, acct->index);
}
static void io_wqe_inc_running(struct io_worker *worker)
@@ -574,6 +568,7 @@ loop:
}
/* timed out, exit unless we're the last worker */
if (last_timeout && acct->nr_workers > 1) {
+ acct->nr_workers--;
raw_spin_unlock(&wqe->lock);
__set_current_state(TASK_RUNNING);
break;
@@ -589,9 +584,7 @@ loop:
if (!get_signal(&ksig))
continue;
- if (fatal_signal_pending(current))
- break;
- continue;
+ break;
}
last_timeout = !ret;
}
@@ -1287,6 +1280,10 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
int i, node, prev = 0;
+ BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND != (int) IO_WQ_BOUND);
+ BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
+ BUILD_BUG_ON((int) IO_WQ_ACCT_NR != 2);
+
for (i = 0; i < 2; i++) {
if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
new_count[i] = task_rlimit(current, RLIMIT_NPROC);
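
The io-wq hunks tighten worker accounting: nr_workers is incremented under wqe->lock before the worker is actually created (and is no longer decremented in io_worker_exit(), only in the timed-out-idle path), a worker now exits after get_signal() instead of continuing on non-fatal signals, and the BUILD_BUG_ON()s pin the enum correspondence that io_wq_max_workers() silently relies on when it indexes both enums with the same loop counter. The compile-time pinning idea, in portable C11 (enum names are illustrative):

    #include <assert.h>

    enum acct_idx { ACCT_BOUND, ACCT_UNBOUND };
    enum wq_type  { WQ_BOUND,   WQ_UNBOUND   };

    /* fails the build, not the runtime, if the enums ever diverge */
    static_assert((int)ACCT_BOUND == (int)WQ_BOUND, "enum mismatch");
    static_assert((int)ACCT_UNBOUND == (int)WQ_UNBOUND, "enum mismatch");
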
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 16fb7436043c..e68d27829bb2 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -403,7 +403,6 @@ struct io_ring_ctx {
struct wait_queue_head cq_wait;
unsigned cq_extra;
atomic_t cq_timeouts;
- struct fasync_struct *cq_fasync;
unsigned cq_last_tm_flush;
} ____cacheline_aligned_in_smp;
@@ -502,6 +501,7 @@ struct io_poll_update {
struct io_close {
struct file *file;
int fd;
+ u32 file_slot;
};
struct io_timeout_data {
@@ -712,6 +712,7 @@ struct io_async_rw {
struct iovec fast_iov[UIO_FASTIOV];
const struct iovec *free_iovec;
struct iov_iter iter;
+ struct iov_iter_state iter_state;
size_t bytes_done;
struct wait_page_queue wpq;
};
@@ -735,7 +736,6 @@ enum {
REQ_F_BUFFER_SELECTED_BIT,
REQ_F_COMPLETE_INLINE_BIT,
REQ_F_REISSUE_BIT,
- REQ_F_DONT_REISSUE_BIT,
REQ_F_CREDS_BIT,
REQ_F_REFCOUNT_BIT,
REQ_F_ARM_LTIMEOUT_BIT,
@@ -782,8 +782,6 @@ enum {
REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
/* caller should reissue async */
REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
- /* don't attempt request reissue, see io_rw_reissue() */
- REQ_F_DONT_REISSUE = BIT(REQ_F_DONT_REISSUE_BIT),
/* supports async reads */
REQ_F_NOWAIT_READ = BIT(REQ_F_NOWAIT_READ_BIT),
/* supports async writes */
@@ -1100,6 +1098,8 @@ static int io_req_prep_async(struct io_kiocb *req);
static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
unsigned int issue_flags, u32 slot_index);
+static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
+
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
static struct kmem_cache *req_cachep;
@@ -1613,10 +1613,8 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
wake_up(&ctx->sq_data->wait);
if (io_should_trigger_evfd(ctx))
eventfd_signal(ctx->cq_ev_fd, 1);
- if (waitqueue_active(&ctx->poll_wait)) {
+ if (waitqueue_active(&ctx->poll_wait))
wake_up_interruptible(&ctx->poll_wait);
- kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
- }
}
static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1630,10 +1628,8 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
}
if (io_should_trigger_evfd(ctx))
eventfd_signal(ctx->cq_ev_fd, 1);
- if (waitqueue_active(&ctx->poll_wait)) {
+ if (waitqueue_active(&ctx->poll_wait))
wake_up_interruptible(&ctx->poll_wait);
- kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
- }
}
/* Returns true if there are no backlogged entries after the flush */
@@ -2444,13 +2440,6 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
req = list_first_entry(done, struct io_kiocb, inflight_entry);
list_del(&req->inflight_entry);
- if (READ_ONCE(req->result) == -EAGAIN &&
- !(req->flags & REQ_F_DONT_REISSUE)) {
- req->iopoll_completed = 0;
- io_req_task_queue_reissue(req);
- continue;
- }
-
__io_cqring_fill_event(ctx, req->user_data, req->result,
io_put_rw_kbuf(req));
(*nr_events)++;
@@ -2613,8 +2602,7 @@ static bool io_resubmit_prep(struct io_kiocb *req)
if (!rw)
return !io_req_prep_async(req);
- /* may have left rw->iter inconsistent on -EIOCBQUEUED */
- iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
+ iov_iter_restore(&rw->iter, &rw->iter_state);
return true;
}
@@ -2714,10 +2702,9 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
if (kiocb->ki_flags & IOCB_WRITE)
kiocb_end_write(req);
if (unlikely(res != req->result)) {
- if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
- io_resubmit_prep(req))) {
- req_set_fail(req);
- req->flags |= REQ_F_DONT_REISSUE;
+ if (res == -EAGAIN && io_rw_should_reissue(req)) {
+ req->flags |= REQ_F_REISSUE;
+ return;
}
}
@@ -2843,7 +2830,8 @@ static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
return __io_file_supports_nowait(req->file, rw);
}
-static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ int rw)
{
struct io_ring_ctx *ctx = req->ctx;
struct kiocb *kiocb = &req->rw.kiocb;
@@ -2865,8 +2853,13 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(ret))
return ret;
- /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
- if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
+ /*
+ * If the file is marked O_NONBLOCK, still allow retry for it if it
+ * supports async. Otherwise it's impossible to use O_NONBLOCK files
+ * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
+ */
+ if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+ ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw)))
req->flags |= REQ_F_NOWAIT;
ioprio = READ_ONCE(sqe->ioprio);
@@ -2931,7 +2924,6 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
struct io_async_rw *io = req->async_data;
- bool check_reissue = kiocb->ki_complete == io_complete_rw;
/* add previously done IO, if any */
if (io && io->bytes_done > 0) {
@@ -2943,19 +2935,27 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
if (req->flags & REQ_F_CUR_POS)
req->file->f_pos = kiocb->ki_pos;
- if (ret >= 0 && check_reissue)
+ if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
__io_complete_rw(req, ret, 0, issue_flags);
else
io_rw_done(kiocb, ret);
- if (check_reissue && (req->flags & REQ_F_REISSUE)) {
+ if (req->flags & REQ_F_REISSUE) {
req->flags &= ~REQ_F_REISSUE;
if (io_resubmit_prep(req)) {
io_req_task_queue_reissue(req);
} else {
+ unsigned int cflags = io_put_rw_kbuf(req);
+ struct io_ring_ctx *ctx = req->ctx;
+
req_set_fail(req);
- __io_req_complete(req, issue_flags, ret,
- io_put_rw_kbuf(req));
+ if (!(issue_flags & IO_URING_F_NONBLOCK)) {
+ mutex_lock(&ctx->uring_lock);
+ __io_req_complete(req, issue_flags, ret, cflags);
+ mutex_unlock(&ctx->uring_lock);
+ } else {
+ __io_req_complete(req, issue_flags, ret, cflags);
+ }
}
}
}
@@ -3263,12 +3263,15 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
ret = nr;
break;
}
+ if (!iov_iter_is_bvec(iter)) {
+ iov_iter_advance(iter, nr);
+ } else {
+ req->rw.len -= nr;
+ req->rw.addr += nr;
+ }
ret += nr;
if (nr != iovec.iov_len)
break;
- req->rw.len -= nr;
- req->rw.addr += nr;
- iov_iter_advance(iter, nr);
}
return ret;
@@ -3315,12 +3318,17 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
if (!force && !io_op_defs[req->opcode].needs_async_setup)
return 0;
if (!req->async_data) {
+ struct io_async_rw *iorw;
+
if (io_alloc_async_data(req)) {
kfree(iovec);
return -ENOMEM;
}
io_req_map_rw(req, iovec, fast_iov, iter);
+ iorw = req->async_data;
+ /* we've copied and mapped the iter, ensure state is saved */
+ iov_iter_save_state(&iorw->iter, &iorw->iter_state);
}
return 0;
}
@@ -3339,6 +3347,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
iorw->free_iovec = iov;
if (iov)
req->flags |= REQ_F_NEED_CLEANUP;
+ iov_iter_save_state(&iorw->iter, &iorw->iter_state);
return 0;
}
@@ -3346,7 +3355,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
if (unlikely(!(req->file->f_mode & FMODE_READ)))
return -EBADF;
- return io_prep_rw(req, sqe);
+ return io_prep_rw(req, sqe, READ);
}
/*
@@ -3442,19 +3451,28 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter __iter, *iter = &__iter;
struct io_async_rw *rw = req->async_data;
- ssize_t io_size, ret, ret2;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ struct iov_iter_state __state, *state;
+ ssize_t ret, ret2;
if (rw) {
iter = &rw->iter;
+ state = &rw->iter_state;
+ /*
+ * We come here from an earlier attempt, restore our state to
+ * match in case it doesn't. It's cheap enough that we don't
+ * need to make this conditional.
+ */
+ iov_iter_restore(iter, state);
iovec = NULL;
} else {
ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
if (ret < 0)
return ret;
+ state = &__state;
+ iov_iter_save_state(iter, state);
}
- io_size = iov_iter_count(iter);
- req->result = io_size;
+ req->result = iov_iter_count(iter);
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
@@ -3468,7 +3486,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
return ret ?: -EAGAIN;
}
- ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
+ ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
if (unlikely(ret)) {
kfree(iovec);
return ret;
@@ -3484,30 +3502,49 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
/* no retry on NONBLOCK nor RWF_NOWAIT */
if (req->flags & REQ_F_NOWAIT)
goto done;
- /* some cases will consume bytes even on error returns */
- iov_iter_reexpand(iter, iter->count + iter->truncated);
- iov_iter_revert(iter, io_size - iov_iter_count(iter));
ret = 0;
} else if (ret == -EIOCBQUEUED) {
goto out_free;
- } else if (ret <= 0 || ret == io_size || !force_nonblock ||
+ } else if (ret <= 0 || ret == req->result || !force_nonblock ||
(req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
/* read all, failed, already did sync or don't want to retry */
goto done;
}
+ /*
+ * Don't depend on the iter state matching what was consumed, or being
+ * untouched in case of error. Restore it and we'll advance it
+ * manually if we need to.
+ */
+ iov_iter_restore(iter, state);
+
ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
if (ret2)
return ret2;
iovec = NULL;
rw = req->async_data;
- /* now use our persistent iterator, if we aren't already */
- iter = &rw->iter;
+ /*
+ * Now use our persistent iterator and state, if we aren't already.
+ * We've restored and mapped the iter to match.
+ */
+ if (iter != &rw->iter) {
+ iter = &rw->iter;
+ state = &rw->iter_state;
+ }
do {
- io_size -= ret;
+ /*
+ * We end up here because of a partial read, either from
+ * above or inside this loop. Advance the iter by the bytes
+ * that were consumed.
+ */
+ iov_iter_advance(iter, ret);
+ if (!iov_iter_count(iter))
+ break;
rw->bytes_done += ret;
+ iov_iter_save_state(iter, state);
+
/* if we can retry, do so with the callbacks armed */
if (!io_rw_should_retry(req)) {
kiocb->ki_flags &= ~IOCB_WAITQ;
@@ -3525,7 +3562,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
return 0;
/* we got some bytes, but not all. retry. */
kiocb->ki_flags &= ~IOCB_WAITQ;
- } while (ret > 0 && ret < io_size);
+ iov_iter_restore(iter, state);
+ } while (ret > 0);
done:
kiocb_done(kiocb, ret, issue_flags);
out_free:
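
The io_read() rework above replaces the reexpand/revert arithmetic with an explicit snapshot protocol: save the iterator state before an attempt, restore it after a partial result, and advance by exactly the bytes that were consumed before saving a new baseline. A userspace sketch of that discipline, with toy stand-ins for iov_iter and iov_iter_state rather than the kernel API:

#include <stddef.h>

struct iter       { const char *p; size_t count; };
struct iter_state { const char *p; size_t count; };

static void iter_save(const struct iter *it, struct iter_state *st)
{
	st->p = it->p;
	st->count = it->count;
}

static void iter_restore(struct iter *it, const struct iter_state *st)
{
	it->p = st->p;
	it->count = st->count;
}

/* after a short read of 'ret' bytes: don't trust what the callee left in
 * the iterator, rebuild it from the snapshot instead */
static void on_partial_read(struct iter *it, struct iter_state *st, size_t ret)
{
	iter_restore(it, st);	/* back to the pre-attempt position */
	it->p += ret;		/* account only for what was really read */
	it->count -= ret;
	iter_save(it, st);	/* new baseline for the next attempt */
}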
@@ -3539,7 +3577,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
return -EBADF;
- return io_prep_rw(req, sqe);
+ return io_prep_rw(req, sqe, WRITE);
}
static int io_write(struct io_kiocb *req, unsigned int issue_flags)
@@ -3548,19 +3586,23 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter __iter, *iter = &__iter;
struct io_async_rw *rw = req->async_data;
- ssize_t ret, ret2, io_size;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ struct iov_iter_state __state, *state;
+ ssize_t ret, ret2;
if (rw) {
iter = &rw->iter;
+ state = &rw->iter_state;
+ iov_iter_restore(iter, state);
iovec = NULL;
} else {
ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
if (ret < 0)
return ret;
+ state = &__state;
+ iov_iter_save_state(iter, state);
}
- io_size = iov_iter_count(iter);
- req->result = io_size;
+ req->result = iov_iter_count(iter);
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
@@ -3577,7 +3619,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
(req->flags & REQ_F_ISREG))
goto copy_iov;
- ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
+ ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
if (unlikely(ret))
goto out_free;
@@ -3624,9 +3666,7 @@ done:
kiocb_done(kiocb, ret2, issue_flags);
} else {
copy_iov:
- /* some cases will consume bytes even on error returns */
- iov_iter_reexpand(iter, iter->count + iter->truncated);
- iov_iter_revert(iter, io_size - iov_iter_count(iter));
+ iov_iter_restore(iter, state);
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
return ret ?: -EAGAIN;
}
@@ -4342,7 +4382,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
int i, bid = pbuf->bid;
for (i = 0; i < pbuf->nbufs; i++) {
- buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
if (!buf)
break;
@@ -4549,12 +4589,16 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
- sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
+ sqe->rw_flags || sqe->buf_index)
return -EINVAL;
if (req->flags & REQ_F_FIXED_FILE)
return -EBADF;
req->close.fd = READ_ONCE(sqe->fd);
+ req->close.file_slot = READ_ONCE(sqe->file_index);
+ if (req->close.file_slot && req->close.fd)
+ return -EINVAL;
+
return 0;
}
@@ -4566,6 +4610,11 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags)
struct file *file = NULL;
int ret = -EBADF;
+ if (req->close.file_slot) {
+ ret = io_close_fixed(req, issue_flags);
+ goto err;
+ }
+
spin_lock(&files->file_lock);
fdt = files_fdtable(files);
if (close->fd >= fdt->max_fds) {
@@ -5293,7 +5342,7 @@ static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask)
if (req->poll.events & EPOLLONESHOT)
flags = 0;
if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
- req->poll.done = true;
+ req->poll.events |= EPOLLONESHOT;
flags = 0;
}
if (flags & IORING_CQE_F_MORE)
@@ -5322,10 +5371,15 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
} else {
bool done;
+ if (req->poll.done) {
+ spin_unlock(&ctx->completion_lock);
+ return;
+ }
done = __io_poll_complete(req, req->result);
if (done) {
io_poll_remove_double(req);
hash_del(&req->hash_node);
+ req->poll.done = true;
} else {
req->result = 0;
add_wait_queue(req->poll.head, &req->poll.wait);
@@ -5463,6 +5517,7 @@ static void io_async_task_func(struct io_kiocb *req, bool *locked)
hash_del(&req->hash_node);
io_poll_remove_double(req);
+ apoll->poll.done = true;
spin_unlock(&ctx->completion_lock);
if (!READ_ONCE(apoll->poll.canceled))
@@ -5783,6 +5838,7 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
struct io_ring_ctx *ctx = req->ctx;
struct io_poll_table ipt;
__poll_t mask;
+ bool done;
ipt.pt._qproc = io_poll_queue_proc;
@@ -5791,13 +5847,13 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
if (mask) { /* no async, we'd stolen it */
ipt.error = 0;
- io_poll_complete(req, mask);
+ done = io_poll_complete(req, mask);
}
spin_unlock(&ctx->completion_lock);
if (mask) {
io_cqring_ev_posted(ctx);
- if (poll->events & EPOLLONESHOT)
+ if (done)
io_put_req(req);
}
return ipt.error;
@@ -6288,19 +6344,16 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
struct io_uring_rsrc_update2 up;
int ret;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
up.offset = req->rsrc_update.offset;
up.data = req->rsrc_update.arg;
up.nr = 0;
up.tags = 0;
up.resv = 0;
- mutex_lock(&ctx->uring_lock);
+ io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
&up, req->rsrc_update.nr_args);
- mutex_unlock(&ctx->uring_lock);
+ io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
if (ret < 0)
req_set_fail(req);
@@ -7515,6 +7568,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
break;
} while (1);
+ if (uts) {
+ struct timespec64 ts;
+
+ if (get_timespec64(&ts, uts))
+ return -EFAULT;
+ timeout = timespec64_to_jiffies(&ts);
+ }
+
if (sig) {
#ifdef CONFIG_COMPAT
if (in_compat_syscall())
@@ -7528,14 +7589,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
return ret;
}
- if (uts) {
- struct timespec64 ts;
-
- if (get_timespec64(&ts, uts))
- return -EFAULT;
- timeout = timespec64_to_jiffies(&ts);
- }
-
init_waitqueue_func_entry(&iowq.wq, io_wake_function);
iowq.wq.private = current;
INIT_LIST_HEAD(&iowq.wq.entry);
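
Hoisting the get_timespec64() copy ahead of the sigmask handling means a bad timeout pointer now fails with -EFAULT before any task state has been touched. A hedged userspace sketch of the same ordering rule (the names and the conversion are placeholders, not the io_uring entry point):

#include <errno.h>
#include <time.h>

static int wait_prepare(const struct timespec *uts, long *timeout)
{
	if (uts) {
		/* validate the caller-supplied timeout first ... */
		if (uts->tv_nsec < 0 || uts->tv_nsec >= 1000000000L)
			return -EINVAL;
		*timeout = uts->tv_sec;	/* placeholder conversion */
	}
	/* ... and only then install the signal mask and start waiting */
	return 0;
}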
@@ -8284,11 +8337,27 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
#endif
}
+static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
+ struct io_rsrc_node *node, void *rsrc)
+{
+ struct io_rsrc_put *prsrc;
+
+ prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
+ if (!prsrc)
+ return -ENOMEM;
+
+ prsrc->tag = *io_get_tag_slot(data, idx);
+ prsrc->rsrc = rsrc;
+ list_add(&prsrc->list, &node->rsrc_list);
+ return 0;
+}
+
static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
unsigned int issue_flags, u32 slot_index)
{
struct io_ring_ctx *ctx = req->ctx;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ bool needs_switch = false;
struct io_fixed_file *file_slot;
int ret = -EBADF;
@@ -8304,9 +8373,22 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
- ret = -EBADF;
- if (file_slot->file_ptr)
- goto err;
+
+ if (file_slot->file_ptr) {
+ struct file *old_file;
+
+ ret = io_rsrc_node_switch_start(ctx);
+ if (ret)
+ goto err;
+
+ old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+ ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
+ ctx->rsrc_node, old_file);
+ if (ret)
+ goto err;
+ file_slot->file_ptr = 0;
+ needs_switch = true;
+ }
*io_get_tag_slot(ctx->file_data, slot_index) = 0;
io_fixed_file_set(file_slot, file);
@@ -8318,25 +8400,50 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
ret = 0;
err:
+ if (needs_switch)
+ io_rsrc_node_switch(ctx, ctx->file_data);
io_ring_submit_unlock(ctx, !force_nonblock);
if (ret)
fput(file);
return ret;
}
-static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
- struct io_rsrc_node *node, void *rsrc)
+static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_rsrc_put *prsrc;
+ unsigned int offset = req->close.file_slot - 1;
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_fixed_file *file_slot;
+ struct file *file;
+ int ret, i;
- prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
- if (!prsrc)
- return -ENOMEM;
+ io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+ ret = -ENXIO;
+ if (unlikely(!ctx->file_data))
+ goto out;
+ ret = -EINVAL;
+ if (offset >= ctx->nr_user_files)
+ goto out;
+ ret = io_rsrc_node_switch_start(ctx);
+ if (ret)
+ goto out;
- prsrc->tag = *io_get_tag_slot(data, idx);
- prsrc->rsrc = rsrc;
- list_add(&prsrc->list, &node->rsrc_list);
- return 0;
+ i = array_index_nospec(offset, ctx->nr_user_files);
+ file_slot = io_fixed_file_slot(&ctx->file_table, i);
+ ret = -EBADF;
+ if (!file_slot->file_ptr)
+ goto out;
+
+ file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+ ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
+ if (ret)
+ goto out;
+
+ file_slot->file_ptr = 0;
+ io_rsrc_node_switch(ctx, ctx->file_data);
+ ret = 0;
+out:
+ io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+ return ret;
}
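
io_close_fixed() above takes a 1-based slot number so that 0 keeps meaning "no fixed slot", then validates everything before the table is touched. A plain-C sketch of that order of checks; there is no array_index_nospec() equivalent here and the teardown is simplified to a NULL store:

#include <errno.h>
#include <stddef.h>

struct file_table {
	void **files;
	unsigned int nr;
};

static int close_fixed_slot(struct file_table *t, unsigned int file_slot)
{
	unsigned int i = file_slot - 1;	/* 1-based on the wire */

	if (!t->files)
		return -ENXIO;		/* no table registered at all */
	if (i >= t->nr)
		return -EINVAL;		/* slot out of range */
	if (!t->files[i])
		return -EBADF;		/* slot empty */
	/* the kernel queues the old file on an rsrc node; just drop it here */
	t->files[i] = NULL;
	return 0;
}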
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
@@ -9105,8 +9212,10 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
struct io_buffer *buf;
unsigned long index;
- xa_for_each(&ctx->io_buffers, index, buf)
+ xa_for_each(&ctx->io_buffers, index, buf) {
__io_remove_buffers(ctx, buf, index, -1U);
+ cond_resched();
+ }
}
static void io_req_cache_free(struct list_head *list)
@@ -9231,13 +9340,6 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
return mask;
}
-static int io_uring_fasync(int fd, struct file *file, int on)
-{
- struct io_ring_ctx *ctx = file->private_data;
-
- return fasync_helper(fd, file, on, &ctx->cq_fasync);
-}
-
static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
const struct cred *creds;
@@ -9604,8 +9706,10 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
struct io_tctx_node *node;
unsigned long index;
- xa_for_each(&tctx->xa, index, node)
+ xa_for_each(&tctx->xa, index, node) {
io_uring_del_tctx_node(index);
+ cond_resched();
+ }
if (wq) {
/*
* Must be after io_uring_del_task_file() (removes nodes under
@@ -10029,7 +10133,6 @@ static const struct file_operations io_uring_fops = {
.mmap_capabilities = io_uring_nommu_mmap_capabilities,
#endif
.poll = io_uring_poll,
- .fasync = io_uring_fasync,
#ifdef CONFIG_PROC_FS
.show_fdinfo = io_uring_show_fdinfo,
#endif
@@ -10560,10 +10663,12 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
* ordering. Fine to drop uring_lock here, we hold
* a ref to the ctx.
*/
+ refcount_inc(&sqd->refs);
mutex_unlock(&ctx->uring_lock);
mutex_lock(&sqd->lock);
mutex_lock(&ctx->uring_lock);
- tctx = sqd->thread->io_uring;
+ if (sqd->thread)
+ tctx = sqd->thread->io_uring;
}
} else {
tctx = current->io_uring;
@@ -10577,16 +10682,20 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
if (ret)
goto err;
- if (sqd)
+ if (sqd) {
mutex_unlock(&sqd->lock);
+ io_put_sq_data(sqd);
+ }
if (copy_to_user(arg, new_count, sizeof(new_count)))
return -EFAULT;
return 0;
err:
- if (sqd)
+ if (sqd) {
mutex_unlock(&sqd->lock);
+ io_put_sq_data(sqd);
+ }
return ret;
}
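
The io_register_iowq_max_workers() changes close a lifetime race: sq_data could disappear while uring_lock was dropped, so it is now pinned with refcount_inc() before the unlock and released on every exit path, with a NULL check on sqd->thread after relocking. The general pin-before-unlock pattern in a userspace sketch, with C11 atomics standing in for refcount_t:

#include <stdatomic.h>

struct sq_data {
	atomic_int refs;
	/* ... thread pointer, locks ... */
};

static void sqd_use(struct sq_data *sqd)
{
	atomic_fetch_add(&sqd->refs, 1);	/* pin before dropping the lock */
	/* ... unlock the parent, re-lock in the safe order, do the work ... */
	if (atomic_fetch_sub(&sqd->refs, 1) == 1) {
		/* last reference gone: free sqd here */
	}
}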
diff --git a/fs/kernel_read_file.c b/fs/kernel_read_file.c
index 87aac4c72c37..1b07550485b9 100644
--- a/fs/kernel_read_file.c
+++ b/fs/kernel_read_file.c
@@ -178,7 +178,7 @@ int kernel_read_file_from_fd(int fd, loff_t offset, void **buf,
struct fd f = fdget(fd);
int ret = -EBADF;
- if (!f.file)
+ if (!f.file || !(f.file->f_mode & FMODE_READ))
goto out;
ret = kernel_read_file(f.file, offset, buf, buf_size, file_size, id);
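
The one-line fix above refuses descriptors that were not opened for reading instead of letting the read path fail later. A userspace analogue of the same guard, using fcntl() to recover the access mode:

#include <errno.h>
#include <fcntl.h>

static int check_fd_readable(int fd)
{
	int fl = fcntl(fd, F_GETFL);

	if (fl < 0)
		return -EBADF;			/* bad descriptor */
	if ((fl & O_ACCMODE) == O_WRONLY)
		return -EBADF;			/* mirrors !(f_mode & FMODE_READ) */
	return 0;
}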
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index ba581429bf7b..8e0a1378a4b1 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -1111,13 +1111,25 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
kn = kernfs_find_ns(parent, dentry->d_name.name, ns);
/* attach dentry and inode */
- if (kn && kernfs_active(kn)) {
+ if (kn) {
+ /* Inactive nodes are invisible to the VFS so don't
+ * create a negative dentry.
+ */
+ if (!kernfs_active(kn)) {
+ up_read(&kernfs_rwsem);
+ return NULL;
+ }
inode = kernfs_get_inode(dir->i_sb, kn);
if (!inode)
inode = ERR_PTR(-ENOMEM);
}
- /* Needed only for negative dentry validation */
- if (!inode)
+ /*
+ * Needed for negative dentry validation.
+ * The negative dentry can be created in kernfs_iop_lookup()
+ * or transformed from a positive dentry in dentry_unlink_inode(),
+ * called from vfs_rmdir().
+ */
+ if (!IS_ERR(inode))
kernfs_set_rev(parent, dentry);
up_read(&kernfs_rwsem);
diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c
index de36f12070bf..71c989f1568d 100644
--- a/fs/ksmbd/auth.c
+++ b/fs/ksmbd/auth.c
@@ -68,125 +68,6 @@ void ksmbd_copy_gss_neg_header(void *buf)
memcpy(buf, NEGOTIATE_GSS_HEADER, AUTH_GSS_LENGTH);
}
-static void
-str_to_key(unsigned char *str, unsigned char *key)
-{
- int i;
-
- key[0] = str[0] >> 1;
- key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2);
- key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3);
- key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4);
- key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5);
- key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6);
- key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7);
- key[7] = str[6] & 0x7F;
- for (i = 0; i < 8; i++)
- key[i] = (key[i] << 1);
-}
-
-static int
-smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
-{
- unsigned char key2[8];
- struct des_ctx ctx;
-
- if (fips_enabled) {
- ksmbd_debug(AUTH, "FIPS compliance enabled: DES not permitted\n");
- return -ENOENT;
- }
-
- str_to_key(key, key2);
- des_expand_key(&ctx, key2, DES_KEY_SIZE);
- des_encrypt(&ctx, out, in);
- memzero_explicit(&ctx, sizeof(ctx));
- return 0;
-}
-
-static int ksmbd_enc_p24(unsigned char *p21, const unsigned char *c8, unsigned char *p24)
-{
- int rc;
-
- rc = smbhash(p24, c8, p21);
- if (rc)
- return rc;
- rc = smbhash(p24 + 8, c8, p21 + 7);
- if (rc)
- return rc;
- return smbhash(p24 + 16, c8, p21 + 14);
-}
-
-/* produce a md4 message digest from data of length n bytes */
-static int ksmbd_enc_md4(unsigned char *md4_hash, unsigned char *link_str,
- int link_len)
-{
- int rc;
- struct ksmbd_crypto_ctx *ctx;
-
- ctx = ksmbd_crypto_ctx_find_md4();
- if (!ctx) {
- ksmbd_debug(AUTH, "Crypto md4 allocation error\n");
- return -ENOMEM;
- }
-
- rc = crypto_shash_init(CRYPTO_MD4(ctx));
- if (rc) {
- ksmbd_debug(AUTH, "Could not init md4 shash\n");
- goto out;
- }
-
- rc = crypto_shash_update(CRYPTO_MD4(ctx), link_str, link_len);
- if (rc) {
- ksmbd_debug(AUTH, "Could not update with link_str\n");
- goto out;
- }
-
- rc = crypto_shash_final(CRYPTO_MD4(ctx), md4_hash);
- if (rc)
- ksmbd_debug(AUTH, "Could not generate md4 hash\n");
-out:
- ksmbd_release_crypto_ctx(ctx);
- return rc;
-}
-
-static int ksmbd_enc_update_sess_key(unsigned char *md5_hash, char *nonce,
- char *server_challenge, int len)
-{
- int rc;
- struct ksmbd_crypto_ctx *ctx;
-
- ctx = ksmbd_crypto_ctx_find_md5();
- if (!ctx) {
- ksmbd_debug(AUTH, "Crypto md5 allocation error\n");
- return -ENOMEM;
- }
-
- rc = crypto_shash_init(CRYPTO_MD5(ctx));
- if (rc) {
- ksmbd_debug(AUTH, "Could not init md5 shash\n");
- goto out;
- }
-
- rc = crypto_shash_update(CRYPTO_MD5(ctx), server_challenge, len);
- if (rc) {
- ksmbd_debug(AUTH, "Could not update with challenge\n");
- goto out;
- }
-
- rc = crypto_shash_update(CRYPTO_MD5(ctx), nonce, len);
- if (rc) {
- ksmbd_debug(AUTH, "Could not update with nonce\n");
- goto out;
- }
-
- rc = crypto_shash_final(CRYPTO_MD5(ctx), md5_hash);
- if (rc)
- ksmbd_debug(AUTH, "Could not generate md5 hash\n");
-out:
- ksmbd_release_crypto_ctx(ctx);
- return rc;
-}
-
/**
* ksmbd_gen_sess_key() - function to generate session key
* @sess: session of connection
@@ -325,43 +206,6 @@ out:
}
/**
- * ksmbd_auth_ntlm() - NTLM authentication handler
- * @sess: session of connection
- * @pw_buf: NTLM challenge response
- * @passkey: user password
- *
- * Return: 0 on success, error number on error
- */
-int ksmbd_auth_ntlm(struct ksmbd_session *sess, char *pw_buf)
-{
- int rc;
- unsigned char p21[21];
- char key[CIFS_AUTH_RESP_SIZE];
-
- memset(p21, '\0', 21);
- memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);
- rc = ksmbd_enc_p24(p21, sess->ntlmssp.cryptkey, key);
- if (rc) {
- pr_err("password processing failed\n");
- return rc;
- }
-
- ksmbd_enc_md4(sess->sess_key, user_passkey(sess->user),
- CIFS_SMB1_SESSKEY_SIZE);
- memcpy(sess->sess_key + CIFS_SMB1_SESSKEY_SIZE, key,
- CIFS_AUTH_RESP_SIZE);
- sess->sequence_number = 1;
-
- if (strncmp(pw_buf, key, CIFS_AUTH_RESP_SIZE) != 0) {
- ksmbd_debug(AUTH, "ntlmv1 authentication failed\n");
- return -EINVAL;
- }
-
- ksmbd_debug(AUTH, "ntlmv1 authentication pass\n");
- return 0;
-}
-
-/**
* ksmbd_auth_ntlmv2() - NTLMv2 authentication handler
* @sess: session of connection
* @ntlmv2: NTLMv2 challenge response
@@ -442,44 +286,6 @@ out:
}
/**
- * __ksmbd_auth_ntlmv2() - NTLM2(extended security) authentication handler
- * @sess: session of connection
- * @client_nonce: client nonce from LM response.
- * @ntlm_resp: ntlm response data from client.
- *
- * Return: 0 on success, error number on error
- */
-static int __ksmbd_auth_ntlmv2(struct ksmbd_session *sess, char *client_nonce,
- char *ntlm_resp)
-{
- char sess_key[CIFS_SMB1_SESSKEY_SIZE] = {0};
- int rc;
- unsigned char p21[21];
- char key[CIFS_AUTH_RESP_SIZE];
-
- rc = ksmbd_enc_update_sess_key(sess_key,
- client_nonce,
- (char *)sess->ntlmssp.cryptkey, 8);
- if (rc) {
- pr_err("password processing failed\n");
- goto out;
- }
-
- memset(p21, '\0', 21);
- memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);
- rc = ksmbd_enc_p24(p21, sess_key, key);
- if (rc) {
- pr_err("password processing failed\n");
- goto out;
- }
-
- if (memcmp(ntlm_resp, key, CIFS_AUTH_RESP_SIZE) != 0)
- rc = -EINVAL;
-out:
- return rc;
-}
-
-/**
* ksmbd_decode_ntlmssp_auth_blob() - helper function to construct
* authenticate blob
* @authblob: authenticate blob source pointer
@@ -512,17 +318,6 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
nt_off = le32_to_cpu(authblob->NtChallengeResponse.BufferOffset);
nt_len = le16_to_cpu(authblob->NtChallengeResponse.Length);
- /* process NTLM authentication */
- if (nt_len == CIFS_AUTH_RESP_SIZE) {
- if (le32_to_cpu(authblob->NegotiateFlags) &
- NTLMSSP_NEGOTIATE_EXTENDED_SEC)
- return __ksmbd_auth_ntlmv2(sess, (char *)authblob +
- lm_off, (char *)authblob + nt_off);
- else
- return ksmbd_auth_ntlm(sess, (char *)authblob +
- nt_off);
- }
-
/* TODO : use domain name that imported from configuration file */
domain_name = smb_strndup_from_utf16((const char *)authblob +
le32_to_cpu(authblob->DomainName.BufferOffset),
diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
index af086d35398a..48b18b4ec117 100644
--- a/fs/ksmbd/connection.c
+++ b/fs/ksmbd/connection.c
@@ -296,10 +296,12 @@ int ksmbd_conn_handler_loop(void *p)
pdu_size = get_rfc1002_len(hdr_buf);
ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
- /* make sure we have enough to get to SMB header end */
- if (!ksmbd_pdu_size_has_room(pdu_size)) {
- ksmbd_debug(CONN, "SMB request too short (%u bytes)\n",
- pdu_size);
+ /*
+ * Check if pdu size is valid (min : smb header size,
+ * max : 0x00FFFFFF).
+ */
+ if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
+ pdu_size > MAX_STREAM_PROT_LEN) {
continue;
}
diff --git a/fs/ksmbd/crypto_ctx.c b/fs/ksmbd/crypto_ctx.c
index 5f4b1008d17e..81488d04199d 100644
--- a/fs/ksmbd/crypto_ctx.c
+++ b/fs/ksmbd/crypto_ctx.c
@@ -81,12 +81,6 @@ static struct shash_desc *alloc_shash_desc(int id)
case CRYPTO_SHASH_SHA512:
tfm = crypto_alloc_shash("sha512", 0, 0);
break;
- case CRYPTO_SHASH_MD4:
- tfm = crypto_alloc_shash("md4", 0, 0);
- break;
- case CRYPTO_SHASH_MD5:
- tfm = crypto_alloc_shash("md5", 0, 0);
- break;
default:
return NULL;
}
@@ -214,16 +208,6 @@ struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void)
return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA512);
}
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void)
-{
- return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD4);
-}
-
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void)
-{
- return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD5);
-}
-
static struct ksmbd_crypto_ctx *____crypto_aead_ctx_find(int id)
{
struct ksmbd_crypto_ctx *ctx;
diff --git a/fs/ksmbd/crypto_ctx.h b/fs/ksmbd/crypto_ctx.h
index ef11154b43df..4a367c62f653 100644
--- a/fs/ksmbd/crypto_ctx.h
+++ b/fs/ksmbd/crypto_ctx.h
@@ -15,8 +15,6 @@ enum {
CRYPTO_SHASH_CMACAES,
CRYPTO_SHASH_SHA256,
CRYPTO_SHASH_SHA512,
- CRYPTO_SHASH_MD4,
- CRYPTO_SHASH_MD5,
CRYPTO_SHASH_MAX,
};
@@ -43,8 +41,6 @@ struct ksmbd_crypto_ctx {
#define CRYPTO_CMACAES(c) ((c)->desc[CRYPTO_SHASH_CMACAES])
#define CRYPTO_SHA256(c) ((c)->desc[CRYPTO_SHASH_SHA256])
#define CRYPTO_SHA512(c) ((c)->desc[CRYPTO_SHASH_SHA512])
-#define CRYPTO_MD4(c) ((c)->desc[CRYPTO_SHASH_MD4])
-#define CRYPTO_MD5(c) ((c)->desc[CRYPTO_SHASH_MD5])
#define CRYPTO_HMACMD5_TFM(c) ((c)->desc[CRYPTO_SHASH_HMACMD5]->tfm)
#define CRYPTO_HMACSHA256_TFM(c)\
@@ -52,8 +48,6 @@ struct ksmbd_crypto_ctx {
#define CRYPTO_CMACAES_TFM(c) ((c)->desc[CRYPTO_SHASH_CMACAES]->tfm)
#define CRYPTO_SHA256_TFM(c) ((c)->desc[CRYPTO_SHASH_SHA256]->tfm)
#define CRYPTO_SHA512_TFM(c) ((c)->desc[CRYPTO_SHASH_SHA512]->tfm)
-#define CRYPTO_MD4_TFM(c) ((c)->desc[CRYPTO_SHASH_MD4]->tfm)
-#define CRYPTO_MD5_TFM(c) ((c)->desc[CRYPTO_SHASH_MD5]->tfm)
#define CRYPTO_GCM(c) ((c)->ccmaes[CRYPTO_AEAD_AES_GCM])
#define CRYPTO_CCM(c) ((c)->ccmaes[CRYPTO_AEAD_AES_CCM])
@@ -64,8 +58,6 @@ struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacsha256(void);
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void);
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void);
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void);
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void);
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void);
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void);
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void);
void ksmbd_crypto_destroy(void);
diff --git a/fs/ksmbd/glob.h b/fs/ksmbd/glob.h
index 49a5a3afa118..5b8f3e0ebdb3 100644
--- a/fs/ksmbd/glob.h
+++ b/fs/ksmbd/glob.h
@@ -12,7 +12,7 @@
#include "unicode.h"
#include "vfs_cache.h"
-#define KSMBD_VERSION "3.1.9"
+#define KSMBD_VERSION "3.4.2"
extern int ksmbd_debug_types;
diff --git a/fs/ksmbd/misc.c b/fs/ksmbd/misc.c
index 0b307ca28a19..60e7ac62c917 100644
--- a/fs/ksmbd/misc.c
+++ b/fs/ksmbd/misc.c
@@ -158,25 +158,18 @@ out:
* Return : windows path string or error
*/
-char *convert_to_nt_pathname(char *filename, char *sharepath)
+char *convert_to_nt_pathname(char *filename)
{
char *ab_pathname;
- int len, name_len;
- name_len = strlen(filename);
- ab_pathname = kmalloc(name_len, GFP_KERNEL);
+ if (strlen(filename) == 0)
+ filename = "\\";
+
+ ab_pathname = kstrdup(filename, GFP_KERNEL);
if (!ab_pathname)
return NULL;
- ab_pathname[0] = '\\';
- ab_pathname[1] = '\0';
-
- len = strlen(sharepath);
- if (!strncmp(filename, sharepath, len) && name_len != len) {
- strscpy(ab_pathname, &filename[len], name_len);
- ksmbd_conv_path_to_windows(ab_pathname);
- }
-
+ ksmbd_conv_path_to_windows(ab_pathname);
return ab_pathname;
}
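
The old helper allocated strlen(filename) bytes, one short of what the copy needed, and its share-prefix arithmetic produced wrong paths; the rewrite reduces to "empty name means the share root, otherwise duplicate and convert". A userspace sketch of the fixed shape, with strdup standing in for kstrdup and the slash conversion elided:

#include <string.h>

static char *to_nt_pathname(const char *filename)
{
	if (filename[0] == '\0')
		filename = "\\";	/* share root */
	return strdup(filename);	/* caller frees; NULL on OOM */
}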
@@ -240,7 +233,7 @@ char *ksmbd_extract_sharename(char *treename)
*
* Return: converted name on success, otherwise NULL
*/
-char *convert_to_unix_name(struct ksmbd_share_config *share, char *name)
+char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name)
{
int no_slash = 0, name_len, path_len;
char *new_name;
diff --git a/fs/ksmbd/misc.h b/fs/ksmbd/misc.h
index af8717d4d85b..253366bd0951 100644
--- a/fs/ksmbd/misc.h
+++ b/fs/ksmbd/misc.h
@@ -14,13 +14,13 @@ struct ksmbd_file;
int match_pattern(const char *str, size_t len, const char *pattern);
int ksmbd_validate_filename(char *filename);
int parse_stream_name(char *filename, char **stream_name, int *s_type);
-char *convert_to_nt_pathname(char *filename, char *sharepath);
+char *convert_to_nt_pathname(char *filename);
int get_nlink(struct kstat *st);
void ksmbd_conv_path_to_unix(char *path);
void ksmbd_strip_last_slash(char *path);
void ksmbd_conv_path_to_windows(char *path);
char *ksmbd_extract_sharename(char *treename);
-char *convert_to_unix_name(struct ksmbd_share_config *share, char *name);
+char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name);
#define KSMBD_DIR_INFO_ALIGNMENT 8
struct ksmbd_dir_info;
diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
index 16b6236d1bd2..f9dae6ef2115 100644
--- a/fs/ksmbd/oplock.c
+++ b/fs/ksmbd/oplock.c
@@ -1451,26 +1451,47 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
*/
struct create_context *smb2_find_context_vals(void *open_req, const char *tag)
{
- char *data_offset;
struct create_context *cc;
unsigned int next = 0;
char *name;
struct smb2_create_req *req = (struct smb2_create_req *)open_req;
+ unsigned int remain_len, name_off, name_len, value_off, value_len,
+ cc_len;
- data_offset = (char *)req + 4 + le32_to_cpu(req->CreateContextsOffset);
- cc = (struct create_context *)data_offset;
+ /*
+ * CreateContextsOffset and CreateContextsLength are guaranteed to
+ * be valid because of ksmbd_smb2_check_message().
+ */
+ cc = (struct create_context *)((char *)req + 4 +
+ le32_to_cpu(req->CreateContextsOffset));
+ remain_len = le32_to_cpu(req->CreateContextsLength);
do {
- int val;
-
cc = (struct create_context *)((char *)cc + next);
- name = le16_to_cpu(cc->NameOffset) + (char *)cc;
- val = le16_to_cpu(cc->NameLength);
- if (val < 4)
+ if (remain_len < offsetof(struct create_context, Buffer))
return ERR_PTR(-EINVAL);
- if (memcmp(name, tag, val) == 0)
- return cc;
next = le32_to_cpu(cc->Next);
+ name_off = le16_to_cpu(cc->NameOffset);
+ name_len = le16_to_cpu(cc->NameLength);
+ value_off = le16_to_cpu(cc->DataOffset);
+ value_len = le32_to_cpu(cc->DataLength);
+ cc_len = next ? next : remain_len;
+
+ if ((next & 0x7) != 0 ||
+ next > remain_len ||
+ name_off != offsetof(struct create_context, Buffer) ||
+ name_len < 4 ||
+ name_off + name_len > cc_len ||
+ (value_off & 0x7) != 0 ||
+ (value_off && (value_off < name_off + name_len)) ||
+ ((u64)value_off + value_len > cc_len))
+ return ERR_PTR(-EINVAL);
+
+ name = (char *)cc + name_off;
+ if (memcmp(name, tag, name_len) == 0)
+ return cc;
+
+ remain_len -= next;
} while (next != 0);
return NULL;
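
The rewritten smb2_find_context_vals() above walks a client-controlled chain of variable-length records and validates every offset against the bytes that remain before dereferencing anything. A minimal userspace version of that discipline over a hypothetical record layout (not the real struct create_context):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct rec {			/* stand-in for struct create_context */
	uint32_t next;		/* offset of next record; 0 = last */
	uint16_t name_off;
	uint16_t name_len;
	char buf[];
};

static const struct rec *find_rec(const void *base, size_t remain,
				  const char *tag, size_t tag_len)
{
	const char *p = base;

	for (;;) {
		const struct rec *r = (const struct rec *)p;
		size_t span;

		if (remain < sizeof(*r))
			return NULL;			/* truncated header */
		if (r->next > remain || (r->next & 7))
			return NULL;			/* next must stay in range, aligned */
		span = r->next ? r->next : remain;	/* bytes this record may use */
		if (r->name_off < sizeof(*r) ||
		    (size_t)r->name_off + r->name_len > span)
			return NULL;			/* name escapes its record */
		if (r->name_len == tag_len &&
		    memcmp(p + r->name_off, tag, tag_len) == 0)
			return r;
		if (!r->next)
			return NULL;			/* end of chain, no match */
		p += r->next;
		remain -= r->next;
	}
}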
diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
index e6a9f6aa47eb..2a2b2135bfde 100644
--- a/fs/ksmbd/server.c
+++ b/fs/ksmbd/server.c
@@ -584,6 +584,9 @@ static int __init ksmbd_server_init(void)
ret = ksmbd_workqueue_init();
if (ret)
goto err_crypto_destroy;
+
+ pr_warn_once("The ksmbd server is experimental, use at your own risk.\n");
+
return 0;
err_crypto_destroy:
diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c
index 9aa46bb3e10d..9edd9c161b27 100644
--- a/fs/ksmbd/smb2misc.c
+++ b/fs/ksmbd/smb2misc.c
@@ -80,18 +80,21 @@ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
};
/*
- * Returns the pointer to the beginning of the data area. Length of the data
- * area and the offset to it (from the beginning of the smb are also returned.
+ * Set the length of the data area and the offset to it via the output
+ * arguments. If either is invalid, return an error.
*/
-static char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
+static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+ struct smb2_hdr *hdr)
{
+ int ret = 0;
+
*off = 0;
*len = 0;
/* error requests do not have a data area */
if (hdr->Status && hdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
(((struct smb2_err_rsp *)hdr)->StructureSize) == SMB2_ERROR_STRUCTURE_SIZE2_LE)
- return NULL;
+ return ret;
/*
* Following commands have data areas so we have to get the location
@@ -165,69 +168,60 @@ static char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
case SMB2_IOCTL:
*off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset);
*len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount);
-
break;
default:
ksmbd_debug(SMB, "no length check for command\n");
break;
}
- /*
- * Invalid length or offset probably means data area is invalid, but
- * we have little choice but to ignore the data area in this case.
- */
if (*off > 4096) {
- ksmbd_debug(SMB, "offset %d too large, data area ignored\n",
- *off);
- *len = 0;
- *off = 0;
- } else if (*off < 0) {
- ksmbd_debug(SMB,
- "negative offset %d to data invalid ignore data area\n",
- *off);
- *off = 0;
- *len = 0;
- } else if (*len < 0) {
- ksmbd_debug(SMB,
- "negative data length %d invalid, data area ignored\n",
- *len);
- *len = 0;
- } else if (*len > 128 * 1024) {
- ksmbd_debug(SMB, "data area larger than 128K: %d\n", *len);
- *len = 0;
+ ksmbd_debug(SMB, "offset %d too large\n", *off);
+ ret = -EINVAL;
+ } else if ((u64)*off + *len > MAX_STREAM_PROT_LEN) {
+ ksmbd_debug(SMB, "Request is larger than maximum stream protocol length(%u): %llu\n",
+ MAX_STREAM_PROT_LEN, (u64)*off + *len);
+ ret = -EINVAL;
}
- /* return pointer to beginning of data area, ie offset from SMB start */
- if ((*off != 0) && (*len != 0))
- return (char *)hdr + *off;
- else
- return NULL;
+ return ret;
}
/*
* Calculate the size of the SMB message based on the fixed header
* portion, the number of word parameters and the data portion of the message.
*/
-static unsigned int smb2_calc_size(void *buf)
+static int smb2_calc_size(void *buf, unsigned int *len)
{
struct smb2_pdu *pdu = (struct smb2_pdu *)buf;
struct smb2_hdr *hdr = &pdu->hdr;
- int offset; /* the offset from the beginning of SMB to data area */
- int data_length; /* the length of the variable length data area */
+ unsigned int offset; /* the offset from the beginning of SMB to data area */
+ unsigned int data_length; /* the length of the variable length data area */
+ int ret;
+
/* Structure Size has already been checked to make sure it is 64 */
- int len = le16_to_cpu(hdr->StructureSize);
+ *len = le16_to_cpu(hdr->StructureSize);
/*
* StructureSize2, ie length of fixed parameter area has already
* been checked to make sure it is the correct length.
*/
- len += le16_to_cpu(pdu->StructureSize2);
+ *len += le16_to_cpu(pdu->StructureSize2);
+ /*
+ * StructureSize2 of smb2_lock pdu is set to 48, indicating
+ * the size of smb2 lock request with single smb2_lock_element
+ * regardless of number of locks. Subtract single
+ * smb2_lock_element for correct buffer size check.
+ */
+ if (hdr->Command == SMB2_LOCK)
+ *len -= sizeof(struct smb2_lock_element);
if (has_smb2_data_area[le16_to_cpu(hdr->Command)] == false)
goto calc_size_exit;
- smb2_get_data_area_len(&offset, &data_length, hdr);
- ksmbd_debug(SMB, "SMB2 data length %d offset %d\n", data_length,
+ ret = smb2_get_data_area_len(&offset, &data_length, hdr);
+ if (ret)
+ return ret;
+ ksmbd_debug(SMB, "SMB2 data length %u offset %u\n", data_length,
offset);
if (data_length > 0) {
@@ -237,16 +231,19 @@ static unsigned int smb2_calc_size(void *buf)
* for some commands, typically those with odd StructureSize,
* so we must add one to the calculation.
*/
- if (offset + 1 < len)
+ if (offset + 1 < *len) {
ksmbd_debug(SMB,
- "data area offset %d overlaps SMB2 header %d\n",
- offset + 1, len);
- else
- len = offset + data_length;
+ "data area offset %d overlaps SMB2 header %u\n",
+ offset + 1, *len);
+ return -EINVAL;
+ }
+
+ *len = offset + data_length;
}
+
calc_size_exit:
- ksmbd_debug(SMB, "SMB2 len %d\n", len);
- return len;
+ ksmbd_debug(SMB, "SMB2 len %u\n", *len);
+ return 0;
}
static inline int smb2_query_info_req_len(struct smb2_query_info_req *h)
@@ -391,9 +388,11 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
return 1;
}
- clc_len = smb2_calc_size(hdr);
+ if (smb2_calc_size(hdr, &clc_len))
+ return 1;
+
if (len != clc_len) {
- /* server can return one byte more due to implied bcc[0] */
+ /* client can return one byte more due to implied bcc[0] */
if (clc_len == len + 1)
return 0;
@@ -418,9 +417,6 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
return 0;
}
- if (command == SMB2_LOCK_HE && len == 88)
- return 0;
-
ksmbd_debug(SMB,
"cli req too short, len %d not %d. cmd:%d mid:%llu\n",
len, clc_len, command,
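
The smb2misc.c changes above stop "ignoring" a bad data area and instead reject the PDU, and they do the offset-plus-length comparison in a 64-bit type so it cannot wrap. The core check, isolated into a userspace sketch with illustrative limits:

#include <stdint.h>

#define MAX_PDU_LEN 0x00FFFFFFu	/* 24-bit RFC1002-style length cap */

static int check_data_area(uint32_t off, uint32_t len)
{
	if (off > 4096)
		return -1;			/* offset out of range */
	if ((uint64_t)off + len > MAX_PDU_LEN)
		return -1;			/* widened add: no 32-bit wrap */
	return 0;
}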
diff --git a/fs/ksmbd/smb2ops.c b/fs/ksmbd/smb2ops.c
index 197473871aa4..b06456eb587b 100644
--- a/fs/ksmbd/smb2ops.c
+++ b/fs/ksmbd/smb2ops.c
@@ -187,11 +187,6 @@ static struct smb_version_cmds smb2_0_server_cmds[NUMBER_OF_SMB2_COMMANDS] = {
[SMB2_CHANGE_NOTIFY_HE] = { .proc = smb2_notify},
};
-int init_smb2_0_server(struct ksmbd_conn *conn)
-{
- return -EOPNOTSUPP;
-}
-
/**
* init_smb2_1_server() - initialize a smb server connection with smb2.1
* command dispatcher
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index c86164dc70bb..005aa93a49d6 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -236,9 +236,6 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
if (conn->need_neg == false)
return -EINVAL;
- if (!(conn->dialect >= SMB20_PROT_ID &&
- conn->dialect <= SMB311_PROT_ID))
- return -EINVAL;
rsp_hdr = work->response_buf;
@@ -433,7 +430,7 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
work->compound_pfid = KSMBD_NO_FID;
}
memset((char *)rsp_hdr + 4, 0, sizeof(struct smb2_hdr) + 2);
- rsp_hdr->ProtocolId = rcv_hdr->ProtocolId;
+ rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
rsp_hdr->Command = rcv_hdr->Command;
@@ -459,13 +456,22 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
bool is_chained_smb2_message(struct ksmbd_work *work)
{
struct smb2_hdr *hdr = work->request_buf;
- unsigned int len;
+ unsigned int len, next_cmd;
if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
return false;
hdr = ksmbd_req_buf_next(work);
- if (le32_to_cpu(hdr->NextCommand) > 0) {
+ next_cmd = le32_to_cpu(hdr->NextCommand);
+ if (next_cmd > 0) {
+ if ((u64)work->next_smb2_rcv_hdr_off + next_cmd +
+ __SMB2_HEADER_STRUCTURE_SIZE >
+ get_rfc1002_len(work->request_buf)) {
+ pr_err("next command(%u) offset exceeds smb msg size\n",
+ next_cmd);
+ return false;
+ }
+
ksmbd_debug(SMB, "got SMB2 chained command\n");
init_chained_smb2_rsp(work);
return true;
@@ -634,7 +640,7 @@ static char *
smb2_get_name(struct ksmbd_share_config *share, const char *src,
const int maxlen, struct nls_table *local_nls)
{
- char *name, *unixname;
+ char *name;
name = smb_strndup_from_utf16(src, maxlen, 1, local_nls);
if (IS_ERR(name)) {
@@ -642,19 +648,9 @@ smb2_get_name(struct ksmbd_share_config *share, const char *src,
return name;
}
- /* change it to absolute unix name */
ksmbd_conv_path_to_unix(name);
ksmbd_strip_last_slash(name);
-
- unixname = convert_to_unix_name(share, name);
- kfree(name);
- if (!unixname) {
- pr_err("can not convert absolute name\n");
- return ERR_PTR(-ENOMEM);
- }
-
- ksmbd_debug(SMB, "absolute name = %s\n", unixname);
- return unixname;
+ return name;
}
int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
@@ -1068,6 +1064,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
struct smb2_negotiate_req *req = work->request_buf;
struct smb2_negotiate_rsp *rsp = work->response_buf;
int rc = 0;
+ unsigned int smb2_buf_len, smb2_neg_size;
__le32 status;
ksmbd_debug(SMB, "Received negotiate request\n");
@@ -1085,6 +1082,44 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
goto err_out;
}
+ smb2_buf_len = get_rfc1002_len(work->request_buf);
+ smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects) - 4;
+ if (smb2_neg_size > smb2_buf_len) {
+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+ rc = -EINVAL;
+ goto err_out;
+ }
+
+ if (conn->dialect == SMB311_PROT_ID) {
+ unsigned int nego_ctxt_off = le32_to_cpu(req->NegotiateContextOffset);
+
+ if (smb2_buf_len < nego_ctxt_off) {
+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+ rc = -EINVAL;
+ goto err_out;
+ }
+
+ if (smb2_neg_size > nego_ctxt_off) {
+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+ rc = -EINVAL;
+ goto err_out;
+ }
+
+ if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+ nego_ctxt_off) {
+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+ rc = -EINVAL;
+ goto err_out;
+ }
+ } else {
+ if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+ smb2_buf_len) {
+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+ rc = -EINVAL;
+ goto err_out;
+ }
+ }
+
conn->cli_cap = le32_to_cpu(req->Capabilities);
switch (conn->dialect) {
case SMB311_PROT_ID:
@@ -1128,13 +1163,6 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
case SMB21_PROT_ID:
init_smb2_1_server(conn);
break;
- case SMB20_PROT_ID:
- rc = init_smb2_0_server(conn);
- if (rc) {
- rsp->hdr.Status = STATUS_NOT_SUPPORTED;
- goto err_out;
- }
- break;
case SMB2X_PROT_ID:
case BAD_PROT_ID:
default:
@@ -1153,11 +1181,9 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
rsp->MaxReadSize = cpu_to_le32(conn->vals->max_read_size);
rsp->MaxWriteSize = cpu_to_le32(conn->vals->max_write_size);
- if (conn->dialect > SMB20_PROT_ID) {
- memcpy(conn->ClientGUID, req->ClientGUID,
- SMB2_CLIENT_GUID_SIZE);
- conn->cli_sec_mode = le16_to_cpu(req->SecurityMode);
- }
+ memcpy(conn->ClientGUID, req->ClientGUID,
+ SMB2_CLIENT_GUID_SIZE);
+ conn->cli_sec_mode = le16_to_cpu(req->SecurityMode);
rsp->StructureSize = cpu_to_le16(65);
rsp->DialectRevision = cpu_to_le16(conn->dialect);
@@ -1499,11 +1525,9 @@ binding_session:
}
}
- if (conn->dialect > SMB20_PROT_ID) {
- if (!ksmbd_conn_lookup_dialect(conn)) {
- pr_err("fail to verify the dialect\n");
- return -ENOENT;
- }
+ if (!ksmbd_conn_lookup_dialect(conn)) {
+ pr_err("fail to verify the dialect\n");
+ return -ENOENT;
}
return 0;
}
@@ -1585,11 +1609,9 @@ static int krb5_authenticate(struct ksmbd_work *work)
}
}
- if (conn->dialect > SMB20_PROT_ID) {
- if (!ksmbd_conn_lookup_dialect(conn)) {
- pr_err("fail to verify the dialect\n");
- return -ENOENT;
- }
+ if (!ksmbd_conn_lookup_dialect(conn)) {
+ pr_err("fail to verify the dialect\n");
+ return -ENOENT;
}
return 0;
}
@@ -2103,16 +2125,22 @@ out:
* smb2_set_ea() - handler for setting extended attributes using set
* info command
* @eabuf: set info command buffer
+ * @buf_len: set info command buffer length
* @path: dentry path for get ea
*
* Return: 0 on success, otherwise error
*/
-static int smb2_set_ea(struct smb2_ea_info *eabuf, struct path *path)
+static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+ struct path *path)
{
struct user_namespace *user_ns = mnt_user_ns(path->mnt);
char *attr_name = NULL, *value;
int rc = 0;
- int next = 0;
+ unsigned int next = 0;
+
+ if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +
+ le16_to_cpu(eabuf->EaValueLength))
+ return -EINVAL;
attr_name = kmalloc(XATTR_NAME_MAX + 1, GFP_KERNEL);
if (!attr_name)
@@ -2177,7 +2205,13 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, struct path *path)
next:
next = le32_to_cpu(eabuf->NextEntryOffset);
+ if (next == 0 || buf_len < next)
+ break;
+ buf_len -= next;
eabuf = (struct smb2_ea_info *)((char *)eabuf + next);
+ if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength))
+ break;
+
} while (next != 0);
kfree(attr_name);
@@ -2348,7 +2382,7 @@ static int smb2_creat(struct ksmbd_work *work, struct path *path, char *name,
return rc;
}
- rc = ksmbd_vfs_kern_path(name, 0, path, 0);
+ rc = ksmbd_vfs_kern_path(work, name, 0, path, 0);
if (rc) {
pr_err("cannot get linux path (%s), err = %d\n",
name, rc);
@@ -2377,6 +2411,10 @@ static int smb2_create_sd_buffer(struct ksmbd_work *work,
ksmbd_debug(SMB,
"Set ACLs using SMB2_CREATE_SD_BUFFER context\n");
sd_buf = (struct create_sd_buf_req *)context;
+ if (le16_to_cpu(context->DataOffset) +
+ le32_to_cpu(context->DataLength) <
+ sizeof(struct create_sd_buf_req))
+ return -EINVAL;
return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,
le32_to_cpu(sd_buf->ccontext.DataLength), true);
}
@@ -2423,7 +2461,7 @@ int smb2_open(struct ksmbd_work *work)
struct oplock_info *opinfo;
__le32 *next_ptr = NULL;
int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0;
- int rc = 0, len = 0;
+ int rc = 0;
int contxt_cnt = 0, query_disk_id = 0;
int maximal_access_ctxt = 0, posix_ctxt = 0;
int s_type = 0;
@@ -2495,17 +2533,11 @@ int smb2_open(struct ksmbd_work *work)
goto err_out1;
}
} else {
- len = strlen(share->path);
- ksmbd_debug(SMB, "share path len %d\n", len);
- name = kmalloc(len + 1, GFP_KERNEL);
+ name = kstrdup("", GFP_KERNEL);
if (!name) {
- rsp->hdr.Status = STATUS_NO_MEMORY;
rc = -ENOMEM;
goto err_out1;
}
-
- memcpy(name, share->path, len);
- *(name + len) = '\0';
}
req_op_level = req->RequestedOplockLevel;
@@ -2577,6 +2609,12 @@ int smb2_open(struct ksmbd_work *work)
goto err_out1;
} else if (context) {
ea_buf = (struct create_ea_buf_req *)context;
+ if (le16_to_cpu(context->DataOffset) +
+ le32_to_cpu(context->DataLength) <
+ sizeof(struct create_ea_buf_req)) {
+ rc = -EINVAL;
+ goto err_out1;
+ }
if (req->CreateOptions & FILE_NO_EA_KNOWLEDGE_LE) {
rsp->hdr.Status = STATUS_ACCESS_DENIED;
rc = -EACCES;
@@ -2615,6 +2653,12 @@ int smb2_open(struct ksmbd_work *work)
} else if (context) {
struct create_posix *posix =
(struct create_posix *)context;
+ if (le16_to_cpu(context->DataOffset) +
+ le32_to_cpu(context->DataLength) <
+ sizeof(struct create_posix)) {
+ rc = -EINVAL;
+ goto err_out1;
+ }
ksmbd_debug(SMB, "get posix context\n");
posix_mode = le32_to_cpu(posix->Mode);
@@ -2628,13 +2672,9 @@ int smb2_open(struct ksmbd_work *work)
goto err_out1;
}
- if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {
- /*
- * On delete request, instead of following up, need to
- * look the current entity
- */
- rc = ksmbd_vfs_kern_path(name, 0, &path, 1);
- if (!rc) {
+ rc = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, 1);
+ if (!rc) {
+ if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {
/*
* If file exists with under flags, return access
* denied error.
@@ -2653,34 +2693,16 @@ int smb2_open(struct ksmbd_work *work)
path_put(&path);
goto err_out;
}
- }
- } else {
- if (test_share_config_flag(work->tcon->share_conf,
- KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS)) {
- /*
- * Use LOOKUP_FOLLOW to follow the path of
- * symlink in path buildup
- */
- rc = ksmbd_vfs_kern_path(name, LOOKUP_FOLLOW, &path, 1);
- if (rc) { /* Case for broken link ?*/
- rc = ksmbd_vfs_kern_path(name, 0, &path, 1);
- }
- } else {
- rc = ksmbd_vfs_kern_path(name, 0, &path, 1);
- if (!rc && d_is_symlink(path.dentry)) {
- rc = -EACCES;
- path_put(&path);
- goto err_out;
- }
+ } else if (d_is_symlink(path.dentry)) {
+ rc = -EACCES;
+ path_put(&path);
+ goto err_out;
}
}
if (rc) {
- if (rc == -EACCES) {
- ksmbd_debug(SMB,
- "User does not have right permission\n");
+ if (rc != -ENOENT)
goto err_out;
- }
ksmbd_debug(SMB, "can not get linux path for %s, rc = %d\n",
name, rc);
rc = 0;
@@ -2786,7 +2808,15 @@ int smb2_open(struct ksmbd_work *work)
created = true;
user_ns = mnt_user_ns(path.mnt);
if (ea_buf) {
- rc = smb2_set_ea(&ea_buf->ea, &path);
+ if (le32_to_cpu(ea_buf->ccontext.DataLength) <
+ sizeof(struct smb2_ea_info)) {
+ rc = -EINVAL;
+ goto err_out;
+ }
+
+ rc = smb2_set_ea(&ea_buf->ea,
+ le32_to_cpu(ea_buf->ccontext.DataLength),
+ &path);
if (rc == -EOPNOTSUPP)
rc = 0;
else if (rc)
@@ -3019,9 +3049,16 @@ int smb2_open(struct ksmbd_work *work)
rc = PTR_ERR(az_req);
goto err_out;
} else if (az_req) {
- loff_t alloc_size = le64_to_cpu(az_req->AllocationSize);
+ loff_t alloc_size;
int err;
+ if (le16_to_cpu(az_req->ccontext.DataOffset) +
+ le32_to_cpu(az_req->ccontext.DataLength) <
+ sizeof(struct create_alloc_size_req)) {
+ rc = -EINVAL;
+ goto err_out;
+ }
+ alloc_size = le64_to_cpu(az_req->AllocationSize);
ksmbd_debug(SMB,
"request smb2 create allocate size : %llu\n",
alloc_size);
@@ -3176,7 +3213,7 @@ err_out1:
rsp->hdr.Status = STATUS_INVALID_PARAMETER;
else if (rc == -EOPNOTSUPP)
rsp->hdr.Status = STATUS_NOT_SUPPORTED;
- else if (rc == -EACCES || rc == -ESTALE)
+ else if (rc == -EACCES || rc == -ESTALE || rc == -EXDEV)
rsp->hdr.Status = STATUS_ACCESS_DENIED;
else if (rc == -ENOENT)
rsp->hdr.Status = STATUS_OBJECT_NAME_INVALID;
@@ -4041,6 +4078,10 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
path = &fp->filp->f_path;
/* single EA entry is requested with given user.* name */
if (req->InputBufferLength) {
+ if (le32_to_cpu(req->InputBufferLength) <
+ sizeof(struct smb2_ea_info_req))
+ return -EINVAL;
+
ea_req = (struct smb2_ea_info_req *)req->Buffer;
} else {
/* need to send all EAs, if no specific EA is requested*/
@@ -4186,7 +4227,7 @@ static void get_file_access_info(struct smb2_query_info_rsp *rsp,
static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
struct ksmbd_file *fp, void *rsp_org)
{
- struct smb2_file_all_info *basic_info;
+ struct smb2_file_basic_info *basic_info;
struct kstat stat;
u64 time;
@@ -4196,7 +4237,7 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
return -EACCES;
}
- basic_info = (struct smb2_file_all_info *)rsp->Buffer;
+ basic_info = (struct smb2_file_basic_info *)rsp->Buffer;
generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
&stat);
basic_info->CreationTime = cpu_to_le64(fp->create_time);
@@ -4209,9 +4250,8 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
basic_info->Attributes = fp->f_ci->m_fattr;
basic_info->Pad1 = 0;
rsp->OutputBufferLength =
- cpu_to_le32(offsetof(struct smb2_file_all_info, AllocationSize));
- inc_rfc1001_len(rsp_org, offsetof(struct smb2_file_all_info,
- AllocationSize));
+ cpu_to_le32(sizeof(struct smb2_file_basic_info));
+ inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_basic_info));
return 0;
}
@@ -4288,8 +4328,7 @@ static int get_file_all_info(struct ksmbd_work *work,
return -EACCES;
}
- filename = convert_to_nt_pathname(fp->filename,
- work->tcon->share_conf->path);
+ filename = convert_to_nt_pathname(fp->filename);
if (!filename)
return -ENOMEM;
@@ -4420,17 +4459,15 @@ static void get_file_stream_info(struct ksmbd_work *work,
file_info->NextEntryOffset = cpu_to_le32(next);
}
- if (nbytes) {
+ if (!S_ISDIR(stat.mode)) {
file_info = (struct smb2_file_stream_info *)
&rsp->Buffer[nbytes];
streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
"::$DATA", 7, conn->local_nls, 0);
streamlen *= 2;
file_info->StreamNameLength = cpu_to_le32(streamlen);
- file_info->StreamSize = S_ISDIR(stat.mode) ? 0 :
- cpu_to_le64(stat.size);
- file_info->StreamAllocationSize = S_ISDIR(stat.mode) ? 0 :
- cpu_to_le64(stat.size);
+ file_info->StreamSize = 0;
+ file_info->StreamAllocationSize = 0;
nbytes += sizeof(struct smb2_file_stream_info) + streamlen;
}
@@ -4745,12 +4782,8 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
struct path path;
int rc = 0, len;
int fs_infoclass_size = 0;
- int lookup_flags = 0;
-
- if (test_share_config_flag(share, KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
- lookup_flags = LOOKUP_FOLLOW;
- rc = ksmbd_vfs_kern_path(share->path, lookup_flags, &path, 0);
+ rc = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);
if (rc) {
pr_err("cannot create vfs path\n");
return -EIO;
@@ -5299,7 +5332,7 @@ static int smb2_rename(struct ksmbd_work *work,
goto out;
len = strlen(new_name);
- if (new_name[len - 1] != '/') {
+ if (len > 0 && new_name[len - 1] != '/') {
pr_err("not allow base filename in rename\n");
rc = -ESHARE;
goto out;
@@ -5327,11 +5360,14 @@ static int smb2_rename(struct ksmbd_work *work,
}
ksmbd_debug(SMB, "new name %s\n", new_name);
- rc = ksmbd_vfs_kern_path(new_name, 0, &path, 1);
- if (rc)
+ rc = ksmbd_vfs_kern_path(work, new_name, LOOKUP_NO_SYMLINKS, &path, 1);
+ if (rc) {
+ if (rc != -ENOENT)
+ goto out;
file_present = false;
- else
+ } else {
path_put(&path);
+ }
if (ksmbd_share_veto_filename(share, new_name)) {
rc = -ENOENT;
@@ -5371,7 +5407,7 @@ out:
static int smb2_create_link(struct ksmbd_work *work,
struct ksmbd_share_config *share,
struct smb2_file_link_info *file_info,
- struct file *filp,
+ unsigned int buf_len, struct file *filp,
struct nls_table *local_nls)
{
char *link_name = NULL, *target_name = NULL, *pathname = NULL;
@@ -5379,6 +5415,10 @@ static int smb2_create_link(struct ksmbd_work *work,
bool file_present = true;
int rc;
+ if (buf_len < (u64)sizeof(struct smb2_file_link_info) +
+ le32_to_cpu(file_info->FileNameLength))
+ return -EINVAL;
+
ksmbd_debug(SMB, "setting FILE_LINK_INFORMATION\n");
pathname = kmalloc(PATH_MAX, GFP_KERNEL);
if (!pathname)
@@ -5401,11 +5441,14 @@ static int smb2_create_link(struct ksmbd_work *work,
}
ksmbd_debug(SMB, "target name is %s\n", target_name);
- rc = ksmbd_vfs_kern_path(link_name, 0, &path, 0);
- if (rc)
+ rc = ksmbd_vfs_kern_path(work, link_name, LOOKUP_NO_SYMLINKS, &path, 0);
+ if (rc) {
+ if (rc != -ENOENT)
+ goto out;
file_present = false;
- else
+ } else {
path_put(&path);
+ }
if (file_info->ReplaceIfExists) {
if (file_present) {
@@ -5435,12 +5478,11 @@ out:
return rc;
}
-static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
+static int set_file_basic_info(struct ksmbd_file *fp,
+ struct smb2_file_basic_info *file_info,
struct ksmbd_share_config *share)
{
- struct smb2_file_all_info *file_info;
struct iattr attrs;
- struct timespec64 ctime;
struct file *filp;
struct inode *inode;
struct user_namespace *user_ns;
@@ -5449,7 +5491,6 @@ static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
if (!(fp->daccess & FILE_WRITE_ATTRIBUTES_LE))
return -EACCES;
- file_info = (struct smb2_file_all_info *)buf;
attrs.ia_valid = 0;
filp = fp->filp;
inode = file_inode(filp);
@@ -5463,13 +5504,11 @@ static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
}
- if (file_info->ChangeTime) {
+ attrs.ia_valid |= ATTR_CTIME;
+ if (file_info->ChangeTime)
attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
- ctime = attrs.ia_ctime;
- attrs.ia_valid |= ATTR_CTIME;
- } else {
- ctime = inode->i_ctime;
- }
+ else
+ attrs.ia_ctime = inode->i_ctime;
if (file_info->LastWriteTime) {
attrs.ia_mtime = ksmbd_NTtimeToUnix(file_info->LastWriteTime);
@@ -5515,18 +5554,17 @@ static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
return -EACCES;
inode_lock(inode);
+ inode->i_ctime = attrs.ia_ctime;
+ attrs.ia_valid &= ~ATTR_CTIME;
rc = notify_change(user_ns, dentry, &attrs, NULL);
- if (!rc) {
- inode->i_ctime = ctime;
- mark_inode_dirty(inode);
- }
inode_unlock(inode);
}
return rc;
}
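
The ksmbd_NTtimeToUnix() calls above convert SMB timestamps, which count 100 ns ticks since 1601-01-01, into Unix time. A minimal standalone sketch of that conversion (hypothetical names, not the ksmbd helper):

#include <stdint.h>

#define NT_EPOCH_DELTA 11644473600ULL /* seconds from 1601-01-01 to 1970-01-01 */

struct unix_ts { int64_t sec; long nsec; };

static struct unix_ts nt_to_unix(uint64_t nt_ticks)
{
	struct unix_ts t;

	/* 10,000,000 ticks of 100 ns each make one second. */
	t.sec  = (int64_t)(nt_ticks / 10000000ULL) - (int64_t)NT_EPOCH_DELTA;
	t.nsec = (long)(nt_ticks % 10000000ULL) * 100;
	return t;
}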
static int set_file_allocation_info(struct ksmbd_work *work,
- struct ksmbd_file *fp, char *buf)
+ struct ksmbd_file *fp,
+ struct smb2_file_alloc_info *file_alloc_info)
{
/*
* TODO : It's working fine only when store dos attributes
@@ -5534,7 +5572,6 @@ static int set_file_allocation_info(struct ksmbd_work *work,
* properly with any smb.conf option
*/
- struct smb2_file_alloc_info *file_alloc_info;
loff_t alloc_blks;
struct inode *inode;
int rc;
@@ -5542,7 +5579,6 @@ static int set_file_allocation_info(struct ksmbd_work *work,
if (!(fp->daccess & FILE_WRITE_DATA_LE))
return -EACCES;
- file_alloc_info = (struct smb2_file_alloc_info *)buf;
alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9;
inode = file_inode(fp->filp);
@@ -5565,7 +5601,7 @@ static int set_file_allocation_info(struct ksmbd_work *work,
* inode size is retained by backup inode size.
*/
size = i_size_read(inode);
- rc = ksmbd_vfs_truncate(work, NULL, fp, alloc_blks * 512);
+ rc = ksmbd_vfs_truncate(work, fp, alloc_blks * 512);
if (rc) {
pr_err("truncate failed! filename : %s, err %d\n",
fp->filename, rc);
@@ -5578,9 +5614,8 @@ static int set_file_allocation_info(struct ksmbd_work *work,
}
static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
- char *buf)
+ struct smb2_file_eof_info *file_eof_info)
{
- struct smb2_file_eof_info *file_eof_info;
loff_t newsize;
struct inode *inode;
int rc;
@@ -5588,7 +5623,6 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
if (!(fp->daccess & FILE_WRITE_DATA_LE))
return -EACCES;
- file_eof_info = (struct smb2_file_eof_info *)buf;
newsize = le64_to_cpu(file_eof_info->EndOfFile);
inode = file_inode(fp->filp);
@@ -5602,7 +5636,7 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC) {
ksmbd_debug(SMB, "filename : %s truncated to newsize %lld\n",
fp->filename, newsize);
- rc = ksmbd_vfs_truncate(work, NULL, fp, newsize);
+ rc = ksmbd_vfs_truncate(work, fp, newsize);
if (rc) {
ksmbd_debug(SMB, "truncate failed! filename : %s err %d\n",
fp->filename, rc);
@@ -5615,7 +5649,8 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
}
static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
- char *buf)
+ struct smb2_file_rename_info *rename_info,
+ unsigned int buf_len)
{
struct user_namespace *user_ns;
struct ksmbd_file *parent_fp;
@@ -5628,6 +5663,10 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
return -EACCES;
}
+ if (buf_len < (u64)sizeof(struct smb2_file_rename_info) +
+ le32_to_cpu(rename_info->FileNameLength))
+ return -EINVAL;
+
user_ns = file_mnt_user_ns(fp->filp);
if (ksmbd_stream_fd(fp))
goto next;
@@ -5650,14 +5689,13 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
}
}
next:
- return smb2_rename(work, fp, user_ns,
- (struct smb2_file_rename_info *)buf,
+ return smb2_rename(work, fp, user_ns, rename_info,
work->sess->conn->local_nls);
}
-static int set_file_disposition_info(struct ksmbd_file *fp, char *buf)
+static int set_file_disposition_info(struct ksmbd_file *fp,
+ struct smb2_file_disposition_info *file_info)
{
- struct smb2_file_disposition_info *file_info;
struct inode *inode;
if (!(fp->daccess & FILE_DELETE_LE)) {
@@ -5666,7 +5704,6 @@ static int set_file_disposition_info(struct ksmbd_file *fp, char *buf)
}
inode = file_inode(fp->filp);
- file_info = (struct smb2_file_disposition_info *)buf;
if (file_info->DeletePending) {
if (S_ISDIR(inode->i_mode) &&
ksmbd_vfs_empty_dir(fp) == -ENOTEMPTY)
@@ -5678,15 +5715,14 @@ static int set_file_disposition_info(struct ksmbd_file *fp, char *buf)
return 0;
}
-static int set_file_position_info(struct ksmbd_file *fp, char *buf)
+static int set_file_position_info(struct ksmbd_file *fp,
+ struct smb2_file_pos_info *file_info)
{
- struct smb2_file_pos_info *file_info;
loff_t current_byte_offset;
unsigned long sector_size;
struct inode *inode;
inode = file_inode(fp->filp);
- file_info = (struct smb2_file_pos_info *)buf;
current_byte_offset = le64_to_cpu(file_info->CurrentByteOffset);
sector_size = inode->i_sb->s_blocksize;
@@ -5702,12 +5738,11 @@ static int set_file_position_info(struct ksmbd_file *fp, char *buf)
return 0;
}
-static int set_file_mode_info(struct ksmbd_file *fp, char *buf)
+static int set_file_mode_info(struct ksmbd_file *fp,
+ struct smb2_file_mode_info *file_info)
{
- struct smb2_file_mode_info *file_info;
__le32 mode;
- file_info = (struct smb2_file_mode_info *)buf;
mode = file_info->Mode;
if ((mode & ~FILE_MODE_INFO_MASK) ||
@@ -5737,40 +5772,74 @@ static int set_file_mode_info(struct ksmbd_file *fp, char *buf)
 * TODO: need to implement error handling for STATUS_INFO_LENGTH_MISMATCH
*/
static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
- int info_class, char *buf,
+ struct smb2_set_info_req *req,
struct ksmbd_share_config *share)
{
- switch (info_class) {
+ unsigned int buf_len = le32_to_cpu(req->BufferLength);
+
+ switch (req->FileInfoClass) {
case FILE_BASIC_INFORMATION:
- return set_file_basic_info(fp, buf, share);
+ {
+ if (buf_len < sizeof(struct smb2_file_basic_info))
+ return -EINVAL;
+ return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share);
+ }
case FILE_ALLOCATION_INFORMATION:
- return set_file_allocation_info(work, fp, buf);
+ {
+ if (buf_len < sizeof(struct smb2_file_alloc_info))
+ return -EINVAL;
+ return set_file_allocation_info(work, fp,
+ (struct smb2_file_alloc_info *)req->Buffer);
+ }
case FILE_END_OF_FILE_INFORMATION:
- return set_end_of_file_info(work, fp, buf);
+ {
+ if (buf_len < sizeof(struct smb2_file_eof_info))
+ return -EINVAL;
+ return set_end_of_file_info(work, fp,
+ (struct smb2_file_eof_info *)req->Buffer);
+ }
case FILE_RENAME_INFORMATION:
+ {
if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
ksmbd_debug(SMB,
"User does not have write permission\n");
return -EACCES;
}
- return set_rename_info(work, fp, buf);
+ if (buf_len < sizeof(struct smb2_file_rename_info))
+ return -EINVAL;
+
+ return set_rename_info(work, fp,
+ (struct smb2_file_rename_info *)req->Buffer,
+ buf_len);
+ }
case FILE_LINK_INFORMATION:
+ {
+ if (buf_len < sizeof(struct smb2_file_link_info))
+ return -EINVAL;
+
return smb2_create_link(work, work->tcon->share_conf,
- (struct smb2_file_link_info *)buf, fp->filp,
+ (struct smb2_file_link_info *)req->Buffer,
+ buf_len, fp->filp,
work->sess->conn->local_nls);
-
+ }
case FILE_DISPOSITION_INFORMATION:
+ {
if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
ksmbd_debug(SMB,
"User does not have write permission\n");
return -EACCES;
}
- return set_file_disposition_info(fp, buf);
+ if (buf_len < sizeof(struct smb2_file_disposition_info))
+ return -EINVAL;
+
+ return set_file_disposition_info(fp,
+ (struct smb2_file_disposition_info *)req->Buffer);
+ }
case FILE_FULL_EA_INFORMATION:
{
if (!(fp->daccess & FILE_WRITE_EA_LE)) {
@@ -5779,18 +5848,29 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
return -EACCES;
}
- return smb2_set_ea((struct smb2_ea_info *)buf,
- &fp->filp->f_path);
- }
+ if (buf_len < sizeof(struct smb2_ea_info))
+ return -EINVAL;
+ return smb2_set_ea((struct smb2_ea_info *)req->Buffer,
+ buf_len, &fp->filp->f_path);
+ }
case FILE_POSITION_INFORMATION:
- return set_file_position_info(fp, buf);
+ {
+ if (buf_len < sizeof(struct smb2_file_pos_info))
+ return -EINVAL;
+ return set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer);
+ }
case FILE_MODE_INFORMATION:
- return set_file_mode_info(fp, buf);
+ {
+ if (buf_len < sizeof(struct smb2_file_mode_info))
+ return -EINVAL;
+
+ return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer);
+ }
}
- pr_err("Unimplemented Fileinfoclass :%d\n", info_class);
+ pr_err("Unimplemented Fileinfoclass :%d\n", req->FileInfoClass);
return -EOPNOTSUPP;
}
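
Every arm of the switch above follows the same defensive pattern: check that BufferLength covers the fixed structure before casting, and for variable-length classes re-check header plus payload with widened arithmetic. A minimal sketch of the pattern with hypothetical types (not ksmbd's):

#include <stddef.h>
#include <stdint.h>

struct var_info {
	uint32_t name_len;   /* length of the trailing variable part */
	char     name[];     /* flexible array member */
};

static int info_buf_ok(const void *buf, size_t buf_len)
{
	const struct var_info *v = buf;

	if (buf_len < sizeof(*v))
		return -1;   /* cannot even read name_len safely */
	/* Widen before adding so a huge name_len cannot wrap around. */
	if ((uint64_t)sizeof(*v) + v->name_len > buf_len)
		return -1;
	return 0;
}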
@@ -5851,8 +5931,7 @@ int smb2_set_info(struct ksmbd_work *work)
switch (req->InfoType) {
case SMB2_O_INFO_FILE:
ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
- rc = smb2_set_info_file(work, fp, req->FileInfoClass,
- req->Buffer, work->tcon->share_conf);
+ rc = smb2_set_info_file(work, fp, req, work->tcon->share_conf);
break;
case SMB2_O_INFO_SECURITY:
ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n");
@@ -5879,7 +5958,7 @@ int smb2_set_info(struct ksmbd_work *work)
return 0;
err_out:
- if (rc == -EACCES || rc == -EPERM)
+ if (rc == -EACCES || rc == -EPERM || rc == -EXDEV)
rsp->hdr.Status = STATUS_ACCESS_DENIED;
else if (rc == -EINVAL)
rsp->hdr.Status = STATUS_INVALID_PARAMETER;
@@ -8206,7 +8285,8 @@ void smb3_preauth_hash_rsp(struct ksmbd_work *work)
WORK_BUFFERS(work, req, rsp);
- if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE)
+ if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE &&
+ conn->preauth_info)
ksmbd_gen_preauth_integrity_hash(conn, (char *)rsp,
conn->preauth_info->Preauth_HashValue);
@@ -8310,31 +8390,29 @@ int smb3_decrypt_req(struct ksmbd_work *work)
struct smb2_hdr *hdr;
unsigned int pdu_length = get_rfc1002_len(buf);
struct kvec iov[2];
- unsigned int buf_data_size = pdu_length + 4 -
+ int buf_data_size = pdu_length + 4 -
sizeof(struct smb2_transform_hdr);
struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
- unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
int rc = 0;
- sess = ksmbd_session_lookup_all(conn, le64_to_cpu(tr_hdr->SessionId));
- if (!sess) {
- pr_err("invalid session id(%llx) in transform header\n",
- le64_to_cpu(tr_hdr->SessionId));
- return -ECONNABORTED;
- }
-
- if (pdu_length + 4 <
- sizeof(struct smb2_transform_hdr) + sizeof(struct smb2_hdr)) {
+ if (buf_data_size < sizeof(struct smb2_hdr)) {
pr_err("Transform message is too small (%u)\n",
pdu_length);
return -ECONNABORTED;
}
- if (pdu_length + 4 < orig_len + sizeof(struct smb2_transform_hdr)) {
+ if (buf_data_size < le32_to_cpu(tr_hdr->OriginalMessageSize)) {
pr_err("Transform message is broken\n");
return -ECONNABORTED;
}
+ sess = ksmbd_session_lookup_all(conn, le64_to_cpu(tr_hdr->SessionId));
+ if (!sess) {
+ pr_err("invalid session id(%llx) in transform header\n",
+ le64_to_cpu(tr_hdr->SessionId));
+ return -ECONNABORTED;
+ }
+
iov[0].iov_base = buf;
iov[0].iov_len = sizeof(struct smb2_transform_hdr);
iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
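
The reordering above makes both size checks run before the session lookup, so a malformed transform header can never reach ksmbd_session_lookup_all(). A sketch of the check ordering under hypothetical names:

#include <stdint.h>

/* Returns 0 only when the decrypted payload can plausibly hold an
 * inner SMB2 header and the client's OriginalMessageSize claim. */
static int transform_sizes_ok(int64_t pdu_len, int64_t tr_hdr_len,
			      int64_t inner_hdr_len, uint32_t orig_msg)
{
	/* Signed arithmetic: a PDU smaller than the transform header
	 * yields a negative payload size instead of wrapping. */
	int64_t payload = pdu_len - tr_hdr_len;

	if (payload < inner_hdr_len)
		return -1;            /* too small to contain a message */
	if (payload < (int64_t)orig_msg)
		return -1;            /* truncated or forged length field */
	return 0;                     /* safe to look up the session */
}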
diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
index bcec845b03f3..a6dec5ec6a54 100644
--- a/fs/ksmbd/smb2pdu.h
+++ b/fs/ksmbd/smb2pdu.h
@@ -1464,6 +1464,15 @@ struct smb2_file_all_info { /* data block encoding of response to level 18 */
char FileName[1];
} __packed; /* level 18 Query */
+struct smb2_file_basic_info { /* data block encoding of response to level 18 */
+ __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ __le32 Attributes;
+	__u32 Pad1;		/* End of FILE_BASIC_INFO equivalent */
+} __packed;
+
struct smb2_file_alt_name_info {
__le32 FileNameLength;
char FileName[0];
@@ -1628,7 +1637,6 @@ struct smb2_posix_info {
} __packed;
/* functions */
-int init_smb2_0_server(struct ksmbd_conn *conn);
void init_smb2_1_server(struct ksmbd_conn *conn);
void init_smb3_0_server(struct ksmbd_conn *conn);
void init_smb3_02_server(struct ksmbd_conn *conn);
diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
index 43d3123d8b62..707490ab1f4c 100644
--- a/fs/ksmbd/smb_common.c
+++ b/fs/ksmbd/smb_common.c
@@ -21,7 +21,6 @@ static const char basechars[43] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%";
#define MAGIC_CHAR '~'
#define PERIOD '.'
#define mangle(V) ((char)(basechars[(V) % MANGLE_BASE]))
-#define KSMBD_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb2_hdr))
struct smb_protocol {
int index;
@@ -89,7 +88,7 @@ unsigned int ksmbd_server_side_copy_max_total_size(void)
inline int ksmbd_min_protocol(void)
{
- return SMB2_PROT;
+ return SMB21_PROT;
}
inline int ksmbd_max_protocol(void)
@@ -129,16 +128,22 @@ int ksmbd_lookup_protocol_idx(char *str)
*
* check for valid smb signature and packet direction(request/response)
*
- * Return: 0 on success, otherwise 1
+ * Return: 0 on success, otherwise -EINVAL
*/
int ksmbd_verify_smb_message(struct ksmbd_work *work)
{
- struct smb2_hdr *smb2_hdr = work->request_buf;
+ struct smb2_hdr *smb2_hdr = work->request_buf + work->next_smb2_rcv_hdr_off;
+ struct smb_hdr *hdr;
if (smb2_hdr->ProtocolId == SMB2_PROTO_NUMBER)
return ksmbd_smb2_check_message(work);
- return 0;
+ hdr = work->request_buf;
+ if (*(__le32 *)hdr->Protocol == SMB1_PROTO_NUMBER &&
+ hdr->Command == SMB_COM_NEGOTIATE)
+ return 0;
+
+ return -EINVAL;
}
/**
@@ -149,20 +154,7 @@ int ksmbd_verify_smb_message(struct ksmbd_work *work)
*/
bool ksmbd_smb_request(struct ksmbd_conn *conn)
{
- int type = *(char *)conn->request_buf;
-
- switch (type) {
- case RFC1002_SESSION_MESSAGE:
- /* Regular SMB request */
- return true;
- case RFC1002_SESSION_KEEP_ALIVE:
- ksmbd_debug(SMB, "RFC 1002 session keep alive\n");
- break;
- default:
- ksmbd_debug(SMB, "RFC 1002 unknown request type 0x%x\n", type);
- }
-
- return false;
+ return conn->request_buf[0] == 0;
}
static bool supported_protocol(int idx)
@@ -176,10 +168,12 @@ static bool supported_protocol(int idx)
idx <= server_conf.max_protocol);
}
-static char *next_dialect(char *dialect, int *next_off)
+static char *next_dialect(char *dialect, int *next_off, int bcount)
{
dialect = dialect + *next_off;
- *next_off = strlen(dialect);
+ *next_off = strnlen(dialect, bcount);
+ if (dialect[*next_off] != '\0')
+ return NULL;
return dialect;
}
@@ -194,7 +188,9 @@ static int ksmbd_lookup_dialect_by_name(char *cli_dialects, __le16 byte_count)
dialect = cli_dialects;
bcount = le16_to_cpu(byte_count);
do {
- dialect = next_dialect(dialect, &next);
+ dialect = next_dialect(dialect, &next, bcount);
+ if (!dialect)
+ break;
ksmbd_debug(SMB, "client requested dialect %s\n",
dialect);
if (!strcmp(dialect, smb1_protos[i].name)) {
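
The strnlen()-based next_dialect() above never walks past the advertised byte count and rejects an unterminated final string. The same idea in isolation (a hypothetical helper, not the ksmbd function):

#include <stddef.h>
#include <string.h>

/* Advance through a buffer of consecutive NUL-terminated strings
 * without reading past 'end'; NULL means the string had no terminator. */
static const char *next_cstr(const char *p, const char *end)
{
	size_t n = strnlen(p, (size_t)(end - p));

	if (p + n == end)
		return NULL;          /* ran off the buffer, no NUL found */
	return p + n + 1;             /* start of the following string */
}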
@@ -242,13 +238,22 @@ int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count)
static int ksmbd_negotiate_smb_dialect(void *buf)
{
- __le32 proto;
+ int smb_buf_length = get_rfc1002_len(buf);
+ __le32 proto = ((struct smb2_hdr *)buf)->ProtocolId;
- proto = ((struct smb2_hdr *)buf)->ProtocolId;
if (proto == SMB2_PROTO_NUMBER) {
struct smb2_negotiate_req *req;
+ int smb2_neg_size =
+ offsetof(struct smb2_negotiate_req, Dialects) - 4;
req = (struct smb2_negotiate_req *)buf;
+ if (smb2_neg_size > smb_buf_length)
+ goto err_out;
+
+ if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+ smb_buf_length)
+ goto err_out;
+
return ksmbd_lookup_dialect_by_id(req->Dialects,
req->DialectCount);
}
@@ -258,14 +263,22 @@ static int ksmbd_negotiate_smb_dialect(void *buf)
struct smb_negotiate_req *req;
req = (struct smb_negotiate_req *)buf;
+ if (le16_to_cpu(req->ByteCount) < 2)
+ goto err_out;
+
+ if (offsetof(struct smb_negotiate_req, DialectsArray) - 4 +
+ le16_to_cpu(req->ByteCount) > smb_buf_length) {
+ goto err_out;
+ }
+
return ksmbd_lookup_dialect_by_name(req->DialectsArray,
req->ByteCount);
}
+err_out:
return BAD_PROT_ID;
}
-#define SMB_COM_NEGOTIATE 0x72
int ksmbd_init_smb_server(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
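
The DialectCount check added above guards a classic "count times element size" claim from the wire. A minimal sketch of that kind of check, widened to 64 bits so the multiplication cannot overflow on 32-bit hosts (names are hypothetical):

#include <stddef.h>
#include <stdint.h>

static int array_claim_fits(size_t fixed_part, uint16_t count,
			    size_t elem_size, size_t buf_len)
{
	uint64_t need = (uint64_t)fixed_part +
			(uint64_t)count * (uint64_t)elem_size;

	return need <= buf_len ? 0 : -1;
}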
@@ -280,11 +293,6 @@ int ksmbd_init_smb_server(struct ksmbd_work *work)
return 0;
}
-bool ksmbd_pdu_size_has_room(unsigned int pdu)
-{
- return (pdu >= KSMBD_MIN_SUPPORTED_HEADER_SIZE - 4);
-}
-
int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
struct ksmbd_file *dir,
struct ksmbd_dir_info *d_info,
@@ -419,7 +427,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
static int __smb2_negotiate(struct ksmbd_conn *conn)
{
- return (conn->dialect >= SMB20_PROT_ID &&
+ return (conn->dialect >= SMB21_PROT_ID &&
conn->dialect <= SMB311_PROT_ID);
}
@@ -449,7 +457,7 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
}
}
- if (command == SMB2_NEGOTIATE_HE) {
+ if (command == SMB2_NEGOTIATE_HE && __smb2_negotiate(conn)) {
ret = smb2_handle_negotiate(work);
init_smb2_neg_rsp(work);
return ret;
diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
index 57c667c1be06..6e79e7577f6b 100644
--- a/fs/ksmbd/smb_common.h
+++ b/fs/ksmbd/smb_common.h
@@ -48,13 +48,7 @@
#define CIFS_DEFAULT_IOSIZE (64 * 1024)
#define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */
-/* RFC 1002 session packet types */
-#define RFC1002_SESSION_MESSAGE 0x00
-#define RFC1002_SESSION_REQUEST 0x81
-#define RFC1002_POSITIVE_SESSION_RESPONSE 0x82
-#define RFC1002_NEGATIVE_SESSION_RESPONSE 0x83
-#define RFC1002_RETARGET_SESSION_RESPONSE 0x84
-#define RFC1002_SESSION_KEEP_ALIVE 0x85
+#define MAX_STREAM_PROT_LEN 0x00FFFFFF
/* Responses when opening a file. */
#define F_SUPERSEDED 0
@@ -210,6 +204,7 @@
FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES)
#define SMB1_PROTO_NUMBER cpu_to_le32(0x424d53ff)
+#define SMB_COM_NEGOTIATE 0x72
#define SMB1_CLIENT_GUID_SIZE (16)
struct smb_hdr {
@@ -500,8 +495,6 @@ int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
int ksmbd_init_smb_server(struct ksmbd_work *work);
-bool ksmbd_pdu_size_has_room(unsigned int pdu);
-
struct ksmbd_kstat;
int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
int info_level,
diff --git a/fs/ksmbd/smbacl.c b/fs/ksmbd/smbacl.c
index 0a95cdec8c80..bd792db32623 100644
--- a/fs/ksmbd/smbacl.c
+++ b/fs/ksmbd/smbacl.c
@@ -380,7 +380,7 @@ static void parse_dacl(struct user_namespace *user_ns,
{
int i, ret;
int num_aces = 0;
- int acl_size;
+ unsigned int acl_size;
char *acl_base;
struct smb_ace **ppace;
struct posix_acl_entry *cf_pace, *cf_pdace;
@@ -392,7 +392,7 @@ static void parse_dacl(struct user_namespace *user_ns,
return;
/* validate that we do not go past end of acl */
- if (end_of_acl <= (char *)pdacl ||
+ if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) ||
end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
pr_err("ACL too small to parse DACL\n");
return;
@@ -431,8 +431,22 @@ static void parse_dacl(struct user_namespace *user_ns,
* user/group/other have no permissions
*/
for (i = 0; i < num_aces; ++i) {
+ if (end_of_acl - acl_base < acl_size)
+ break;
+
ppace[i] = (struct smb_ace *)(acl_base + acl_size);
acl_base = (char *)ppace[i];
+ acl_size = offsetof(struct smb_ace, sid) +
+ offsetof(struct smb_sid, sub_auth);
+
+ if (end_of_acl - acl_base < acl_size ||
+ ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES ||
+ (end_of_acl - acl_base <
+ acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) ||
+ (le16_to_cpu(ppace[i]->size) <
+ acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth))
+ break;
+
acl_size = le16_to_cpu(ppace[i]->size);
ppace[i]->access_req =
smb_map_generic_desired_access(ppace[i]->access_req);
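
The loop above treats every self-reported ACE size as untrusted: the fixed part must fit in what remains, the variable SID tail must fit, and the advertised size must cover at least what was actually read before the cursor advances by it. A simplified sketch with a hypothetical layout:

#include <stddef.h>
#include <stdint.h>

struct ace {                          /* hypothetical, simplified */
	uint16_t size;                /* self-reported, untrusted */
	uint8_t  num_subauth;
	uint32_t sub_auth[];
};

static int walk_aces(const uint8_t *p, const uint8_t *end, int count)
{
	for (int i = 0; i < count; i++) {
		const struct ace *a = (const struct ace *)p;
		size_t left = (size_t)(end - p), need;

		if (left < sizeof(*a))
			return -1;
		need = sizeof(*a) + a->num_subauth * sizeof(uint32_t);
		if (need > left || a->size < need || a->size > left)
			return -1;
		p += a->size;         /* advertised size is now safe */
	}
	return 0;
}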
@@ -807,6 +821,9 @@ int parse_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
if (!pntsd)
return -EIO;
+ if (acl_len < sizeof(struct smb_ntsd))
+ return -EINVAL;
+
owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
le32_to_cpu(pntsd->osidoffset));
group_sid_ptr = (struct smb_sid *)((char *)pntsd +
diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
index 52b2556e76b1..3a7fa23ba850 100644
--- a/fs/ksmbd/transport_rdma.c
+++ b/fs/ksmbd/transport_rdma.c
@@ -20,7 +20,6 @@
#define SUBMOD_NAME "smb_direct"
#include <linux/kthread.h>
-#include <linux/rwlock.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/highmem.h>
diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
index dc15a5ecd2e0..c14320e03b69 100644
--- a/fs/ksmbd/transport_tcp.c
+++ b/fs/ksmbd/transport_tcp.c
@@ -215,7 +215,7 @@ out_error:
* ksmbd_kthread_fn() - listen to new SMB connections and callback server
* @p: arguments to forker thread
*
- * Return: Returns a task_struct or ERR_PTR
+ * Return: 0 on success, error number otherwise
*/
static int ksmbd_kthread_fn(void *p)
{
@@ -387,7 +387,7 @@ static void tcp_destroy_socket(struct socket *ksmbd_socket)
/**
* create_socket - create socket for ksmbd/0
*
- * Return: Returns a task_struct or ERR_PTR
+ * Return: 0 on success, error number otherwise
*/
static int create_socket(struct interface *iface)
{
diff --git a/fs/ksmbd/vfs.c b/fs/ksmbd/vfs.c
index b047f2980d96..b41954294d38 100644
--- a/fs/ksmbd/vfs.c
+++ b/fs/ksmbd/vfs.c
@@ -19,6 +19,8 @@
#include <linux/sched/xacct.h>
#include <linux/crc32c.h>
+#include "../internal.h" /* for vfs_path_lookup */
+
#include "glob.h"
#include "oplock.h"
#include "connection.h"
@@ -44,7 +46,6 @@ static char *extract_last_component(char *path)
p++;
} else {
p = NULL;
- pr_err("Invalid path %s\n", path);
}
return p;
}
@@ -155,7 +156,7 @@ int ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
/**
* ksmbd_vfs_create() - vfs helper for smb create file
* @work: work
- * @name: file name
+ * @name: file name that is relative to share
* @mode: file create mode
*
* Return: 0 on success, otherwise error
@@ -166,7 +167,8 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
struct dentry *dentry;
int err;
- dentry = kern_path_create(AT_FDCWD, name, &path, 0);
+ dentry = ksmbd_vfs_kern_path_create(work, name,
+ LOOKUP_NO_SYMLINKS, &path);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
if (err != -ENOENT)
@@ -191,7 +193,7 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
/**
* ksmbd_vfs_mkdir() - vfs helper for smb create directory
* @work: work
- * @name: directory name
+ * @name: directory name that is relative to share
* @mode: directory create mode
*
* Return: 0 on success, otherwise error
@@ -203,7 +205,9 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
struct dentry *dentry;
int err;
- dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
+ dentry = ksmbd_vfs_kern_path_create(work, name,
+ LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
+ &path);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
if (err != -EEXIST)
@@ -578,7 +582,7 @@ int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id)
/**
* ksmbd_vfs_remove_file() - vfs helper for smb rmdir or unlink
- * @name: absolute directory or file name
+ * @name: directory or file name that is relative to share
*
* Return: 0 on success, otherwise error
*/
@@ -588,16 +592,11 @@ int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name)
struct path path;
struct dentry *parent;
int err;
- int flags = 0;
if (ksmbd_override_fsids(work))
return -ENOMEM;
- if (test_share_config_flag(work->tcon->share_conf,
- KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
- flags = LOOKUP_FOLLOW;
-
- err = kern_path(name, flags, &path);
+ err = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, false);
if (err) {
ksmbd_debug(VFS, "can't get %s, err %d\n", name, err);
ksmbd_revert_fsids(work);
@@ -642,7 +641,7 @@ out_err:
/**
* ksmbd_vfs_link() - vfs helper for creating smb hardlink
* @oldname: source file name
- * @newname: hardlink name
+ * @newname: hardlink name that is relative to share
*
* Return: 0 on success, otherwise error
*/
@@ -652,24 +651,20 @@ int ksmbd_vfs_link(struct ksmbd_work *work, const char *oldname,
struct path oldpath, newpath;
struct dentry *dentry;
int err;
- int flags = 0;
if (ksmbd_override_fsids(work))
return -ENOMEM;
- if (test_share_config_flag(work->tcon->share_conf,
- KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
- flags = LOOKUP_FOLLOW;
-
- err = kern_path(oldname, flags, &oldpath);
+ err = kern_path(oldname, LOOKUP_NO_SYMLINKS, &oldpath);
if (err) {
pr_err("cannot get linux path for %s, err = %d\n",
oldname, err);
goto out1;
}
- dentry = kern_path_create(AT_FDCWD, newname, &newpath,
- flags | LOOKUP_REVAL);
+ dentry = ksmbd_vfs_kern_path_create(work, newname,
+ LOOKUP_NO_SYMLINKS | LOOKUP_REVAL,
+ &newpath);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
pr_err("path create err for %s, err %d\n", newname, err);
@@ -788,21 +783,19 @@ int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
struct dentry *src_dent, *trap_dent, *src_child;
char *dst_name;
int err;
- int flags;
dst_name = extract_last_component(newname);
- if (!dst_name)
- return -EINVAL;
+ if (!dst_name) {
+ dst_name = newname;
+ newname = "";
+ }
src_dent_parent = dget_parent(fp->filp->f_path.dentry);
src_dent = fp->filp->f_path.dentry;
- flags = LOOKUP_DIRECTORY;
- if (test_share_config_flag(work->tcon->share_conf,
- KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
- flags |= LOOKUP_FOLLOW;
-
- err = kern_path(newname, flags, &dst_path);
+ err = ksmbd_vfs_kern_path(work, newname,
+ LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
+ &dst_path, false);
if (err) {
ksmbd_debug(VFS, "Cannot get path for %s [%d]\n", newname, err);
goto out;
@@ -848,61 +841,43 @@ out:
/**
* ksmbd_vfs_truncate() - vfs helper for smb file truncate
* @work: work
- * @name: old filename
* @fid: file id of old file
* @size: truncate to given size
*
* Return: 0 on success, otherwise error
*/
-int ksmbd_vfs_truncate(struct ksmbd_work *work, const char *name,
+int ksmbd_vfs_truncate(struct ksmbd_work *work,
struct ksmbd_file *fp, loff_t size)
{
- struct path path;
int err = 0;
+ struct file *filp;
- if (name) {
- err = kern_path(name, 0, &path);
- if (err) {
- pr_err("cannot get linux path for %s, err %d\n",
- name, err);
- return err;
- }
- err = vfs_truncate(&path, size);
- if (err)
- pr_err("truncate failed for %s err %d\n",
- name, err);
- path_put(&path);
- } else {
- struct file *filp;
-
- filp = fp->filp;
-
- /* Do we need to break any of a levelII oplock? */
- smb_break_all_levII_oplock(work, fp, 1);
+ filp = fp->filp;
- if (!work->tcon->posix_extensions) {
- struct inode *inode = file_inode(filp);
+ /* Do we need to break any of a levelII oplock? */
+ smb_break_all_levII_oplock(work, fp, 1);
- if (size < inode->i_size) {
- err = check_lock_range(filp, size,
- inode->i_size - 1, WRITE);
- } else {
- err = check_lock_range(filp, inode->i_size,
- size - 1, WRITE);
- }
+ if (!work->tcon->posix_extensions) {
+ struct inode *inode = file_inode(filp);
- if (err) {
- pr_err("failed due to lock\n");
- return -EAGAIN;
- }
+ if (size < inode->i_size) {
+ err = check_lock_range(filp, size,
+ inode->i_size - 1, WRITE);
+ } else {
+ err = check_lock_range(filp, inode->i_size,
+ size - 1, WRITE);
}
- err = vfs_truncate(&filp->f_path, size);
- if (err)
- pr_err("truncate failed for filename : %s err %d\n",
- fp->filename, err);
+ if (err) {
+ pr_err("failed due to lock\n");
+ return -EAGAIN;
+ }
}
+ err = vfs_truncate(&filp->f_path, size);
+ if (err)
+ pr_err("truncate failed for filename : %s err %d\n",
+ fp->filename, err);
return err;
}
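
check_lock_range() above is consulted over exactly the region a size change touches: shrinking affects [new_size, old_size - 1], growing affects [old_size, new_size - 1]. A standalone sketch of that interval selection:

#include <stdint.h>

static void truncate_conflict_range(int64_t old_size, int64_t new_size,
				    int64_t *start, int64_t *end)
{
	if (new_size < old_size) {    /* shrinking: tail being cut off */
		*start = new_size;
		*end   = old_size - 1;
	} else {                      /* growing: newly exposed tail */
		*start = old_size;
		*end   = new_size - 1;
	}
}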
@@ -1220,22 +1195,25 @@ static int ksmbd_vfs_lookup_in_dir(struct path *dir, char *name, size_t namelen)
/**
* ksmbd_vfs_kern_path() - lookup a file and get path info
- * @name: name of file for lookup
+ * @name: file path that is relative to share
* @flags: lookup flags
* @path: if lookup succeed, return path info
* @caseless: caseless filename lookup
*
* Return: 0 on success, otherwise error
*/
-int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
- bool caseless)
+int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *name,
+ unsigned int flags, struct path *path, bool caseless)
{
+ struct ksmbd_share_config *share_conf = work->tcon->share_conf;
int err;
- if (name[0] != '/')
- return -EINVAL;
-
- err = kern_path(name, flags, path);
+ flags |= LOOKUP_BENEATH;
+ err = vfs_path_lookup(share_conf->vfs_path.dentry,
+ share_conf->vfs_path.mnt,
+ name,
+ flags,
+ path);
if (!err)
return 0;
@@ -1249,11 +1227,10 @@ int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
return -ENOMEM;
path_len = strlen(filepath);
- remain_len = path_len - 1;
+ remain_len = path_len;
- err = kern_path("/", flags, &parent);
- if (err)
- goto out;
+ parent = share_conf->vfs_path;
+ path_get(&parent);
while (d_can_lookup(parent.dentry)) {
char *filename = filepath + path_len - remain_len;
@@ -1266,21 +1243,21 @@ int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
err = ksmbd_vfs_lookup_in_dir(&parent, filename,
filename_len);
- if (err) {
- path_put(&parent);
+ path_put(&parent);
+ if (err)
goto out;
- }
- path_put(&parent);
next[0] = '\0';
- err = kern_path(filepath, flags, &parent);
+ err = vfs_path_lookup(share_conf->vfs_path.dentry,
+ share_conf->vfs_path.mnt,
+ filepath,
+ flags,
+ &parent);
if (err)
goto out;
-
- if (is_last) {
- path->mnt = parent.mnt;
- path->dentry = parent.dentry;
+ else if (is_last) {
+ *path = parent;
goto out;
}
@@ -1296,6 +1273,23 @@ out:
return err;
}
+struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+ const char *name,
+ unsigned int flags,
+ struct path *path)
+{
+ char *abs_name;
+ struct dentry *dent;
+
+ abs_name = convert_to_unix_name(work->tcon->share_conf, name);
+ if (!abs_name)
+ return ERR_PTR(-ENOMEM);
+
+ dent = kern_path_create(AT_FDCWD, abs_name, path, flags);
+ kfree(abs_name);
+ return dent;
+}
+
int ksmbd_vfs_remove_acl_xattrs(struct user_namespace *user_ns,
struct dentry *dentry)
{
diff --git a/fs/ksmbd/vfs.h b/fs/ksmbd/vfs.h
index 85db50abdb24..7b1dcaa3fbdc 100644
--- a/fs/ksmbd/vfs.h
+++ b/fs/ksmbd/vfs.h
@@ -126,7 +126,7 @@ int ksmbd_vfs_link(struct ksmbd_work *work,
int ksmbd_vfs_getattr(struct path *path, struct kstat *stat);
int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
char *newname);
-int ksmbd_vfs_truncate(struct ksmbd_work *work, const char *name,
+int ksmbd_vfs_truncate(struct ksmbd_work *work,
struct ksmbd_file *fp, loff_t size);
struct srv_copychunk;
int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
@@ -152,8 +152,13 @@ int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
size_t *xattr_stream_name_size, int s_type);
int ksmbd_vfs_remove_xattr(struct user_namespace *user_ns,
struct dentry *dentry, char *attr_name);
-int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
+int ksmbd_vfs_kern_path(struct ksmbd_work *work,
+ char *name, unsigned int flags, struct path *path,
bool caseless);
+struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+ const char *name,
+ unsigned int flags,
+ struct path *path);
int ksmbd_vfs_empty_dir(struct ksmbd_file *fp);
void ksmbd_vfs_set_fadvise(struct file *filp, __le32 option);
int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
diff --git a/fs/lockd/svcxdr.h b/fs/lockd/svcxdr.h
index c69a0bb76c94..4f1a451da5ba 100644
--- a/fs/lockd/svcxdr.h
+++ b/fs/lockd/svcxdr.h
@@ -134,18 +134,9 @@ svcxdr_decode_owner(struct xdr_stream *xdr, struct xdr_netobj *obj)
static inline bool
svcxdr_encode_owner(struct xdr_stream *xdr, const struct xdr_netobj *obj)
{
- unsigned int quadlen = XDR_QUADLEN(obj->len);
- __be32 *p;
-
- if (xdr_stream_encode_u32(xdr, obj->len) < 0)
- return false;
- p = xdr_reserve_space(xdr, obj->len);
- if (!p)
+ if (obj->len > XDR_MAX_NETOBJ)
return false;
- p[quadlen - 1] = 0; /* XDR pad */
- memcpy(p, obj->data, obj->len);
-
- return true;
+ return xdr_stream_encode_opaque(xdr, obj->data, obj->len) > 0;
}
#endif /* _LOCKD_SVCXDR_H_ */
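
xdr_stream_encode_opaque() emits the standard XDR opaque encoding that the removed code open-coded by hand: a 4-byte big-endian length, the bytes, then zero padding to a 4-byte boundary. A small userspace sketch of the wire format:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

/* Caller must provide at least 4 + len + 3 bytes of output space. */
static size_t xdr_encode_opaque_sketch(uint8_t *out, const void *data,
				       uint32_t len)
{
	uint32_t be_len = htonl(len);
	size_t pad = (4 - (len & 3)) & 3;

	memcpy(out, &be_len, 4);
	memcpy(out + 4, data, len);
	memset(out + 4 + len, 0, pad);
	return 4 + len + pad;
}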
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 0b6cd3b8734c..994ec22d4040 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -150,7 +150,7 @@ static void netfs_clear_unread(struct netfs_read_subrequest *subreq)
{
struct iov_iter iter;
- iov_iter_xarray(&iter, WRITE, &subreq->rreq->mapping->i_pages,
+ iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
subreq->start + subreq->transferred,
subreq->len - subreq->transferred);
iov_iter_zero(iov_iter_count(&iter), &iter);
diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
index edec45831585..0a9b72685f98 100644
--- a/fs/nfs_common/grace.c
+++ b/fs/nfs_common/grace.c
@@ -42,7 +42,6 @@ EXPORT_SYMBOL_GPL(locks_start_grace);
/**
* locks_end_grace
- * @net: net namespace that this lock manager belongs to
* @lm: who this grace period is for
*
* Call this function to state that the given lock manager is ready to
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index 7629248fdd53..be3c1aad50ea 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -542,7 +542,7 @@ nfsd_file_close_inode_sync(struct inode *inode)
}
/**
- * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
+ * nfsd_file_close_inode - attempt a delayed close of a nfsd_file
* @inode: inode of the file to attempt to remove
*
* Walk the whole hash bucket, looking for any files that correspond to "inode".
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 42356416f0a0..3f4027a5de88 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3570,7 +3570,7 @@ static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_s
}
static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
- struct nfsd4_session *session, u32 req)
+ struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
{
struct nfs4_client *clp = session->se_client;
struct svc_xprt *xpt = rqst->rq_xprt;
@@ -3593,6 +3593,8 @@ static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
else
status = nfserr_inval;
spin_unlock(&clp->cl_lock);
+ if (status == nfs_ok && conn)
+ *conn = c;
return status;
}
@@ -3617,8 +3619,16 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(session->se_client, rqstp))
goto out;
- status = nfsd4_match_existing_connection(rqstp, session, bcts->dir);
- if (status == nfs_ok || status == nfserr_inval)
+ status = nfsd4_match_existing_connection(rqstp, session,
+ bcts->dir, &conn);
+ if (status == nfs_ok) {
+ if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
+ bcts->dir == NFS4_CDFC4_BACK)
+ conn->cn_flags |= NFS4_CDFC4_BACK;
+ nfsd4_probe_callback(session->se_client);
+ goto out;
+ }
+ if (status == nfserr_inval)
goto out;
status = nfsd4_map_bcts_dir(&bcts->dir);
if (status)
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 7abeccb975b2..cf030ebe2827 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3544,15 +3544,18 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
goto fail;
cd->rd_maxcount -= entry_bytes;
/*
- * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
- * let's always let through the first entry, at least:
+ * RFC 3530 14.2.24 describes rd_dircount as only a "hint", and
+ * notes that it could be zero. If it is zero, then the server
+ * should enforce only the rd_maxcount value.
*/
- if (!cd->rd_dircount)
- goto fail;
- name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
- if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
- goto fail;
- cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
+ if (cd->rd_dircount) {
+ name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
+ if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
+ goto fail;
+ cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
+ if (!cd->rd_dircount)
+ cd->rd_maxcount = 0;
+ }
cd->cookie_offset = cookie_offset;
skip_entry:
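
The rd_dircount charge in the hunk above is 4 bytes of name length, the XDR-padded name, and an 8-byte cookie per entry; with namlen = 5 that is 4 + 4*2 + 8 = 20 bytes. The arithmetic in isolation:

#include <stdint.h>

#define XDR_QUADLEN(n)  (((n) + 3) >> 2)   /* bytes -> 4-byte XDR words */

static uint32_t dirent_dircount_cost(uint32_t namlen)
{
	return 4 + 4 * XDR_QUADLEN(namlen) + 8;
}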
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index c2c3d9077dc5..070e5dd03e26 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -793,7 +793,10 @@ out_close:
svc_xprt_put(xprt);
}
out_err:
- nfsd_destroy(net);
+ if (!list_empty(&nn->nfsd_serv->sv_permsocks))
+ nn->nfsd_serv->sv_nrthreads--;
+ else
+ nfsd_destroy(net);
return err;
}
@@ -1545,7 +1548,7 @@ static int __init init_nfsd(void)
goto out_free_all;
return 0;
out_free_all:
- unregister_pernet_subsys(&nfsd_net_ops);
+ unregister_filesystem(&nfsd_fs_type);
out_free_exports:
remove_proc_entry("fs/nfs/exports", NULL);
remove_proc_entry("fs/nfs", NULL);
diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
index 34c4cbf7e29b..e8c00dda42ad 100644
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -6,13 +6,9 @@
* TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
*/
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <linux/fs.h>
-#include <linux/hash.h>
-#include <linux/nls.h>
-#include <linux/ratelimit.h>
#include <linux/slab.h>
+#include <linux/kernel.h>
#include "debug.h"
#include "ntfs.h"
@@ -291,7 +287,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
if (!rsize) {
/* Empty resident -> Non-empty nonresident. */
} else if (!is_data) {
- err = ntfs_sb_write_run(sbi, run, 0, data, rsize);
+ err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
if (err)
goto out2;
} else if (!page) {
@@ -451,11 +447,8 @@ again:
again_1:
align = sbi->cluster_size;
- if (is_ext) {
+ if (is_ext)
align <<= attr_b->nres.c_unit;
- if (is_attr_sparsed(attr_b))
- keep_prealloc = false;
- }
old_valid = le64_to_cpu(attr_b->nres.valid_size);
old_size = le64_to_cpu(attr_b->nres.data_size);
@@ -465,9 +458,6 @@ again_1:
new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
new_alen = new_alloc >> cluster_bits;
- if (keep_prealloc && is_ext)
- keep_prealloc = false;
-
if (keep_prealloc && new_size < old_size) {
attr_b->nres.data_size = cpu_to_le64(new_size);
mi_b->dirty = true;
@@ -529,7 +519,7 @@ add_alloc_in_same_attr_seg:
} else if (pre_alloc == -1) {
pre_alloc = 0;
if (type == ATTR_DATA && !name_len &&
- sbi->options.prealloc) {
+ sbi->options->prealloc) {
CLST new_alen2 = bytes_to_cluster(
sbi, get_pre_allocated(new_size));
pre_alloc = new_alen2 - new_alen;
@@ -1966,7 +1956,7 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
return 0;
from = vbo;
- to = (vbo + bytes) < data_size ? (vbo + bytes) : data_size;
+ to = min_t(u64, vbo + bytes, data_size);
memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
return 0;
}
diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
index fa32399eb517..bad6d8a849a2 100644
--- a/fs/ntfs3/attrlist.c
+++ b/fs/ntfs3/attrlist.c
@@ -5,10 +5,7 @@
*
*/
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <linux/fs.h>
-#include <linux/nls.h>
#include "debug.h"
#include "ntfs.h"
@@ -336,7 +333,7 @@ int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
if (attr && attr->non_res) {
err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
- al->size);
+ al->size, 0);
if (err)
return err;
al->dirty = false;
@@ -423,7 +420,7 @@ next:
return true;
}
-int al_update(struct ntfs_inode *ni)
+int al_update(struct ntfs_inode *ni, int sync)
{
int err;
struct ATTRIB *attr;
@@ -445,7 +442,7 @@ int al_update(struct ntfs_inode *ni)
memcpy(resident_data(attr), al->le, al->size);
} else {
err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
- al->size);
+ al->size, sync);
if (err)
goto out;
diff --git a/fs/ntfs3/bitfunc.c b/fs/ntfs3/bitfunc.c
index ce304d40b5e1..50d838093790 100644
--- a/fs/ntfs3/bitfunc.c
+++ b/fs/ntfs3/bitfunc.c
@@ -5,13 +5,8 @@
*
*/
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
-#include <linux/fs.h>
-#include <linux/nls.h>
+#include <linux/types.h>
-#include "debug.h"
-#include "ntfs.h"
#include "ntfs_fs.h"
#define BITS_IN_SIZE_T (sizeof(size_t) * 8)
@@ -124,8 +119,7 @@ bool are_bits_set(const ulong *lmap, size_t bit, size_t nbits)
pos = nbits & 7;
if (pos) {
- u8 mask = fill_mask[pos];
-
+ mask = fill_mask[pos];
if ((*map & mask) != mask)
return false;
}
diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
index 831501555009..aa184407520f 100644
--- a/fs/ntfs3/bitmap.c
+++ b/fs/ntfs3/bitmap.c
@@ -10,12 +10,10 @@
*
*/
-#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
-#include <linux/nls.h>
+#include <linux/kernel.h>
-#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"
@@ -435,7 +433,7 @@ static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
;
} else {
n3 = rb_next(&e->count.node);
- max_new_len = len > new_len ? len : new_len;
+ max_new_len = max(len, new_len);
if (!n3) {
wnd->extent_max = max_new_len;
} else {
@@ -731,7 +729,7 @@ int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
wbits = wnd->bits_last;
tail = wbits - wbit;
- op = tail < bits ? tail : bits;
+ op = min_t(u32, tail, bits);
bh = wnd_map(wnd, iw);
if (IS_ERR(bh)) {
@@ -784,7 +782,7 @@ int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
wbits = wnd->bits_last;
tail = wbits - wbit;
- op = tail < bits ? tail : bits;
+ op = min_t(u32, tail, bits);
bh = wnd_map(wnd, iw);
if (IS_ERR(bh)) {
@@ -834,7 +832,7 @@ static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
wbits = wnd->bits_last;
tail = wbits - wbit;
- op = tail < bits ? tail : bits;
+ op = min_t(u32, tail, bits);
if (wbits != wnd->free_bits[iw]) {
bool ret;
@@ -926,7 +924,7 @@ use_wnd:
wbits = wnd->bits_last;
tail = wbits - wbit;
- op = tail < bits ? tail : bits;
+ op = min_t(u32, tail, bits);
if (wnd->free_bits[iw]) {
bool ret;
diff --git a/fs/ntfs3/debug.h b/fs/ntfs3/debug.h
index 31120569a87b..53ef7489c75f 100644
--- a/fs/ntfs3/debug.h
+++ b/fs/ntfs3/debug.h
@@ -11,6 +11,9 @@
#ifndef _LINUX_NTFS3_DEBUG_H
#define _LINUX_NTFS3_DEBUG_H
+struct super_block;
+struct inode;
+
#ifndef Add2Ptr
#define Add2Ptr(P, I) ((void *)((u8 *)(P) + (I)))
#define PtrOffset(B, O) ((size_t)((size_t)(O) - (size_t)(B)))
diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
index 93f6d485564e..fb438d604040 100644
--- a/fs/ntfs3/dir.c
+++ b/fs/ntfs3/dir.c
@@ -7,10 +7,7 @@
*
*/
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <linux/fs.h>
-#include <linux/iversion.h>
#include <linux/nls.h>
#include "debug.h"
@@ -18,30 +15,27 @@
#include "ntfs_fs.h"
/* Convert little endian UTF-16 to NLS string. */
-int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
+int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const __le16 *name, u32 len,
u8 *buf, int buf_len)
{
- int ret, uni_len, warn;
- const __le16 *ip;
+ int ret, warn;
u8 *op;
- struct nls_table *nls = sbi->options.nls;
+ struct nls_table *nls = sbi->options->nls;
static_assert(sizeof(wchar_t) == sizeof(__le16));
if (!nls) {
/* UTF-16 -> UTF-8 */
- ret = utf16s_to_utf8s((wchar_t *)uni->name, uni->len,
- UTF16_LITTLE_ENDIAN, buf, buf_len);
+ ret = utf16s_to_utf8s(name, len, UTF16_LITTLE_ENDIAN, buf,
+ buf_len);
buf[ret] = '\0';
return ret;
}
- ip = uni->name;
op = buf;
- uni_len = uni->len;
warn = 0;
- while (uni_len--) {
+ while (len--) {
u16 ec;
int charlen;
char dump[5];
@@ -52,7 +46,7 @@ int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
break;
}
- ec = le16_to_cpu(*ip++);
+ ec = le16_to_cpu(*name++);
charlen = nls->uni2char(ec, op, buf_len);
if (charlen > 0) {
@@ -186,7 +180,7 @@ int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
{
int ret, slen;
const u8 *end;
- struct nls_table *nls = sbi->options.nls;
+ struct nls_table *nls = sbi->options->nls;
u16 *uname = uni->name;
static_assert(sizeof(wchar_t) == sizeof(u16));
@@ -301,14 +295,14 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
return 0;
/* Skip meta files. Unless option to show metafiles is set. */
- if (!sbi->options.showmeta && ntfs_is_meta_file(sbi, ino))
+ if (!sbi->options->showmeta && ntfs_is_meta_file(sbi, ino))
return 0;
- if (sbi->options.nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
+ if (sbi->options->nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
return 0;
- name_len = ntfs_utf16_to_nls(sbi, (struct le_str *)&fname->name_len,
- name, PATH_MAX);
+ name_len = ntfs_utf16_to_nls(sbi, fname->name, fname->name_len, name,
+ PATH_MAX);
if (name_len <= 0) {
ntfs_warn(sbi->sb, "failed to convert name for inode %lx.",
ino);
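
The conversion loop above consumes UTF-16LE code units via le16_to_cpu(). Outside the kernel the same endian- and alignment-safe read can be done byte-wise; a tiny sketch:

#include <stdint.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | ((uint16_t)p[1] << 8));
}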
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 424450e77ad5..43b1451bff53 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -12,7 +12,6 @@
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>
-#include <linux/nls.h>
#include "debug.h"
#include "ntfs.h"
@@ -588,8 +587,11 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
truncate_pagecache(inode, vbo_down);
if (!is_sparsed(ni) && !is_compressed(ni)) {
- /* Normal file. */
- err = ntfs_zero_range(inode, vbo, end);
+ /*
+ * Normal file, can't make hole.
+ * TODO: Try to find way to save info about hole.
+ */
+ err = -EOPNOTSUPP;
goto out;
}
@@ -737,7 +739,7 @@ int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
umode_t mode = inode->i_mode;
int err;
- if (sbi->options.no_acs_rules) {
+ if (sbi->options->noacsrules) {
/* "No access rules" - Force any changes of time etc. */
attr->ia_valid |= ATTR_FORCE;
/* and disable for editing some attributes. */
@@ -1185,7 +1187,7 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
int err = 0;
/* If we are last writer on the inode, drop the block reservation. */
- if (sbi->options.prealloc && ((file->f_mode & FMODE_WRITE) &&
+ if (sbi->options->prealloc && ((file->f_mode & FMODE_WRITE) &&
atomic_read(&inode->i_writecount) == 1)) {
ni_lock(ni);
down_write(&ni->file.run_lock);
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 938b12d56ca6..6f47a9c17f89 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -5,11 +5,8 @@
*
*/
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <linux/fiemap.h>
#include <linux/fs.h>
-#include <linux/nls.h>
#include <linux/vmalloc.h>
#include "debug.h"
@@ -708,18 +705,35 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
continue;
mi = ni_find_mi(ni, ino_get(&le->ref));
+ if (!mi) {
+			/* Should never happen; this was already checked. */
+ goto bad;
+ }
attr = mi_find_attr(mi, NULL, le->type, le_name(le),
le->name_len, &le->id);
+ if (!attr) {
+			/* Should never happen; this was already checked. */
+ goto bad;
+ }
asize = le32_to_cpu(attr->size);
/* Insert into primary record. */
attr_ins = mi_insert_attr(&ni->mi, le->type, le_name(le),
le->name_len, asize,
le16_to_cpu(attr->name_off));
- id = attr_ins->id;
+ if (!attr_ins) {
+ /*
+ * Internal error.
+			 * Either there is no space in the primary record (already checked),
+			 * or we tried to insert another
+			 * non-indexed attribute (logic error).
+ */
+ goto bad;
+ }
/* Copy all except id. */
+ id = attr_ins->id;
memcpy(attr_ins, attr, asize);
attr_ins->id = id;
@@ -735,6 +749,10 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
ni->attr_list.dirty = false;
return 0;
+bad:
+ ntfs_inode_err(&ni->vfs_inode, "Internal error");
+ make_bad_inode(&ni->vfs_inode);
+ return -EINVAL;
}
/*
@@ -956,6 +974,13 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
continue;
}
+ /*
+ * Do not try to insert this attribute
+		 * if there is no room in the record.
+ */
+ if (le32_to_cpu(mi->mrec->used) + asize > sbi->record_size)
+ continue;
+
/* Try to insert attribute into this subrecord. */
attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
name_off, svcn, ins_le);
@@ -1451,7 +1476,7 @@ int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
attr->res.flags = RESIDENT_FLAG_INDEXED;
/* is_attr_indexed(attr)) == true */
- le16_add_cpu(&ni->mi.mrec->hard_links, +1);
+ le16_add_cpu(&ni->mi.mrec->hard_links, 1);
ni->mi.dirty = true;
}
attr->res.res = 0;
@@ -1606,7 +1631,7 @@ struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
*le = NULL;
- if (FILE_NAME_POSIX == name_type)
+ if (name_type == FILE_NAME_POSIX)
return NULL;
/* Enumerate all names. */
@@ -1706,18 +1731,16 @@ out:
/*
* ni_parse_reparse
*
- * Buffer is at least 24 bytes.
+ * buffer - memory for reparse buffer header
*/
enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
- void *buffer)
+ struct REPARSE_DATA_BUFFER *buffer)
{
const struct REPARSE_DATA_BUFFER *rp = NULL;
u8 bits;
u16 len;
typeof(rp->CompressReparseBuffer) *cmpr;
- static_assert(sizeof(struct REPARSE_DATA_BUFFER) <= 24);
-
/* Try to estimate reparse point. */
if (!attr->non_res) {
rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
@@ -1803,6 +1826,9 @@ enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
return REPARSE_NONE;
}
+ if (buffer != rp)
+ memcpy(buffer, rp, sizeof(struct REPARSE_DATA_BUFFER));
+
/* Looks like normal symlink. */
return REPARSE_LINK;
}
@@ -2906,9 +2932,8 @@ bool ni_remove_name_undo(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de + 1, de_key_size);
mi_get_ref(&ni->mi, &de->ref);
- if (indx_insert_entry(&dir_ni->dir, dir_ni, de, sbi, NULL, 1)) {
+ if (indx_insert_entry(&dir_ni->dir, dir_ni, de, sbi, NULL, 1))
return false;
- }
}
return true;
@@ -3077,7 +3102,9 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
const struct EA_INFO *info;
info = resident_data_ex(attr, sizeof(struct EA_INFO));
- dup->ea_size = info->size_pack;
+			/* If ATTR_EA_INFO exists, 'info' can't be NULL. */
+ if (info)
+ dup->ea_size = info->size_pack;
}
}
@@ -3205,7 +3232,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
goto out;
}
- err = al_update(ni);
+ err = al_update(ni, sync);
if (err)
goto out;
}
diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
index b5853aed0e25..06492f088d60 100644
--- a/fs/ntfs3/fslog.c
+++ b/fs/ntfs3/fslog.c
@@ -6,12 +6,8 @@
*/
#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <linux/fs.h>
-#include <linux/hash.h>
-#include <linux/nls.h>
#include <linux/random.h>
-#include <linux/ratelimit.h>
#include <linux/slab.h>
#include "debug.h"
@@ -2219,7 +2215,7 @@ file_is_valid:
err = ntfs_sb_write_run(log->ni->mi.sbi,
&log->ni->file.run, off, page,
- log->page_size);
+ log->page_size, 0);
if (err)
goto out;
@@ -3710,7 +3706,7 @@ move_data:
if (a_dirty) {
attr = oa->attr;
- err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes);
+ err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes, 0);
if (err)
goto out;
}
@@ -5152,10 +5148,10 @@ end_reply:
ntfs_fix_pre_write(&rh->rhdr, log->page_size);
- err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rh, log->page_size);
+ err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rh, log->page_size, 0);
if (!err)
err = ntfs_sb_write_run(sbi, &log->ni->file.run, log->page_size,
- rh, log->page_size);
+ rh, log->page_size, 0);
kfree(rh);
if (err)
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 91e3743e1442..4de9acb16968 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -8,7 +8,7 @@
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
-#include <linux/nls.h>
+#include <linux/kernel.h>
#include "debug.h"
#include "ntfs.h"
@@ -358,7 +358,7 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
enum ALLOCATE_OPT opt)
{
int err;
- CLST alen = 0;
+ CLST alen;
struct super_block *sb = sbi->sb;
size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
struct wnd_bitmap *wnd = &sbi->used.bitmap;
@@ -370,27 +370,28 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
if (!zlen) {
err = ntfs_refresh_zone(sbi);
if (err)
- goto out;
+ goto up_write;
+
zlen = wnd_zone_len(wnd);
}
if (!zlen) {
ntfs_err(sbi->sb, "no free space to extend mft");
- goto out;
+ err = -ENOSPC;
+ goto up_write;
}
lcn = wnd_zone_bit(wnd);
- alen = zlen > len ? len : zlen;
+ alen = min_t(CLST, len, zlen);
wnd_zone_set(wnd, lcn + alen, zlen - alen);
err = wnd_set_used(wnd, lcn, alen);
- if (err) {
- up_write(&wnd->rw_lock);
- return err;
- }
+ if (err)
+ goto up_write;
+
alcn = lcn;
- goto out;
+ goto space_found;
}
/*
 * Because cluster 0 is always used, this value means that we should use
@@ -404,49 +405,45 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
if (alen)
- goto out;
+ goto space_found;
/* Try to use clusters from MftZone. */
zlen = wnd_zone_len(wnd);
zeroes = wnd_zeroes(wnd);
	/* Check for a too-large request. */
- if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE)
- goto out;
+ if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
+ err = -ENOSPC;
+ goto up_write;
+ }
	/* How many clusters to cut from the zone. */
zlcn = wnd_zone_bit(wnd);
zlen2 = zlen >> 1;
- ztrim = len > zlen ? zlen : (len > zlen2 ? len : zlen2);
- new_zlen = zlen - ztrim;
-
- if (new_zlen < NTFS_MIN_MFT_ZONE) {
- new_zlen = NTFS_MIN_MFT_ZONE;
- if (new_zlen > zlen)
- new_zlen = zlen;
- }
+ ztrim = clamp_val(len, zlen2, zlen);
+ new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
wnd_zone_set(wnd, zlcn, new_zlen);
	/* Allocate contiguous clusters. */
alen = wnd_find(wnd, len, 0,
BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
-
-out:
- if (alen) {
- err = 0;
- *new_len = alen;
- *new_lcn = alcn;
-
- ntfs_unmap_meta(sb, alcn, alen);
-
- /* Set hint for next requests. */
- if (!(opt & ALLOCATE_MFT))
- sbi->used.next_free_lcn = alcn + alen;
- } else {
+ if (!alen) {
err = -ENOSPC;
+ goto up_write;
}
+space_found:
+ err = 0;
+ *new_len = alen;
+ *new_lcn = alcn;
+
+ ntfs_unmap_meta(sb, alcn, alen);
+
+ /* Set hint for next requests. */
+ if (!(opt & ALLOCATE_MFT))
+ sbi->used.next_free_lcn = alcn + alen;
+up_write:
up_write(&wnd->rw_lock);
return err;
}
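
The clamp_val()/max_t() pair above replaces two hand-written ladders. Worked through: with zlen = 100 reserved clusters and a request of len = 30, zlen2 = 50, so ztrim = clamp(30, 50, 100) = 50 and the zone keeps max(50, NTFS_MIN_MFT_ZONE) clusters. As standalone C:

#include <stddef.h>

static size_t clamp_sz(size_t v, size_t lo, size_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static size_t new_zone_len(size_t zlen, size_t len, size_t min_zone)
{
	size_t ztrim = clamp_sz(len, zlen / 2, zlen);
	size_t keep  = zlen - ztrim;

	return keep > min_zone ? keep : min_zone;
}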
@@ -1080,7 +1077,7 @@ int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
}
int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
- u64 vbo, const void *buf, size_t bytes)
+ u64 vbo, const void *buf, size_t bytes, int sync)
{
struct super_block *sb = sbi->sb;
u8 cluster_bits = sbi->cluster_bits;
@@ -1099,8 +1096,8 @@ int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
len = ((u64)clen << cluster_bits) - off;
for (;;) {
- u32 op = len < bytes ? len : bytes;
- int err = ntfs_sb_write(sb, lbo, op, buf, 0);
+ u32 op = min_t(u64, len, bytes);
+ int err = ntfs_sb_write(sb, lbo, op, buf, sync);
if (err)
return err;
@@ -1300,7 +1297,7 @@ int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
nb->off = off = lbo & (blocksize - 1);
for (;;) {
- u32 len32 = len < bytes ? len : bytes;
+ u32 len32 = min_t(u64, len, bytes);
sector_t block = lbo >> sb->s_blocksize_bits;
do {
@@ -2175,7 +2172,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
/* Write main SDS bucket. */
err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
- d_security, aligned_sec_size);
+ d_security, aligned_sec_size, 0);
if (err)
goto out;
@@ -2193,7 +2190,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
/* Write copy SDS bucket. */
err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
- aligned_sec_size);
+ aligned_sec_size, 0);
if (err)
goto out;
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index 0daca9adc54c..6f81e3a49abf 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -8,7 +8,7 @@
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
-#include <linux/nls.h>
+#include <linux/kernel.h>
#include "debug.h"
#include "ntfs.h"
@@ -671,138 +671,74 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
const struct INDEX_HDR *hdr, const void *key,
size_t key_len, const void *ctx, int *diff)
{
- struct NTFS_DE *e;
+ struct NTFS_DE *e, *found = NULL;
NTFS_CMP_FUNC cmp = indx->cmp;
+ int min_idx = 0, mid_idx, max_idx = 0;
+ int diff2;
+ int table_size = 8;
u32 e_size, e_key_len;
u32 end = le32_to_cpu(hdr->used);
u32 off = le32_to_cpu(hdr->de_off);
+ u16 offs[128];
-#ifdef NTFS3_INDEX_BINARY_SEARCH
- int max_idx = 0, fnd, min_idx;
- int nslots = 64;
- u16 *offs;
-
- if (end > 0x10000)
- goto next;
-
- offs = kmalloc(sizeof(u16) * nslots, GFP_NOFS);
- if (!offs)
- goto next;
+fill_table:
+ if (off + sizeof(struct NTFS_DE) > end)
+ return NULL;
- /* Use binary search algorithm. */
-next1:
- if (off + sizeof(struct NTFS_DE) > end) {
- e = NULL;
- goto out1;
- }
e = Add2Ptr(hdr, off);
e_size = le16_to_cpu(e->size);
- if (e_size < sizeof(struct NTFS_DE) || off + e_size > end) {
- e = NULL;
- goto out1;
- }
-
- if (max_idx >= nslots) {
- u16 *ptr;
- int new_slots = ALIGN(2 * nslots, 8);
-
- ptr = kmalloc(sizeof(u16) * new_slots, GFP_NOFS);
- if (ptr)
- memcpy(ptr, offs, sizeof(u16) * max_idx);
- kfree(offs);
- offs = ptr;
- nslots = new_slots;
- if (!ptr)
- goto next;
- }
-
- /* Store entry table. */
- offs[max_idx] = off;
+ if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
+ return NULL;
if (!de_is_last(e)) {
+ offs[max_idx] = off;
off += e_size;
- max_idx += 1;
- goto next1;
- }
- /*
- * Table of pointers is created.
- * Use binary search to find entry that is <= to the search value.
- */
- fnd = -1;
- min_idx = 0;
+ max_idx++;
+ if (max_idx < table_size)
+ goto fill_table;
- while (min_idx <= max_idx) {
- int mid_idx = min_idx + ((max_idx - min_idx) >> 1);
- int diff2;
-
- e = Add2Ptr(hdr, offs[mid_idx]);
+ max_idx--;
+ }
- e_key_len = le16_to_cpu(e->key_size);
+binary_search:
+ e_key_len = le16_to_cpu(e->key_size);
- diff2 = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
+ diff2 = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
+ if (diff2 > 0) {
+ if (found) {
+ min_idx = mid_idx + 1;
+ } else {
+ if (de_is_last(e))
+ return NULL;
- if (!diff2) {
- *diff = 0;
- goto out1;
+ max_idx = 0;
+ table_size = min(table_size * 2,
+ (int)ARRAY_SIZE(offs));
+ goto fill_table;
}
-
- if (diff2 < 0) {
+ } else if (diff2 < 0) {
+ if (found)
max_idx = mid_idx - 1;
- fnd = mid_idx;
- if (!fnd)
- break;
- } else {
- min_idx = mid_idx + 1;
- }
- }
+ else
+ max_idx--;
- if (fnd == -1) {
- e = NULL;
- goto out1;
+ found = e;
+ } else {
+ *diff = 0;
+ return e;
}
- *diff = -1;
- e = Add2Ptr(hdr, offs[fnd]);
-
-out1:
- kfree(offs);
-
- return e;
-#endif
-
-next:
- /*
- * Entries index are sorted.
- * Enumerate all entries until we find entry
- * that is <= to the search value.
- */
- if (off + sizeof(struct NTFS_DE) > end)
- return NULL;
-
- e = Add2Ptr(hdr, off);
- e_size = le16_to_cpu(e->size);
-
- if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
- return NULL;
-
- off += e_size;
-
- e_key_len = le16_to_cpu(e->key_size);
-
- *diff = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
- if (!*diff)
- return e;
+ if (min_idx > max_idx) {
+ *diff = -1;
+ return found;
+ }
- if (*diff <= 0)
- return e;
+ mid_idx = (min_idx + max_idx) >> 1;
+ e = Add2Ptr(hdr, offs[mid_idx]);
- if (de_is_last(e)) {
- *diff = 1;
- return e;
- }
- goto next;
+ goto binary_search;
}
/*
@@ -1136,9 +1072,7 @@ int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
if (!e)
return -EINVAL;
- if (fnd)
- fnd->root_de = e;
-
+ fnd->root_de = e;
err = 0;
for (;;) {
@@ -1401,7 +1335,7 @@ ok:
static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
CLST *vbn)
{
- int err = -ENOMEM;
+ int err;
struct ntfs_sb_info *sbi = ni->mi.sbi;
struct ATTRIB *bitmap;
struct ATTRIB *alloc;
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index db2a5a4c38e4..859951d785cb 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -5,10 +5,8 @@
*
*/
-#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
-#include <linux/iversion.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/nls.h>
@@ -49,8 +47,8 @@ static struct inode *ntfs_read_mft(struct inode *inode,
inode->i_op = NULL;
/* Setup 'uid' and 'gid' */
- inode->i_uid = sbi->options.fs_uid;
- inode->i_gid = sbi->options.fs_gid;
+ inode->i_uid = sbi->options->fs_uid;
+ inode->i_gid = sbi->options->fs_gid;
err = mi_init(&ni->mi, sbi, ino);
if (err)
@@ -224,12 +222,9 @@ next_attr:
if (!attr->non_res) {
ni->i_valid = inode->i_size = rsize;
inode_set_bytes(inode, rsize);
- t32 = asize;
- } else {
- t32 = le16_to_cpu(attr->nres.run_off);
}
- mode = S_IFREG | (0777 & sbi->options.fs_fmask_inv);
+ mode = S_IFREG | (0777 & sbi->options->fs_fmask_inv);
if (!attr->non_res) {
ni->ni_flags |= NI_FLAG_RESIDENT;
@@ -272,7 +267,7 @@ next_attr:
goto out;
mode = sb->s_root
- ? (S_IFDIR | (0777 & sbi->options.fs_dmask_inv))
+ ? (S_IFDIR | (0777 & sbi->options->fs_dmask_inv))
: (S_IFDIR | 0777);
goto next_attr;
@@ -315,17 +310,14 @@ next_attr:
rp_fa = ni_parse_reparse(ni, attr, &rp);
switch (rp_fa) {
case REPARSE_LINK:
- if (!attr->non_res) {
- inode->i_size = rsize;
- inode_set_bytes(inode, rsize);
- t32 = asize;
- } else {
- inode->i_size =
- le64_to_cpu(attr->nres.data_size);
- t32 = le16_to_cpu(attr->nres.run_off);
- }
+ /*
+ * Normal symlink.
+			 * Assume one unicode symbol == one utf8 character.
+ */
+ inode->i_size = le16_to_cpu(rp.SymbolicLinkReparseBuffer
+ .PrintNameLength) /
+ sizeof(u16);
- /* Looks like normal symlink. */
ni->i_valid = inode->i_size;
/* Clear directory bit. */
@@ -422,7 +414,7 @@ end_enum:
ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
inode->i_op = &ntfs_link_inode_operations;
inode->i_fop = NULL;
- inode_nohighmem(inode); // ??
+ inode_nohighmem(inode);
} else if (S_ISREG(mode)) {
ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
inode->i_op = &ntfs_file_inode_operations;
@@ -443,7 +435,7 @@ end_enum:
goto out;
}
- if ((sbi->options.sys_immutable &&
+ if ((sbi->options->sys_immutable &&
(std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
!S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
inode->i_flags |= S_IMMUTABLE;
@@ -1200,9 +1192,13 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
struct REPARSE_DATA_BUFFER *rp = NULL;
bool rp_inserted = false;
+ ni_lock_dir(dir_ni);
+
dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
- if (!dir_root)
- return ERR_PTR(-EINVAL);
+ if (!dir_root) {
+ err = -EINVAL;
+ goto out1;
+ }
if (S_ISDIR(mode)) {
/* Use parent's directory attributes. */
@@ -1244,7 +1240,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
* }
*/
} else if (S_ISREG(mode)) {
- if (sbi->options.sparse) {
+ if (sbi->options->sparse) {
/* Sparsed regular file, cause option 'sparse'. */
fa = FILE_ATTRIBUTE_SPARSE_FILE |
FILE_ATTRIBUTE_ARCHIVE;
@@ -1486,7 +1482,10 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
asize = ALIGN(SIZEOF_RESIDENT + nsize, 8);
t16 = PtrOffset(rec, attr);
- /* 0x78 - the size of EA + EAINFO to store WSL */
+ /*
+	 * The function 'ntfs_save_wsl_perm' below requires 0x78 bytes.
+	 * It is a good idea to keep extended attributes resident.
+ */
if (asize + t16 + 0x78 + 8 > sbi->record_size) {
CLST alen;
CLST clst = bytes_to_cluster(sbi, nsize);
@@ -1521,14 +1520,14 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
}
asize = SIZEOF_NONRESIDENT + ALIGN(err, 8);
- inode->i_size = nsize;
} else {
attr->res.data_off = SIZEOF_RESIDENT_LE;
attr->res.data_size = cpu_to_le32(nsize);
memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
- inode->i_size = nsize;
nsize = 0;
}
+	/* The size of a symlink equals the length of the input string. */
+ inode->i_size = size;
attr->size = cpu_to_le32(asize);
@@ -1551,6 +1550,9 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
if (err)
goto out6;
+ /* Unlock parent directory before ntfs_init_acl. */
+ ni_unlock(dir_ni);
+
inode->i_generation = le16_to_cpu(rec->seq);
dir->i_mtime = dir->i_ctime = inode->i_atime;
@@ -1562,6 +1564,8 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
inode->i_op = &ntfs_link_inode_operations;
inode->i_fop = NULL;
inode->i_mapping->a_ops = &ntfs_aops;
+ inode->i_size = size;
+ inode_nohighmem(inode);
} else if (S_ISREG(mode)) {
inode->i_op = &ntfs_file_inode_operations;
inode->i_fop = &ntfs_file_operations;
@@ -1577,7 +1581,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
if (!S_ISLNK(mode) && (sb->s_flags & SB_POSIXACL)) {
err = ntfs_init_acl(mnt_userns, inode, dir);
if (err)
- goto out6;
+ goto out7;
} else
#endif
{
@@ -1586,7 +1590,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
/* Write non resident data. */
if (nsize) {
- err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize);
+ err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize, 0);
if (err)
goto out7;
}
@@ -1607,8 +1611,10 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
out7:
/* Undo 'indx_insert_entry'. */
+ ni_lock_dir(dir_ni);
indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1,
le16_to_cpu(new_de->key_size), sbi);
+	/* ni_unlock(dir_ni) will be called later. */
out6:
if (rp_inserted)
ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
@@ -1632,8 +1638,10 @@ out2:
kfree(rp);
out1:
- if (err)
+ if (err) {
+ ni_unlock(dir_ni);
return ERR_PTR(err);
+ }
unlock_new_inode(inode);
@@ -1754,15 +1762,15 @@ void ntfs_evict_inode(struct inode *inode)
static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
int buflen)
{
- int i, err = 0;
+ int i, err = -EINVAL;
struct ntfs_inode *ni = ntfs_i(inode);
struct super_block *sb = inode->i_sb;
struct ntfs_sb_info *sbi = sb->s_fs_info;
- u64 i_size = inode->i_size;
- u16 nlen = 0;
+ u64 size;
+ u16 ulen = 0;
void *to_free = NULL;
struct REPARSE_DATA_BUFFER *rp;
- struct le_str *uni;
+ const __le16 *uname;
struct ATTRIB *attr;
/* Reparse data present. Try to parse it. */
@@ -1771,68 +1779,64 @@ static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
*buffer = 0;
- /* Read into temporal buffer. */
- if (i_size > sbi->reparse.max_size || i_size <= sizeof(u32)) {
- err = -EINVAL;
- goto out;
- }
-
attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
- if (!attr) {
- err = -EINVAL;
+ if (!attr)
goto out;
- }
if (!attr->non_res) {
- rp = resident_data_ex(attr, i_size);
- if (!rp) {
- err = -EINVAL;
+ rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
+ if (!rp)
goto out;
- }
+ size = le32_to_cpu(attr->res.data_size);
} else {
- rp = kmalloc(i_size, GFP_NOFS);
+ size = le64_to_cpu(attr->nres.data_size);
+ rp = NULL;
+ }
+
+ if (size > sbi->reparse.max_size || size <= sizeof(u32))
+ goto out;
+
+ if (!rp) {
+ rp = kmalloc(size, GFP_NOFS);
if (!rp) {
err = -ENOMEM;
goto out;
}
to_free = rp;
- err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, i_size, NULL);
+		/* Read into a temporary buffer. */
+ err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, size, NULL);
if (err)
goto out;
}
- err = -EINVAL;
-
/* Microsoft Tag. */
switch (rp->ReparseTag) {
case IO_REPARSE_TAG_MOUNT_POINT:
/* Mount points and junctions. */
/* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
- if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
- MountPointReparseBuffer.PathBuffer))
+ if (size <= offsetof(struct REPARSE_DATA_BUFFER,
+ MountPointReparseBuffer.PathBuffer))
goto out;
- uni = Add2Ptr(rp,
- offsetof(struct REPARSE_DATA_BUFFER,
- MountPointReparseBuffer.PathBuffer) +
- le16_to_cpu(rp->MountPointReparseBuffer
- .PrintNameOffset) -
- 2);
- nlen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
+ uname = Add2Ptr(rp,
+ offsetof(struct REPARSE_DATA_BUFFER,
+ MountPointReparseBuffer.PathBuffer) +
+ le16_to_cpu(rp->MountPointReparseBuffer
+ .PrintNameOffset));
+ ulen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
break;
case IO_REPARSE_TAG_SYMLINK:
/* FolderSymbolicLink */
/* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
- if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
- SymbolicLinkReparseBuffer.PathBuffer))
+ if (size <= offsetof(struct REPARSE_DATA_BUFFER,
+ SymbolicLinkReparseBuffer.PathBuffer))
goto out;
- uni = Add2Ptr(rp,
- offsetof(struct REPARSE_DATA_BUFFER,
- SymbolicLinkReparseBuffer.PathBuffer) +
- le16_to_cpu(rp->SymbolicLinkReparseBuffer
- .PrintNameOffset) -
- 2);
- nlen = le16_to_cpu(
+ uname = Add2Ptr(
+ rp, offsetof(struct REPARSE_DATA_BUFFER,
+ SymbolicLinkReparseBuffer.PathBuffer) +
+ le16_to_cpu(rp->SymbolicLinkReparseBuffer
+ .PrintNameOffset));
+ ulen = le16_to_cpu(
rp->SymbolicLinkReparseBuffer.PrintNameLength);
break;
@@ -1864,29 +1868,28 @@ static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
goto out;
}
if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
- i_size <= sizeof(struct REPARSE_POINT)) {
+ size <= sizeof(struct REPARSE_POINT)) {
goto out;
}
/* Users tag. */
- uni = Add2Ptr(rp, sizeof(struct REPARSE_POINT) - 2);
- nlen = le16_to_cpu(rp->ReparseDataLength) -
+ uname = Add2Ptr(rp, sizeof(struct REPARSE_POINT));
+ ulen = le16_to_cpu(rp->ReparseDataLength) -
sizeof(struct REPARSE_POINT);
}
	/* Convert ulen from bytes to UNICODE chars. */
- nlen >>= 1;
+ ulen >>= 1;
/* Check that name is available. */
- if (!nlen || &uni->name[nlen] > (__le16 *)Add2Ptr(rp, i_size))
+ if (!ulen || uname + ulen > (__le16 *)Add2Ptr(rp, size))
goto out;
/* If name is already zero terminated then truncate it now. */
- if (!uni->name[nlen - 1])
- nlen -= 1;
- uni->len = nlen;
+ if (!uname[ulen - 1])
+ ulen -= 1;
- err = ntfs_utf16_to_nls(sbi, uni, buffer, buflen);
+ err = ntfs_utf16_to_nls(sbi, uname, ulen, buffer, buflen);
if (err < 0)
goto out;
diff --git a/fs/ntfs3/lib/decompress_common.h b/fs/ntfs3/lib/decompress_common.h
index 2d70ae42f1b5..dd7ced000d0e 100644
--- a/fs/ntfs3/lib/decompress_common.h
+++ b/fs/ntfs3/lib/decompress_common.h
@@ -5,6 +5,9 @@
* Copyright (C) 2015 Eric Biggers
*/
+#ifndef _LINUX_NTFS3_LIB_DECOMPRESS_COMMON_H
+#define _LINUX_NTFS3_LIB_DECOMPRESS_COMMON_H
+
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/types.h>
@@ -336,3 +339,5 @@ static forceinline u8 *lz_copy(u8 *dst, u32 length, u32 offset, const u8 *bufend
return dst;
}
+
+#endif /* _LINUX_NTFS3_LIB_DECOMPRESS_COMMON_H */
diff --git a/fs/ntfs3/lib/lib.h b/fs/ntfs3/lib/lib.h
index f508fbad2e71..90309a5ae59c 100644
--- a/fs/ntfs3/lib/lib.h
+++ b/fs/ntfs3/lib/lib.h
@@ -7,6 +7,10 @@
* - linux kernel code style
*/
+#ifndef _LINUX_NTFS3_LIB_LIB_H
+#define _LINUX_NTFS3_LIB_LIB_H
+
+#include <linux/types.h>
/* globals from xpress_decompress.c */
struct xpress_decompressor *xpress_allocate_decompressor(void);
@@ -24,3 +28,5 @@ int lzx_decompress(struct lzx_decompressor *__restrict d,
const void *__restrict compressed_data,
size_t compressed_size, void *__restrict uncompressed_data,
size_t uncompressed_size);
+
+#endif /* _LINUX_NTFS3_LIB_LIB_H */
diff --git a/fs/ntfs3/lznt.c b/fs/ntfs3/lznt.c
index f1f691a67cc4..28f654561f27 100644
--- a/fs/ntfs3/lznt.c
+++ b/fs/ntfs3/lznt.c
@@ -5,13 +5,13 @@
*
*/
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
-#include <linux/fs.h>
-#include <linux/nls.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
#include "debug.h"
-#include "ntfs.h"
#include "ntfs_fs.h"
// clang-format off
@@ -292,7 +292,7 @@ next:
/*
* get_lznt_ctx
* @level: 0 - Standard compression.
- * !0 - Best compression, requires a lot of cpu.
+ * !0 - Best compression, requires a lot of cpu.
*/
struct lznt *get_lznt_ctx(int level)
{
diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
index e58415d07132..bc741213ad84 100644
--- a/fs/ntfs3/namei.c
+++ b/fs/ntfs3/namei.c
@@ -5,11 +5,7 @@
*
*/
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <linux/fs.h>
-#include <linux/iversion.h>
-#include <linux/namei.h>
#include <linux/nls.h>
#include "debug.h"
@@ -99,16 +95,11 @@ static struct dentry *ntfs_lookup(struct inode *dir, struct dentry *dentry,
static int ntfs_create(struct user_namespace *mnt_userns, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
- struct ntfs_inode *ni = ntfs_i(dir);
struct inode *inode;
- ni_lock_dir(ni);
-
inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFREG | mode,
0, NULL, 0, NULL);
- ni_unlock(ni);
-
return IS_ERR(inode) ? PTR_ERR(inode) : 0;
}
@@ -120,16 +111,11 @@ static int ntfs_create(struct user_namespace *mnt_userns, struct inode *dir,
static int ntfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
- struct ntfs_inode *ni = ntfs_i(dir);
struct inode *inode;
- ni_lock_dir(ni);
-
inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, mode, rdev,
NULL, 0, NULL);
- ni_unlock(ni);
-
return IS_ERR(inode) ? PTR_ERR(inode) : 0;
}
@@ -200,15 +186,10 @@ static int ntfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
{
u32 size = strlen(symname);
struct inode *inode;
- struct ntfs_inode *ni = ntfs_i(dir);
-
- ni_lock_dir(ni);
inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFLNK | 0777,
0, symname, size, NULL);
- ni_unlock(ni);
-
return IS_ERR(inode) ? PTR_ERR(inode) : 0;
}
@@ -219,15 +200,10 @@ static int ntfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
struct inode *inode;
- struct ntfs_inode *ni = ntfs_i(dir);
-
- ni_lock_dir(ni);
inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFDIR | mode,
0, NULL, 0, NULL);
- ni_unlock(ni);
-
return IS_ERR(inode) ? PTR_ERR(inode) : 0;
}
diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
index 6bb3e595263b..9cc396b117bf 100644
--- a/fs/ntfs3/ntfs.h
+++ b/fs/ntfs3/ntfs.h
@@ -10,19 +10,27 @@
#ifndef _LINUX_NTFS3_NTFS_H
#define _LINUX_NTFS3_NTFS_H
-/* TODO: Check 4K MFT record and 512 bytes cluster. */
+#include <linux/blkdev.h>
+#include <linux/build_bug.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "debug.h"
-/* Activate this define to use binary search in indexes. */
-#define NTFS3_INDEX_BINARY_SEARCH
+/* TODO: Check 4K MFT record and 512 bytes cluster. */
/* Check each run for marked clusters. */
#define NTFS3_CHECK_FREE_CLST
#define NTFS_NAME_LEN 255
-/* ntfs.sys used 500 maximum links on-disk struct allows up to 0xffff. */
-#define NTFS_LINK_MAX 0x400
-//#define NTFS_LINK_MAX 0xffff
+/*
+ * ntfs.sys used a maximum of 500 links; the on-disk struct allows up to 0xffff.
+ * xfstest generic/041 creates 3003 hardlinks.
+ */
+#define NTFS_LINK_MAX 4000
/*
* Activate to use 64 bit clusters instead of 32 bits in ntfs.sys.
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index dc71c59fd445..8aaec7e0804e 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -9,6 +9,37 @@
#ifndef _LINUX_NTFS3_NTFS_FS_H
#define _LINUX_NTFS3_NTFS_FS_H
+#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
+#include <linux/cleancache.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+#include <linux/uidgid.h>
+#include <asm/div64.h>
+#include <asm/page.h>
+
+#include "debug.h"
+#include "ntfs.h"
+
+struct dentry;
+struct fiemap_extent_info;
+struct user_namespace;
+struct page;
+struct writeback_control;
+enum utf16_endian;
+
+
#define MINUS_ONE_T ((size_t)(-1))
/* Biggest MFT / smallest cluster */
#define MAXIMUM_BYTES_PER_MFT 4096
@@ -52,6 +83,7 @@
// clang-format on
struct ntfs_mount_options {
+ char *nls_name;
struct nls_table *nls;
kuid_t fs_uid;
@@ -59,19 +91,16 @@ struct ntfs_mount_options {
u16 fs_fmask_inv;
u16 fs_dmask_inv;
- unsigned uid : 1, /* uid was set. */
- gid : 1, /* gid was set. */
- fmask : 1, /* fmask was set. */
- dmask : 1, /* dmask was set. */
- sys_immutable : 1, /* Immutable system files. */
- discard : 1, /* Issue discard requests on deletions. */
- sparse : 1, /* Create sparse files. */
- showmeta : 1, /* Show meta files. */
- nohidden : 1, /* Do not show hidden files. */
- force : 1, /* Rw mount dirty volume. */
- no_acs_rules : 1, /*Exclude acs rules. */
- prealloc : 1 /* Preallocate space when file is growing. */
- ;
+ unsigned fmask : 1; /* fmask was set. */
+	unsigned dmask : 1; /* dmask was set. */
+ unsigned sys_immutable : 1; /* Immutable system files. */
+ unsigned discard : 1; /* Issue discard requests on deletions. */
+ unsigned sparse : 1; /* Create sparse files. */
+ unsigned showmeta : 1; /* Show meta files. */
+ unsigned nohidden : 1; /* Do not show hidden files. */
+ unsigned force : 1; /* RW mount dirty volume. */
+ unsigned noacsrules : 1; /* Exclude acs rules. */
+ unsigned prealloc : 1; /* Preallocate space when file is growing. */
};
/* Special value to unpack and deallocate. */
@@ -182,10 +211,8 @@ struct ntfs_sb_info {
u32 blocks_per_cluster; // cluster_size / sb->s_blocksize
u32 record_size;
- u32 sector_size;
u32 index_size;
- u8 sector_bits;
u8 cluster_bits;
u8 record_bits;
@@ -279,7 +306,7 @@ struct ntfs_sb_info {
#endif
} compress;
- struct ntfs_mount_options options;
+ struct ntfs_mount_options *options;
struct ratelimit_state msg_ratelimit;
};
@@ -436,7 +463,7 @@ bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le);
bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
const __le16 *name, size_t name_len,
const struct MFT_REF *ref);
-int al_update(struct ntfs_inode *ni);
+int al_update(struct ntfs_inode *ni, int sync);
static inline size_t al_aligned(size_t size)
{
return (size + 1023) & ~(size_t)1023;
@@ -448,7 +475,7 @@ bool are_bits_set(const ulong *map, size_t bit, size_t nbits);
size_t get_set_bits_ex(const ulong *map, size_t bit, size_t nbits);
/* Globals from dir.c */
-int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
+int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const __le16 *name, u32 len,
u8 *buf, int buf_len);
int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
struct cpu_str *uni, u32 max_ulen,
@@ -520,7 +547,7 @@ struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
struct ATTR_LIST_ENTRY **entry);
int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa);
enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
- void *buffer);
+ struct REPARSE_DATA_BUFFER *buffer);
int ni_write_inode(struct inode *inode, int sync, const char *hint);
#define _ni_write_inode(i, w) ni_write_inode(i, w, __func__)
int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
@@ -577,7 +604,7 @@ int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer);
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
const void *buffer, int wait);
int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
- u64 vbo, const void *buf, size_t bytes);
+ u64 vbo, const void *buf, size_t bytes, int sync);
struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
const struct runs_tree *run, u64 vbo);
int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
index 103705c86772..861e35791506 100644
--- a/fs/ntfs3/record.c
+++ b/fs/ntfs3/record.c
@@ -5,10 +5,7 @@
*
*/
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <linux/fs.h>
-#include <linux/nls.h>
#include "debug.h"
#include "ntfs.h"
diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
index 26ed2b64345e..a8fec651f973 100644
--- a/fs/ntfs3/run.c
+++ b/fs/ntfs3/run.c
@@ -7,10 +7,8 @@
*/
#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/log2.h>
-#include <linux/nls.h>
#include "debug.h"
#include "ntfs.h"
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 55bbc9200a10..d41d76979e12 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -23,16 +23,15 @@
*
*/
-#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/fs.h>
-#include <linux/iversion.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/nls.h>
-#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/statfs.h>
@@ -205,9 +204,11 @@ void *ntfs_put_shared(void *ptr)
return ret;
}
-static inline void clear_mount_options(struct ntfs_mount_options *options)
+static inline void put_mount_options(struct ntfs_mount_options *options)
{
+ kfree(options->nls_name);
unload_nls(options->nls);
+ kfree(options);
}
enum Opt {
@@ -223,218 +224,175 @@ enum Opt {
Opt_nohidden,
Opt_showmeta,
Opt_acl,
- Opt_noatime,
- Opt_nls,
+ Opt_iocharset,
Opt_prealloc,
- Opt_no_acs_rules,
+ Opt_noacsrules,
Opt_err,
};
-static const match_table_t ntfs_tokens = {
- { Opt_uid, "uid=%u" },
- { Opt_gid, "gid=%u" },
- { Opt_umask, "umask=%o" },
- { Opt_dmask, "dmask=%o" },
- { Opt_fmask, "fmask=%o" },
- { Opt_immutable, "sys_immutable" },
- { Opt_discard, "discard" },
- { Opt_force, "force" },
- { Opt_sparse, "sparse" },
- { Opt_nohidden, "nohidden" },
- { Opt_acl, "acl" },
- { Opt_noatime, "noatime" },
- { Opt_showmeta, "showmeta" },
- { Opt_nls, "nls=%s" },
- { Opt_prealloc, "prealloc" },
- { Opt_no_acs_rules, "no_acs_rules" },
- { Opt_err, NULL },
+static const struct fs_parameter_spec ntfs_fs_parameters[] = {
+ fsparam_u32("uid", Opt_uid),
+ fsparam_u32("gid", Opt_gid),
+ fsparam_u32oct("umask", Opt_umask),
+ fsparam_u32oct("dmask", Opt_dmask),
+ fsparam_u32oct("fmask", Opt_fmask),
+ fsparam_flag_no("sys_immutable", Opt_immutable),
+ fsparam_flag_no("discard", Opt_discard),
+ fsparam_flag_no("force", Opt_force),
+ fsparam_flag_no("sparse", Opt_sparse),
+ fsparam_flag_no("hidden", Opt_nohidden),
+ fsparam_flag_no("acl", Opt_acl),
+ fsparam_flag_no("showmeta", Opt_showmeta),
+ fsparam_flag_no("prealloc", Opt_prealloc),
+ fsparam_flag_no("acsrules", Opt_noacsrules),
+ fsparam_string("iocharset", Opt_iocharset),
+ {}
};
-static noinline int ntfs_parse_options(struct super_block *sb, char *options,
- int silent,
- struct ntfs_mount_options *opts)
+/*
+ * Load the nls table, or return NULL if @nls is utf8.
+ */
+static struct nls_table *ntfs_load_nls(char *nls)
{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option;
- char nls_name[30];
- struct nls_table *nls;
+ struct nls_table *ret;
- opts->fs_uid = current_uid();
- opts->fs_gid = current_gid();
- opts->fs_fmask_inv = opts->fs_dmask_inv = ~current_umask();
- nls_name[0] = 0;
+ if (!nls)
+ nls = CONFIG_NLS_DEFAULT;
- if (!options)
- goto out;
+ if (strcmp(nls, "utf8") == 0)
+ return NULL;
- while ((p = strsep(&options, ","))) {
- int token;
+ if (strcmp(nls, CONFIG_NLS_DEFAULT) == 0)
+ return load_nls_default();
- if (!*p)
- continue;
+ ret = load_nls(nls);
+ if (ret)
+ return ret;
- token = match_token(p, ntfs_tokens, args);
- switch (token) {
- case Opt_immutable:
- opts->sys_immutable = 1;
- break;
- case Opt_uid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- opts->fs_uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(opts->fs_uid))
- return -EINVAL;
- opts->uid = 1;
- break;
- case Opt_gid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- opts->fs_gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(opts->fs_gid))
- return -EINVAL;
- opts->gid = 1;
- break;
- case Opt_umask:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->fs_fmask_inv = opts->fs_dmask_inv = ~option;
- opts->fmask = opts->dmask = 1;
- break;
- case Opt_dmask:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->fs_dmask_inv = ~option;
- opts->dmask = 1;
- break;
- case Opt_fmask:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->fs_fmask_inv = ~option;
- opts->fmask = 1;
- break;
- case Opt_discard:
- opts->discard = 1;
- break;
- case Opt_force:
- opts->force = 1;
- break;
- case Opt_sparse:
- opts->sparse = 1;
- break;
- case Opt_nohidden:
- opts->nohidden = 1;
- break;
- case Opt_acl:
+ return ERR_PTR(-EINVAL);
+}
+
+static int ntfs_fs_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ struct ntfs_mount_options *opts = fc->fs_private;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, ntfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_uid:
+ opts->fs_uid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(opts->fs_uid))
+ return invalf(fc, "ntfs3: Invalid value for uid.");
+ break;
+ case Opt_gid:
+ opts->fs_gid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(opts->fs_gid))
+ return invalf(fc, "ntfs3: Invalid value for gid.");
+ break;
+ case Opt_umask:
+ if (result.uint_32 & ~07777)
+ return invalf(fc, "ntfs3: Invalid value for umask.");
+ opts->fs_fmask_inv = ~result.uint_32;
+ opts->fs_dmask_inv = ~result.uint_32;
+ opts->fmask = 1;
+ opts->dmask = 1;
+ break;
+ case Opt_dmask:
+ if (result.uint_32 & ~07777)
+ return invalf(fc, "ntfs3: Invalid value for dmask.");
+ opts->fs_dmask_inv = ~result.uint_32;
+ opts->dmask = 1;
+ break;
+ case Opt_fmask:
+ if (result.uint_32 & ~07777)
+ return invalf(fc, "ntfs3: Invalid value for fmask.");
+ opts->fs_fmask_inv = ~result.uint_32;
+ opts->fmask = 1;
+ break;
+ case Opt_immutable:
+ opts->sys_immutable = result.negated ? 0 : 1;
+ break;
+ case Opt_discard:
+ opts->discard = result.negated ? 0 : 1;
+ break;
+ case Opt_force:
+ opts->force = result.negated ? 0 : 1;
+ break;
+ case Opt_sparse:
+ opts->sparse = result.negated ? 0 : 1;
+ break;
+ case Opt_nohidden:
+ opts->nohidden = result.negated ? 1 : 0;
+ break;
+ case Opt_acl:
+ if (!result.negated)
#ifdef CONFIG_NTFS3_FS_POSIX_ACL
- sb->s_flags |= SB_POSIXACL;
- break;
+ fc->sb_flags |= SB_POSIXACL;
#else
- ntfs_err(sb, "support for ACL not compiled in!");
- return -EINVAL;
+ return invalf(fc, "ntfs3: Support for ACL not compiled in!");
#endif
- case Opt_noatime:
- sb->s_flags |= SB_NOATIME;
- break;
- case Opt_showmeta:
- opts->showmeta = 1;
- break;
- case Opt_nls:
- match_strlcpy(nls_name, &args[0], sizeof(nls_name));
- break;
- case Opt_prealloc:
- opts->prealloc = 1;
- break;
- case Opt_no_acs_rules:
- opts->no_acs_rules = 1;
- break;
- default:
- if (!silent)
- ntfs_err(
- sb,
- "Unrecognized mount option \"%s\" or missing value",
- p);
- //return -EINVAL;
- }
- }
-
-out:
- if (!strcmp(nls_name[0] ? nls_name : CONFIG_NLS_DEFAULT, "utf8")) {
- /*
- * For UTF-8 use utf16s_to_utf8s()/utf8s_to_utf16s()
- * instead of NLS.
- */
- nls = NULL;
- } else if (nls_name[0]) {
- nls = load_nls(nls_name);
- if (!nls) {
- ntfs_err(sb, "failed to load \"%s\"", nls_name);
- return -EINVAL;
- }
- } else {
- nls = load_nls_default();
- if (!nls) {
- ntfs_err(sb, "failed to load default nls");
- return -EINVAL;
- }
+ else
+ fc->sb_flags &= ~SB_POSIXACL;
+ break;
+ case Opt_showmeta:
+ opts->showmeta = result.negated ? 0 : 1;
+ break;
+ case Opt_iocharset:
+ kfree(opts->nls_name);
+ opts->nls_name = param->string;
+ param->string = NULL;
+ break;
+ case Opt_prealloc:
+ opts->prealloc = result.negated ? 0 : 1;
+ break;
+ case Opt_noacsrules:
+ opts->noacsrules = result.negated ? 1 : 0;
+ break;
+ default:
+		/* Should not be here unless we forgot to add a case. */
+ return -EINVAL;
}
- opts->nls = nls;
-
return 0;
}
-static int ntfs_remount(struct super_block *sb, int *flags, char *data)
+static int ntfs_fs_reconfigure(struct fs_context *fc)
{
- int err, ro_rw;
+ struct super_block *sb = fc->root->d_sb;
struct ntfs_sb_info *sbi = sb->s_fs_info;
- struct ntfs_mount_options old_opts;
- char *orig_data = kstrdup(data, GFP_KERNEL);
-
- if (data && !orig_data)
- return -ENOMEM;
+ struct ntfs_mount_options *new_opts = fc->fs_private;
+ int ro_rw;
- /* Store original options. */
- memcpy(&old_opts, &sbi->options, sizeof(old_opts));
- clear_mount_options(&sbi->options);
- memset(&sbi->options, 0, sizeof(sbi->options));
-
- err = ntfs_parse_options(sb, data, 0, &sbi->options);
- if (err)
- goto restore_opts;
-
- ro_rw = sb_rdonly(sb) && !(*flags & SB_RDONLY);
+ ro_rw = sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY);
if (ro_rw && (sbi->flags & NTFS_FLAGS_NEED_REPLAY)) {
- ntfs_warn(
- sb,
- "Couldn't remount rw because journal is not replayed. Please umount/remount instead\n");
- err = -EINVAL;
- goto restore_opts;
+ errorf(fc, "ntfs3: Couldn't remount rw because journal is not replayed. Please umount/remount instead\n");
+ return -EINVAL;
}
+ new_opts->nls = ntfs_load_nls(new_opts->nls_name);
+ if (IS_ERR(new_opts->nls)) {
+ new_opts->nls = NULL;
+ errorf(fc, "ntfs3: Cannot load iocharset %s", new_opts->nls_name);
+ return -EINVAL;
+ }
+ if (new_opts->nls != sbi->options->nls)
+ return invalf(fc, "ntfs3: Cannot use different iocharset when remounting!");
+
sync_filesystem(sb);
if (ro_rw && (sbi->volume.flags & VOLUME_FLAG_DIRTY) &&
- !sbi->options.force) {
- ntfs_warn(sb, "volume is dirty and \"force\" flag is not set!");
- err = -EINVAL;
- goto restore_opts;
+ !new_opts->force) {
+ errorf(fc, "ntfs3: Volume is dirty and \"force\" flag is not set!");
+ return -EINVAL;
}
- clear_mount_options(&old_opts);
-
- *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME) |
- SB_NODIRATIME | SB_NOATIME;
- ntfs_info(sb, "re-mounted. Opts: %s", orig_data);
- err = 0;
- goto out;
+ memcpy(sbi->options, new_opts, sizeof(*new_opts));
-restore_opts:
- clear_mount_options(&sbi->options);
- memcpy(&sbi->options, &old_opts, sizeof(old_opts));
-
-out:
- kfree(orig_data);
- return err;
+ return 0;
}
static struct kmem_cache *ntfs_inode_cachep;
@@ -513,8 +471,6 @@ static noinline void put_ntfs(struct ntfs_sb_info *sbi)
xpress_free_decompressor(sbi->compress.xpress);
lzx_free_decompressor(sbi->compress.lzx);
#endif
- clear_mount_options(&sbi->options);
-
kfree(sbi);
}
@@ -525,7 +481,9 @@ static void ntfs_put_super(struct super_block *sb)
/* Mark rw ntfs as clear, if possible. */
ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
+ put_mount_options(sbi->options);
put_ntfs(sbi);
+ sb->s_fs_info = NULL;
sync_blockdev(sb->s_bdev);
}
@@ -552,23 +510,21 @@ static int ntfs_show_options(struct seq_file *m, struct dentry *root)
{
struct super_block *sb = root->d_sb;
struct ntfs_sb_info *sbi = sb->s_fs_info;
- struct ntfs_mount_options *opts = &sbi->options;
+ struct ntfs_mount_options *opts = sbi->options;
struct user_namespace *user_ns = seq_user_ns(m);
- if (opts->uid)
- seq_printf(m, ",uid=%u",
- from_kuid_munged(user_ns, opts->fs_uid));
- if (opts->gid)
- seq_printf(m, ",gid=%u",
- from_kgid_munged(user_ns, opts->fs_gid));
+ seq_printf(m, ",uid=%u",
+ from_kuid_munged(user_ns, opts->fs_uid));
+ seq_printf(m, ",gid=%u",
+ from_kgid_munged(user_ns, opts->fs_gid));
if (opts->fmask)
seq_printf(m, ",fmask=%04o", ~opts->fs_fmask_inv);
if (opts->dmask)
seq_printf(m, ",dmask=%04o", ~opts->fs_dmask_inv);
if (opts->nls)
- seq_printf(m, ",nls=%s", opts->nls->charset);
+ seq_printf(m, ",iocharset=%s", opts->nls->charset);
else
- seq_puts(m, ",nls=utf8");
+ seq_puts(m, ",iocharset=utf8");
if (opts->sys_immutable)
seq_puts(m, ",sys_immutable");
if (opts->discard)
@@ -581,14 +537,12 @@ static int ntfs_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",nohidden");
if (opts->force)
seq_puts(m, ",force");
- if (opts->no_acs_rules)
- seq_puts(m, ",no_acs_rules");
+ if (opts->noacsrules)
+ seq_puts(m, ",noacsrules");
if (opts->prealloc)
seq_puts(m, ",prealloc");
if (sb->s_flags & SB_POSIXACL)
seq_puts(m, ",acl");
- if (sb->s_flags & SB_NOATIME)
- seq_puts(m, ",noatime");
return 0;
}
@@ -643,7 +597,6 @@ static const struct super_operations ntfs_sops = {
.statfs = ntfs_statfs,
.show_options = ntfs_show_options,
.sync_fs = ntfs_sync_fs,
- .remount_fs = ntfs_remount,
.write_inode = ntfs3_write_inode,
};
@@ -729,7 +682,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
struct ntfs_sb_info *sbi = sb->s_fs_info;
int err;
u32 mb, gb, boot_sector_size, sct_per_clst, record_size;
- u64 sectors, clusters, fs_size, mlcn, mlcn2;
+ u64 sectors, clusters, mlcn, mlcn2;
struct NTFS_BOOT *boot;
struct buffer_head *bh;
struct MFT_REC *rec;
@@ -787,20 +740,20 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
goto out;
}
- sbi->sector_size = boot_sector_size;
- sbi->sector_bits = blksize_bits(boot_sector_size);
- fs_size = (sectors + 1) << sbi->sector_bits;
+ sbi->volume.size = sectors * boot_sector_size;
- gb = format_size_gb(fs_size, &mb);
+ gb = format_size_gb(sbi->volume.size + boot_sector_size, &mb);
/*
* - Volume formatted and mounted with the same sector size.
* - Volume formatted 4K and mounted as 512.
* - Volume formatted 512 and mounted as 4K.
*/
- if (sbi->sector_size != sector_size) {
- ntfs_warn(sb,
- "Different NTFS' sector size and media sector size");
+ if (boot_sector_size != sector_size) {
+ ntfs_warn(
+ sb,
+ "Different NTFS' sector size (%u) and media sector size (%u)",
+ boot_sector_size, sector_size);
dev_size += sector_size - 1;
}
@@ -810,8 +763,19 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
sbi->mft.lbo = mlcn << sbi->cluster_bits;
sbi->mft.lbo2 = mlcn2 << sbi->cluster_bits;
- if (sbi->cluster_size < sbi->sector_size)
+ /* Compare boot's cluster and sector. */
+ if (sbi->cluster_size < boot_sector_size)
+ goto out;
+
+ /* Compare boot's cluster and media sector. */
+ if (sbi->cluster_size < sector_size) {
+ /* No way to use ntfs_get_block in this case. */
+ ntfs_err(
+ sb,
+			"Failed to mount because the NTFS cluster size (%u) is less than the media sector size (%u)",
+ sbi->cluster_size, sector_size);
goto out;
+ }
sbi->cluster_mask = sbi->cluster_size - 1;
sbi->cluster_mask_inv = ~(u64)sbi->cluster_mask;
@@ -836,10 +800,9 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
: (u32)boot->index_size << sbi->cluster_bits;
sbi->volume.ser_num = le64_to_cpu(boot->serial_num);
- sbi->volume.size = sectors << sbi->sector_bits;
/* Warning if RAW volume. */
- if (dev_size < fs_size) {
+ if (dev_size < sbi->volume.size + boot_sector_size) {
u32 mb0, gb0;
gb0 = format_size_gb(dev_size, &mb0);
@@ -883,8 +846,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
rec->total = cpu_to_le32(sbi->record_size);
((struct ATTRIB *)Add2Ptr(rec, ao))->type = ATTR_END;
- if (sbi->cluster_size < PAGE_SIZE)
- sb_set_blocksize(sb, sbi->cluster_size);
+ sb_set_blocksize(sb, min_t(u32, sbi->cluster_size, PAGE_SIZE));
sbi->block_mask = sb->s_blocksize - 1;
sbi->blocks_per_cluster = sbi->cluster_size >> sb->s_blocksize_bits;
@@ -897,9 +859,11 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
if (clusters >= (1ull << (64 - sbi->cluster_bits)))
sbi->maxbytes = -1;
sbi->maxbytes_sparse = -1;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
#else
/* Maximum size for sparse file. */
sbi->maxbytes_sparse = (1ull << (sbi->cluster_bits + 32)) - 1;
+ sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
#endif
err = 0;
@@ -913,14 +877,13 @@ out:
/*
* ntfs_fill_super - Try to mount.
*/
-static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
+static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
int err;
- struct ntfs_sb_info *sbi;
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
struct block_device *bdev = sb->s_bdev;
- struct inode *bd_inode = bdev->bd_inode;
- struct request_queue *rq = bdev_get_queue(bdev);
- struct inode *inode = NULL;
+ struct request_queue *rq;
+ struct inode *inode;
struct ntfs_inode *ni;
size_t i, tt;
CLST vcn, lcn, len;
@@ -928,18 +891,11 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
const struct VOLUME_INFO *info;
u32 idx, done, bytes;
struct ATTR_DEF_ENTRY *t;
- u16 *upcase = NULL;
u16 *shared;
- bool is_ro;
struct MFT_REF ref;
ref.high = 0;
- sbi = kzalloc(sizeof(struct ntfs_sb_info), GFP_NOFS);
- if (!sbi)
- return -ENOMEM;
-
- sb->s_fs_info = sbi;
sbi->sb = sb;
sb->s_flags |= SB_NODIRATIME;
sb->s_magic = 0x7366746e; // "ntfs"
@@ -948,41 +904,27 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_time_gran = NTFS_TIME_GRAN; // 100 nsec
sb->s_xattr = ntfs_xattr_handlers;
- ratelimit_state_init(&sbi->msg_ratelimit, DEFAULT_RATELIMIT_INTERVAL,
- DEFAULT_RATELIMIT_BURST);
-
- err = ntfs_parse_options(sb, data, silent, &sbi->options);
- if (err)
+ sbi->options->nls = ntfs_load_nls(sbi->options->nls_name);
+ if (IS_ERR(sbi->options->nls)) {
+ sbi->options->nls = NULL;
+ errorf(fc, "Cannot load nls %s", sbi->options->nls_name);
+ err = -EINVAL;
goto out;
+ }
- if (!rq || !blk_queue_discard(rq) || !rq->limits.discard_granularity) {
- ;
- } else {
+ rq = bdev_get_queue(bdev);
+ if (blk_queue_discard(rq) && rq->limits.discard_granularity) {
sbi->discard_granularity = rq->limits.discard_granularity;
sbi->discard_granularity_mask_inv =
~(u64)(sbi->discard_granularity - 1);
}
- sb_set_blocksize(sb, PAGE_SIZE);
-
/* Parse boot. */
err = ntfs_init_from_boot(sb, rq ? queue_logical_block_size(rq) : 512,
- bd_inode->i_size);
+ bdev->bd_inode->i_size);
if (err)
goto out;
-#ifdef CONFIG_NTFS3_64BIT_CLUSTER
- sb->s_maxbytes = MAX_LFS_FILESIZE;
-#else
- sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
-#endif
-
- mutex_init(&sbi->compress.mtx_lznt);
-#ifdef CONFIG_NTFS3_LZX_XPRESS
- mutex_init(&sbi->compress.mtx_xpress);
- mutex_init(&sbi->compress.mtx_lzx);
-#endif
-
/*
* Load $Volume. This should be done before $LogFile
	 * because 'sbi->volume.ni' is used in 'ntfs_set_state'.
@@ -991,9 +933,8 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
ref.seq = cpu_to_le16(MFT_REC_VOL);
inode = ntfs_iget5(sb, &ref, &NAME_VOLUME);
if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
ntfs_err(sb, "Failed to load $Volume.");
- inode = NULL;
+ err = PTR_ERR(inode);
goto out;
}
@@ -1015,36 +956,33 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
} else {
/* Should we break mounting here? */
//err = -EINVAL;
- //goto out;
+ //goto put_inode_out;
}
attr = ni_find_attr(ni, attr, NULL, ATTR_VOL_INFO, NULL, 0, NULL, NULL);
if (!attr || is_attr_ext(attr)) {
err = -EINVAL;
- goto out;
+ goto put_inode_out;
}
info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
if (!info) {
err = -EINVAL;
- goto out;
+ goto put_inode_out;
}
sbi->volume.major_ver = info->major_ver;
sbi->volume.minor_ver = info->minor_ver;
sbi->volume.flags = info->flags;
-
sbi->volume.ni = ni;
- inode = NULL;
/* Load $MFTMirr to estimate recs_mirr. */
ref.low = cpu_to_le32(MFT_REC_MIRR);
ref.seq = cpu_to_le16(MFT_REC_MIRR);
inode = ntfs_iget5(sb, &ref, &NAME_MIRROR);
if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
ntfs_err(sb, "Failed to load $MFTMirr.");
- inode = NULL;
+ err = PTR_ERR(inode);
goto out;
}
@@ -1058,9 +996,8 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
ref.seq = cpu_to_le16(MFT_REC_LOG);
inode = ntfs_iget5(sb, &ref, &NAME_LOGFILE);
if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
ntfs_err(sb, "Failed to load \x24LogFile.");
- inode = NULL;
+ err = PTR_ERR(inode);
goto out;
}
@@ -1068,22 +1005,19 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
err = ntfs_loadlog_and_replay(ni, sbi);
if (err)
- goto out;
+ goto put_inode_out;
iput(inode);
- inode = NULL;
-
- is_ro = sb_rdonly(sbi->sb);
if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
- if (!is_ro) {
+ if (!sb_rdonly(sb)) {
ntfs_warn(sb,
"failed to replay log file. Can't mount rw!");
err = -EINVAL;
goto out;
}
} else if (sbi->volume.flags & VOLUME_FLAG_DIRTY) {
- if (!is_ro && !sbi->options.force) {
+ if (!sb_rdonly(sb) && !sbi->options->force) {
ntfs_warn(
sb,
"volume is dirty and \"force\" flag is not set!");
@@ -1098,9 +1032,8 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
inode = ntfs_iget5(sb, &ref, &NAME_MFT);
if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
ntfs_err(sb, "Failed to load $MFT.");
- inode = NULL;
+ err = PTR_ERR(inode);
goto out;
}
@@ -1112,11 +1045,11 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
err = wnd_init(&sbi->mft.bitmap, sb, tt);
if (err)
- goto out;
+ goto put_inode_out;
err = ni_load_all_mi(ni);
if (err)
- goto out;
+ goto put_inode_out;
sbi->mft.ni = ni;
@@ -1125,9 +1058,8 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
ref.seq = cpu_to_le16(MFT_REC_BADCLUST);
inode = ntfs_iget5(sb, &ref, &NAME_BADCLUS);
if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
ntfs_err(sb, "Failed to load $BadClus.");
- inode = NULL;
+ err = PTR_ERR(inode);
goto out;
}
@@ -1150,18 +1082,15 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
ref.seq = cpu_to_le16(MFT_REC_BITMAP);
inode = ntfs_iget5(sb, &ref, &NAME_BITMAP);
if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
ntfs_err(sb, "Failed to load $Bitmap.");
- inode = NULL;
+ err = PTR_ERR(inode);
goto out;
}
- ni = ntfs_i(inode);
-
#ifndef CONFIG_NTFS3_64BIT_CLUSTER
if (inode->i_size >> 32) {
err = -EINVAL;
- goto out;
+ goto put_inode_out;
}
#endif
@@ -1169,14 +1098,14 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
tt = sbi->used.bitmap.nbits;
if (inode->i_size < bitmap_size(tt)) {
err = -EINVAL;
- goto out;
+ goto put_inode_out;
}
/* Not necessary. */
sbi->used.bitmap.set_tail = true;
- err = wnd_init(&sbi->used.bitmap, sbi->sb, tt);
+ err = wnd_init(&sbi->used.bitmap, sb, tt);
if (err)
- goto out;
+ goto put_inode_out;
iput(inode);
@@ -1188,23 +1117,22 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
/* Load $AttrDef. */
ref.low = cpu_to_le32(MFT_REC_ATTR);
ref.seq = cpu_to_le16(MFT_REC_ATTR);
- inode = ntfs_iget5(sbi->sb, &ref, &NAME_ATTRDEF);
+ inode = ntfs_iget5(sb, &ref, &NAME_ATTRDEF);
if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
ntfs_err(sb, "Failed to load $AttrDef -> %d", err);
- inode = NULL;
+ err = PTR_ERR(inode);
goto out;
}
if (inode->i_size < sizeof(struct ATTR_DEF_ENTRY)) {
err = -EINVAL;
- goto out;
+ goto put_inode_out;
}
bytes = inode->i_size;
sbi->def_table = t = kmalloc(bytes, GFP_NOFS);
if (!t) {
err = -ENOMEM;
- goto out;
+ goto put_inode_out;
}
for (done = idx = 0; done < bytes; done += PAGE_SIZE, idx++) {
@@ -1213,7 +1141,7 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
if (IS_ERR(page)) {
err = PTR_ERR(page);
- goto out;
+ goto put_inode_out;
}
memcpy(Add2Ptr(t, done), page_address(page),
min(PAGE_SIZE, tail));
@@ -1221,7 +1149,7 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
if (!idx && ATTR_STD != t->type) {
err = -EINVAL;
- goto out;
+ goto put_inode_out;
}
}
@@ -1254,33 +1182,24 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
ref.seq = cpu_to_le16(MFT_REC_UPCASE);
inode = ntfs_iget5(sb, &ref, &NAME_UPCASE);
if (IS_ERR(inode)) {
+ ntfs_err(sb, "Failed to load $UpCase.");
err = PTR_ERR(inode);
- ntfs_err(sb, "Failed to load \x24LogFile.");
- inode = NULL;
goto out;
}
- ni = ntfs_i(inode);
-
if (inode->i_size != 0x10000 * sizeof(short)) {
err = -EINVAL;
- goto out;
- }
-
- sbi->upcase = upcase = kvmalloc(0x10000 * sizeof(short), GFP_KERNEL);
- if (!upcase) {
- err = -ENOMEM;
- goto out;
+ goto put_inode_out;
}
for (idx = 0; idx < (0x10000 * sizeof(short) >> PAGE_SHIFT); idx++) {
const __le16 *src;
- u16 *dst = Add2Ptr(upcase, idx << PAGE_SHIFT);
+ u16 *dst = Add2Ptr(sbi->upcase, idx << PAGE_SHIFT);
struct page *page = ntfs_map_page(inode->i_mapping, idx);
if (IS_ERR(page)) {
err = PTR_ERR(page);
- goto out;
+ goto put_inode_out;
}
src = page_address(page);
@@ -1294,14 +1213,13 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
ntfs_unmap_page(page);
}
- shared = ntfs_set_shared(upcase, 0x10000 * sizeof(short));
- if (shared && upcase != shared) {
+ shared = ntfs_set_shared(sbi->upcase, 0x10000 * sizeof(short));
+ if (shared && sbi->upcase != shared) {
+ kvfree(sbi->upcase);
sbi->upcase = shared;
- kvfree(upcase);
}
iput(inode);
- inode = NULL;
if (is_ntfs3(sbi)) {
/* Load $Secure. */
@@ -1331,34 +1249,31 @@ load_root:
ref.seq = cpu_to_le16(MFT_REC_ROOT);
inode = ntfs_iget5(sb, &ref, &NAME_ROOT);
if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
ntfs_err(sb, "Failed to load root.");
- inode = NULL;
+ err = PTR_ERR(inode);
goto out;
}
- ni = ntfs_i(inode);
-
sb->s_root = d_make_root(inode);
-
if (!sb->s_root) {
- err = -EINVAL;
- goto out;
+ err = -ENOMEM;
+ goto put_inode_out;
}
+ fc->fs_private = NULL;
+
return 0;
-out:
+put_inode_out:
iput(inode);
-
- if (sb->s_root) {
- d_drop(sb->s_root);
- sb->s_root = NULL;
- }
-
+out:
+ /*
+ * Free resources here.
+ * ntfs_fs_free will be called with fc->s_fs_info = NULL
+ */
put_ntfs(sbi);
-
sb->s_fs_info = NULL;
+
return err;
}
@@ -1403,7 +1318,7 @@ int ntfs_discard(struct ntfs_sb_info *sbi, CLST lcn, CLST len)
if (sbi->flags & NTFS_FLAGS_NODISCARD)
return -EOPNOTSUPP;
- if (!sbi->options.discard)
+ if (!sbi->options->discard)
return -EOPNOTSUPP;
lbo = (u64)lcn << sbi->cluster_bits;
@@ -1428,19 +1343,99 @@ int ntfs_discard(struct ntfs_sb_info *sbi, CLST lcn, CLST len)
return err;
}
-static struct dentry *ntfs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static int ntfs_fs_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, ntfs_fill_super);
+}
+
+/*
+ * ntfs_fs_free - Free fs_context.
+ *
+ * Note that this will be called after fill_super and reconfigure
+ * even when they succeed, so they must take ownership of any
+ * pointers they want to keep.
+ */
+static void ntfs_fs_free(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, ntfs_fill_super);
+ struct ntfs_mount_options *opts = fc->fs_private;
+ struct ntfs_sb_info *sbi = fc->s_fs_info;
+
+ if (sbi)
+ put_ntfs(sbi);
+
+ if (opts)
+ put_mount_options(opts);
+}
+
+static const struct fs_context_operations ntfs_context_ops = {
+ .parse_param = ntfs_fs_parse_param,
+ .get_tree = ntfs_fs_get_tree,
+ .reconfigure = ntfs_fs_reconfigure,
+ .free = ntfs_fs_free,
+};
+
+/*
+ * ntfs_init_fs_context - Initialize sbi and opts
+ *
+ * This will be called on mount/remount. We initialize options first
+ * so that on remount we can reuse just them.
+ */
+static int ntfs_init_fs_context(struct fs_context *fc)
+{
+ struct ntfs_mount_options *opts;
+ struct ntfs_sb_info *sbi;
+
+ opts = kzalloc(sizeof(struct ntfs_mount_options), GFP_NOFS);
+ if (!opts)
+ return -ENOMEM;
+
+ /* Default options. */
+ opts->fs_uid = current_uid();
+ opts->fs_gid = current_gid();
+ opts->fs_fmask_inv = ~current_umask();
+ opts->fs_dmask_inv = ~current_umask();
+
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE)
+ goto ok;
+
+ sbi = kzalloc(sizeof(struct ntfs_sb_info), GFP_NOFS);
+ if (!sbi)
+ goto free_opts;
+
+ sbi->upcase = kvmalloc(0x10000 * sizeof(short), GFP_KERNEL);
+ if (!sbi->upcase)
+ goto free_sbi;
+
+ ratelimit_state_init(&sbi->msg_ratelimit, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ mutex_init(&sbi->compress.mtx_lznt);
+#ifdef CONFIG_NTFS3_LZX_XPRESS
+ mutex_init(&sbi->compress.mtx_xpress);
+ mutex_init(&sbi->compress.mtx_lzx);
+#endif
+
+ sbi->options = opts;
+ fc->s_fs_info = sbi;
+ok:
+ fc->fs_private = opts;
+ fc->ops = &ntfs_context_ops;
+
+ return 0;
+free_sbi:
+ kfree(sbi);
+free_opts:
+ kfree(opts);
+ return -ENOMEM;
}
// clang-format off
static struct file_system_type ntfs_fs_type = {
- .owner = THIS_MODULE,
- .name = "ntfs3",
- .mount = ntfs_mount,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+ .owner = THIS_MODULE,
+ .name = "ntfs3",
+ .init_fs_context = ntfs_init_fs_context,
+ .parameters = ntfs_fs_parameters,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
// clang-format on
diff --git a/fs/ntfs3/upcase.c b/fs/ntfs3/upcase.c
index bbeba778237e..b5e8256fd710 100644
--- a/fs/ntfs3/upcase.c
+++ b/fs/ntfs3/upcase.c
@@ -5,13 +5,9 @@
*
*/
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
-#include <linux/module.h>
-#include <linux/nls.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
-#include "debug.h"
-#include "ntfs.h"
#include "ntfs_fs.h"
static inline u16 upcase_unicode_char(const u16 *upcase, u16 chr)
diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
index 7282d85c4ece..afd0ddad826f 100644
--- a/fs/ntfs3/xattr.c
+++ b/fs/ntfs3/xattr.c
@@ -5,10 +5,7 @@
*
*/
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <linux/fs.h>
-#include <linux/nls.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
@@ -78,6 +75,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
size_t add_bytes, const struct EA_INFO **info)
{
int err;
+ struct ntfs_sb_info *sbi = ni->mi.sbi;
struct ATTR_LIST_ENTRY *le = NULL;
struct ATTRIB *attr_info, *attr_ea;
void *ea_p;
@@ -102,10 +100,10 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
/* Check Ea limit. */
size = le32_to_cpu((*info)->size);
- if (size > ni->mi.sbi->ea_max_size)
+ if (size > sbi->ea_max_size)
return -EFBIG;
- if (attr_size(attr_ea) > ni->mi.sbi->ea_max_size)
+ if (attr_size(attr_ea) > sbi->ea_max_size)
return -EFBIG;
/* Allocate memory for packed Ea. */
@@ -113,15 +111,16 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
if (!ea_p)
return -ENOMEM;
- if (attr_ea->non_res) {
+ if (!size) {
+ ;
+ } else if (attr_ea->non_res) {
struct runs_tree run;
run_init(&run);
err = attr_load_runs(attr_ea, ni, &run, NULL);
if (!err)
- err = ntfs_read_run_nb(ni->mi.sbi, &run, 0, ea_p, size,
- NULL);
+ err = ntfs_read_run_nb(sbi, &run, 0, ea_p, size, NULL);
run_close(&run);
if (err)
@@ -260,7 +259,7 @@ out:
static noinline int ntfs_set_ea(struct inode *inode, const char *name,
size_t name_len, const void *value,
- size_t val_size, int flags, int locked)
+ size_t val_size, int flags)
{
struct ntfs_inode *ni = ntfs_i(inode);
struct ntfs_sb_info *sbi = ni->mi.sbi;
@@ -279,8 +278,7 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
u64 new_sz;
void *p;
- if (!locked)
- ni_lock(ni);
+ ni_lock(ni);
run_init(&ea_run);
@@ -370,21 +368,22 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
new_ea->name[name_len] = 0;
memcpy(new_ea->name + name_len + 1, value, val_size);
new_pack = le16_to_cpu(ea_info.size_pack) + packed_ea_size(new_ea);
-
- /* Should fit into 16 bits. */
- if (new_pack > 0xffff) {
- err = -EFBIG; // -EINVAL?
- goto out;
- }
ea_info.size_pack = cpu_to_le16(new_pack);
-
/* New size of ATTR_EA. */
size += add;
- if (size > sbi->ea_max_size) {
+ ea_info.size = cpu_to_le32(size);
+
+ /*
+ * 1. Check ea_info.size_pack for overflow.
+	 * 2. The new attribute size must fit the value from $AttrDef.
+ */
+ if (new_pack > 0xffff || size > sbi->ea_max_size) {
+ ntfs_inode_warn(
+ inode,
+ "The size of extended attributes must not exceed 64KiB");
err = -EFBIG; // -EINVAL?
goto out;
}
- ea_info.size = cpu_to_le32(size);
update_ea:
@@ -444,7 +443,7 @@ update_ea:
/* Delete xattr, ATTR_EA */
ni_remove_attr_le(ni, attr, mi, le);
} else if (attr->non_res) {
- err = ntfs_sb_write_run(sbi, &ea_run, 0, ea_all, size);
+ err = ntfs_sb_write_run(sbi, &ea_run, 0, ea_all, size, 0);
if (err)
goto out;
} else {
@@ -468,8 +467,7 @@ update_ea:
mark_inode_dirty(&ni->vfs_inode);
out:
- if (!locked)
- ni_unlock(ni);
+ ni_unlock(ni);
run_close(&ea_run);
kfree(ea_all);
@@ -478,12 +476,6 @@ out:
}
#ifdef CONFIG_NTFS3_FS_POSIX_ACL
-static inline void ntfs_posix_acl_release(struct posix_acl *acl)
-{
- if (acl && refcount_dec_and_test(&acl->a_refcount))
- kfree(acl);
-}
-
static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
struct inode *inode, int type,
int locked)
@@ -521,12 +513,15 @@ static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
/* Translate extended attribute to acl. */
if (err >= 0) {
acl = posix_acl_from_xattr(mnt_userns, buf, err);
- if (!IS_ERR(acl))
- set_cached_acl(inode, type, acl);
+ } else if (err == -ENODATA) {
+ acl = NULL;
} else {
- acl = err == -ENODATA ? NULL : ERR_PTR(err);
+ acl = ERR_PTR(err);
}
+ if (!IS_ERR(acl))
+ set_cached_acl(inode, type, acl);
+
__putname(buf);
return acl;
@@ -546,12 +541,13 @@ struct posix_acl *ntfs_get_acl(struct inode *inode, int type, bool rcu)
static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
struct inode *inode, struct posix_acl *acl,
- int type, int locked)
+ int type)
{
const char *name;
size_t size, name_len;
void *value = NULL;
int err = 0;
+ int flags;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
@@ -561,22 +557,15 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
if (acl) {
umode_t mode = inode->i_mode;
- err = posix_acl_equiv_mode(acl, &mode);
- if (err < 0)
- return err;
+ err = posix_acl_update_mode(mnt_userns, inode, &mode,
+ &acl);
+ if (err)
+ goto out;
if (inode->i_mode != mode) {
inode->i_mode = mode;
mark_inode_dirty(inode);
}
-
- if (!err) {
- /*
- * ACL can be exactly represented in the
- * traditional file mode permission bits.
- */
- acl = NULL;
- }
}
name = XATTR_NAME_POSIX_ACL_ACCESS;
name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
@@ -594,20 +583,24 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
}
if (!acl) {
+		/* Remove the xattr if it can be represented via mode bits. */
size = 0;
value = NULL;
+ flags = XATTR_REPLACE;
} else {
size = posix_acl_xattr_size(acl->a_count);
value = kmalloc(size, GFP_NOFS);
if (!value)
return -ENOMEM;
-
err = posix_acl_to_xattr(mnt_userns, acl, value, size);
if (err < 0)
goto out;
+ flags = 0;
}
- err = ntfs_set_ea(inode, name, name_len, value, size, 0, locked);
+ err = ntfs_set_ea(inode, name, name_len, value, size, flags);
+ if (err == -ENODATA && !size)
+		err = 0; /* Removing a non-existent xattr. */
if (!err)
set_cached_acl(inode, type, acl);
@@ -623,68 +616,7 @@ out:
int ntfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
struct posix_acl *acl, int type)
{
- return ntfs_set_acl_ex(mnt_userns, inode, acl, type, 0);
-}
-
-static int ntfs_xattr_get_acl(struct user_namespace *mnt_userns,
- struct inode *inode, int type, void *buffer,
- size_t size)
-{
- struct posix_acl *acl;
- int err;
-
- if (!(inode->i_sb->s_flags & SB_POSIXACL)) {
- ntfs_inode_warn(inode, "add mount option \"acl\" to use acl");
- return -EOPNOTSUPP;
- }
-
- acl = ntfs_get_acl(inode, type, false);
- if (IS_ERR(acl))
- return PTR_ERR(acl);
-
- if (!acl)
- return -ENODATA;
-
- err = posix_acl_to_xattr(mnt_userns, acl, buffer, size);
- ntfs_posix_acl_release(acl);
-
- return err;
-}
-
-static int ntfs_xattr_set_acl(struct user_namespace *mnt_userns,
- struct inode *inode, int type, const void *value,
- size_t size)
-{
- struct posix_acl *acl;
- int err;
-
- if (!(inode->i_sb->s_flags & SB_POSIXACL)) {
- ntfs_inode_warn(inode, "add mount option \"acl\" to use acl");
- return -EOPNOTSUPP;
- }
-
- if (!inode_owner_or_capable(mnt_userns, inode))
- return -EPERM;
-
- if (!value) {
- acl = NULL;
- } else {
- acl = posix_acl_from_xattr(mnt_userns, value, size);
- if (IS_ERR(acl))
- return PTR_ERR(acl);
-
- if (acl) {
- err = posix_acl_valid(mnt_userns, acl);
- if (err)
- goto release_and_out;
- }
- }
-
- err = ntfs_set_acl(mnt_userns, inode, acl, type);
-
-release_and_out:
- ntfs_posix_acl_release(acl);
- return err;
+ return ntfs_set_acl_ex(mnt_userns, inode, acl, type);
}
/*
@@ -698,54 +630,27 @@ int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
struct posix_acl *default_acl, *acl;
int err;
- /*
- * TODO: Refactoring lock.
- * ni_lock(dir) ... -> posix_acl_create(dir,...) -> ntfs_get_acl -> ni_lock(dir)
- */
- inode->i_default_acl = NULL;
-
- default_acl = ntfs_get_acl_ex(mnt_userns, dir, ACL_TYPE_DEFAULT, 1);
-
- if (!default_acl || default_acl == ERR_PTR(-EOPNOTSUPP)) {
- inode->i_mode &= ~current_umask();
- err = 0;
- goto out;
- }
-
- if (IS_ERR(default_acl)) {
- err = PTR_ERR(default_acl);
- goto out;
- }
-
- acl = default_acl;
- err = __posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
- if (err < 0)
- goto out1;
- if (!err) {
- posix_acl_release(acl);
- acl = NULL;
- }
+ err = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
+ if (err)
+ return err;
- if (!S_ISDIR(inode->i_mode)) {
+ if (default_acl) {
+ err = ntfs_set_acl_ex(mnt_userns, inode, default_acl,
+ ACL_TYPE_DEFAULT);
posix_acl_release(default_acl);
- default_acl = NULL;
+ } else {
+ inode->i_default_acl = NULL;
}
- if (default_acl)
- err = ntfs_set_acl_ex(mnt_userns, inode, default_acl,
- ACL_TYPE_DEFAULT, 1);
-
if (!acl)
inode->i_acl = NULL;
- else if (!err)
- err = ntfs_set_acl_ex(mnt_userns, inode, acl, ACL_TYPE_ACCESS,
- 1);
-
- posix_acl_release(acl);
-out1:
- posix_acl_release(default_acl);
+ else {
+ if (!err)
+ err = ntfs_set_acl_ex(mnt_userns, inode, acl,
+ ACL_TYPE_ACCESS);
+ posix_acl_release(acl);
+ }
-out:
return err;
}
#endif
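
The rewrite above leans on the stock posix_acl_create() contract; a hedged sketch of that contract in isolation:

	/* Sketch only: either returned ACL may be NULL, and the caller
	 * owns (and must release) whatever comes back.
	 */
	struct posix_acl *default_acl, *acl;
	int err = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);

	if (!err) {
		posix_acl_release(default_acl);	/* NULL-safe */
		posix_acl_release(acl);
	}
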
@@ -772,7 +677,7 @@ int ntfs_acl_chmod(struct user_namespace *mnt_userns, struct inode *inode)
int ntfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
int mask)
{
- if (ntfs_sb(inode->i_sb)->options.no_acs_rules) {
+ if (ntfs_sb(inode->i_sb)->options->noacsrules) {
/* "No access rules" mode - Allow all changes. */
return 0;
}
@@ -880,23 +785,6 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
goto out;
}
-#ifdef CONFIG_NTFS3_FS_POSIX_ACL
- if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
- !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
- sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
- (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
- !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
- sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
- /* TODO: init_user_ns? */
- err = ntfs_xattr_get_acl(
- &init_user_ns, inode,
- name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
- ? ACL_TYPE_ACCESS
- : ACL_TYPE_DEFAULT,
- buffer, size);
- goto out;
- }
-#endif
/* Deal with NTFS extended attribute. */
err = ntfs_get_ea(inode, name, name_len, buffer, size, NULL);
@@ -1009,24 +897,8 @@ set_new_fa:
goto out;
}
-#ifdef CONFIG_NTFS3_FS_POSIX_ACL
- if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
- !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
- sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
- (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
- !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
- sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
- err = ntfs_xattr_set_acl(
- mnt_userns, inode,
- name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
- ? ACL_TYPE_ACCESS
- : ACL_TYPE_DEFAULT,
- value, size);
- goto out;
- }
-#endif
/* Deal with NTFS extended attribute. */
- err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
+ err = ntfs_set_ea(inode, name, name_len, value, size, flags);
out:
return err;
@@ -1042,28 +914,29 @@ int ntfs_save_wsl_perm(struct inode *inode)
int err;
__le32 value;
+ /* TODO: refactor this, so we don't lock 4 times in ntfs_set_ea */
value = cpu_to_le32(i_uid_read(inode));
err = ntfs_set_ea(inode, "$LXUID", sizeof("$LXUID") - 1, &value,
- sizeof(value), 0, 0);
+ sizeof(value), 0);
if (err)
goto out;
value = cpu_to_le32(i_gid_read(inode));
err = ntfs_set_ea(inode, "$LXGID", sizeof("$LXGID") - 1, &value,
- sizeof(value), 0, 0);
+ sizeof(value), 0);
if (err)
goto out;
value = cpu_to_le32(inode->i_mode);
err = ntfs_set_ea(inode, "$LXMOD", sizeof("$LXMOD") - 1, &value,
- sizeof(value), 0, 0);
+ sizeof(value), 0);
if (err)
goto out;
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
value = cpu_to_le32(inode->i_rdev);
err = ntfs_set_ea(inode, "$LXDEV", sizeof("$LXDEV") - 1, &value,
- sizeof(value), 0, 0);
+ sizeof(value), 0);
if (err)
goto out;
}
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index f1cc8258d34a..5d9ae17bd443 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -7045,7 +7045,7 @@ void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct buffer_head *di_bh)
{
- int ret, i, has_data, num_pages = 0;
+ int ret, has_data, num_pages = 0;
int need_free = 0;
u32 bit_off, num;
handle_t *handle;
@@ -7054,26 +7054,17 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_alloc_context *data_ac = NULL;
- struct page **pages = NULL;
- loff_t end = osb->s_clustersize;
+ struct page *page = NULL;
struct ocfs2_extent_tree et;
int did_quota = 0;
has_data = i_size_read(inode) ? 1 : 0;
if (has_data) {
- pages = kcalloc(ocfs2_pages_per_cluster(osb->sb),
- sizeof(struct page *), GFP_NOFS);
- if (pages == NULL) {
- ret = -ENOMEM;
- mlog_errno(ret);
- return ret;
- }
-
ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
if (ret) {
mlog_errno(ret);
- goto free_pages;
+ goto out;
}
}
@@ -7093,7 +7084,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
}
if (has_data) {
- unsigned int page_end;
+ unsigned int page_end = min_t(unsigned, PAGE_SIZE,
+ osb->s_clustersize);
u64 phys;
ret = dquot_alloc_space_nodirty(inode,
@@ -7117,15 +7109,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
*/
block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
- /*
- * Non sparse file systems zero on extend, so no need
- * to do that now.
- */
- if (!ocfs2_sparse_alloc(osb) &&
- PAGE_SIZE < osb->s_clustersize)
- end = PAGE_SIZE;
-
- ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
+ ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page,
+ &num_pages);
if (ret) {
mlog_errno(ret);
need_free = 1;
@@ -7136,20 +7121,15 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
* This should populate the 1st page for us and mark
* it up to date.
*/
- ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
+ ret = ocfs2_read_inline_data(inode, page, di_bh);
if (ret) {
mlog_errno(ret);
need_free = 1;
goto out_unlock;
}
- page_end = PAGE_SIZE;
- if (PAGE_SIZE > osb->s_clustersize)
- page_end = osb->s_clustersize;
-
- for (i = 0; i < num_pages; i++)
- ocfs2_map_and_dirty_page(inode, handle, 0, page_end,
- pages[i], i > 0, &phys);
+ ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0,
+ &phys);
}
spin_lock(&oi->ip_lock);
@@ -7180,8 +7160,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
}
out_unlock:
- if (pages)
- ocfs2_unlock_and_free_pages(pages, num_pages);
+ if (page)
+ ocfs2_unlock_and_free_pages(&page, num_pages);
out_commit:
if (ret < 0 && did_quota)
@@ -7205,8 +7185,6 @@ out_commit:
out:
if (data_ac)
ocfs2_free_alloc_context(data_ac);
-free_pages:
- kfree(pages);
return ret;
}
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 359524b7341f..801e60bab955 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3951,7 +3951,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
oi = OCFS2_I(inode);
oi->ip_dir_lock_gen++;
mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
- goto out;
+ goto out_forget;
}
if (!S_ISREG(inode->i_mode))
@@ -3982,6 +3982,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
filemap_fdatawait(mapping);
}
+out_forget:
forget_all_cached_acls(inode);
out:
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index c86bd4e60e20..5c914ce9b3ac 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -2167,11 +2167,17 @@ static int ocfs2_initialize_super(struct super_block *sb,
}
if (ocfs2_clusterinfo_valid(osb)) {
+ /*
+		 * ci_stack and ci_cluster in ocfs2_cluster_info may not be
+		 * NUL-terminated, so use memcpy to avoid reading past them.
+		 * The destination strings are always NUL-terminated because
+		 * osb is allocated with kzalloc.
+ */
osb->osb_stackflags =
OCFS2_RAW_SB(di)->s_cluster_info.ci_stackflags;
- strlcpy(osb->osb_cluster_stack,
+ memcpy(osb->osb_cluster_stack,
OCFS2_RAW_SB(di)->s_cluster_info.ci_stack,
- OCFS2_STACK_LABEL_LEN + 1);
+ OCFS2_STACK_LABEL_LEN);
if (strlen(osb->osb_cluster_stack) != OCFS2_STACK_LABEL_LEN) {
mlog(ML_ERROR,
"couldn't mount because of an invalid "
@@ -2180,9 +2186,9 @@ static int ocfs2_initialize_super(struct super_block *sb,
status = -EINVAL;
goto bail;
}
- strlcpy(osb->osb_cluster_name,
+ memcpy(osb->osb_cluster_name,
OCFS2_RAW_SB(di)->s_cluster_info.ci_cluster,
- OCFS2_CLUSTER_NAME_LEN + 1);
+ OCFS2_CLUSTER_NAME_LEN);
} else {
/* The empty string matches what classic tools that don't
* know about s_cluster_info produce. */
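
The strlcpy() -> memcpy() switch matters because strlcpy() runs strlen() on its source; a hedged illustration:

	char src[OCFS2_STACK_LABEL_LEN];	/* possibly not NUL-terminated */
	char dst[OCFS2_STACK_LABEL_LEN + 1];	/* zeroed, e.g. via kzalloc */

	/* strlcpy(dst, src, sizeof(dst)) may read past src looking for a
	 * NUL; memcpy copies exactly LABEL_LEN bytes, and the zeroed
	 * trailing byte keeps dst NUL-terminated.
	 */
	memcpy(dst, src, OCFS2_STACK_LABEL_LEN);
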
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 1fefb2b8960e..93c7c267de93 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -1219,9 +1219,13 @@ static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir,
goto out_dput;
}
} else {
- if (!d_is_negative(newdentry) &&
- (!new_opaque || !ovl_is_whiteout(newdentry)))
- goto out_dput;
+ if (!d_is_negative(newdentry)) {
+ if (!new_opaque || !ovl_is_whiteout(newdentry))
+ goto out_dput;
+ } else {
+ if (flags & RENAME_EXCHANGE)
+ goto out_dput;
+ }
}
if (olddentry == trap)
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index d081faa55e83..c88ac571593d 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -296,6 +296,12 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
if (ret)
return ret;
+ ret = -EINVAL;
+ if (iocb->ki_flags & IOCB_DIRECT &&
+ (!real.file->f_mapping->a_ops ||
+ !real.file->f_mapping->a_ops->direct_IO))
+ goto out_fdput;
+
old_cred = ovl_override_creds(file_inode(file)->i_sb);
if (is_sync_kiocb(iocb)) {
ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
@@ -320,7 +326,7 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
out:
revert_creds(old_cred);
ovl_file_accessed(file);
-
+out_fdput:
fdput(real);
return ret;
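
The write path below grows the same guard; a hypothetical predicate capturing it, mirroring the O_DIRECT check done at open time:

	static bool ovl_real_can_dio(struct file *realfile)
	{
		struct address_space *mapping = realfile->f_mapping;

		return mapping->a_ops && mapping->a_ops->direct_IO;
	}
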
@@ -349,6 +355,12 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
if (ret)
goto out_unlock;
+ ret = -EINVAL;
+ if (iocb->ki_flags & IOCB_DIRECT &&
+ (!real.file->f_mapping->a_ops ||
+ !real.file->f_mapping->a_ops->direct_IO))
+ goto out_fdput;
+
if (!ovl_should_sync(OVL_FS(inode->i_sb)))
ifl &= ~(IOCB_DSYNC | IOCB_SYNC);
@@ -384,6 +396,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
}
out:
revert_creds(old_cred);
+out_fdput:
fdput(real);
out_unlock:
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index 2a66844b7ff8..66645a5a35f3 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -20,12 +20,33 @@
* depending on the status field in the last byte. The
* first byte is where the name starts either way, and a
* zero means it's empty.
+ *
+ * Also, due to a bug in gcc, we don't want to use the
+ * real (differently sized) name arrays in the inode and
+ * link entries, but always the 'de_name[]' one in the
+ * fake struct entry.
+ *
+ * See
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99578#c6
+ *
+ * for details, but basically gcc will take the size of the
+ * 'name' array from one of the used union entries randomly.
+ *
+ * This use of 'de_name[]' (48 bytes) avoids the false positive
+ * warnings that would happen if gcc decides to use 'inode.di_name'
+ * (16 bytes) even when the pointer and size were to come from
+ * 'link.dl_name' (48 bytes).
+ *
+ * In all cases the actual name pointer itself is the same, it's
+ * only the gcc internal 'what is the size of this field' logic
+ * that can get confused.
*/
union qnx4_directory_entry {
struct {
- char de_name;
- char de_pad[62];
- char de_status;
+ const char de_name[48];
+ u8 de_pad[15];
+ u8 de_status;
};
struct qnx4_inode_entry inode;
struct qnx4_link_info link;
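
A hedged compile-time check of the layout the comment relies on: de_name[48] plus de_pad[15] puts de_status at byte 63, exactly where the old de_name/de_pad[62] layout had it:

	/* Hypothetical sanity check; not part of the patch. */
	BUILD_BUG_ON(offsetof(union qnx4_directory_entry, de_status) != 63);
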
@@ -53,29 +74,26 @@ static int qnx4_readdir(struct file *file, struct dir_context *ctx)
ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) {
union qnx4_directory_entry *de;
- const char *name;
offset = ix * QNX4_DIR_ENTRY_SIZE;
de = (union qnx4_directory_entry *) (bh->b_data + offset);
- if (!de->de_name)
+ if (!de->de_name[0])
continue;
if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
continue;
if (!(de->de_status & QNX4_FILE_LINK)) {
size = sizeof(de->inode.di_fname);
- name = de->inode.di_fname;
ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
} else {
size = sizeof(de->link.dl_fname);
- name = de->link.dl_fname;
ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) *
QNX4_INODES_PER_BLOCK +
de->link.dl_inode_ndx;
}
- size = strnlen(name, size);
+ size = strnlen(de->de_name, size);
-		QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, name));
+		QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->de_name));
- if (!dir_emit(ctx, name, size, ino, DT_UNKNOWN)) {
+ if (!dir_emit(ctx, de->de_name, size, ino, DT_UNKNOWN)) {
brelse(bh);
return 0;
}
diff --git a/fs/smbfs_common/smbfsctl.h b/fs/smbfs_common/smbfsctl.h
index d01e8c9d7a31..926f87cd6af0 100644
--- a/fs/smbfs_common/smbfsctl.h
+++ b/fs/smbfs_common/smbfsctl.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: LGPL-2.1+ */
/*
- * fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions
+ * SMB, CIFS, SMB2 FSCTL definitions
*
* Copyright (c) International Business Machines Corp., 2002,2013
* Author(s): Steve French ([email protected])
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 003f0d31743e..22bf14ab2d16 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1827,9 +1827,15 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
if (mode_wp && mode_dontwake)
return -EINVAL;
- ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
- uffdio_wp.range.len, mode_wp,
- &ctx->mmap_changing);
+ if (mmget_not_zero(ctx->mm)) {
+ ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
+ uffdio_wp.range.len, mode_wp,
+ &ctx->mmap_changing);
+ mmput(ctx->mm);
+ } else {
+ return -ESRCH;
+ }
+
if (ret)
return ret;
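
mmget_not_zero()/mmput() pin the target mm so it cannot be torn down mid-operation; a minimal sketch of the pattern, with hypothetical names:

	static int with_live_mm(struct mm_struct *mm,
				int (*op)(struct mm_struct *mm))
	{
		int ret;

		if (!mmget_not_zero(mm))
			return -ESRCH;	/* owner already exited */
		ret = op(mm);
		mmput(mm);		/* drop the reference we took */
		return ret;
	}
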
diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c
index 4f5e59f06284..37dd3fe5b1e9 100644
--- a/fs/vboxsf/super.c
+++ b/fs/vboxsf/super.c
@@ -21,10 +21,7 @@
#define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */
-#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')
-#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')
-#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')
-#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')
+static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375";
static int follow_symlinks;
module_param(follow_symlinks, int, 0444);
@@ -386,12 +383,7 @@ fail_nomem:
static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
{
- unsigned char *options = data;
-
- if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
- options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
- options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&
- options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {
+ if (data && !memcmp(data, VBSF_MOUNT_SIGNATURE, 4)) {
vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
return -EINVAL;
}
diff --git a/fs/verity/enable.c b/fs/verity/enable.c
index 77e159a0346b..60a4372aa4d7 100644
--- a/fs/verity/enable.c
+++ b/fs/verity/enable.c
@@ -177,7 +177,7 @@ static int build_merkle_tree(struct file *filp,
* (level 0) and ascending to the root node (level 'num_levels - 1').
* Then at the end (level 'num_levels'), calculate the root hash.
*/
- blocks = (inode->i_size + params->block_size - 1) >>
+ blocks = ((u64)inode->i_size + params->block_size - 1) >>
params->log_blocksize;
for (level = 0; level <= params->num_levels; level++) {
err = build_merkle_tree_level(filp, level, blocks, params,
diff --git a/fs/verity/open.c b/fs/verity/open.c
index 60ff8af7219f..92df87f5fa38 100644
--- a/fs/verity/open.c
+++ b/fs/verity/open.c
@@ -89,7 +89,7 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
*/
/* Compute number of levels and the number of blocks in each level */
- blocks = (inode->i_size + params->block_size - 1) >> log_blocksize;
+ blocks = ((u64)inode->i_size + params->block_size - 1) >> log_blocksize;
pr_debug("Data is %lld bytes (%llu blocks)\n", inode->i_size, blocks);
while (blocks > 1) {
if (params->num_levels >= FS_VERITY_MAX_LEVELS) {
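
The (u64) cast avoids signed overflow: for i_size near S64_MAX, adding block_size - 1 would overflow loff_t, which is undefined behaviour, while the unsigned form is well defined. A worked sketch assuming 4096-byte blocks:

	loff_t i_size = S64_MAX;	/* worst case */
	/* i_size + 4095 overflows s64 (UB); the u64 form is fine: */
	u64 blocks = ((u64)i_size + 4096 - 1) >> 12;	/* 2^51 blocks */
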
diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h
index a0212e67d6f4..027faa8883aa 100644
--- a/include/acpi/acpi_io.h
+++ b/include/acpi/acpi_io.h
@@ -14,14 +14,6 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
}
#endif
-#ifndef acpi_os_memmap
-static inline void __iomem *acpi_os_memmap(acpi_physical_address phys,
- acpi_size size)
-{
- return ioremap_cache(phys, size);
-}
-#endif
-
extern bool acpi_permanent_mmap;
void __iomem __ref
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index e93375c710b9..7ce93aaf69f8 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -957,7 +957,7 @@ static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
#ifndef iounmap
#define iounmap iounmap
-static inline void iounmap(void __iomem *addr)
+static inline void iounmap(volatile void __iomem *addr)
{
}
#endif
@@ -1023,16 +1023,7 @@ static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
port &= IO_SPACE_LIMIT;
return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
-#define __pci_ioport_unmap __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p)
-{
- uintptr_t start = (uintptr_t) PCI_IOBASE;
- uintptr_t addr = (uintptr_t) p;
-
- if (addr >= start && addr < start + IO_SPACE_LIMIT)
- return;
- iounmap(p);
-}
+#define ARCH_HAS_GENERIC_IOPORT_MAP
#endif
#ifndef ioport_unmap
@@ -1048,21 +1039,10 @@ extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_HAS_IOPORT_MAP */
#ifndef CONFIG_GENERIC_IOMAP
-struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-
-#ifndef __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p) {}
-#endif
-
#ifndef pci_iounmap
-#define pci_iounmap pci_iounmap
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
- __pci_ioport_unmap(p);
-}
+#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
+#endif
#endif
-#endif /* CONFIG_GENERIC_IOMAP */
#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 9b3eb6d86200..08237ae8b840 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -110,16 +110,6 @@ static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
}
#endif
-#ifdef CONFIG_PCI
-/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
-struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
-#elif defined(CONFIG_GENERIC_IOMAP)
-struct pci_dev;
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{ }
-#endif
-
#include <asm-generic/pci_iomap.h>
#endif
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index df636c6d8e6c..5a2f9bf53384 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -18,6 +18,7 @@ extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
/* Create a virtual mapping cookie for a port on a given PCI device.
* Do not call this directly, it exists to make it easier for architectures
* to override */
@@ -50,6 +51,8 @@ static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
{
return NULL;
}
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{ }
#endif
#endif /* __ASM_GENERIC_PCI_IOMAP_H */
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 24b40e5c160b..018e776a34b9 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -613,7 +613,7 @@ void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
* and is automatically cleaned up after the test case concludes. See &struct
* kunit_resource for more information.
*/
-void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t flags);
+void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp);
/**
* kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.
@@ -657,9 +657,9 @@ static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp)
*
* See kcalloc() and kunit_kmalloc_array() for more information.
*/
-static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t flags)
+static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t gfp)
{
- return kunit_kmalloc_array(test, n, size, flags | __GFP_ZERO);
+ return kunit_kmalloc_array(test, n, size, gfp | __GFP_ZERO);
}
void kunit_cleanup(struct kunit *test);
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 864b9997efb2..90f21898aad8 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -61,7 +61,6 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
-int kvm_pmu_probe_pmuver(void);
#else
struct kvm_pmu {
};
@@ -118,8 +117,6 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
return 0;
}
-static inline int kvm_pmu_probe_pmuver(void) { return 0xf; }
-
#endif
#endif
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 7d1cabe15262..63ccb5252190 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -321,10 +321,20 @@ asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0);
* from register 0 to 3 on return from the SMC instruction. An optional
* quirk structure provides vendor specific behavior.
*/
+#ifdef CONFIG_HAVE_ARM_SMCCC
asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long a6, unsigned long a7,
struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
+#else
+static inline void __arm_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long a4,
+ unsigned long a5, unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res, struct arm_smccc_quirk *quirk)
+{
+ *res = (struct arm_smccc_res){};
+}
+#endif
/**
* __arm_smccc_hvc() - make HVC calls
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f4c16f19f83e..1c7fd7c4c6d3 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -48,6 +48,7 @@ extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
+typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
@@ -142,7 +143,8 @@ struct bpf_map_ops {
int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee);
- int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn,
+ int (*map_for_each_callback)(struct bpf_map *map,
+ bpf_callback_t callback_fn,
void *callback_ctx, u64 flags);
/* BTF name and id of struct allocated by map_alloc */
@@ -578,11 +580,12 @@ struct btf_func_model {
* programs only. Should not be used with normal calls and indirect calls.
*/
#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
-
/* Store IP address of the caller on the trampoline stack,
* so it's available for trampoline's programs.
*/
#define BPF_TRAMP_F_IP_ARG BIT(3)
+/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
+#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
* bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
@@ -1088,6 +1091,7 @@ bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *f
int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
unsigned long off, unsigned long len);
@@ -2216,6 +2220,8 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
+#define MAX_BPRINTF_VARARGS 12
+
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index b119d6819d6c..27d9b6683f0e 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -67,7 +67,7 @@
#define PHY_BRCM_CLEAR_RGMII_MODE 0x00000004
#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00000008
#define PHY_BRCM_EN_MASTER_MODE 0x00000010
-#define PHY_BRCM_IDDQ_SUSPEND 0x000000220
+#define PHY_BRCM_IDDQ_SUSPEND 0x00000020
/* Broadcom BCM7xxx specific workarounds */
#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 6486d3c19463..36f33685c8c0 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -194,7 +194,7 @@ void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
struct buffer_head *__bread_gfp(struct block_device *,
sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
-void invalidate_bh_lrus_cpu(int cpu);
+void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
@@ -408,7 +408,7 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
-static inline void invalidate_bh_lrus_cpu(int cpu) {}
+static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 832d8a74fa59..991911048857 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -72,6 +72,8 @@ enum cpuhp_state {
CPUHP_SLUB_DEAD,
CPUHP_DEBUG_OBJ_DEAD,
CPUHP_MM_WRITEBACK_DEAD,
+ /* Must be after CPUHP_MM_VMSTAT_DEAD */
+ CPUHP_MM_DEMOTION_DEAD,
CPUHP_MM_VMSTAT_DEAD,
CPUHP_SOFTIRQ_DEAD,
CPUHP_NET_MVNETA_DEAD,
@@ -240,6 +242,8 @@ enum cpuhp_state {
CPUHP_AP_BASE_CACHEINFO_ONLINE,
CPUHP_AP_ONLINE_DYN,
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
+ /* Must be after CPUHP_AP_ONLINE_DYN for node_states[N_CPU] update */
+ CPUHP_AP_MM_DEMOTION_ONLINE,
CPUHP_AP_X86_HPET_ONLINE,
CPUHP_AP_X86_KVM_CLK_ONLINE,
CPUHP_AP_DTPM_CPU_ONLINE,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 5d4d07a9e1ed..1e7399fc69c0 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -996,14 +996,15 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
* cpumask; Typically used by bin_attribute to export cpumask bitmask
* ABI.
*
- * Returns the length of how many bytes have been copied.
+ * Returns the number of bytes copied, excluding the
+ * terminating '\0'.
*/
static inline ssize_t
cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
loff_t off, size_t count)
{
return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
- nr_cpu_ids, off, count);
+ nr_cpu_ids, off, count) - 1;
}
/**
@@ -1018,7 +1019,7 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
loff_t off, size_t count)
{
return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
- nr_cpu_ids, off, count);
+ nr_cpu_ids, off, count) - 1;
}
#if NR_CPUS <= BITS_PER_LONG
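
With the trailing '- 1', the helpers return the byte count excluding the NUL, which is what a bin_attribute ->read() hands back to read(2); a hypothetical handler:

	static ssize_t cpus_read(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
	{
		return cpumap_print_bitmask_to_buf(buf, cpu_online_mask,
						   off, count);
	}
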
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
index c7fa4a3498fe..254b165f2b44 100644
--- a/include/linux/dsa/8021q.h
+++ b/include/linux/dsa/8021q.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
struct dsa_switch;
+struct dsa_port;
struct sk_buff;
struct net_device;
@@ -45,9 +46,9 @@ void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port,
u16 dsa_8021q_bridge_tx_fwd_offload_vid(int bridge_num);
-u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port);
+u16 dsa_tag_8021q_tx_vid(const struct dsa_port *dp);
-u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port);
+u16 dsa_tag_8021q_rx_vid(const struct dsa_port *dp);
int dsa_8021q_rx_switch_id(u16 vid);
diff --git a/include/linux/dsa/mv88e6xxx.h b/include/linux/dsa/mv88e6xxx.h
new file mode 100644
index 000000000000..8c3d45eca46b
--- /dev/null
+++ b/include/linux/dsa/mv88e6xxx.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2021 NXP
+ */
+
+#ifndef _NET_DSA_TAG_MV88E6XXX_H
+#define _NET_DSA_TAG_MV88E6XXX_H
+
+#include <linux/if_vlan.h>
+
+#define MV88E6XXX_VID_STANDALONE 0
+#define MV88E6XXX_VID_BRIDGED (VLAN_N_VID - 1)
+
+#endif
diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
index c6bc45ae5e03..d42010cf5468 100644
--- a/include/linux/dsa/ocelot.h
+++ b/include/linux/dsa/ocelot.h
@@ -1,11 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0
- * Copyright 2019-2021 NXP Semiconductors
+ * Copyright 2019-2021 NXP
*/
#ifndef _NET_DSA_TAG_OCELOT_H
#define _NET_DSA_TAG_OCELOT_H
+#include <linux/kthread.h>
#include <linux/packing.h>
+#include <linux/skbuff.h>
+
+struct ocelot_skb_cb {
+ struct sk_buff *clone;
+ unsigned int ptp_class; /* valid only for clones */
+ u8 ptp_cmd;
+ u8 ts_id;
+};
+
+#define OCELOT_SKB_CB(skb) \
+ ((struct ocelot_skb_cb *)((skb)->cb))
+
+#define IFH_TAG_TYPE_C 0
+#define IFH_TAG_TYPE_S 1
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_DSCP 0x1
+#define IFH_REW_OP_ONE_STEP_PTP 0x2
+#define IFH_REW_OP_TWO_STEP_PTP 0x3
+#define IFH_REW_OP_ORIGIN_PTP 0x5
#define OCELOT_TAG_LEN 16
#define OCELOT_SHORT_PREFIX_LEN 4
@@ -140,6 +161,17 @@
* +------+------+------+------+------+------+------+------+
*/
+struct felix_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+struct felix_port {
+ void (*xmit_work_fn)(struct kthread_work *work);
+ struct kthread_worker *xmit_worker;
+};
+
static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val)
{
packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
@@ -210,9 +242,26 @@ static inline void ocelot_ifh_set_tag_type(void *injection, u64 tag_type)
packing(injection, &tag_type, 16, 16, OCELOT_TAG_LEN, PACK, 0);
}
-static inline void ocelot_ifh_set_vid(void *injection, u64 vid)
+static inline void ocelot_ifh_set_vlan_tci(void *injection, u64 vlan_tci)
{
- packing(injection, &vid, 11, 0, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &vlan_tci, 15, 0, OCELOT_TAG_LEN, PACK, 0);
+}
+
+/* Determine the PTP REW_OP to use for injecting the given skb */
+static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
+{
+ struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
+ u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
+ u32 rew_op = 0;
+
+ if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
+ rew_op = ptp_cmd;
+ rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
+ } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
+ rew_op = ptp_cmd;
+ }
+
+ return rew_op;
}
#endif
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index 171106202fe5..e6c78be40bde 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -48,6 +48,10 @@ struct sja1105_tagger_data {
spinlock_t meta_lock;
unsigned long state;
u8 ts_id;
+ /* Used on SJA1110 where meta frames are generated only for
+ * 2-step TX timestamps
+ */
+ struct sk_buff_head skb_txtstamp_queue;
};
struct sja1105_skb_cb {
@@ -65,46 +69,27 @@ struct sja1105_port {
struct kthread_work xmit_work;
struct sk_buff_head xmit_queue;
struct sja1105_tagger_data *data;
- struct dsa_port *dp;
bool hwts_tx_en;
};
-enum sja1110_meta_tstamp {
- SJA1110_META_TSTAMP_TX = 0,
- SJA1110_META_TSTAMP_RX = 1,
-};
-
-#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
-
-void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
- enum sja1110_meta_tstamp dir, u64 tstamp);
-
-#else
+/* Timestamps are in units of 8 ns clock ticks (equivalent to
+ * a fixed 125 MHz clock).
+ */
+#define SJA1105_TICK_NS 8
-static inline void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port,
- u8 ts_id, enum sja1110_meta_tstamp dir,
- u64 tstamp)
+static inline s64 ns_to_sja1105_ticks(s64 ns)
{
+ return ns / SJA1105_TICK_NS;
}
-#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
-
-#if IS_ENABLED(CONFIG_NET_DSA_SJA1105)
-
-extern const struct dsa_switch_ops sja1105_switch_ops;
-
-static inline bool dsa_port_is_sja1105(struct dsa_port *dp)
+static inline s64 sja1105_ticks_to_ns(s64 ticks)
{
- return dp->ds->ops == &sja1105_switch_ops;
+ return ticks * SJA1105_TICK_NS;
}
-#else
-
static inline bool dsa_port_is_sja1105(struct dsa_port *dp)
{
- return false;
+ return true;
}
-#endif
-
#endif /* _NET_DSA_SJA1105_H */
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 2aaa15779d50..957ebec35aad 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -109,7 +109,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
#endif
}
-#if defined(CONFIG_UM) || defined(CONFIG_IA64)
+#if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64)
/*
* These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 928c411bd509..2ad71cc90b37 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -26,9 +26,16 @@
#ifdef __KERNEL__
struct device;
+struct fwnode_handle;
+
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
+int platform_get_ethdev_address(struct device *dev, struct net_device *netdev);
unsigned char *arch_get_platform_mac_address(void);
int nvmem_get_mac_address(struct device *dev, void *addrbuf);
+int device_get_mac_address(struct device *dev, char *addr);
+int device_get_ethdev_address(struct device *dev, struct net_device *netdev);
+int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr);
+
u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
@@ -227,8 +234,6 @@ static inline void eth_random_addr(u8 *addr)
addr[0] |= 0x02; /* set local assignment bit (IEEE802) */
}
-#define random_ether_addr(addr) eth_random_addr(addr)
-
/**
* eth_broadcast_addr - Assign broadcast address
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -262,8 +267,11 @@ static inline void eth_zero_addr(u8 *addr)
*/
static inline void eth_hw_addr_random(struct net_device *dev)
{
+ u8 addr[ETH_ALEN];
+
+ eth_random_addr(addr);
+ __dev_addr_set(dev, addr, ETH_ALEN);
dev->addr_assign_type = NET_ADDR_RANDOM;
- eth_random_addr(dev->dev_addr);
}
/**
@@ -308,7 +316,7 @@ static inline void ether_addr_copy(u8 *dst, const u8 *src)
*/
static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
{
- ether_addr_copy(dev->dev_addr, addr);
+ __dev_addr_set(dev, addr, ETH_ALEN);
}
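
With dev_addr writes funneled through __dev_addr_set(), drivers are expected to stop memcpy()ing into dev->dev_addr directly; a hedged driver-side sketch (the MAC-read helper is hypothetical):

	u8 addr[ETH_ALEN];

	my_hw_read_mac(priv, addr);	/* hypothetical device read */
	eth_hw_addr_set(netdev, addr);	/* not memcpy(netdev->dev_addr, ...) */
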
/**
@@ -323,7 +331,7 @@ static inline void eth_hw_addr_inherit(struct net_device *dst,
struct net_device *src)
{
dst->addr_assign_type = src->addr_assign_type;
- ether_addr_copy(dst->dev_addr, src->dev_addr);
+ eth_hw_addr_set(dst, src->dev_addr);
}
/**
@@ -544,6 +552,27 @@ static inline unsigned long compare_ether_header(const void *a, const void *b)
}
/**
+ * eth_hw_addr_gen - Generate and assign Ethernet address to a port
+ * @dev: pointer to port's net_device structure
+ * @base_addr: base Ethernet address
+ * @id: offset to add to the base address
+ *
+ * Generate a MAC address using a base address and an offset and assign it
+ * to a net_device. Commonly used by switch drivers which need to compute
+ * addresses for all their ports. addr_assign_type is not changed.
+ */
+static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
+ unsigned int id)
+{
+ u64 u = ether_addr_to_u64(base_addr);
+ u8 addr[ETH_ALEN];
+
+ u += id;
+ u64_to_ether_addr(u, addr);
+ eth_hw_addr_set(dev, addr);
+}
+
+/**
* eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
*
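
A hypothetical use of eth_hw_addr_gen() in a switch driver, deriving one address per port from a base MAC:

	for (port = 0; port < num_ports; port++)
		eth_hw_addr_gen(port_netdev[port], base_mac, port);
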
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 849524b55d89..845a0ffc16ee 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -94,6 +94,7 @@ struct ethtool_link_ext_state_info {
enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch;
enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity;
enum ethtool_link_ext_substate_cable_issue cable_issue;
+ enum ethtool_link_ext_substate_module module;
u8 __link_ext_substate;
};
};
@@ -416,6 +417,17 @@ struct ethtool_module_eeprom {
};
/**
+ * struct ethtool_module_power_mode_params - module power mode parameters
+ * @policy: The power mode policy enforced by the host for the plug-in module.
+ * @mode: The operational power mode of the plug-in module. Should be filled by
+ * device drivers on get operations.
+ */
+struct ethtool_module_power_mode_params {
+ enum ethtool_module_power_mode_policy policy;
+ enum ethtool_module_power_mode mode;
+};
+
+/**
* struct ethtool_ops - optional netdev operations
* @cap_link_lanes_supported: indicates if the driver supports lanes
* parameter.
@@ -580,6 +592,11 @@ struct ethtool_module_eeprom {
* @get_eth_ctrl_stats: Query some of the IEEE 802.3 MAC Ctrl statistics.
* @get_rmon_stats: Query some of the RMON (RFC 2819) statistics.
* Set %ranges to a pointer to zero-terminated array of byte ranges.
+ * @get_module_power_mode: Get the power mode policy for the plug-in module
+ * used by the network device and its operational power mode, if
+ * plugged-in.
+ * @set_module_power_mode: Set the power mode policy for the plug-in module
+ * used by the network device.
*
* All operations are optional (i.e. the function pointer may be set
* to %NULL) and callers must take this into account. Callers must
@@ -705,6 +722,12 @@ struct ethtool_ops {
void (*get_rmon_stats)(struct net_device *dev,
struct ethtool_rmon_stats *rmon_stats,
const struct ethtool_rmon_hist_range **ranges);
+ int (*get_module_power_mode)(struct net_device *dev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+ int (*set_module_power_mode)(struct net_device *dev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
};
int ethtool_check_ops(const struct ethtool_ops *ops);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 4a93c12543ee..47f80adbe744 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -360,10 +360,9 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = 0, \
.imm = TGT })
-/* Function call */
+/* Convert function address to BPF immediate */
-#define BPF_CAST_CALL(x) \
- ((u64 (*)(u64, u64, u64, u64, u64))(x))
+#define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base)
#define BPF_EMIT_CALL(FUNC) \
((struct bpf_insn) { \
@@ -371,7 +370,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
- .imm = ((FUNC) - __bpf_call_base) })
+ .imm = BPF_CALL_IMM(FUNC) })
/* Raw code statement block */
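
BPF_CALL_IMM() gives call-patching sites one way to turn a kernel function address into a call immediate; a hedged sketch of a verifier-style fixup (the helper name is hypothetical):

	insn->imm = BPF_CALL_IMM(my_kernel_helper);	/* offset from __bpf_call_base */
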
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 59828516ebaf..9f4ad719bfe3 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -22,10 +22,15 @@ struct device;
* LINKS_ADDED: The fwnode has already be parsed to add fwnode links.
* NOT_DEVICE: The fwnode will never be populated as a struct device.
* INITIALIZED: The hardware corresponding to fwnode has been initialized.
+ * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its
+ * driver needs its child devices to be bound with
+ * their respective drivers as soon as they are
+ * added.
*/
-#define FWNODE_FLAG_LINKS_ADDED BIT(0)
-#define FWNODE_FLAG_NOT_DEVICE BIT(1)
-#define FWNODE_FLAG_INITIALIZED BIT(2)
+#define FWNODE_FLAG_LINKS_ADDED BIT(0)
+#define FWNODE_FLAG_NOT_DEVICE BIT(1)
+#define FWNODE_FLAG_INITIALIZED BIT(2)
+#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3)
struct fwnode_handle {
struct fwnode_handle *secondary;
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index c68d83c87f83..0f5315c2b5a3 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -149,6 +149,7 @@ struct gendisk {
unsigned long state;
#define GD_NEED_PART_SCAN 0
#define GD_READ_ONLY 1
+#define GD_DEAD 2
struct mutex open_mutex; /* open/close mutex */
unsigned open_partitions; /* number of open partitions */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 420dea9aa648..e5c65eedb133 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2122,6 +2122,7 @@ enum ieee80211_client_reg_power {
#define IEEE80211_HE_VHT_MAX_AMPDU_FACTOR 20
#define IEEE80211_HE_HT_MAX_AMPDU_FACTOR 16
+#define IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR 13
/* 802.11ax HE PHY capabilities */
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 23e4ee523576..9ee238ad29ce 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -251,7 +251,7 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
}
void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
irq_hw_number_t hwirq_max, int direct_max,
const struct irq_domain_ops *ops,
void *host_data);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 041ca7f15ea4..0f18df7fe874 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -608,7 +608,6 @@ struct kvm {
unsigned long mmu_notifier_range_start;
unsigned long mmu_notifier_range_end;
#endif
- long tlbs_dirty;
struct list_head devices;
u64 manual_dirty_log_protect;
struct dentry *debugfs_dentry;
@@ -721,11 +720,6 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
return NULL;
}
-static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
-{
- return vcpu->vcpu_idx;
-}
-
#define kvm_for_each_memslot(memslot, slots) \
for (memslot = &slots->memslots[0]; \
memslot < slots->memslots + slots->used_slots; memslot++) \
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index ffb787d5ebde..f622888a4ba8 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -80,6 +80,9 @@ struct mdio_driver {
/* Clears up any memory if needed */
void (*remove)(struct mdio_device *mdiodev);
+
+ /* Quiesces the device on system shutdown, turns off interrupts etc */
+ void (*shutdown)(struct mdio_device *mdiodev);
};
static inline struct mdio_driver *
@@ -346,6 +349,8 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
u16 set);
+int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set);
static inline u32 mdiobus_c45_addr(int devad, u16 regnum)
{
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 7efc0a7c14c9..182c606adb06 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -160,7 +160,10 @@ int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
#define register_hotmemory_notifier(nb) register_memory_notifier(nb)
#define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb)
#else
-#define hotplug_memory_notifier(fn, pri) ({ 0; })
+static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
+{
+ return 0;
+}
/* These aren't inline functions due to a GCC bug. */
#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h
index 92d763230bdf..a18c1539a152 100644
--- a/include/linux/mfd/idt8a340_reg.h
+++ b/include/linux/mfd/idt8a340_reg.h
@@ -506,6 +506,10 @@
#define STATE_MODE_SHIFT (0)
#define STATE_MODE_MASK (0x7)
+/* Bit definitions for the DPLL_MANU_REF_CFG register */
+#define MANUAL_REFERENCE_SHIFT (0)
+#define MANUAL_REFERENCE_MASK (0x1f)
+
/* Bit definitions for the GPIO_CFG_GBL register */
#define SUPPLY_MODE_SHIFT (0)
#define SUPPLY_MODE_MASK (0x3)
@@ -654,7 +658,7 @@
/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
enum pll_mode {
PLL_MODE_MIN = 0,
- PLL_MODE_NORMAL = PLL_MODE_MIN,
+ PLL_MODE_PLL = PLL_MODE_MIN,
PLL_MODE_WRITE_PHASE = 1,
PLL_MODE_WRITE_FREQUENCY = 2,
PLL_MODE_GPIO_INC_DEC = 3,
@@ -664,6 +668,31 @@ enum pll_mode {
PLL_MODE_MAX = PLL_MODE_DISABLED,
};
+/* Values of DPLL_CTRL_n.DPLL_MANU_REF_CFG.MANUAL_REFERENCE */
+enum manual_reference {
+ MANU_REF_MIN = 0,
+ MANU_REF_CLK0 = MANU_REF_MIN,
+ MANU_REF_CLK1,
+ MANU_REF_CLK2,
+ MANU_REF_CLK3,
+ MANU_REF_CLK4,
+ MANU_REF_CLK5,
+ MANU_REF_CLK6,
+ MANU_REF_CLK7,
+ MANU_REF_CLK8,
+ MANU_REF_CLK9,
+ MANU_REF_CLK10,
+ MANU_REF_CLK11,
+ MANU_REF_CLK12,
+ MANU_REF_CLK13,
+ MANU_REF_CLK14,
+ MANU_REF_CLK15,
+ MANU_REF_WRITE_PHASE,
+ MANU_REF_WRITE_FREQUENCY,
+ MANU_REF_XO_DPLL,
+ MANU_REF_MAX = MANU_REF_XO_DPLL,
+};
+
enum hw_tod_write_trig_sel {
HW_TOD_WR_TRIG_SEL_MIN = 0,
HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 3d43c60b49fa..1f7c33b2f5a3 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -28,6 +28,7 @@
#define PHY_ID_KSZ9031 0x00221620
#define PHY_ID_KSZ9131 0x00221640
#define PHY_ID_LAN8814 0x00221660
+#define PHY_ID_LAN8804 0x00221670
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 326250996b4e..c8077e936691 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -19,6 +19,11 @@ struct migration_target_control;
*/
#define MIGRATEPAGE_SUCCESS 0
+/*
+ * Keep in sync with:
+ * - macro MIGRATE_REASON in include/trace/events/migrate.h
+ * - migrate_reason_names[MR_TYPES] in mm/debug.c
+ */
enum migrate_reason {
MR_COMPACTION,
MR_MEMORY_FAILURE,
@@ -32,7 +37,6 @@ enum migrate_reason {
MR_TYPES
};
-/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 30bb59fe970c..6646634a0b9d 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1436,7 +1436,7 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id);
int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
-int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr,
int port, int qpn, u16 prio, u64 *reg_id);
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index a858bcb6220b..1834c8fad12e 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -92,26 +92,4 @@ void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int
struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port);
-static inline u64 mlx4_mac_to_u64(u8 *addr)
-{
- u64 mac = 0;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++) {
- mac <<= 8;
- mac |= addr[i];
- }
- return mac;
-}
-
-static inline void mlx4_u64_to_mac(u8 *addr, u64 mac)
-{
- int i;
-
- for (i = ETH_ALEN; i > 0; i--) {
- addr[i - 1] = mac & 0xFF;
- mac >>= 8;
- }
-}
-
#endif /* MLX4_DRIVER_H */
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 66eaf0aa7f69..347167c18802 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -577,7 +577,9 @@ struct mlx5_init_seg {
__be32 rsvd1[120];
__be32 initializing;
struct health_buffer health;
- __be32 rsvd2[880];
+ __be32 rsvd2[878];
+ __be32 cmd_exec_to;
+ __be32 cmd_q_init_to;
__be32 internal_timer_h;
__be32 internal_timer_l;
__be32 rsvd3[2];
@@ -1183,6 +1185,7 @@ enum mlx5_cap_type {
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
MLX5_CAP_GENERAL_2 = 0x20,
+ MLX5_CAP_PORT_SELECTION = 0x25,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1340,6 +1343,20 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(e_switch_cap, \
mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap)
+#define MLX5_CAP_PORT_SELECTION(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)
+
+#define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)
+
+#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
+
+#define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap)
+
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index e23417424373..3f4c0f2314a5 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -59,15 +59,13 @@
#define MLX5_ADEV_NAME "mlx5_core"
+#define MLX5_IRQ_EQ_CTRL (U8_MAX)
+
enum {
MLX5_BOARD_ID_LEN = 64,
};
enum {
- /* one minute for the sake of bringup. Generally, commands must always
- * complete and we may need to increase this timeout value
- */
- MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
MLX5_CMD_WQ_MAX_NAME = 32,
};
@@ -154,6 +152,7 @@ enum {
MLX5_REG_MIRC = 0x9162,
MLX5_REG_SBCAM = 0xB01F,
MLX5_REG_RESOURCE_DUMP = 0xC000,
+ MLX5_REG_DTOR = 0xC00E,
};
enum mlx5_qpts_trust_state {
@@ -752,6 +751,7 @@ struct mlx5_core_dev {
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
u8 embedded_cpu;
} caps;
+ struct mlx5_timeouts *timeouts;
u64 sys_image_guid;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
@@ -1138,7 +1138,6 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
-bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
@@ -1243,6 +1242,16 @@ static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
return MLX5_CAP_GEN(dev, native_port_num);
}
+static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev)
+{
+ int idx = MLX5_CAP_GEN(dev, native_port_num);
+
+ if (idx >= 1 && idx <= MLX5_MAX_PORTS)
+ return idx - 1;
+ else
+ return PCI_FUNC(dev->pdev->devfn);
+}
+
enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
@@ -1251,11 +1260,12 @@ static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
{
struct devlink *devlink = priv_to_devlink(dev);
union devlink_param_value val;
+ int err;
- devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- &val);
- return val.vbool;
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ &val);
+ return err ? MLX5_CAP_GEN(dev, roce) : val.vbool;
}
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h
index cea6ecb4b73e..ea3ff5a8ced3 100644
--- a/include/linux/mlx5/eq.h
+++ b/include/linux/mlx5/eq.h
@@ -4,7 +4,6 @@
#ifndef MLX5_CORE_EQ_H
#define MLX5_CORE_EQ_H
-#define MLX5_IRQ_VEC_COMP_BASE 1
#define MLX5_NUM_CMD_EQE (32)
#define MLX5_NUM_ASYNC_EQE (0x1000)
#define MLX5_NUM_SPARE_EQE (0x80)
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 4ab5c1fc1270..97afcea39a7b 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -130,11 +130,20 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
#define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET)
#define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET)
#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
+#define ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT
/* 0x7FF is a reserved mapping */
#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
+/* 0x7FE is a reserved mapping for bridge ingress push vlan mark */
+#define ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN (ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT - 1)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN ((ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN << \
+ ESW_TUN_OPTS_BITS) | \
+ ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK \
+ GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
+ ESW_TUN_OPTS_OFFSET + 1)
u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 0106c67e8ccb..7a43fec63a35 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -83,6 +83,7 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_RDMA_RX,
MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
MLX5_FLOW_NAMESPACE_RDMA_TX,
+ MLX5_FLOW_NAMESPACE_PORT_SEL,
};
enum {
@@ -97,6 +98,7 @@ enum {
struct mlx5_pkt_reformat;
struct mlx5_modify_hdr;
+struct mlx5_flow_definer;
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_namespace;
@@ -257,6 +259,13 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
void *modify_actions);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr);
+struct mlx5_flow_definer *
+mlx5_create_match_definer(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type, u16 format_id,
+ u32 *match_mask);
+void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
+ struct mlx5_flow_definer *definer);
+int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer);
struct mlx5_pkt_reformat_params {
int type;
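
A hedged usage sketch of the definer API declared above: build a mask laid out per one of the match-definer formats, create the definer, read back its id for use in a flow group, and destroy it on teardown. Error handling is trimmed, and format id 22 (the outer-IP 4-tuple layout added below in mlx5_ifc.h) is just an example.

    u32 mask[MLX5_ST_SZ_DW(match_definer)] = {};  /* caller-built mask */
    struct mlx5_flow_definer *definer;
    int definer_id;

    definer = mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
                                        22 /* format id, example */, mask);
    if (IS_ERR(definer))
        return PTR_ERR(definer);

    definer_id = mlx5_get_match_definer_id(definer);
    /* ... use definer_id when creating a hash-split flow group ... */
    mlx5_destroy_match_definer(dev, definer);
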
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index f3638d09ba77..09e43019d877 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -94,6 +94,7 @@ enum {
enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
+ MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
MLX5_OBJ_TYPE_PSV = 0xff03,
@@ -767,6 +768,18 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 reserved_at_20c0[0x5f40];
};
+struct mlx5_ifc_port_selection_cap_bits {
+ u8 reserved_at_0[0x10];
+ u8 port_select_flow_table[0x1];
+ u8 reserved_at_11[0xf];
+
+ u8 reserved_at_20[0x1e0];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection;
+
+ u8 reserved_at_400[0x7c00];
+};
+
enum {
MLX5_FDB_TO_VPORT_REG_C_0 = 0x01,
MLX5_FDB_TO_VPORT_REG_C_1 = 0x02,
@@ -1306,7 +1319,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vhca_resource_manager[0x1];
u8 hca_cap_2[0x1];
- u8 reserved_at_21[0x2];
+ u8 reserved_at_21[0x1];
+ u8 dtor[0x1];
u8 event_on_vhca_state_teardown_request[0x1];
u8 event_on_vhca_state_in_use[0x1];
u8 event_on_vhca_state_active[0x1];
@@ -1514,7 +1528,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 uar_4k[0x1];
u8 reserved_at_241[0x9];
u8 uar_sz[0x6];
- u8 reserved_at_248[0x2];
+ u8 port_selection_cap[0x1];
+ u8 reserved_at_248[0x1];
u8 umem_uid_0[0x1];
u8 reserved_at_250[0x5];
u8 log_pg_sz[0x8];
@@ -1587,7 +1602,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_tis_per_sq[0x5];
u8 ext_stride_num_range[0x1];
- u8 reserved_at_3a1[0x2];
+ u8 roce_rw_supported[0x1];
+ u8 reserved_at_3a2[0x1];
u8 log_max_stride_sz_rq[0x5];
u8 reserved_at_3a8[0x3];
u8 log_min_stride_sz_rq[0x5];
@@ -1716,7 +1732,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 flex_parser_id_outer_first_mpls_over_gre[0x4];
u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
- u8 reserved_at_6e0[0x10];
+ u8 max_num_match_definer[0x10];
u8 sf_base_id[0x10];
u8 flex_parser_id_gtpu_dw_2[0x4];
@@ -1731,7 +1747,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_760[0x20];
u8 vhca_tunnel_commands[0x40];
- u8 reserved_at_7c0[0x40];
+ u8 match_definer_format_supported[0x40];
};
struct mlx5_ifc_cmd_hca_cap_2_bits {
@@ -1750,6 +1766,7 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
+ MLX5_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99,
MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
@@ -2807,6 +2824,40 @@ struct mlx5_ifc_dropped_packet_logged_bits {
u8 reserved_at_0[0xe0];
};
+struct mlx5_ifc_default_timeout_bits {
+ u8 to_multiplier[0x3];
+ u8 reserved_at_3[0x9];
+ u8 to_value[0x14];
+};
+
+struct mlx5_ifc_dtor_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ struct mlx5_ifc_default_timeout_bits pcie_toggle_to;
+
+ u8 reserved_at_40[0x60];
+
+ struct mlx5_ifc_default_timeout_bits health_poll_to;
+
+ struct mlx5_ifc_default_timeout_bits full_crdump_to;
+
+ struct mlx5_ifc_default_timeout_bits fw_reset_to;
+
+ struct mlx5_ifc_default_timeout_bits flush_on_err_to;
+
+ struct mlx5_ifc_default_timeout_bits pci_sync_update_to;
+
+ struct mlx5_ifc_default_timeout_bits tear_down_to;
+
+ struct mlx5_ifc_default_timeout_bits fsm_reactivate_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_pages_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_vfs_pages_to;
+
+ u8 reserved_at_1c0[0x40];
+};
+
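
A hedged sketch of consuming the new layout: query the DTOR access register and pull one default timeout out with the standard MLX5_GET accessors, which reach the nested default_timeout fields via member paths. MLX5_REG_DTOR and the access-register plumbing are assumed to exist elsewhere in the driver.

    u32 out[MLX5_ST_SZ_DW(dtor_reg)] = {};
    u32 in[MLX5_ST_SZ_DW(dtor_reg)] = {};
    u32 to_val, to_mult;
    int err;

    err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
                               MLX5_REG_DTOR, 0, 0);  /* 0 == read */
    if (err)
        return err;

    to_val = MLX5_GET(dtor_reg, out, pcie_toggle_to.to_value);
    to_mult = MLX5_GET(dtor_reg, out, pcie_toggle_to.to_multiplier);
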
enum {
MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
@@ -3128,6 +3179,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
+ struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
struct mlx5_ifc_debug_cap_bits debug_cap;
@@ -5616,6 +5668,236 @@ struct mlx5_ifc_query_fte_in_bits {
u8 reserved_at_120[0xe0];
};
+struct mlx5_ifc_match_definer_format_0_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 metadata_reg_c_1[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_dmac_15_0[0x10];
+ u8 outer_ethertype[0x10];
+
+ u8 reserved_at_180[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 outer_l4_type_ext[0x4];
+ u8 reserved_at_1a4[0x2];
+ u8 outer_ipsec_layer[0x2];
+ u8 outer_l2_type[0x2];
+ u8 force_lb[0x1];
+ u8 outer_l2_ok[0x1];
+ u8 outer_l3_ok[0x1];
+ u8 outer_l4_ok[0x1];
+ u8 outer_second_vlan_type[0x2];
+ u8 outer_second_vlan_prio[0x3];
+ u8 outer_second_vlan_cfi[0x1];
+ u8 outer_second_vlan_vid[0xc];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 inner_ipv4_checksum_ok[0x1];
+ u8 inner_l4_checksum_ok[0x1];
+ u8 outer_ipv4_checksum_ok[0x1];
+ u8 outer_l4_checksum_ok[0x1];
+ u8 inner_l3_ok[0x1];
+ u8 inner_l4_ok[0x1];
+ u8 outer_l3_ok_duplicate[0x1];
+ u8 outer_l4_ok_duplicate[0x1];
+ u8 outer_tcp_cwr[0x1];
+ u8 outer_tcp_ece[0x1];
+ u8 outer_tcp_urg[0x1];
+ u8 outer_tcp_ack[0x1];
+ u8 outer_tcp_psh[0x1];
+ u8 outer_tcp_rst[0x1];
+ u8 outer_tcp_syn[0x1];
+ u8 outer_tcp_fin[0x1];
+};
+
+struct mlx5_ifc_match_definer_format_22_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 outer_ip_src_addr[0x20];
+
+ u8 outer_ip_dest_addr[0x20];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_23_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 inner_ip_src_addr[0x20];
+
+ u8 inner_ip_dest_addr[0x20];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 inner_ip_frag[0x1];
+ u8 inner_qp_type[0x2];
+ u8 inner_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 inner_l3_type[0x2];
+ u8 inner_l4_type[0x2];
+ u8 inner_first_vlan_type[0x2];
+ u8 inner_first_vlan_prio[0x3];
+ u8 inner_first_vlan_cfi[0x1];
+ u8 inner_first_vlan_vid[0xc];
+
+ u8 tunnel_header_0[0x20];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_29_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_match_definer_format_30_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_31_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_match_definer_format_32_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x10];
+ u8 format_id[0x10];
+
+ u8 reserved_at_a0[0x160];
+
+ u8 match_mask[16][0x20];
+};
+
+struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 vhca_tunnel_id[0x10];
+ u8 obj_type[0x10];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_match_definer_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_match_definer_bits obj_context;
+};
+
+struct mlx5_ifc_create_match_definer_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
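
Match definers are created through the generic object machinery, which is why the general_obj_*_cmd_hdr structs move up in this header. A hedged sketch of the command a driver would build, reusing the existing CREATE_GENERAL_OBJECT opcode:

    static int foo_create_definer(struct mlx5_core_dev *dev, u16 format_id,
                                  u32 *match_mask, u32 *obj_id)
    {
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
        u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
        void *obj = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
        int err;

        MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
                 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
                 MLX5_OBJ_TYPE_MATCH_DEFINER);
        MLX5_SET(match_definer, obj, format_id, format_id);
        memcpy(MLX5_ADDR_OF(match_definer, obj, match_mask), match_mask,
               MLX5_FLD_SZ_BYTES(match_definer, match_mask));

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (!err)
            *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
        return err;
    }
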
enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
@@ -8090,6 +8372,11 @@ struct mlx5_ifc_create_flow_group_out_bits {
};
enum {
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_TCAM_SUBTABLE = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT = 0x1,
+};
+
+enum {
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
@@ -8110,7 +8397,9 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x4];
+ u8 group_type[0x4];
+ u8 reserved_at_90[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -8125,7 +8414,10 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 end_flow_index[0x20];
- u8 reserved_at_140[0xa0];
+ u8 reserved_at_140[0x10];
+ u8 match_definer_id[0x10];
+
+ u8 reserved_at_160[0x80];
u8 reserved_at_1e0[0x18];
u8 match_criteria_enable[0x8];
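
A hedged sketch of what a hash-split group adds on top of a regular CREATE_FLOW_GROUP command: the new group_type selector plus the definer id obtained earlier; TCAM_SUBTABLE (0) keeps the existing match-criteria behaviour.

    MLX5_SET(create_flow_group_in, in, group_type,
             MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);
    MLX5_SET(create_flow_group_in, in, match_definer_id, definer_id);
    /* start/end_flow_index, table_type, table_id as for any flow group */
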
@@ -9475,16 +9767,22 @@ struct mlx5_ifc_pcmr_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
u8 reserved_at_10[0x10];
+
u8 entropy_force_cap[0x1];
u8 entropy_calc_cap[0x1];
u8 entropy_gre_calc_cap[0x1];
- u8 reserved_at_23[0x1b];
+ u8 reserved_at_23[0xf];
+ u8 rx_ts_over_crc_cap[0x1];
+ u8 reserved_at_33[0xb];
u8 fcs_cap[0x1];
u8 reserved_at_3f[0x1];
+
u8 entropy_force[0x1];
u8 entropy_calc[0x1];
u8 entropy_gre_calc[0x1];
- u8 reserved_at_43[0x1b];
+ u8 reserved_at_43[0xf];
+ u8 rx_ts_over_crc[0x1];
+ u8 reserved_at_53[0xb];
u8 fcs_chk[0x1];
u8 reserved_at_5f[0x1];
};
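
A hedged sketch of flipping the new bit: write the config half of the register through the existing access-register interface (MLX5_REG_PCMR and mlx5_core_access_reg are existing mlx5 plumbing; the final argument selects write). A real caller would check rx_ts_over_crc_cap first.

    u32 out[MLX5_ST_SZ_DW(pcmr_reg)] = {};
    u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
    int err;

    MLX5_SET(pcmr_reg, in, local_port, 1);
    MLX5_SET(pcmr_reg, in, rx_ts_over_crc, 1);  /* timestamp replaces CRC */
    err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
                               MLX5_REG_PCMR, 0, 1);
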
@@ -10392,9 +10690,16 @@ struct mlx5_ifc_dcbx_param_bits {
u8 reserved_at_a0[0x160];
};
+enum {
+ MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY = 0,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT,
+};
+
struct mlx5_ifc_lagc_bits {
u8 fdb_selection_mode[0x1];
- u8 reserved_at_1[0x1c];
+ u8 reserved_at_1[0x14];
+ u8 port_select_mode[0x3];
+ u8 reserved_at_18[0x5];
u8 lag_state[0x3];
u8 reserved_at_20[0x14];
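
A hedged sketch of driving the new field when creating a LAG: the lagc context is embedded in the CREATE_LAG command (create_lag_in is assumed here, defined elsewhere in this header), and PORT_SELECT_FT asks firmware to steer by the new port-selection flow table instead of queue affinity.

    u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
    void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);

    MLX5_SET(lagc, lag_ctx, port_select_mode,
             MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT);
    MLX5_SET(lagc, lag_ctx, fdb_selection_mode, 0);
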
@@ -10608,29 +10913,6 @@ struct mlx5_ifc_dealloc_memic_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
-
- u8 vhca_tunnel_id[0x10];
- u8 obj_type[0x10];
-
- u8 obj_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 obj_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
struct mlx5_ifc_umem_bits {
u8 reserved_at_0[0x80];
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7f8ee09c711f..436e0946d691 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -104,18 +104,7 @@ struct page {
struct page_pool *pp;
unsigned long _pp_mapping_pad;
unsigned long dma_addr;
- union {
- /**
- * dma_addr_upper: might require a 64-bit
- * value on 32-bit architectures.
- */
- unsigned long dma_addr_upper;
- /**
- * For frag page support, not supported in
- * 32-bit architectures with 64-bit DMA.
- */
- atomic_long_t pp_frag_count;
- };
+ atomic_long_t pp_frag_count;
};
struct { /* slab, slob and slub */
union {
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d79163208dfd..3ec42495a43a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1861,6 +1861,7 @@ enum netdev_ml_priv_type {
* @xps_maps: XXX: need comments on this one
* @miniq_egress: clsact qdisc specific data for
* egress processing
+ * @nf_hooks_egress: netfilter hooks executed for egress packets
* @qdisc_hash: qdisc hash table
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog (see dev_watchdog())
@@ -1916,7 +1917,6 @@ enum netdev_ml_priv_type {
* @sfp_bus: attached &struct sfp_bus structure.
*
* @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
- * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
*
* @proto_down: protocol port state information can be sent to the
* switch driver and used to set the phys state of the
@@ -2161,6 +2161,9 @@ struct net_device {
#ifdef CONFIG_NET_CLS_ACT
struct mini_Qdisc __rcu *miniq_egress;
#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ struct nf_hook_entries __rcu *nf_hooks_egress;
+#endif
#ifdef CONFIG_NET_SCHED
DECLARE_HASHTABLE (qdisc_hash, 4);
@@ -2250,7 +2253,6 @@ struct net_device {
struct phy_device *phydev;
struct sfp_bus *sfp_bus;
struct lock_class_key *qdisc_tx_busylock;
- struct lock_class_key *qdisc_running_key;
bool proto_down;
unsigned wol_enabled:1;
unsigned threaded:1;
@@ -2360,13 +2362,11 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
#define netdev_lockdep_set_classes(dev) \
{ \
static struct lock_class_key qdisc_tx_busylock_key; \
- static struct lock_class_key qdisc_running_key; \
static struct lock_class_key qdisc_xmit_lock_key; \
static struct lock_class_key dev_addr_list_lock_key; \
unsigned int i; \
\
(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
- (dev)->qdisc_running_key = &qdisc_running_key; \
lockdep_set_class(&(dev)->addr_list_lock, \
&dev_addr_list_lock_key); \
for (i = 0; i < (dev)->num_tx_queues; i++) \
@@ -2955,6 +2955,7 @@ struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
+bool netdev_name_in_use(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
void dev_close(struct net_device *dev);
@@ -4642,7 +4643,7 @@ void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
static inline void
-__dev_addr_set(struct net_device *dev, const u8 *addr, size_t len)
+__dev_addr_set(struct net_device *dev, const void *addr, size_t len)
{
memcpy(dev->dev_addr, addr, len);
}
@@ -4654,7 +4655,7 @@ static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
static inline void
dev_addr_mod(struct net_device *dev, unsigned int offset,
- const u8 *addr, size_t len)
+ const void *addr, size_t len)
{
memcpy(&dev->dev_addr[offset], addr, len);
}
@@ -4800,8 +4801,6 @@ struct netdev_nested_priv {
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
struct list_head **iter);
-struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
- struct list_head **iter);
#ifdef CONFIG_LOCKDEP
static LIST_HEAD(net_unlink_list);
@@ -5236,7 +5235,7 @@ static inline void netif_keep_dst(struct net_device *dev)
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
/* TODO: reserve and use an additional IFF bit, if we get more users */
- return dev->priv_flags & IFF_MACSEC;
+ return netif_is_macsec(dev);
}
extern struct pernet_operations __net_initdata loopback_net_ops;
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index 4f9a4b3c5892..a40aaf645fa4 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -54,9 +54,8 @@ int arpt_register_table(struct net *net, const struct xt_table *table,
const struct nf_hook_ops *ops);
void arpt_unregister_table(struct net *net, const char *name);
void arpt_unregister_table_pre_exit(struct net *net, const char *name);
-extern unsigned int arpt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
+extern unsigned int arpt_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 10a01978bc0d..a13296d6c7ce 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -112,9 +112,8 @@ extern int ebt_register_table(struct net *net,
const struct nf_hook_ops *ops);
extern void ebt_unregister_table(struct net *net, const char *tablename);
void ebt_unregister_table_pre_exit(struct net *net, const char *tablename);
-extern unsigned int ebt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct ebt_table *table);
+extern unsigned int ebt_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
/* True if the hook mask denotes that the rule is in a base chain,
* used in the check() functions */
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
deleted file mode 100644
index a13774be2eb5..000000000000
--- a/include/linux/netfilter_ingress.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NETFILTER_INGRESS_H_
-#define _NETFILTER_INGRESS_H_
-
-#include <linux/netfilter.h>
-#include <linux/netdevice.h>
-
-#ifdef CONFIG_NETFILTER_INGRESS
-static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
-{
-#ifdef CONFIG_JUMP_LABEL
- if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
- return false;
-#endif
- return rcu_access_pointer(skb->dev->nf_hooks_ingress);
-}
-
-/* caller must hold rcu_read_lock */
-static inline int nf_hook_ingress(struct sk_buff *skb)
-{
- struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
- struct nf_hook_state state;
- int ret;
-
- /* Must recheck the ingress hook head, in the event it became NULL
- * after the check in nf_hook_ingress_active evaluated to true.
- */
- if (unlikely(!e))
- return 0;
-
- nf_hook_state_init(&state, NF_NETDEV_INGRESS,
- NFPROTO_NETDEV, skb->dev, NULL, NULL,
- dev_net(skb->dev), NULL);
- ret = nf_hook_slow(skb, &state, e, 0);
- if (ret == 0)
- return -1;
-
- return ret;
-}
-
-static inline void nf_hook_ingress_init(struct net_device *dev)
-{
- RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
-}
-#else /* CONFIG_NETFILTER_INGRESS */
-static inline int nf_hook_ingress_active(struct sk_buff *skb)
-{
- return 0;
-}
-
-static inline int nf_hook_ingress(struct sk_buff *skb)
-{
- return 0;
-}
-
-static inline void nf_hook_ingress_init(struct net_device *dev) {}
-#endif /* CONFIG_NETFILTER_INGRESS */
-#endif /* _NETFILTER_INGRESS_H_ */
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index 8d09bfe850dc..132b0e4a6d4d 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -63,9 +63,9 @@ struct ipt_error {
}
extern void *ipt_alloc_initial_table(const struct xt_table *);
-extern unsigned int ipt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
+extern unsigned int ipt_do_table(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 79e73fd7d965..8b8885a73c76 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -29,9 +29,8 @@ int ip6t_register_table(struct net *net, const struct xt_table *table,
const struct nf_hook_ops *ops);
void ip6t_unregister_table_pre_exit(struct net *net, const char *name);
void ip6t_unregister_table_exit(struct net *net, const char *name);
-extern unsigned int ip6t_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
+extern unsigned int ip6t_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
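
The signature change across arpt/ebt/ipt/ip6t_do_table matches the nf_hookfn typedef, so a table's traversal function can now be wired into nf_hook_ops directly instead of through a per-table wrapper; the table itself travels in the hook's priv pointer, filled in at registration time. A hedged sketch:

    /* typedef unsigned int nf_hookfn(void *priv, struct sk_buff *skb,
     *                                const struct nf_hook_state *state); */
    static const struct nf_hook_ops filter_ops = {
        .hook     = ipt_do_table,       /* no wrapper needed any more */
        .pf       = NFPROTO_IPV4,
        .hooknum  = NF_INET_LOCAL_IN,
        .priority = NF_IP_PRI_FILTER,
        /* .priv is set to the struct xt_table * when registering */
    };
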
diff --git a/include/linux/netfilter_netdev.h b/include/linux/netfilter_netdev.h
new file mode 100644
index 000000000000..b71b57a83bb4
--- /dev/null
+++ b/include/linux/netfilter_netdev.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NETFILTER_NETDEV_H_
+#define _NETFILTER_NETDEV_H_
+
+#include <linux/netfilter.h>
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_NETFILTER_INGRESS
+static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
+{
+#ifdef CONFIG_JUMP_LABEL
+ if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
+ return false;
+#endif
+ return rcu_access_pointer(skb->dev->nf_hooks_ingress);
+}
+
+/* caller must hold rcu_read_lock */
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
+ struct nf_hook_state state;
+ int ret;
+
+ /* Must recheck the ingress hook head, in the event it became NULL
+ * after the check in nf_hook_ingress_active evaluated to true.
+ */
+ if (unlikely(!e))
+ return 0;
+
+ nf_hook_state_init(&state, NF_NETDEV_INGRESS,
+ NFPROTO_NETDEV, skb->dev, NULL, NULL,
+ dev_net(skb->dev), NULL);
+ ret = nf_hook_slow(skb, &state, e, 0);
+ if (ret == 0)
+ return -1;
+
+ return ret;
+}
+
+#else /* CONFIG_NETFILTER_INGRESS */
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ return 0;
+}
+#endif /* CONFIG_NETFILTER_INGRESS */
+
+#ifdef CONFIG_NETFILTER_EGRESS
+static inline bool nf_hook_egress_active(void)
+{
+#ifdef CONFIG_JUMP_LABEL
+ if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_EGRESS]))
+ return false;
+#endif
+ return true;
+}
+
+/**
+ * nf_hook_egress - classify packets before transmission
+ * @skb: packet to be classified
+ * @rc: result code which shall be returned by __dev_queue_xmit() on failure
+ * @dev: netdev whose egress hooks shall be applied to @skb
+ *
+ * Returns @skb on success or %NULL if the packet was consumed or filtered.
+ * Caller must hold rcu_read_lock.
+ *
+ * On ingress, packets are classified first by tc, then by netfilter.
+ * On egress, the order is reversed for symmetry. Conceptually, tc and
+ * netfilter can be thought of as layers, with netfilter layered above tc:
+ * When tc redirects a packet to another interface, netfilter is not applied
+ * because the packet is on the tc layer.
+ *
+ * The nf_skip_egress flag controls whether netfilter is applied on egress.
+ * It is updated by __netif_receive_skb_core() and __dev_queue_xmit() when the
+ * packet passes through tc and netfilter. Because __dev_queue_xmit() may be
+ * called recursively by tunnel drivers such as vxlan, the flag is reverted to
+ * false after sch_handle_egress(). This ensures that netfilter is applied
+ * both on the overlay and underlying network.
+ */
+static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
+ struct net_device *dev)
+{
+ struct nf_hook_entries *e;
+ struct nf_hook_state state;
+ int ret;
+
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ if (skb->nf_skip_egress)
+ return skb;
+#endif
+
+ e = rcu_dereference(dev->nf_hooks_egress);
+ if (!e)
+ return skb;
+
+ nf_hook_state_init(&state, NF_NETDEV_EGRESS,
+ NFPROTO_NETDEV, dev, NULL, NULL,
+ dev_net(dev), NULL);
+ ret = nf_hook_slow(skb, &state, e, 0);
+
+ if (ret == 1) {
+ return skb;
+ } else if (ret < 0) {
+ *rc = NET_XMIT_DROP;
+ return NULL;
+ } else { /* ret == 0 */
+ *rc = NET_XMIT_SUCCESS;
+ return NULL;
+ }
+}
+#else /* CONFIG_NETFILTER_EGRESS */
+static inline bool nf_hook_egress_active(void)
+{
+ return false;
+}
+
+static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
+ struct net_device *dev)
+{
+ return skb;
+}
+#endif /* CONFIG_NETFILTER_EGRESS */
+
+static inline void nf_skip_egress(struct sk_buff *skb, bool skip)
+{
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ skb->nf_skip_egress = skip;
+#endif
+}
+
+static inline void nf_hook_netdev_init(struct net_device *dev)
+{
+#ifdef CONFIG_NETFILTER_INGRESS
+ RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
+#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ RCU_INIT_POINTER(dev->nf_hooks_egress, NULL);
+#endif
+}
+
+#endif /* _NETFILTER_NETDEV_H_ */
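
Roughly how the new hook slots into the transmit path, as a hedged sketch of a __dev_queue_xmit-style caller (sch_handle_egress stands in for the existing tc egress entry point): netfilter runs before tc on egress, the skip flag keeps a tc redirect on the tc layer, and a NULL return means the verdict has already been folded into rc.

    rcu_read_lock();
    if (nf_hook_egress_active()) {
        skb = nf_hook_egress(skb, &rc, dev);    /* netfilter above tc */
        if (!skb)
            goto out;   /* consumed or dropped; rc already set */
    }
    nf_skip_egress(skb, true);      /* a tc redirect stays on the tc layer */
    skb = sch_handle_egress(skb, &rc, dev);
    if (!skb)
        goto out;
    nf_skip_egress(skb, false);     /* re-arm for recursive xmit (tunnels) */
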
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 61b1c7fcc401..1ec631838af9 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -156,10 +156,6 @@ bool netlink_strict_get_check(struct sk_buff *skb);
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);
-int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
- __u32 portid, __u32 group, gfp_t allocation,
- int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
- void *filter_data);
int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
int netlink_register_notifier(struct notifier_block *nb);
int netlink_unregister_notifier(struct notifier_block *nb);
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 923dada24eb4..c0c0cefc3b92 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -150,6 +150,20 @@ static inline int nvmem_cell_read_u64(struct device *dev,
return -EOPNOTSUPP;
}
+static inline int nvmem_cell_read_variable_le_u32(struct device *dev,
+ const char *cell_id,
+ u32 *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nvmem_cell_read_variable_le_u64(struct device *dev,
+ const char *cell_id,
+ u64 *val)
+{
+ return -EOPNOTSUPP;
+}
+
static inline struct nvmem_device *nvmem_device_get(struct device *dev,
const char *name)
{
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index daef3b0d9270..0484b613ca64 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -8,12 +8,13 @@
#include <linux/phy.h>
-#ifdef CONFIG_OF_NET
+#if defined(CONFIG_OF) && defined(CONFIG_NET)
#include <linux/of.h>
struct net_device;
extern int of_get_phy_mode(struct device_node *np, phy_interface_t *interface);
extern int of_get_mac_address(struct device_node *np, u8 *mac);
+int of_get_ethdev_address(struct device_node *np, struct net_device *dev);
extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
static inline int of_get_phy_mode(struct device_node *np,
@@ -27,6 +28,11 @@ static inline int of_get_mac_address(struct device_node *np, u8 *mac)
return -ENODEV;
}
+static inline int of_get_ethdev_address(struct device_node *np, struct net_device *dev)
+{
+ return -ENODEV;
+}
+
static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
{
return NULL;
diff --git a/include/linux/packing.h b/include/linux/packing.h
index 54667735cc67..8d6571feb95d 100644
--- a/include/linux/packing.h
+++ b/include/linux/packing.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
*/
#ifndef _LINUX_PACKING_H
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 505480217cf1..2512e2f9cd4e 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -163,6 +163,12 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif
+#ifdef CONFIG_KVM
+void kvm_host_pmu_init(struct arm_pmu *pmu);
+#else
+#define kvm_host_pmu_init(x) do { } while (0)
+#endif
+
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 0cbc5dfe1110..2c41c8da1515 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -684,7 +684,9 @@ struct perf_event {
/*
* timestamp shadows the actual context timing but it can
* be safely used in NMI interrupt context. It reflects the
- * context time as it was when the event was last scheduled in.
+ * context time as it was when the event was last scheduled in,
+ * or when ctx_sched_in failed to schedule the event because we
+ * ran out of PMCs.
*
* ctx_time already accounts for ctx->timestamp. Therefore to
* compute ctx_time for a sample, simply add perf_clock().
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 237291196ce2..f7b5ed06a815 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -484,6 +484,7 @@ int phylink_speed_up(struct phylink *pl);
#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode)
void phylink_set_port_modes(unsigned long *bits);
+void phylink_set_10g_modes(unsigned long *mask);
void phylink_helper_basex_speed(struct phylink_link_state *state);
void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
index 6beb26b7151d..86be8bf27b41 100644
--- a/include/linux/pkeys.h
+++ b/include/linux/pkeys.h
@@ -4,6 +4,8 @@
#include <linux/mm.h>
+#define ARCH_DEFAULT_PKEY 0
+
#ifdef CONFIG_ARCH_HAS_PKEYS
#include <asm/pkeys.h>
#else /* ! CONFIG_ARCH_HAS_PKEYS */
diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h
index 1d30bf278231..2b5676ff35be 100644
--- a/include/linux/platform_data/brcmfmac.h
+++ b/include/linux/platform_data/brcmfmac.h
@@ -125,7 +125,7 @@ struct brcmfmac_pd_cc_entry {
*/
struct brcmfmac_pd_cc {
int table_size;
- struct brcmfmac_pd_cc_entry table[0];
+ struct brcmfmac_pd_cc_entry table[];
};
/**
diff --git a/include/linux/platform_data/usb-omap1.h b/include/linux/platform_data/usb-omap1.h
index 43b5ce139c37..878e572a78bf 100644
--- a/include/linux/platform_data/usb-omap1.h
+++ b/include/linux/platform_data/usb-omap1.h
@@ -48,6 +48,8 @@ struct omap_usb_config {
u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup);
int (*ocpi_enable)(void);
+
+ void (*lb_reset)(void);
};
#endif /* __LINUX_USB_OMAP1_H */
diff --git a/include/linux/property.h b/include/linux/property.h
index 357513a977e5..88fa726a76df 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
struct device;
+struct net_device;
enum dev_prop_type {
DEV_PROP_U8,
@@ -389,11 +390,7 @@ const void *device_get_match_data(struct device *dev);
int device_get_phy_mode(struct device *dev);
-void *device_get_mac_address(struct device *dev, char *addr, int alen);
-
int fwnode_get_phy_mode(struct fwnode_handle *fwnode);
-void *fwnode_get_mac_address(struct fwnode_handle *fwnode,
- char *addr, int alen);
struct fwnode_handle *fwnode_graph_get_next_endpoint(
const struct fwnode_handle *fwnode, struct fwnode_handle *prev);
struct fwnode_handle *
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index c0475d1c9885..81cad9e1e412 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -61,7 +61,6 @@ enum qcom_scm_ice_cipher {
#define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
-#if IS_ENABLED(CONFIG_QCOM_SCM)
extern bool qcom_scm_is_available(void);
extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
@@ -115,74 +114,4 @@ extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
extern int qcom_scm_lmh_profile_change(u32 profile_id);
extern bool qcom_scm_lmh_dcvsh_available(void);
-#else
-
-#include <linux/errno.h>
-
-static inline bool qcom_scm_is_available(void) { return false; }
-
-static inline int qcom_scm_set_cold_boot_addr(void *entry,
- const cpumask_t *cpus) { return -ENODEV; }
-static inline int qcom_scm_set_warm_boot_addr(void *entry,
- const cpumask_t *cpus) { return -ENODEV; }
-static inline void qcom_scm_cpu_power_down(u32 flags) {}
-static inline u32 qcom_scm_set_remote_state(u32 state,u32 id)
- { return -ENODEV; }
-
-static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
- size_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
- phys_addr_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_auth_and_reset(u32 peripheral)
- { return -ENODEV; }
-static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
-static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; }
-
-static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
- { return -ENODEV; }
-static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
- { return -ENODEV; }
-
-static inline bool qcom_scm_restore_sec_cfg_available(void) { return false; }
-static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
- { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
- { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
- { return -ENODEV; }
-extern inline int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
- u32 cp_nonpixel_start,
- u32 cp_nonpixel_size)
- { return -ENODEV; }
-static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
- unsigned int *src, const struct qcom_scm_vmperm *newvm,
- unsigned int dest_cnt) { return -ENODEV; }
-
-static inline bool qcom_scm_ocmem_lock_available(void) { return false; }
-static inline int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
- u32 size, u32 mode) { return -ENODEV; }
-static inline int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id,
- u32 offset, u32 size) { return -ENODEV; }
-
-static inline bool qcom_scm_ice_available(void) { return false; }
-static inline int qcom_scm_ice_invalidate_key(u32 index) { return -ENODEV; }
-static inline int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
- enum qcom_scm_ice_cipher cipher,
- u32 data_unit_size) { return -ENODEV; }
-
-static inline bool qcom_scm_hdcp_available(void) { return false; }
-static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
- u32 *resp) { return -ENODEV; }
-
-static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
- { return -ENODEV; }
-
-static inline int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
- u64 limit_node, u32 node_id, u64 version)
- { return -ENODEV; }
-
-static inline int qcom_scm_lmh_profile_change(u32 profile_id) { return -ENODEV; }
-
-static inline bool qcom_scm_lmh_dcvsh_available(void) { return -ENODEV; }
-#endif
#endif
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 0a3807e927c5..827624840ee2 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2016 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#ifndef _COMMON_HSI_H
@@ -47,10 +47,10 @@
#define ISCSI_CDU_TASK_SEG_TYPE 0
#define FCOE_CDU_TASK_SEG_TYPE 0
#define RDMA_CDU_TASK_SEG_TYPE 1
+#define ETH_CDU_TASK_SEG_TYPE 2
#define FW_ASSERT_GENERAL_ATTN_IDX 32
-
/* Queue Zone sizes in bytes */
#define TSTORM_QZONE_SIZE 8
#define MSTORM_QZONE_SIZE 16
@@ -60,9 +60,12 @@
#define PSTORM_QZONE_SIZE 0
#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
+#define ETH_MAX_RXQ_VF_DEFAULT 16
+#define ETH_MAX_RXQ_VF_DOUBLE 48
+#define ETH_MAX_RXQ_VF_QUAD 112
+
+#define ETH_RGSRC_CTX_SIZE 6
+#define ETH_TGSRC_CTX_SIZE 6
/********************************/
/* CORE (LIGHT L2) FW CONSTANTS */
@@ -89,8 +92,8 @@
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 42
-#define FW_REVISION_VERSION 2
+#define FW_MINOR_VERSION 59
+#define FW_REVISION_VERSION 1
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -112,6 +115,7 @@
#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
@@ -133,7 +137,7 @@
#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
/* CIDs */
-#define NUM_OF_CONNECTION_TYPES_E4 (8)
+#define NUM_OF_CONNECTION_TYPES (8)
#define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320)
@@ -144,7 +148,7 @@
#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS)
/* Tools Version */
-#define TOOLS_VERSION 10
+#define TOOLS_VERSION 11
/*****************/
/* CDU CONSTANTS */
@@ -162,6 +166,7 @@
#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3)
#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4)
#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5)
+#define CDU_CONTEXT_VALIDATION_DEFAULT_CFG (0x3d)
/*****************/
/* DQ CONSTANTS */
@@ -302,6 +307,9 @@
/* PWM address mapping */
#define DQ_PWM_OFFSET_DPM_BASE 0x0
#define DQ_PWM_OFFSET_DPM_END 0x27
+#define DQ_PWM_OFFSET_XCM32_24ICID_BASE 0x28
+#define DQ_PWM_OFFSET_UCM32_24ICID_BASE 0x30
+#define DQ_PWM_OFFSET_TCM32_24ICID_BASE 0x38
#define DQ_PWM_OFFSET_XCM16_BASE 0x40
#define DQ_PWM_OFFSET_XCM32_BASE 0x44
#define DQ_PWM_OFFSET_UCM16_BASE 0x48
@@ -325,6 +333,13 @@
#define DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE \
(DQ_PWM_OFFSET_TCM32_BASE + DQ_TCM_AGG_VAL_SEL_REG9 - 4)
+#define DQ_PWM_OFFSET_XCM_RDMA_24B_ICID_SQ_PROD \
+ (DQ_PWM_OFFSET_XCM32_24ICID_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_24B_ICID_CQ_CONS_32BIT \
+ (DQ_PWM_OFFSET_UCM32_24ICID_BASE + 4)
+#define DQ_PWM_OFFSET_TCM_ROCE_24B_ICID_RQ_PROD \
+ (DQ_PWM_OFFSET_TCM32_24ICID_BASE + 1)
+
#define DQ_REGION_SHIFT (12)
/* DPM */
@@ -360,6 +375,7 @@
/* Number of global Vport/QCN rate limiters */
#define MAX_QM_GLOBAL_RLS 256
+#define COMMON_MAX_QM_GLOBAL_RLS MAX_QM_GLOBAL_RLS
/* QM registers data */
#define QM_LINE_CRD_REG_WIDTH 16
@@ -379,7 +395,7 @@
#define CAU_FSM_ETH_TX 1
/* Number of Protocol Indices per Status Block */
-#define PIS_PER_SB_E4 12
+#define PIS_PER_SB 12
#define MAX_PIS_PER_SB PIS_PER_SB
#define CAU_HC_STOPPED_STATE 3
@@ -700,6 +716,13 @@ enum mf_mode {
MAX_MF_MODE
};
+/* Per-protocol packet duplication enable bit vector. If set, duplicate
+ * offloaded traffic to the LL2 debug queue.
+ */
+struct offload_pkt_dup_enable {
+ __le16 enable_vector;
+};
+
/* Per-protocol connection types */
enum protocol_type {
PROTOCOLID_TCP_ULP,
@@ -717,6 +740,12 @@ enum protocol_type {
MAX_PROTOCOL_TYPE
};
+/* Pstorm packet duplication config */
+struct pstorm_pkt_dup_cfg {
+ struct offload_pkt_dup_enable enable;
+ __le16 reserved[3];
+};
+
struct regpair {
__le32 lo;
__le32 hi;
@@ -728,10 +757,24 @@ struct rdma_eqe_destroy_qp {
u8 reserved[4];
};
+/* RoCE Suspend Event Data */
+struct rdma_eqe_suspend_qp {
+ __le32 cid;
+ u8 reserved[4];
+};
+
/* RDMA Event Data Union */
union rdma_eqe_data {
struct regpair async_handle;
struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+ struct rdma_eqe_suspend_qp rdma_suspend_qp_data;
+};
+
+/* Tstorm packet duplication config */
+struct tstorm_pkt_dup_cfg {
+ struct offload_pkt_dup_enable enable;
+ __le16 reserved;
+ __le32 cid;
};
struct tstorm_queue_zone {
@@ -891,6 +934,15 @@ struct db_legacy_addr {
#define DB_LEGACY_ADDR_ICID_SHIFT 5
};
+/* Structure for doorbell address, in legacy mode, without DEMS */
+struct db_legacy_wo_dems_addr {
+ __le32 addr;
+#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_MASK 0x3
+#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_WO_DEMS_ADDR_ICID_MASK 0x3FFFFFFF
+#define DB_LEGACY_WO_DEMS_ADDR_ICID_SHIFT 2
+};
+
/* Structure for doorbell address, in PWM mode */
struct db_pwm_addr {
__le32 addr;
@@ -907,6 +959,31 @@ struct db_pwm_addr {
};
/* Parameters to RDMA firmware, passed in EDPM doorbell */
+struct db_rdma_24b_icid_dpm_params {
+ __le32 params;
+#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_MASK 0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_SHIFT 16
+#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_MASK 0x7
+#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_SHIFT 24
+#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_SHIFT 27
+#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
+};
+
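
A hedged sketch of packing the new dword with the mask/shift pairs above (qed also ships a SET_FIELD helper; plain shifts keep the sketch self-contained). icid_ext carries bits 16..23 of the extended 24-bit connection id, and DPM_RDMA is assumed from the existing db_dpm_type enum.

    u32 params = 0;

    params |= (size & DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_MASK)
              << DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_SHIFT;
    params |= (DPM_RDMA & DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_MASK)
              << DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_SHIFT;
    params |= ((icid >> 16) & DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_MASK)
              << DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_SHIFT;
    params |= 1 << DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_SHIFT;
    dpm.params = cpu_to_le32(params);
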
+/* Parameters to RDMA firmware, passed in EDPM doorbell */
struct db_rdma_dpm_params {
__le32 params;
#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
@@ -1220,21 +1297,41 @@ struct rdif_task_context {
__le32 reserved2;
};
+/* Searcher Table struct */
+struct src_entry_header {
+ __le32 flags;
+#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_MASK 0x1
+#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_SHIFT 0
+#define SRC_ENTRY_HEADER_EMPTY_MASK 0x1
+#define SRC_ENTRY_HEADER_EMPTY_SHIFT 1
+#define SRC_ENTRY_HEADER_RESERVED_MASK 0x3FFFFFFF
+#define SRC_ENTRY_HEADER_RESERVED_SHIFT 2
+ __le32 magic_number;
+ struct regpair next_ptr;
+};
+
+/* Enumeration for address type */
+enum src_header_next_ptr_type_enum {
+ e_physical_addr,
+ e_logical_addr,
+ MAX_SRC_HEADER_NEXT_PTR_TYPE_ENUM
+};
+
/* Status block structure */
-struct status_block_e4 {
- __le16 pi_array[PIS_PER_SB_E4];
+struct status_block {
+ __le16 pi_array[PIS_PER_SB];
__le32 sb_num;
-#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF
-#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0
-#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F
-#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9
-#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF
-#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16
+#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
__le32 prod_index;
-#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF
-#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
-#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF
-#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24
+#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
};
/* Tdif context */
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index cd1207ad4ada..c84e08bc6802 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -67,6 +67,7 @@
/* Ethernet vport update constants */
#define ETH_FILTER_RULES_COUNT 10
#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
+#define ETH_RSS_IND_TABLE_MASK_SIZE_REGS (ETH_RSS_IND_TABLE_ENTRIES_NUM / 32)
#define ETH_RSS_KEY_SIZE_REGS 10
#define ETH_RSS_ENGINE_NUM_K2 207
#define ETH_RSS_ENGINE_NUM_BB 127
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 68eda1c21cde..7ba0abc867f1 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -150,49 +150,49 @@ struct ystorm_fcoe_task_st_ctx {
u8 reserved2[8];
};
-struct e4_ystorm_fcoe_task_ag_ctx {
+struct ystorm_fcoe_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 reg0;
u8 byte3;
@@ -206,73 +206,73 @@ struct e4_ystorm_fcoe_task_ag_ctx {
__le32 reg2;
};
-struct e4_tstorm_fcoe_task_ag_ctx {
+struct tstorm_fcoe_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
-#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
u8 flags1;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
u8 flags3;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 cleanup_state;
__le16 last_sent_tid;
__le32 rec_rr_tov_exp_timeout;
@@ -352,49 +352,49 @@ struct tstorm_fcoe_task_st_ctx {
struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
};
-struct e4_mstorm_fcoe_task_ag_ctx {
+struct mstorm_fcoe_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 cleanup_state;
__le32 received_bytes;
u8 byte3;
@@ -440,56 +440,56 @@ struct mstorm_fcoe_task_st_ctx {
struct scsi_cached_sges data_desc;
};
-struct e4_ustorm_fcoe_task_ag_ctx {
+struct ustorm_fcoe_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 global_cq_num;
@@ -499,18 +499,18 @@ struct e4_ustorm_fcoe_task_ag_ctx {
};
/* FCoE task context */
-struct e4_fcoe_task_context {
+struct fcoe_task_context {
struct ystorm_fcoe_task_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct tdif_task_context tdif_context;
- struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context;
- struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context;
+ struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
+ struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
struct tstorm_fcoe_task_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
- struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context;
+ struct mstorm_fcoe_task_ag_ctx mstorm_ag_context;
struct mstorm_fcoe_task_st_ctx mstorm_st_context;
- struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context;
+ struct ustorm_fcoe_task_ag_ctx ustorm_ag_context;
struct rdif_task_context rdif_context;
};
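
An editorial aside (not part of the patch): the *_MASK/*_SHIFT pairs renamed throughout these structures describe sub-byte fields packed into the flags bytes, and are consumed through token-pasting field helpers such as GET_FIELD()/SET_FIELD() from include/linux/qed/common_hsi.h. A minimal sketch, assuming the renamed header; the EX_-prefixed helpers are simplified re-statements, not the exact in-tree definitions:

/* Simplified field accessors mirroring the common_hsi.h helpers. */
#define EX_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define EX_SET_FIELD(value, name, flag) \
	((value) = ((value) & ~(name##_MASK << name##_SHIFT)) | \
		   (((flag) & name##_MASK) << name##_SHIFT))

static void fcoe_ctx_example(struct mstorm_fcoe_task_ag_ctx *ctx)
{
	/* RULE0EN is bit 1 of flags2 (mask 0x1, shift 1) after the rename. */
	EX_SET_FIELD(ctx->flags2, MSTORM_FCOE_TASK_AG_CTX_RULE0EN, 1);

	if (EX_GET_FIELD(ctx->flags2, MSTORM_FCOE_TASK_AG_CTX_RULE0EN))
		; /* rule 0 enable bit is set */
}

Pairing each field's mask and shift under one token-pasted name is what lets a single pair of helpers serve every field above.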
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 157019f716f1..1a60285a01e3 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -714,49 +714,49 @@ struct ystorm_iscsi_task_st_ctx {
union iscsi_task_hdr pdu_hdr;
};
-struct e4_ystorm_iscsi_task_ag_ctx {
+struct ystorm_iscsi_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
+#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
u8 flags1;
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 TTT;
u8 byte3;
@@ -764,49 +764,49 @@ struct e4_ystorm_iscsi_task_ag_ctx {
__le16 word1;
};
-struct e4_mstorm_iscsi_task_ag_ctx {
+struct mstorm_iscsi_task_ag_ctx {
u8 cdu_validation;
u8 byte1;
__le16 task_cid;
u8 flags0;
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
u8 flags1;
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 reg0;
u8 byte3;
@@ -814,56 +814,56 @@ struct e4_mstorm_iscsi_task_ag_ctx {
__le16 word1;
};
-struct e4_ustorm_iscsi_task_ag_ctx {
+struct ustorm_iscsi_task_ag_ctx {
u8 reserved;
u8 state;
__le16 icid;
u8 flags0;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
u8 flags1;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 rcv_cont_len;
@@ -952,14 +952,14 @@ struct ustorm_iscsi_task_st_ctx {
};
/* iscsi task context */
-struct e4_iscsi_task_context {
+struct iscsi_task_context {
struct ystorm_iscsi_task_st_ctx ystorm_st_context;
- struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context;
+ struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
struct regpair ystorm_ag_padding[2];
struct tdif_task_context tdif_context;
- struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context;
+ struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
struct regpair mstorm_ag_padding[2];
- struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context;
+ struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
struct mstorm_iscsi_task_st_ctx mstorm_st_context;
struct ustorm_iscsi_task_st_ctx ustorm_st_context;
struct rdif_task_context rdif_context;
@@ -1431,73 +1431,73 @@ struct ystorm_iscsi_stats_drv {
struct regpair iscsi_tx_tcp_pkt_cnt;
};
-struct e4_tstorm_iscsi_task_ag_ctx {
+struct tstorm_iscsi_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
diff --git a/include/linux/qed/nvmetcp_common.h b/include/linux/qed/nvmetcp_common.h
index 5a2ab0606308..cc7c7481a0e0 100644
--- a/include/linux/qed/nvmetcp_common.h
+++ b/include/linux/qed/nvmetcp_common.h
@@ -410,7 +410,7 @@ struct e5_ystorm_nvmetcp_task_ag_ctx {
u8 byte2;
u8 byte3;
u8 byte4;
- u8 e4_reserved7;
+ u8 reserved7;
};
struct e5_mstorm_nvmetcp_task_ag_ctx {
@@ -445,7 +445,7 @@ struct e5_mstorm_nvmetcp_task_ag_ctx {
u8 byte2;
u8 byte3;
u8 byte4;
- u8 e4_reserved7;
+ u8 reserved7;
};
struct e5_ustorm_nvmetcp_task_ag_ctx {
@@ -489,17 +489,17 @@ struct e5_ustorm_nvmetcp_task_ag_ctx {
#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
u8 flags3;
u8 flags4;
-#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED5_MASK 0x3
-#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED5_SHIFT 0
-#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED6_MASK 0x1
-#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED6_SHIFT 2
-#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED7_MASK 0x1
-#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED7_SHIFT 3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_SHIFT 3
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
u8 byte2;
u8 byte3;
- u8 e4_reserved8;
+ u8 reserved8;
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 rcv_cont_len;
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index f34dbd0db795..a84063492c71 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -268,14 +268,15 @@ static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain)
}
/**
- * @brief qed_chain_advance_page -
+ * qed_chain_advance_page(): Advance the next element across pages for a
+ * linked chain.
*
- * Advance the next element across pages for a linked chain
+ * @p_chain: Chain.
+ * @p_next_elem: Pointer to the next element.
+ * @idx_to_inc: Index to increment.
+ * @page_to_inc: Page to increment.
*
- * @param p_chain
- * @param p_next_elem
- * @param idx_to_inc
- * @param page_to_inc
+ * Return: Void.
*/
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
@@ -336,12 +337,14 @@ qed_chain_advance_page(struct qed_chain *p_chain,
} while (0)
/**
- * @brief qed_chain_return_produced -
+ * qed_chain_return_produced(): A chain in which the driver "Produces"
+ * elements should use this API
+ * to indicate previously produced elements
+ * are now consumed.
*
- * A chain in which the driver "Produces" elements should use this API
- * to indicate previous produced elements are now consumed.
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
@@ -353,15 +356,15 @@ static inline void qed_chain_return_produced(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_produce -
+ * qed_chain_produce(): A chain in which the driver "Produces"
+ * elements should use this to get a pointer to
+ * the next element which can be "Produced". It's the
+ * driver's responsibility to validate that the chain has
+ * room for a new element.
*
- * A chain in which the driver "Produces" elements should use this to get
- * a pointer to the next element which can be "Produced". It's driver
- * responsibility to validate that the chain has room for new element.
+ * @p_chain: Chain.
*
- * @param p_chain
- *
- * @return void*, a pointer to next element
+ * Return: void*, a pointer to the next element.
*/
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
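
An editorial sketch of the produce contract documented above: the caller, not qed_chain_produce(), checks for room. qed_chain_get_elem_left() is assumed to be available from this header; struct my_bd and the error value are illustrative only.

struct my_bd {
	__le64 addr;
	__le16 len;
};

static int my_bd_produce(struct qed_chain *chain, const struct my_bd *bd)
{
	struct my_bd *elem;

	if (!qed_chain_get_elem_left(chain))
		return -EBUSY;		/* no room: caller must not produce */

	elem = qed_chain_produce(chain);
	*elem = *bd;			/* fill the element just "produced" */
	return 0;
}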
@@ -395,14 +398,11 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_get_capacity -
- *
- * Get the maximum number of BDs in chain
+ * qed_chain_get_capacity(): Get the maximum number of BDs in chain.
*
- * @param p_chain
- * @param num
+ * @p_chain: Chain.
*
- * @return number of unusable BDs
+ * Return: number of usable BDs.
*/
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
@@ -410,12 +410,14 @@ static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_recycle_consumed -
+ * qed_chain_recycle_consumed(): Returns an element which was
+ * previously consumed;
+ * Increments producers so they could
+ * be written to FW.
*
- * Returns an element which was previously consumed;
- * Increments producers so they could be written to FW.
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
@@ -427,14 +429,13 @@ static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_consume -
+ * qed_chain_consume(): A Chain in which the driver utilizes data written
+ * by a different source (i.e., FW) should use this to
+ * access passed buffers.
*
- * A Chain in which the driver utilizes data written by a different source
- * (i.e., FW) should use this to access passed buffers.
+ * @p_chain: Chain.
*
- * @param p_chain
- *
- * @return void*, a pointer to the next buffer written
+ * Return: void*, a pointer to the next buffer written.
*/
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
@@ -468,9 +469,11 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_reset - Resets the chain to its start state
+ * qed_chain_reset(): Resets the chain to its start state.
+ *
+ * @p_chain: pointer to a previously allocated chain.
*
- * @param p_chain pointer to a previously allocated chain
+ * Return: Void.
*/
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
@@ -519,13 +522,12 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_get_last_elem -
+ * qed_chain_get_last_elem(): Returns a pointer to the last element of the
+ * chain.
*
- * Returns a pointer to the last element of the chain
+ * @p_chain: Chain.
*
- * @param p_chain
- *
- * @return void*
+ * Return: void*.
*/
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
@@ -563,10 +565,13 @@ out:
}
/**
- * @brief qed_chain_set_prod - sets the prod to the given value
+ * qed_chain_set_prod(): sets the prod to the given value.
+ *
+ * @p_chain: Chain.
+ * @prod_idx: Producer index.
+ * @p_prod_elem: Pointer to the producer element.
*
- * @param prod_idx
- * @param p_prod_elem
+ * Return: Void.
*/
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
u32 prod_idx, void *p_prod_elem)
@@ -610,9 +615,11 @@ static inline void qed_chain_set_prod(struct qed_chain *p_chain,
}
/**
- * @brief qed_chain_pbl_zero_mem - set chain memory to 0
+ * qed_chain_pbl_zero_mem(): set chain memory to 0.
+ *
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 4df0bf0a0864..e1bf3219b4e6 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -331,7 +331,7 @@ struct qed_eth_ops {
int (*configure_arfs_searcher)(struct qed_dev *cdev,
enum qed_filter_config_mode mode);
int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
- int (*req_bulletin_update_mac)(struct qed_dev *cdev, u8 *mac);
+ int (*req_bulletin_update_mac)(struct qed_dev *cdev, const u8 *mac);
};
const struct qed_eth_ops *qed_get_eth_ops(void);
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 850b98991670..0dae7fcc5ef2 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -24,6 +24,9 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <net/devlink.h>
+#define QED_TX_SWS_TIMER_DFLT 500
+#define QED_TWO_MSL_TIMER_DFLT 4000
+
enum dcbx_protocol_type {
DCBX_PROTOCOL_ISCSI,
DCBX_PROTOCOL_FCOE,
@@ -588,7 +591,7 @@ enum qed_int_mode {
};
struct qed_sb_info {
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
u32 sb_ack; /* Last given ack */
u16 igu_sb_id;
@@ -613,7 +616,6 @@ enum qed_hw_err_type {
enum qed_dev_type {
QED_DEV_TYPE_BB,
QED_DEV_TYPE_AH,
- QED_DEV_TYPE_E5,
};
struct qed_dev_info {
@@ -819,47 +821,47 @@ struct qed_common_cb_ops {
struct qed_selftest_ops {
/**
- * @brief selftest_interrupt - Perform interrupt test
+ * selftest_interrupt(): Perform interrupt test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_interrupt)(struct qed_dev *cdev);
/**
- * @brief selftest_memory - Perform memory test
+ * selftest_memory(): Perform memory test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_memory)(struct qed_dev *cdev);
/**
- * @brief selftest_register - Perform register test
+ * selftest_register(): Perform register test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_register)(struct qed_dev *cdev);
/**
- * @brief selftest_clock - Perform clock test
+ * selftest_clock(): Perform clock test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_clock)(struct qed_dev *cdev);
/**
- * @brief selftest_nvram - Perform nvram test
+ * selftest_nvram(): Perform nvram test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_nvram) (struct qed_dev *cdev);
};
@@ -927,47 +929,53 @@ struct qed_common_ops {
enum qed_hw_err_type err_type);
/**
- * @brief can_link_change - can the instance change the link or not
+ * can_link_change(): can the instance change the link or not.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return true if link-change is allowed, false otherwise.
+ * Return: true if link-change is allowed, false otherwise.
*/
bool (*can_link_change)(struct qed_dev *cdev);
/**
- * @brief set_link - set links according to params
+ * set_link(): set links according to params.
*
- * @param cdev
- * @param params - values used to override the default link configuration
+ * @cdev: Qed dev pointer.
+ * @params: values used to override the default link configuration.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_link)(struct qed_dev *cdev,
struct qed_link_params *params);
/**
- * @brief get_link - returns the current link state.
+ * get_link(): returns the current link state.
+ *
+ * @cdev: Qed dev pointer.
+ * @if_link: structure to be filled with current link configuration.
*
- * @param cdev
- * @param if_link - structure to be filled with current link configuration.
+ * Return: Void.
*/
void (*get_link)(struct qed_dev *cdev,
struct qed_link_output *if_link);
/**
- * @brief - drains chip in case Tx completions fail to arrive due to pause.
+ * drain(): drains chip in case Tx completions fail to arrive due to pause.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Int.
*/
int (*drain)(struct qed_dev *cdev);
/**
- * @brief update_msglvl - update module debug level
+ * update_msglvl(): update module debug level.
+ *
+ * @cdev: Qed dev pointer.
+ * @dp_module: Debug module.
+ * @dp_level: Debug level.
*
- * @param cdev
- * @param dp_module
- * @param dp_level
+ * Return: Void.
*/
void (*update_msglvl)(struct qed_dev *cdev,
u32 dp_module,
@@ -981,70 +989,73 @@ struct qed_common_ops {
struct qed_chain *p_chain);
/**
- * @brief nvm_flash - Flash nvm data.
+ * nvm_flash(): Flash nvm data.
*
- * @param cdev
- * @param name - file containing the data
+ * @cdev: Qed dev pointer.
+ * @name: file containing the data.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*nvm_flash)(struct qed_dev *cdev, const char *name);
/**
- * @brief nvm_get_image - reads an entire image from nvram
+ * nvm_get_image(): reads an entire image from nvram.
*
- * @param cdev
- * @param type - type of the request nvram image
- * @param buf - preallocated buffer to fill with the image
- * @param len - length of the allocated buffer
+ * @cdev: Qed dev pointer.
+ * @type: type of the request nvram image.
+ * @buf: preallocated buffer to fill with the image.
+ * @len: length of the allocated buffer.
*
- * @return 0 on success, error otherwise
+ * Return: 0 on success, error otherwise.
*/
int (*nvm_get_image)(struct qed_dev *cdev,
enum qed_nvm_images type, u8 *buf, u16 len);
/**
- * @brief set_coalesce - Configure Rx coalesce value in usec
+ * set_coalesce(): Configure Rx coalesce value in usec.
*
- * @param cdev
- * @param rx_coal - Rx coalesce value in usec
- * @param tx_coal - Tx coalesce value in usec
- * @param qid - Queue index
- * @param sb_id - Status Block Id
+ * @cdev: Qed dev pointer.
+ * @rx_coal: Rx coalesce value in usec.
+ * @tx_coal: Tx coalesce value in usec.
+ * @handle: Handle.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_coalesce)(struct qed_dev *cdev,
u16 rx_coal, u16 tx_coal, void *handle);
/**
- * @brief set_led - Configure LED mode
+ * set_led() - Configure LED mode.
*
- * @param cdev
- * @param mode - LED mode
+ * @cdev: Qed dev pointer.
+ * @mode: LED mode.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_led)(struct qed_dev *cdev,
enum qed_led_mode mode);
/**
- * @brief attn_clr_enable - Prevent attentions from being reasserted
+ * attn_clr_enable(): Prevent attentions from being reasserted.
*
- * @param cdev
- * @param clr_enable
+ * @cdev: Qed dev pointer.
+ * @clr_enable: Clear enable.
+ *
+ * Return: Void.
*/
void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable);
/**
- * @brief db_recovery_add - add doorbell information to the doorbell
- * recovery mechanism.
+ * db_recovery_add(): add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address of where db_data is stored.
+ * @db_width: Doorbell is 32b or 64b.
+ * @db_space: Doorbell recovery addresses are user or kernel space.
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address of where db_data is stored
- * @param db_is_32b - doorbell is 32b pr 64b
- * @param db_is_user - doorbell recovery addresses are user or kernel space
+ * Return: Int.
*/
int (*db_recovery_add)(struct qed_dev *cdev,
void __iomem *db_addr,
@@ -1053,114 +1064,130 @@ struct qed_common_ops {
enum qed_db_rec_space db_space);
/**
- * @brief db_recovery_del - remove doorbell information from the doorbell
+ * db_recovery_del(): remove doorbell information from the doorbell
* recovery mechanism. db_data serves as key (db_addr is not unique).
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address where db_data is stored. Serves as key for the
- * entry to delete.
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address where db_data is stored. Serves as key for the
+ * entry to delete.
+ *
+ * Return: Int.
*/
int (*db_recovery_del)(struct qed_dev *cdev,
void __iomem *db_addr, void *db_data);
/**
- * @brief recovery_process - Trigger a recovery process
+ * recovery_process(): Trigger a recovery process.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*recovery_process)(struct qed_dev *cdev);
/**
- * @brief recovery_prolog - Execute the prolog operations of a recovery process
+ * recovery_prolog(): Execute the prolog operations of a recovery process.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*recovery_prolog)(struct qed_dev *cdev);
/**
- * @brief update_drv_state - API to inform the change in the driver state.
+ * update_drv_state(): API to inform the change in the driver state.
*
- * @param cdev
- * @param active
+ * @cdev: Qed dev pointer.
+ * @active: New active state.
*
+ * Return: Int.
*/
int (*update_drv_state)(struct qed_dev *cdev, bool active);
/**
- * @brief update_mac - API to inform the change in the mac address
+ * update_mac(): API to inform the change in the mac address.
*
- * @param cdev
- * @param mac
+ * @cdev: Qed dev pointer.
+ * @mac: MAC.
*
+ * Return: Int.
*/
- int (*update_mac)(struct qed_dev *cdev, u8 *mac);
+ int (*update_mac)(struct qed_dev *cdev, const u8 *mac);
/**
- * @brief update_mtu - API to inform the change in the mtu
+ * update_mtu(): API to inform the change in the mtu.
*
- * @param cdev
- * @param mtu
+ * @cdev: Qed dev pointer.
+ * @mtu: MTU.
*
+ * Return: Int.
*/
int (*update_mtu)(struct qed_dev *cdev, u16 mtu);
/**
- * @brief update_wol - update of changes in the WoL configuration
+ * update_wol(): Update of changes in the WoL configuration.
*
- * @param cdev
- * @param enabled - true iff WoL should be enabled.
+ * @cdev: Qed dev pointer.
+ * @enabled: true iff WoL should be enabled.
+ *
+ * Return: Int.
*/
int (*update_wol) (struct qed_dev *cdev, bool enabled);
/**
- * @brief read_module_eeprom
+ * read_module_eeprom(): Read EEPROM.
+ *
+ * @cdev: Qed dev pointer.
+ * @buf: buffer.
+ * @dev_addr: PHY device memory region.
+ * @offset: offset into eeprom contents to be read.
+ * @len: buffer length, i.e., max bytes to be read.
*
- * @param cdev
- * @param buf - buffer
- * @param dev_addr - PHY device memory region
- * @param offset - offset into eeprom contents to be read
- * @param len - buffer length, i.e., max bytes to be read
+ * Return: Int.
*/
int (*read_module_eeprom)(struct qed_dev *cdev,
char *buf, u8 dev_addr, u32 offset, u32 len);
/**
- * @brief get_affin_hwfn_idx
+ * get_affin_hwfn_idx(): Get affine HW function.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
+ *
+ * Return: u8, the index of the affine hwfn.
*/
u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev);
/**
- * @brief read_nvm_cfg - Read NVM config attribute value.
- * @param cdev
- * @param buf - buffer
- * @param cmd - NVM CFG command id
- * @param entity_id - Entity id
+ * read_nvm_cfg(): Read NVM config attribute value.
+ *
+ * @cdev: Qed dev pointer.
+ * @buf: Buffer.
+ * @cmd: NVM CFG command id.
+ * @entity_id: Entity id.
*
+ * Return: Int.
*/
int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd,
u32 entity_id);
/**
- * @brief read_nvm_cfg - Read NVM config attribute value.
- * @param cdev
- * @param cmd - NVM CFG command id
+ * read_nvm_cfg_len(): Read NVM config attribute value.
*
- * @return config id length, 0 on error.
+ * @cdev: Qed dev pointer.
+ * @cmd: NVM CFG command id.
+ *
+ * Return: config id length, 0 on error.
*/
int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd);
/**
- * @brief set_grc_config - Configure value for grc config id.
- * @param cdev
- * @param cfg_id - grc config id
- * @param val - grc config value
+ * set_grc_config(): Configure value for grc config id.
+ *
+ * @cdev: Qed dev pointer.
+ * @cfg_id: grc config id.
+ * @val: grc config value.
*
+ * Return: Int.
*/
int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val);
@@ -1386,7 +1413,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
u16 rc = 0;
prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
- STATUS_BLOCK_E4_PROD_INDEX_MASK;
+ STATUS_BLOCK_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;
rc |= QED_SB_IDX;
@@ -1397,18 +1424,16 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
}
/**
+ * qed_sb_ack(): This function creates an update command for interrupts
+ * that is written to the IGU.
*
- * @brief This function creates an update command for interrupts that is
- * written to the IGU.
- *
- * @param sb_info - This is the structure allocated and
- * initialized per status block. Assumption is
- * that it was initialized using qed_sb_init
- * @param int_cmd - Enable/Disable/Nop
- * @param upd_flg - whether igu consumer should be
- * updated.
+ * @sb_info: This is the structure allocated and
+ * initialized per status block. Assumption is
+ * that it was initialized using qed_sb_init.
+ * @int_cmd: Enable/Disable/Nop.
+ * @upd_flg: Whether igu consumer should be updated.
*
- * @return inline void
+ * Return: inline void.
*/
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
enum igu_int_cmd int_cmd,
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index 04180d9af560..494cdc3cd840 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -182,7 +182,7 @@ struct qed_iscsi_cb_ops {
* @param stats - pointer to struct that would be filled
* with stats
* @return 0 on success, error otherwise.
- * @change_mac Change MAC of interface
+ * @change_mac: Change MAC of interface
* @param cdev
* @param handle - the connection handle.
* @param mac - new MAC to configure.
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index ff808d248883..5b67cd03276e 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -208,57 +208,57 @@ enum qed_ll2_xmit_flags {
struct qed_ll2_ops {
/**
- * @brief start - initializes ll2
+ * start(): Initializes ll2.
*
- * @param cdev
- * @param params - protocol driver configuration for the ll2.
+ * @cdev: Qed dev pointer.
+ * @params: Protocol driver configuration for the ll2.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
/**
- * @brief stop - stops the ll2
+ * stop(): Stops the ll2.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*stop)(struct qed_dev *cdev);
/**
- * @brief start_xmit - transmits an skb over the ll2 interface
+ * start_xmit(): Transmits an skb over the ll2 interface.
*
- * @param cdev
- * @param skb
- * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags.
+ * @cdev: Qed dev pointer.
+ * @skb: SKB.
+ * @xmit_flags: Transmit options defined by the enum qed_ll2_xmit_flags.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb,
unsigned long xmit_flags);
/**
- * @brief register_cb_ops - protocol driver register the callback for Rx/Tx
+ * register_cb_ops(): Protocol driver register the callback for Rx/Tx
* packets. Should be called before `start'.
*
- * @param cdev
- * @param cookie - to be passed to the callback functions.
- * @param ops - the callback functions to register for Rx / Tx.
+ * @cdev: Qed dev pointer.
+ * @cookie: to be passed to the callback functions.
+ * @ops: the callback functions to register for Rx / Tx.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
void (*register_cb_ops)(struct qed_dev *cdev,
const struct qed_ll2_cb_ops *ops,
void *cookie);
/**
- * @brief get LL2 related statistics
+ * get_stats(): Get LL2 related statistics.
*
- * @param cdev
- * @param stats - pointer to struct that would be filled with stats
+ * @cdev: Qed dev pointer.
+ * @stats: Pointer to struct that would be filled with stats.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
};
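
An editorial sketch of the bring-up ordering documented above (register_cb_ops() before start()). The rx_cb signature and the qed_ll2_params fields are assumptions, and every value here is illustrative.

static int my_rx_cb(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
{
	kfree_skb(skb);		/* placeholder: a real driver consumes it */
	return 0;
}

static const struct qed_ll2_cb_ops my_cb_ops = {
	.rx_cb = my_rx_cb,
};

static int my_ll2_bringup(struct qed_dev *cdev, const struct qed_ll2_ops *ops,
			  void *cookie)
{
	struct qed_ll2_params params = { .mtu = 1500 };

	/* Callbacks must be in place before start(), per the kernel-doc. */
	ops->register_cb_ops(cdev, &my_cb_ops, cookie);
	return ops->start(cdev, &params);
}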
diff --git a/include/linux/qed/qed_nvmetcp_if.h b/include/linux/qed/qed_nvmetcp_if.h
index 14671bc19ed1..1d51df347560 100644
--- a/include/linux/qed/qed_nvmetcp_if.h
+++ b/include/linux/qed/qed_nvmetcp_if.h
@@ -171,6 +171,23 @@ struct nvmetcp_task_params {
* @param dest_port
* @clear_all_filters: Clear all filters.
* @param cdev
+ * @init_read_io: Init read IO.
+ * @task_params
+ * @cmd_pdu_header
+ * @nvme_cmd
+ * @sgl_task_params
+ * @init_write_io: Init write IO.
+ * @task_params
+ * @cmd_pdu_header
+ * @nvme_cmd
+ * @sgl_task_params
+ * @init_icreq_exchange: Exchange ICReq.
+ * @task_params
+ * @init_conn_req_pdu_hdr
+ * @tx_sgl_task_params
+ * @rx_sgl_task_params
+ * @init_task_cleanup: Init task cleanup.
+ * @task_params
*/
struct qed_nvmetcp_ops {
const struct qed_common_ops *common;
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index aeb242cefebf..3b76c07fbcf8 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -662,7 +662,8 @@ struct qed_rdma_ops {
u8 connection_handle,
struct qed_ll2_stats *p_stats);
int (*ll2_set_mac_filter)(struct qed_dev *cdev,
- u8 *old_mac_address, u8 *new_mac_address);
+ u8 *old_mac_address,
+ const u8 *new_mac_address);
int (*iwarp_set_engine_affin)(struct qed_dev *cdev, bool b_reset);
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index bab078b25834..6dfed163ab6c 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -27,6 +27,7 @@
#define RDMA_MAX_PDS (64 * 1024)
#define RDMA_MAX_XRC_SRQS (1024)
#define RDMA_MAX_SRQS (32 * 1024)
+#define RDMA_MAX_IRQ_ELEMS_IN_PAGE (128)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e12b524426b0..c1a927ddec64 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1471,6 +1471,7 @@ struct task_struct {
mce_whole_page : 1,
__mce_reserved : 62;
struct callback_head mce_kill_me;
+ int mce_count;
#endif
#ifdef CONFIG_KRETPROBES
@@ -1719,7 +1720,7 @@ extern struct pid *cad_pid;
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
-static inline bool is_percpu_thread(void)
+static __always_inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
return (current->flags & PF_NO_SETAFFINITY) &&
diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
index 21c3771e6a56..988528b5da43 100644
--- a/include/linux/secretmem.h
+++ b/include/linux/secretmem.h
@@ -23,7 +23,7 @@ static inline bool page_is_secretmem(struct page *page)
mapping = (struct address_space *)
((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
- if (mapping != page->mapping)
+ if (!mapping || mapping != page->mapping)
return false;
return mapping->a_ops == &secretmem_aops;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 841e2f0f5240..cb96f1e6460c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -652,6 +652,7 @@ typedef unsigned char *sk_buff_data_t;
* @tc_at_ingress: used within tc_classify to distinguish in/egress
* @redirected: packet was redirected by packet classifier
* @from_ingress: packet was redirected from the ingress path
+ * @nf_skip_egress: packet shall skip nf egress - see netfilter_netdev.h
* @peeked: this packet has been seen already, so stats have been
* done for it, don't do them again
* @nf_trace: netfilter packet trace flag
@@ -868,6 +869,9 @@ struct sk_buff {
#ifdef CONFIG_NET_REDIRECT
__u8 from_ingress:1;
#endif
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ __u8 nf_skip_egress:1;
+#endif
#ifdef CONFIG_TLS_DEVICE
__u8 decrypted:1;
#endif
diff --git a/include/linux/soc/marvell/octeontx2/asm.h b/include/linux/soc/marvell/octeontx2/asm.h
index fa1d6af0164e..d683251a0b40 100644
--- a/include/linux/soc/marvell/octeontx2/asm.h
+++ b/include/linux/soc/marvell/octeontx2/asm.h
@@ -5,6 +5,7 @@
#ifndef __SOC_OTX2_ASM_H
#define __SOC_OTX2_ASM_H
+#include <linux/types.h>
#if defined(CONFIG_ARM64)
/*
* otx2_lmt_flush is used for LMT store operation.
@@ -34,9 +35,23 @@
: [rf] "+r"(val) \
: [rs] "r"(addr)); \
})
+
+static inline u64 otx2_atomic64_fetch_add(u64 incr, u64 *ptr)
+{
+ u64 result;
+
+ asm volatile (".cpu generic+lse\n"
+ "ldadda %x[i], %x[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
#else
#define otx2_lmt_flush(ioaddr) ({ 0; })
#define cn10k_lmt_flush(val, addr) ({ addr = val; })
+#define otx2_atomic64_fetch_add(incr, ptr) ({ incr; })
#endif
#endif /* __SOC_OTX2_ASM_H */
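
A hypothetical caller of the new arm64 helper, added only to illustrate the fetch-and-add contract (the value before the add comes back); the wrapper name and use case are invented.

static inline u64 reserve_slots(u64 *shared_counter, u64 n)
{
	/* ldadda performs an atomic add with acquire semantics and
	 * returns the counter's previous value, so the caller owns
	 * slots [old, old + n).
	 */
	return otx2_atomic64_fetch_add(n, shared_counter);
}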
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 8371bca13729..6b0b686f6f90 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -531,6 +531,9 @@ struct spi_controller {
/* I/O mutex */
struct mutex io_mutex;
+ /* Used to avoid adding the same CS twice */
+ struct mutex add_lock;
+
/* lock and mutex for SPI bus locking */
spinlock_t bus_lock_spinlock;
struct mutex bus_lock_mutex;
diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
index a9f9c5714e65..fe95f0922526 100644
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@ -16,23 +16,8 @@
* When function tracing occurs, the following steps are made:
* If arch does not support a ftrace feature:
* call internal function (uses INTERNAL bits) which calls...
- * If callback is registered to the "global" list, the list
- * function is called and recursion checks the GLOBAL bits.
- * then this function calls...
* The function callback, which can use the FTRACE bits to
* check for recursion.
- *
- * Now if the arch does not support a feature, and it calls
- * the global list function which calls the ftrace callback
- * all three of these steps will do a recursion protection.
- * There's no reason to do one if the previous caller already
- * did. The recursion that we are protecting against will
- * go through the same steps again.
- *
- * To prevent the multiple recursion checks, if a recursion
- * bit is set that is higher than the MAX bit of the current
- * check, then we know that the check was made by the previous
- * caller, and we can skip the current check.
*/
enum {
/* Function recursion bits */
@@ -40,12 +25,14 @@ enum {
TRACE_FTRACE_NMI_BIT,
TRACE_FTRACE_IRQ_BIT,
TRACE_FTRACE_SIRQ_BIT,
+ TRACE_FTRACE_TRANSITION_BIT,
- /* INTERNAL_BITs must be greater than FTRACE_BITs */
+ /* Internal use recursion bits */
TRACE_INTERNAL_BIT,
TRACE_INTERNAL_NMI_BIT,
TRACE_INTERNAL_IRQ_BIT,
TRACE_INTERNAL_SIRQ_BIT,
+ TRACE_INTERNAL_TRANSITION_BIT,
TRACE_BRANCH_BIT,
/*
@@ -86,12 +73,6 @@ enum {
*/
TRACE_GRAPH_NOTRACE_BIT,
- /*
- * When transitioning between context, the preempt_count() may
- * not be correct. Allow for a single recursion to cover this case.
- */
- TRACE_TRANSITION_BIT,
-
/* Used to prevent recursion recording from recursing. */
TRACE_RECORD_RECURSION_BIT,
};
@@ -113,12 +94,10 @@ enum {
#define TRACE_CONTEXT_BITS 4
#define TRACE_FTRACE_START TRACE_FTRACE_BIT
-#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
#define TRACE_LIST_START TRACE_INTERNAL_BIT
-#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
-#define TRACE_CONTEXT_MASK TRACE_LIST_MAX
+#define TRACE_CONTEXT_MASK ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
/*
* Used for setting context
@@ -132,6 +111,7 @@ enum {
TRACE_CTX_IRQ,
TRACE_CTX_SOFTIRQ,
TRACE_CTX_NORMAL,
+ TRACE_CTX_TRANSITION,
};
static __always_inline int trace_get_context_bit(void)
@@ -160,45 +140,34 @@ extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
#endif
static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
- int start, int max)
+ int start)
{
unsigned int val = READ_ONCE(current->trace_recursion);
int bit;
- /* A previous recursion check was made */
- if ((val & TRACE_CONTEXT_MASK) > max)
- return 0;
-
bit = trace_get_context_bit() + start;
if (unlikely(val & (1 << bit))) {
/*
* It could be that preempt_count has not been updated during
* a switch between contexts. Allow for a single recursion.
*/
- bit = TRACE_TRANSITION_BIT;
+ bit = TRACE_CTX_TRANSITION + start;
if (val & (1 << bit)) {
do_ftrace_record_recursion(ip, pip);
return -1;
}
- } else {
- /* Normal check passed, clear the transition to allow it again */
- val &= ~(1 << TRACE_TRANSITION_BIT);
}
val |= 1 << bit;
current->trace_recursion = val;
barrier();
- return bit + 1;
+ return bit;
}
static __always_inline void trace_clear_recursion(int bit)
{
- if (!bit)
- return;
-
barrier();
- bit--;
trace_recursion_clear(bit);
}
@@ -214,7 +183,7 @@ static __always_inline void trace_clear_recursion(int bit)
static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
unsigned long parent_ip)
{
- return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START, TRACE_FTRACE_MAX);
+ return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START);
}
/**
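
With this change, ftrace_test_recursion_trylock() returns the recursion bit itself (or -1 when recursion is detected) instead of bit + 1, so callers now test for a negative value rather than zero. A minimal usage sketch, assuming a simplified callback signature:

	static void my_callback(unsigned long ip, unsigned long parent_ip)
	{
		int bit;

		bit = ftrace_test_recursion_trylock(ip, parent_ip);
		if (bit < 0)
			return;	/* recursing; drop this event */

		/* ... tracing work ... */

		ftrace_test_recursion_unlock(bit);
	}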
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 3e80c4bc66f7..2564b7434b4d 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -197,6 +197,8 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
mem_cgroup_handle_over_high();
blkcg_maybe_throttle_current();
+
+ rseq_handle_notify_resume(NULL, regs);
}
/*
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index e81856c0ba13..e8ec116c916b 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -83,6 +83,11 @@ static inline u64 u64_stats_read(const u64_stats_t *p)
return local64_read(&p->v);
}
+static inline void u64_stats_set(u64_stats_t *p, u64 val)
+{
+ local64_set(&p->v, val);
+}
+
static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
local64_add(val, &p->v);
@@ -104,6 +109,11 @@ static inline u64 u64_stats_read(const u64_stats_t *p)
return p->v;
}
+static inline void u64_stats_set(u64_stats_t *p, u64 val)
+{
+ p->v = val;
+}
+
static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
p->v += val;
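
A short sketch of how the new u64_stats_set() pairs with the existing syncp protocol on configurations without local64; the surrounding struct and function names are illustrative:

	struct my_stats {
		u64_stats_t bytes;
		struct u64_stats_sync syncp;	/* u64_stats_init() before use */
	};

	static void my_stats_reset(struct my_stats *s)
	{
		u64_stats_update_begin(&s->syncp);
		u64_stats_set(&s->bytes, 0);
		u64_stats_update_end(&s->syncp);
	}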
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 5265024e8b90..207101a9c5c3 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -27,6 +27,12 @@ enum iter_type {
ITER_DISCARD,
};
+struct iov_iter_state {
+ size_t iov_offset;
+ size_t count;
+ unsigned long nr_segs;
+};
+
struct iov_iter {
u8 iter_type;
bool data_source;
@@ -47,7 +53,6 @@ struct iov_iter {
};
loff_t xarray_start;
};
- size_t truncated;
};
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
@@ -55,6 +60,14 @@ static inline enum iter_type iov_iter_type(const struct iov_iter *i)
return i->iter_type;
}
+static inline void iov_iter_save_state(struct iov_iter *iter,
+ struct iov_iter_state *state)
+{
+ state->iov_offset = iter->iov_offset;
+ state->count = iter->count;
+ state->nr_segs = iter->nr_segs;
+}
+
static inline bool iter_is_iovec(const struct iov_iter *i)
{
return iov_iter_type(i) == ITER_IOVEC;
@@ -233,6 +246,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
@@ -255,10 +269,8 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
* conversion in assignment is by definition greater than all
* values of size_t, including old i->count.
*/
- if (i->count > count) {
- i->truncated += i->count - count;
+ if (i->count > count)
i->count = count;
- }
}
/*
@@ -267,7 +279,6 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
*/
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
- i->truncated -= count - i->count;
i->count = count;
}
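
iov_iter_save_state() and iov_iter_restore() let a caller snapshot the iterator before an I/O attempt and rewind any partial advance on failure, which is what replaces the removed ->truncated bookkeeping. A hedged sketch; do_submit() and do_submit_blocking() stand in for whatever consumes the iterator:

	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(iter, &state);
	ret = do_submit(iter);
	if (ret == -EAGAIN) {
		/* undo the partial advance and retry */
		iov_iter_restore(iter, &state);
		ret = do_submit_blocking(iter);
	}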
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 548a028f2dab..2c1fc9212cf2 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -124,6 +124,7 @@ struct usb_hcd {
#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
#define HCD_FLAG_DEAD 6 /* controller has died? */
#define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */
+#define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */
/* The flags can be tested using these macros; they are likely to
* be slightly faster than test_bit().
@@ -134,6 +135,7 @@ struct usb_hcd {
#define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD))
+#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER))
/*
* Specifies if interfaces are authorized by default
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index eb70cabe6e7f..33a4240e6a6f 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -127,6 +127,8 @@ static inline long get_ucounts_value(struct ucounts *ucounts, enum ucount_type t
long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
+long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type);
+void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum ucount_type type);
bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max);
static inline void set_rlimit_ucount_max(struct user_namespace *ns,
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 2ebef6b1a3d6..74d3c1efd9bb 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -399,9 +399,8 @@ extern struct workqueue_struct *system_freezable_power_efficient_wq;
* RETURNS:
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
-struct workqueue_struct *alloc_workqueue(const char *fmt,
- unsigned int flags,
- int max_active, ...);
+__printf(1, 4) struct workqueue_struct *
+alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
/**
* alloc_ordered_workqueue - allocate an ordered workqueue
diff --git a/include/net/act_api.h b/include/net/act_api.h
index f19f7f4a463c..b5b624c7e488 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -30,13 +30,13 @@ struct tc_action {
atomic_t tcfa_bindcnt;
int tcfa_action;
struct tcf_t tcfa_tm;
- struct gnet_stats_basic_packed tcfa_bstats;
- struct gnet_stats_basic_packed tcfa_bstats_hw;
+ struct gnet_stats_basic_sync tcfa_bstats;
+ struct gnet_stats_basic_sync tcfa_bstats_hw;
struct gnet_stats_queue tcfa_qstats;
struct net_rate_estimator __rcu *tcfa_rate_est;
spinlock_t tcfa_lock;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats_hw;
struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie __rcu *act_cookie;
struct tcf_chain __rcu *goto_chain;
@@ -206,7 +206,7 @@ static inline void tcf_action_update_bstats(struct tc_action *a,
struct sk_buff *skb)
{
if (likely(a->cpu_bstats)) {
- bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(a->cpu_bstats), skb);
return;
}
spin_lock(&a->tcfa_lock);
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 8b7eb46ad72d..03d409de61ad 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -304,7 +304,7 @@ extern spinlock_t ax25_list_lock;
void ax25_cb_add(ax25_cb *);
struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int);
struct sock *ax25_get_socket(ax25_address *, ax25_address *, int);
-ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *,
+ax25_cb *ax25_find_cb(const ax25_address *, ax25_address *, ax25_digi *,
struct net_device *);
void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
void ax25_destroy_socket(ax25_cb *);
@@ -384,10 +384,11 @@ struct ax25_linkfail {
void ax25_linkfail_register(struct ax25_linkfail *lf);
void ax25_linkfail_release(struct ax25_linkfail *lf);
-int __must_check ax25_listen_register(ax25_address *, struct net_device *);
-void ax25_listen_release(ax25_address *, struct net_device *);
+int __must_check ax25_listen_register(const ax25_address *,
+ struct net_device *);
+void ax25_listen_release(const ax25_address *, struct net_device *);
int(*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
-int ax25_listen_mine(ax25_address *, struct net_device *);
+int ax25_listen_mine(const ax25_address *, struct net_device *);
void ax25_link_failed(ax25_cb *, int);
int ax25_protocol_is_registered(unsigned int);
@@ -401,8 +402,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb);
extern const struct header_ops ax25_header_ops;
/* ax25_out.c */
-ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *,
- ax25_digi *, struct net_device *);
+ax25_cb *ax25_send_frame(struct sk_buff *, int, const ax25_address *,
+ ax25_address *, ax25_digi *, struct net_device *);
void ax25_output(ax25_cb *, int, struct sk_buff *);
void ax25_kick(ax25_cb *);
void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 9125effbf448..3271870fd85e 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -153,6 +153,30 @@ struct bt_voice {
#define BT_SCM_PKT_STATUS 0x03
+#define BT_CODEC 19
+
+struct bt_codec_caps {
+ __u8 len;
+ __u8 data[];
+} __packed;
+
+struct bt_codec {
+ __u8 id;
+ __u16 cid;
+ __u16 vid;
+ __u8 data_path;
+ __u8 num_caps;
+} __packed;
+
+struct bt_codecs {
+ __u8 num_codecs;
+ struct bt_codec codecs[];
+} __packed;
+
+#define BT_CODEC_CVSD 0x02
+#define BT_CODEC_TRANSPARENT 0x03
+#define BT_CODEC_MSBC 0x05
+
__printf(1, 2)
void bt_info(const char *fmt, ...);
__printf(1, 2)
@@ -420,6 +444,72 @@ out:
return NULL;
}
+/* Shall not be called with lock_sock held */
+static inline struct sk_buff *bt_skb_sendmsg(struct sock *sk,
+ struct msghdr *msg,
+ size_t len, size_t mtu,
+ size_t headroom, size_t tailroom)
+{
+ struct sk_buff *skb;
+ size_t size = min_t(size_t, len, mtu);
+ int err;
+
+ skb = bt_skb_send_alloc(sk, size + headroom + tailroom,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return ERR_PTR(err);
+
+ skb_reserve(skb, headroom);
+ skb_tailroom_reserve(skb, mtu, tailroom);
+
+ if (!copy_from_iter_full(skb_put(skb, size), size, &msg->msg_iter)) {
+ kfree_skb(skb);
+ return ERR_PTR(-EFAULT);
+ }
+
+ skb->priority = sk->sk_priority;
+
+ return skb;
+}
+
+/* Similar to bt_skb_sendmsg but can split the msg into multiple fragments
+ * according to the MTU.
+ */
+static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
+ struct msghdr *msg,
+ size_t len, size_t mtu,
+ size_t headroom, size_t tailroom)
+{
+ struct sk_buff *skb, **frag;
+
+ skb = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
+ if (IS_ERR_OR_NULL(skb))
+ return skb;
+
+ len -= skb->len;
+ if (!len)
+ return skb;
+
+ /* Add remaining data over MTU as continuation fragments */
+ frag = &skb_shinfo(skb)->frag_list;
+ while (len) {
+ struct sk_buff *tmp;
+
+ tmp = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
+ if (IS_ERR(tmp)) {
+ kfree_skb(skb);
+ return tmp;
+ }
+
+ len -= tmp->len;
+
+ *frag = tmp;
+ frag = &(*frag)->next;
+ }
+
+ return skb;
+}
+
int bt_to_errno(u16 code);
void hci_sock_set_flag(struct sock *sk, int nr);
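
A sketch of how a protocol sendmsg path might use the new helper; HDR_LEN and my_xmit() are illustrative, not real API:

	skb = bt_skb_sendmmsg(sk, msg, len, conn->mtu, HDR_LEN, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* skb holds the first MTU-sized chunk; the remainder hangs off
	 * skb_shinfo(skb)->frag_list in further MTU-sized fragments
	 */
	return my_xmit(conn, skb);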
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index b80415011dcd..63065bc01b76 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -330,6 +330,8 @@ enum {
HCI_ENABLE_LL_PRIVACY,
HCI_CMD_PENDING,
HCI_FORCE_NO_MITM,
+ HCI_QUALITY_REPORT,
+ HCI_OFFLOAD_CODECS_ENABLED,
__HCI_NUM_FLAGS,
};
@@ -871,6 +873,40 @@ struct hci_cp_logical_link_cancel {
__u8 flow_spec_id;
} __packed;
+#define HCI_OP_ENHANCED_SETUP_SYNC_CONN 0x043d
+struct hci_coding_format {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+} __packed;
+
+struct hci_cp_enhanced_setup_sync_conn {
+ __le16 handle;
+ __le32 tx_bandwidth;
+ __le32 rx_bandwidth;
+ struct hci_coding_format tx_coding_format;
+ struct hci_coding_format rx_coding_format;
+ __le16 tx_codec_frame_size;
+ __le16 rx_codec_frame_size;
+ __le32 in_bandwidth;
+ __le32 out_bandwidth;
+ struct hci_coding_format in_coding_format;
+ struct hci_coding_format out_coding_format;
+ __le16 in_coded_data_size;
+ __le16 out_coded_data_size;
+ __u8 in_pcm_data_format;
+ __u8 out_pcm_data_format;
+ __u8 in_pcm_sample_payload_msb_pos;
+ __u8 out_pcm_sample_payload_msb_pos;
+ __u8 in_data_path;
+ __u8 out_data_path;
+ __u8 in_transport_unit_size;
+ __u8 out_transport_unit_size;
+ __le16 max_latency;
+ __le16 pkt_type;
+ __u8 retrans_effort;
+} __packed;
+
struct hci_rp_logical_link_cancel {
__u8 status;
__u8 phy_handle;
@@ -1250,6 +1286,14 @@ struct hci_rp_read_local_oob_ext_data {
__u8 rand256[16];
} __packed;
+#define HCI_CONFIGURE_DATA_PATH 0x0c83
+struct hci_op_configure_data_path {
+ __u8 direction;
+ __u8 data_path_id;
+ __u8 vnd_len;
+ __u8 vnd_data[];
+} __packed;
+
#define HCI_OP_READ_LOCAL_VERSION 0x1001
struct hci_rp_read_local_version {
__u8 status;
@@ -1307,6 +1351,28 @@ struct hci_rp_read_data_block_size {
} __packed;
#define HCI_OP_READ_LOCAL_CODECS 0x100b
+struct hci_std_codecs {
+ __u8 num;
+ __u8 codec[];
+} __packed;
+
+struct hci_vnd_codec {
+ /* company id */
+ __le16 cid;
+ /* vendor codec id */
+ __le16 vid;
+} __packed;
+
+struct hci_vnd_codecs {
+ __u8 num;
+ struct hci_vnd_codec codec[];
+} __packed;
+
+struct hci_rp_read_local_supported_codecs {
+ __u8 status;
+ struct hci_std_codecs std_codecs;
+ struct hci_vnd_codecs vnd_codecs;
+} __packed;
#define HCI_OP_READ_LOCAL_PAIRING_OPTS 0x100c
struct hci_rp_read_local_pairing_opts {
@@ -1315,6 +1381,54 @@ struct hci_rp_read_local_pairing_opts {
__u8 max_key_size;
} __packed;
+#define HCI_OP_READ_LOCAL_CODECS_V2 0x100d
+struct hci_std_codec_v2 {
+ __u8 id;
+ __u8 transport;
+} __packed;
+
+struct hci_std_codecs_v2 {
+ __u8 num;
+ struct hci_std_codec_v2 codec[];
+} __packed;
+
+struct hci_vnd_codec_v2 {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+ __u8 transport;
+} __packed;
+
+struct hci_vnd_codecs_v2 {
+ __u8 num;
+ struct hci_vnd_codec_v2 codec[];
+} __packed;
+
+struct hci_rp_read_local_supported_codecs_v2 {
+ __u8 status;
+ struct hci_std_codecs_v2 std_codecs;
+ struct hci_vnd_codecs_v2 vendor_codecs;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_CODEC_CAPS 0x100e
+struct hci_op_read_local_codec_caps {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+ __u8 transport;
+ __u8 direction;
+} __packed;
+
+struct hci_codec_caps {
+ __u8 len;
+ __u8 data[];
+} __packed;
+
+struct hci_rp_read_local_codec_caps {
+ __u8 status;
+ __u8 num_caps;
+} __packed;
+
#define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b
struct hci_rp_read_page_scan_activity {
__u8 status;
@@ -2551,6 +2665,9 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
#define hci_iso_data_len(h) ((h) & 0x3fff)
#define hci_iso_data_flags(h) ((h) >> 14)
+/* codec transport types */
+#define HCI_TRANSPORT_SCO_ESCO 0x01
+
/* le24 support */
static inline void hci_cpu_to_le24(__u32 val, __u8 dst[3])
{
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index a7360c8c72f8..dd8840e70e25 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -131,6 +131,17 @@ struct bdaddr_list {
u8 bdaddr_type;
};
+struct codec_list {
+ struct list_head list;
+ u8 id;
+ __u16 cid;
+ __u16 vid;
+ u8 transport;
+ u8 num_caps;
+ u32 len;
+ struct hci_codec_caps caps[];
+};
+
struct bdaddr_list_with_irk {
struct list_head list;
bdaddr_t bdaddr;
@@ -536,6 +547,7 @@ struct hci_dev {
struct list_head pend_le_conns;
struct list_head pend_le_reports;
struct list_head blocked_keys;
+ struct list_head local_codecs;
struct hci_dev_stats stat;
@@ -605,7 +617,12 @@ struct hci_dev {
int (*set_diag)(struct hci_dev *hdev, bool enable);
int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
void (*cmd_timeout)(struct hci_dev *hdev);
- bool (*prevent_wake)(struct hci_dev *hdev);
+ bool (*wakeup)(struct hci_dev *hdev);
+ int (*set_quality_report)(struct hci_dev *hdev, bool enable);
+ int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path);
+ int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type,
+ struct bt_codec *codec, __u8 *vnd_len,
+ __u8 **vnd_data);
};
#define HCI_PHY_HANDLE(handle) (handle & 0xff)
@@ -699,6 +716,7 @@ struct hci_conn {
struct amp_mgr *amp_mgr;
struct hci_conn *link;
+ struct bt_codec codec;
void (*connect_cfm_cb) (struct hci_conn *conn, u8 status);
void (*security_cfm_cb) (struct hci_conn *conn, u8 status);
@@ -760,6 +778,7 @@ extern struct mutex hci_cb_list_lock;
hci_dev_clear_flag(hdev, HCI_LE_ADV); \
hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\
hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \
+ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \
} while (0)
/* ----- HCI interface to upper protocols ----- */
@@ -1099,13 +1118,14 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
u16 conn_timeout,
enum conn_reasons conn_reason);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
- u8 dst_type, u8 sec_level, u16 conn_timeout,
- u8 role, bdaddr_t *direct_rpa);
+ u8 dst_type, bool dst_resolved, u8 sec_level,
+ u16 conn_timeout, u8 role,
+ bdaddr_t *direct_rpa);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type,
enum conn_reasons conn_reason);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
- __u16 setting);
+ __u16 setting, struct bt_codec *codec);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
@@ -1360,6 +1380,8 @@ int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
u16 scan_rsp_len, u8 *scan_rsp_data);
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
+u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance);
+bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance);
void hci_adv_monitors_clear(struct hci_dev *hdev);
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
@@ -1442,6 +1464,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
/* Use LL Privacy based address resolution if supported */
#define use_ll_privacy(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
+/* Use enhanced synchronous connection if command is supported */
+#define enhanced_sco_capable(dev) ((dev)->commands[29] & 0x08)
+
/* Use ext scanning if set ext scan param and ext scan enable is supported */
#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
((dev)->commands[37] & 0x40))
@@ -1609,43 +1634,6 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
mutex_unlock(&hci_cb_list_lock);
}
-static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type,
- size_t *data_len)
-{
- size_t parsed = 0;
-
- if (eir_len < 2)
- return NULL;
-
- while (parsed < eir_len - 1) {
- u8 field_len = eir[0];
-
- if (field_len == 0)
- break;
-
- parsed += field_len + 1;
-
- if (parsed > eir_len)
- break;
-
- if (eir[1] != type) {
- eir += field_len + 1;
- continue;
- }
-
- /* Zero length data */
- if (field_len == 1)
- return NULL;
-
- if (data_len)
- *data_len = field_len - 1;
-
- return &eir[2];
- }
-
- return NULL;
-}
-
static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
{
if (addr_type != ADDR_LE_DEV_RANDOM)
@@ -1867,4 +1855,9 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
#define SCO_AIRMODE_CVSD 0x0000
#define SCO_AIRMODE_TRANSP 0x0003
+#define LOCAL_CODEC_ACL_MASK BIT(0)
+#define LOCAL_CODEC_SCO_MASK BIT(1)
+
+#define TRANSPORT_TYPE_MAX 0x04
+
#endif /* __HCI_CORE_H */
diff --git a/include/net/codel.h b/include/net/codel.h
index a6e428f80135..a6c9e34e62b8 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -102,6 +102,9 @@ static inline u32 codel_time_to_us(codel_time_t val)
* @interval: width of moving time window
* @mtu: device mtu, or minimal queue backlog in bytes.
* @ecn: is Explicit Congestion Notification enabled
+ * @ce_threshold_selector: apply ce_threshold to packets matching this value
+ * in the diffserv/ECN byte of the IP header
+ * @ce_threshold_mask: mask to apply to ce_threshold_selector comparison
*/
struct codel_params {
codel_time_t target;
@@ -109,6 +112,8 @@ struct codel_params {
codel_time_t interval;
u32 mtu;
bool ecn;
+ u8 ce_threshold_selector;
+ u8 ce_threshold_mask;
};
/**
diff --git a/include/net/codel_impl.h b/include/net/codel_impl.h
index d289b91dcd65..137d40d8cbeb 100644
--- a/include/net/codel_impl.h
+++ b/include/net/codel_impl.h
@@ -54,6 +54,8 @@ static void codel_params_init(struct codel_params *params)
params->interval = MS2TIME(100);
params->target = MS2TIME(5);
params->ce_threshold = CODEL_DISABLED_THRESHOLD;
+ params->ce_threshold_mask = 0;
+ params->ce_threshold_selector = 0;
params->ecn = false;
}
@@ -246,9 +248,19 @@ static struct sk_buff *codel_dequeue(void *ctx,
vars->rec_inv_sqrt);
}
end:
- if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
- INET_ECN_set_ce(skb))
- stats->ce_mark++;
+ if (skb && codel_time_after(vars->ldelay, params->ce_threshold)) {
+ bool set_ce = true;
+
+ if (params->ce_threshold_mask) {
+ int dsfield = skb_get_dsfield(skb);
+
+ set_ce = (dsfield >= 0 &&
+ (((u8)dsfield & params->ce_threshold_mask) ==
+ params->ce_threshold_selector));
+ }
+ if (set_ce && INET_ECN_set_ce(skb))
+ stats->ce_mark++;
+ }
return skb;
}
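
The selector/mask pair restricts CE marking past ce_threshold to a subset of traffic identified by the diffserv/ECN byte. For example, to mark only ECT(1) packets (the L4S identifier), one would presumably configure:

	params->ce_threshold_mask = INET_ECN_MASK;	/* 0x03: ECN bits */
	params->ce_threshold_selector = INET_ECN_ECT_1;	/* 0x01 */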
diff --git a/include/net/datalink.h b/include/net/datalink.h
index a9663229b913..d9b7faaa539f 100644
--- a/include/net/datalink.h
+++ b/include/net/datalink.h
@@ -12,7 +12,7 @@ struct datalink_proto {
int (*rcvfunc)(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
int (*request)(struct datalink_proto *, struct sk_buff *,
- unsigned char *);
+ const unsigned char *);
struct list_head node;
};
diff --git a/include/net/devlink.h b/include/net/devlink.h
index c902e8e5f012..da3ceeb8b87b 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -21,45 +21,7 @@
#include <linux/xarray.h>
#include <linux/firmware.h>
-#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \
- (__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX)
-
-struct devlink_dev_stats {
- u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
- u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
-};
-
-struct devlink_ops;
-
-struct devlink {
- u32 index;
- struct list_head port_list;
- struct list_head rate_list;
- struct list_head sb_list;
- struct list_head dpipe_table_list;
- struct list_head resource_list;
- struct list_head param_list;
- struct list_head region_list;
- struct list_head reporter_list;
- struct mutex reporters_lock; /* protects reporter_list */
- struct devlink_dpipe_headers *dpipe_headers;
- struct list_head trap_list;
- struct list_head trap_group_list;
- struct list_head trap_policer_list;
- const struct devlink_ops *ops;
- struct xarray snapshot_ids;
- struct devlink_dev_stats stats;
- struct device *dev;
- possible_net_t _net;
- struct mutex lock; /* Serializes access to devlink instance specific objects such as
- * port, sb, dpipe, resource, params, region, traps and more.
- */
- u8 reload_failed:1,
- reload_enabled:1;
- refcount_t refcount;
- struct completion comp;
- char priv[0] __aligned(NETDEV_ALIGN);
-};
+struct devlink;
struct devlink_port_phys_attrs {
u32 port_number; /* Same value as "split group".
@@ -1224,6 +1186,11 @@ enum devlink_trap_group_generic_id {
.min_burst = _min_burst, \
}
+enum {
+ /* device supports reload operations */
+ DEVLINK_F_RELOAD = 1UL << 0,
+};
+
struct devlink_ops {
/**
* @supported_flash_update_params:
@@ -1520,34 +1487,9 @@ struct devlink_ops {
struct netlink_ext_ack *extack);
};
-static inline void *devlink_priv(struct devlink *devlink)
-{
- BUG_ON(!devlink);
- return &devlink->priv;
-}
-
-static inline struct devlink *priv_to_devlink(void *priv)
-{
- BUG_ON(!priv);
- return container_of(priv, struct devlink, priv);
-}
-
-static inline struct devlink_port *
-netdev_to_devlink_port(struct net_device *dev)
-{
- if (dev->netdev_ops->ndo_get_devlink_port)
- return dev->netdev_ops->ndo_get_devlink_port(dev);
- return NULL;
-}
-
-static inline struct devlink *netdev_to_devlink(struct net_device *dev)
-{
- struct devlink_port *devlink_port = netdev_to_devlink_port(dev);
-
- if (devlink_port)
- return devlink_port->devlink;
- return NULL;
-}
+void *devlink_priv(struct devlink *devlink);
+struct devlink *priv_to_devlink(void *priv);
+struct device *devlink_to_dev(const struct devlink *devlink);
struct ib_device;
@@ -1566,10 +1508,9 @@ static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
{
return devlink_alloc_ns(ops, priv_size, &init_net, dev);
}
+void devlink_set_features(struct devlink *devlink, u64 features);
void devlink_register(struct devlink *devlink);
void devlink_unregister(struct devlink *devlink);
-void devlink_reload_enable(struct devlink *devlink);
-void devlink_reload_disable(struct devlink *devlink);
void devlink_free(struct devlink *devlink);
int devlink_port_register(struct devlink *devlink,
struct devlink_port *devlink_port,
@@ -1653,12 +1594,6 @@ void devlink_param_unregister(struct devlink *devlink,
const struct devlink_param *param);
void devlink_params_publish(struct devlink *devlink);
void devlink_params_unpublish(struct devlink *devlink);
-int devlink_port_params_register(struct devlink_port *devlink_port,
- const struct devlink_param *params,
- size_t params_count);
-void devlink_port_params_unregister(struct devlink_port *devlink_port,
- const struct devlink_param *params,
- size_t params_count);
int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
union devlink_param_value *init_val);
int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
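
With struct devlink now opaque, drivers reach their private area and enable reload through exported helpers rather than inline accessors. A hedged sketch of the updated pattern; my_ops and struct my_priv are illustrative:

	struct devlink *dl;
	struct my_priv *priv;

	dl = devlink_alloc(&my_ops, sizeof(*priv), dev);
	if (!dl)
		return -ENOMEM;

	priv = devlink_priv(dl);	/* exported function, no longer inline */
	devlink_set_features(dl, DEVLINK_F_RELOAD);	/* replaces reload_enable() */
	devlink_register(dl);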
diff --git a/include/net/dn.h b/include/net/dn.h
index 56ab0726c641..ba9655b0098a 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -166,7 +166,7 @@ struct dn_skb_cb {
int iif;
};
-static inline __le16 dn_eth2dn(unsigned char *ethaddr)
+static inline __le16 dn_eth2dn(const unsigned char *ethaddr)
{
return get_unaligned((__le16 *)(ethaddr + 4));
}
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 258867eff230..1cd9c2461f0d 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -51,6 +51,7 @@ struct phylink_link_state;
#define DSA_TAG_PROTO_SEVILLE_VALUE 21
#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22
#define DSA_TAG_PROTO_SJA1110_VALUE 23
+#define DSA_TAG_PROTO_RTL8_4_VALUE 24
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
@@ -77,6 +78,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE,
DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE,
DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE,
+ DSA_TAG_PROTO_RTL8_4 = DSA_TAG_PROTO_RTL8_4_VALUE,
};
struct dsa_switch;
@@ -472,14 +474,41 @@ static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
}
+#define dsa_tree_for_each_user_port(_dp, _dst) \
+ list_for_each_entry((_dp), &(_dst)->ports, list) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_switch_for_each_port(_dp, _ds) \
+ list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_port_safe(_dp, _next, _ds) \
+ list_for_each_entry_safe((_dp), (_next), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_port_continue_reverse(_dp, _ds) \
+ list_for_each_entry_continue_reverse((_dp), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_available_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (!dsa_port_is_unused((_dp)))
+
+#define dsa_switch_for_each_user_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_switch_for_each_cpu_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (dsa_port_is_cpu((_dp)))
+
static inline u32 dsa_user_ports(struct dsa_switch *ds)
{
+ struct dsa_port *dp;
u32 mask = 0;
- int p;
- for (p = 0; p < ds->num_ports; p++)
- if (dsa_is_user_port(ds, p))
- mask |= BIT(p);
+ dsa_switch_for_each_user_port(dp, ds)
+ mask |= BIT(dp->index);
return mask;
}
@@ -585,8 +614,16 @@ struct dsa_switch_ops {
int (*change_tag_protocol)(struct dsa_switch *ds, int port,
enum dsa_tag_protocol proto);
+ /* Optional switch-wide initialization and destruction methods */
int (*setup)(struct dsa_switch *ds);
void (*teardown)(struct dsa_switch *ds);
+
+ /* Per-port initialization and destruction methods. Mandatory if the
+ * driver registers devlink port regions, optional otherwise.
+ */
+ int (*port_setup)(struct dsa_switch *ds, int port);
+ void (*port_teardown)(struct dsa_switch *ds, int port);
+
u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
/*
@@ -637,6 +674,12 @@ struct dsa_switch_ops {
int (*get_sset_count)(struct dsa_switch *ds, int port, int sset);
void (*get_ethtool_phy_stats)(struct dsa_switch *ds,
int port, uint64_t *data);
+ void (*get_eth_phy_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats);
+ void (*get_eth_mac_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats);
+ void (*get_eth_ctrl_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
void (*get_stats64)(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s);
void (*self_test)(struct dsa_switch *ds, int port,
@@ -1046,6 +1089,7 @@ static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
+void dsa_switch_shutdown(struct dsa_switch *ds);
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
#ifdef CONFIG_PM_SLEEP
int dsa_switch_suspend(struct dsa_switch *ds);
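
The new iterators replace index-based loops over ds->num_ports, as the rewritten dsa_user_ports() above shows. A sketch of the same pattern for CPU ports:

	struct dsa_port *dp;
	u32 mask = 0;

	dsa_switch_for_each_cpu_port(dp, ds)
		mask |= BIT(dp->index);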
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 1424e02cef90..7aa2b8e1fb29 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -7,14 +7,17 @@
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
-/* Note: this used to be in include/uapi/linux/gen_stats.h */
-struct gnet_stats_basic_packed {
- __u64 bytes;
- __u64 packets;
-};
-
-struct gnet_stats_basic_cpu {
- struct gnet_stats_basic_packed bstats;
+/* Throughput stats.
+ * Must be initialized beforehand with gnet_stats_basic_sync_init().
+ *
+ * If no reads can ever occur parallel to writes (e.g. stack-allocated
+ * bstats), then the internal stat values can be written to and read
+ * from directly. Otherwise, use _bstats_set/update() for writes and
+ * gnet_stats_add_basic() for reads.
+ */
+struct gnet_stats_basic_sync {
+ u64_stats_t bytes;
+ u64_stats_t packets;
struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
@@ -34,6 +37,7 @@ struct gnet_dump {
struct tc_stats tc_stats;
};
+void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
struct gnet_dump *d, int padattr);
@@ -42,41 +46,38 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
spinlock_t *lock, struct gnet_dump *d,
int padattr);
-int gnet_stats_copy_basic(const seqcount_t *running,
- struct gnet_dump *d,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
-void __gnet_stats_copy_basic(const seqcount_t *running,
- struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
-int gnet_stats_copy_basic_hw(const seqcount_t *running,
- struct gnet_dump *d,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
+int gnet_stats_copy_basic(struct gnet_dump *d,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
+void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
+int gnet_stats_copy_basic_hw(struct gnet_dump *d,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
struct gnet_stats_queue __percpu *cpu_q,
struct gnet_stats_queue *q, __u32 qlen);
-void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
- const struct gnet_stats_queue __percpu *cpu_q,
- const struct gnet_stats_queue *q, __u32 qlen);
+void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
+ const struct gnet_stats_queue __percpu *cpu_q,
+ const struct gnet_stats_queue *q);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
int gnet_stats_finish_copy(struct gnet_dump *d);
-int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock,
- seqcount_t *running, struct nlattr *opt);
+ bool running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
-int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **ptr,
spinlock_t *lock,
- seqcount_t *running, struct nlattr *opt);
+ bool running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
struct gnet_stats_rate_est64 *sample);
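
Each gnet_stats_basic_sync instance must have its syncp initialized before first use, per the comment above. A sketch for the per-CPU case:

	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	int cpu;

	cpu_bstats = alloc_percpu(struct gnet_stats_basic_sync);
	if (!cpu_bstats)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		gnet_stats_basic_sync_init(per_cpu_ptr(cpu_bstats, cpu));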
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index b06c2d02ec84..fa6a87246a7b 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -289,7 +289,7 @@ static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
{
/* The below has to be done to allow calling inet_csk_destroy_sock */
sock_set_flag(sk, SOCK_DEAD);
- percpu_counter_inc(sk->sk_prot->orphan_count);
+ this_cpu_inc(*sk->sk_prot->orphan_count);
}
void inet_csk_destroy_sock(struct sock *sk);
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index ba77f47ef61e..ea32393464a2 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -188,6 +188,23 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
return 0;
}
+static inline int skb_get_dsfield(struct sk_buff *skb)
+{
+ switch (skb_protocol(skb, true)) {
+ case cpu_to_be16(ETH_P_IP):
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ break;
+ return ipv4_get_dsfield(ip_hdr(skb));
+
+ case cpu_to_be16(ETH_P_IPV6):
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ break;
+ return ipv6_get_dsfield(ipv6_hdr(skb));
+ }
+
+ return -1;
+}
+
static inline int INET_ECN_set_ect1(struct sk_buff *skb)
{
switch (skb_protocol(skb, true)) {
diff --git a/include/net/ioam6.h b/include/net/ioam6.h
index 3c2993bc48c8..3f45ba37a2c6 100644
--- a/include/net/ioam6.h
+++ b/include/net/ioam6.h
@@ -56,7 +56,8 @@ static inline struct ioam6_pernet_data *ioam6_pernet(struct net *net)
struct ioam6_namespace *ioam6_namespace(struct net *net, __be16 id);
void ioam6_fill_trace_data(struct sk_buff *skb,
struct ioam6_namespace *ns,
- struct ioam6_trace_hdr *trace);
+ struct ioam6_trace_hdr *trace,
+ bool is_input);
int ioam6_init(void);
void ioam6_exit(void);
diff --git a/include/net/ip.h b/include/net/ip.h
index 9192444f2964..cf229a531194 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -291,7 +291,11 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
#define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
-u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
+static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
+{
+ return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
+}
+
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 21c5386d4a6d..ab5348e57db1 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -597,5 +597,5 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
u8 rt_family, unsigned char *flags, bool skip_oif);
int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
- int nh_weight, u8 rt_family);
+ int nh_weight, u8 rt_family, u32 nh_tclassid);
#endif /* _NET_FIB_H */
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 7cb5a1aace40..ff1804a0c469 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -931,6 +931,7 @@ struct netns_ipvs {
int sysctl_conn_reuse_mode;
int sysctl_schedule_icmp;
int sysctl_ignore_tunneled;
+ int sysctl_run_estimation;
/* ip_vs_lblc */
int sysctl_lblc_expiration;
@@ -1071,6 +1072,11 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
return ipvs->sysctl_cache_bypass;
}
+static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_run_estimation;
+}
+
#else
static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1163,6 +1169,11 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
return 0;
}
+static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
+{
+ return 1;
+}
+
#endif
/* IPVS core functions
diff --git a/include/net/llc.h b/include/net/llc.h
index df282d9b4017..fd1f9a3fd8dd 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -133,7 +133,7 @@ static inline void llc_sap_put(struct llc_sap *sap)
struct llc_sap *llc_sap_find(unsigned char sap_value);
int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
- unsigned char *dmac, unsigned char dsap);
+ const unsigned char *dmac, unsigned char dsap);
void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
diff --git a/include/net/llc_if.h b/include/net/llc_if.h
index 8d5c543cd620..c72570a21a4f 100644
--- a/include/net/llc_if.h
+++ b/include/net/llc_if.h
@@ -62,7 +62,8 @@
#define LLC_STATUS_CONFLICT 7 /* disconnect conn */
#define LLC_STATUS_RESET_DONE 8 /* */
-int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap);
+int llc_establish_connection(struct sock *sk, const u8 *lmac, u8 *dmac,
+ u8 dsap);
int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
int llc_send_disc(struct sock *sk);
#endif /* LLC_IF_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 13cad5e9e6c0..dd757f0987b0 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -2829,13 +2829,13 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag
* when they are able to replace in-use PTK keys according to the following
* requirements:
- * 1) They do not hand over frames decrypted with the old key to
- mac80211 once the call to set_key() with command %DISABLE_KEY has been
- completed when also setting @IEEE80211_KEY_FLAG_GENERATE_IV for any key,
+ * 1) They do not hand over frames decrypted with the old key to mac80211
+ once the call to set_key() with command %DISABLE_KEY has been completed,
2) either drop or continue to use the old key for any outgoing frames queued
at the time of the key deletion (including re-transmits),
3) never send out a frame queued prior to the set_key() %SET_KEY command
- encrypted with the new key and
+ encrypted with the new key when also needing
+ @IEEE80211_KEY_FLAG_GENERATE_IV and
4) never send out a frame unencrypted when it should be encrypted.
Mac80211 will not queue any new frames for a deleted key to the driver.
*/
diff --git a/include/net/mctp.h b/include/net/mctp.h
index a824d47c3c6d..2a83443bdfac 100644
--- a/include/net/mctp.h
+++ b/include/net/mctp.h
@@ -54,7 +54,7 @@ struct mctp_sock {
struct sock sk;
/* bind() params */
- int bind_net;
+ unsigned int bind_net;
mctp_eid_t bind_addr;
__u8 bind_type;
@@ -62,35 +62,46 @@ struct mctp_sock {
* by sk->net->keys_lock
*/
struct hlist_head keys;
+
+ /* mechanism for expiring allocated keys; will release an allocated
+ * tag, and any netdev state for a request/response pairing
+ */
+ struct timer_list key_expiry;
};
/* Key for matching incoming packets to sockets or reassembly contexts.
* Packets are matched on (src,dest,tag).
*
- * Lifetime requirements:
+ * Lifetime / locking requirements:
+ *
+ * - individual key data (ie, the struct itself) is protected by key->lock;
+ * changes must be made with that lock held.
*
- * - keys are free()ed via RCU
+ * - the lookup fields: peer_addr, local_addr and tag are set before the
+ * key is added to lookup lists, and never updated.
+ *
+ * - A ref to the key must be held (through key->refs) if a pointer to the
+ * key is to be accessed after key->lock is released.
*
* - a mctp_sk_key contains a reference to a struct sock; this is valid
* for the life of the key. On sock destruction (through unhash), the key is
- * removed from lists (see below), and will not be observable after a RCU
- * grace period.
- *
- * any RX occurring within that grace period may still queue to the socket,
- * but will hit the SOCK_DEAD case before the socket is freed.
+ * removed from lists (see below), and marked invalid.
*
* - these mctp_sk_keys appear on two lists:
* 1) the struct mctp_sock->keys list
* 2) the struct netns_mctp->keys list
*
- * updates to either list are performed under the netns_mctp->keys
- * lock.
+ * presence on these lists requires a (single) refcount to be held; both
+ * lists are updated as a single operation.
+ *
+ * Updates and lookups in either list are performed under the
+ * netns_mctp->keys lock. Lookup functions will need to lock the key and
+ * take a reference before unlocking the keys_lock. Consequently, the list's
+ * keys_lock *cannot* be acquired with the individual key->lock held.
*
* - a key may have a sk_buff attached as part of an in-progress message
- * reassembly (->reasm_head). The reassembly context is protected by
- * reasm_lock, which may be acquired with the keys lock (above) held, if
- * necessary. Consequently, keys lock *cannot* be acquired with the
- * reasm_lock held.
+ * reassembly (->reasm_head). The reasm data is protected by the individual
+ * key->lock.
*
* - there are two destruction paths for a mctp_sk_key:
*
@@ -101,6 +112,8 @@ struct mctp_sock {
* the (complete) reply, or during reassembly errors. Here, we clean up
* the reassembly context (marking reasm_dead, to prevent another from
* starting), and remove the socket from the netns & socket lists.
+ *
+ * - through an expiry timeout, on a per-socket timer
*/
struct mctp_sk_key {
mctp_eid_t peer_addr;
@@ -116,14 +129,25 @@ struct mctp_sk_key {
/* per-socket list */
struct hlist_node sklist;
+ /* lock protects against concurrent updates to the reassembly and
+ * expiry data below.
+ */
+ spinlock_t lock;
+
+ /* Keys are referenced during the output path, which may sleep */
+ refcount_t refs;
+
/* incoming fragment reassembly context */
- spinlock_t reasm_lock;
struct sk_buff *reasm_head;
struct sk_buff **reasm_tailp;
bool reasm_dead;
u8 last_seq;
- struct rcu_head rcu;
+ /* key validity */
+ bool valid;
+
+ /* expiry timeout; valid (above) cleared on expiry */
+ unsigned long expiry;
};
struct mctp_skb_cb {
@@ -191,6 +215,8 @@ int mctp_do_route(struct mctp_route *rt, struct sk_buff *skb);
int mctp_local_output(struct sock *sk, struct mctp_route *rt,
struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag);
+void mctp_key_unref(struct mctp_sk_key *key);
+
/* routing <--> device interface */
unsigned int mctp_default_net(struct net *net);
int mctp_default_net_set(struct net *net, unsigned int index);
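
A sketch of the lookup discipline the lifetime comment above describes: take the reference under keys_lock, then work on the key under its own lock. Field and helper names (hlist, mctp_key_match()) are illustrative:

	spin_lock_irqsave(&net->mctp.keys_lock, flags);
	hlist_for_each_entry(key, &net->mctp.keys, hlist) {
		if (!mctp_key_match(key, saddr, daddr, tag))
			continue;
		refcount_inc(&key->refs);	/* hold before dropping keys_lock */
		break;
	}
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	if (key) {
		spin_lock_irqsave(&key->lock, flags);
		/* ... reassembly/expiry updates under key->lock ... */
		spin_unlock_irqrestore(&key->lock, flags);
		mctp_key_unref(key);
	}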
diff --git a/include/net/mctpdevice.h b/include/net/mctpdevice.h
index 71a11012fac7..3a439463f055 100644
--- a/include/net/mctpdevice.h
+++ b/include/net/mctpdevice.h
@@ -17,6 +17,8 @@
struct mctp_dev {
struct net_device *dev;
+ refcount_t refs;
+
unsigned int net;
/* Only modified under RTNL. Reads have addrs_lock held */
@@ -32,4 +34,7 @@ struct mctp_dev {
struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev);
struct mctp_dev *__mctp_dev_get(const struct net_device *dev);
+void mctp_dev_hold(struct mctp_dev *mdev);
+void mctp_dev_put(struct mctp_dev *mdev);
+
#endif /* __NET_MCTPDEVICE_H */
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 38e4094960ce..04341d86585d 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -137,7 +137,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
u8 *opt, int opt_len,
struct ndisc_options *ndopts);
-void __ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data,
+void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data,
int data_len, int pad);
#define NDISC_OPS_REDIRECT_DATA_SPACE 2
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 22ced1381ede..e8e48be66755 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -144,17 +144,18 @@ struct neighbour {
struct timer_list timer;
unsigned long used;
atomic_t probes;
- __u8 flags;
- __u8 nud_state;
- __u8 type;
- __u8 dead;
+ u8 nud_state;
+ u8 type;
+ u8 dead;
u8 protocol;
+ u32 flags;
seqlock_t ha_lock;
unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8);
struct hh_cache hh;
int (*output)(struct neighbour *, struct sk_buff *);
const struct neigh_ops *ops;
struct list_head gc_list;
+ struct list_head managed_list;
struct rcu_head rcu;
struct net_device *dev;
u8 primary_key[0];
@@ -172,7 +173,7 @@ struct pneigh_entry {
struct pneigh_entry *next;
possible_net_t net;
struct net_device *dev;
- u8 flags;
+ u32 flags;
u8 protocol;
u8 key[];
};
@@ -216,11 +217,13 @@ struct neigh_table {
int gc_thresh3;
unsigned long last_flush;
struct delayed_work gc_work;
+ struct delayed_work managed_work;
struct timer_list proxy_timer;
struct sk_buff_head proxy_queue;
atomic_t entries;
atomic_t gc_entries;
struct list_head gc_list;
+ struct list_head managed_list;
rwlock_t lock;
unsigned long last_rand;
struct neigh_statistics __percpu *stats;
@@ -250,12 +253,21 @@ static inline void *neighbour_priv(const struct neighbour *n)
}
/* flags for neigh_update() */
-#define NEIGH_UPDATE_F_OVERRIDE 0x00000001
-#define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002
-#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER 0x00000004
-#define NEIGH_UPDATE_F_EXT_LEARNED 0x20000000
-#define NEIGH_UPDATE_F_ISROUTER 0x40000000
-#define NEIGH_UPDATE_F_ADMIN 0x80000000
+#define NEIGH_UPDATE_F_OVERRIDE BIT(0)
+#define NEIGH_UPDATE_F_WEAK_OVERRIDE BIT(1)
+#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER BIT(2)
+#define NEIGH_UPDATE_F_USE BIT(3)
+#define NEIGH_UPDATE_F_MANAGED BIT(4)
+#define NEIGH_UPDATE_F_EXT_LEARNED BIT(5)
+#define NEIGH_UPDATE_F_ISROUTER BIT(6)
+#define NEIGH_UPDATE_F_ADMIN BIT(7)
+
+/* In-kernel representation for NDA_FLAGS_EXT flags: */
+#define NTF_OLD_MASK 0xff
+#define NTF_EXT_SHIFT 8
+#define NTF_EXT_MASK (NTF_EXT_MANAGED)
+
+#define NTF_MANAGED (NTF_EXT_MANAGED << NTF_EXT_SHIFT)
extern const struct nla_policy nda_policy[];
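
The extended flags from NDA_FLAGS_EXT are shifted into the upper bits of the now-32-bit flags word. A sketch of the mapping:

	/* fold userspace flags into the in-kernel u32 representation */
	u32 flags = (ndm_flags & NTF_OLD_MASK) |
		    ((ext_flags & NTF_EXT_MASK) << NTF_EXT_SHIFT);
	/* e.g. NTF_EXT_MANAGED (bit 0) becomes NTF_MANAGED (bit 8) */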
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
index 0fd8a4159662..ceadf8ba25a4 100644
--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
@@ -17,7 +17,6 @@ struct inet_frags_ctl;
struct nft_ct_frag6_pernet {
struct ctl_table_header *nf_frag_frags_hdr;
struct fqdir *fqdir;
- unsigned int users;
};
#endif /* _NF_DEFRAG_IPV6_H */
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 148f5d8ee5ab..a16171c5fd9e 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1202,7 +1202,7 @@ struct nft_object *nft_obj_lookup(const struct net *net,
void nft_obj_notify(struct net *net, const struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq,
- int event, int family, int report, gfp_t gfp);
+ int event, u16 flags, int family, int report, gfp_t gfp);
/**
* struct nft_object_type - stateful object type
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index 832ab69efda5..4c3809e141f4 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -6,7 +6,7 @@
struct xt_rateest {
/* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
- struct gnet_stats_basic_packed bstats;
+ struct gnet_stats_basic_sync bstats;
spinlock_t lock;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 986a2a9cfdfa..b593f95e9991 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -27,5 +27,11 @@ struct netns_nf {
#if IS_ENABLED(CONFIG_DECNET)
struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS];
#endif
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+ unsigned int defrag_ipv4_users;
+#endif
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+ unsigned int defrag_ipv6_users;
+#endif
};
#endif
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 10e1777877e6..28085b995ddc 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -325,7 +325,7 @@ int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
struct fib_nh_common *nhc = &nhi->fib_nhc;
int weight = nhg->nh_entries[i].weight;
- if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)
+ if (fib_add_nexthop(skb, nhc, weight, rt_family, 0) < 0)
return -EMSGSIZE;
}
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index a4082406a003..3855f069627f 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -216,24 +216,14 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
page_pool_put_full_page(pool, page, true);
}
-#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \
- (sizeof(dma_addr_t) > sizeof(unsigned long))
-
static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
- dma_addr_t ret = page->dma_addr;
-
- if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
- ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
-
- return ret;
+ return page->dma_addr;
}
static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
page->dma_addr = addr;
- if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
- page->dma_addr_upper = upper_32_bits(addr);
}
static inline void page_pool_set_frag_count(struct page *page, long nr)
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 83a6d0792180..193f88ebf629 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -765,7 +765,7 @@ struct tc_cookie {
};
struct tc_qopt_offload_stats {
- struct gnet_stats_basic_packed *bstats;
+ struct gnet_stats_basic_sync *bstats;
struct gnet_stats_queue *qstats;
};
@@ -885,7 +885,7 @@ struct tc_gred_qopt_offload_params {
};
struct tc_gred_qopt_offload_stats {
- struct gnet_stats_basic_packed bstats[MAX_DPs];
+ struct gnet_stats_basic_sync bstats[MAX_DPs];
struct gnet_stats_queue qstats[MAX_DPs];
struct red_stats *xstats[MAX_DPs];
};
@@ -977,6 +977,7 @@ enum tc_tbf_command {
TC_TBF_REPLACE,
TC_TBF_DESTROY,
TC_TBF_STATS,
+ TC_TBF_GRAFT,
};
struct tc_tbf_qopt_offload_replace_params {
@@ -992,6 +993,7 @@ struct tc_tbf_qopt_offload {
union {
struct tc_tbf_qopt_offload_replace_params replace_params;
struct tc_qopt_offload_stats stats;
+ u32 child_handle;
};
};
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 6d7b12cba015..bf79f3a890af 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -11,6 +11,7 @@
#include <uapi/linux/pkt_sched.h>
#define DEFAULT_TX_QUEUE_LEN 1000
+#define STAB_SIZE_LOG_MAX 30
struct qdisc_walker {
int stop;
diff --git a/include/net/rose.h b/include/net/rose.h
index cf517d306a28..0f0a4ce0fee7 100644
--- a/include/net/rose.h
+++ b/include/net/rose.h
@@ -162,8 +162,8 @@ extern int sysctl_rose_link_fail_timeout;
extern int sysctl_rose_maximum_vcs;
extern int sysctl_rose_window_size;
-int rosecmp(rose_address *, rose_address *);
-int rosecmpm(rose_address *, rose_address *, unsigned short);
+int rosecmp(const rose_address *, const rose_address *);
+int rosecmpm(const rose_address *, const rose_address *, unsigned short);
char *rose2asc(char *buf, const rose_address *);
struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
void rose_kill_by_neigh(struct rose_neigh *);
@@ -205,8 +205,8 @@ extern const struct seq_operations rose_node_seqops;
extern struct seq_operations rose_route_seqops;
void rose_add_loopback_neigh(void);
-int __must_check rose_add_loopback_node(rose_address *);
-void rose_del_loopback_node(rose_address *);
+int __must_check rose_add_loopback_node(const rose_address *);
+void rose_del_loopback_node(const rose_address *);
void rose_rt_device_down(struct net_device *);
void rose_link_device_down(struct net_device *);
struct net_device *rose_dev_first(void);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 5a011f8d394e..ada02c4a4f51 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -40,6 +40,13 @@ enum qdisc_state_t {
__QDISC_STATE_DRAINING,
};
+enum qdisc_state2_t {
+ /* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
+ * Use qdisc_run_begin/end() or qdisc_is_running() instead.
+ */
+ __QDISC_STATE2_RUNNING,
+};
+
#define QDISC_STATE_MISSED BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING BIT(__QDISC_STATE_DRAINING)
@@ -97,7 +104,7 @@ struct Qdisc {
struct netdev_queue *dev_queue;
struct net_rate_estimator __rcu *rate_est;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
int pad;
refcount_t refcnt;
@@ -107,10 +114,10 @@ struct Qdisc {
*/
struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
struct qdisc_skb_head q;
- struct gnet_stats_basic_packed bstats;
- seqcount_t running;
+ struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
unsigned long state;
+ unsigned long state2; /* must be written under qdisc spinlock */
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
@@ -143,11 +150,15 @@ static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
return NULL;
}
+/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
+ * root_lock section, or provide their own memory barriers to order it
+ * against the qdisc_run_begin/end() atomic bit operations.
+ */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK)
return spin_is_locked(&qdisc->seqlock);
- return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
+ return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}
static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
@@ -167,6 +178,9 @@ static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
return !READ_ONCE(qdisc->q.qlen);
}
+/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
+ * the qdisc root lock acquired.
+ */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK) {
@@ -206,15 +220,8 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
* after it releases the lock at the end of qdisc_run_end().
*/
return spin_trylock(&qdisc->seqlock);
- } else if (qdisc_is_running(qdisc)) {
- return false;
}
- /* Variant of write_seqcount_begin() telling lockdep a trylock
- * was attempted.
- */
- raw_write_seqcount_begin(&qdisc->running);
- seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
- return true;
+ return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}
static inline void qdisc_run_end(struct Qdisc *qdisc)
@@ -226,7 +233,7 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
&qdisc->state)))
__netif_schedule(qdisc);
} else {
- write_seqcount_end(&qdisc->running);
+ __clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}
}
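
With the seqcount removed, the running state of a lock-protected qdisc is a plain bit in state2, flipped with the non-atomic __test_and_set_bit()/__clear_bit(); the qdisc root lock supplies all the exclusion and ordering. A simplified sketch of the expected call pattern:

	/* Sketch: for a !TCQ_F_NOLOCK qdisc, begin/end must run under the
	 * root lock, so __QDISC_STATE2_RUNNING needs no barriers of its own.
	 */
	spinlock_t *root_lock = qdisc_lock(q);

	spin_lock(root_lock);
	if (qdisc_run_begin(q)) {	/* false if already running */
		__qdisc_run(q);		/* dequeue/transmit loop */
		qdisc_run_end(q);
	}
	spin_unlock(root_lock);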
@@ -592,14 +599,6 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
return qdisc_lock(root);
}
-static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
-{
- struct Qdisc *root = qdisc_root_sleeping(qdisc);
-
- ASSERT_RTNL();
- return &root->running;
-}
-
static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->dev;
@@ -849,14 +848,16 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return sch->enqueue(skb, sch, to_free);
}
-static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
__u64 bytes, __u32 packets)
{
- bstats->bytes += bytes;
- bstats->packets += packets;
+ u64_stats_update_begin(&bstats->syncp);
+ u64_stats_add(&bstats->bytes, bytes);
+ u64_stats_add(&bstats->packets, packets);
+ u64_stats_update_end(&bstats->syncp);
}
-static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
const struct sk_buff *skb)
{
_bstats_update(bstats,
@@ -864,26 +865,10 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}
-static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
- __u64 bytes, __u32 packets)
-{
- u64_stats_update_begin(&bstats->syncp);
- _bstats_update(&bstats->bstats, bytes, packets);
- u64_stats_update_end(&bstats->syncp);
-}
-
-static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
- const struct sk_buff *skb)
-{
- u64_stats_update_begin(&bstats->syncp);
- bstats_update(&bstats->bstats, skb);
- u64_stats_update_end(&bstats->syncp);
-}
-
static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
const struct sk_buff *skb)
{
- bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}
static inline void qdisc_bstats_update(struct Qdisc *sch,
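
gnet_stats_basic_sync embeds a u64_stats_sync around the byte/packet counters, so 32-bit readers get consistent snapshots without the old qdisc-wide seqcount. A hedged sketch of the matching reader side, assuming the standard u64_stats fetch/retry API:

	static void read_bstats(const struct gnet_stats_basic_sync *b,
				u64 *bytes, u64 *packets)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&b->syncp);
			*bytes = u64_stats_read(&b->bytes);
			*packets = u64_stats_read(&b->packets);
		} while (u64_stats_fetch_retry(&b->syncp, start));
		/* On 64-bit kernels the loop reduces to plain loads. */
	}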
@@ -972,10 +957,9 @@ static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
__u32 *backlog)
{
struct gnet_stats_queue qstats = { 0 };
- __u32 len = qdisc_qlen_sum(sch);
- __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
- *qlen = qstats.qlen;
+ gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
+ *qlen = qstats.qlen + qdisc_qlen(sch);
*backlog = qstats.backlog;
}
@@ -1316,7 +1300,7 @@ void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);
struct mini_Qdisc {
struct tcf_proto *filter_list;
struct tcf_block *block;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
struct rcu_head rcu;
};
@@ -1324,7 +1308,7 @@ struct mini_Qdisc {
static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
const struct sk_buff *skb)
{
- bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}
static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 2eb6d7c2c931..f37c7a558d6d 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -384,11 +384,11 @@ sctp_vtag_verify(const struct sctp_chunk *chunk,
* Verification Tag value does not match the receiver's own
* tag value, the receiver shall silently discard the packet...
*/
- if (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)
- return 1;
+ if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag)
+ return 0;
chunk->transport->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
- return 0;
+ return 1;
}
/* Check VTAG of the packet matches the sender's own tag and the T bit is
diff --git a/include/net/sock.h b/include/net/sock.h
index 66a9a90f9558..596ba85611bc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -262,7 +262,6 @@ struct bpf_local_storage;
* @sk_dst_cache: destination cache
* @sk_dst_pending_confirm: need to confirm neighbour
* @sk_policy: flow policy
- * @sk_rx_skb_cache: cache copy of recently accessed RX skb
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_tsq_flags: TCP Small Queues flags
@@ -270,6 +269,7 @@ struct bpf_local_storage;
* @sk_omem_alloc: "o" is "option" or "other"
* @sk_wmem_queued: persistent queue size
* @sk_forward_alloc: space allocated forward
+ * @sk_reserved_mem: space reserved and non-reclaimable for the socket
* @sk_napi_id: id of the last napi context to receive data for sk
* @sk_ll_usec: usecs to busypoll when there is no data
* @sk_allocation: allocation mode
@@ -307,6 +307,7 @@ struct bpf_local_storage;
* @sk_priority: %SO_PRIORITY setting
* @sk_type: socket type (%SOCK_STREAM, etc)
* @sk_protocol: which protocol this socket belongs in this network family
+ * @sk_peer_lock: lock protecting @sk_peer_pid and @sk_peer_cred
* @sk_peer_pid: &struct pid for this socket's peer
* @sk_peer_cred: %SO_PEERCRED setting
* @sk_rcvlowat: %SO_RCVLOWAT setting
@@ -328,7 +329,6 @@ struct bpf_local_storage;
* @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
* @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
- * @sk_tx_skb_cache: cache copy of recently accessed TX skb
* @sk_security: used by security modules
* @sk_mark: generic packet mark
* @sk_cgrp_data: cgroup data for this cgroup
@@ -393,7 +393,6 @@ struct sock {
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
- struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
/*
* The backlog queue is special, it is always used with
@@ -412,6 +411,7 @@ struct sock {
#define sk_rmem_alloc sk_backlog.rmem_alloc
int sk_forward_alloc;
+ u32 sk_reserved_mem;
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_ll_usec;
/* ===== mostly read cache line ===== */
@@ -442,7 +442,6 @@ struct sock {
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
};
- struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
@@ -488,8 +487,10 @@ struct sock {
u8 sk_prefer_busy_poll;
u16 sk_busy_poll_budget;
#endif
+ spinlock_t sk_peer_lock;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
+
long sk_rcvtimeo;
ktime_t sk_stamp;
#if BITS_PER_LONG==32
@@ -1234,7 +1235,7 @@ struct proto {
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
- struct percpu_counter *orphan_count;
+ unsigned int __percpu *orphan_count;
struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;
@@ -1515,20 +1516,49 @@ sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
skb_pfmemalloc(skb);
}
+static inline int sk_unused_reserved_mem(const struct sock *sk)
+{
+ int unused_mem;
+
+ if (likely(!sk->sk_reserved_mem))
+ return 0;
+
+ unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
+ atomic_read(&sk->sk_rmem_alloc);
+
+ return unused_mem > 0 ? unused_mem : 0;
+}
+
static inline void sk_mem_reclaim(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable >= SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk, reclaimable);
+}
+
+static inline void sk_mem_reclaim_final(struct sock *sk)
+{
+ sk->sk_reserved_mem = 0;
+ sk_mem_reclaim(sk);
}
static inline void sk_mem_reclaim_partial(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable > SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk, reclaimable - 1);
}
static inline void sk_mem_charge(struct sock *sk, int size)
@@ -1540,9 +1570,12 @@ static inline void sk_mem_charge(struct sock *sk, int size)
static inline void sk_mem_uncharge(struct sock *sk, int size)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
sk->sk_forward_alloc += size;
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
/* Avoid a possible overflow.
* TCP send queues can make this happen, if sk_mem_reclaim()
@@ -1551,22 +1584,14 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
* If we reach 2 MBytes, reclaim 1 MBytes right now, there is
* no need to hold that much forward allocation anyway.
*/
- if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+ if (unlikely(reclaimable >= 1 << 21))
__sk_mem_reclaim(sk, 1 << 20);
}
-DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
sk_wmem_queued_add(sk, -skb->truesize);
sk_mem_uncharge(sk, skb->truesize);
- if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
- !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
- skb_ext_reset(skb);
- skb_zcopy_clear(skb, true);
- sk->sk_tx_skb_cache = skb;
- return;
- }
__kfree_skb(skb);
}
@@ -1623,7 +1648,36 @@ void release_sock(struct sock *sk);
SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
-bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
+bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
+
+/**
+ * lock_sock_fast - fast version of lock_sock
+ * @sk: socket
+ *
+ * This version should be used for very small sections, where the process
+ * won't block. Returns false if the fast path is taken:
+ *
+ * sk_lock.slock locked, owned = 0, BH disabled
+ *
+ * Returns true if the slow path is taken:
+ *
+ * sk_lock.slock unlocked, owned = 1, BH enabled
+ */
+static inline bool lock_sock_fast(struct sock *sk)
+{
+ /* The sk_lock has mutex_lock() semantics here. */
+ mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+
+ return __lock_sock_fast(sk);
+}
+
+/* fast socket lock variant for caller already holding a [different] socket lock */
+static inline bool lock_sock_fast_nested(struct sock *sk)
+{
+ mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+
+ return __lock_sock_fast(sk);
+}
/**
* unlock_sock_fast - complement of lock_sock_fast
@@ -1640,6 +1694,7 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
release_sock(sk);
__release(&sk->sk_lock.slock);
} else {
+ mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
spin_unlock_bh(&sk->sk_lock.slock);
}
}
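
Moving mutex_acquire() into the inline wrappers keeps lockdep's acquisition point at the caller (_RET_IP_) while __lock_sock_fast() does the actual locking. The calling convention is unchanged; a short sketch:

	/* Sketch: a quick, non-blocking critical section. The bool from
	 * lock_sock_fast() must be handed back to unlock_sock_fast() so it
	 * knows whether the slow (owned) path was taken.
	 */
	bool slow = lock_sock_fast(sk);

	sk->sk_err = 0;		/* some short, non-sleeping update */
	unlock_sock_fast(sk, slow);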
@@ -2355,6 +2410,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
return;
val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+ val = max_t(u32, val, sk_unused_reserved_mem(sk));
WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
}
@@ -2575,7 +2631,6 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
&skb_shinfo(skb)->tskey);
}
-DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
@@ -2587,12 +2642,6 @@ DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
- if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
- !sk->sk_rx_skb_cache) {
- sk->sk_rx_skb_cache = skb;
- skb_orphan(skb);
- return;
- }
__kfree_skb(skb);
}
@@ -2787,4 +2836,8 @@ void sock_set_sndtimeo(struct sock *sk, s64 secs);
int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
+int sock_get_timeout(long timeo, void *optval, bool old_timeval);
+int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
+ sockptr_t optval, int optlen, bool old_timeval);
+
#endif /* _SOCK_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 3166dc15d7d6..d62467a0094f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -48,7 +48,9 @@
extern struct inet_hashinfo tcp_hashinfo;
-extern struct percpu_counter tcp_orphan_count;
+DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
+int tcp_orphan_count_sum(void);
+
void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
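
tcp_orphan_count becomes a plain per-CPU counter: writers do a local lockless increment, and only readers that need the total pay for the summation. A hedged sketch of both sides; the loop shows roughly what tcp_orphan_count_sum() is expected to do:

	/* Writer: per-CPU, no shared-cacheline bouncing. */
	this_cpu_inc(tcp_orphan_count);

	/* Reader: approximately tcp_orphan_count_sum(). */
	int total = 0, cpu;

	for_each_possible_cpu(cpu)
		total += per_cpu(tcp_orphan_count, cpu);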
@@ -290,19 +292,6 @@ static inline bool tcp_out_of_memory(struct sock *sk)
void sk_forced_mem_schedule(struct sock *sk, int size);
-static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
-{
- struct percpu_counter *ocp = sk->sk_prot->orphan_count;
- int orphans = percpu_counter_read_positive(ocp);
-
- if (orphans << shift > sysctl_tcp_max_orphans) {
- orphans = percpu_counter_sum_positive(ocp);
- if (orphans << shift > sysctl_tcp_max_orphans)
- return true;
- }
- return false;
-}
-
bool tcp_check_oom(struct sock *sk, int shift);
@@ -330,8 +319,6 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
-struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
- struct page *page, int offset, size_t *size);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
@@ -581,6 +568,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */
+void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
+void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
@@ -874,10 +863,11 @@ struct tcp_skb_cb {
__u32 ack_seq; /* Sequence number ACK'd */
union {
struct {
+#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
/* There is space for up to 24 bytes */
- __u32 in_flight:30,/* Bytes in flight at transmit */
- is_app_limited:1, /* cwnd not fully used? */
- unused:1;
+ __u32 is_app_limited:1, /* cwnd not fully used? */
+ delivered_ce:20,
+ unused:11;
/* pkts S/ACKed so far upon tx of skb, incl retrans: */
__u32 delivered;
/* start of send pipeline phase */
@@ -1029,7 +1019,9 @@ struct ack_sample {
struct rate_sample {
u64 prior_mstamp; /* starting timestamp for interval */
u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
+ u32 prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
s32 delivered; /* number of packets delivered over interval */
+ s32 delivered_ce; /* number of packets delivered w/ CE marks*/
long interval_us; /* time for tp->delivered to incr "delivered" */
u32 snd_interval_us; /* snd interval for delivered packets */
u32 rcv_interval_us; /* rcv interval for delivered packets */
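
With delivered_ce sampled alongside delivered over the same interval, a congestion control module can derive the fraction of CE-marked packets per rate sample. A hedged sketch of that arithmetic; the 10-bit scaling is illustrative, not mandated by this patch:

	static u32 ce_fraction(const struct rate_sample *rs)
	{
		/* Scale to [0, 1024]; distrust empty or invalid samples. */
		if (rs->delivered <= 0 || rs->delivered_ce < 0)
			return 0;
		return (u32)div64_u64((u64)rs->delivered_ce << 10,
				      (u64)rs->delivered);
	}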
@@ -1418,6 +1410,17 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
+static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+{
+ int unused_mem = sk_unused_reserved_mem(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+ if (unused_mem)
+ tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
+ tcp_win_from_space(sk, unused_mem));
+}
+
void tcp_cleanup_rbuf(struct sock *sk, int copied);
/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
@@ -1576,6 +1579,7 @@ struct tcp_md5sig_key {
u8 keylen;
u8 family; /* AF_INET or AF_INET6 */
u8 prefixlen;
+ u8 flags;
union tcp_md5_addr addr;
int l3index; /* set if key added with L3 scope */
u8 key[TCP_MD5SIG_MAXKEYLEN];
@@ -1621,10 +1625,10 @@ struct tcp_md5sig_pool {
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
- int family, u8 prefixlen, int l3index,
+ int family, u8 prefixlen, int l3index, u8 flags,
const u8 *newkey, u8 newkeylen, gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
- int family, u8 prefixlen, int l3index);
+ int family, u8 prefixlen, int l3index, u8 flags);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk);
diff --git a/include/net/tls.h b/include/net/tls.h
index be4b3e1cac46..b6d40642afdd 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -66,7 +66,7 @@
#define MAX_IV_SIZE 16
#define TLS_MAX_REC_SEQ_SIZE 8
-/* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes.
+/* For CCM mode, the full 16 bytes of IV are made of 4 fields of given sizes.
*
* IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
*
@@ -74,6 +74,7 @@
* Hence b0 contains (3 - 1) = 2.
*/
#define TLS_AES_CCM_IV_B0_BYTE 2
+#define TLS_SM4_CCM_IV_B0_BYTE 2
#define __TLS_INC_STATS(net, field) \
__SNMP_INC_STATS((net)->mib.tls_statistics, field)
diff --git a/include/net/xdp.h b/include/net/xdp.h
index ad5b02dcb6f4..447f9b1578f3 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -15,13 +15,13 @@
* level RX-ring queues. It is information that is specific to how
 * the driver has configured a given RX-ring queue.
*
- * Each xdp_buff frame received in the driver carry a (pointer)
+ * Each xdp_buff frame received in the driver carries a (pointer)
* reference to this xdp_rxq_info structure. This provides the XDP
* data-path read-access to RX-info for both kernel and bpf-side
* (limited subset).
*
* For now, direct access is only safe while running in NAPI/softirq
- * context. Contents is read-mostly and must not be updated during
+ * context. Contents are read-mostly and must not be updated during
* driver NAPI/softirq poll.
*
* The driver usage API is a register and unregister API.
@@ -30,8 +30,8 @@
* can be attached as long as it doesn't change the underlying
* RX-ring. If the RX-ring does change significantly, the NIC driver
 * naturally needs to stop the RX-ring before purging and reallocating
- * memory. In that process the driver MUST call unregistor (which
- * also apply for driver shutdown and unload). The register API is
+ * memory. In that process the driver MUST call unregister (which
+ * also applies for driver shutdown and unload). The register API is
* also mandatory during RX-ring setup.
*/
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 4e295541e396..443d45951564 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -77,6 +77,12 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
return xp_alloc(pool);
}
+/* Returns as many entries as possible up to max. 0 <= N <= max. */
+static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ return xp_alloc_batch(pool, xdp, max);
+}
+
static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
return xp_can_alloc(pool, count);
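
xsk_buff_alloc_batch() lets zero-copy drivers pull up to max buffers in one call instead of looping over xsk_buff_alloc(), and may legitimately return fewer than requested. A hedged sketch of an RX refill path built on it; drv_post_rx_desc() stands in for the driver's descriptor setup:

	struct xdp_buff *batch[64];
	u32 i, n;

	n = xsk_buff_alloc_batch(pool, batch, min(budget, 64u));
	for (i = 0; i < n; i++) {
		dma_addr_t dma = xsk_buff_xdp_get_dma(batch[i]);

		drv_post_rx_desc(rx_ring, dma, batch[i]);
	}
	return n;	/* 0 <= n <= budget; refill again later if short */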
@@ -89,6 +95,13 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
xp_free(xskb);
}
+static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+{
+ xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
+ xdp->data_meta = xdp->data;
+ xdp->data_end = xdp->data + size;
+}
+
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
u64 addr)
{
@@ -212,6 +225,11 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
return NULL;
}
+static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ return 0;
+}
+
static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
return false;
@@ -221,6 +239,10 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}
+static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+{
+}
+
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
u64 addr)
{
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 7a9a23e7a604..ddeefc4a1040 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -7,6 +7,7 @@
#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
+#include <linux/bpf.h>
#include <net/xdp.h>
struct xsk_buff_pool;
@@ -23,7 +24,6 @@ struct xdp_buff_xsk {
dma_addr_t dma;
dma_addr_t frame_dma;
struct xsk_buff_pool *pool;
- bool unaligned;
u64 orig_addr;
struct list_head free_list_node;
};
@@ -67,6 +67,7 @@ struct xsk_buff_pool {
u32 free_heads_cnt;
u32 headroom;
u32 chunk_size;
+ u32 chunk_shift;
u32 frame_len;
u8 cached_need_wakeup;
bool uses_need_wakeup;
@@ -81,6 +82,13 @@ struct xsk_buff_pool {
struct xdp_buff_xsk *free_heads[];
};
+/* Masks for xdp_umem_page flags.
+ * The low 12-bits of the addr will be 0 since this is the page address, so we
+ * can use them for flags.
+ */
+#define XSK_NEXT_PG_CONTIG_SHIFT 0
+#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
+
/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
struct xdp_umem *umem);
@@ -89,7 +97,6 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
struct net_device *dev, u16 queue_id);
void xp_destroy(struct xsk_buff_pool *pool);
-void xp_release(struct xdp_buff_xsk *xskb);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
@@ -99,12 +106,28 @@ void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);
+static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
+ u64 addr)
+{
+ xskb->orig_addr = addr;
+ xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
+}
+
+static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
+ dma_addr_t *dma_pages, u64 addr)
+{
+ xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
+ (addr & ~PAGE_MASK);
+ xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
+}
+
/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
+u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
@@ -180,4 +203,25 @@ static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
xp_unaligned_extract_offset(addr);
}
+static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
+{
+ return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
+}
+
+static inline void xp_release(struct xdp_buff_xsk *xskb)
+{
+ if (xskb->pool->unaligned)
+ xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
+}
+
+static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
+{
+ u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+
+ offset += xskb->pool->headroom;
+ if (!xskb->pool->unaligned)
+ return xskb->orig_addr + offset;
+ return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
+}
+
#endif /* XSK_BUFF_POOL_H_ */
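
In unaligned mode, xp_get_handle() packs the data offset into the upper bits of the 64-bit handle instead of adding it to the base address. A worked example, assuming the existing XSK_UNALIGNED_BUF_OFFSET_SHIFT of 48:

	/* orig_addr = 0x11000, data sits 0x100 into the chunk,
	 * pool->headroom = 0x40:
	 *
	 *   offset = 0x100 + 0x40 = 0x140
	 *   unaligned handle = 0x11000 + (0x140ULL << 48)
	 *   aligned handle   = 0x11000 + 0x140 = 0x11140
	 *
	 * Since orig_addr is far below 2^48, the shifted offset and the
	 * base address occupy disjoint bits.
	 */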
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 09a17f6e93a7..b97e142a7ca9 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -146,7 +146,6 @@ struct scsi_device {
struct scsi_vpd __rcu *vpd_pg83;
struct scsi_vpd __rcu *vpd_pg80;
struct scsi_vpd __rcu *vpd_pg89;
- unsigned char current_tag; /* current tag */
struct scsi_target *sdev_target;
blist_flags_t sdev_bflags; /* black/white flags as also found in
diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h
index c9d849924f89..4bf62de2e00e 100644
--- a/include/soc/fsl/dpaa2-io.h
+++ b/include/soc/fsl/dpaa2-io.h
@@ -44,6 +44,7 @@ struct device;
* @regs_cinh: The cache inhibited regs
* @dpio_id: The dpio index
* @qman_version: The qman version
+ * @qman_clk: The qman clock frequency in Hz
*
* Describes the attributes and features of the DPIO object.
*/
@@ -55,6 +56,7 @@ struct dpaa2_io_desc {
void __iomem *regs_cinh;
int dpio_id;
u32 qman_version;
+ u32 qman_clk;
};
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
@@ -129,4 +131,11 @@ int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
u32 *fcnt, u32 *bcnt);
int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid,
u32 *num);
+
+int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff);
+void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff);
+void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d,
+ int use_adaptive_rx_coalesce);
+int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d);
+void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes);
#endif /* __FSL_DPAA2_IO_H */
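
The new DPIO hooks expose static IRQ coalescing plus an adaptive, DIM-driven mode. A hedged sketch of a driver's ethtool set_coalesce path; the hold-off units are assumed to be microseconds:

	/* Sketch: apply ethtool coalesce settings to a DPIO instance "io". */
	dpaa2_io_set_adaptive_coalescing(io, coal->use_adaptive_rx_coalesce);
	if (!coal->use_adaptive_rx_coalesce)
		err = dpaa2_io_set_irq_coalescing(io, coal->rx_coalesce_usecs);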
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index 06706a9fd5b1..9b872da0c246 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -89,15 +89,6 @@
/* Source PGIDs, one per physical port */
#define PGID_SRC 80
-#define IFH_TAG_TYPE_C 0
-#define IFH_TAG_TYPE_S 1
-
-#define IFH_REW_OP_NOOP 0x0
-#define IFH_REW_OP_DSCP 0x1
-#define IFH_REW_OP_ONE_STEP_PTP 0x2
-#define IFH_REW_OP_TWO_STEP_PTP 0x3
-#define IFH_REW_OP_ORIGIN_PTP 0x5
-
#define OCELOT_NUM_TC 8
#define OCELOT_SPEED_2500 0
@@ -572,9 +563,22 @@ struct ocelot_vcap_block {
int pol_lpr;
};
-struct ocelot_vlan {
- bool valid;
+struct ocelot_bridge_vlan {
u16 vid;
+ unsigned long portmask;
+ unsigned long untagged;
+ struct list_head list;
+};
+
+enum ocelot_port_tag_config {
+ /* all VLANs are egress-untagged */
+ OCELOT_PORT_TAG_DISABLED = 0,
+ /* all VLANs except the native VLAN and VID 0 are egress-tagged */
+ OCELOT_PORT_TAG_NATIVE = 1,
+ /* all VLANs except VID 0 are egress-tagged */
+ OCELOT_PORT_TAG_TRUNK_NO_VID0 = 2,
+ /* all VLANs are egress-tagged */
+ OCELOT_PORT_TAG_TRUNK = 3,
};
enum ocelot_sb {
@@ -599,14 +603,12 @@ struct ocelot_port {
bool vlan_aware;
/* VLAN that untagged frames are classified to, on ingress */
- struct ocelot_vlan pvid_vlan;
- /* The VLAN ID that will be transmitted as untagged, on egress */
- struct ocelot_vlan native_vlan;
+ const struct ocelot_bridge_vlan *pvid_vlan;
+ unsigned int ptp_skbs_in_flight;
u8 ptp_cmd;
struct sk_buff_head tx_skbs;
u8 ts_id;
- spinlock_t ts_id_lock;
phy_interface_t phy_mode;
@@ -644,8 +646,7 @@ struct ocelot {
u8 base_mac[ETH_ALEN];
- /* Keep track of the vlan port masks */
- u32 vlan_mask[VLAN_N_VID];
+ struct list_head vlans;
/* Switches like VSC9959 have flooding per traffic class */
int num_flooding_pgids;
@@ -680,6 +681,9 @@ struct ocelot {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_info;
struct hwtstamp_config hwtstamp_config;
+ unsigned int ptp_skbs_in_flight;
+ /* Protects the 2-step TX timestamp ID logic */
+ spinlock_t ts_id_lock;
/* Protects the PTP interface state */
struct mutex ptp_lock;
/* Protects the PTP clock */
@@ -692,15 +696,6 @@ struct ocelot_policer {
u32 burst; /* bytes */
};
-struct ocelot_skb_cb {
- struct sk_buff *clone;
- u8 ptp_cmd;
- u8 ts_id;
-};
-
-#define OCELOT_SKB_CB(skb) \
- ((struct ocelot_skb_cb *)((skb)->cb))
-
#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
@@ -752,8 +747,6 @@ u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 val, u32 reg, u32 offset);
-#if IS_ENABLED(CONFIG_MSCC_OCELOT_SWITCH_LIB)
-
/* Packet I/O */
bool ocelot_can_inject(struct ocelot *ocelot, int grp);
void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
@@ -761,36 +754,6 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **skb);
void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp);
-u32 ocelot_ptp_rew_op(struct sk_buff *skb);
-#else
-
-static inline bool ocelot_can_inject(struct ocelot *ocelot, int grp)
-{
- return false;
-}
-
-static inline void ocelot_port_inject_frame(struct ocelot *ocelot, int port,
- int grp, u32 rew_op,
- struct sk_buff *skb)
-{
-}
-
-static inline int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp,
- struct sk_buff **skb)
-{
- return -EIO;
-}
-
-static inline void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
-{
-}
-
-static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
-{
- return 0;
-}
-#endif
-
/* Hardware initialization */
int ocelot_regfields_init(struct ocelot *ocelot,
const struct reg_field *const regfields);
diff --git a/include/soc/mscc/ocelot_ptp.h b/include/soc/mscc/ocelot_ptp.h
index ded497d72bdb..f085884b1fa2 100644
--- a/include/soc/mscc/ocelot_ptp.h
+++ b/include/soc/mscc/ocelot_ptp.h
@@ -13,6 +13,9 @@
#include <linux/ptp_clock_kernel.h>
#include <soc/mscc/ocelot.h>
+#define OCELOT_MAX_PTP_ID 63
+#define OCELOT_PTP_FIFO_SIZE 128
+
#define PTP_PIN_CFG_RSZ 0x20
#define PTP_PIN_TOD_SEC_MSB_RSZ PTP_PIN_CFG_RSZ
#define PTP_PIN_TOD_SEC_LSB_RSZ PTP_PIN_CFG_RSZ
diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h
index 25fd525aaf92..eeb1142aa1b1 100644
--- a/include/soc/mscc/ocelot_vcap.h
+++ b/include/soc/mscc/ocelot_vcap.h
@@ -576,6 +576,16 @@ enum ocelot_mask_mode {
OCELOT_MASK_MODE_REDIRECT,
};
+enum ocelot_es0_vid_sel {
+ OCELOT_ES0_VID_PLUS_CLASSIFIED_VID = 0,
+ OCELOT_ES0_VID = 1,
+};
+
+enum ocelot_es0_pcp_sel {
+ OCELOT_CLASSIFIED_PCP = 0,
+ OCELOT_ES0_PCP = 1,
+};
+
enum ocelot_es0_tag {
OCELOT_NO_ES0_TAG,
OCELOT_ES0_TAG,
@@ -694,7 +704,7 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
int ocelot_vcap_filter_del(struct ocelot *ocelot,
struct ocelot_vcap_filter *rule);
struct ocelot_vcap_filter *
-ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id,
- bool tc_offload);
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block,
+ unsigned long cookie, bool tc_offload);
#endif /* _OCELOT_VCAP_H_ */
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 01570dbda503..0e45963bb767 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -224,6 +224,7 @@ struct hda_codec {
#endif
/* misc flags */
+ unsigned int configured:1; /* codec was configured */
unsigned int in_freeing:1; /* being released */
unsigned int registered:1; /* codec was registered */
unsigned int display_power_control:1; /* needs display power */
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 989e1517332d..7a08ed2acd60 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -98,6 +98,7 @@ struct snd_rawmidi_file {
struct snd_rawmidi *rmidi;
struct snd_rawmidi_substream *input;
struct snd_rawmidi_substream *output;
+ unsigned int user_pversion; /* supported protocol version */
};
struct snd_rawmidi_str {
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 9f73ed2cf061..bca73e8c8cde 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -306,11 +306,13 @@ enum afs_flock_operation {
enum afs_cb_break_reason {
afs_cb_break_no_break,
+ afs_cb_break_no_promise,
afs_cb_break_for_callback,
afs_cb_break_for_deleted,
afs_cb_break_for_lapsed,
+ afs_cb_break_for_s_reinit,
afs_cb_break_for_unlink,
- afs_cb_break_for_vsbreak,
+ afs_cb_break_for_v_break,
afs_cb_break_for_volume_callback,
afs_cb_break_for_zap,
};
@@ -602,11 +604,13 @@ enum afs_cb_break_reason {
#define afs_cb_break_reasons \
EM(afs_cb_break_no_break, "no-break") \
+ EM(afs_cb_break_no_promise, "no-promise") \
EM(afs_cb_break_for_callback, "break-cb") \
EM(afs_cb_break_for_deleted, "break-del") \
EM(afs_cb_break_for_lapsed, "break-lapsed") \
+ EM(afs_cb_break_for_s_reinit, "s-reinit") \
EM(afs_cb_break_for_unlink, "break-unlink") \
- EM(afs_cb_break_for_vsbreak, "break-vs") \
+ EM(afs_cb_break_for_v_break, "break-v") \
EM(afs_cb_break_for_volume_callback, "break-v-cb") \
E_(afs_cb_break_for_zap, "break-zap")
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index 9a448fe9355d..920b6a303d60 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -178,7 +178,7 @@ TRACE_EVENT(cachefiles_unlink,
),
TP_fast_assign(
- __entry->obj = obj->fscache.debug_id;
+ __entry->obj = obj ? obj->fscache.debug_id : UINT_MAX;
__entry->de = de;
__entry->why = why;
),
@@ -205,7 +205,7 @@ TRACE_EVENT(cachefiles_rename,
),
TP_fast_assign(
- __entry->obj = obj->fscache.debug_id;
+ __entry->obj = obj ? obj->fscache.debug_id : UINT_MAX;
__entry->de = de;
__entry->to = to;
__entry->why = why;
@@ -305,7 +305,7 @@ TRACE_EVENT(cachefiles_mark_buried,
),
TP_fast_assign(
- __entry->obj = obj->fscache.debug_id;
+ __entry->obj = obj ? obj->fscache.debug_id : UINT_MAX;
__entry->de = de;
__entry->why = why;
),
diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h
index 44d8e2981065..2814f188d98c 100644
--- a/include/trace/events/devlink.h
+++ b/include/trace/events/devlink.h
@@ -21,9 +21,9 @@ TRACE_EVENT(devlink_hwmsg,
TP_ARGS(devlink, incoming, type, buf, len),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__field(bool, incoming)
__field(unsigned long, type)
__dynamic_array(u8, buf, len)
@@ -31,9 +31,9 @@ TRACE_EVENT(devlink_hwmsg,
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__entry->incoming = incoming;
__entry->type = type;
memcpy(__get_dynamic_array(buf), buf, len);
@@ -55,17 +55,17 @@ TRACE_EVENT(devlink_hwerr,
TP_ARGS(devlink, err, msg),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__field(int, err)
__string(msg, msg)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__entry->err = err;
__assign_str(msg, msg);
),
@@ -85,17 +85,17 @@ TRACE_EVENT(devlink_health_report,
TP_ARGS(devlink, reporter_name, msg),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__string(reporter_name, msg)
__string(msg, msg)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__assign_str(reporter_name, reporter_name);
__assign_str(msg, msg);
),
@@ -116,18 +116,18 @@ TRACE_EVENT(devlink_health_recover_aborted,
TP_ARGS(devlink, reporter_name, health_state, time_since_last_recover),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__string(reporter_name, reporter_name)
__field(bool, health_state)
__field(u64, time_since_last_recover)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__assign_str(reporter_name, reporter_name);
__entry->health_state = health_state;
__entry->time_since_last_recover = time_since_last_recover;
@@ -150,17 +150,17 @@ TRACE_EVENT(devlink_health_reporter_state_update,
TP_ARGS(devlink, reporter_name, new_state),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__string(reporter_name, reporter_name)
__field(u8, new_state)
),
TP_fast_assign(
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__assign_str(reporter_name, reporter_name);
__entry->new_state = new_state;
),
@@ -181,9 +181,9 @@ TRACE_EVENT(devlink_trap_report,
TP_ARGS(devlink, skb, metadata),
TP_STRUCT__entry(
- __string(bus_name, devlink->dev->bus->name)
- __string(dev_name, dev_name(devlink->dev))
- __string(driver_name, devlink->dev->driver->name)
+ __string(bus_name, devlink_to_dev(devlink)->bus->name)
+ __string(dev_name, dev_name(devlink_to_dev(devlink)))
+ __string(driver_name, devlink_to_dev(devlink)->driver->name)
__string(trap_name, metadata->trap_name)
__string(trap_group_name, metadata->trap_group_name)
__dynamic_array(char, input_dev_name, IFNAMSIZ)
@@ -192,9 +192,9 @@ TRACE_EVENT(devlink_trap_report,
TP_fast_assign(
struct net_device *input_dev = metadata->input_dev;
- __assign_str(bus_name, devlink->dev->bus->name);
- __assign_str(dev_name, dev_name(devlink->dev));
- __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(bus_name, devlink_to_dev(devlink)->bus->name);
+ __assign_str(dev_name, dev_name(devlink_to_dev(devlink)));
+ __assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__assign_str(trap_name, metadata->trap_name);
__assign_str(trap_group_name, metadata->trap_group_name);
__assign_str(input_dev_name,
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index bf9806fd1306..db4f2cec8360 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -35,20 +35,20 @@ TRACE_EVENT(erofs_lookup,
TP_STRUCT__entry(
__field(dev_t, dev )
__field(erofs_nid_t, nid )
- __field(const char *, name )
+ __string(name, dentry->d_name.name )
__field(unsigned int, flags )
),
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->nid = EROFS_I(dir)->nid;
- __entry->name = dentry->d_name.name;
+ __assign_str(name, dentry->d_name.name);
__entry->flags = flags;
),
TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x",
show_dev_nid(__entry),
- __entry->name,
+ __get_str(name),
__entry->flags)
);
diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h
index 491098a0d8ed..bf7533f171ff 100644
--- a/include/trace/events/kyber.h
+++ b/include/trace/events/kyber.h
@@ -13,11 +13,11 @@
TRACE_EVENT(kyber_latency,
- TP_PROTO(struct request_queue *q, const char *domain, const char *type,
+ TP_PROTO(dev_t dev, const char *domain, const char *type,
unsigned int percentile, unsigned int numerator,
unsigned int denominator, unsigned int samples),
- TP_ARGS(q, domain, type, percentile, numerator, denominator, samples),
+ TP_ARGS(dev, domain, type, percentile, numerator, denominator, samples),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -30,7 +30,7 @@ TRACE_EVENT(kyber_latency,
),
TP_fast_assign(
- __entry->dev = disk_devt(q->disk);
+ __entry->dev = dev;
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
strlcpy(__entry->type, type, sizeof(__entry->type));
__entry->percentile = percentile;
@@ -47,10 +47,9 @@ TRACE_EVENT(kyber_latency,
TRACE_EVENT(kyber_adjust,
- TP_PROTO(struct request_queue *q, const char *domain,
- unsigned int depth),
+ TP_PROTO(dev_t dev, const char *domain, unsigned int depth),
- TP_ARGS(q, domain, depth),
+ TP_ARGS(dev, domain, depth),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -59,7 +58,7 @@ TRACE_EVENT(kyber_adjust,
),
TP_fast_assign(
- __entry->dev = disk_devt(q->disk);
+ __entry->dev = dev;
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
__entry->depth = depth;
),
@@ -71,9 +70,9 @@ TRACE_EVENT(kyber_adjust,
TRACE_EVENT(kyber_throttled,
- TP_PROTO(struct request_queue *q, const char *domain),
+ TP_PROTO(dev_t dev, const char *domain),
- TP_ARGS(q, domain),
+ TP_ARGS(dev, domain),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -81,7 +80,7 @@ TRACE_EVENT(kyber_throttled,
),
TP_fast_assign(
- __entry->dev = disk_devt(q->disk);
+ __entry->dev = dev;
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
),
diff --git a/include/trace/events/mctp.h b/include/trace/events/mctp.h
new file mode 100644
index 000000000000..175b057c507f
--- /dev/null
+++ b/include/trace/events/mctp.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mctp
+
+#if !defined(_TRACE_MCTP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MCTP_H
+
+#include <linux/tracepoint.h>
+
+#ifndef __TRACE_MCTP_ENUMS
+#define __TRACE_MCTP_ENUMS
+enum {
+ MCTP_TRACE_KEY_TIMEOUT,
+ MCTP_TRACE_KEY_REPLIED,
+ MCTP_TRACE_KEY_INVALIDATED,
+ MCTP_TRACE_KEY_CLOSED,
+};
+#endif /* __TRACE_MCTP_ENUMS */
+
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_TIMEOUT);
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_REPLIED);
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_INVALIDATED);
+TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_CLOSED);
+
+TRACE_EVENT(mctp_key_acquire,
+ TP_PROTO(const struct mctp_sk_key *key),
+ TP_ARGS(key),
+ TP_STRUCT__entry(
+ __field(__u8, paddr)
+ __field(__u8, laddr)
+ __field(__u8, tag)
+ ),
+ TP_fast_assign(
+ __entry->paddr = key->peer_addr;
+ __entry->laddr = key->local_addr;
+ __entry->tag = key->tag;
+ ),
+ TP_printk("local %d, peer %d, tag %1x",
+ __entry->laddr,
+ __entry->paddr,
+ __entry->tag
+ )
+);
+
+TRACE_EVENT(mctp_key_release,
+ TP_PROTO(const struct mctp_sk_key *key, int reason),
+ TP_ARGS(key, reason),
+ TP_STRUCT__entry(
+ __field(__u8, paddr)
+ __field(__u8, laddr)
+ __field(__u8, tag)
+ __field(int, reason)
+ ),
+ TP_fast_assign(
+ __entry->paddr = key->peer_addr;
+ __entry->laddr = key->local_addr;
+ __entry->tag = key->tag;
+ __entry->reason = reason;
+ ),
+ TP_printk("local %d, peer %d, tag %1x %s",
+ __entry->laddr,
+ __entry->paddr,
+ __entry->tag,
+ __print_symbolic(__entry->reason,
+ { MCTP_TRACE_KEY_TIMEOUT, "timeout" },
+ { MCTP_TRACE_KEY_REPLIED, "replied" },
+ { MCTP_TRACE_KEY_INVALIDATED, "invalidated" },
+ { MCTP_TRACE_KEY_CLOSED, "closed" })
+ )
+);
+
+#endif
+
+#include <trace/define_trace.h>
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 1f0a2b4864e4..c77a1313b3b0 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -126,6 +126,8 @@
#define SO_BUF_LOCK 72
+#define SO_RESERVE_MEM 73
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
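
SO_RESERVE_MEM pairs with the sk_reserved_mem accounting added in net/sock.h: the reserved bytes are charged once up front and excluded from reclaim, trading memory for fewer charge/uncharge cycles on hot sockets. A hedged userspace sketch:

	/* Reserve ~1 MiB of socket memory; needs a kernel with
	 * SO_RESERVE_MEM and is subject to memcg limits.
	 */
	int reserve = 1 << 20;

	if (setsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM,
		       &reserve, sizeof(reserve)) < 0)
		perror("SO_RESERVE_MEM");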
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 20e435fe657a..3246f2c74696 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -225,7 +225,14 @@ struct binder_freeze_info {
struct binder_frozen_status_info {
__u32 pid;
+
+ /* process received sync transactions since last frozen
+ * bit 0: received sync transaction after being frozen
+ * bit 1: new pending sync transaction during freezing
+ */
__u32 sync_recv;
+
+ /* process received async transactions since last frozen */
__u32 async_recv;
};
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 3e9785f1064a..6fc59d61937a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4046,7 +4046,7 @@ union bpf_attr {
* arguments. The *data* are a **u64** array and corresponding format string
* values are stored in the array. For strings and pointers where pointees
* are accessed, only the pointer values are stored in the *data* array.
- * The *data_len* is the size of *data* in bytes.
+ * The *data_len* is the size of *data* in bytes; it must be a multiple of 8.
*
* Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory.
* Reading kernel memory may fail due to either invalid address or
@@ -4751,7 +4751,8 @@ union bpf_attr {
* Each format specifier in **fmt** corresponds to one u64 element
* in the **data** array. For strings and pointers where pointees
* are accessed, only the pointer values are stored in the *data*
- * array. The *data_len* is the size of *data* in bytes.
+ * array. The *data_len* is the size of *data* in bytes; it must be
+ * a multiple of 8.
*
* Formats **%s** and **%p{i,I}{4,6}** require to read kernel
* memory. Reading kernel memory may fail due to either invalid
@@ -4898,6 +4899,16 @@ union bpf_attr {
* **-EINVAL** if *flags* is not zero.
*
* **-ENOENT** if architecture does not support branch records.
+ *
+ * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
+ * Description
+ * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64
+ * to format and can handle more format args as a result.
+ *
+ * Arguments are to be used as in **bpf_seq_printf**\ () helper.
+ * Return
+ * The number of bytes written to the buffer, or a negative error
+ * in case of failure.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
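
bpf_trace_vprintk() lifts bpf_trace_printk()'s three-argument limit by passing values through a u64 array; as documented above, data_len must be a multiple of 8. A hedged BPF-side sketch, e.g. from a tc program where ctx is a struct __sk_buff:

	static const char fmt[] = "ifindex=%d len=%d mark=%x queue=%d\n";
	u64 data[4];

	data[0] = ctx->ifindex;
	data[1] = ctx->len;
	data[2] = ctx->mark;
	data[3] = ctx->queue_mapping;
	bpf_trace_vprintk(fmt, sizeof(fmt), data, sizeof(data)); /* 32 bytes */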
@@ -5077,6 +5088,7 @@ union bpf_attr {
FN(get_attach_cookie), \
FN(task_pt_regs), \
FN(get_branch_snapshot), \
+ FN(trace_vprintk), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/include/uapi/linux/cifs/cifs_mount.h b/include/uapi/linux/cifs/cifs_mount.h
index 69829205fdb5..8e87d27b0951 100644
--- a/include/uapi/linux/cifs/cifs_mount.h
+++ b/include/uapi/linux/cifs/cifs_mount.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
/*
- * include/uapi/linux/cifs/cifs_mount.h
*
* Author(s): Scott Lovenberg ([email protected])
*
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 32f53a0069d6..b897b80770f6 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -551,6 +551,8 @@ enum devlink_attr {
DEVLINK_ATTR_RATE_NODE_NAME, /* string */
DEVLINK_ATTR_RATE_PARENT_NODE_NAME, /* string */
+ DEVLINK_ATTR_REGION_MAX_SNAPSHOTS, /* u32 */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index b6db6590baf0..a2223b685451 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -603,6 +603,7 @@ enum ethtool_link_ext_state {
ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE,
ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED,
ETHTOOL_LINK_EXT_STATE_OVERHEAT,
+ ETHTOOL_LINK_EXT_STATE_MODULE,
};
/* More information in addition to ETHTOOL_LINK_EXT_STATE_AUTONEG. */
@@ -649,6 +650,11 @@ enum ethtool_link_ext_substate_cable_issue {
ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE,
};
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_MODULE. */
+enum ethtool_link_ext_substate_module {
+ ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY = 1,
+};
+
#define ETH_GSTRING_LEN 32
/**
@@ -707,6 +713,29 @@ enum ethtool_stringset {
};
/**
+ * enum ethtool_module_power_mode_policy - plug-in module power mode policy
+ * @ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH: Module is always in high power mode.
+ * @ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO: Module is transitioned by the host
+ * to high power mode when the first port using it is put administratively
+ * up and to low power mode when the last port using it is put
+ * administratively down.
+ */
+enum ethtool_module_power_mode_policy {
+ ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH = 1,
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO,
+};
+
+/**
+ * enum ethtool_module_power_mode - plug-in module power mode
+ * @ETHTOOL_MODULE_POWER_MODE_LOW: Module is in low power mode.
+ * @ETHTOOL_MODULE_POWER_MODE_HIGH: Module is in high power mode.
+ */
+enum ethtool_module_power_mode {
+ ETHTOOL_MODULE_POWER_MODE_LOW = 1,
+ ETHTOOL_MODULE_POWER_MODE_HIGH,
+};
+
+/**
* struct ethtool_gstrings - string set for data tagging
* @cmd: Command number = %ETHTOOL_GSTRINGS
* @string_set: String set ID; one of &enum ethtool_stringset
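
ETHTOOL_MSG_MODULE_GET/SET carry the two attributes defined in ethtool_netlink.h below: userspace may set only the power-mode policy, while the operational power mode is read-only. With a new enough ethtool(8) this is expected to surface roughly as follows (the exact CLI syntax is an assumption, not part of this patch):

	ethtool --show-module eth0
	ethtool --set-module eth0 power-mode-policy auto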
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 5545f1ca9237..ca5fbb59fa42 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -47,6 +47,8 @@ enum {
ETHTOOL_MSG_MODULE_EEPROM_GET,
ETHTOOL_MSG_STATS_GET,
ETHTOOL_MSG_PHC_VCLOCKS_GET,
+ ETHTOOL_MSG_MODULE_GET,
+ ETHTOOL_MSG_MODULE_SET,
/* add new constants above here */
__ETHTOOL_MSG_USER_CNT,
@@ -90,6 +92,8 @@ enum {
ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY,
ETHTOOL_MSG_STATS_GET_REPLY,
ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY,
+ ETHTOOL_MSG_MODULE_GET_REPLY,
+ ETHTOOL_MSG_MODULE_NTF,
/* add new constants above here */
__ETHTOOL_MSG_KERNEL_CNT,
@@ -833,6 +837,19 @@ enum {
ETHTOOL_A_STATS_RMON_MAX = (__ETHTOOL_A_STATS_RMON_CNT - 1)
};
+/* MODULE */
+
+enum {
+ ETHTOOL_A_MODULE_UNSPEC,
+ ETHTOOL_A_MODULE_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_MODULE_POWER_MODE_POLICY, /* u8 */
+ ETHTOOL_A_MODULE_POWER_MODE, /* u8 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_MODULE_CNT,
+ ETHTOOL_A_MODULE_MAX = (__ETHTOOL_A_MODULE_CNT - 1)
+};
+
/* generic netlink info */
#define ETHTOOL_GENL_NAME "ethtool"
#define ETHTOOL_GENL_VERSION 1
diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
index 6135d92e0d47..daf82a230c0e 100644
--- a/include/uapi/linux/hyperv.h
+++ b/include/uapi/linux/hyperv.h
@@ -26,7 +26,7 @@
#ifndef _UAPI_HYPERV_H
#define _UAPI_HYPERV_H
-#include <linux/uuid.h>
+#include <linux/types.h>
/*
* Framework version for util services.
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 5f589c7a8382..5da4ee234e0b 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -86,6 +86,7 @@
* over Ethernet
*/
#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
+#define ETH_P_REALTEK 0x8899 /* Multiple proprietary protocols */
#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */
#define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 59ef35154e3d..b270a07b285e 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -317,13 +317,19 @@ enum {
IORING_REGISTER_IOWQ_AFF = 17,
IORING_UNREGISTER_IOWQ_AFF = 18,
- /* set/get max number of workers */
+ /* set/get max number of io-wq workers */
IORING_REGISTER_IOWQ_MAX_WORKERS = 19,
/* this goes last */
IORING_REGISTER_LAST
};
+/* io-wq worker categories */
+enum {
+ IO_WQ_BOUND,
+ IO_WQ_UNBOUND,
+};
+
/* deprecated, see struct io_uring_rsrc_update */
struct io_uring_files_update {
__u32 offset;
diff --git a/include/uapi/linux/ioam6_iptunnel.h b/include/uapi/linux/ioam6_iptunnel.h
index bae14636a8c8..829ffdfcacca 100644
--- a/include/uapi/linux/ioam6_iptunnel.h
+++ b/include/uapi/linux/ioam6_iptunnel.h
@@ -9,9 +9,38 @@
#ifndef _UAPI_LINUX_IOAM6_IPTUNNEL_H
#define _UAPI_LINUX_IOAM6_IPTUNNEL_H
+/* Encap modes:
+ * - inline: direct insertion
+ * - encap: ip6ip6 encapsulation
+ * - auto: inline for local packets, encap for in-transit packets
+ */
+enum {
+ __IOAM6_IPTUNNEL_MODE_MIN,
+
+ IOAM6_IPTUNNEL_MODE_INLINE,
+ IOAM6_IPTUNNEL_MODE_ENCAP,
+ IOAM6_IPTUNNEL_MODE_AUTO,
+
+ __IOAM6_IPTUNNEL_MODE_MAX,
+};
+
+#define IOAM6_IPTUNNEL_MODE_MIN (__IOAM6_IPTUNNEL_MODE_MIN + 1)
+#define IOAM6_IPTUNNEL_MODE_MAX (__IOAM6_IPTUNNEL_MODE_MAX - 1)
+
enum {
IOAM6_IPTUNNEL_UNSPEC,
+
+ /* Encap mode */
+ IOAM6_IPTUNNEL_MODE, /* u8 */
+
+ /* Tunnel dst address.
+ * For encap,auto modes.
+ */
+ IOAM6_IPTUNNEL_DST, /* struct in6_addr */
+
+ /* IOAM Trace Header */
IOAM6_IPTUNNEL_TRACE, /* struct ioam6_trace_hdr */
+
__IOAM6_IPTUNNEL_MAX,
};
diff --git a/include/uapi/linux/mctp.h b/include/uapi/linux/mctp.h
index 52b54d13f385..6acd4ccafbf7 100644
--- a/include/uapi/linux/mctp.h
+++ b/include/uapi/linux/mctp.h
@@ -10,6 +10,7 @@
#define __UAPI_MCTP_H
#include <linux/types.h>
+#include <linux/socket.h>
typedef __u8 mctp_eid_t;
@@ -18,11 +19,13 @@ struct mctp_addr {
};
struct sockaddr_mctp {
- unsigned short int smctp_family;
- int smctp_network;
+ __kernel_sa_family_t smctp_family;
+ __u16 __smctp_pad0;
+ unsigned int smctp_network;
struct mctp_addr smctp_addr;
__u8 smctp_type;
__u8 smctp_tag;
+ __u8 __smctp_pad1;
};
#define MCTP_NET_ANY 0x0
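
The reworked sockaddr_mctp uses only fixed-width, explicitly padded fields, making the ABI identical across architectures; userspace must zero __smctp_pad0/__smctp_pad1. A hedged bind sketch, with an illustrative message type:

	struct sockaddr_mctp addr;
	int sd = socket(AF_MCTP, SOCK_DGRAM, 0);

	memset(&addr, 0, sizeof(addr));		/* also clears the pads */
	addr.smctp_family = AF_MCTP;
	addr.smctp_network = MCTP_NET_ANY;
	addr.smctp_addr.s_addr = MCTP_ADDR_ANY;
	addr.smctp_type = 1;			/* illustrative type */
	addr.smctp_tag = MCTP_TAG_OWNER;
	bind(sd, (struct sockaddr *)&addr, sizeof(addr));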
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 00a60695fa53..db05fb55055e 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -31,6 +31,7 @@ enum {
NDA_PROTOCOL, /* Originator of entry */
NDA_NH_ID,
NDA_FDB_EXT_ATTRS,
+ NDA_FLAGS_EXT,
__NDA_MAX
};
@@ -40,14 +41,16 @@ enum {
* Neighbor Cache Entry Flags
*/
-#define NTF_USE 0x01
-#define NTF_SELF 0x02
-#define NTF_MASTER 0x04
-#define NTF_PROXY 0x08 /* == ATF_PUBL */
-#define NTF_EXT_LEARNED 0x10
-#define NTF_OFFLOADED 0x20
-#define NTF_STICKY 0x40
-#define NTF_ROUTER 0x80
+#define NTF_USE (1 << 0)
+#define NTF_SELF (1 << 1)
+#define NTF_MASTER (1 << 2)
+#define NTF_PROXY (1 << 3) /* == ATF_PUBL */
+#define NTF_EXT_LEARNED (1 << 4)
+#define NTF_OFFLOADED (1 << 5)
+#define NTF_STICKY (1 << 6)
+#define NTF_ROUTER (1 << 7)
+/* Extended flags under NDA_FLAGS_EXT: */
+#define NTF_EXT_MANAGED (1 << 0)
/*
* Neighbor Cache Entry States.
@@ -65,12 +68,22 @@ enum {
#define NUD_PERMANENT 0x80
#define NUD_NONE 0x00
-/* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change
- * and make no address resolution or NUD.
- * NUD_PERMANENT also cannot be deleted by garbage collectors.
+/* NUD_NOARP & NUD_PERMANENT are pseudostates; they never change and perform no
+ * address resolution or NUD.
+ *
+ * NUD_PERMANENT also cannot be deleted by garbage collectors. This holds true
+ * for dynamic entries with the NTF_EXT_LEARNED flag as well. However, upon a
+ * carrier down event, NUD_PERMANENT entries are not flushed whereas
+ * NTF_EXT_LEARNED flagged entries explicitly are (which is also consistent
+ * with the routing subsystem).
+ *
* When NTF_EXT_LEARNED is set for a bridge fdb entry the different cache entry
* states don't make sense and thus are ignored. Such entries don't age and
* can roam.
+ *
+ * NTF_EXT_MANAGED flagged neighbor entries are managed by the kernel on behalf
+ * of a user space control plane, and automatically refreshed so that (if
+ * possible) they remain in NUD_REACHABLE state.
*/
struct nda_cacheinfo {
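Because NTF_EXT_MANAGED lives in the new NDA_FLAGS_EXT attribute rather than
the 8-bit ndm_flags, a dump parser has to test the two words separately; a
small sketch (ext_flags is the u32 from NDA_FLAGS_EXT, or 0 if the attribute
was absent):

	#include <linux/neighbour.h>

	static int neigh_is_ext_learned(__u8 flags)
	{
		return !!(flags & NTF_EXT_LEARNED);
	}

	/* Managed entries are kernel-refreshed toward NUD_REACHABLE. */
	static int neigh_is_kernel_managed(__u32 ext_flags)
	{
		return !!(ext_flags & NTF_EXT_MANAGED);
	}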
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index ef9a44286e23..53411ccc69db 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -51,6 +51,7 @@ enum nf_inet_hooks {
enum nf_dev_hooks {
NF_NETDEV_INGRESS,
+ NF_NETDEV_EGRESS,
NF_NETDEV_NUMHOOKS
};
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index ec88590b3198..f292b467b27f 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -840,6 +840,8 @@ enum {
TCA_FQ_CODEL_CE_THRESHOLD,
TCA_FQ_CODEL_DROP_BATCH_SIZE,
TCA_FQ_CODEL_MEMORY_LIMIT,
+ TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR,
+ TCA_FQ_CODEL_CE_THRESHOLD_MASK,
__TCA_FQ_CODEL_MAX
};
diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h
index b175bd0165a1..20f33b27787f 100644
--- a/include/uapi/linux/smc.h
+++ b/include/uapi/linux/smc.h
@@ -84,17 +84,28 @@ enum {
SMC_NLA_SYS_IS_ISM_V2, /* u8 */
SMC_NLA_SYS_LOCAL_HOST, /* string */
SMC_NLA_SYS_SEID, /* string */
+ SMC_NLA_SYS_IS_SMCR_V2, /* u8 */
__SMC_NLA_SYS_MAX,
SMC_NLA_SYS_MAX = __SMC_NLA_SYS_MAX - 1
};
-/* SMC_NLA_LGR_V2 nested attributes */
+/* SMC_NLA_LGR_D_V2_COMMON and SMC_NLA_LGR_R_V2_COMMON nested attributes */
enum {
SMC_NLA_LGR_V2_VER, /* u8 */
SMC_NLA_LGR_V2_REL, /* u8 */
SMC_NLA_LGR_V2_OS, /* u8 */
SMC_NLA_LGR_V2_NEG_EID, /* string */
SMC_NLA_LGR_V2_PEER_HOST, /* string */
+ __SMC_NLA_LGR_V2_MAX,
+ SMC_NLA_LGR_V2_MAX = __SMC_NLA_LGR_V2_MAX - 1
+};
+
+/* SMC_NLA_LGR_R_V2 nested attributes */
+enum {
+ SMC_NLA_LGR_R_V2_UNSPEC,
+ SMC_NLA_LGR_R_V2_DIRECT, /* u8 */
+ __SMC_NLA_LGR_R_V2_MAX,
+ SMC_NLA_LGR_R_V2_MAX = __SMC_NLA_LGR_R_V2_MAX - 1
};
/* SMC_GEN_LGR_SMCR attributes */
@@ -106,6 +117,8 @@ enum {
SMC_NLA_LGR_R_PNETID, /* string */
SMC_NLA_LGR_R_VLAN_ID, /* u8 */
SMC_NLA_LGR_R_CONNS_NUM, /* u32 */
+ SMC_NLA_LGR_R_V2_COMMON, /* nest */
+ SMC_NLA_LGR_R_V2, /* nest */
__SMC_NLA_LGR_R_MAX,
SMC_NLA_LGR_R_MAX = __SMC_NLA_LGR_R_MAX - 1
};
@@ -138,7 +151,7 @@ enum {
SMC_NLA_LGR_D_PNETID, /* string */
SMC_NLA_LGR_D_CHID, /* u16 */
SMC_NLA_LGR_D_PAD, /* flag */
- SMC_NLA_LGR_V2, /* nest */
+ SMC_NLA_LGR_D_V2_COMMON, /* nest */
__SMC_NLA_LGR_D_MAX,
SMC_NLA_LGR_D_MAX = __SMC_NLA_LGR_D_MAX - 1
};
diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
index 46918a1852d7..c60ca33eac59 100644
--- a/include/uapi/linux/vm_sockets.h
+++ b/include/uapi/linux/vm_sockets.h
@@ -64,7 +64,7 @@
* timeout for a STREAM socket.
*/
-#define SO_VM_SOCKETS_CONNECT_TIMEOUT 6
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD 6
/* Option name for using non-blocking send/receive. Use as the option name
* for setsockopt(3) or getsockopt(3) to set or get the non-blocking
@@ -81,6 +81,17 @@
#define SO_VM_SOCKETS_NONBLOCK_TXRX 7
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW 8
+
+#if !defined(__KERNEL__)
+#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD
+#else
+#define SO_VM_SOCKETS_CONNECT_TIMEOUT \
+ (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD : SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW)
+#endif
+#endif
+
/* The vSocket equivalent of INADDR_ANY. This works for the svm_cid field of
* sockaddr_vm and indicates the context ID of the current endpoint.
*/
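For userspace the _OLD/_NEW split is transparent: the conditional above picks
the right option name at compile time from the width of time_t, so existing
sources simply rebuild. A hedged sketch setting a 5 second connect timeout:

	#include <sys/socket.h>
	#include <sys/time.h>
	#include <linux/vm_sockets.h>

	int vsock_set_connect_timeout(int fd)
	{
		struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };

		/* Resolves to ..._OLD or ..._NEW per sizeof(time_t). */
		return setsockopt(fd, AF_VSOCK,
				  SO_VM_SOCKETS_CONNECT_TIMEOUT,
				  &tv, sizeof(tv));
	}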
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index b96c1ea7166d..eda0426ec4c2 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -213,13 +213,13 @@ enum {
XFRM_MSG_GETSPDINFO,
#define XFRM_MSG_GETSPDINFO XFRM_MSG_GETSPDINFO
+ XFRM_MSG_MAPPING,
+#define XFRM_MSG_MAPPING XFRM_MSG_MAPPING
+
XFRM_MSG_SETDEFAULT,
#define XFRM_MSG_SETDEFAULT XFRM_MSG_SETDEFAULT
XFRM_MSG_GETDEFAULT,
#define XFRM_MSG_GETDEFAULT XFRM_MSG_GETDEFAULT
-
- XFRM_MSG_MAPPING,
-#define XFRM_MSG_MAPPING XFRM_MSG_MAPPING
__XFRM_MSG_MAX
};
#define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1)
@@ -514,9 +514,12 @@ struct xfrm_user_offload {
#define XFRM_OFFLOAD_INBOUND 2
struct xfrm_userpolicy_default {
-#define XFRM_USERPOLICY_DIRMASK_MAX (sizeof(__u8) * 8)
- __u8 dirmask;
- __u8 action;
+#define XFRM_USERPOLICY_UNSPEC 0
+#define XFRM_USERPOLICY_BLOCK 1
+#define XFRM_USERPOLICY_ACCEPT 2
+ __u8 in;
+ __u8 fwd;
+ __u8 out;
};
#ifndef __KERNEL__
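The reworked structure replaces the direction bitmask with one action byte per
direction, so a control plane now sets all three explicitly; a hedged sketch
of the payload an XFRM_MSG_SETDEFAULT request would carry:

	#include <linux/xfrm.h>

	/* Block unmatched output traffic, leave input/forward open. */
	static void xfrm_default_fill(struct xfrm_userpolicy_default *def)
	{
		def->in  = XFRM_USERPOLICY_ACCEPT;
		def->fwd = XFRM_USERPOLICY_ACCEPT;
		def->out = XFRM_USERPOLICY_BLOCK;
	}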
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 7cc2a0f3f2f5..d13bb8c1b450 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -917,7 +917,6 @@ struct hl_wait_cs_in {
#define HL_WAIT_CS_STATUS_BUSY 1
#define HL_WAIT_CS_STATUS_TIMEDOUT 2
#define HL_WAIT_CS_STATUS_ABORTED 3
-#define HL_WAIT_CS_STATUS_INTERRUPTED 4
#define HL_WAIT_CS_STATUS_FLAG_GONE 0x1
#define HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD 0x2
@@ -1286,7 +1285,8 @@ struct hl_debug_args {
* EIO - The CS was aborted (usually because the device was reset)
* ENODEV - The device wants to do hard-reset (so user need to close FD)
*
- * The driver also returns a custom define inside the IOCTL which can be:
+ * The driver also returns a custom define when the IOCTL call returns 0.
+ * The define can be one of the following:
*
* HL_WAIT_CS_STATUS_COMPLETED - The CS has been completed successfully (0)
* HL_WAIT_CS_STATUS_BUSY - The CS is still executing (0)
@@ -1294,8 +1294,6 @@ struct hl_debug_args {
* (ETIMEDOUT)
* HL_WAIT_CS_STATUS_ABORTED - The CS was aborted, usually because the
* device was reset (EIO)
- * HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR)
- *
*/
#define HL_IOCTL_WAIT_CS \
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 1d84ec9db93b..5859ca0a1439 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -784,6 +784,7 @@ struct snd_rawmidi_status {
#define SNDRV_RAWMIDI_IOCTL_PVERSION _IOR('W', 0x00, int)
#define SNDRV_RAWMIDI_IOCTL_INFO _IOR('W', 0x01, struct snd_rawmidi_info)
+#define SNDRV_RAWMIDI_IOCTL_USER_PVERSION _IOW('W', 0x02, int)
#define SNDRV_RAWMIDI_IOCTL_PARAMS _IOWR('W', 0x10, struct snd_rawmidi_params)
#define SNDRV_RAWMIDI_IOCTL_STATUS _IOWR('W', 0x20, struct snd_rawmidi_status)
#define SNDRV_RAWMIDI_IOCTL_DROP _IOW('W', 0x30, int)
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 39a5580f8feb..a3584a357f35 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -46,30 +46,18 @@ extern unsigned long *xen_contiguous_bitmap;
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle);
-
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
-#else
-static inline int xen_create_contiguous_region(phys_addr_t pstart,
- unsigned int order,
- unsigned int address_bits,
- dma_addr_t *dma_handle)
-{
- return 0;
-}
-
-static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
- unsigned int order) { }
#endif
#if defined(CONFIG_XEN_PV)
int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
- unsigned int domid, bool no_translate, struct page **pages);
+ unsigned int domid, bool no_translate);
#else
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr,
pgprot_t prot, unsigned int domid,
- bool no_translate, struct page **pages)
+ bool no_translate)
{
BUG();
return 0;
@@ -146,7 +134,7 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
*/
BUG_ON(err_ptr == NULL);
return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
- false, pages);
+ false);
}
/*
@@ -158,7 +146,6 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
* @err_ptr: Returns per-MFN error status.
* @prot: page protection mask
* @domid: Domain owning the pages
- * @pages: Array of pages if this domain has an auto-translated physmap
*
* @mfn and @err_ptr may point to the same buffer, the MFNs will be
* overwritten by the error codes after they are mapped.
@@ -169,14 +156,13 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
unsigned long addr, xen_pfn_t *mfn,
int nr, int *err_ptr,
- pgprot_t prot, unsigned int domid,
- struct page **pages)
+ pgprot_t prot, unsigned int domid)
{
if (xen_feature(XENFEAT_auto_translated_physmap))
return -EOPNOTSUPP;
return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
- true, pages);
+ true);
}
/* xen_remap_domain_gfn_range() - map a range of foreign frames
@@ -200,8 +186,7 @@ static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
if (xen_feature(XENFEAT_auto_translated_physmap))
return -EOPNOTSUPP;
- return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
- pages);
+ return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false);
}
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 2ed30ff6c906..762b534978d9 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -338,20 +338,19 @@ __setup("rootflags=", root_data_setup);
__setup("rootfstype=", fs_names_setup);
__setup("rootdelay=", root_delay_setup);
-static int __init split_fs_names(char *page, char *names)
+/* This can return zero-length strings. The caller should check. */
+static int __init split_fs_names(char *page, size_t size, char *names)
{
- int count = 0;
+ int count = 1;
char *p = page;
- strcpy(p, root_fs_names);
+ strlcpy(p, root_fs_names, size);
while (*p++) {
- if (p[-1] == ',')
+ if (p[-1] == ',') {
p[-1] = '\0';
+ count++;
+ }
}
- *p = '\0';
-
- for (p = page; *p; p += strlen(p)+1)
- count++;
return count;
}
@@ -404,12 +403,16 @@ void __init mount_block_root(char *name, int flags)
scnprintf(b, BDEVNAME_SIZE, "unknown-block(%u,%u)",
MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
if (root_fs_names)
- num_fs = split_fs_names(fs_names, root_fs_names);
+ num_fs = split_fs_names(fs_names, PAGE_SIZE, root_fs_names);
else
num_fs = list_bdev_fs_names(fs_names, PAGE_SIZE);
retry:
for (i = 0, p = fs_names; i < num_fs; i++, p += strlen(p)+1) {
- int err = do_mount_root(name, p, flags, root_mount_data);
+ int err;
+
+ if (!*p)
+ continue;
+ err = do_mount_root(name, p, flags, root_mount_data);
switch (err) {
case 0:
goto out;
@@ -543,19 +546,18 @@ static int __init mount_nodev_root(void)
fs_names = (void *)__get_free_page(GFP_KERNEL);
if (!fs_names)
return -EINVAL;
- num_fs = split_fs_names(fs_names, root_fs_names);
+ num_fs = split_fs_names(fs_names, PAGE_SIZE, root_fs_names);
for (i = 0, fstype = fs_names; i < num_fs;
i++, fstype += strlen(fstype) + 1) {
+ if (!*fstype)
+ continue;
if (!fs_is_nodev(fstype))
continue;
err = do_mount_root(root_device_name, fstype, root_mountflags,
root_mount_data);
if (!err)
break;
- if (err != -EACCES && err != -EINVAL)
- panic("VFS: Unable to mount root \"%s\" (%s), err=%d\n",
- root_device_name, fstype, err);
}
free_page((unsigned long)fs_names);
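The "zero-length strings" caveat follows directly from the counting scheme:
count starts at 1 and grows per comma, so consecutive commas yield empty
names, which both call sites above now skip. A standalone model of the loop:

	#include <stdio.h>
	#include <string.h>

	/* Mirrors split_fs_names(): ',' -> '\0', count = separators + 1,
	 * so "ext4,,xfs" yields three names, the middle one empty.
	 */
	int main(void)
	{
		char page[64];
		int count = 1;
		char *p = page;

		strcpy(page, "ext4,,xfs");
		while (*p++) {
			if (p[-1] == ',') {
				p[-1] = '\0';
				count++;
			}
		}
		for (p = page; count--; p += strlen(p) + 1)
			printf("'%s'\n", p);	/* 'ext4', '', 'xfs' */
		return 0;
	}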
diff --git a/init/main.c b/init/main.c
index 3f7216934441..3c4054a95545 100644
--- a/init/main.c
+++ b/init/main.c
@@ -382,6 +382,7 @@ static char * __init xbc_make_cmdline(const char *key)
ret = xbc_snprint_cmdline(new_cmdline, len + 1, root);
if (ret < 0 || ret > len) {
pr_err("Failed to print extra kernel cmdline.\n");
+ memblock_free_ptr(new_cmdline, len + 1);
return NULL;
}
@@ -1242,7 +1243,7 @@ trace_initcall_start_cb(void *data, initcall_t fn)
{
ktime_t *calltime = (ktime_t *)data;
- printk(KERN_DEBUG "calling %pS @ %i irqs_disabled() %d\n", fn, task_pid_nr(current), irqs_disabled());
+ printk(KERN_DEBUG "calling %pS @ %i\n", fn, task_pid_nr(current));
*calltime = ktime_get();
}
@@ -1256,8 +1257,8 @@ trace_initcall_finish_cb(void *data, initcall_t fn, int ret)
rettime = ktime_get();
delta = ktime_sub(rettime, *calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
- printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs, irqs_disabled() %d\n",
- fn, ret, duration, irqs_disabled());
+ printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n",
+ fn, ret, duration);
}
static ktime_t initcall_calltime;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 8dd73a64f921..b1cb1dbf7417 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -657,7 +657,7 @@ static int audit_filter_rules(struct task_struct *tsk,
result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
break;
case AUDIT_SADDR_FAM:
- if (ctx->sockaddr)
+ if (ctx && ctx->sockaddr)
result = audit_comparator(ctx->sockaddr->ss_family,
f->op, f->val);
break;
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index cebd4fb06d19..5e1ccfae916b 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -645,7 +645,7 @@ static const struct bpf_iter_seq_info iter_seq_info = {
.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
};
-static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
+static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
void *callback_ctx, u64 flags)
{
u32 i, key, num_elems = 0;
@@ -668,9 +668,8 @@ static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
val = array->value + array->elem_size * i;
num_elems++;
key = i;
- ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
- (u64)(long)&key, (u64)(long)val,
- (u64)(long)callback_ctx, 0);
+ ret = callback_fn((u64)(long)map, (u64)(long)&key,
+ (u64)(long)val, (u64)(long)callback_ctx, 0);
/* return value: 0 - continue, 1 - stop and return */
if (ret)
break;
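The conversion works because every BPF-side callback shares one calling
convention, captured by the bpf_callback_t typedef; presumably it is declared
along these lines (hedged; the exact header may differ):

	/* Five u64 arguments, u64 return: the common BPF calling convention,
	 * so one typed pointer replaces the per-call BPF_CAST_CALL() casts.
	 */
	typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);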
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index d6731c32864e..9abcc33f02cf 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -368,6 +368,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
const struct btf_type *mtype, *ptype;
struct bpf_prog *prog;
u32 moff;
+ u32 flags;
moff = btf_member_bit_offset(t, member) / 8;
ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
@@ -431,10 +432,12 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
+ flags = st_ops->func_models[i].ret_size > 0 ?
+ BPF_TRAMP_F_RET_FENTRY_RET : 0;
err = arch_prepare_bpf_trampoline(NULL, image,
st_map->image + PAGE_SIZE,
- &st_ops->func_models[i], 0,
- tprogs, NULL);
+ &st_ops->func_models[i],
+ flags, tprogs, NULL);
if (err < 0)
goto reset_unlock;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 9f4636d021b1..ea8a468dbded 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -827,7 +827,7 @@ int bpf_jit_charge_modmem(u32 pages)
{
if (atomic_long_add_return(pages, &bpf_jit_current) >
(bpf_jit_limit >> PAGE_SHIFT)) {
- if (!capable(CAP_SYS_ADMIN)) {
+ if (!bpf_capable()) {
atomic_long_sub(pages, &bpf_jit_current);
return -EPERM;
}
@@ -2357,6 +2357,11 @@ const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
return NULL;
}
+const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
+{
+ return NULL;
+}
+
u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 32471ba02708..d29af9988f37 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -668,7 +668,7 @@ static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
(void *(*)(struct bpf_map *map, void *key))NULL));
- *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
+ *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
offsetof(struct htab_elem, key) +
@@ -709,7 +709,7 @@ static int htab_lru_map_gen_lookup(struct bpf_map *map,
BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
(void *(*)(struct bpf_map *map, void *key))NULL));
- *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
+ *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
offsetof(struct htab_elem, lru_node) +
@@ -2049,7 +2049,7 @@ static const struct bpf_iter_seq_info iter_seq_info = {
.seq_priv_size = sizeof(struct bpf_iter_seq_hash_map_info),
};
-static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn,
+static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
void *callback_ctx, u64 flags)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
@@ -2089,9 +2089,8 @@ static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn,
val = elem->key + roundup_key_size;
}
num_elems++;
- ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
- (u64)(long)key, (u64)(long)val,
- (u64)(long)callback_ctx, 0);
+ ret = callback_fn((u64)(long)map, (u64)(long)key,
+ (u64)(long)val, (u64)(long)callback_ctx, 0);
/* return value: 0 - continue, 1 - stop and return */
if (ret) {
rcu_read_unlock();
@@ -2397,7 +2396,7 @@ static int htab_of_map_gen_lookup(struct bpf_map *map,
BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
(void *(*)(struct bpf_map *map, void *key))NULL));
- *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
+ *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
offsetof(struct htab_elem, key) +
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 9aabf84afd4b..1ffd469c217f 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -979,15 +979,13 @@ out:
return err;
}
-#define MAX_SNPRINTF_VARARGS 12
-
BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
const void *, data, u32, data_len)
{
int err, num_args;
u32 *bin_args;
- if (data_len % 8 || data_len > MAX_SNPRINTF_VARARGS * 8 ||
+ if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
(data_len && !data))
return -EINVAL;
num_args = data_len / 8;
@@ -1058,7 +1056,7 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
struct bpf_map *map = t->map;
void *value = t->value;
- void *callback_fn;
+ bpf_callback_t callback_fn;
void *key;
u32 idx;
@@ -1083,8 +1081,7 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
key = value - round_up(map->key_size, 8);
}
- BPF_CAST_CALL(callback_fn)((u64)(long)map, (u64)(long)key,
- (u64)(long)value, 0, 0);
+ callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
/* The verifier checked that return value is zero. */
this_cpu_write(hrtimer_running, NULL);
@@ -1437,6 +1434,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
return &bpf_snprintf_proto;
case BPF_FUNC_task_pt_regs:
return &bpf_task_pt_regs_proto;
+ case BPF_FUNC_trace_vprintk:
+ return bpf_get_trace_vprintk_proto();
default:
return NULL;
}
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 09a3fd97d329..6e75bbee39f0 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -63,7 +63,8 @@ static inline int stack_map_data_size(struct bpf_map *map)
static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
- u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
+ u64 elem_size = sizeof(struct stack_map_bucket) +
+ (u64)smap->map.value_size;
int err;
smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
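The widening matters because value_size and max_entries are both u32 and the
product feeds an allocation size; a userspace model of the wrap the cast
prevents:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t elem_size = UINT32_C(0x20000010); /* bucket + value */
		uint32_t max_entries = 8;

		/* The 32-bit product wraps to 128; the u64 product is the
		 * size the allocator actually needs.
		 */
		printf("u32: %u\n", elem_size * max_entries);
		printf("u64: %llu\n", (unsigned long long)
		       ((uint64_t)elem_size * max_entries));
		return 0;
	}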
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e76b55917905..1433752db740 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -612,6 +612,20 @@ static const char *kernel_type_name(const struct btf* btf, u32 id)
return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}
+/* The reg state of a pointer or a bounded scalar was saved when
+ * it was spilled to the stack.
+ */
+static bool is_spilled_reg(const struct bpf_stack_state *stack)
+{
+ return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
+}
+
+static void scrub_spilled_slot(u8 *stype)
+{
+ if (*stype != STACK_INVALID)
+ *stype = STACK_MISC;
+}
+
static void print_verifier_state(struct bpf_verifier_env *env,
const struct bpf_func_state *state)
{
@@ -717,7 +731,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
continue;
verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
print_liveness(env, state->stack[i].spilled_ptr.live);
- if (state->stack[i].slot_type[0] == STACK_SPILL) {
+ if (is_spilled_reg(&state->stack[i])) {
reg = &state->stack[i].spilled_ptr;
t = reg->type;
verbose(env, "=%s", reg_type_str[t]);
@@ -1730,7 +1744,7 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id)
desc = &tab->descs[tab->nr_descs++];
desc->func_id = func_id;
- desc->imm = BPF_CAST_CALL(addr) - __bpf_call_base;
+ desc->imm = BPF_CALL_IMM(addr);
err = btf_distill_func_proto(&env->log, btf_vmlinux,
func_proto, func_name,
&desc->func_model);
@@ -2373,7 +2387,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
reg->precise = true;
}
for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
- if (func->stack[j].slot_type[0] != STACK_SPILL)
+ if (!is_spilled_reg(&func->stack[j]))
continue;
reg = &func->stack[j].spilled_ptr;
if (reg->type != SCALAR_VALUE)
@@ -2415,7 +2429,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
}
while (spi >= 0) {
- if (func->stack[spi].slot_type[0] != STACK_SPILL) {
+ if (!is_spilled_reg(&func->stack[spi])) {
stack_mask = 0;
break;
}
@@ -2514,7 +2528,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
return 0;
}
- if (func->stack[i].slot_type[0] != STACK_SPILL) {
+ if (!is_spilled_reg(&func->stack[i])) {
stack_mask &= ~(1ull << i);
continue;
}
@@ -2626,15 +2640,21 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
}
static void save_register_state(struct bpf_func_state *state,
- int spi, struct bpf_reg_state *reg)
+ int spi, struct bpf_reg_state *reg,
+ int size)
{
int i;
state->stack[spi].spilled_ptr = *reg;
- state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+ if (size == BPF_REG_SIZE)
+ state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
+ for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
+ state->stack[spi].slot_type[i - 1] = STACK_SPILL;
- for (i = 0; i < BPF_REG_SIZE; i++)
- state->stack[spi].slot_type[i] = STACK_SPILL;
+ /* size < 8 bytes spill */
+ for (; i; i--)
+ scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
}
/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
@@ -2681,7 +2701,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
}
- if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) &&
+ if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
!register_is_null(reg) && env->bpf_capable) {
if (dst_reg != BPF_REG_FP) {
/* The backtracking logic can only recognize explicit
@@ -2694,7 +2714,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
if (err)
return err;
}
- save_register_state(state, spi, reg);
+ save_register_state(state, spi, reg, size);
} else if (reg && is_spillable_regtype(reg->type)) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
@@ -2706,16 +2726,16 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
return -EINVAL;
}
- save_register_state(state, spi, reg);
+ save_register_state(state, spi, reg, size);
} else {
u8 type = STACK_MISC;
/* regular write of data into stack destroys any spilled ptr */
state->stack[spi].spilled_ptr.type = NOT_INIT;
/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
- if (state->stack[spi].slot_type[0] == STACK_SPILL)
+ if (is_spilled_reg(&state->stack[spi]))
for (i = 0; i < BPF_REG_SIZE; i++)
- state->stack[spi].slot_type[i] = STACK_MISC;
+ scrub_spilled_slot(&state->stack[spi].slot_type[i]);
/* only mark the slot as written if all 8 bytes were written
* otherwise read propagation may incorrectly stop too soon
@@ -2918,23 +2938,50 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
struct bpf_func_state *state = vstate->frame[vstate->curframe];
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
struct bpf_reg_state *reg;
- u8 *stype;
+ u8 *stype, type;
stype = reg_state->stack[spi].slot_type;
reg = &reg_state->stack[spi].spilled_ptr;
- if (stype[0] == STACK_SPILL) {
+ if (is_spilled_reg(&reg_state->stack[spi])) {
if (size != BPF_REG_SIZE) {
+ u8 scalar_size = 0;
+
if (reg->type != SCALAR_VALUE) {
verbose_linfo(env, env->insn_idx, "; ");
verbose(env, "invalid size of register fill\n");
return -EACCES;
}
- if (dst_regno >= 0) {
+
+ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+ if (dst_regno < 0)
+ return 0;
+
+ for (i = BPF_REG_SIZE; i > 0 && stype[i - 1] == STACK_SPILL; i--)
+ scalar_size++;
+
+ if (!(off % BPF_REG_SIZE) && size == scalar_size) {
+ /* The earlier check_reg_arg() has decided the
+ * subreg_def for this insn. Save it first.
+ */
+ s32 subreg_def = state->regs[dst_regno].subreg_def;
+
+ state->regs[dst_regno] = *reg;
+ state->regs[dst_regno].subreg_def = subreg_def;
+ } else {
+ for (i = 0; i < size; i++) {
+ type = stype[(slot - i) % BPF_REG_SIZE];
+ if (type == STACK_SPILL)
+ continue;
+ if (type == STACK_MISC)
+ continue;
+ verbose(env, "invalid read from stack off %d+%d size %d\n",
+ off, i, size);
+ return -EACCES;
+ }
mark_reg_unknown(env, state->regs, dst_regno);
- state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
}
- mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+ state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
return 0;
}
for (i = 1; i < BPF_REG_SIZE; i++) {
@@ -2965,8 +3012,6 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
}
mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
} else {
- u8 type;
-
for (i = 0; i < size; i++) {
type = stype[(slot - i) % BPF_REG_SIZE];
if (type == STACK_MISC)
@@ -4514,17 +4559,17 @@ static int check_stack_range_initialized(
goto mark;
}
- if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+ if (is_spilled_reg(&state->stack[spi]) &&
state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID)
goto mark;
- if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+ if (is_spilled_reg(&state->stack[spi]) &&
(state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
env->allow_ptr_leaks)) {
if (clobber) {
__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
for (j = 0; j < BPF_REG_SIZE; j++)
- state->stack[spi].slot_type[j] = STACK_MISC;
+ scrub_spilled_slot(&state->stack[spi].slot_type[j]);
}
goto mark;
}
@@ -10356,9 +10401,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
* return false to continue verification of this path
*/
return false;
- if (i % BPF_REG_SIZE)
+ if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
continue;
- if (old->stack[spi].slot_type[0] != STACK_SPILL)
+ if (!is_spilled_reg(&old->stack[spi]))
continue;
if (!regsafe(env, &old->stack[spi].spilled_ptr,
&cur->stack[spi].spilled_ptr, idmap))
@@ -10565,7 +10610,7 @@ static int propagate_precision(struct bpf_verifier_env *env,
}
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
- if (state->stack[i].slot_type[0] != STACK_SPILL)
+ if (!is_spilled_reg(&state->stack[i]))
continue;
state_reg = &state->stack[i].spilled_ptr;
if (state_reg->type != SCALAR_VALUE ||
@@ -12469,8 +12514,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
if (!bpf_pseudo_call(insn))
continue;
subprog = insn->off;
- insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
- __bpf_call_base;
+ insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
}
/* we use the aux data to keep a list of the start addresses
@@ -12950,32 +12994,25 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
patch_map_ops_generic:
switch (insn->imm) {
case BPF_FUNC_map_lookup_elem:
- insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
- __bpf_call_base;
+ insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
continue;
case BPF_FUNC_map_update_elem:
- insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
- __bpf_call_base;
+ insn->imm = BPF_CALL_IMM(ops->map_update_elem);
continue;
case BPF_FUNC_map_delete_elem:
- insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
- __bpf_call_base;
+ insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
continue;
case BPF_FUNC_map_push_elem:
- insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
- __bpf_call_base;
+ insn->imm = BPF_CALL_IMM(ops->map_push_elem);
continue;
case BPF_FUNC_map_pop_elem:
- insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
- __bpf_call_base;
+ insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
continue;
case BPF_FUNC_map_peek_elem:
- insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
- __bpf_call_base;
+ insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
continue;
case BPF_FUNC_redirect_map:
- insn->imm = BPF_CAST_CALL(ops->map_redirect) -
- __bpf_call_base;
+ insn->imm = BPF_CALL_IMM(ops->map_redirect);
continue;
}
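All of these rewrites lean on BPF_CALL_IMM() folding the subtraction that
every call site previously spelled out; presumably it expands to something
like the following (hedged; the exact definition lives in the bpf headers):

	/* Relative immediate for a BPF call instruction: the helper's offset
	 * from the common call base, formerly open-coded as
	 * BPF_CAST_CALL(x) - __bpf_call_base at each site.
	 */
	#define BPF_CALL_IMM(x)	((void *)(x) - (void *)__bpf_call_base)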
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 8afa8690d288..570b0c97392a 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -6574,22 +6574,29 @@ int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
{
- /* Don't associate the sock with unrelated interrupted task's cgroup. */
- if (in_interrupt())
- return;
+ struct cgroup *cgroup;
rcu_read_lock();
+ /* Don't associate the sock with unrelated interrupted task's cgroup. */
+ if (in_interrupt()) {
+ cgroup = &cgrp_dfl_root.cgrp;
+ cgroup_get(cgroup);
+ goto out;
+ }
+
while (true) {
struct css_set *cset;
cset = task_css_set(current);
if (likely(cgroup_tryget(cset->dfl_cgrp))) {
- skcd->cgroup = cset->dfl_cgrp;
- cgroup_bpf_get(cset->dfl_cgrp);
+ cgroup = cset->dfl_cgrp;
break;
}
cpu_relax();
}
+out:
+ skcd->cgroup = cgroup;
+ cgroup_bpf_get(cgroup);
rcu_read_unlock();
}
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index df1ccf4558f8..2a9695ccb65f 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -311,17 +311,19 @@ static struct cpuset top_cpuset = {
if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
/*
- * There are two global locks guarding cpuset structures - cpuset_mutex and
+ * There are two global locks guarding cpuset structures - cpuset_rwsem and
* callback_lock. We also require taking task_lock() when dereferencing a
* task's cpuset pointer. See "The task_lock() exception", at the end of this
- * comment.
+ * comment. The cpuset code uses only the cpuset_rwsem write lock. Other
+ * kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to
+ * prevent changes to cpuset structures.
*
* A task must hold both locks to modify cpusets. If a task holds
- * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
+ * cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it
* is the only task able to also acquire callback_lock and be able to
* modify cpusets. It can perform various checks on the cpuset structure
* first, knowing nothing will change. It can also allocate memory while
- * just holding cpuset_mutex. While it is performing these checks, various
+ * just holding cpuset_rwsem. While it is performing these checks, various
* callback routines can briefly acquire callback_lock to query cpusets.
* Once it is ready to make the changes, it takes callback_lock, blocking
* everyone else.
@@ -393,7 +395,7 @@ static inline bool is_in_v2_mode(void)
* One way or another, we guarantee to return some non-empty subset
* of cpu_online_mask.
*
- * Call with callback_lock or cpuset_mutex held.
+ * Call with callback_lock or cpuset_rwsem held.
*/
static void guarantee_online_cpus(struct task_struct *tsk,
struct cpumask *pmask)
@@ -435,7 +437,7 @@ out_unlock:
* One way or another, we guarantee to return some non-empty subset
* of node_states[N_MEMORY].
*
- * Call with callback_lock or cpuset_mutex held.
+ * Call with callback_lock or cpuset_rwsem held.
*/
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
@@ -447,7 +449,7 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
/*
* update task's spread flag if cpuset's page/slab spread flag is set
*
- * Call with callback_lock or cpuset_mutex held.
+ * Call with callback_lock or cpuset_rwsem held.
*/
static void cpuset_update_task_spread_flag(struct cpuset *cs,
struct task_struct *tsk)
@@ -468,7 +470,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
*
* One cpuset is a subset of another if all its allowed CPUs and
* Memory Nodes are a subset of the other, and its exclusive flags
- * are only set if the other's are set. Call holding cpuset_mutex.
+ * are only set if the other's are set. Call holding cpuset_rwsem.
*/
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
@@ -577,7 +579,7 @@ static inline void free_cpuset(struct cpuset *cs)
* If we replaced the flag and mask values of the current cpuset
* (cur) with those values in the trial cpuset (trial), would
* our various subset and exclusive rules still be valid? Presumes
- * cpuset_mutex held.
+ * cpuset_rwsem held.
*
* 'cur' is the address of an actual, in-use cpuset. Operations
* such as list traversal that depend on the actual address of the
@@ -700,7 +702,7 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
rcu_read_unlock();
}
-/* Must be called with cpuset_mutex held. */
+/* Must be called with cpuset_rwsem held. */
static inline int nr_cpusets(void)
{
/* jump label reference count + the top-level cpuset */
@@ -726,7 +728,7 @@ static inline int nr_cpusets(void)
* domains when operating in the severe memory shortage situations
* that could cause allocation failures below.
*
- * Must be called with cpuset_mutex held.
+ * Must be called with cpuset_rwsem held.
*
* The three key local variables below are:
* cp - cpuset pointer, used (together with pos_css) to perform a
@@ -1005,7 +1007,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
* 'cpus' is removed, then call this routine to rebuild the
* scheduler's dynamic sched domains.
*
- * Call with cpuset_mutex held. Takes cpus_read_lock().
+ * Call with cpuset_rwsem held. Takes cpus_read_lock().
*/
static void rebuild_sched_domains_locked(void)
{
@@ -1078,7 +1080,7 @@ void rebuild_sched_domains(void)
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
*
* Iterate through each task of @cs updating its cpus_allowed to the
- * effective cpuset's. As this function is called with cpuset_mutex held,
+ * effective cpuset's. As this function is called with cpuset_rwsem held,
* cpuset membership stays stable.
*/
static void update_tasks_cpumask(struct cpuset *cs)
@@ -1347,7 +1349,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
*
* On legacy hierarchy, effective_cpus will be the same with cpu_allowed.
*
- * Called with cpuset_mutex held
+ * Called with cpuset_rwsem held
*/
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
{
@@ -1704,12 +1706,12 @@ static void *cpuset_being_rebound;
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
*
* Iterate through each task of @cs updating its mems_allowed to the
- * effective cpuset's. As this function is called with cpuset_mutex held,
+ * effective cpuset's. As this function is called with cpuset_rwsem held,
* cpuset membership stays stable.
*/
static void update_tasks_nodemask(struct cpuset *cs)
{
- static nodemask_t newmems; /* protected by cpuset_mutex */
+ static nodemask_t newmems; /* protected by cpuset_rwsem */
struct css_task_iter it;
struct task_struct *task;
@@ -1722,7 +1724,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
* take while holding tasklist_lock. Forks can happen - the
* mpol_dup() cpuset_being_rebound check will catch such forks,
* and rebind their vma mempolicies too. Because we still hold
- * the global cpuset_mutex, we know that no other rebind effort
+ * the global cpuset_rwsem, we know that no other rebind effort
* will be contending for the global variable cpuset_being_rebound.
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes.
@@ -1768,7 +1770,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
*
* On legacy hierarchy, effective_mems will be the same with mems_allowed.
*
- * Called with cpuset_mutex held
+ * Called with cpuset_rwsem held
*/
static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
{
@@ -1821,7 +1823,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
* mempolicies and if the cpuset is marked 'memory_migrate',
* migrate the tasks pages to the new memory.
*
- * Call with cpuset_mutex held. May take callback_lock during call.
+ * Call with cpuset_rwsem held. May take callback_lock during call.
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
* lock each such tasks mm->mmap_lock, scan its vma's and rebind
* their mempolicies to the cpusets new mems_allowed.
@@ -1911,7 +1913,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
* @cs: the cpuset in which each task's spread flags needs to be changed
*
* Iterate through each task of @cs updating its spread flags. As this
- * function is called with cpuset_mutex held, cpuset membership stays
+ * function is called with cpuset_rwsem held, cpuset membership stays
* stable.
*/
static void update_tasks_flags(struct cpuset *cs)
@@ -1931,7 +1933,7 @@ static void update_tasks_flags(struct cpuset *cs)
* cs: the cpuset to update
* turning_on: whether the flag is being set or cleared
*
- * Call with cpuset_mutex held.
+ * Call with cpuset_rwsem held.
*/
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
@@ -1980,7 +1982,7 @@ out:
* cs: the cpuset to update
* new_prs: new partition root state
*
- * Call with cpuset_mutex held.
+ * Call with cpuset_rwsem held.
*/
static int update_prstate(struct cpuset *cs, int new_prs)
{
@@ -2167,7 +2169,7 @@ static int fmeter_getrate(struct fmeter *fmp)
static struct cpuset *cpuset_attach_old_cs;
-/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
+/* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
static int cpuset_can_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
@@ -2219,7 +2221,7 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
}
/*
- * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach()
+ * Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach()
* but we can't allocate it dynamically there. Define it global and
* allocate from cpuset_init().
*/
@@ -2227,7 +2229,7 @@ static cpumask_var_t cpus_attach;
static void cpuset_attach(struct cgroup_taskset *tset)
{
- /* static buf protected by cpuset_mutex */
+ /* static buf protected by cpuset_rwsem */
static nodemask_t cpuset_attach_nodemask_to;
struct task_struct *task;
struct task_struct *leader;
@@ -2417,7 +2419,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
* operation like this one can lead to a deadlock through kernfs
* active_ref protection. Let's break the protection. Losing the
* protection is okay as we check whether @cs is online after
- * grabbing cpuset_mutex anyway. This only happens on the legacy
+ * grabbing cpuset_rwsem anyway. This only happens on the legacy
* hierarchies.
*/
css_get(&cs->css);
@@ -3672,7 +3674,7 @@ void __cpuset_memory_pressure_bump(void)
* - Used for /proc/<pid>/cpuset.
* - No need to task_lock(tsk) on this tsk->cpuset reference, as it
* doesn't really matter if tsk->cpuset changes after we read it,
- * and we take cpuset_mutex, keeping cpuset_attach() from changing it
+ * and we take cpuset_rwsem, keeping cpuset_attach() from changing it
* anyway.
*/
int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
diff --git a/kernel/cred.c b/kernel/cred.c
index f784e08c2fbd..1ae0b4948a5a 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -225,8 +225,6 @@ struct cred *cred_alloc_blank(void)
#ifdef CONFIG_DEBUG_CREDENTIALS
new->magic = CRED_MAGIC;
#endif
- new->ucounts = get_ucounts(&init_ucounts);
-
if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
goto error;
@@ -501,7 +499,7 @@ int commit_creds(struct cred *new)
inc_rlimit_ucounts(new->ucounts, UCOUNT_RLIMIT_NPROC, 1);
rcu_assign_pointer(task->real_cred, new);
rcu_assign_pointer(task->cred, new);
- if (new->user != old->user)
+ if (new->user != old->user || new->user_ns != old->user_ns)
dec_rlimit_ucounts(old->ucounts, UCOUNT_RLIMIT_NPROC, 1);
alter_cred_subscribers(old, -2);
@@ -669,7 +667,7 @@ int set_cred_ucounts(struct cred *new)
{
struct task_struct *task = current;
const struct cred *old = task->real_cred;
- struct ucounts *old_ucounts = new->ucounts;
+ struct ucounts *new_ucounts, *old_ucounts = new->ucounts;
if (new->user == old->user && new->user_ns == old->user_ns)
return 0;
@@ -681,9 +679,10 @@ int set_cred_ucounts(struct cred *new)
if (old_ucounts && old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
return 0;
- if (!(new->ucounts = alloc_ucounts(new->user_ns, new->euid)))
+ if (!(new_ucounts = alloc_ucounts(new->user_ns, new->euid)))
return -EAGAIN;
+ new->ucounts = new_ucounts;
if (old_ucounts)
put_ucounts(old_ucounts);
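The temporary is what makes the error path safe: new->ucounts is only
overwritten after the allocation has succeeded, so an -EAGAIN return leaves
the cred untouched. The same shape in miniature (names are illustrative):

	#include <errno.h>
	#include <stdlib.h>

	struct holder { char *buf; };

	/* Publish the replacement only once it exists, so failure leaves
	 * the old state intact, mirroring the set_cred_ucounts() fix.
	 */
	static int holder_regrow(struct holder *h, size_t len)
	{
		char *fresh = malloc(len);

		if (!fresh)
			return -EAGAIN;

		free(h->buf);
		h->buf = fresh;
		return 0;
	}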
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 6c90c69e5311..7a14ca29c377 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -552,7 +552,7 @@ static void active_cacheline_remove(struct dma_debug_entry *entry)
* Wrapper function for adding an entry to the hash.
* This function takes care of locking itself.
*/
-static void add_dma_entry(struct dma_debug_entry *entry)
+static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
{
struct hash_bucket *bucket;
unsigned long flags;
@@ -566,8 +566,9 @@ static void add_dma_entry(struct dma_debug_entry *entry)
if (rc == -ENOMEM) {
pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
global_disable = true;
- } else if (rc == -EEXIST) {
- pr_err("cacheline tracking EEXIST, overlapping mappings aren't supported\n");
+ } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+ err_printk(entry->dev, entry,
+ "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
}
}
@@ -1190,7 +1191,8 @@ void debug_dma_map_single(struct device *dev, const void *addr,
EXPORT_SYMBOL(debug_dma_map_single);
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
- size_t size, int direction, dma_addr_t dma_addr)
+ size_t size, int direction, dma_addr_t dma_addr,
+ unsigned long attrs)
{
struct dma_debug_entry *entry;
@@ -1221,7 +1223,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
check_for_illegal_area(dev, addr, size);
}
- add_dma_entry(entry);
+ add_dma_entry(entry, attrs);
}
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -1279,7 +1281,8 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
}
void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int mapped_ents, int direction)
+ int nents, int mapped_ents, int direction,
+ unsigned long attrs)
{
struct dma_debug_entry *entry;
struct scatterlist *s;
@@ -1288,6 +1291,12 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
if (unlikely(dma_debug_disabled()))
return;
+ for_each_sg(sg, s, nents, i) {
+ check_for_stack(dev, sg_page(s), s->offset);
+ if (!PageHighMem(sg_page(s)))
+ check_for_illegal_area(dev, sg_virt(s), s->length);
+ }
+
for_each_sg(sg, s, mapped_ents, i) {
entry = dma_entry_alloc();
if (!entry)
@@ -1303,15 +1312,9 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
entry->sg_call_ents = nents;
entry->sg_mapped_ents = mapped_ents;
- check_for_stack(dev, sg_page(s), s->offset);
-
- if (!PageHighMem(sg_page(s))) {
- check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
- }
-
check_sg_segment(dev, s);
- add_dma_entry(entry);
+ add_dma_entry(entry, attrs);
}
}
@@ -1367,7 +1370,8 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
}
void debug_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t dma_addr, void *virt)
+ dma_addr_t dma_addr, void *virt,
+ unsigned long attrs)
{
struct dma_debug_entry *entry;
@@ -1397,7 +1401,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
else
entry->pfn = page_to_pfn(virt_to_page(virt));
- add_dma_entry(entry);
+ add_dma_entry(entry, attrs);
}
void debug_dma_free_coherent(struct device *dev, size_t size,
@@ -1428,7 +1432,8 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
}
void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
- int direction, dma_addr_t dma_addr)
+ int direction, dma_addr_t dma_addr,
+ unsigned long attrs)
{
struct dma_debug_entry *entry;
@@ -1448,7 +1453,7 @@ void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
entry->direction = direction;
entry->map_err_type = MAP_ERR_NOT_CHECKED;
- add_dma_entry(entry);
+ add_dma_entry(entry, attrs);
}
void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
diff --git a/kernel/dma/debug.h b/kernel/dma/debug.h
index 83643b3010b2..f525197d3cae 100644
--- a/kernel/dma/debug.h
+++ b/kernel/dma/debug.h
@@ -11,26 +11,30 @@
#ifdef CONFIG_DMA_API_DEBUG
extern void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
- int direction, dma_addr_t dma_addr);
+ int direction, dma_addr_t dma_addr,
+ unsigned long attrs);
extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, int direction);
extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int mapped_ents, int direction);
+ int nents, int mapped_ents, int direction,
+ unsigned long attrs);
extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, int dir);
extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t dma_addr, void *virt);
+ dma_addr_t dma_addr, void *virt,
+ unsigned long attrs);
extern void debug_dma_free_coherent(struct device *dev, size_t size,
void *virt, dma_addr_t addr);
extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
size_t size, int direction,
- dma_addr_t dma_addr);
+ dma_addr_t dma_addr,
+ unsigned long attrs);
extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
size_t size, int direction);
@@ -53,7 +57,8 @@ extern void debug_dma_sync_sg_for_device(struct device *dev,
#else /* CONFIG_DMA_API_DEBUG */
static inline void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
- int direction, dma_addr_t dma_addr)
+ int direction, dma_addr_t dma_addr,
+ unsigned long attrs)
{
}
@@ -63,7 +68,8 @@ static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
}
static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int mapped_ents, int direction)
+ int nents, int mapped_ents, int direction,
+ unsigned long attrs)
{
}
@@ -74,7 +80,8 @@ static inline void debug_dma_unmap_sg(struct device *dev,
}
static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t dma_addr, void *virt)
+ dma_addr_t dma_addr, void *virt,
+ unsigned long attrs)
{
}
@@ -85,7 +92,8 @@ static inline void debug_dma_free_coherent(struct device *dev, size_t size,
static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
size_t size, int direction,
- dma_addr_t dma_addr)
+ dma_addr_t dma_addr,
+ unsigned long attrs)
{
}
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 7ee5284bff58..8349a9f2c345 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -156,7 +156,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
else
addr = ops->map_page(dev, page, offset, size, dir, attrs);
- debug_dma_map_page(dev, page, offset, size, dir, addr);
+ debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
return addr;
}
@@ -195,7 +195,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
ents = ops->map_sg(dev, sg, nents, dir, attrs);
if (ents > 0)
- debug_dma_map_sg(dev, sg, nents, ents, dir);
+ debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
ents != -EIO))
return -EIO;
@@ -206,7 +206,8 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
/**
* dma_map_sg_attrs - Map the given buffer for DMA
* @dev: The device for which to perform the DMA operation
- * @sg: The sg_table object describing the buffer
+ * @sg: The sg_table object describing the buffer
+ * @nents: Number of entries to map
* @dir: DMA direction
* @attrs: Optional DMA attributes for the map operation
*
@@ -248,12 +249,12 @@ EXPORT_SYMBOL(dma_map_sg_attrs);
* Returns 0 on success or a negative error code on error. The following
* error codes are supported with the given meaning:
*
- * -EINVAL - An invalid argument, unaligned access or other error
- * in usage. Will not succeed if retried.
- * -ENOMEM - Insufficient resources (like memory or IOVA space) to
- * complete the mapping. Should succeed if retried later.
- * -EIO - Legacy error code with an unknown meaning. eg. this is
- * returned if a lower level call returned DMA_MAPPING_ERROR.
+ * -EINVAL An invalid argument, unaligned access or other error
+ * in usage. Will not succeed if retried.
+ * -ENOMEM Insufficient resources (like memory or IOVA space) to
+ * complete the mapping. Should succeed if retried later.
+ * -EIO Legacy error code with an unknown meaning. eg. this is
+ * returned if a lower level call returned DMA_MAPPING_ERROR.
*/
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs)
@@ -304,7 +305,7 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
else if (ops->map_resource)
addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
- debug_dma_map_resource(dev, phys_addr, size, dir, addr);
+ debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
return addr;
}
EXPORT_SYMBOL(dma_map_resource);
@@ -509,7 +510,7 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
else
return NULL;
- debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);
@@ -565,7 +566,7 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);
if (page)
- debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);
+ debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);
@@ -643,7 +644,7 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
if (sgt) {
sgt->nents = 1;
- debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir);
+ debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
}
return sgt;
}
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index bf16395b9e13..d5a61d565ad5 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -171,10 +171,8 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
handle_signal_work(regs, ti_work);
- if (ti_work & _TIF_NOTIFY_RESUME) {
+ if (ti_work & _TIF_NOTIFY_RESUME)
tracehook_notify_resume(regs);
- rseq_handle_notify_resume(NULL, regs);
- }
/* Architecture specific TIF work */
arch_exit_to_user_mode_work(regs, ti_work);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 349f80aa9e7d..ef29882c9dc4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3707,6 +3707,29 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
return 0;
}
+static inline bool event_update_userpage(struct perf_event *event)
+{
+ if (likely(!atomic_read(&event->mmap_count)))
+ return false;
+
+ perf_event_update_time(event);
+ perf_set_shadow_time(event, event->ctx);
+ perf_event_update_userpage(event);
+
+ return true;
+}
+
+static inline void group_update_userpage(struct perf_event *group_event)
+{
+ struct perf_event *event;
+
+ if (!event_update_userpage(group_event))
+ return;
+
+ for_each_sibling_event(event, group_event)
+ event_update_userpage(event);
+}
+
static int merge_sched_in(struct perf_event *event, void *data)
{
struct perf_event_context *ctx = event->ctx;
@@ -3725,14 +3748,15 @@ static int merge_sched_in(struct perf_event *event, void *data)
}
if (event->state == PERF_EVENT_STATE_INACTIVE) {
+ *can_add_hw = 0;
if (event->attr.pinned) {
perf_cgroup_event_disable(event, ctx);
perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+ } else {
+ ctx->rotate_necessary = 1;
+ perf_mux_hrtimer_restart(cpuctx);
+ group_update_userpage(event);
}
-
- *can_add_hw = 0;
- ctx->rotate_necessary = 1;
- perf_mux_hrtimer_restart(cpuctx);
}
return 0;
@@ -6324,6 +6348,8 @@ accounting:
ring_buffer_attach(event, rb);
+ perf_event_update_time(event);
+ perf_set_shadow_time(event, event->ctx);
perf_event_init_userpage(event);
perf_event_update_userpage(event);
} else {
@@ -10193,7 +10219,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
return;
if (ifh->nr_file_filters) {
- mm = get_task_mm(event->ctx->task);
+ mm = get_task_mm(task);
if (!mm)
goto restart;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 19e83e9b723c..4d8fc65cf38f 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -136,7 +136,7 @@ EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
* Allocates and initializes an irq_domain structure.
* Returns pointer to IRQ domain, or NULL on failure.
*/
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
irq_hw_number_t hwirq_max, int direct_max,
const struct irq_domain_ops *ops,
void *host_data)
diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c
index 4ba15088e640..88191f6e252c 100644
--- a/kernel/locking/rwbase_rt.c
+++ b/kernel/locking/rwbase_rt.c
@@ -41,6 +41,12 @@
* The risk of writer starvation is there, but the pathological use cases
* which trigger it are not necessarily the typical RT workloads.
*
+ * Fast-path orderings:
+ * The lock/unlock of readers can run in fast paths: lock and unlock are only
+ * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
+ * semantics of rwbase_rt. Atomic ops should thus provide _acquire()
+ * and _release() (or stronger).
+ *
* Common code shared between RT rw_semaphore and rwlock
*/
@@ -53,6 +59,7 @@ static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
* set.
*/
for (r = atomic_read(&rwb->readers); r < 0;) {
+ /* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */
if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
return 1;
}
@@ -162,6 +169,8 @@ static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
/*
* rwb->readers can only hit 0 when a writer is waiting for the
* active readers to leave the critical section.
+ *
+ * dec_and_test() is fully ordered, provides RELEASE.
*/
if (unlikely(atomic_dec_and_test(&rwb->readers)))
__rwbase_read_unlock(rwb, state);
@@ -172,7 +181,11 @@ static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
{
struct rt_mutex_base *rtm = &rwb->rtmutex;
- atomic_add(READER_BIAS - bias, &rwb->readers);
+ /*
+ * _release() is needed in case a reader is in the fast path, pairing
+ * with atomic_try_cmpxchg() in rwbase_read_trylock(); provides RELEASE
+ */
+ (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
rwbase_rtmutex_unlock(rtm);
}
@@ -196,6 +209,23 @@ static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}
+static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
+{
+ /* Can do without CAS because we're serialized by wait_lock. */
+ lockdep_assert_held(&rwb->rtmutex.wait_lock);
+
+ /*
+ * _acquire is needed in case the reader is in the fast path, pairing
+ * with rwbase_read_unlock(); provides ACQUIRE.
+ */
+ if (!atomic_read_acquire(&rwb->readers)) {
+ atomic_set(&rwb->readers, WRITER_BIAS);
+ return true;
+ }
+
+ return false;
+}
+
static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
unsigned int state)
{
@@ -210,34 +240,30 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
atomic_sub(READER_BIAS, &rwb->readers);
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
- /*
- * set_current_state() for rw_semaphore
- * current_save_and_set_rtlock_wait_state() for rwlock
- */
- rwbase_set_and_save_current_state(state);
+ if (__rwbase_write_trylock(rwb))
+ goto out_unlock;
- /* Block until all readers have left the critical section. */
- for (; atomic_read(&rwb->readers);) {
+ rwbase_set_and_save_current_state(state);
+ for (;;) {
/* Optimized out for rwlocks */
if (rwbase_signal_pending_state(state, current)) {
- __set_current_state(TASK_RUNNING);
+ rwbase_restore_current_state();
__rwbase_write_unlock(rwb, 0, flags);
return -EINTR;
}
+
+ if (__rwbase_write_trylock(rwb))
+ break;
+
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+ rwbase_schedule();
+ raw_spin_lock_irqsave(&rtm->wait_lock, flags);
- /*
- * Schedule and wait for the readers to leave the critical
- * section. The last reader leaving it wakes the waiter.
- */
- if (atomic_read(&rwb->readers) != 0)
- rwbase_schedule();
set_current_state(state);
- raw_spin_lock_irqsave(&rtm->wait_lock, flags);
}
-
- atomic_set(&rwb->readers, WRITER_BIAS);
rwbase_restore_current_state();
+
+out_unlock:
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
return 0;
}
@@ -253,8 +279,7 @@ static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
atomic_sub(READER_BIAS, &rwb->readers);
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
- if (!atomic_read(&rwb->readers)) {
- atomic_set(&rwb->readers, WRITER_BIAS);
+ if (__rwbase_write_trylock(rwb)) {
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
return 1;
}
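
For context, a user-space miniature (C11 atomics, not kernel code) of the ordering contract the new comments spell out: a reader's successful cmpxchg is the ACQUIRE that pairs with the writer-side RELEASE when the bias is restored.

#include <stdatomic.h>
#include <stdbool.h>

#define READER_BIAS  (-100000L)			/* illustrative value only */

static atomic_long readers = READER_BIAS;	/* < 0: no writer active */

static bool reader_trylock(void)
{
	long r = atomic_load_explicit(&readers, memory_order_relaxed);

	while (r < 0) {
		/* success order is the ACQUIRE the comment asks for */
		if (atomic_compare_exchange_weak_explicit(&readers, &r, r + 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;
	}
	return false;	/* writer holds or waits; take the slow path */
}

static void writer_unlock(long bias)
{
	/* pairs with the reader's acquire above: RELEASE the critical section */
	atomic_fetch_add_explicit(&readers, READER_BIAS - bias,
				  memory_order_release);
}
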
diff --git a/kernel/module.c b/kernel/module.c
index 40ec9a030eec..5c26a76e800b 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -4489,8 +4489,10 @@ static void cfi_init(struct module *mod)
/* Fix init/exit functions to point to the CFI jump table */
if (init)
mod->init = *init;
+#ifdef CONFIG_MODULE_UNLOAD
if (exit)
mod->exit = *exit;
+#endif
cfi_module_add(mod, module_addr_min);
#endif
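
The new guard mirrors the layout of struct module, where the exit hook only exists when unloading is configured (simplified sketch of include/linux/module.h, unrelated fields elided):

struct module {
	/* ... */
	int (*init)(void);		/* always present */
	/* ... */
#ifdef CONFIG_MODULE_UNLOAD
	/* ... */
	void (*exit)(void);		/* only with module unloading */
#endif
	/* ... */
};
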
diff --git a/kernel/rseq.c b/kernel/rseq.c
index 35f7bd0fced0..6d45ac3dae7f 100644
--- a/kernel/rseq.c
+++ b/kernel/rseq.c
@@ -282,9 +282,17 @@ void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
if (unlikely(t->flags & PF_EXITING))
return;
- ret = rseq_ip_fixup(regs);
- if (unlikely(ret < 0))
- goto error;
+
+ /*
+ * regs is NULL if and only if the caller is in a syscall path. Skip
+ * fixup and leave rseq_cs as is so that rseq_syscall() will detect and
+ * kill a misbehaving userspace on debug kernels.
+ */
+ if (regs) {
+ ret = rseq_ip_fixup(regs);
+ if (unlikely(ret < 0))
+ goto error;
+ }
if (unlikely(rseq_update_cpu_id(t)))
goto error;
return;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 49716228efb4..17a653b67006 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -173,16 +173,22 @@ static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[16];
+ unsigned int scaling;
if (cnt > 15)
cnt = 15;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
+ buf[cnt] = '\0';
- if (kstrtouint(buf, 10, &sysctl_sched_tunable_scaling))
+ if (kstrtouint(buf, 10, &scaling))
return -EINVAL;
+ if (scaling >= SCHED_TUNABLESCALING_END)
+ return -EINVAL;
+
+ sysctl_sched_tunable_scaling = scaling;
if (sched_update_scaling())
return -EINVAL;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ff69f245b939..f6a05d9b5443 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4936,8 +4936,12 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
/* update hierarchical throttle state */
walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
- if (!cfs_rq->load.weight)
+ /* Nothing to run but something to decay (on_list)? Complete the branch */
+ if (!cfs_rq->load.weight) {
+ if (cfs_rq->on_list)
+ goto unthrottle_throttle;
return;
+ }
task_delta = cfs_rq->h_nr_running;
idle_task_delta = cfs_rq->idle_h_nr_running;
diff --git a/kernel/signal.c b/kernel/signal.c
index 952741f6d0f9..487bf4f5dadf 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -426,22 +426,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
*/
rcu_read_lock();
ucounts = task_ucounts(t);
- sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
- switch (sigpending) {
- case 1:
- if (likely(get_ucounts(ucounts)))
- break;
- fallthrough;
- case LONG_MAX:
- /*
- * we need to decrease the ucount in the userns tree on any
- * failure to avoid counts leaking.
- */
- dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
- rcu_read_unlock();
- return NULL;
- }
+ sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
rcu_read_unlock();
+ if (!sigpending)
+ return NULL;
if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
@@ -450,8 +438,7 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
}
if (unlikely(q == NULL)) {
- if (dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
- put_ucounts(ucounts);
+ dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
} else {
INIT_LIST_HEAD(&q->list);
q->flags = sigqueue_flags;
@@ -464,8 +451,8 @@ static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
return;
- if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
- put_ucounts(q->ucounts);
+ if (q->ucounts) {
+ dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
q->ucounts = NULL;
}
kmem_cache_free(sigqueue_cachep, q);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index ee736861b18f..643d412ac623 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -1404,7 +1404,8 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
}
}
- *newval += now;
+ if (*newval)
+ *newval += now;
}
/*
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index c221e4c3f625..fa91f398f28b 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1605,6 +1605,14 @@ static int blk_trace_remove_queue(struct request_queue *q)
if (bt == NULL)
return -EINVAL;
+ if (bt->trace_state == Blktrace_running) {
+ bt->trace_state = Blktrace_stopped;
+ spin_lock_irq(&running_trace_lock);
+ list_del_init(&bt->running_list);
+ spin_unlock_irq(&running_trace_lock);
+ relay_flush(bt->rchan);
+ }
+
put_probe_ref();
synchronize_rcu();
blk_trace_free(bt);
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 067e88c3d2ee..6b3153841a33 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -398,7 +398,7 @@ static const struct bpf_func_proto bpf_trace_printk_proto = {
.arg2_type = ARG_CONST_SIZE,
};
-const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
+static void __set_printk_clr_event(void)
{
/*
* This program might be calling bpf_trace_printk,
@@ -410,11 +410,57 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
*/
if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
pr_warn_ratelimited("could not enable bpf_trace_printk events");
+}
+const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
+{
+ __set_printk_clr_event();
return &bpf_trace_printk_proto;
}
-#define MAX_SEQ_PRINTF_VARARGS 12
+BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
+ u32, data_len)
+{
+ static char buf[BPF_TRACE_PRINTK_SIZE];
+ unsigned long flags;
+ int ret, num_args;
+ u32 *bin_args;
+
+ if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
+ (data_len && !data))
+ return -EINVAL;
+ num_args = data_len / 8;
+
+ ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
+ if (ret < 0)
+ return ret;
+
+ raw_spin_lock_irqsave(&trace_printk_lock, flags);
+ ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
+
+ trace_bpf_trace_printk(buf);
+ raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
+
+ bpf_bprintf_cleanup();
+
+ return ret;
+}
+
+static const struct bpf_func_proto bpf_trace_vprintk_proto = {
+ .func = bpf_trace_vprintk,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_PTR_TO_MEM_OR_NULL,
+ .arg4_type = ARG_CONST_SIZE_OR_ZERO,
+};
+
+const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
+{
+ __set_printk_clr_event();
+ return &bpf_trace_vprintk_proto;
+}
BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
const void *, data, u32, data_len)
@@ -422,7 +468,7 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
int err, num_args;
u32 *bin_args;
- if (data_len & 7 || data_len > MAX_SEQ_PRINTF_VARARGS * 8 ||
+ if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
(data_len && !data))
return -EINVAL;
num_args = data_len / 8;
@@ -1162,6 +1208,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_func_ip_proto_tracing;
case BPF_FUNC_get_branch_snapshot:
return &bpf_get_branch_snapshot_proto;
+ case BPF_FUNC_trace_vprintk:
+ return bpf_get_trace_vprintk_proto();
default:
return bpf_base_func_proto(func_id);
}
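
A hedged BPF-side sketch of the new helper (hypothetical program; assumes libbpf's bpf_helpers.h declares bpf_trace_vprintk): the data argument is an array of u64 values, data_len must be a multiple of 8, and at most MAX_BPRINTF_VARARGS (12) values are accepted. Note gpl_only above: the program must carry a GPL-compatible license.

// Hypothetical BPF program, for illustration only.
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_openat")
int trace_openat(void *ctx)
{
	static const char fmt[] = "openat: a=%d b=%d c=%d d=%d\n";
	__u64 args[4] = { 1, 2, 3, 4 };		/* up to 12 u64s, len % 8 == 0 */

	bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
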
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7efbc8aaf7f6..635fbdc9d589 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -6977,7 +6977,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op;
int bit;
- bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
+ bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
if (bit < 0)
return;
@@ -7052,7 +7052,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
{
int bit;
- bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
+ bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
if (bit < 0)
return;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7896d30d90f7..bc677cd64224 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1744,16 +1744,15 @@ void latency_fsnotify(struct trace_array *tr)
irq_work_queue(&tr->fsnotify_irqwork);
}
-/*
- * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
- * defined(CONFIG_FSNOTIFY)
- */
-#else
+#elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
+ || defined(CONFIG_OSNOISE_TRACER)
#define trace_create_maxlat_file(tr, d_tracer) \
trace_create_file("tracing_max_latency", 0644, d_tracer, \
&tr->max_latency, &tracing_max_lat_fops)
+#else
+#define trace_create_maxlat_file(tr, d_tracer) do { } while (0)
#endif
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -9473,9 +9472,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
create_trace_options_dir(tr);
-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
trace_create_maxlat_file(tr, d_tracer);
-#endif
if (ftrace_create_function_files(tr, d_tracer))
MEM_FAIL(1, "Could not allocate function filter files");
diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
index 3044b762cbd7..c4a15aef36af 100644
--- a/kernel/trace/trace_eprobe.c
+++ b/kernel/trace/trace_eprobe.c
@@ -119,10 +119,58 @@ static bool eprobe_dyn_event_match(const char *system, const char *event,
int argc, const char **argv, struct dyn_event *ev)
{
struct trace_eprobe *ep = to_trace_eprobe(ev);
+ const char *slash;
- return strcmp(trace_probe_name(&ep->tp), event) == 0 &&
- (!system || strcmp(trace_probe_group_name(&ep->tp), system) == 0) &&
- trace_probe_match_command_args(&ep->tp, argc, argv);
+ /*
+ * We match the following:
+ * event only - match all eprobes with event name
+ * system and event only - match all system/event probes
+ *
+ * With more arguments given, the above must still be satisfied, plus:
+ *
+ * attached system/event - If the arg has the system and event
+ * the probe is attached to, match
+ * probes with the attachment.
+ *
+ * If any more args are given, then it requires a full match.
+ */
+
+ /*
+ * If system exists, but this probe is not part of that system
+ * do not match.
+ */
+ if (system && strcmp(trace_probe_group_name(&ep->tp), system) != 0)
+ return false;
+
+ /* Must match the event name */
+ if (strcmp(trace_probe_name(&ep->tp), event) != 0)
+ return false;
+
+ /* No arguments match all */
+ if (argc < 1)
+ return true;
+
+ /* First argument is the system/event the probe is attached to */
+
+ slash = strchr(argv[0], '/');
+ if (!slash)
+ slash = strchr(argv[0], '.');
+ if (!slash)
+ return false;
+
+ if (strncmp(ep->event_system, argv[0], slash - argv[0]))
+ return false;
+ if (strcmp(ep->event_name, slash + 1))
+ return false;
+
+ argc--;
+ argv++;
+
+ /* If there are no other args, then match */
+ if (argc < 1)
+ return true;
+
+ return trace_probe_match_command_args(&ep->tp, argc, argv);
}
static struct dyn_event_operations eprobe_dyn_event_ops = {
@@ -632,6 +680,13 @@ static int disable_eprobe(struct trace_eprobe *ep,
trace_event_trigger_enable_disable(file, 0);
update_cond_flag(file);
+
+ /* Make sure nothing is using the edata or trigger */
+ tracepoint_synchronize_unregister();
+
+ kfree(edata);
+ kfree(trigger);
+
return 0;
}
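
For illustration (hypothetical probe name; the default eprobe group is "eprobes"), the matching above lets each of these dynamic_events commands remove the same probe, at increasing specificity:

echo 'e:myprobe syscalls/sys_enter_openat' >> dynamic_events   # create
echo '-:myprobe' >> dynamic_events                             # event name only
echo '-:eprobes/myprobe' >> dynamic_events                     # system/event
echo '-:myprobe syscalls/sys_enter_openat' >> dynamic_events   # plus attachment
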
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index a6061a69aa84..f01e442716e2 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -2506,7 +2506,7 @@ find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
* events. However, for convenience, users are allowed to directly
* specify an event field in an action, which will be automatically
* converted into a variable on their behalf.
-
+ *
* If a user specifies a field on an event that isn't the event the
* histogram currently being defined (the target event histogram), the
* only way that can be accomplished is if a new hist trigger is
diff --git a/kernel/ucount.c b/kernel/ucount.c
index bb51849e6375..eb03f3c68375 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -284,6 +284,55 @@ bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v)
return (new == 0);
}
+static void do_dec_rlimit_put_ucounts(struct ucounts *ucounts,
+ struct ucounts *last, enum ucount_type type)
+{
+ struct ucounts *iter, *next;
+ for (iter = ucounts; iter != last; iter = next) {
+ long dec = atomic_long_add_return(-1, &iter->ucount[type]);
+ WARN_ON_ONCE(dec < 0);
+ next = iter->ns->ucounts;
+ if (dec == 0)
+ put_ucounts(iter);
+ }
+}
+
+void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum ucount_type type)
+{
+ do_dec_rlimit_put_ucounts(ucounts, NULL, type);
+}
+
+long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type)
+{
+ /* Caller must hold a reference to ucounts */
+ struct ucounts *iter;
+ long dec, ret = 0;
+
+ for (iter = ucounts; iter; iter = iter->ns->ucounts) {
+ long max = READ_ONCE(iter->ns->ucount_max[type]);
+ long new = atomic_long_add_return(1, &iter->ucount[type]);
+ if (new < 0 || new > max)
+ goto unwind;
+ if (iter == ucounts)
+ ret = new;
+ /*
+ * Grab an extra ucount reference for the caller when
+ * the rlimit count was previously 0.
+ */
+ if (new != 1)
+ continue;
+ if (!get_ucounts(iter))
+ goto dec_unwind;
+ }
+ return ret;
+dec_unwind:
+ dec = atomic_long_add_return(-1, &iter->ucount[type]);
+ WARN_ON_ONCE(dec < 0);
+unwind:
+ do_dec_rlimit_put_ucounts(ucounts, iter, type);
+ return 0;
+}
+
bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max)
{
struct ucounts *iter;
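
Caller-side pairing of the new helpers, mirroring the __sigqueue_alloc() usage above (a sketch, not new kernel code): a non-zero return means every level of the namespace chain was charged, and a reference was taken wherever a count went 0 -> 1.

	long count;

	rcu_read_lock();
	ucounts = task_ucounts(t);
	count = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	rcu_read_unlock();
	if (!count)
		return NULL;		/* some level was over its limit */

	/* ... allocation work ... */

	/* on failure, undo the counts and drop the 0 -> 1 references together */
	dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
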
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 33a6b4a2443d..1b3eb1e9531f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4830,8 +4830,16 @@ void show_workqueue_state(void)
for_each_pwq(pwq, wq) {
raw_spin_lock_irqsave(&pwq->pool->lock, flags);
- if (pwq->nr_active || !list_empty(&pwq->inactive_works))
+ if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+ /*
+ * Defer printing to avoid deadlocks in console
+ * drivers that queue work while holding locks
+ * also taken in their write paths.
+ */
+ printk_deferred_enter();
show_pwq(pwq);
+ printk_deferred_exit();
+ }
raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
/*
* We could be printing a lot from atomic context, e.g.
@@ -4849,7 +4857,12 @@ void show_workqueue_state(void)
raw_spin_lock_irqsave(&pool->lock, flags);
if (pool->nr_workers == pool->nr_idle)
goto next_pool;
-
+ /*
+ * Defer printing to avoid deadlocks in console drivers that
+ * queue work while holding locks also taken in their write
+ * paths.
+ */
+ printk_deferred_enter();
pr_info("pool %d:", pool->id);
pr_cont_pool_info(pool);
pr_cont(" hung=%us workers=%d",
@@ -4864,6 +4877,7 @@ void show_workqueue_state(void)
first = false;
}
pr_cont("\n");
+ printk_deferred_exit();
next_pool:
raw_spin_unlock_irqrestore(&pool->lock, flags);
/*
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d566f601780f..2a9b6dcdac4f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -346,7 +346,7 @@ config FRAME_WARN
int "Warn for stack frames larger than"
range 0 8192
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
- default 1536 if (!64BIT && PARISC)
+ default 1536 if (!64BIT && (PARISC || XTENSA))
default 1024 if (!64BIT && !PARISC)
default 2048 if 64BIT
help
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 1e2d10f86011..cdc842d090db 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -66,6 +66,7 @@ choice
config KASAN_GENERIC
bool "Generic mode"
depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
+ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
help
@@ -86,6 +87,7 @@ config KASAN_GENERIC
config KASAN_SW_TAGS
bool "Software tag-based mode"
depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
+ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
help
diff --git a/lib/Makefile b/lib/Makefile
index 5efd1b435a37..a841be5244ac 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -351,7 +351,7 @@ obj-$(CONFIG_OBJAGG) += objagg.o
obj-$(CONFIG_PLDMFW) += pldmfw/
# KUnit tests
-CFLAGS_bitfield_kunit.o := $(call cc-option,-Wframe-larger-than=10240)
+CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f2d50d69a6c3..755c10c5138c 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1972,3 +1972,39 @@ int import_single_range(int rw, void __user *buf, size_t len,
return 0;
}
EXPORT_SYMBOL(import_single_range);
+
+/**
+ * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
+ * iov_iter_save_state() was called.
+ *
+ * @i: &struct iov_iter to restore
+ * @state: state to restore from
+ *
+ * Used after iov_iter_save_state() to restore @i, if operations may
+ * have advanced it.
+ *
+ * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
+ */
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
+{
+ if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
+ !iov_iter_is_kvec(i)))
+ return;
+ i->iov_offset = state->iov_offset;
+ i->count = state->count;
+ /*
+ * For the *vec iters, nr_segs + iov is constant - if we increment
+ * the vec, then we also decrement the nr_segs count. Hence we don't
+ * need to track both of these; just one is enough and we can deduce
+ * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
+ * size, so we can just increment the iov pointer as they are unioned.
+ * ITER_BVEC _may_ be the same size on some archs, but on others it is
+ * not. Be safe and handle it separately.
+ */
+ BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
+ if (iov_iter_is_bvec(i))
+ i->bvec -= state->nr_segs - i->nr_segs;
+ else
+ i->iov -= state->nr_segs - i->nr_segs;
+ i->nr_segs = state->nr_segs;
+}
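
Typical pairing with iov_iter_save_state() (a fragment-level sketch; the consuming function is hypothetical):

	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(iter, &state);
	ret = consume_data(iter);		/* may advance iter before failing */
	if (ret == -EAGAIN) {
		iov_iter_restore(iter, &state);	/* rewind to the saved position */
		ret = consume_data(iter);	/* retry from the same offset */
	}
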
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index cdbe54b16501..e14a18af573d 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -116,8 +116,8 @@ static void kfree_at_end(struct kunit *test, const void *to_free)
/* kfree() handles NULL already, but avoid allocating a no-op cleanup. */
if (IS_ERR_OR_NULL(to_free))
return;
- kunit_alloc_and_get_resource(test, NULL, kfree_res_free, GFP_KERNEL,
- (void *)to_free);
+ kunit_alloc_resource(test, NULL, kfree_res_free, GFP_KERNEL,
+ (void *)to_free);
}
static struct kunit_suite *alloc_fake_suite(struct kunit *test,
diff --git a/lib/packing.c b/lib/packing.c
index 6ed72dccfdb5..9a72f4bbf0e2 100644
--- a/lib/packing.c
+++ b/lib/packing.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
*/
#include <linux/packing.h>
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
index 2d3eb1cb73b8..ce39ce9f3526 100644
--- a/lib/pci_iomap.c
+++ b/lib/pci_iomap.c
@@ -134,4 +134,47 @@ void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL_GPL(pci_iomap_wc);
+
+/*
+ * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
+ * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
+ * the different IOMAP ranges.
+ *
+ * But if the architecture does not use the generic iomap code, and if
+ * it has _not_ defined its own private pci_iounmap function, we define
+ * it here.
+ *
+ * NOTE! This default implementation assumes that if the architecture
+ * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
+ * be fixed to the range [PCI_IOBASE, PCI_IOBASE + IO_SPACE_LIMIT),
+ * and does not need unmapping with 'ioport_unmap()'.
+ *
+ * If you have different rules for your architecture, you need to
+ * implement your own pci_iounmap() that knows the rules for where
+ * and how IO vs MEM get mapped.
+ *
+ * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
+ * from legacy <asm-generic/io.h> header file behavior. In particular,
+ * it would seem to make sense to do the iounmap(p) for the non-IO-space
+ * case here regardless, but that's not what the old header file code
+ * did. Probably incorrectly, but this is meant to be bug-for-bug
+ * compatible.
+ */
+#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
+ uintptr_t start = (uintptr_t) PCI_IOBASE;
+ uintptr_t addr = (uintptr_t) p;
+
+ if (addr >= start && addr < start + IO_SPACE_LIMIT)
+ return;
+ iounmap(p);
+#endif
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
+
#endif /* CONFIG_PCI */
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 0018d51b93b0..b9fc330fc83b 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -52,6 +52,7 @@
#define FLAG_NO_DATA BIT(0)
#define FLAG_EXPECTED_FAIL BIT(1)
#define FLAG_SKB_FRAG BIT(2)
+#define FLAG_VERIFIER_ZEXT BIT(3)
enum {
CLASSIC = BIT(6), /* Old BPF instructions only. */
@@ -80,6 +81,7 @@ struct bpf_test {
int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
__u8 frag_data[MAX_DATA];
int stack_depth; /* for eBPF only, since tests don't call verifier */
+ int nr_testruns; /* Custom run count, defaults to MAX_TESTRUNS if 0 */
};
/* Large test cases need separate allocation and fill handler. */
@@ -461,41 +463,2520 @@ static int bpf_fill_stxdw(struct bpf_test *self)
return __bpf_fill_stxdw(self, BPF_DW);
}
-static int bpf_fill_long_jmp(struct bpf_test *self)
+static int __bpf_ld_imm64(struct bpf_insn insns[2], u8 reg, s64 imm64)
{
- unsigned int len = BPF_MAXINSNS;
- struct bpf_insn *insn;
+ struct bpf_insn tmp[] = {BPF_LD_IMM64(reg, imm64)};
+
+ memcpy(insns, tmp, sizeof(tmp));
+ return 2;
+}
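/*
 * Illustration (not part of the patch): BPF_LD_IMM64() expands to a
 * two-instruction pair, with the second slot carrying the upper 32 bits
 * of the immediate, which is why this helper always reports a length of 2.
 */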
+
+/*
+ * Branch conversion tests. Complex operations can expand to a lot
+ * of instructions when JITed. This in turn may cause jump offsets
+ * to overflow the field size of the native instruction, triggering
+ * a branch conversion mechanism in some JITs.
+ */
+static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
+{
+ struct bpf_insn *insns;
+ int len = S16_MAX + 5;
int i;
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ i = __bpf_ld_imm64(insns, R1, 0x0123456789abcdefULL);
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insns[i++] = BPF_JMP_IMM(jmp, R0, imm, S16_MAX);
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 2);
+ insns[i++] = BPF_EXIT_INSN();
+
+ while (i < len - 1) {
+ static const int ops[] = {
+ BPF_LSH, BPF_RSH, BPF_ARSH, BPF_ADD,
+ BPF_SUB, BPF_MUL, BPF_DIV, BPF_MOD,
+ };
+ int op = ops[(i >> 1) % ARRAY_SIZE(ops)];
+
+ if (i & 1)
+ insns[i++] = BPF_ALU32_REG(op, R0, R1);
+ else
+ insns[i++] = BPF_ALU64_REG(op, R0, R1);
+ }
+
+ insns[i++] = BPF_EXIT_INSN();
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+/* Branch taken by runtime decision */
+static int bpf_fill_max_jmp_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 1);
+}
+
+/* Branch not taken by runtime decision */
+static int bpf_fill_max_jmp_not_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 0);
+}
+
+/* Branch always taken, known at JIT time */
+static int bpf_fill_max_jmp_always_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JGE, 0);
+}
+
+/* Branch never taken, known at JIT time */
+static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JLT, 0);
+}
+
+/* ALU result computation used in tests */
+static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
+{
+ *res = 0;
+ switch (op) {
+ case BPF_MOV:
+ *res = v2;
+ break;
+ case BPF_AND:
+ *res = v1 & v2;
+ break;
+ case BPF_OR:
+ *res = v1 | v2;
+ break;
+ case BPF_XOR:
+ *res = v1 ^ v2;
+ break;
+ case BPF_LSH:
+ *res = v1 << v2;
+ break;
+ case BPF_RSH:
+ *res = v1 >> v2;
+ break;
+ case BPF_ARSH:
+ *res = v1 >> v2;
+ if (v2 > 0 && v1 > S64_MAX)
+ *res |= ~0ULL << (64 - v2);
+ break;
+ case BPF_ADD:
+ *res = v1 + v2;
+ break;
+ case BPF_SUB:
+ *res = v1 - v2;
+ break;
+ case BPF_MUL:
+ *res = v1 * v2;
+ break;
+ case BPF_DIV:
+ if (v2 == 0)
+ return false;
+ *res = div64_u64(v1, v2);
+ break;
+ case BPF_MOD:
+ if (v2 == 0)
+ return false;
+ div64_u64_rem(v1, v2, res);
+ break;
+ }
+ return true;
+}
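/*
 * Illustration (not part of the patch) of the BPF_ARSH emulation above,
 * which rebuilds an arithmetic shift from the logical shift on the
 * unsigned value:
 *   v1 = 0x8000000000000000, v2 = 4
 *   v1 >> v2           = 0x0800000000000000
 *   |= ~0ULL << (64-4) = 0xf800000000000000 == (s64)v1 >> 4
 */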
+
+/* Test an ALU shift operation for all valid shift values */
+static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
+ u8 mode, bool alu32)
+{
+ static const s64 regs[] = {
+ 0x0123456789abcdefLL, /* dword > 0, word < 0 */
+ 0xfedcba9876543210LL, /* dword < 0, word > 0 */
+ 0xfedcba0198765432LL, /* dword < 0, word < 0 */
+ 0x0123458967abcdefLL, /* dword > 0, word > 0 */
+ };
+ int bits = alu32 ? 32 : 64;
+ int len = (2 + 7 * bits) * ARRAY_SIZE(regs) + 3;
+ struct bpf_insn *insn;
+ int imm, k;
+ int i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+ for (k = 0; k < ARRAY_SIZE(regs); k++) {
+ s64 reg = regs[k];
+
+ i += __bpf_ld_imm64(&insn[i], R3, reg);
+
+ for (imm = 0; imm < bits; imm++) {
+ u64 val;
+
+ /* Perform operation */
+ insn[i++] = BPF_ALU64_REG(BPF_MOV, R1, R3);
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R2, imm);
+ if (alu32) {
+ if (mode == BPF_K)
+ insn[i++] = BPF_ALU32_IMM(op, R1, imm);
+ else
+ insn[i++] = BPF_ALU32_REG(op, R1, R2);
+
+ if (op == BPF_ARSH)
+ reg = (s32)reg;
+ else
+ reg = (u32)reg;
+ __bpf_alu_result(&val, reg, imm, op);
+ val = (u32)val;
+ } else {
+ if (mode == BPF_K)
+ insn[i++] = BPF_ALU64_IMM(op, R1, imm);
+ else
+ insn[i++] = BPF_ALU64_REG(op, R1, R2);
+ __bpf_alu_result(&val, reg, imm, op);
+ }
+
+ /*
+ * When debugging a JIT that fails this test, one
+ * can write the immediate value to R0 here to find
+ * out which operand values fail.
+ */
+
+ /* Load reference and check the result */
+ i += __bpf_ld_imm64(&insn[i], R4, val);
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R4, 1);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+ }
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+static int bpf_fill_alu64_lsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, false);
+}
+
+static int bpf_fill_alu64_rsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, false);
+}
+
+static int bpf_fill_alu64_arsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, false);
+}
+
+static int bpf_fill_alu64_lsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, false);
+}
+
+static int bpf_fill_alu64_rsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, false);
+}
+
+static int bpf_fill_alu64_arsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, false);
+}
+
+static int bpf_fill_alu32_lsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, true);
+}
+
+static int bpf_fill_alu32_rsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, true);
+}
+
+static int bpf_fill_alu32_arsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, true);
+}
+
+static int bpf_fill_alu32_lsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, true);
+}
+
+static int bpf_fill_alu32_rsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, true);
+}
+
+static int bpf_fill_alu32_arsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, true);
+}
+
+/*
+ * Test an ALU register shift operation for all valid shift values
+ * for the case when the source and destination are the same.
+ */
+static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
+ bool alu32)
+{
+ int bits = alu32 ? 32 : 64;
+ int len = 3 + 6 * bits;
+ struct bpf_insn *insn;
+ int i = 0;
+ u64 val;
+
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
- insn[0] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
- insn[1] = BPF_JMP_IMM(BPF_JEQ, R0, 1, len - 2 - 1);
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+ for (val = 0; val < bits; val++) {
+ u64 res;
+
+ /* Perform operation */
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R1, val);
+ if (alu32)
+ insn[i++] = BPF_ALU32_REG(op, R1, R1);
+ else
+ insn[i++] = BPF_ALU64_REG(op, R1, R1);
+
+ /* Compute the reference result */
+ __bpf_alu_result(&res, val, val, op);
+ if (alu32)
+ res = (u32)res;
+ i += __bpf_ld_imm64(&insn[i], R2, res);
+
+ /* Check the actual result */
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+static int bpf_fill_alu64_lsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, false);
+}
+
+static int bpf_fill_alu64_rsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, false);
+}
+
+static int bpf_fill_alu64_arsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, false);
+}
+
+static int bpf_fill_alu32_lsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, true);
+}
+
+static int bpf_fill_alu32_rsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, true);
+}
+
+static int bpf_fill_alu32_arsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, true);
+}
+
+/*
+ * Common operand pattern generator for exhaustive power-of-two magnitudes
+ * tests. The block size parameters can be adjusted to increase/reduce the
+ * number of combinations tested and thereby execution speed and memory
+ * footprint.
+ */
+
+static inline s64 value(int msb, int delta, int sign)
+{
+ return sign * (1LL << msb) + delta;
+}
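/*
 * Illustration (not part of the patch) of the operand generator:
 *   value(3,  0,  1) ==  8     2^3 exactly
 *   value(3,  1,  1) ==  9     just above the magnitude
 *   value(3, -1, -1) == -9     just below -2^3
 */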
+
+static int __bpf_fill_pattern(struct bpf_test *self, void *arg,
+ int dbits, int sbits, int block1, int block2,
+ int (*emit)(struct bpf_test*, void*,
+ struct bpf_insn*, s64, s64))
+{
+ static const int sgn[][2] = {{1, 1}, {1, -1}, {-1, 1}, {-1, -1}};
+ struct bpf_insn *insns;
+ int di, si, bt, db, sb;
+ int count, len, k;
+ int extra = 1 + 2;
+ int i = 0;
+
+ /* Total number of iterations for the two patterns */
+ count = (dbits - 1) * (sbits - 1) * block1 * block1 * ARRAY_SIZE(sgn);
+ count += (max(dbits, sbits) - 1) * block2 * block2 * ARRAY_SIZE(sgn);
+
+ /* Compute the maximum number of insns and allocate the buffer */
+ len = extra + count * (*emit)(self, arg, NULL, 0, 0);
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ /* Add head instruction(s) */
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
/*
- * Fill with a complex 64-bit operation that expands to a lot of
- * instructions on 32-bit JITs. The large jump offset can then
- * overflow the conditional branch field size, triggering a branch
- * conversion mechanism in some JITs.
- *
- * Note: BPF_MAXINSNS of ALU64 MUL is enough to trigger such branch
- * conversion on the 32-bit MIPS JIT. For other JITs, the instruction
- * count and/or operation may need to be modified to trigger the
- * branch conversion.
+ * Pattern 1: all combinations of power-of-two magnitudes and sign,
+ * and with a block of contiguous values around each magnitude.
*/
- for (i = 2; i < len - 1; i++)
- insn[i] = BPF_ALU64_IMM(BPF_MUL, R0, (i << 16) + i);
+ for (di = 0; di < dbits - 1; di++) /* Dst magnitudes */
+ for (si = 0; si < sbits - 1; si++) /* Src magnitudes */
+ for (k = 0; k < ARRAY_SIZE(sgn); k++) /* Sign combos */
+ for (db = -(block1 / 2);
+ db < (block1 + 1) / 2; db++)
+ for (sb = -(block1 / 2);
+ sb < (block1 + 1) / 2; sb++) {
+ s64 dst, src;
+
+ dst = value(di, db, sgn[k][0]);
+ src = value(si, sb, sgn[k][1]);
+ i += (*emit)(self, arg,
+ &insns[i],
+ dst, src);
+ }
+ /*
+ * Pattern 2: all combinations for a larger block of values
+ * for each power-of-two magnitude and sign, where the magnitude is
+ * the same for both operands.
+ */
+ for (bt = 0; bt < max(dbits, sbits) - 1; bt++) /* Magnitude */
+ for (k = 0; k < ARRAY_SIZE(sgn); k++) /* Sign combos */
+ for (db = -(block2 / 2); db < (block2 + 1) / 2; db++)
+ for (sb = -(block2 / 2);
+ sb < (block2 + 1) / 2; sb++) {
+ s64 dst, src;
+
+ dst = value(bt % dbits, db, sgn[k][0]);
+ src = value(bt % sbits, sb, sgn[k][1]);
+ i += (*emit)(self, arg, &insns[i],
+ dst, src);
+ }
+
+ /* Append tail instructions */
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ BUG_ON(i > len);
+
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = i;
- insn[len - 1] = BPF_EXIT_INSN();
+ return 0;
+}
+
+/*
+ * Block size parameters used in pattern tests below. Tune as needed to
+ * increase/reduce the number of combinations tested; see following examples.
+ * block values per operand MSB
+ * ----------------------------------------
+ * 0 none
+ * 1 (1 << MSB)
+ * 2 (1 << MSB) + [-1, 0]
+ * 3 (1 << MSB) + [-1, 0, 1]
+ */
+#define PATTERN_BLOCK1 1
+#define PATTERN_BLOCK2 5
+
+/* Number of test runs for a pattern test */
+#define NR_PATTERN_RUNS 1
+
+/*
+ * Exhaustive tests of ALU operations for all combinations of power-of-two
+ * magnitudes of the operands, both for positive and negative values. The
+ * test is designed to verify e.g. the ALU and ALU64 operations for JITs that
+ * emit different code depending on the magnitude of the immediate value.
+ */
+static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 7;
+
+ if (__bpf_alu_result(&res, dst, (s32)imm, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R3, res);
+ insns[i++] = BPF_ALU64_IMM(op, R1, imm);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_emit_alu32_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 7;
+
+ if (__bpf_alu_result(&res, (u32)dst, (u32)imm, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
+ insns[i++] = BPF_ALU32_IMM(op, R1, imm);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_emit_alu64_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 9;
+
+ if (__bpf_alu_result(&res, dst, src, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ i += __bpf_ld_imm64(&insns[i], R3, res);
+ insns[i++] = BPF_ALU64_REG(op, R1, R2);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 9;
+
+ if (__bpf_alu_result(&res, (u32)dst, (u32)src, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
+ insns[i++] = BPF_ALU32_REG(op, R1, R2);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_fill_alu64_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu64_imm);
+}
+
+static int __bpf_fill_alu32_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu32_imm);
+}
+
+static int __bpf_fill_alu64_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu64_reg);
+}
+
+static int __bpf_fill_alu32_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu32_reg);
+}
+
+/* ALU64 immediate operations */
+static int bpf_fill_alu64_mov_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_MOV);
+}
+
+static int bpf_fill_alu64_and_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_AND);
+}
+
+static int bpf_fill_alu64_or_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_OR);
+}
+
+static int bpf_fill_alu64_xor_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_XOR);
+}
+
+static int bpf_fill_alu64_add_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_ADD);
+}
+
+static int bpf_fill_alu64_sub_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_SUB);
+}
+
+static int bpf_fill_alu64_mul_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_MUL);
+}
+
+static int bpf_fill_alu64_div_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_DIV);
+}
+
+static int bpf_fill_alu64_mod_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_MOD);
+}
+
+/* ALU32 immediate operations */
+static int bpf_fill_alu32_mov_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_MOV);
+}
+
+static int bpf_fill_alu32_and_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_AND);
+}
+
+static int bpf_fill_alu32_or_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_OR);
+}
+
+static int bpf_fill_alu32_xor_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_XOR);
+}
+
+static int bpf_fill_alu32_add_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_ADD);
+}
+
+static int bpf_fill_alu32_sub_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_SUB);
+}
+
+static int bpf_fill_alu32_mul_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_MUL);
+}
+
+static int bpf_fill_alu32_div_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_DIV);
+}
+
+static int bpf_fill_alu32_mod_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_MOD);
+}
+
+/* ALU64 register operations */
+static int bpf_fill_alu64_mov_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_MOV);
+}
+
+static int bpf_fill_alu64_and_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_AND);
+}
+
+static int bpf_fill_alu64_or_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_OR);
+}
+
+static int bpf_fill_alu64_xor_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_XOR);
+}
+
+static int bpf_fill_alu64_add_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_ADD);
+}
+
+static int bpf_fill_alu64_sub_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_SUB);
+}
+
+static int bpf_fill_alu64_mul_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_MUL);
+}
+
+static int bpf_fill_alu64_div_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_DIV);
+}
+
+static int bpf_fill_alu64_mod_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_MOD);
+}
+
+/* ALU32 register operations */
+static int bpf_fill_alu32_mov_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_MOV);
+}
+
+static int bpf_fill_alu32_and_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_AND);
+}
+
+static int bpf_fill_alu32_or_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_OR);
+}
+
+static int bpf_fill_alu32_xor_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_XOR);
+}
+
+static int bpf_fill_alu32_add_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_ADD);
+}
+
+static int bpf_fill_alu32_sub_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_SUB);
+}
+
+static int bpf_fill_alu32_mul_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_MUL);
+}
+
+static int bpf_fill_alu32_div_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_DIV);
+}
+
+static int bpf_fill_alu32_mod_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_MOD);
+}
+
+/*
+ * Test JITs that implement complex ALU operations as function
+ * calls, and must re-arrange operands for argument passing.
+ */
+static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
+{
+ int len = 2 + 10 * 10;
+ struct bpf_insn *insns;
+ u64 dst, res;
+ int i = 0;
+ u32 imm;
+ int rd;
+
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ /* Operand and result values according to operation */
+ if (alu32)
+ dst = 0x76543210U;
+ else
+ dst = 0x7edcba9876543210ULL;
+ imm = 0x01234567U;
+
+ if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
+ imm &= 31;
+
+ __bpf_alu_result(&res, dst, imm, op);
+
+ if (alu32)
+ res = (u32)res;
+
+ /* Check all operand registers */
+ for (rd = R0; rd <= R9; rd++) {
+ i += __bpf_ld_imm64(&insns[i], rd, dst);
+
+ if (alu32)
+ insns[i++] = BPF_ALU32_IMM(op, rd, imm);
+ else
+ insns[i++] = BPF_ALU64_IMM(op, rd, imm);
+
+ insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32);
+ insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res >> 32, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ insns[i++] = BPF_MOV64_IMM(R0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+/* ALU64 K registers */
+static int bpf_fill_alu64_mov_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MOV, false);
+}
+
+static int bpf_fill_alu64_and_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_AND, false);
+}
+
+static int bpf_fill_alu64_or_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_OR, false);
+}
+
+static int bpf_fill_alu64_xor_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_XOR, false);
+}
+
+static int bpf_fill_alu64_lsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_LSH, false);
+}
+
+static int bpf_fill_alu64_rsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_RSH, false);
+}
+
+static int bpf_fill_alu64_arsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_ARSH, false);
+}
+
+static int bpf_fill_alu64_add_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_ADD, false);
+}
+
+static int bpf_fill_alu64_sub_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_SUB, false);
+}
+
+static int bpf_fill_alu64_mul_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MUL, false);
+}
+
+static int bpf_fill_alu64_div_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_DIV, false);
+}
+
+static int bpf_fill_alu64_mod_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MOD, false);
+}
+
+/* ALU32 K registers */
+static int bpf_fill_alu32_mov_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MOV, true);
+}
+
+static int bpf_fill_alu32_and_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_AND, true);
+}
+
+static int bpf_fill_alu32_or_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_OR, true);
+}
+
+static int bpf_fill_alu32_xor_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_XOR, true);
+}
+
+static int bpf_fill_alu32_lsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_LSH, true);
+}
+
+static int bpf_fill_alu32_rsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_RSH, true);
+}
+
+static int bpf_fill_alu32_arsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_ARSH, true);
+}
+
+static int bpf_fill_alu32_add_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_ADD, true);
+}
+
+static int bpf_fill_alu32_sub_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_SUB, true);
+}
+
+static int bpf_fill_alu32_mul_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MUL, true);
+}
+
+static int bpf_fill_alu32_div_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_DIV, true);
+}
+
+static int bpf_fill_alu32_mod_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MOD, true);
+}
+
+/*
+ * Test JITs that implement complex ALU operations as function
+ * calls, and must re-arrange operands for argument passing.
+ */
+static int __bpf_fill_alu_reg_pairs(struct bpf_test *self, u8 op, bool alu32)
+{
+ int len = 2 + 10 * 10 * 12;
+ u64 dst, src, res, same;
+ struct bpf_insn *insns;
+ int rd, rs;
+ int i = 0;
+
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ /* Operand and result values according to operation */
+ if (alu32) {
+ dst = 0x76543210U;
+ src = 0x01234567U;
+ } else {
+ dst = 0x7edcba9876543210ULL;
+ src = 0x0123456789abcdefULL;
+ }
+
+ if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
+ src &= 31;
+
+ __bpf_alu_result(&res, dst, src, op);
+ __bpf_alu_result(&same, src, src, op);
+
+ if (alu32) {
+ res = (u32)res;
+ same = (u32)same;
+ }
+
+ /* Check all combinations of operand registers */
+ for (rd = R0; rd <= R9; rd++) {
+ for (rs = R0; rs <= R9; rs++) {
+ u64 val = rd == rs ? same : res;
+
+ i += __bpf_ld_imm64(&insns[i], rd, dst);
+ i += __bpf_ld_imm64(&insns[i], rs, src);
+
+ if (alu32)
+ insns[i++] = BPF_ALU32_REG(op, rd, rs);
+ else
+ insns[i++] = BPF_ALU64_REG(op, rd, rs);
+
+ insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32);
+ insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val >> 32, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+ }
+
+ insns[i++] = BPF_MOV64_IMM(R0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+/* ALU64 X register combinations */
+static int bpf_fill_alu64_mov_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MOV, false);
+}
+
+static int bpf_fill_alu64_and_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_AND, false);
+}
+
+static int bpf_fill_alu64_or_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_OR, false);
+}
+
+static int bpf_fill_alu64_xor_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_XOR, false);
+}
+
+static int bpf_fill_alu64_lsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_LSH, false);
+}
+
+static int bpf_fill_alu64_rsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_RSH, false);
+}
+
+static int bpf_fill_alu64_arsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, false);
+}
+
+static int bpf_fill_alu64_add_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_ADD, false);
+}
+
+static int bpf_fill_alu64_sub_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_SUB, false);
+}
+
+static int bpf_fill_alu64_mul_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MUL, false);
+}
+
+static int bpf_fill_alu64_div_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_DIV, false);
+}
+
+static int bpf_fill_alu64_mod_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MOD, false);
+}
+
+/* ALU32 X register combinations */
+static int bpf_fill_alu32_mov_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MOV, true);
+}
+
+static int bpf_fill_alu32_and_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_AND, true);
+}
+
+static int bpf_fill_alu32_or_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_OR, true);
+}
+
+static int bpf_fill_alu32_xor_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_XOR, true);
+}
+
+static int bpf_fill_alu32_lsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_LSH, true);
+}
+
+static int bpf_fill_alu32_rsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_RSH, true);
+}
+
+static int bpf_fill_alu32_arsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, true);
+}
+
+static int bpf_fill_alu32_add_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_ADD, true);
+}
+
+static int bpf_fill_alu32_sub_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_SUB, true);
+}
+
+static int bpf_fill_alu32_mul_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MUL, true);
+}
+
+static int bpf_fill_alu32_div_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_DIV, true);
+}
+
+static int bpf_fill_alu32_mod_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MOD, true);
+}
+
+/*
+ * Exhaustive tests of atomic operations for all power-of-two operand
+ * magnitudes, both for positive and negative values.
+ */
+
+static int __bpf_emit_atomic64(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+ u64 keep, fetch, res;
+ int i = 0;
+
+ if (!insns)
+ return 21;
+
+ switch (op) {
+ case BPF_XCHG:
+ res = src;
+ break;
+ default:
+ __bpf_alu_result(&res, dst, src, BPF_OP(op));
+ }
+
+ keep = 0x0123456789abcdefULL;
+ if (op & BPF_FETCH)
+ fetch = dst;
+ else
+ fetch = src;
+
+ i += __bpf_ld_imm64(&insns[i], R0, keep);
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ i += __bpf_ld_imm64(&insns[i], R3, res);
+ i += __bpf_ld_imm64(&insns[i], R4, fetch);
+ i += __bpf_ld_imm64(&insns[i], R5, keep);
+
+ insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
+ insns[i++] = BPF_ATOMIC_OP(BPF_DW, op, R10, R2, -8);
+ insns[i++] = BPF_LDX_MEM(BPF_DW, R1, R10, -8);
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+}
+
+static int __bpf_emit_atomic32(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+ u64 keep, fetch, res;
+ int i = 0;
+
+ if (!insns)
+ return 21;
+
+ switch (op) {
+ case BPF_XCHG:
+ res = src;
+ break;
+ default:
+ __bpf_alu_result(&res, (u32)dst, (u32)src, BPF_OP(op));
+ }
+
+ keep = 0x0123456789abcdefULL;
+ if (op & BPF_FETCH)
+ fetch = (u32)dst;
+ else
+ fetch = src;
+
+ i += __bpf_ld_imm64(&insns[i], R0, keep);
+ i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
+ i += __bpf_ld_imm64(&insns[i], R4, fetch);
+ i += __bpf_ld_imm64(&insns[i], R5, keep);
+
+ insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
+ insns[i++] = BPF_ATOMIC_OP(BPF_W, op, R10, R2, -4);
+ insns[i++] = BPF_LDX_MEM(BPF_W, R1, R10, -4);
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+}
+
+static int __bpf_emit_cmpxchg64(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int i = 0;
+
+ if (!insns)
+ return 23;
+
+ i += __bpf_ld_imm64(&insns[i], R0, ~dst);
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+
+ /* Result unsuccessful */
+ insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
+ insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
+ insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ /* Result successful */
+ insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
+ insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R3, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+}
+
+static int __bpf_emit_cmpxchg32(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int i = 0;
+
+ if (!insns)
+ return 27;
+
+ i += __bpf_ld_imm64(&insns[i], R0, ~dst);
+ i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+
+ /* Result unsuccessful */
+ insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
+ insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
+ insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
+ insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);
+
+ insns[i++] = BPF_JMP32_REG(BPF_JEQ, R1, R3, 2);
+ insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
+ insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ /* Result successful */
+ i += __bpf_ld_imm64(&insns[i], R0, dst);
+ insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
+ insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
+ insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);
+
+ insns[i++] = BPF_JMP32_REG(BPF_JEQ, R2, R3, 2);
+ insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
+ insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+}
+
+static int __bpf_fill_atomic64(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ 0, PATTERN_BLOCK2,
+ &__bpf_emit_atomic64);
+}
+
+static int __bpf_fill_atomic32(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ 0, PATTERN_BLOCK2,
+ &__bpf_emit_atomic32);
+}
+
+/* 64-bit atomic operations */
+static int bpf_fill_atomic64_add(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_ADD);
+}
+
+static int bpf_fill_atomic64_and(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_AND);
+}
+
+static int bpf_fill_atomic64_or(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_OR);
+}
+
+static int bpf_fill_atomic64_xor(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_XOR);
+}
+
+static int bpf_fill_atomic64_add_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_and_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_or_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xor_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xchg(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_XCHG);
+}
+
+static int bpf_fill_cmpxchg64(struct bpf_test *self)
+{
+ return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
+ &__bpf_emit_cmpxchg64);
+}
+
+/* 32-bit atomic operations */
+static int bpf_fill_atomic32_add(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_ADD);
+}
+
+static int bpf_fill_atomic32_and(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_AND);
+}
+
+static int bpf_fill_atomic32_or(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_OR);
+}
+
+static int bpf_fill_atomic32_xor(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_XOR);
+}
+
+static int bpf_fill_atomic32_add_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_and_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_or_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xor_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xchg(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_XCHG);
+}
+
+static int bpf_fill_cmpxchg32(struct bpf_test *self)
+{
+ return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
+ &__bpf_emit_cmpxchg32);
+}
+
+/*
+ * Test JITs that implement ATOMIC operations as function calls or
+ * other primitives, and must therefore re-arrange operands for
+ * argument passing.
+ */
+static int __bpf_fill_atomic_reg_pairs(struct bpf_test *self, u8 width, u8 op)
+{
+ struct bpf_insn *insn;
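+ /* At most 34 insns per register pair, for all 10 x 10 pairs, plus return */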
+ int len = 2 + 34 * 10 * 10;
+ u64 mem, upd, res;
+ int rd, rs, i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ /* Operand and memory values */
+ if (width == BPF_DW) {
+ mem = 0x0123456789abcdefULL;
+ upd = 0xfedcba9876543210ULL;
+ } else { /* BPF_W */
+ mem = 0x01234567U;
+ upd = 0x76543210U;
+ }
+
+ /* Memory updated according to operation */
+ switch (op) {
+ case BPF_XCHG:
+ res = upd;
+ break;
+ case BPF_CMPXCHG:
+ res = mem;
+ break;
+ default:
+ __bpf_alu_result(&res, mem, upd, BPF_OP(op));
+ }
+
+ /* Test all operand registers */
+ for (rd = R0; rd <= R9; rd++) {
+ for (rs = R0; rs <= R9; rs++) {
+ u64 cmp, src;
+
+ /* Initialize value in memory */
+ i += __bpf_ld_imm64(&insn[i], R0, mem);
+ insn[i++] = BPF_STX_MEM(width, R10, R0, -8);
+
+ /* Initialize registers in order */
+ i += __bpf_ld_imm64(&insn[i], R0, ~mem);
+ i += __bpf_ld_imm64(&insn[i], rs, upd);
+ insn[i++] = BPF_MOV64_REG(rd, R10);
+
+ /* Perform atomic operation */
+ insn[i++] = BPF_ATOMIC_OP(width, op, rd, rs, -8);
+ if (op == BPF_CMPXCHG && width == BPF_W)
+ insn[i++] = BPF_ZEXT_REG(R0);
+
+ /* Check R0 register value */
+ if (op == BPF_CMPXCHG)
+ cmp = mem; /* Expect value from memory */
+ else if (R0 == rd || R0 == rs)
+ cmp = 0; /* Aliased, checked below */
+ else
+ cmp = ~mem; /* Expect value to be preserved */
+ if (cmp) {
+ insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0,
+ (u32)cmp, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ insn[i++] = BPF_ALU64_IMM(BPF_RSH, R0, 32);
+ insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0,
+ cmp >> 32, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+
+ /* Check source register value */
+ if (rs == R0 && op == BPF_CMPXCHG)
+ src = 0; /* Aliased with R0, checked above */
+ else if (rs == rd && (op == BPF_CMPXCHG ||
+ !(op & BPF_FETCH)))
+ src = 0; /* Aliased with rd, checked below */
+ else if (op == BPF_CMPXCHG)
+ src = upd; /* Expect value to be preserved */
+ else if (op & BPF_FETCH)
+ src = mem; /* Expect fetched value from mem */
+ else /* no fetch */
+ src = upd; /* Expect value to be preserved */
+ if (src) {
+ insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs,
+ (u32)src, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ insn[i++] = BPF_ALU64_IMM(BPF_RSH, rs, 32);
+ insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs,
+ src >> 32, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+
+ /* Check destination register value */
+ if (!(rd == R0 && op == BPF_CMPXCHG) &&
+ !(rd == rs && (op & BPF_FETCH))) {
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, rd, R10, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+
+ /* Check value in memory */
+ if (rs != rd) { /* No aliasing */
+ i += __bpf_ld_imm64(&insn[i], R1, res);
+ } else if (op == BPF_XCHG) { /* Aliased, XCHG */
+ insn[i++] = BPF_MOV64_REG(R1, R10);
+ } else if (op == BPF_CMPXCHG) { /* Aliased, CMPXCHG */
+ i += __bpf_ld_imm64(&insn[i], R1, mem);
+ } else { /* Aliased, ALU oper */
+ i += __bpf_ld_imm64(&insn[i], R1, mem);
+ insn[i++] = BPF_ALU64_REG(BPF_OP(op), R1, R10);
+ }
+
+ insn[i++] = BPF_LDX_MEM(width, R0, R10, -8);
+ if (width == BPF_DW)
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
+ else /* width == BPF_W */
+ insn[i++] = BPF_JMP32_REG(BPF_JEQ, R0, R1, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+ }
+
+ insn[i++] = BPF_MOV64_IMM(R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = i;
+ BUG_ON(i > len);
+
+ return 0;
+}
+
+/* 64-bit atomic register tests */
+static int bpf_fill_atomic64_add_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD);
+}
+
+static int bpf_fill_atomic64_and_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND);
+}
+
+static int bpf_fill_atomic64_or_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR);
+}
+
+static int bpf_fill_atomic64_xor_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR);
+}
+
+static int bpf_fill_atomic64_add_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_and_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_or_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xor_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xchg_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XCHG);
+}
+
+static int bpf_fill_atomic64_cmpxchg_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_CMPXCHG);
+}
+
+/* 32-bit atomic register tests */
+static int bpf_fill_atomic32_add_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD);
+}
+
+static int bpf_fill_atomic32_and_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND);
+}
+
+static int bpf_fill_atomic32_or_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR);
+}
+
+static int bpf_fill_atomic32_xor_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR);
+}
+
+static int bpf_fill_atomic32_add_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_and_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_or_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xor_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xchg_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XCHG);
+}
+
+static int bpf_fill_atomic32_cmpxchg_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_CMPXCHG);
+}
+
+/*
+ * Test the two-instruction 64-bit immediate load operation for all
+ * power-of-two magnitudes of the immediate operand. For each MSB, a block
+ * of immediate values centered around the power-of-two MSB is tested, for
+ * both positive and negative values. With the default block size of 64,
+ * bit position 16 for example covers the immediates 0x10000 - 32 through
+ * 0x10000 + 31 and their negations. The test is designed to verify the
+ * operation for JITs that emit different code depending on the magnitude
+ * of the immediate value. This is often the case if the native instruction
+ * immediate field width is narrower than 32 bits.
+ */
+static int bpf_fill_ld_imm64(struct bpf_test *self)
+{
+ int block = 64; /* Increase for more tests per MSB position */
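+ /* Per immediate: ld_imm64 (2) + reference load (4) + check (2) = 8 insns */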
+ int len = 3 + 8 * 63 * block * 2;
+ struct bpf_insn *insn;
+ int bit, adj, sign;
+ int i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+ for (bit = 0; bit <= 62; bit++) {
+ for (adj = -block / 2; adj < block / 2; adj++) {
+ for (sign = -1; sign <= 1; sign += 2) {
+ s64 imm = sign * ((1LL << bit) + adj);
+
+ /* Perform operation */
+ i += __bpf_ld_imm64(&insn[i], R1, imm);
+
+ /* Load reference */
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm);
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3,
+ (u32)(imm >> 32));
+ insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32);
+ insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3);
+
+ /* Check result */
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+ }
+ }
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
self->u.ptr.insns = insn;
self->u.ptr.len = len;
+ BUG_ON(i != len);
return 0;
}
+/*
+ * Exhaustive tests of JMP operations for all combinations of power-of-two
+ * magnitudes of the operands, for both positive and negative values. The
+ * tests are designed to verify the JMP and JMP32 operations for JITs that
+ * emit different code depending on the magnitude of the immediate value.
+ */
+
+static bool __bpf_match_jmp_cond(s64 v1, s64 v2, u8 op)
+{
+ switch (op) {
+ case BPF_JSET:
+ return !!(v1 & v2);
+ case BPF_JEQ:
+ return v1 == v2;
+ case BPF_JNE:
+ return v1 != v2;
+ case BPF_JGT:
+ return (u64)v1 > (u64)v2;
+ case BPF_JGE:
+ return (u64)v1 >= (u64)v2;
+ case BPF_JLT:
+ return (u64)v1 < (u64)v2;
+ case BPF_JLE:
+ return (u64)v1 <= (u64)v2;
+ case BPF_JSGT:
+ return v1 > v2;
+ case BPF_JSGE:
+ return v1 >= v2;
+ case BPF_JSLT:
+ return v1 < v2;
+ case BPF_JSLE:
+ return v1 <= v2;
+ }
+ return false;
+}
+
+static int __bpf_emit_jmp_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond(dst, (s32)imm, op);
+ int i = 0;
+
+ insns[i++] = BPF_ALU32_IMM(BPF_MOV, R0, match);
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ insns[i++] = BPF_JMP_IMM(op, R1, imm, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
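+ /* Worst case: mov + ld_imm64 (2) + jmp + ja + exit = 6 insns */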
+ return 5 + 1;
+}
+
+static int __bpf_emit_jmp32_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond((s32)dst, (s32)imm, op);
+ int i = 0;
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ insns[i++] = BPF_JMP32_IMM(op, R1, imm, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
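+ /* Worst case: ld_imm64 (2) + jmp32 + ja + exit = 5 insns */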
+ return 5;
+}
+
+static int __bpf_emit_jmp_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond(dst, src, op);
+ int i = 0;
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ insns[i++] = BPF_JMP_REG(op, R1, R2, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
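+ /* Worst case: 2 x ld_imm64 (4) + jmp + ja + exit = 7 insns */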
+ return 7;
+}
+
+static int __bpf_emit_jmp32_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond((s32)dst, (s32)src, op);
+ int i = 0;
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ insns[i++] = BPF_JMP32_REG(op, R1, R2, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
+ return 7;
+}
+
+static int __bpf_fill_jmp_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp_imm);
+}
+
+static int __bpf_fill_jmp32_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp32_imm);
+}
+
+static int __bpf_fill_jmp_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp_reg);
+}
+
+static int __bpf_fill_jmp32_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp32_reg);
+}
+
+/* JMP immediate tests */
+static int bpf_fill_jmp_jset_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp_jeq_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp_jne_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp_jgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp_jge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp_jlt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp_jle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp_jsgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp_jsge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp_jslt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp_jsle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSLE);
+}
+
+/* JMP32 immediate tests */
+static int bpf_fill_jmp32_jset_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp32_jeq_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp32_jne_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp32_jgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp32_jge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp32_jlt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp32_jle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp32_jsgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp32_jsge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp32_jslt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp32_jsle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSLE);
+}
+
+/* JMP register tests */
+static int bpf_fill_jmp_jset_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp_jeq_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp_jne_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp_jgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp_jge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp_jlt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp_jle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp_jsgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp_jsge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp_jslt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp_jsle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSLE);
+}
+
+/* JMP32 register tests */
+static int bpf_fill_jmp32_jset_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp32_jeq_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp32_jne_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp32_jgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp32_jge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp32_jlt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp32_jle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp32_jsgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp32_jsge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp32_jslt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp32_jsle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSLE);
+}
+
+/*
+ * Set up a sequence of staggered jumps, forwards and backwards with
+ * increasing offset. This tests the conversion of relative jumps to
+ * JITed native jumps. On some architectures, for example MIPS, a large
+ * PC-relative jump offset may overflow the immediate field of the native
+ * conditional branch instruction, triggering a conversion to use an
+ * absolute jump instead. Since this changes the jump offsets, another
+ * offset computation pass is necessary, and that may in turn trigger
+ * another branch conversion. This jump sequence is particularly nasty
+ * in that regard.
+ *
+ * The sequence generation is parameterized by size and jump type.
+ * The size must be even, and the expected result is always size + 1.
+ * Below is an example with size=8 and result=9.
+ *
+ * ________________________Start
+ * R0 = 0
+ * R1 = r1
+ * R2 = r2
+ * ,------- JMP +4 * 3______________Preamble: 4 insns
+ * ,----------|-ind 0- if R0 != 7 JMP 8 * 3 + 1 <--------------------.
+ * | | R0 = 8 |
+ * | | JMP +7 * 3 ------------------------.
+ * | ,--------|-----1- if R0 != 5 JMP 7 * 3 + 1 <--------------. | |
+ * | | | R0 = 6 | | |
+ * | | | JMP +5 * 3 ------------------. | |
+ * | | ,------|-----2- if R0 != 3 JMP 6 * 3 + 1 <--------. | | | |
+ * | | | | R0 = 4 | | | | |
+ * | | | | JMP +3 * 3 ------------. | | | |
+ * | | | ,----|-----3- if R0 != 1 JMP 5 * 3 + 1 <--. | | | | | |
+ * | | | | | R0 = 2 | | | | | | |
+ * | | | | | JMP +1 * 3 ------. | | | | | |
+ * | | | | ,--t=====4> if R0 != 0 JMP 4 * 3 + 1 1 2 3 4 5 6 7 8 loc
+ * | | | | | R0 = 1 -1 +2 -3 +4 -5 +6 -7 +8 off
+ * | | | | | JMP -2 * 3 ---' | | | | | | |
+ * | | | | | ,------5- if R0 != 2 JMP 3 * 3 + 1 <-----' | | | | | |
+ * | | | | | | R0 = 3 | | | | | |
+ * | | | | | | JMP -4 * 3 ---------' | | | | |
+ * | | | | | | ,----6- if R0 != 4 JMP 2 * 3 + 1 <-----------' | | | |
+ * | | | | | | | R0 = 5 | | | |
+ * | | | | | | | JMP -6 * 3 ---------------' | | |
+ * | | | | | | | ,--7- if R0 != 6 JMP 1 * 3 + 1 <-----------------' | |
+ * | | | | | | | | R0 = 7 | |
+ * | | Error | | | JMP -8 * 3 ---------------------' |
+ * | | paths | | | ,8- if R0 != 8 JMP 0 * 3 + 1 <-----------------------'
+ * | | | | | | | | | R0 = 9__________________Sequence: 3 * (size + 1) - 1 insns
+ * `-+-+-+-+-+-+-+-+-> EXIT____________________Return: 1 insn
+ *
+ */
+
+/*
+ * The maximum size parameter. Each sequence entry spans 3 insns, and the
+ * largest branch must fit in the signed 16-bit jump offset field, hence
+ * the bound of 0x7fff / 3. The size is also required to be even.
+ */
+#define MAX_STAGGERED_JMP_SIZE ((0x7fff / 3) & ~1)
+
+/* We use a reduced number of iterations to get a reasonable execution time */
+#define NR_STAGGERED_JMP_RUNS 10
+
+static int __bpf_fill_staggered_jumps(struct bpf_test *self,
+ const struct bpf_insn *jmp,
+ u64 r1, u64 r2)
+{
+ int size = self->test[0].result - 1;
+ int len = 4 + 3 * (size + 1);
+ struct bpf_insn *insns;
+ int off, ind;
+
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ /* Preamble */
+ insns[0] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+ insns[1] = BPF_ALU64_IMM(BPF_MOV, R1, r1);
+ insns[2] = BPF_ALU64_IMM(BPF_MOV, R2, r2);
+ insns[3] = BPF_JMP_IMM(BPF_JA, 0, 0, 3 * size / 2);
+
+ /* Sequence */
+ for (ind = 0, off = size; ind <= size; ind++, off -= 2) {
+ struct bpf_insn *ins = &insns[4 + 3 * ind];
+ int loc;
+
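+ /* Skip offset zero; continue the sequence on the negative side */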
+ if (off == 0)
+ off--;
+
+ loc = abs(off);
+ ins[0] = BPF_JMP_IMM(BPF_JNE, R0, loc - 1,
+ 3 * (size - ind) + 1);
+ ins[1] = BPF_ALU64_IMM(BPF_MOV, R0, loc);
+ ins[2] = *jmp;
+ ins[2].off = 3 * (off - 1);
+ }
+
+ /* Return */
+ insns[len - 1] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+/* 64-bit unconditional jump */
+static int bpf_fill_staggered_ja(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0, 0);
+}
+
+/* 64-bit immediate jumps */
+static int bpf_fill_staggered_jeq_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JEQ, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jne_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JNE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
+}
+
+static int bpf_fill_staggered_jset_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSET, R1, 0x82, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
+}
+
+static int bpf_fill_staggered_jgt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGT, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
+}
+
+static int bpf_fill_staggered_jge_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jlt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLT, R1, 0x80000000, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jle_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jsgt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGT, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+static int bpf_fill_staggered_jsge_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGE, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jslt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLT, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jsle_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLE, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+/* 64-bit register jumps */
+static int bpf_fill_staggered_jeq_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JEQ, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jne_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JNE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
+}
+
+static int bpf_fill_staggered_jset_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSET, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
+}
+
+static int bpf_fill_staggered_jgt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
+}
+
+static int bpf_fill_staggered_jge_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jlt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
+}
+
+static int bpf_fill_staggered_jle_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jsgt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
+}
+
+static int bpf_fill_staggered_jsge_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
+}
+
+static int bpf_fill_staggered_jslt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
+}
+
+static int bpf_fill_staggered_jsle_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
+}
+
+/* 32-bit immediate jumps */
+static int bpf_fill_staggered_jeq32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JEQ, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jne32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JNE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
+}
+
+static int bpf_fill_staggered_jset32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSET, R1, 0x82, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
+}
+
+static int bpf_fill_staggered_jgt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGT, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
+}
+
+static int bpf_fill_staggered_jge32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jlt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLT, R1, 0x80000000, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jle32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jsgt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGT, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+static int bpf_fill_staggered_jsge32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGE, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jslt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLT, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jsle32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLE, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+/* 32-bit register jumps */
+static int bpf_fill_staggered_jeq32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JEQ, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jne32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JNE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
+}
+
+static int bpf_fill_staggered_jset32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSET, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
+}
+
+static int bpf_fill_staggered_jgt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
+}
+
+static int bpf_fill_staggered_jge32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jlt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
+}
+
+static int bpf_fill_staggered_jle32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jsgt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
+}
+
+static int bpf_fill_staggered_jsge32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
+}
+
+static int bpf_fill_staggered_jslt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
+}
+
+static int bpf_fill_staggered_jsle32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
+}
+
static struct bpf_test tests[] = {
{
"TAX",
@@ -1951,147 +4432,6 @@ static struct bpf_test tests[] = {
{ },
{ { 0, -1 } }
},
- {
- /*
- * Register (non-)clobbering test, in the case where a 32-bit
- * JIT implements complex ALU64 operations via function calls.
- * If so, the function call must be invisible in the eBPF
- * registers. The JIT must then save and restore relevant
- * registers during the call. The following tests check that
- * the eBPF registers retain their values after such a call.
- */
- "INT: Register clobbering, R1 updated",
- .u.insns_int = {
- BPF_ALU32_IMM(BPF_MOV, R0, 0),
- BPF_ALU32_IMM(BPF_MOV, R1, 123456789),
- BPF_ALU32_IMM(BPF_MOV, R2, 2),
- BPF_ALU32_IMM(BPF_MOV, R3, 3),
- BPF_ALU32_IMM(BPF_MOV, R4, 4),
- BPF_ALU32_IMM(BPF_MOV, R5, 5),
- BPF_ALU32_IMM(BPF_MOV, R6, 6),
- BPF_ALU32_IMM(BPF_MOV, R7, 7),
- BPF_ALU32_IMM(BPF_MOV, R8, 8),
- BPF_ALU32_IMM(BPF_MOV, R9, 9),
- BPF_ALU64_IMM(BPF_DIV, R1, 123456789),
- BPF_JMP_IMM(BPF_JNE, R0, 0, 10),
- BPF_JMP_IMM(BPF_JNE, R1, 1, 9),
- BPF_JMP_IMM(BPF_JNE, R2, 2, 8),
- BPF_JMP_IMM(BPF_JNE, R3, 3, 7),
- BPF_JMP_IMM(BPF_JNE, R4, 4, 6),
- BPF_JMP_IMM(BPF_JNE, R5, 5, 5),
- BPF_JMP_IMM(BPF_JNE, R6, 6, 4),
- BPF_JMP_IMM(BPF_JNE, R7, 7, 3),
- BPF_JMP_IMM(BPF_JNE, R8, 8, 2),
- BPF_JMP_IMM(BPF_JNE, R9, 9, 1),
- BPF_ALU32_IMM(BPF_MOV, R0, 1),
- BPF_EXIT_INSN(),
- },
- INTERNAL,
- { },
- { { 0, 1 } }
- },
- {
- "INT: Register clobbering, R2 updated",
- .u.insns_int = {
- BPF_ALU32_IMM(BPF_MOV, R0, 0),
- BPF_ALU32_IMM(BPF_MOV, R1, 1),
- BPF_ALU32_IMM(BPF_MOV, R2, 2 * 123456789),
- BPF_ALU32_IMM(BPF_MOV, R3, 3),
- BPF_ALU32_IMM(BPF_MOV, R4, 4),
- BPF_ALU32_IMM(BPF_MOV, R5, 5),
- BPF_ALU32_IMM(BPF_MOV, R6, 6),
- BPF_ALU32_IMM(BPF_MOV, R7, 7),
- BPF_ALU32_IMM(BPF_MOV, R8, 8),
- BPF_ALU32_IMM(BPF_MOV, R9, 9),
- BPF_ALU64_IMM(BPF_DIV, R2, 123456789),
- BPF_JMP_IMM(BPF_JNE, R0, 0, 10),
- BPF_JMP_IMM(BPF_JNE, R1, 1, 9),
- BPF_JMP_IMM(BPF_JNE, R2, 2, 8),
- BPF_JMP_IMM(BPF_JNE, R3, 3, 7),
- BPF_JMP_IMM(BPF_JNE, R4, 4, 6),
- BPF_JMP_IMM(BPF_JNE, R5, 5, 5),
- BPF_JMP_IMM(BPF_JNE, R6, 6, 4),
- BPF_JMP_IMM(BPF_JNE, R7, 7, 3),
- BPF_JMP_IMM(BPF_JNE, R8, 8, 2),
- BPF_JMP_IMM(BPF_JNE, R9, 9, 1),
- BPF_ALU32_IMM(BPF_MOV, R0, 1),
- BPF_EXIT_INSN(),
- },
- INTERNAL,
- { },
- { { 0, 1 } }
- },
- {
- /*
- * Test 32-bit JITs that implement complex ALU64 operations as
- * function calls R0 = f(R1, R2), and must re-arrange operands.
- */
-#define NUMER 0xfedcba9876543210ULL
-#define DENOM 0x0123456789abcdefULL
- "ALU64_DIV X: Operand register permutations",
- .u.insns_int = {
- /* R0 / R2 */
- BPF_LD_IMM64(R0, NUMER),
- BPF_LD_IMM64(R2, DENOM),
- BPF_ALU64_REG(BPF_DIV, R0, R2),
- BPF_JMP_IMM(BPF_JEQ, R0, NUMER / DENOM, 1),
- BPF_EXIT_INSN(),
- /* R1 / R0 */
- BPF_LD_IMM64(R1, NUMER),
- BPF_LD_IMM64(R0, DENOM),
- BPF_ALU64_REG(BPF_DIV, R1, R0),
- BPF_JMP_IMM(BPF_JEQ, R1, NUMER / DENOM, 1),
- BPF_EXIT_INSN(),
- /* R0 / R1 */
- BPF_LD_IMM64(R0, NUMER),
- BPF_LD_IMM64(R1, DENOM),
- BPF_ALU64_REG(BPF_DIV, R0, R1),
- BPF_JMP_IMM(BPF_JEQ, R0, NUMER / DENOM, 1),
- BPF_EXIT_INSN(),
- /* R2 / R0 */
- BPF_LD_IMM64(R2, NUMER),
- BPF_LD_IMM64(R0, DENOM),
- BPF_ALU64_REG(BPF_DIV, R2, R0),
- BPF_JMP_IMM(BPF_JEQ, R2, NUMER / DENOM, 1),
- BPF_EXIT_INSN(),
- /* R2 / R1 */
- BPF_LD_IMM64(R2, NUMER),
- BPF_LD_IMM64(R1, DENOM),
- BPF_ALU64_REG(BPF_DIV, R2, R1),
- BPF_JMP_IMM(BPF_JEQ, R2, NUMER / DENOM, 1),
- BPF_EXIT_INSN(),
- /* R1 / R2 */
- BPF_LD_IMM64(R1, NUMER),
- BPF_LD_IMM64(R2, DENOM),
- BPF_ALU64_REG(BPF_DIV, R1, R2),
- BPF_JMP_IMM(BPF_JEQ, R1, NUMER / DENOM, 1),
- BPF_EXIT_INSN(),
- /* R1 / R1 */
- BPF_LD_IMM64(R1, NUMER),
- BPF_ALU64_REG(BPF_DIV, R1, R1),
- BPF_JMP_IMM(BPF_JEQ, R1, 1, 1),
- BPF_EXIT_INSN(),
- /* R2 / R2 */
- BPF_LD_IMM64(R2, DENOM),
- BPF_ALU64_REG(BPF_DIV, R2, R2),
- BPF_JMP_IMM(BPF_JEQ, R2, 1, 1),
- BPF_EXIT_INSN(),
- /* R3 / R4 */
- BPF_LD_IMM64(R3, NUMER),
- BPF_LD_IMM64(R4, DENOM),
- BPF_ALU64_REG(BPF_DIV, R3, R4),
- BPF_JMP_IMM(BPF_JEQ, R3, NUMER / DENOM, 1),
- BPF_EXIT_INSN(),
- /* Successful return */
- BPF_LD_IMM64(R0, 1),
- BPF_EXIT_INSN(),
- },
- INTERNAL,
- { },
- { { 0, 1 } },
-#undef NUMER
-#undef DENOM
- },
#ifdef CONFIG_32BIT
{
"INT: 32-bit context pointer word order and zero-extension",
@@ -5255,6 +7595,67 @@ static struct bpf_test tests[] = {
{ },
{ { 0, (u32) cpu_to_be64(0x0123456789abcdefLL) } },
},
+ {
+ "ALU_END_FROM_BE 64: 0x0123456789abcdef >> 32 -> 0x01234567",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_be64(0x0123456789abcdefLL) >> 32) } },
+ },
+ /* BPF_ALU | BPF_END | BPF_FROM_BE, reversed */
+ {
+ "ALU_END_FROM_BE 16: 0xfedcba9876543210 -> 0x3210",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_be16(0x3210) } },
+ },
+ {
+ "ALU_END_FROM_BE 32: 0xfedcba9876543210 -> 0x76543210",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_be32(0x76543210) } },
+ },
+ {
+ "ALU_END_FROM_BE 64: 0xfedcba9876543210 -> 0x76543210",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) cpu_to_be64(0xfedcba9876543210ULL) } },
+ },
+ {
+ "ALU_END_FROM_BE 64: 0xfedcba9876543210 >> 32 -> 0xfedcba98",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_be64(0xfedcba9876543210ULL) >> 32) } },
+ },
/* BPF_ALU | BPF_END | BPF_FROM_LE */
{
"ALU_END_FROM_LE 16: 0x0123456789abcdef -> 0xefcd",
@@ -5292,6 +7693,321 @@ static struct bpf_test tests[] = {
{ },
{ { 0, (u32) cpu_to_le64(0x0123456789abcdefLL) } },
},
+ {
+ "ALU_END_FROM_LE 64: 0x0123456789abcdef >> 32 -> 0xefcdab89",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_le64(0x0123456789abcdefLL) >> 32) } },
+ },
+ /* BPF_ALU | BPF_END | BPF_FROM_LE, reversed */
+ {
+ "ALU_END_FROM_LE 16: 0xfedcba9876543210 -> 0x1032",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_le16(0x3210) } },
+ },
+ {
+ "ALU_END_FROM_LE 32: 0xfedcba9876543210 -> 0x10325476",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_le32(0x76543210) } },
+ },
+ {
+ "ALU_END_FROM_LE 64: 0xfedcba9876543210 -> 0x10325476",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) cpu_to_le64(0xfedcba9876543210ULL) } },
+ },
+ {
+ "ALU_END_FROM_LE 64: 0xfedcba9876543210 >> 32 -> 0x98badcfe",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_le64(0xfedcba9876543210ULL) >> 32) } },
+ },
+ /* BPF_LDX_MEM B/H/W/DW */
+ {
+ "BPF_LDX_MEM | BPF_B",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R2, 0x0000000000000008ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_B, R0, R10, -1),
+#else
+ BPF_LDX_MEM(BPF_B, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R2, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_B, R0, R10, -1),
+#else
+ BPF_LDX_MEM(BPF_B, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R2, 0x0000000000000708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_H, R0, R10, -2),
+#else
+ BPF_LDX_MEM(BPF_H, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R2, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_H, R0, R10, -2),
+#else
+ BPF_LDX_MEM(BPF_H, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R2, 0x0000000005060708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_W, R0, R10, -4),
+#else
+ BPF_LDX_MEM(BPF_W, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R2, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_W, R0, R10, -4),
+#else
+ BPF_LDX_MEM(BPF_W, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ /* BPF_STX_MEM B/H/W/DW */
+ {
+ "BPF_STX_MEM | BPF_B",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b0c0d0e008ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_B, R10, R2, -1),
+#else
+ BPF_STX_MEM(BPF_B, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_STX_MEM | BPF_B, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b0c0d0e088ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_B, R10, R2, -1),
+#else
+ BPF_STX_MEM(BPF_B, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_STX_MEM | BPF_H",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b0c0d00708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_H, R10, R2, -2),
+#else
+ BPF_STX_MEM(BPF_H, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_STX_MEM | BPF_H, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b0c0d08788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_H, R10, R2, -2),
+#else
+ BPF_STX_MEM(BPF_H, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_STX_MEM | BPF_W",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b005060708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_W, R10, R2, -4),
+#else
+ BPF_STX_MEM(BPF_W, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_STX_MEM | BPF_W, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b085868788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_W, R10, R2, -4),
+#else
+ BPF_STX_MEM(BPF_W, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
/* BPF_ST(X) | BPF_MEM | BPF_B/H/W/DW */
{
"ST_MEM_B: Store/Load byte: max negative",
@@ -5529,15 +8245,20 @@ static struct bpf_test tests[] = {
* Individual tests are expanded from template macros for all
* combinations of ALU operation, word size and fetching.
*/
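+/*
+ * For 32-bit operations, BPF_ATOMIC_POISON() sets a marker in the upper
+ * half of the source operand. The tests below verify that a 32-bit
+ * atomic operation neither uses nor leaks the poisoned upper half.
+ */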
+#define BPF_ATOMIC_POISON(width) ((width) == BPF_W ? (0xbaadf00dULL << 32) : 0)
+
#define BPF_ATOMIC_OP_TEST1(width, op, logic, old, update, result) \
{ \
"BPF_ATOMIC | " #width ", " #op ": Test: " \
#old " " #logic " " #update " = " #result, \
.u.insns_int = { \
- BPF_ALU32_IMM(BPF_MOV, R5, update), \
+ BPF_LD_IMM64(R5, (update) | BPF_ATOMIC_POISON(width)), \
BPF_ST_MEM(width, R10, -40, old), \
BPF_ATOMIC_OP(width, op, R10, R5, -40), \
BPF_LDX_MEM(width, R0, R10, -40), \
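+ /* Fold the upper half into the result to detect stray poison bits */ \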
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
BPF_EXIT_INSN(), \
}, \
INTERNAL, \
@@ -5551,11 +8272,14 @@ static struct bpf_test tests[] = {
#old " " #logic " " #update " = " #result, \
.u.insns_int = { \
BPF_ALU64_REG(BPF_MOV, R1, R10), \
- BPF_ALU32_IMM(BPF_MOV, R0, update), \
+ BPF_LD_IMM64(R0, (update) | BPF_ATOMIC_POISON(width)), \
BPF_ST_MEM(BPF_W, R10, -40, old), \
BPF_ATOMIC_OP(width, op, R10, R0, -40), \
BPF_ALU64_REG(BPF_MOV, R0, R10), \
BPF_ALU64_REG(BPF_SUB, R0, R1), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
BPF_EXIT_INSN(), \
}, \
INTERNAL, \
@@ -5569,10 +8293,13 @@ static struct bpf_test tests[] = {
#old " " #logic " " #update " = " #result, \
.u.insns_int = { \
BPF_ALU64_REG(BPF_MOV, R0, R10), \
- BPF_ALU32_IMM(BPF_MOV, R1, update), \
+ BPF_LD_IMM64(R1, (update) | BPF_ATOMIC_POISON(width)), \
BPF_ST_MEM(width, R10, -40, old), \
BPF_ATOMIC_OP(width, op, R10, R1, -40), \
BPF_ALU64_REG(BPF_SUB, R0, R10), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
BPF_EXIT_INSN(), \
}, \
INTERNAL, \
@@ -5585,10 +8312,10 @@ static struct bpf_test tests[] = {
"BPF_ATOMIC | " #width ", " #op ": Test fetch: " \
#old " " #logic " " #update " = " #result, \
.u.insns_int = { \
- BPF_ALU32_IMM(BPF_MOV, R3, update), \
+ BPF_LD_IMM64(R3, (update) | BPF_ATOMIC_POISON(width)), \
BPF_ST_MEM(width, R10, -40, old), \
BPF_ATOMIC_OP(width, op, R10, R3, -40), \
- BPF_ALU64_REG(BPF_MOV, R0, R3), \
+ BPF_ALU32_REG(BPF_MOV, R0, R3), \
BPF_EXIT_INSN(), \
}, \
INTERNAL, \
@@ -5686,6 +8413,7 @@ static struct bpf_test tests[] = {
BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+#undef BPF_ATOMIC_POISON
#undef BPF_ATOMIC_OP_TEST1
#undef BPF_ATOMIC_OP_TEST2
#undef BPF_ATOMIC_OP_TEST3
@@ -5770,7 +8498,7 @@ static struct bpf_test tests[] = {
"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful return",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
- BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
BPF_ALU64_REG(BPF_MOV, R0, R1),
BPF_STX_MEM(BPF_DW, R10, R1, -40),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
@@ -5787,7 +8515,7 @@ static struct bpf_test tests[] = {
"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful store",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
- BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
BPF_ALU64_REG(BPF_MOV, R0, R1),
BPF_STX_MEM(BPF_DW, R10, R0, -40),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
@@ -5805,7 +8533,7 @@ static struct bpf_test tests[] = {
"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure return",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
- BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
BPF_ALU64_REG(BPF_MOV, R0, R1),
BPF_ALU64_IMM(BPF_ADD, R0, 1),
BPF_STX_MEM(BPF_DW, R10, R1, -40),
@@ -5823,7 +8551,7 @@ static struct bpf_test tests[] = {
"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure store",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
- BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
BPF_ALU64_REG(BPF_MOV, R0, R1),
BPF_ALU64_IMM(BPF_ADD, R0, 1),
BPF_STX_MEM(BPF_DW, R10, R1, -40),
@@ -5842,11 +8570,11 @@ static struct bpf_test tests[] = {
"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test side effects",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
- BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
BPF_ALU64_REG(BPF_MOV, R0, R1),
BPF_STX_MEM(BPF_DW, R10, R1, -40),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
- BPF_LD_IMM64(R0, 0xfecdba9876543210ULL),
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_REG(BPF_SUB, R0, R2),
BPF_EXIT_INSN(),
@@ -7192,14 +9920,6 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
- { /* Mainly checking JIT here. */
- "BPF_MAXINSNS: Very long conditional jump",
- { },
- INTERNAL | FLAG_NO_DATA,
- { },
- { { 0, 1 } },
- .fill_helper = bpf_fill_long_jmp,
- },
{
"JMP_JA: Jump, gap, jump, ...",
{ },
@@ -8413,6 +11133,2809 @@ static struct bpf_test tests[] = {
{},
{ { 0, 2 } },
},
+ /* BPF_LDX_MEM with operand aliasing */
+ {
+ "LDX_MEM_B: operand register aliasing",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_B, R10, -8, 123),
+ BPF_MOV64_REG(R0, R10),
+ BPF_LDX_MEM(BPF_B, R0, R0, -8),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } },
+ .stack_depth = 8,
+ },
+ {
+ "LDX_MEM_H: operand register aliasing",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_H, R10, -8, 12345),
+ BPF_MOV64_REG(R0, R10),
+ BPF_LDX_MEM(BPF_H, R0, R0, -8),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 12345 } },
+ .stack_depth = 8,
+ },
+ {
+ "LDX_MEM_W: operand register aliasing",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_W, R10, -8, 123456789),
+ BPF_MOV64_REG(R0, R10),
+ BPF_LDX_MEM(BPF_W, R0, R0, -8),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123456789 } },
+ .stack_depth = 8,
+ },
+ {
+ "LDX_MEM_DW: operand register aliasing",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x123456789abcdefULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_MOV64_REG(R0, R10),
+ BPF_LDX_MEM(BPF_DW, R0, R0, -8),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_MOV64_REG(R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU64_REG(BPF_OR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ /*
+ * Register (non-)clobbering tests for the case where a JIT implements
+ * complex ALU or ATOMIC operations via function calls. If so, the
+ * function call must be transparent to the eBPF registers. The JIT
+ * must therefore save and restore relevant registers across the call.
+ * The following tests check that the eBPF registers retain their
+ * values after such an operation. Mainly intended for complex ALU
+ * and atomic operations, but we run it for all of them. You never know...
+ *
+ * Note that each operation should be tested twice with different
+ * destinations, to check preservation for all registers.
+ */
+#define BPF_TEST_CLOBBER_ALU(alu, op, dst, src) \
+ { \
+ #alu "_" #op " to " #dst ": no clobbering", \
+ .u.insns_int = { \
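+ /* Set Rn = n, reusing each register's enum value as immediate */ \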
+ BPF_ALU64_IMM(BPF_MOV, R0, R0), \
+ BPF_ALU64_IMM(BPF_MOV, R1, R1), \
+ BPF_ALU64_IMM(BPF_MOV, R2, R2), \
+ BPF_ALU64_IMM(BPF_MOV, R3, R3), \
+ BPF_ALU64_IMM(BPF_MOV, R4, R4), \
+ BPF_ALU64_IMM(BPF_MOV, R5, R5), \
+ BPF_ALU64_IMM(BPF_MOV, R6, R6), \
+ BPF_ALU64_IMM(BPF_MOV, R7, R7), \
+ BPF_ALU64_IMM(BPF_MOV, R8, R8), \
+ BPF_ALU64_IMM(BPF_MOV, R9, R9), \
+ BPF_##alu(BPF_ ##op, dst, src), \
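+ /* Restore dst to its register number for the checks below */ \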
+ BPF_ALU32_IMM(BPF_MOV, dst, dst), \
+ BPF_JMP_IMM(BPF_JNE, R0, R0, 10), \
+ BPF_JMP_IMM(BPF_JNE, R1, R1, 9), \
+ BPF_JMP_IMM(BPF_JNE, R2, R2, 8), \
+ BPF_JMP_IMM(BPF_JNE, R3, R3, 7), \
+ BPF_JMP_IMM(BPF_JNE, R4, R4, 6), \
+ BPF_JMP_IMM(BPF_JNE, R5, R5, 5), \
+ BPF_JMP_IMM(BPF_JNE, R6, R6, 4), \
+ BPF_JMP_IMM(BPF_JNE, R7, R7, 3), \
+ BPF_JMP_IMM(BPF_JNE, R8, R8, 2), \
+ BPF_JMP_IMM(BPF_JNE, R9, R9, 1), \
+ BPF_ALU64_IMM(BPF_MOV, R0, 1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 1 } } \
+ }
+ /* ALU64 operations, register clobbering */
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, AND, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, AND, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, OR, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, OR, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, XOR, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, XOR, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, LSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, LSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, RSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, RSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, ARSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, ARSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, ADD, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, ADD, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, SUB, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, SUB, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, MUL, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, MUL, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, DIV, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, DIV, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, MOD, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, MOD, R9, 123456789),
+ /* ALU32 immediate operations, register clobbering */
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, AND, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, AND, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, OR, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, OR, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, XOR, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, XOR, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, LSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, LSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, RSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, RSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, ARSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, ARSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, ADD, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, ADD, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, SUB, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, SUB, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, MUL, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, MUL, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, DIV, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, DIV, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, MOD, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, MOD, R9, 123456789),
+ /* ALU64 register operations, register clobbering */
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, AND, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, AND, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, OR, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, OR, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, XOR, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, XOR, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, LSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, LSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, RSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, RSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, ARSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, ARSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, ADD, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, ADD, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, SUB, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, SUB, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, MUL, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, MUL, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, DIV, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, DIV, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, MOD, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, MOD, R9, R1),
+ /* ALU32 register operations, register clobbering */
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, AND, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, AND, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, OR, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, OR, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, XOR, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, XOR, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, LSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, LSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, RSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, RSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, ARSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, ARSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, ADD, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, ADD, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, SUB, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, SUB, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, MUL, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, MUL, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, DIV, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, DIV, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, MOD, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, MOD, R9, R1),
+#undef BPF_TEST_CLOBBER_ALU
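+ /*
+ * The memory operand is seeded so that the atomic operation leaves all
+ * registers intact: fetch variants read back the value already held in
+ * R1, and CMPXCHG compares equal and loads the 0 already held in R0.
+ */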
+#define BPF_TEST_CLOBBER_ATOMIC(width, op) \
+ { \
+ "Atomic_" #width " " #op ": no clobbering", \
+ .u.insns_int = { \
+ BPF_ALU64_IMM(BPF_MOV, R0, 0), \
+ BPF_ALU64_IMM(BPF_MOV, R1, 1), \
+ BPF_ALU64_IMM(BPF_MOV, R2, 2), \
+ BPF_ALU64_IMM(BPF_MOV, R3, 3), \
+ BPF_ALU64_IMM(BPF_MOV, R4, 4), \
+ BPF_ALU64_IMM(BPF_MOV, R5, 5), \
+ BPF_ALU64_IMM(BPF_MOV, R6, 6), \
+ BPF_ALU64_IMM(BPF_MOV, R7, 7), \
+ BPF_ALU64_IMM(BPF_MOV, R8, 8), \
+ BPF_ALU64_IMM(BPF_MOV, R9, 9), \
+ BPF_ST_MEM(width, R10, -8, \
+ (op) == BPF_CMPXCHG ? 0 : \
+ (op) & BPF_FETCH ? 1 : 0), \
+ BPF_ATOMIC_OP(width, op, R10, R1, -8), \
+ BPF_JMP_IMM(BPF_JNE, R0, 0, 10), \
+ BPF_JMP_IMM(BPF_JNE, R1, 1, 9), \
+ BPF_JMP_IMM(BPF_JNE, R2, 2, 8), \
+ BPF_JMP_IMM(BPF_JNE, R3, 3, 7), \
+ BPF_JMP_IMM(BPF_JNE, R4, 4, 6), \
+ BPF_JMP_IMM(BPF_JNE, R5, 5, 5), \
+ BPF_JMP_IMM(BPF_JNE, R6, 6, 4), \
+ BPF_JMP_IMM(BPF_JNE, R7, 7, 3), \
+ BPF_JMP_IMM(BPF_JNE, R8, 8, 2), \
+ BPF_JMP_IMM(BPF_JNE, R9, 9, 1), \
+ BPF_ALU64_IMM(BPF_MOV, R0, 1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 1 } }, \
+ .stack_depth = 8, \
+ }
+ /* 64-bit atomic operations, register clobbering */
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_ADD),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_AND),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_OR),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XOR),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_ADD | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_AND | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_OR | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XOR | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XCHG),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_CMPXCHG),
+ /* 32-bit atomic operations, register clobbering */
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_ADD),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_AND),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_OR),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XOR),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_ADD | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_AND | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_OR | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XOR | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XCHG),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_CMPXCHG),
+#undef BPF_TEST_CLOBBER_ATOMIC
+ /* Checking that ALU32 src is not zero extended in place */
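+ /*
+ * The check copies the src operand, runs the ALU32 operation on a
+ * different dst, subtracts src from the copy, and folds the upper half
+ * of the difference into the lower half of R0. R0 reads back zero only
+ * if all 64 bits of src survived the operation.
+ */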
+#define BPF_ALU32_SRC_ZEXT(op) \
+ { \
+ "ALU32_" #op "_X: src preserved in zext", \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R1, 0x0123456789acbdefULL),\
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),\
+ BPF_ALU64_REG(BPF_MOV, R0, R1), \
+ BPF_ALU32_REG(BPF_##op, R2, R1), \
+ BPF_ALU64_REG(BPF_SUB, R0, R1), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ }
+ BPF_ALU32_SRC_ZEXT(MOV),
+ BPF_ALU32_SRC_ZEXT(AND),
+ BPF_ALU32_SRC_ZEXT(OR),
+ BPF_ALU32_SRC_ZEXT(XOR),
+ BPF_ALU32_SRC_ZEXT(ADD),
+ BPF_ALU32_SRC_ZEXT(SUB),
+ BPF_ALU32_SRC_ZEXT(MUL),
+ BPF_ALU32_SRC_ZEXT(DIV),
+ BPF_ALU32_SRC_ZEXT(MOD),
+#undef BPF_ALU32_SRC_ZEXT
+ /* Checking that ATOMIC32 src is not zero extended in place */
+#define BPF_ATOMIC32_SRC_ZEXT(op) \
+ { \
+ "ATOMIC_W_" #op ": src preserved in zext", \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R0, 0x0123456789acbdefULL), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ST_MEM(BPF_W, R10, -4, 0), \
+ BPF_ATOMIC_OP(BPF_W, BPF_##op, R10, R1, -4), \
+ BPF_ALU64_REG(BPF_SUB, R0, R1), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ .stack_depth = 8, \
+ }
+ BPF_ATOMIC32_SRC_ZEXT(ADD),
+ BPF_ATOMIC32_SRC_ZEXT(AND),
+ BPF_ATOMIC32_SRC_ZEXT(OR),
+ BPF_ATOMIC32_SRC_ZEXT(XOR),
+#undef BPF_ATOMIC32_SRC_ZEXT
+ /* Checking that CMPXCHG32 src is not zero extended in place */
+ {
+ "ATOMIC_W_CMPXCHG: src preserved in zext",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0123456789acbdefULL),
+ BPF_ALU64_REG(BPF_MOV, R2, R1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_ST_MEM(BPF_W, R10, -4, 0),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R1, -4),
+ BPF_ALU64_REG(BPF_SUB, R1, R2),
+ BPF_ALU64_REG(BPF_MOV, R2, R1),
+ BPF_ALU64_IMM(BPF_RSH, R2, 32),
+ BPF_ALU64_REG(BPF_OR, R1, R2),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ /* Checking that JMP32 immediate src is not zero extended in place */
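+ /*
+ * Same fold pattern as above: the JMP32 comparison must read only the
+ * low 32 bits of its register operand without zero-extending the
+ * register itself in place.
+ */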
+#define BPF_JMP32_IMM_ZEXT(op) \
+ { \
+ "JMP32_" #op "_K: operand preserved in zext", \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R0, 0x0123456789acbdefULL),\
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_JMP32_IMM(BPF_##op, R0, 1234, 1), \
+ BPF_JMP_A(0), /* Nop */ \
+ BPF_ALU64_REG(BPF_SUB, R0, R1), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ }
+ BPF_JMP32_IMM_ZEXT(JEQ),
+ BPF_JMP32_IMM_ZEXT(JNE),
+ BPF_JMP32_IMM_ZEXT(JSET),
+ BPF_JMP32_IMM_ZEXT(JGT),
+ BPF_JMP32_IMM_ZEXT(JGE),
+ BPF_JMP32_IMM_ZEXT(JLT),
+ BPF_JMP32_IMM_ZEXT(JLE),
+ BPF_JMP32_IMM_ZEXT(JSGT),
+ BPF_JMP32_IMM_ZEXT(JSGE),
+ BPF_JMP32_IMM_ZEXT(JSLT),
+ BPF_JMP32_IMM_ZEXT(JSLE),
+#undef BPF_JMP32_IMM_ZEXT
+ /* Checking that JMP32 dst & src are not zero extended in place */
+#define BPF_JMP32_REG_ZEXT(op) \
+ { \
+ "JMP32_" #op "_X: operands preserved in zext", \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R0, 0x0123456789acbdefULL),\
+ BPF_LD_IMM64(R1, 0xfedcba9876543210ULL),\
+ BPF_ALU64_REG(BPF_MOV, R2, R0), \
+ BPF_ALU64_REG(BPF_MOV, R3, R1), \
+ BPF_JMP32_REG(BPF_##op, R0, R1, 1), \
+ BPF_JMP_A(0), /* Nop */ \
+ BPF_ALU64_REG(BPF_SUB, R0, R2), \
+ BPF_ALU64_REG(BPF_SUB, R1, R3), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ }
+ BPF_JMP32_REG_ZEXT(JEQ),
+ BPF_JMP32_REG_ZEXT(JNE),
+ BPF_JMP32_REG_ZEXT(JSET),
+ BPF_JMP32_REG_ZEXT(JGT),
+ BPF_JMP32_REG_ZEXT(JGE),
+ BPF_JMP32_REG_ZEXT(JLT),
+ BPF_JMP32_REG_ZEXT(JLE),
+ BPF_JMP32_REG_ZEXT(JSGT),
+ BPF_JMP32_REG_ZEXT(JSGE),
+ BPF_JMP32_REG_ZEXT(JSLT),
+ BPF_JMP32_REG_ZEXT(JSLE),
+#undef BPF_JMP32_REG_ZEXT
+ /* ALU64 K registers */
+ {
+ "ALU64_MOV_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mov_imm_regs,
+ },
+ {
+ "ALU64_AND_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_and_imm_regs,
+ },
+ {
+ "ALU64_OR_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_or_imm_regs,
+ },
+ {
+ "ALU64_XOR_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_xor_imm_regs,
+ },
+ {
+ "ALU64_LSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_lsh_imm_regs,
+ },
+ {
+ "ALU64_RSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_rsh_imm_regs,
+ },
+ {
+ "ALU64_ARSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_arsh_imm_regs,
+ },
+ {
+ "ALU64_ADD_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_add_imm_regs,
+ },
+ {
+ "ALU64_SUB_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_sub_imm_regs,
+ },
+ {
+ "ALU64_MUL_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mul_imm_regs,
+ },
+ {
+ "ALU64_DIV_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_div_imm_regs,
+ },
+ {
+ "ALU64_MOD_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mod_imm_regs,
+ },
+ /* ALU32 K registers */
+ {
+ "ALU32_MOV_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mov_imm_regs,
+ },
+ {
+ "ALU32_AND_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_and_imm_regs,
+ },
+ {
+ "ALU32_OR_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_or_imm_regs,
+ },
+ {
+ "ALU32_XOR_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_xor_imm_regs,
+ },
+ {
+ "ALU32_LSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_imm_regs,
+ },
+ {
+ "ALU32_RSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_imm_regs,
+ },
+ {
+ "ALU32_ARSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_imm_regs,
+ },
+ {
+ "ALU32_ADD_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_add_imm_regs,
+ },
+ {
+ "ALU32_SUB_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_sub_imm_regs,
+ },
+ {
+ "ALU32_MUL_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mul_imm_regs,
+ },
+ {
+ "ALU32_DIV_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_div_imm_regs,
+ },
+ {
+ "ALU32_MOD_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mod_imm_regs,
+ },
+ /* ALU64 X register combinations */
+ {
+ "ALU64_MOV_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mov_reg_pairs,
+ },
+ {
+ "ALU64_AND_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_and_reg_pairs,
+ },
+ {
+ "ALU64_OR_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_or_reg_pairs,
+ },
+ {
+ "ALU64_XOR_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_xor_reg_pairs,
+ },
+ {
+ "ALU64_LSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_lsh_reg_pairs,
+ },
+ {
+ "ALU64_RSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_rsh_reg_pairs,
+ },
+ {
+ "ALU64_ARSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_arsh_reg_pairs,
+ },
+ {
+ "ALU64_ADD_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_add_reg_pairs,
+ },
+ {
+ "ALU64_SUB_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_sub_reg_pairs,
+ },
+ {
+ "ALU64_MUL_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mul_reg_pairs,
+ },
+ {
+ "ALU64_DIV_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_div_reg_pairs,
+ },
+ {
+ "ALU64_MOD_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mod_reg_pairs,
+ },
+ /* ALU32 X register combinations */
+ {
+ "ALU32_MOV_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mov_reg_pairs,
+ },
+ {
+ "ALU32_AND_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_and_reg_pairs,
+ },
+ {
+ "ALU32_OR_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_or_reg_pairs,
+ },
+ {
+ "ALU32_XOR_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_xor_reg_pairs,
+ },
+ {
+ "ALU32_LSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_reg_pairs,
+ },
+ {
+ "ALU32_RSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_reg_pairs,
+ },
+ {
+ "ALU32_ARSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_reg_pairs,
+ },
+ {
+ "ALU32_ADD_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_add_reg_pairs,
+ },
+ {
+ "ALU32_SUB_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_sub_reg_pairs,
+ },
+ {
+ "ALU32_MUL_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mul_reg_pairs,
+ },
+ {
+ "ALU32_DIV_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_div_reg_pairs,
+ },
+ {
+ "ALU32_MOD_X register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mod_reg_pairs,
+ },
+ /* Exhaustive test of ALU64 shift operations */
+ {
+ "ALU64_LSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_lsh_imm,
+ },
+ {
+ "ALU64_RSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_rsh_imm,
+ },
+ {
+ "ALU64_ARSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_arsh_imm,
+ },
+ {
+ "ALU64_LSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_lsh_reg,
+ },
+ {
+ "ALU64_RSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_rsh_reg,
+ },
+ {
+ "ALU64_ARSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_arsh_reg,
+ },
+ /* Exhaustive test of ALU32 shift operations */
+ {
+ "ALU32_LSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_imm,
+ },
+ {
+ "ALU32_RSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_imm,
+ },
+ {
+ "ALU32_ARSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_imm,
+ },
+ {
+ "ALU32_LSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_reg,
+ },
+ {
+ "ALU32_RSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_reg,
+ },
+ {
+ "ALU32_ARSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_reg,
+ },
+ /*
+ * Exhaustive test of ALU64 shift operations when
+ * source and destination register are the same.
+ */
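+ /*
+ * With dst == src, a JIT that emulates variable shifts through scratch
+ * registers may overwrite the shift amount before it is consumed;
+ * these cases catch such clobbering.
+ */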
+ {
+ "ALU64_LSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_lsh_same_reg,
+ },
+ {
+ "ALU64_RSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_rsh_same_reg,
+ },
+ {
+ "ALU64_ARSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_arsh_same_reg,
+ },
+ /*
+ * Exhaustive test of ALU32 shift operations when
+ * source and destination register are the same.
+ */
+ {
+ "ALU32_LSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_same_reg,
+ },
+ {
+ "ALU32_RSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_same_reg,
+ },
+ {
+ "ALU32_ARSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_same_reg,
+ },
+ /* ALU64 immediate magnitudes */
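+ /*
+ * The magnitude tests sweep the operand over a wide range of
+ * magnitudes, positive and negative, so that every immediate encoding
+ * width a JIT may use is exercised.
+ */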
+ {
+ "ALU64_MOV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mov_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_AND_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_and_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_OR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_or_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_XOR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_xor_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_ADD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_add_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_SUB_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_sub_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MUL_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mul_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_DIV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_div_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MOD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mod_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* ALU32 immediate magnitudes */
+ {
+ "ALU32_MOV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mov_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_AND_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_and_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_OR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_or_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_XOR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_xor_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_ADD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_add_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_SUB_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_sub_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MUL_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mul_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_DIV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_div_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MOD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mod_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* ALU64 register magnitudes */
+ {
+ "ALU64_MOV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mov_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_AND_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_and_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_OR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_or_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_XOR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_xor_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_ADD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_add_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_SUB_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_sub_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MUL_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mul_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_DIV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_div_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MOD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mod_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* ALU32 register magnitudes */
+ {
+ "ALU32_MOV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mov_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_AND_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_and_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_OR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_or_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_XOR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_xor_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_ADD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_add_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_SUB_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_sub_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MUL_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mul_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_DIV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_div_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MOD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mod_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* LD_IMM64 immediate magnitudes */
+ {
+ "LD_IMM64: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64,
+ },
+ /* 64-bit ATOMIC register combinations */
+ {
+ "ATOMIC_DW_ADD: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_add_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_AND: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_and_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_OR: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_or_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_XOR: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xor_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_ADD_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_add_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_AND_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_and_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_OR_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_or_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_XOR_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xor_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_XCHG: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xchg_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_CMPXCHG: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_cmpxchg_reg_pairs,
+ .stack_depth = 8,
+ },
+ /* 32-bit ATOMIC register combinations */
+ {
+ "ATOMIC_W_ADD: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_add_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_AND: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_and_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_OR: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_or_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_XOR: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xor_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_ADD_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_add_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_AND_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_and_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_OR_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_or_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_XOR_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xor_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_XCHG: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xchg_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_CMPXCHG: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_cmpxchg_reg_pairs,
+ .stack_depth = 8,
+ },
+ /* 64-bit ATOMIC magnitudes */
+ {
+ "ATOMIC_DW_ADD: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_add,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_AND: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_and,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_OR: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_or,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_XOR: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xor,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_ADD_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_add_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_AND_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_and_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_OR_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_or_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_XOR_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xor_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_XCHG: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xchg,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_CMPXCHG: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_cmpxchg64,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* 32-bit ATOMIC magnitudes */
+ {
+ "ATOMIC_W_ADD: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_add,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_AND: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_and,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_OR: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_or,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_XOR: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xor,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_ADD_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_add_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_AND_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_and_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_OR_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_or_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_XOR_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xor_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_XCHG: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xchg,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_CMPXCHG: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_cmpxchg32,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* JMP immediate magnitudes */
+ {
+ "JMP_JSET_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jset_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JEQ_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jeq_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JNE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jne_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jlt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jslt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* JMP register magnitudes */
+ {
+ "JMP_JSET_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jset_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JEQ_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jeq_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JNE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jne_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jlt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jslt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* JMP32 immediate magnitudes */
+ {
+ "JMP32_JSET_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jset_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JEQ_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jeq_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JNE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jne_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jlt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jslt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* JMP32 register magnitudes */
+ {
+ "JMP32_JSET_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jset_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JEQ_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jeq_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JNE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jne_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jlt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jslt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* Conditional jumps with constant decision */
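+ /*
+ * Each branch below has a statically known outcome, e.g. an unsigned
+ * value is never below zero and a signed 32-bit value is never above
+ * S32_MAX. A JIT may fold such branches at translation time, but the
+ * result must still match the constant decision.
+ */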
+ {
+ "JMP_JSET_K: imm = 0 -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JSET, R1, 0, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JLT_K: imm = 0 -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JLT, R1, 0, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JGE_K: imm = 0 -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JGE, R1, 0, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGT_K: imm = 0xffffffff -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JGT, R1, U32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JLE_K: imm = 0xffffffff -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JLE, R1, U32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP32_JSGT_K: imm = 0x7fffffff -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSGT, R1, S32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP32_JSGE_K: imm = -0x80000000 -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSGE, R1, S32_MIN, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP32_JSLT_K: imm = -0x80000000 -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSLT, R1, S32_MIN, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP32_JSLE_K: imm = 0x7fffffff -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSLE, R1, S32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JEQ_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JEQ, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JGE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JLE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JLE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSGE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSGE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSLE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JNE_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JNE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JGT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JGT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JLT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JLT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JSGT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSGT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JSLT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSLT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ /* Short relative jumps */
+ {
+ "Short relative jump: offset=0",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "Short relative jump: offset=1",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "Short relative jump: offset=2",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "Short relative jump: offset=3",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 3),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "Short relative jump: offset=4",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 4),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ /* Conditional branch conversions */
+ {
+ "Long conditional jump: taken at runtime",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_max_jmp_taken,
+ },
+ {
+ "Long conditional jump: not taken at runtime",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 2 } },
+ .fill_helper = bpf_fill_max_jmp_not_taken,
+ },
+ {
+ "Long conditional jump: always taken, known at JIT time",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_max_jmp_always_taken,
+ },
+ {
+ "Long conditional jump: never taken, known at JIT time",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 2 } },
+ .fill_helper = bpf_fill_max_jmp_never_taken,
+ },
+ /* Staggered jump sequences, immediate */
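+ /*
+ * The staggered fill helpers chain long sequences of jumps that
+ * alternate between forward and backward directions, exercising a wide
+ * range of branch offsets in a single program.
+ */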
+ {
+ "Staggered jumps: JMP_JA",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_ja,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JEQ_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JNE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSET_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ /* Staggered jump sequences, register */
+ {
+ "Staggered jumps: JMP_JEQ_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JNE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSET_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ /* Staggered jump sequences, JMP32 immediate */
+ {
+ "Staggered jumps: JMP32_JEQ_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JNE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSET_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ /* Staggered jump sequences, JMP32 register */
+ {
+ "Staggered jumps: JMP32_JEQ_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JNE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSET_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
};
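Every staggered-jump entry above fills the same positional fields and then sets two designated members. A sketch of the struct bpf_test members involved, abridged from lib/test_bpf.c for orientation (the real struct carries more fields than shown here):

	struct bpf_test {
		const char *descr;		/* name printed by the runner */
		union {
			struct bpf_insn insns_int[MAX_INSNS];
			struct { void *insns; unsigned int len; } ptr;
		} u;				/* empty {}: comes from fill_helper */
		__u8 aux;			/* INTERNAL | FLAG_NO_DATA, ... */
		__u8 data[MAX_DATA];		/* empty {}: no input packet */
		struct {
			int data_size;
			__u32 result;		/* { 0, MAX_STAGGERED_JMP_SIZE + 1 } */
		} test[MAX_SUBTESTS];
		int (*fill_helper)(struct bpf_test *self);
		int nr_testruns;		/* caps the default MAX_TESTRUNS */
	};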
static struct net_device dev;
@@ -8576,6 +14099,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
fp->aux->stack_depth = tests[which].stack_depth;
+ fp->aux->verifier_zext = !!(tests[which].aux &
+ FLAG_VERIFIER_ZEXT);
/* We cannot error here as we don't need type compatibility
* checks.
@@ -8631,6 +14156,9 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
{
int err_cnt = 0, i, runs = MAX_TESTRUNS;
+ if (test->nr_testruns)
+ runs = min(test->nr_testruns, MAX_TESTRUNS);
+
for (i = 0; i < MAX_SUBTESTS; i++) {
void *data;
u64 duration;
@@ -8690,8 +14218,6 @@ static __init int find_test_index(const char *test_name)
static __init int prepare_bpf_tests(void)
{
- int i;
-
if (test_id >= 0) {
/*
* if a test_id was specified, use test_range to
@@ -8735,23 +14261,11 @@ static __init int prepare_bpf_tests(void)
}
}
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- if (tests[i].fill_helper &&
- tests[i].fill_helper(&tests[i]) < 0)
- return -ENOMEM;
- }
-
return 0;
}
static __init void destroy_bpf_tests(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- if (tests[i].fill_helper)
- kfree(tests[i].u.ptr.insns);
- }
}
static bool exclude_test(int test_id)
@@ -8956,7 +14470,19 @@ static __init int test_bpf(void)
pr_info("#%d %s ", i, tests[i].descr);
+ if (tests[i].fill_helper &&
+ tests[i].fill_helper(&tests[i]) < 0) {
+ pr_cont("FAIL to prog_fill\n");
+ continue;
+ }
+
fp = generate_filter(i, &err);
+
+ if (tests[i].fill_helper) {
+ kfree(tests[i].u.ptr.insns);
+ tests[i].u.ptr.insns = NULL;
+ }
+
if (fp == NULL) {
if (err == 0) {
pass_cnt++;
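Moving fill_helper from prepare_bpf_tests() into the loop above bounds peak memory: previously every generated program stayed resident for the whole suite, now at most one generated program's source instructions exist at a time. A sketch of the resulting lifetime, with error handling trimmed:

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		if (tests[i].fill_helper &&
		    tests[i].fill_helper(&tests[i]) < 0)
			continue;			/* report, keep going */

		fp = generate_filter(i, &err);		/* copies insns into fp */

		if (tests[i].fill_helper) {
			kfree(tests[i].u.ptr.insns);	/* drop the source copy */
			tests[i].u.ptr.insns = NULL;
		}
	}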
@@ -8993,10 +14519,15 @@ static __init int test_bpf(void)
struct tail_call_test {
const char *descr;
struct bpf_insn insns[MAX_INSNS];
+ int flags;
int result;
int stack_depth;
};
+/* Flags that can be passed to tail call test cases */
+#define FLAG_NEED_STATE BIT(0)
+#define FLAG_RESULT_IN_STATE BIT(1)
+
/*
* Magic marker used in test snippets for tail calls below.
* BPF_LD/MOV to R2 and R2 with this immediate value is replaced
@@ -9017,6 +14548,30 @@ struct tail_call_test {
BPF_JMP_IMM(BPF_TAIL_CALL, 0, 0, 0)
/*
+ * A test function to be called from a BPF program, clobbering a lot of
+ * CPU registers in the process. A JITed BPF program calling this function
+ * must save and restore any caller-saved registers it uses for internal
+ * state, for example the current tail call count.
+ */
+BPF_CALL_1(bpf_test_func, u64, arg)
+{
+ char buf[64];
+ long a = 0;
+ long b = 1;
+ long c = 2;
+ long d = 3;
+ long e = 4;
+ long f = 5;
+ long g = 6;
+ long h = 7;
+
+ return snprintf(buf, sizeof(buf),
+ "%ld %lu %lx %ld %lu %lx %ld %lu %x",
+ a, b, c, d, e, f, g, h, (int)arg);
+}
+#define BPF_FUNC_test_func __BPF_FUNC_MAX_ID
+
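The point of the register-clobbering helper above: a JIT that keeps its tail-call count in a caller-saved register must spill it around any helper call. A pseudo-C sketch of the emission pattern whose absence this test is meant to catch (register and emit_* names are illustrative, not a real JIT API):

	emit_store(stack_slot, tcc_reg);	/* save tail call count */
	emit_call(bpf_test_func);		/* may clobber tcc_reg  */
	emit_load(tcc_reg, stack_slot);		/* restore before TAIL_CALL */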
+/*
* Tail call tests. Each test case may call any other test in the table,
* including itself, specified as a relative index offset from the calling
* test. The index TAIL_CALL_NULL can be used to specify a NULL target
@@ -9066,32 +14621,60 @@ static struct tail_call_test tail_call_tests[] = {
{
"Tail call error path, max count reached",
.insns = {
- BPF_ALU64_IMM(BPF_ADD, R1, 1),
- BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_LDX_MEM(BPF_W, R2, R1, 0),
+ BPF_ALU64_IMM(BPF_ADD, R2, 1),
+ BPF_STX_MEM(BPF_W, R1, R2, 0),
+ TAIL_CALL(0),
+ BPF_EXIT_INSN(),
+ },
+ .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+ .result = (MAX_TAIL_CALL_CNT + 1 + 1) * MAX_TESTRUNS,
+ },
+ {
+ "Tail call count preserved across function calls",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, R2, R1, 0),
+ BPF_ALU64_IMM(BPF_ADD, R2, 1),
+ BPF_STX_MEM(BPF_W, R1, R2, 0),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_CALL_REL(BPF_FUNC_get_numa_node_id),
+ BPF_CALL_REL(BPF_FUNC_ktime_get_ns),
+ BPF_CALL_REL(BPF_FUNC_ktime_get_boot_ns),
+ BPF_CALL_REL(BPF_FUNC_ktime_get_coarse_ns),
+ BPF_CALL_REL(BPF_FUNC_jiffies64),
+ BPF_CALL_REL(BPF_FUNC_test_func),
+ BPF_LDX_MEM(BPF_DW, R1, R10, -8),
+ BPF_ALU32_REG(BPF_MOV, R0, R1),
TAIL_CALL(0),
BPF_EXIT_INSN(),
},
- .result = MAX_TAIL_CALL_CNT + 1,
+ .stack_depth = 8,
+ .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+ .result = (MAX_TAIL_CALL_CNT + 1 + 1) * MAX_TESTRUNS,
},
{
"Tail call error path, NULL target",
.insns = {
- BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_LDX_MEM(BPF_W, R2, R1, 0),
+ BPF_ALU64_IMM(BPF_ADD, R2, 1),
+ BPF_STX_MEM(BPF_W, R1, R2, 0),
TAIL_CALL(TAIL_CALL_NULL),
- BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
- .result = 1,
+ .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+ .result = MAX_TESTRUNS,
},
{
"Tail call error path, index out of range",
.insns = {
- BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_LDX_MEM(BPF_W, R2, R1, 0),
+ BPF_ALU64_IMM(BPF_ADD, R2, 1),
+ BPF_STX_MEM(BPF_W, R1, R2, 0),
TAIL_CALL(TAIL_CALL_INVALID),
- BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
- .result = 1,
+ .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+ .result = MAX_TESTRUNS,
},
};
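With FLAG_NEED_STATE, the runner hands the program a pointer to a shared counter that each program entry increments, and FLAG_RESULT_IN_STATE compares that counter instead of R0. The max-count expectation decomposes as the .result expression above encodes it (the interpreter's check at this time was "cnt > MAX", not ">="):

	/* per run: 1 initial entry
	 *        + (MAX_TAIL_CALL_CNT + 1) tail calls the count check
	 *          still permits
	 *        = MAX_TAIL_CALL_CNT + 2 entries,
	 * over MAX_TESTRUNS runs:
	 *   (MAX_TAIL_CALL_CNT + 1 + 1) * MAX_TESTRUNS
	 */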
@@ -9147,17 +14730,19 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
/* Relocate runtime tail call offsets and addresses */
for (i = 0; i < len; i++) {
struct bpf_insn *insn = &fp->insnsi[i];
-
- if (insn->imm != TAIL_CALL_MARKER)
- continue;
+ long addr = 0;
switch (insn->code) {
case BPF_LD | BPF_DW | BPF_IMM:
+ if (insn->imm != TAIL_CALL_MARKER)
+ break;
insn[0].imm = (u32)(long)progs;
insn[1].imm = ((u64)(long)progs) >> 32;
break;
case BPF_ALU | BPF_MOV | BPF_K:
+ if (insn->imm != TAIL_CALL_MARKER)
+ break;
if (insn->off == TAIL_CALL_NULL)
insn->imm = ntests;
else if (insn->off == TAIL_CALL_INVALID)
@@ -9165,6 +14750,38 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
else
insn->imm = which + insn->off;
insn->off = 0;
+ break;
+
+ case BPF_JMP | BPF_CALL:
+ if (insn->src_reg != BPF_PSEUDO_CALL)
+ break;
+ switch (insn->imm) {
+ case BPF_FUNC_get_numa_node_id:
+ addr = (long)&numa_node_id;
+ break;
+ case BPF_FUNC_ktime_get_ns:
+ addr = (long)&ktime_get_ns;
+ break;
+ case BPF_FUNC_ktime_get_boot_ns:
+ addr = (long)&ktime_get_boot_fast_ns;
+ break;
+ case BPF_FUNC_ktime_get_coarse_ns:
+ addr = (long)&ktime_get_coarse_ns;
+ break;
+ case BPF_FUNC_jiffies64:
+ addr = (long)&get_jiffies_64;
+ break;
+ case BPF_FUNC_test_func:
+ addr = (long)&bpf_test_func;
+ break;
+ default:
+ err = -EFAULT;
+ goto out_err;
+ }
+ *insn = BPF_EMIT_CALL(addr);
+ if ((long)__bpf_call_base + insn->imm != addr)
+ *insn = BPF_JMP_A(0); /* Skip: NOP */
+ break;
}
}
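The round-trip check above relies on how the interpreter encodes helper calls: imm holds a 32-bit offset from __bpf_call_base. A sketch of the encoding and the range check, assuming that convention:

	insn->imm = (s32)(addr - (long)__bpf_call_base);
	/* the interpreter later recovers the target as:
	 *   target = (long)__bpf_call_base + insn->imm;
	 */
	if ((long)__bpf_call_base + insn->imm != addr)
		*insn = BPF_JMP_A(0);	/* out of s32 range: NOP the call */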
@@ -9197,6 +14814,8 @@ static __init int test_tail_calls(struct bpf_array *progs)
for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) {
struct tail_call_test *test = &tail_call_tests[i];
struct bpf_prog *fp = progs->ptrs[i];
+ int *data = NULL;
+ int state = 0;
u64 duration;
int ret;
@@ -9213,7 +14832,11 @@ static __init int test_tail_calls(struct bpf_array *progs)
if (fp->jited)
jit_cnt++;
- ret = __run_one(fp, NULL, MAX_TESTRUNS, &duration);
+ if (test->flags & FLAG_NEED_STATE)
+ data = &state;
+ ret = __run_one(fp, data, MAX_TESTRUNS, &duration);
+ if (test->flags & FLAG_RESULT_IN_STATE)
+ ret = state;
if (ret == test->result) {
pr_cont("%lld PASS", duration);
pass_cnt++;
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index f19c4fbe1be7..2843f9bb42ac 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -253,13 +253,12 @@ void inflate_fast(z_streamp strm, unsigned start)
sfrom = (unsigned short *)(from);
loops = len >> 1;
- do
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- *sout++ = *sfrom++;
-#else
- *sout++ = get_unaligned16(sfrom++);
-#endif
- while (--loops);
+ do {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ *sout++ = *sfrom++;
+ else
+ *sout++ = get_unaligned16(sfrom++);
+ } while (--loops);
out = (unsigned char *)sout;
from = (unsigned char *)sfrom;
} else { /* dist == 1 or dist == 2 */
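The IS_ENABLED() form above is behavior-preserving: the macro expands to a compile-time 0 or 1, the dead branch is folded away, and unlike the old #ifdef both branches remain visible to the compiler for type checking. A reduced stand-in showing the mechanism (the kernel's real macro is more involved):

	#define IS_ENABLED_SIMPLE(cfg)	(cfg)	/* stand-in, not the real macro */
	#define CFG_FAST		1

	if (IS_ENABLED_SIMPLE(CFG_FAST))
		*sout++ = *sfrom++;			/* kept */
	else
		*sout++ = get_unaligned16(sfrom++);	/* folded out, still parsed */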
diff --git a/mm/damon/dbgfs-test.h b/mm/damon/dbgfs-test.h
index 930e83bceef0..4eddcfa73996 100644
--- a/mm/damon/dbgfs-test.h
+++ b/mm/damon/dbgfs-test.h
@@ -20,27 +20,27 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
ssize_t nr_integers = 0, i;
question = "123";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
kfree(answers);
question = "123abc";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
kfree(answers);
question = "a123";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
kfree(answers);
question = "12 35";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
for (i = 0; i < nr_integers; i++)
@@ -48,7 +48,7 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
kfree(answers);
question = "12 35 46";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers);
for (i = 0; i < nr_integers; i++)
@@ -56,7 +56,7 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
kfree(answers);
question = "12 35 abc 46";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
for (i = 0; i < 2; i++)
@@ -64,13 +64,13 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
kfree(answers);
question = "";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
kfree(answers);
question = "\n";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
kfree(answers);
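strnlen(question, 128) and strlen(question) agree for every case above, since all inputs are NUL-terminated literals far shorter than 128; the change only drops a bound that never applied. For instance:

	const char *q = "12 35 46";
	/* strnlen(q, 128) == strlen(q) == 8 */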
diff --git a/mm/debug.c b/mm/debug.c
index e73fe0a8ec3d..fae0f81ad831 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -24,7 +24,9 @@ const char *migrate_reason_names[MR_TYPES] = {
"syscall_or_cpuset",
"mempolicy_mbind",
"numa_misplaced",
- "cma",
+ "contig_range",
+ "longterm_pin",
+ "demotion",
};
const struct trace_print_flags pageflag_names[] = {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5e9ef0fc261e..92192cb086c7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2700,12 +2700,14 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
if (mapping) {
int nr = thp_nr_pages(head);
- if (PageSwapBacked(head))
+ if (PageSwapBacked(head)) {
__mod_lruvec_page_state(head, NR_SHMEM_THPS,
-nr);
- else
+ } else {
__mod_lruvec_page_state(head, NR_FILE_THPS,
-nr);
+ filemap_nr_thps_dec(mapping);
+ }
}
__split_huge_page(page, list, end);
diff --git a/mm/memblock.c b/mm/memblock.c
index 184dcd2e5d99..5096500b2647 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -932,6 +932,9 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
* covered by the memory map. The struct page representing NOMAP memory
* frames in the memory map will be PageReserved()
*
+ * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
+ * memblock, the caller must inform kmemleak to ignore that memory.

+ *
* Return: 0 on success, -errno on failure.
*/
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
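A sketch of the caller-side pairing the new note asks for, assuming the region was handed out by memblock_alloc(); the kmemleak and memblock calls are the existing APIs, but the pairing itself is illustrative:

	void *buf = memblock_alloc(size, SMP_CACHE_BYTES);

	if (buf) {
		kmemleak_ignore(buf);			/* don't scan or report it */
		memblock_mark_nomap(__pa(buf), size);	/* drop from memory map    */
	}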
@@ -1687,7 +1690,7 @@ void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
if (!size)
return;
- if (memblock.memory.cnt <= 1) {
+ if (!memblock_memory->total_size) {
pr_warn("%s: No memory registered yet\n", __func__);
return;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b762215d73eb..6da5020a8656 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -106,9 +106,6 @@ static bool do_memsw_account(void)
/* memcg and lruvec stats flushing */
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
-static void flush_memcg_stats_work(struct work_struct *w);
-static DECLARE_WORK(stats_flush_work, flush_memcg_stats_work);
-static DEFINE_PER_CPU(unsigned int, stats_flush_threshold);
static DEFINE_SPINLOCK(stats_flush_lock);
#define THRESHOLDS_EVENTS_TARGET 128
@@ -682,8 +679,6 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
/* Update lruvec */
__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
- if (!(__this_cpu_inc_return(stats_flush_threshold) % MEMCG_CHARGE_BATCH))
- queue_work(system_unbound_wq, &stats_flush_work);
}
/**
@@ -5361,11 +5356,6 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
}
-static void flush_memcg_stats_work(struct work_struct *w)
-{
- mem_cgroup_flush_stats();
-}
-
static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 54879c339024..3e6449f2102a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -306,6 +306,7 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
struct vm_area_struct *vma)
{
unsigned long address = vma_address(page, vma);
+ unsigned long ret = 0;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
@@ -329,11 +330,10 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
if (pmd_devmap(*pmd))
return PMD_SHIFT;
pte = pte_offset_map(pmd, address);
- if (!pte_present(*pte))
- return 0;
- if (pte_devmap(*pte))
- return PAGE_SHIFT;
- return 0;
+ if (pte_present(*pte) && pte_devmap(*pte))
+ ret = PAGE_SHIFT;
+ pte_unmap(pte);
+ return ret;
}
/*
@@ -1126,7 +1126,7 @@ static int page_action(struct page_state *ps, struct page *p,
*/
static inline bool HWPoisonHandlable(struct page *page)
{
- return PageLRU(page) || __PageMovable(page);
+ return PageLRU(page) || __PageMovable(page) || is_free_buddy_page(page);
}
static int __get_hwpoison_page(struct page *page)
diff --git a/mm/memory.c b/mm/memory.c
index 25fc46e87214..adf9b9ef8277 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3403,6 +3403,7 @@ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
unmap_mapping_range_tree(&mapping->i_mmap, &details);
i_mmap_unlock_write(mapping);
}
+EXPORT_SYMBOL_GPL(unmap_mapping_pages);
/**
* unmap_mapping_range - unmap the portion of all mmaps in the specified
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1592b081c58e..d12e0608fced 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -856,16 +856,6 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
goto out;
}
- if (flags & MPOL_F_NUMA_BALANCING) {
- if (new && new->mode == MPOL_BIND) {
- new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
- } else {
- ret = -EINVAL;
- mpol_put(new);
- goto out;
- }
- }
-
ret = mpol_set_nodemask(new, nodes, scratch);
if (ret) {
mpol_put(new);
@@ -1458,7 +1448,11 @@ static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
return -EINVAL;
if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
return -EINVAL;
-
+ if (*flags & MPOL_F_NUMA_BALANCING) {
+ if (*mode != MPOL_BIND)
+ return -EINVAL;
+ *flags |= (MPOL_F_MOF | MPOL_F_MORON);
+ }
return 0;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index a6a7743ee98f..1852d787e6ab 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -3066,7 +3066,7 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
EXPORT_SYMBOL(migrate_vma_finalize);
#endif /* CONFIG_DEVICE_PRIVATE */
-#if defined(CONFIG_MEMORY_HOTPLUG)
+#if defined(CONFIG_HOTPLUG_CPU)
/* Disable reclaim-based migration. */
static void __disable_all_migrate_targets(void)
{
@@ -3209,25 +3209,6 @@ static void set_migration_target_nodes(void)
}
/*
- * React to hotplug events that might affect the migration targets
- * like events that online or offline NUMA nodes.
- *
- * The ordering is also currently dependent on which nodes have
- * CPUs. That means we need CPU on/offline notification too.
- */
-static int migration_online_cpu(unsigned int cpu)
-{
- set_migration_target_nodes();
- return 0;
-}
-
-static int migration_offline_cpu(unsigned int cpu)
-{
- set_migration_target_nodes();
- return 0;
-}
-
-/*
* This leaves migrate-on-reclaim transiently disabled between
* the MEM_GOING_OFFLINE and MEM_OFFLINE events. This runs
* whether reclaim-based migration is enabled or not, which
@@ -3239,8 +3220,18 @@ static int migration_offline_cpu(unsigned int cpu)
* set_migration_target_nodes().
*/
static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
- unsigned long action, void *arg)
+ unsigned long action, void *_arg)
{
+ struct memory_notify *arg = _arg;
+
+ /*
+ * Only update the node migration order when a node is
+ * changing status, like online->offline. This avoids
+ * the overhead of synchronize_rcu() in most cases.
+ */
+ if (arg->status_change_nid < 0)
+ return notifier_from_errno(0);
+
switch (action) {
case MEM_GOING_OFFLINE:
/*
@@ -3274,13 +3265,31 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
return notifier_from_errno(0);
}
+/*
+ * React to hotplug events that might affect the migration targets
+ * like events that online or offline NUMA nodes.
+ *
+ * The ordering is also currently dependent on which nodes have
+ * CPUs. That means we need CPU on/offline notification too.
+ */
+static int migration_online_cpu(unsigned int cpu)
+{
+ set_migration_target_nodes();
+ return 0;
+}
+
+static int migration_offline_cpu(unsigned int cpu)
+{
+ set_migration_target_nodes();
+ return 0;
+}
+
static int __init migrate_on_reclaim_init(void)
{
int ret;
- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "migrate on reclaim",
- migration_online_cpu,
- migration_offline_cpu);
+ ret = cpuhp_setup_state_nocalls(CPUHP_MM_DEMOTION_DEAD, "mm/demotion:offline",
+ NULL, migration_offline_cpu);
/*
* In the unlikely case that this fails, the automatic
* migration targets may become suboptimal for nodes
@@ -3288,9 +3297,12 @@ static int __init migrate_on_reclaim_init(void)
* rare case, do not bother trying to do anything special.
*/
WARN_ON(ret < 0);
+ ret = cpuhp_setup_state(CPUHP_AP_MM_DEMOTION_ONLINE, "mm/demotion:online",
+ migration_online_cpu, NULL);
+ WARN_ON(ret < 0);
hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
return 0;
}
late_initcall(migrate_on_reclaim_init);
-#endif /* CONFIG_MEMORY_HOTPLUG */
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/mm/page_ext.c b/mm/page_ext.c
index dfb91653d359..2a52fd9ed464 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -269,7 +269,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
total_usage += table_size;
return 0;
}
-#ifdef CONFIG_MEMORY_HOTPLUG
+
static void free_page_ext(void *addr)
{
if (is_vmalloc_addr(addr)) {
@@ -374,8 +374,6 @@ static int __meminit page_ext_callback(struct notifier_block *self,
return notifier_from_errno(ret);
}
-#endif
-
void __init page_ext_init(void)
{
unsigned long pfn;
diff --git a/mm/shmem.c b/mm/shmem.c
index 88742953532c..b5860f4a2738 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -490,9 +490,9 @@ bool shmem_is_huge(struct vm_area_struct *vma,
case SHMEM_HUGE_ALWAYS:
return true;
case SHMEM_HUGE_WITHIN_SIZE:
- index = round_up(index, HPAGE_PMD_NR);
+ index = round_up(index + 1, HPAGE_PMD_NR);
i_size = round_up(i_size_read(inode), PAGE_SIZE);
- if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index)
+ if (i_size >> PAGE_SHIFT >= index)
return true;
fallthrough;
case SHMEM_HUGE_ADVISE:
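The "index + 1" in the within_size check above fixes a boundary case. A worked example with 4 KiB base pages and HPAGE_PMD_NR == 512 (2 MiB huge pages):

	/* i_size == 2 MiB + 4 KiB, i.e. file pages 0..512;
	 * access at index == 512, in the second huge-page extent (512..1023):
	 *   old: round_up(512, 512) == 512,  513 >= 512  -> huge page spanning
	 *        mostly past EOF (wrong for within_size)
	 *   new: round_up(513, 512) == 1024, 513 >= 1024 -> base pages (right)
	 */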
diff --git a/mm/slab.c b/mm/slab.c
index d0f725637663..874b3f8fe80d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1095,7 +1095,7 @@ static int slab_offline_cpu(unsigned int cpu)
return 0;
}
-#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+#if defined(CONFIG_NUMA)
/*
* Drains freelist for a node on each slab cache, used for memory hot-remove.
* Returns -EBUSY if all objects cannot be drained so that the node is not
@@ -1157,7 +1157,7 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
out:
return notifier_from_errno(ret);
}
-#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
+#endif /* CONFIG_NUMA */
/*
* swap the static kmem_cache_node with kmalloced memory
diff --git a/mm/slub.c b/mm/slub.c
index 3d2025f7163b..d8f77346376d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1701,7 +1701,8 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
}
static inline bool slab_free_freelist_hook(struct kmem_cache *s,
- void **head, void **tail)
+ void **head, void **tail,
+ int *cnt)
{
void *object;
@@ -1728,6 +1729,12 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
*head = object;
if (!*tail)
*tail = object;
+ } else {
+ /*
+ * Adjust the reconstructed freelist depth
+ * accordingly if object's reuse is delayed.
+ */
+ --(*cnt);
}
} while (object != old_tail);
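The new --(*cnt) keeps the count passed on to do_slab_free() equal to the number of objects actually relinked, since KASAN may quarantine an object and drop it from the rebuilt freelist. The invariant, stated as an illustrative check that is not in the kernel (get_freepointer() is slub's own freelist walker):

	int linked = 0;
	void *p = *head;

	while (p) {
		linked++;
		if (p == *tail)
			break;
		p = get_freepointer(s, p);
	}
	WARN_ON(linked != *cnt);	/* illustrative only */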
@@ -3413,7 +3420,9 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
struct kmem_cache_cpu *c;
unsigned long tid;
- memcg_slab_free_hook(s, &head, 1);
+ /* memcg_slab_free_hook() is already called for bulk free. */
+ if (!tail)
+ memcg_slab_free_hook(s, &head, 1);
redo:
/*
* Determine the current cpu's per cpu slab.
@@ -3480,7 +3489,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
* With KASAN enabled slab_free_freelist_hook modifies the freelist
* to remove objects, whose reuse must be delayed.
*/
- if (slab_free_freelist_hook(s, &head, &tail))
+ if (slab_free_freelist_hook(s, &head, &tail, &cnt))
do_slab_free(s, page, head, tail, cnt, addr);
}
@@ -4203,8 +4212,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
if (alloc_kmem_cache_cpus(s))
return 0;
- free_kmem_cache_nodes(s);
error:
+ __kmem_cache_release(s);
return -EINVAL;
}
@@ -4880,13 +4889,15 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
return 0;
err = sysfs_slab_add(s);
- if (err)
+ if (err) {
__kmem_cache_release(s);
+ return err;
+ }
if (s->flags & SLAB_STORE_USER)
debugfs_slab_add(s);
- return err;
+ return 0;
}
void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
@@ -6108,9 +6119,14 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
struct kmem_cache *s = file_inode(filep)->i_private;
unsigned long *obj_map;
+ if (!t)
+ return -ENOMEM;
+
obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
- if (!obj_map)
+ if (!obj_map) {
+ seq_release_private(inode, filep);
return -ENOMEM;
+ }
if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
alloc = TRACK_ALLOC;
@@ -6119,6 +6135,7 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
bitmap_free(obj_map);
+ seq_release_private(inode, filep);
return -ENOMEM;
}
diff --git a/mm/swap.c b/mm/swap.c
index 897200d27dd0..af3cad4e5378 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -620,7 +620,6 @@ void lru_add_drain_cpu(int cpu)
pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
activate_page_drain(cpu);
- invalidate_bh_lrus_cpu(cpu);
}
/**
@@ -703,6 +702,20 @@ void lru_add_drain(void)
local_unlock(&lru_pvecs.lock);
}
+/*
+ * This is called from per-CPU workqueue context in the SMP case, so
+ * lru_add_drain_cpu() and invalidate_bh_lrus_cpu() run on the same
+ * CPU. The !SMP case is not a problem: there is only one core, and
+ * the locks disable preemption.
+ */
+static void lru_add_and_bh_lrus_drain(void)
+{
+ local_lock(&lru_pvecs.lock);
+ lru_add_drain_cpu(smp_processor_id());
+ local_unlock(&lru_pvecs.lock);
+ invalidate_bh_lrus_cpu();
+}
+
void lru_add_drain_cpu_zone(struct zone *zone)
{
local_lock(&lru_pvecs.lock);
@@ -717,7 +730,7 @@ static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
- lru_add_drain();
+ lru_add_and_bh_lrus_drain();
}
/*
@@ -858,7 +871,7 @@ void lru_cache_disable(void)
*/
__lru_add_drain_all(true);
#else
- lru_add_drain();
+ lru_add_and_bh_lrus_drain();
#endif
}
diff --git a/mm/util.c b/mm/util.c
index 499b6b5767ed..bacabe446906 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -787,7 +787,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
- int new_policy;
+ int new_policy = -1;
int ret;
/*
@@ -805,7 +805,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
t = *table;
t.data = &new_policy;
ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
- if (ret)
+ if (ret || new_policy == -1)
return ret;
mm_compute_batch(new_policy);
diff --git a/mm/workingset.c b/mm/workingset.c
index d4268d8e9a82..d5b81e4f4cbe 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -352,6 +352,7 @@ void workingset_refault(struct page *page, void *shadow)
inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
+ mem_cgroup_flush_stats();
/*
* Compare the distance to the existing workingset size. We
* don't activate pages that couldn't stay resident even if
diff --git a/net/802/hippi.c b/net/802/hippi.c
index f80b33a8f7e0..887e73d520e4 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -121,7 +121,7 @@ int hippi_mac_addr(struct net_device *dev, void *p)
struct sockaddr *addr = p;
if (netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ dev_addr_set(dev, addr->sa_data);
return 0;
}
EXPORT_SYMBOL(hippi_mac_addr);
diff --git a/net/802/p8022.c b/net/802/p8022.c
index a6585627051d..79c23173116c 100644
--- a/net/802/p8022.c
+++ b/net/802/p8022.c
@@ -23,7 +23,7 @@
#include <net/p8022.h>
static int p8022_request(struct datalink_proto *dl, struct sk_buff *skb,
- unsigned char *dest)
+ const unsigned char *dest)
{
llc_build_and_send_ui_pkt(dl->sap, skb, dest, dl->sap->laddr.lsap);
return 0;
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 4492e8d7ad20..1406bfdbda13 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -79,7 +79,7 @@ drop:
* Put a SNAP header on a frame and pass to 802.2
*/
static int snap_request(struct datalink_proto *dl,
- struct sk_buff *skb, u8 *dest)
+ struct sk_buff *skb, const u8 *dest)
{
memcpy(skb_push(skb, 5), dl->type, 5);
llc_build_and_send_ui_pkt(snap_sap, skb, dest, snap_sap->laddr.lsap);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 0c21d1fec852..90330b893134 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -250,7 +250,7 @@ bool vlan_dev_inherit_address(struct net_device *dev,
if (dev->addr_assign_type != NET_ADDR_STOLEN)
return false;
- ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+ eth_hw_addr_set(dev, real_dev->dev_addr);
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
return true;
}
@@ -349,7 +349,7 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
dev_uc_del(real_dev, dev->dev_addr);
out:
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -586,7 +586,7 @@ static int vlan_dev_init(struct net_device *dev)
dev->dev_id = real_dev->dev_id;
if (is_zero_ether_addr(dev->dev_addr)) {
- ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+ eth_hw_addr_set(dev, real_dev->dev_addr);
dev->addr_assign_type = NET_ADDR_STOLEN;
}
if (is_zero_ether_addr(dev->broadcast))
diff --git a/net/Kconfig b/net/Kconfig
index fb13460c6dab..074472dfa94a 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -294,7 +294,7 @@ config CGROUP_NET_CLASSID
config NET_RX_BUSY_POLL
bool
- default y
+ default y if !PREEMPT_RT
config BQL
bool
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index dd2a8dabed84..11854fde52db 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -578,7 +578,7 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) {
unsigned char *esi = atmvcc->dev->esi;
if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5])
- memcpy(net_dev->dev_addr, esi, net_dev->addr_len);
+ dev_addr_set(net_dev, esi);
else
net_dev->dev_addr[2] = 1;
}
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 7226c784dbe0..8eaea4a4bbd6 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -355,8 +355,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type);
switch (mesg->type) {
case l_set_mac_addr:
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = mesg->content.normal.mac_addr[i];
+ eth_hw_addr_set(dev, mesg->content.normal.mac_addr);
break;
case l_del_mac_addr:
for (i = 0; i < 6; i++)
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 2631efc6e359..2f34bbdde0e8 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -202,7 +202,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
* Find an AX.25 control block given both ends. It will only pick up
* floating AX.25 control blocks or non Raw socket bound control blocks.
*/
-ax25_cb *ax25_find_cb(ax25_address *src_addr, ax25_address *dest_addr,
+ax25_cb *ax25_find_cb(const ax25_address *src_addr, ax25_address *dest_addr,
ax25_digi *digi, struct net_device *dev)
{
ax25_cb *s;
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 4ac2e0847652..d0a043a51848 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -35,7 +35,7 @@ ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
spin_lock_bh(&ax25_dev_lock);
for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
- if (ax25cmp(addr, (ax25_address *)ax25_dev->dev->dev_addr) == 0) {
+ if (ax25cmp(addr, (const ax25_address *)ax25_dev->dev->dev_addr) == 0) {
res = ax25_dev;
}
spin_unlock_bh(&ax25_dev_lock);
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index b4083f30af0d..979bc4b828a0 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -98,7 +98,7 @@ void ax25_linkfail_release(struct ax25_linkfail *lf)
EXPORT_SYMBOL(ax25_linkfail_release);
-int ax25_listen_register(ax25_address *callsign, struct net_device *dev)
+int ax25_listen_register(const ax25_address *callsign, struct net_device *dev)
{
struct listen_struct *listen;
@@ -121,7 +121,7 @@ int ax25_listen_register(ax25_address *callsign, struct net_device *dev)
EXPORT_SYMBOL(ax25_listen_register);
-void ax25_listen_release(ax25_address *callsign, struct net_device *dev)
+void ax25_listen_release(const ax25_address *callsign, struct net_device *dev)
{
struct listen_struct *s, *listen;
@@ -171,7 +171,7 @@ int (*ax25_protocol_function(unsigned int pid))(struct sk_buff *, ax25_cb *)
return res;
}
-int ax25_listen_mine(ax25_address *callsign, struct net_device *dev)
+int ax25_listen_mine(const ax25_address *callsign, struct net_device *dev)
{
struct listen_struct *listen;
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index cd6afe895db9..1cac25aca637 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -181,7 +181,7 @@ static int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, i
}
static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
- ax25_address *dev_addr, struct packet_type *ptype)
+ const ax25_address *dev_addr, struct packet_type *ptype)
{
ax25_address src, dest, *next_digi = NULL;
int type = 0, mine = 0, dama;
@@ -447,5 +447,5 @@ int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
skb_pull(skb, AX25_KISS_HEADER_LEN); /* Remove the KISS byte */
- return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype);
+ return ax25_rcv(skb, dev, (const ax25_address *)dev->dev_addr, ptype);
}
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index 22f2f66c6e0a..3db76d2470e9 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -29,7 +29,7 @@
static DEFINE_SPINLOCK(ax25_frag_lock);
-ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
+ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
{
ax25_dev *ax25_dev;
ax25_cb *ax25;
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 1669744304c5..7242b32fff80 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -254,7 +254,7 @@ batadv_claim_hash_find(struct batadv_priv *bat_priv,
* Return: backbone gateway if found or NULL otherwise
*/
static struct batadv_bla_backbone_gw *
-batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
+batadv_backbone_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
@@ -336,7 +336,7 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
* @vid: the VLAN ID
* @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
*/
-static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
+static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac,
unsigned short vid, int claimtype)
{
struct sk_buff *skb;
@@ -488,7 +488,7 @@ static void batadv_bla_loopdetect_report(struct work_struct *work)
* Return: the (possibly created) backbone gateway or NULL on error
*/
static struct batadv_bla_backbone_gw *
-batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
+batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, const u8 *orig,
unsigned short vid, bool own_backbone)
{
struct batadv_bla_backbone_gw *entry;
@@ -926,7 +926,7 @@ static bool batadv_handle_request(struct batadv_priv *bat_priv,
*/
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- u8 *backbone_addr, u8 *claim_addr,
+ const u8 *backbone_addr, const u8 *claim_addr,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
@@ -964,7 +964,7 @@ static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
*/
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- u8 *backbone_addr, u8 *claim_addr,
+ const u8 *backbone_addr, const u8 *claim_addr,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
@@ -2126,7 +2126,7 @@ batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
struct batadv_hard_iface *primary_if,
struct batadv_bla_claim *claim)
{
- u8 *primary_addr = primary_if->net_dev->dev_addr;
+ const u8 *primary_addr = primary_if->net_dev->dev_addr;
u16 backbone_crc;
bool is_own;
void *hdr;
@@ -2294,7 +2294,7 @@ batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid,
struct batadv_hard_iface *primary_if,
struct batadv_bla_backbone_gw *backbone_gw)
{
- u8 *primary_addr = primary_if->net_dev->dev_addr;
+ const u8 *primary_addr = primary_if->net_dev->dev_addr;
u16 backbone_crc;
bool is_own;
int msecs;
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index a3b6658ed789..433901dcf0c3 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -89,7 +89,7 @@ static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
rcu_read_lock();
do {
upper = netdev_master_upper_dev_get_rcu(upper);
- } while (upper && !(upper->priv_flags & IFF_EBRIDGE));
+ } while (upper && !netif_is_bridge_master(upper));
dev_hold(upper);
rcu_read_unlock();
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 970d0d7ccc98..83f31494ea4d 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -747,7 +747,8 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_orig_node *orig_node = NULL;
struct batadv_hard_iface *primary_if = NULL;
bool ret = false;
- u8 *orig_addr, orig_ttvn;
+ const u8 *orig_addr;
+ u8 orig_ttvn;
if (batadv_is_my_client(bat_priv, dst_addr, vid)) {
primary_if = batadv_primary_if_get_selected(bat_priv);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 0604b0279573..7ee09337fc40 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -134,7 +134,7 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
return -EADDRNOTAVAIL;
ether_addr_copy(old_addr, dev->dev_addr);
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
/* only modify transtable if it has been initialized before */
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index 56b9fe97b3b4..fbcb15c7c29b 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -631,9 +631,9 @@ static void batadv_tp_recv_ack(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node = NULL;
const struct batadv_icmp_tp_packet *icmp;
struct batadv_tp_vars *tp_vars;
+ const unsigned char *dev_addr;
size_t packet_len, mss;
u32 rtt, recv_ack, cwnd;
- unsigned char *dev_addr;
packet_len = BATADV_TP_PLEN;
mss = BATADV_TP_PLEN;
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
index 992773376e51..0cb58eb04093 100644
--- a/net/batman-adv/tvlv.c
+++ b/net/batman-adv/tvlv.c
@@ -587,8 +587,8 @@ void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
* @tvlv_value: tvlv content
* @tvlv_value_len: tvlv content length
*/
-void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
- u8 *dst, u8 type, u8 version,
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, const u8 *src,
+ const u8 *dst, u8 type, u8 version,
void *tvlv_value, u16 tvlv_value_len)
{
struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
diff --git a/net/batman-adv/tvlv.h b/net/batman-adv/tvlv.h
index 54f2a35653d0..4cf8af00fc11 100644
--- a/net/batman-adv/tvlv.h
+++ b/net/batman-adv/tvlv.h
@@ -42,8 +42,8 @@ int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
u8 *src, u8 *dst,
void *tvlv_buff, u16 tvlv_buff_len);
-void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
- u8 *dst, u8 type, u8 version,
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, const u8 *src,
+ const u8 *dst, u8 type, u8 version,
void *tvlv_value, u16 tvlv_value_len);
#endif /* _NET_BATMAN_ADV_TVLV_H_ */
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index cc0995301f93..291770fc9551 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -14,7 +14,8 @@ bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
- ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o
+ ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \
+ eir.o
bluetooth-$(CONFIG_BT_BREDR) += sco.o
bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c
new file mode 100644
index 000000000000..7e930f77ecab
--- /dev/null
+++ b/net/bluetooth/eir.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BlueZ - Bluetooth protocol stack for Linux
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/mgmt.h>
+
+#include "eir.h"
+
+#define PNP_INFO_SVCLASS_ID 0x1200
+
+u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
+{
+ size_t short_len;
+ size_t complete_len;
+
+ /* no space left for name (+ NULL + type + len) */
+ if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
+ return ad_len;
+
+ /* use complete name if present and fits */
+ complete_len = strlen(hdev->dev_name);
+ if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
+ return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
+ hdev->dev_name, complete_len + 1);
+
+ /* use short name if present */
+ short_len = strlen(hdev->short_name);
+ if (short_len)
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
+ hdev->short_name, short_len + 1);
+
+	/* use the shortened full name if present; we already know the name
+	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
+ */
+ if (complete_len) {
+ u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
+
+ memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
+ name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
+
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
+ sizeof(name));
+ }
+
+ return ad_len;
+}
+
+u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
+{
+ return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
+}
+
+static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
+{
+ u8 *ptr = data, *uuids_start = NULL;
+ struct bt_uuid *uuid;
+
+ if (len < 4)
+ return ptr;
+
+ list_for_each_entry(uuid, &hdev->uuids, list) {
+ u16 uuid16;
+
+ if (uuid->size != 16)
+ continue;
+
+ uuid16 = get_unaligned_le16(&uuid->uuid[12]);
+ if (uuid16 < 0x1100)
+ continue;
+
+ if (uuid16 == PNP_INFO_SVCLASS_ID)
+ continue;
+
+ if (!uuids_start) {
+ uuids_start = ptr;
+ uuids_start[0] = 1;
+ uuids_start[1] = EIR_UUID16_ALL;
+ ptr += 2;
+ }
+
+ /* Stop if not enough space to put next UUID */
+ if ((ptr - data) + sizeof(u16) > len) {
+ uuids_start[1] = EIR_UUID16_SOME;
+ break;
+ }
+
+ *ptr++ = (uuid16 & 0x00ff);
+ *ptr++ = (uuid16 & 0xff00) >> 8;
+ uuids_start[0] += sizeof(uuid16);
+ }
+
+ return ptr;
+}
+
+static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
+{
+ u8 *ptr = data, *uuids_start = NULL;
+ struct bt_uuid *uuid;
+
+ if (len < 6)
+ return ptr;
+
+ list_for_each_entry(uuid, &hdev->uuids, list) {
+ if (uuid->size != 32)
+ continue;
+
+ if (!uuids_start) {
+ uuids_start = ptr;
+ uuids_start[0] = 1;
+ uuids_start[1] = EIR_UUID32_ALL;
+ ptr += 2;
+ }
+
+ /* Stop if not enough space to put next UUID */
+ if ((ptr - data) + sizeof(u32) > len) {
+ uuids_start[1] = EIR_UUID32_SOME;
+ break;
+ }
+
+ memcpy(ptr, &uuid->uuid[12], sizeof(u32));
+ ptr += sizeof(u32);
+ uuids_start[0] += sizeof(u32);
+ }
+
+ return ptr;
+}
+
+static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
+{
+ u8 *ptr = data, *uuids_start = NULL;
+ struct bt_uuid *uuid;
+
+ if (len < 18)
+ return ptr;
+
+ list_for_each_entry(uuid, &hdev->uuids, list) {
+ if (uuid->size != 128)
+ continue;
+
+ if (!uuids_start) {
+ uuids_start = ptr;
+ uuids_start[0] = 1;
+ uuids_start[1] = EIR_UUID128_ALL;
+ ptr += 2;
+ }
+
+ /* Stop if not enough space to put next UUID */
+ if ((ptr - data) + 16 > len) {
+ uuids_start[1] = EIR_UUID128_SOME;
+ break;
+ }
+
+ memcpy(ptr, uuid->uuid, 16);
+ ptr += 16;
+ uuids_start[0] += 16;
+ }
+
+ return ptr;
+}
+
+void eir_create(struct hci_dev *hdev, u8 *data)
+{
+ u8 *ptr = data;
+ size_t name_len;
+
+ name_len = strlen(hdev->dev_name);
+
+ if (name_len > 0) {
+ /* EIR Data type */
+ if (name_len > 48) {
+ name_len = 48;
+ ptr[1] = EIR_NAME_SHORT;
+ } else {
+ ptr[1] = EIR_NAME_COMPLETE;
+ }
+
+ /* EIR Data length */
+ ptr[0] = name_len + 1;
+
+ memcpy(ptr + 2, hdev->dev_name, name_len);
+
+ ptr += (name_len + 2);
+ }
+
+ if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
+ ptr[0] = 2;
+ ptr[1] = EIR_TX_POWER;
+ ptr[2] = (u8)hdev->inq_tx_power;
+
+ ptr += 3;
+ }
+
+ if (hdev->devid_source > 0) {
+ ptr[0] = 9;
+ ptr[1] = EIR_DEVICE_ID;
+
+ put_unaligned_le16(hdev->devid_source, ptr + 2);
+ put_unaligned_le16(hdev->devid_vendor, ptr + 4);
+ put_unaligned_le16(hdev->devid_product, ptr + 6);
+ put_unaligned_le16(hdev->devid_version, ptr + 8);
+
+ ptr += 10;
+ }
+
+ ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
+ ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
+ ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
+}
+
+u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
+{
+ struct adv_info *adv = NULL;
+ u8 ad_len = 0, flags = 0;
+ u32 instance_flags;
+
+ /* Return 0 when the current instance identifier is invalid. */
+ if (instance) {
+ adv = hci_find_adv_instance(hdev, instance);
+ if (!adv)
+ return 0;
+ }
+
+ instance_flags = hci_adv_instance_flags(hdev, instance);
+
+	/* If the instance already has the flags set, skip adding them
+	 * again.
+ */
+ if (adv && eir_get_data(adv->adv_data, adv->adv_data_len, EIR_FLAGS,
+ NULL))
+ goto skip_flags;
+
+ /* The Add Advertising command allows userspace to set both the general
+ * and limited discoverable flags.
+ */
+ if (instance_flags & MGMT_ADV_FLAG_DISCOV)
+ flags |= LE_AD_GENERAL;
+
+ if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
+ flags |= LE_AD_LIMITED;
+
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ flags |= LE_AD_NO_BREDR;
+
+ if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
+ /* If a discovery flag wasn't provided, simply use the global
+ * settings.
+ */
+ if (!flags)
+ flags |= mgmt_get_adv_discov_flags(hdev);
+
+ /* If flags would still be empty, then there is no need to
+		 * include the "Flags" AD field.
+ */
+ if (flags) {
+ ptr[0] = 0x02;
+ ptr[1] = EIR_FLAGS;
+ ptr[2] = flags;
+
+ ad_len += 3;
+ ptr += 3;
+ }
+ }
+
+skip_flags:
+ if (adv) {
+ memcpy(ptr, adv->adv_data, adv->adv_data_len);
+ ad_len += adv->adv_data_len;
+ ptr += adv->adv_data_len;
+ }
+
+ if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
+ s8 adv_tx_power;
+
+ if (ext_adv_capable(hdev)) {
+ if (adv)
+ adv_tx_power = adv->tx_power;
+ else
+ adv_tx_power = hdev->adv_tx_power;
+ } else {
+ adv_tx_power = hdev->adv_tx_power;
+ }
+
+		/* Only include Tx Power if we have a valid value for it */
+ if (adv_tx_power != HCI_TX_POWER_INVALID) {
+ ptr[0] = 0x02;
+ ptr[1] = EIR_TX_POWER;
+ ptr[2] = (u8)adv_tx_power;
+
+ ad_len += 3;
+ ptr += 3;
+ }
+ }
+
+ return ad_len;
+}
+
+static u8 create_default_scan_rsp(struct hci_dev *hdev, u8 *ptr)
+{
+ u8 scan_rsp_len = 0;
+
+ if (hdev->appearance)
+ scan_rsp_len = eir_append_appearance(hdev, ptr, scan_rsp_len);
+
+ return eir_append_local_name(hdev, ptr, scan_rsp_len);
+}
+
+u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr)
+{
+ struct adv_info *adv;
+ u8 scan_rsp_len = 0;
+
+ if (!instance)
+ return create_default_scan_rsp(hdev, ptr);
+
+ adv = hci_find_adv_instance(hdev, instance);
+ if (!adv)
+ return 0;
+
+ if ((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
+ scan_rsp_len = eir_append_appearance(hdev, ptr, scan_rsp_len);
+
+ memcpy(&ptr[scan_rsp_len], adv->scan_rsp_data, adv->scan_rsp_len);
+
+ scan_rsp_len += adv->scan_rsp_len;
+
+ if (adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
+ scan_rsp_len = eir_append_local_name(hdev, ptr, scan_rsp_len);
+
+ return scan_rsp_len;
+}
diff --git a/net/bluetooth/eir.h b/net/bluetooth/eir.h
new file mode 100644
index 000000000000..724662f8f8b1
--- /dev/null
+++ b/net/bluetooth/eir.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BlueZ - Bluetooth protocol stack for Linux
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+void eir_create(struct hci_dev *hdev, u8 *data);
+
+u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr);
+u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr);
+
+u8 eir_append_local_name(struct hci_dev *hdev, u8 *eir, u8 ad_len);
+u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len);
+
+static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
+ u8 *data, u8 data_len)
+{
+ eir[eir_len++] = sizeof(type) + data_len;
+ eir[eir_len++] = type;
+ memcpy(&eir[eir_len], data, data_len);
+ eir_len += data_len;
+
+ return eir_len;
+}
+
+static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data)
+{
+ eir[eir_len++] = sizeof(type) + sizeof(data);
+ eir[eir_len++] = type;
+ put_unaligned_le16(data, &eir[eir_len]);
+ eir_len += sizeof(data);
+
+ return eir_len;
+}
+
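Both append helpers above emit a standard EIR/AD structure: one length byte counting the type byte plus payload, then the type, then the payload. A worked example (the appearance value is arbitrary; EIR_APPEARANCE is AD type 0x19):

	/* eir_append_le16(eir, 0, EIR_APPEARANCE, 0x0341) yields:
	 *   eir[0] = 0x03	length: type byte + 2 data bytes
	 *   eir[1] = 0x19	type
	 *   eir[2] = 0x41	data, little endian
	 *   eir[3] = 0x03
	 * return value: 4, the new eir_len
	 */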
+static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type,
+ size_t *data_len)
+{
+ size_t parsed = 0;
+
+ if (eir_len < 2)
+ return NULL;
+
+ while (parsed < eir_len - 1) {
+ u8 field_len = eir[0];
+
+ if (field_len == 0)
+ break;
+
+ parsed += field_len + 1;
+
+ if (parsed > eir_len)
+ break;
+
+ if (eir[1] != type) {
+ eir += field_len + 1;
+ continue;
+ }
+
+ /* Zero length data */
+ if (field_len == 1)
+ return NULL;
+
+ if (data_len)
+ *data_len = field_len - 1;
+
+ return &eir[2];
+ }
+
+ return NULL;
+}
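eir_get_data() walks the same length-prefixed layout. A worked parse of a two-field buffer, Flags (type 0x01) followed by Complete Local Name (type 0x09):

	/* eir = { 0x02, 0x01, 0x06,				Flags field
	 *         0x0a, 0x09, 'h','c','i','0','-','t','e','s','t' }
	 * eir_get_data(eir, 14, 0x09, &n):
	 *   field 1: len 2, type 0x01 != 0x09 -> skip 3 bytes
	 *   field 2: len 10, type 0x09        -> n = 9, return &eir[5]
	 */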
diff --git a/net/bluetooth/hci_codec.c b/net/bluetooth/hci_codec.c
new file mode 100644
index 000000000000..f0421d0edaa3
--- /dev/null
+++ b/net/bluetooth/hci_codec.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2021 Intel Corporation */
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include "hci_codec.h"
+
+static int hci_codec_list_add(struct list_head *list,
+ struct hci_op_read_local_codec_caps *sent,
+ struct hci_rp_read_local_codec_caps *rp,
+ void *caps,
+ __u32 len)
+{
+ struct codec_list *entry;
+
+ entry = kzalloc(sizeof(*entry) + len, GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->id = sent->id;
+ if (sent->id == 0xFF) {
+ entry->cid = __le16_to_cpu(sent->cid);
+ entry->vid = __le16_to_cpu(sent->vid);
+ }
+ entry->transport = sent->transport;
+ entry->len = len;
+ entry->num_caps = rp->num_caps;
+ if (rp->num_caps)
+ memcpy(entry->caps, caps, len);
+ list_add(&entry->list, list);
+
+ return 0;
+}
+
+void hci_codec_list_clear(struct list_head *codec_list)
+{
+ struct codec_list *c, *n;
+
+ list_for_each_entry_safe(c, n, codec_list, list) {
+ list_del(&c->list);
+ kfree(c);
+ }
+}
+
+static void hci_read_codec_capabilities(struct hci_dev *hdev, __u8 transport,
+ struct hci_op_read_local_codec_caps
+ *cmd)
+{
+ __u8 i;
+
+ for (i = 0; i < TRANSPORT_TYPE_MAX; i++) {
+ if (transport & BIT(i)) {
+ struct hci_rp_read_local_codec_caps *rp;
+ struct hci_codec_caps *caps;
+ struct sk_buff *skb;
+ __u8 j;
+ __u32 len;
+
+ cmd->transport = i;
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS,
+ sizeof(*cmd), cmd,
+ HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Failed to read codec capabilities (%ld)",
+ PTR_ERR(skb));
+ continue;
+ }
+
+ if (skb->len < sizeof(*rp))
+ goto error;
+
+ rp = (void *)skb->data;
+
+ if (rp->status)
+ goto error;
+
+ if (!rp->num_caps) {
+ len = 0;
+ /* this codec doesn't have capabilities */
+ goto skip_caps_parse;
+ }
+
+ skb_pull(skb, sizeof(*rp));
+
+ for (j = 0, len = 0; j < rp->num_caps; j++) {
+ caps = (void *)skb->data;
+ if (skb->len < sizeof(*caps))
+ goto error;
+ if (skb->len < caps->len)
+ goto error;
+ len += sizeof(caps->len) + caps->len;
+ skb_pull(skb, sizeof(caps->len) + caps->len);
+ }
+
+skip_caps_parse:
+ hci_dev_lock(hdev);
+ hci_codec_list_add(&hdev->local_codecs, cmd, rp,
+ (__u8 *)rp + sizeof(*rp), len);
+ hci_dev_unlock(hdev);
+error:
+ kfree_skb(skb);
+ }
+ }
+}
+
+void hci_read_supported_codecs(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ struct hci_rp_read_local_supported_codecs *rp;
+ struct hci_std_codecs *std_codecs;
+ struct hci_vnd_codecs *vnd_codecs;
+ struct hci_op_read_local_codec_caps caps;
+ __u8 i;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL,
+ HCI_CMD_TIMEOUT);
+
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Failed to read local supported codecs (%ld)",
+ PTR_ERR(skb));
+ return;
+ }
+
+ if (skb->len < sizeof(*rp))
+ goto error;
+
+ rp = (void *)skb->data;
+
+ if (rp->status)
+ goto error;
+
+ skb_pull(skb, sizeof(rp->status));
+
+ std_codecs = (void *)skb->data;
+
+ /* validate codecs length before accessing */
+ if (skb->len < flex_array_size(std_codecs, codec, std_codecs->num)
+ + sizeof(std_codecs->num))
+ goto error;
+
+ /* enumerate codec capabilities of standard codecs */
+ memset(&caps, 0, sizeof(caps));
+ for (i = 0; i < std_codecs->num; i++) {
+ caps.id = std_codecs->codec[i];
+ caps.direction = 0x00;
+ hci_read_codec_capabilities(hdev, LOCAL_CODEC_ACL_MASK, &caps);
+ }
+
+ skb_pull(skb, flex_array_size(std_codecs, codec, std_codecs->num)
+ + sizeof(std_codecs->num));
+
+ vnd_codecs = (void *)skb->data;
+
+ /* validate vendor codecs length before accessing */
+ if (skb->len <
+ flex_array_size(vnd_codecs, codec, vnd_codecs->num)
+ + sizeof(vnd_codecs->num))
+ goto error;
+
+ /* enumerate vendor codec capabilities */
+ for (i = 0; i < vnd_codecs->num; i++) {
+ caps.id = 0xFF;
+ caps.cid = vnd_codecs->codec[i].cid;
+ caps.vid = vnd_codecs->codec[i].vid;
+ caps.direction = 0x00;
+ hci_read_codec_capabilities(hdev, LOCAL_CODEC_ACL_MASK, &caps);
+ }
+
+error:
+ kfree_skb(skb);
+}
+
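Both parsers lean on flex_array_size() for their length checks; conceptually it is count times the element size, but it saturates on overflow, so a bogus num from the controller cannot wrap the comparison. Roughly (the real macro also adds type checking):

	/* flex_array_size(p, member, count)
	 *   ~= saturating (count * sizeof(*p->member))
	 */
	if (skb->len < sizeof(std_codecs->num) +
		       flex_array_size(std_codecs, codec, std_codecs->num))
		goto error;	/* truncated or overflowed: treat as invalid */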
+void hci_read_supported_codecs_v2(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ struct hci_rp_read_local_supported_codecs_v2 *rp;
+ struct hci_std_codecs_v2 *std_codecs;
+ struct hci_vnd_codecs_v2 *vnd_codecs;
+ struct hci_op_read_local_codec_caps caps;
+ __u8 i;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODECS_V2, 0, NULL,
+ HCI_CMD_TIMEOUT);
+
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Failed to read local supported codecs (%ld)",
+ PTR_ERR(skb));
+ return;
+ }
+
+ if (skb->len < sizeof(*rp))
+ goto error;
+
+ rp = (void *)skb->data;
+
+ if (rp->status)
+ goto error;
+
+ skb_pull(skb, sizeof(rp->status));
+
+ std_codecs = (void *)skb->data;
+
+ /* check for payload data length before accessing */
+ if (skb->len < flex_array_size(std_codecs, codec, std_codecs->num)
+ + sizeof(std_codecs->num))
+ goto error;
+
+ memset(&caps, 0, sizeof(caps));
+
+ for (i = 0; i < std_codecs->num; i++) {
+ caps.id = std_codecs->codec[i].id;
+ hci_read_codec_capabilities(hdev, std_codecs->codec[i].transport,
+ &caps);
+ }
+
+ skb_pull(skb, flex_array_size(std_codecs, codec, std_codecs->num)
+ + sizeof(std_codecs->num));
+
+ vnd_codecs = (void *)skb->data;
+
+ /* check for payload data length before accessing */
+ if (skb->len <
+ flex_array_size(vnd_codecs, codec, vnd_codecs->num)
+ + sizeof(vnd_codecs->num))
+ goto error;
+
+ for (i = 0; i < vnd_codecs->num; i++) {
+ caps.id = 0xFF;
+ caps.cid = vnd_codecs->codec[i].cid;
+ caps.vid = vnd_codecs->codec[i].vid;
+ hci_read_codec_capabilities(hdev, vnd_codecs->codec[i].transport,
+ &caps);
+ }
+
+error:
+ kfree_skb(skb);
+}
diff --git a/net/bluetooth/hci_codec.h b/net/bluetooth/hci_codec.h
new file mode 100644
index 000000000000..a2751930f123
--- /dev/null
+++ b/net/bluetooth/hci_codec.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (C) 2014 Intel Corporation */
+
+void hci_read_supported_codecs(struct hci_dev *hdev);
+void hci_read_supported_codecs_v2(struct hci_dev *hdev);
+void hci_codec_list_clear(struct list_head *codec_list);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 2b5059a56cda..bd669c95b9a7 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -307,13 +307,133 @@ static bool find_next_esco_param(struct hci_conn *conn,
return conn->attempt <= size;
}
-bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
+static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle)
+{
+ struct hci_dev *hdev = conn->hdev;
+ struct hci_cp_enhanced_setup_sync_conn cp;
+ const struct sco_param *param;
+
+ bt_dev_dbg(hdev, "hcon %p", conn);
+
+	/* for the offload use case, the codec needs to be configured before opening SCO */
+ if (conn->codec.data_path)
+ hci_req_configure_datapath(hdev, &conn->codec);
+
+ conn->state = BT_CONNECT;
+ conn->out = true;
+
+ conn->attempt++;
+
+ memset(&cp, 0x00, sizeof(cp));
+
+ cp.handle = cpu_to_le16(handle);
+
+ cp.tx_bandwidth = cpu_to_le32(0x00001f40);
+ cp.rx_bandwidth = cpu_to_le32(0x00001f40);
+
+ switch (conn->codec.id) {
+ case BT_CODEC_MSBC:
+ if (!find_next_esco_param(conn, esco_param_msbc,
+ ARRAY_SIZE(esco_param_msbc)))
+ return false;
+
+ param = &esco_param_msbc[conn->attempt - 1];
+ cp.tx_coding_format.id = 0x05;
+ cp.rx_coding_format.id = 0x05;
+ cp.tx_codec_frame_size = __cpu_to_le16(60);
+ cp.rx_codec_frame_size = __cpu_to_le16(60);
+ cp.in_bandwidth = __cpu_to_le32(32000);
+ cp.out_bandwidth = __cpu_to_le32(32000);
+ cp.in_coding_format.id = 0x04;
+ cp.out_coding_format.id = 0x04;
+ cp.in_coded_data_size = __cpu_to_le16(16);
+ cp.out_coded_data_size = __cpu_to_le16(16);
+ cp.in_pcm_data_format = 2;
+ cp.out_pcm_data_format = 2;
+ cp.in_pcm_sample_payload_msb_pos = 0;
+ cp.out_pcm_sample_payload_msb_pos = 0;
+ cp.in_data_path = conn->codec.data_path;
+ cp.out_data_path = conn->codec.data_path;
+ cp.in_transport_unit_size = 1;
+ cp.out_transport_unit_size = 1;
+ break;
+
+ case BT_CODEC_TRANSPARENT:
+ if (!find_next_esco_param(conn, esco_param_msbc,
+ ARRAY_SIZE(esco_param_msbc)))
+ return false;
+ param = &esco_param_msbc[conn->attempt - 1];
+ cp.tx_coding_format.id = 0x03;
+ cp.rx_coding_format.id = 0x03;
+ cp.tx_codec_frame_size = __cpu_to_le16(60);
+ cp.rx_codec_frame_size = __cpu_to_le16(60);
+ cp.in_bandwidth = __cpu_to_le32(0x1f40);
+ cp.out_bandwidth = __cpu_to_le32(0x1f40);
+ cp.in_coding_format.id = 0x03;
+ cp.out_coding_format.id = 0x03;
+ cp.in_coded_data_size = __cpu_to_le16(16);
+ cp.out_coded_data_size = __cpu_to_le16(16);
+ cp.in_pcm_data_format = 2;
+ cp.out_pcm_data_format = 2;
+ cp.in_pcm_sample_payload_msb_pos = 0;
+ cp.out_pcm_sample_payload_msb_pos = 0;
+ cp.in_data_path = conn->codec.data_path;
+ cp.out_data_path = conn->codec.data_path;
+ cp.in_transport_unit_size = 1;
+ cp.out_transport_unit_size = 1;
+ break;
+
+ case BT_CODEC_CVSD:
+ if (lmp_esco_capable(conn->link)) {
+ if (!find_next_esco_param(conn, esco_param_cvsd,
+ ARRAY_SIZE(esco_param_cvsd)))
+ return false;
+ param = &esco_param_cvsd[conn->attempt - 1];
+ } else {
+ if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
+ return false;
+ param = &sco_param_cvsd[conn->attempt - 1];
+ }
+ cp.tx_coding_format.id = 2;
+ cp.rx_coding_format.id = 2;
+ cp.tx_codec_frame_size = __cpu_to_le16(60);
+ cp.rx_codec_frame_size = __cpu_to_le16(60);
+ cp.in_bandwidth = __cpu_to_le32(16000);
+ cp.out_bandwidth = __cpu_to_le32(16000);
+ cp.in_coding_format.id = 4;
+ cp.out_coding_format.id = 4;
+ cp.in_coded_data_size = __cpu_to_le16(16);
+ cp.out_coded_data_size = __cpu_to_le16(16);
+ cp.in_pcm_data_format = 2;
+ cp.out_pcm_data_format = 2;
+ cp.in_pcm_sample_payload_msb_pos = 0;
+ cp.out_pcm_sample_payload_msb_pos = 0;
+ cp.in_data_path = conn->codec.data_path;
+ cp.out_data_path = conn->codec.data_path;
+ cp.in_transport_unit_size = 16;
+ cp.out_transport_unit_size = 16;
+ break;
+ default:
+ return false;
+ }
+
+ cp.retrans_effort = param->retrans_effort;
+ cp.pkt_type = __cpu_to_le16(param->pkt_type);
+ cp.max_latency = __cpu_to_le16(param->max_latency);
+
+ if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
+ return false;
+
+ return true;
+}
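The single-byte coding-format identifiers hard-coded in the switch above come from the Bluetooth Assigned Numbers. The patch uses raw literals, so the enum below is purely a reader's aid, not something this series defines:

    /* Reference only, not defined by this patch */
    enum bt_coding_format {
            BT_CODING_ULAW        = 0x00,
            BT_CODING_ALAW        = 0x01,
            BT_CODING_CVSD        = 0x02,
            BT_CODING_TRANSPARENT = 0x03,
            BT_CODING_LINEAR_PCM  = 0x04,
            BT_CODING_MSBC        = 0x05,
            BT_CODING_VENDOR      = 0xff,
    };

Reading the mSBC case with this table: the link uses mSBC (0x05) over the air while the host side of the offloaded path carries linear PCM (0x04).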
+
+static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_setup_sync_conn cp;
const struct sco_param *param;
- BT_DBG("hcon %p", conn);
+ bt_dev_dbg(hdev, "hcon %p", conn);
conn->state = BT_CONNECT;
conn->out = true;
@@ -359,6 +479,14 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
return true;
}
+bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
+{
+ if (enhanced_sco_capable(conn->hdev))
+ return hci_enhanced_setup_sync_conn(conn, handle);
+
+ return hci_setup_sync_conn(conn, handle);
+}
+
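Dispatch note: enhanced_sco_capable(), added in hci_core.h by this series, presumably tests the Enhanced Setup Synchronous Connection bit of the supported-commands bitmap (octet 29, bit 3), so controllers without the enhanced command keep taking the unmodified hci_setup_sync_conn() path.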
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
u16 to_multiplier)
{
@@ -1040,8 +1168,8 @@ static void hci_req_directed_advertising(struct hci_request *req,
}
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
- u8 dst_type, u8 sec_level, u16 conn_timeout,
- u8 role, bdaddr_t *direct_rpa)
+ u8 dst_type, bool dst_resolved, u8 sec_level,
+ u16 conn_timeout, u8 role, bdaddr_t *direct_rpa)
{
struct hci_conn_params *params;
struct hci_conn *conn;
@@ -1078,19 +1206,24 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
return ERR_PTR(-EBUSY);
}
- /* When given an identity address with existing identity
- * resolving key, the connection needs to be established
- * to a resolvable random address.
- *
- * Storing the resolvable random address is required here
- * to handle connection failures. The address will later
- * be resolved back into the original identity address
- * from the connect request.
+ /* Check if the destination address has been resolved by the controller,
+ * since if it has, the identity address shall be used.
*/
- irk = hci_find_irk_by_addr(hdev, dst, dst_type);
- if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
- dst = &irk->rpa;
- dst_type = ADDR_LE_DEV_RANDOM;
+ if (!dst_resolved) {
+ /* When given an identity address with existing identity
+ * resolving key, the connection needs to be established
+ * to a resolvable random address.
+ *
+ * Storing the resolvable random address is required here
+ * to handle connection failures. The address will later
+ * be resolved back into the original identity address
+ * from the connect request.
+ */
+ irk = hci_find_irk_by_addr(hdev, dst, dst_type);
+ if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
+ dst = &irk->rpa;
+ dst_type = ADDR_LE_DEV_RANDOM;
+ }
}
if (conn) {
@@ -1319,7 +1452,7 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
}
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
- __u16 setting)
+ __u16 setting, struct bt_codec *codec)
{
struct hci_conn *acl;
struct hci_conn *sco;
@@ -1344,6 +1477,7 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
hci_conn_hold(sco);
sco->setting = setting;
+ sco->codec = *codec;
if (acl->state == BT_CONNECTED &&
(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 8a47a3017d61..8d33aa64846b 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -45,6 +45,7 @@
#include "leds.h"
#include "msft.h"
#include "aosp.h"
+#include "hci_codec.h"
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
@@ -61,130 +62,6 @@ DEFINE_MUTEX(hci_cb_list_lock);
/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
-/* ---- HCI debugfs entries ---- */
-
-static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hci_dev *hdev = file->private_data;
- char buf[3];
-
- buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
- buf[1] = '\n';
- buf[2] = '\0';
- return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hci_dev *hdev = file->private_data;
- struct sk_buff *skb;
- bool enable;
- int err;
-
- if (!test_bit(HCI_UP, &hdev->flags))
- return -ENETDOWN;
-
- err = kstrtobool_from_user(user_buf, count, &enable);
- if (err)
- return err;
-
- if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
- return -EALREADY;
-
- hci_req_sync_lock(hdev);
- if (enable)
- skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
- HCI_CMD_TIMEOUT);
- else
- skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
- HCI_CMD_TIMEOUT);
- hci_req_sync_unlock(hdev);
-
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- kfree_skb(skb);
-
- hci_dev_change_flag(hdev, HCI_DUT_MODE);
-
- return count;
-}
-
-static const struct file_operations dut_mode_fops = {
- .open = simple_open,
- .read = dut_mode_read,
- .write = dut_mode_write,
- .llseek = default_llseek,
-};
-
-static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hci_dev *hdev = file->private_data;
- char buf[3];
-
- buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
- buf[1] = '\n';
- buf[2] = '\0';
- return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct hci_dev *hdev = file->private_data;
- bool enable;
- int err;
-
- err = kstrtobool_from_user(user_buf, count, &enable);
- if (err)
- return err;
-
- /* When the diagnostic flags are not persistent and the transport
- * is not active or in user channel operation, then there is no need
- * for the vendor callback. Instead just store the desired value and
- * the setting will be programmed when the controller gets powered on.
- */
- if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
- (!test_bit(HCI_RUNNING, &hdev->flags) ||
- hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
- goto done;
-
- hci_req_sync_lock(hdev);
- err = hdev->set_diag(hdev, enable);
- hci_req_sync_unlock(hdev);
-
- if (err < 0)
- return err;
-
-done:
- if (enable)
- hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
- else
- hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
-
- return count;
-}
-
-static const struct file_operations vendor_diag_fops = {
- .open = simple_open,
- .read = vendor_diag_read,
- .write = vendor_diag_write,
- .llseek = default_llseek,
-};
-
-static void hci_debugfs_create_basic(struct hci_dev *hdev)
-{
- debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
- &dut_mode_fops);
-
- if (hdev->set_diag)
- debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
- &vendor_diag_fops);
-}
-
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
BT_DBG("%s %ld", req->hdev->name, opt);
@@ -838,10 +715,6 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt)
if (hdev->commands[22] & 0x04)
hci_set_event_mask_page_2(req);
- /* Read local codec list if the HCI command is supported */
- if (hdev->commands[29] & 0x20)
- hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
-
/* Read local pairing options if the HCI command is supported */
if (hdev->commands[41] & 0x08)
hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
@@ -937,6 +810,12 @@ static int __hci_init(struct hci_dev *hdev)
if (err < 0)
return err;
+ /* Read local codec list if the HCI command is supported */
+ if (hdev->commands[45] & 0x04)
+ hci_read_supported_codecs_v2(hdev);
+ else if (hdev->commands[29] & 0x20)
+ hci_read_supported_codecs(hdev);
+
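For orientation, the two masks map onto the HCI supported-commands bitmap as follows (the octet/bit positions follow directly from the array index and mask):

    /* commands[45] & 0x04  ->  octet 45, bit 2: Read Local Supported Codecs v2
     * commands[29] & 0x20  ->  octet 29, bit 5: Read Local Supported Codecs
     */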
/* This function is only called when the controller is actually in
* configured state. When the controller is marked as unconfigured,
* this initialization procedure is not run.
@@ -1848,6 +1727,7 @@ int hci_dev_do_close(struct hci_dev *hdev)
memset(hdev->eir, 0, sizeof(hdev->eir));
memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
bacpy(&hdev->random_addr, BDADDR_ANY);
+ hci_codec_list_clear(&hdev->local_codecs);
hci_req_sync_unlock(hdev);
@@ -3081,6 +2961,60 @@ int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
}
/* This function requires the caller holds hdev->lock */
+u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
+{
+ u32 flags;
+ struct adv_info *adv;
+
+ if (instance == 0x00) {
+ /* Instance 0 always manages the "Tx Power" and "Flags"
+ * fields
+ */
+ flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
+
+ /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
+ * corresponds to the "connectable" instance flag.
+ */
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
+ flags |= MGMT_ADV_FLAG_CONNECTABLE;
+
+ if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
+ flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
+ else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
+ flags |= MGMT_ADV_FLAG_DISCOV;
+
+ return flags;
+ }
+
+ adv = hci_find_adv_instance(hdev, instance);
+
+ /* Return 0 when given an invalid instance identifier. */
+ if (!adv)
+ return 0;
+
+ return adv->flags;
+}
+
+bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
+{
+ struct adv_info *adv;
+
+ /* Instance 0x00 always sets the local name */
+ if (instance == 0x00)
+ return true;
+
+ adv = hci_find_adv_instance(hdev, instance);
+ if (!adv)
+ return false;
+
+ if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
+ adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
+ return true;
+
+ return adv->scan_rsp_len ? true : false;
+}
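A hypothetical caller of the relocated helper, sketching the pattern hci_request.c follows once the move lands:

    /* Refresh the scan response only for instances that can be scanned */
    if (hci_adv_instance_is_scannable(hdev, instance))
            __hci_req_update_scan_rsp_data(req, instance);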
+
+/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
struct adv_monitor *monitor;
@@ -3487,15 +3421,6 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
{
struct hci_conn_params *param;
- switch (addr_type) {
- case ADDR_LE_DEV_PUBLIC_RESOLVED:
- addr_type = ADDR_LE_DEV_PUBLIC;
- break;
- case ADDR_LE_DEV_RANDOM_RESOLVED:
- addr_type = ADDR_LE_DEV_RANDOM;
- break;
- }
-
list_for_each_entry(param, list, action) {
if (bacmp(&param->addr, addr) == 0 &&
param->addr_type == addr_type)
@@ -3701,55 +3626,12 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
struct hci_dev *hdev =
container_of(nb, struct hci_dev, suspend_notifier);
int ret = 0;
- u8 state = BT_RUNNING;
- /* If powering down, wait for completion. */
- if (mgmt_powering_down(hdev)) {
- set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
- ret = hci_suspend_wait_event(hdev);
- if (ret)
- goto done;
- }
-
- /* Suspend notifier should only act on events when powered. */
- if (!hdev_is_powered(hdev) ||
- hci_dev_test_flag(hdev, HCI_UNREGISTER))
- goto done;
+ if (action == PM_SUSPEND_PREPARE)
+ ret = hci_suspend_dev(hdev);
+ else if (action == PM_POST_SUSPEND)
+ ret = hci_resume_dev(hdev);
- if (action == PM_SUSPEND_PREPARE) {
- /* Suspend consists of two actions:
- * - First, disconnect everything and make the controller not
- * connectable (disabling scanning)
- * - Second, program event filter/accept list and enable scan
- */
- ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
- if (!ret)
- state = BT_SUSPEND_DISCONNECT;
-
- /* Only configure accept list if disconnect succeeded and wake
- * isn't being prevented.
- */
- if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
- ret = hci_change_suspend_state(hdev,
- BT_SUSPEND_CONFIGURE_WAKE);
- if (!ret)
- state = BT_SUSPEND_CONFIGURE_WAKE;
- }
-
- hci_clear_wake_reason(hdev);
- mgmt_suspending(hdev, state);
-
- } else if (action == PM_POST_SUSPEND) {
- ret = hci_change_suspend_state(hdev, BT_RUNNING);
-
- mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
- hdev->wake_addr_type);
- }
-
-done:
- /* We always allow suspend even if suspend preparation failed and
- * attempt to recover in resume.
- */
if (ret)
bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
action, ret);
@@ -3857,6 +3739,7 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
INIT_LIST_HEAD(&hdev->adv_instances);
INIT_LIST_HEAD(&hdev->blocked_keys);
+ INIT_LIST_HEAD(&hdev->local_codecs);
INIT_WORK(&hdev->rx_work, hci_rx_work);
INIT_WORK(&hdev->cmd_work, hci_cmd_work);
INIT_WORK(&hdev->tx_work, hci_tx_work);
@@ -3994,6 +3877,7 @@ int hci_register_dev(struct hci_dev *hdev)
queue_work(hdev->req_workqueue, &hdev->power_on);
idr_init(&hdev->adv_monitors_idr);
+ msft_register(hdev);
return id;
@@ -4026,6 +3910,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
cancel_work_sync(&hdev->suspend_prepare);
}
+ msft_unregister(hdev);
+
hci_dev_do_close(hdev);
if (!test_bit(HCI_INIT, &hdev->flags) &&
@@ -4088,16 +3974,78 @@ EXPORT_SYMBOL(hci_release_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
+ int ret;
+ u8 state = BT_RUNNING;
+
+ bt_dev_dbg(hdev, "");
+
+ /* Suspend should only act when powered. */
+ if (!hdev_is_powered(hdev) ||
+ hci_dev_test_flag(hdev, HCI_UNREGISTER))
+ return 0;
+
+ /* If powering down, wait for completion. */
+ if (mgmt_powering_down(hdev)) {
+ set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
+ ret = hci_suspend_wait_event(hdev);
+ if (ret)
+ goto done;
+ }
+
+ /* Suspend consists of two actions:
+ * - First, disconnect everything and make the controller not
+ * connectable (disabling scanning)
+ * - Second, program event filter/accept list and enable scan
+ */
+ ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
+ if (ret)
+ goto clear;
+
+ state = BT_SUSPEND_DISCONNECT;
+
+ /* Only configure the accept list if the device may wake up. */
+ if (hdev->wakeup && hdev->wakeup(hdev)) {
+ ret = hci_change_suspend_state(hdev, BT_SUSPEND_CONFIGURE_WAKE);
+ if (!ret)
+ state = BT_SUSPEND_CONFIGURE_WAKE;
+ }
+
+clear:
+ hci_clear_wake_reason(hdev);
+ mgmt_suspending(hdev, state);
+
+done:
+ /* We always allow suspend, even if suspend preparation failed, and
+ * attempt to recover in resume.
+ */
hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(hci_suspend_dev);
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
+ int ret;
+
+ bt_dev_dbg(hdev, "");
+
+ /* Resume should only act when powered. */
+ if (!hdev_is_powered(hdev) ||
+ hci_dev_test_flag(hdev, HCI_UNREGISTER))
+ return 0;
+
+ /* If powering down, don't attempt to resume */
+ if (mgmt_powering_down(hdev))
+ return 0;
+
+ ret = hci_change_suspend_state(hdev, BT_RUNNING);
+
+ mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
+ hdev->wake_addr_type);
+
hci_sock_dev_event(hdev, HCI_DEV_RESUME);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(hci_resume_dev);
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index 841393389f7b..902b40a90b91 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -27,6 +27,7 @@
#include <net/bluetooth/hci_core.h>
#include "smp.h"
+#include "hci_request.h"
#include "hci_debugfs.h"
#define DEFINE_QUIRK_ATTRIBUTE(__name, __quirk) \
@@ -1250,3 +1251,125 @@ void hci_debugfs_create_conn(struct hci_conn *conn)
snprintf(name, sizeof(name), "%u", conn->handle);
conn->debugfs = debugfs_create_dir(name, hdev->debugfs);
}
+
+static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ struct sk_buff *skb;
+ bool enable;
+ int err;
+
+ if (!test_bit(HCI_UP, &hdev->flags))
+ return -ENETDOWN;
+
+ err = kstrtobool_from_user(user_buf, count, &enable);
+ if (err)
+ return err;
+
+ if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
+ return -EALREADY;
+
+ hci_req_sync_lock(hdev);
+ if (enable)
+ skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
+ HCI_CMD_TIMEOUT);
+ else
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
+ HCI_CMD_TIMEOUT);
+ hci_req_sync_unlock(hdev);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ kfree_skb(skb);
+
+ hci_dev_change_flag(hdev, HCI_DUT_MODE);
+
+ return count;
+}
+
+static const struct file_operations dut_mode_fops = {
+ .open = simple_open,
+ .read = dut_mode_read,
+ .write = dut_mode_write,
+ .llseek = default_llseek,
+};
+
+static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ bool enable;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &enable);
+ if (err)
+ return err;
+
+ /* When the diagnostic flags are not persistent and the transport
+ * is not active or in user channel operation, then there is no need
+ * for the vendor callback. Instead just store the desired value and
+ * the setting will be programmed when the controller gets powered on.
+ */
+ if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
+ (!test_bit(HCI_RUNNING, &hdev->flags) ||
+ hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
+ goto done;
+
+ hci_req_sync_lock(hdev);
+ err = hdev->set_diag(hdev, enable);
+ hci_req_sync_unlock(hdev);
+
+ if (err < 0)
+ return err;
+
+done:
+ if (enable)
+ hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
+ else
+ hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
+
+ return count;
+}
+
+static const struct file_operations vendor_diag_fops = {
+ .open = simple_open,
+ .read = vendor_diag_read,
+ .write = vendor_diag_write,
+ .llseek = default_llseek,
+};
+
+void hci_debugfs_create_basic(struct hci_dev *hdev)
+{
+ debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
+ &dut_mode_fops);
+
+ if (hdev->set_diag)
+ debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
+ &vendor_diag_fops);
+}
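The relocated entries keep their userspace interface: with debugfs mounted in the default location, writing 'Y' to /sys/kernel/debug/bluetooth/hci0/dut_mode still toggles Device Under Test mode while the adapter is up, and vendor_diag appears only when the driver supplies a set_diag callback.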
diff --git a/net/bluetooth/hci_debugfs.h b/net/bluetooth/hci_debugfs.h
index 4444dc8cedc2..9a8a7c93bb12 100644
--- a/net/bluetooth/hci_debugfs.h
+++ b/net/bluetooth/hci_debugfs.h
@@ -26,6 +26,7 @@ void hci_debugfs_create_common(struct hci_dev *hdev);
void hci_debugfs_create_bredr(struct hci_dev *hdev);
void hci_debugfs_create_le(struct hci_dev *hdev);
void hci_debugfs_create_conn(struct hci_conn *conn);
+void hci_debugfs_create_basic(struct hci_dev *hdev);
#else
@@ -45,4 +46,8 @@ static inline void hci_debugfs_create_conn(struct hci_conn *conn)
{
}
+static inline void hci_debugfs_create_basic(struct hci_dev *hdev)
+{
+}
+
#endif
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 0bca035bf2dc..7d0db1ca1248 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -36,6 +36,7 @@
#include "amp.h"
#include "smp.h"
#include "msft.h"
+#include "eir.h"
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
"\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -2278,6 +2279,41 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
hci_dev_unlock(hdev);
}
+static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
+{
+ struct hci_cp_enhanced_setup_sync_conn *cp;
+ struct hci_conn *acl, *sco;
+ __u16 handle;
+
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
+
+ if (!status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
+ if (!cp)
+ return;
+
+ handle = __le16_to_cpu(cp->handle);
+
+ bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
+
+ hci_dev_lock(hdev);
+
+ acl = hci_conn_hash_lookup_handle(hdev, handle);
+ if (acl) {
+ sco = acl->link;
+ if (sco) {
+ sco->state = BT_CLOSED;
+
+ hci_connect_cfm(sco, status);
+ hci_conn_del(sco);
+ }
+ }
+
+ hci_dev_unlock(hdev);
+}
+
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_sniff_mode *cp;
@@ -2351,7 +2387,7 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
conn->dst_type, status);
- if (conn->type == LE_LINK) {
+ if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
hdev->cur_adv_instance = conn->adv_instance;
hci_req_reenable_advertising(hdev);
}
@@ -2367,6 +2403,28 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
hci_dev_unlock(hdev);
}
+static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
+{
+ /* When using controller-based address resolution, the new
+ * address types 0x02 and 0x03 are used. These types need to be
+ * converted back into either the public or the random address type
+ */
+ switch (type) {
+ case ADDR_LE_DEV_PUBLIC_RESOLVED:
+ if (resolved)
+ *resolved = true;
+ return ADDR_LE_DEV_PUBLIC;
+ case ADDR_LE_DEV_RANDOM_RESOLVED:
+ if (resolved)
+ *resolved = true;
+ return ADDR_LE_DEV_RANDOM;
+ }
+
+ if (resolved)
+ *resolved = false;
+ return type;
+}
+
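A worked example of the mapping (hypothetical call, values per the switch above):

    bool resolved;
    u8 type = ev_bdaddr_type(hdev, ADDR_LE_DEV_RANDOM_RESOLVED, &resolved);
    /* type == ADDR_LE_DEV_RANDOM, resolved == true; any input other than
     * the two *_RESOLVED values is returned unchanged with resolved == false
     */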
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
u8 peer_addr_type, u8 own_address_type,
u8 filter_policy)
@@ -2378,21 +2436,7 @@ static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
if (!conn)
return;
- /* When using controller based address resolution, then the new
- * address types 0x02 and 0x03 are used. These types need to be
- * converted back into either public address or random address type
- */
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
- switch (own_address_type) {
- case ADDR_LE_DEV_PUBLIC_RESOLVED:
- own_address_type = ADDR_LE_DEV_PUBLIC;
- break;
- case ADDR_LE_DEV_RANDOM_RESOLVED:
- own_address_type = ADDR_LE_DEV_RANDOM;
- break;
- }
- }
+ own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
/* Store the initiator and responder address information which
* is needed for SMP. These values will not change during the
@@ -2961,7 +3005,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
* or until a connection is created or until the Advertising
* is timed out due to Directed Advertising."
*/
- if (conn->type == LE_LINK) {
+ if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
hdev->cur_adv_instance = conn->adv_instance;
hci_req_reenable_advertising(hdev);
}
@@ -3756,6 +3800,10 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cs_setup_sync_conn(hdev, ev->status);
break;
+ case HCI_OP_ENHANCED_SETUP_SYNC_CONN:
+ hci_cs_enhanced_setup_sync_conn(hdev, ev->status);
+ break;
+
case HCI_OP_SNIFF_MODE:
hci_cs_sniff_mode(hdev, ev->status);
break;
@@ -4397,6 +4445,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
{
struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
+ unsigned int notify_evt;
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
@@ -4471,15 +4520,21 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
switch (ev->air_mode) {
case 0x02:
- if (hdev->notify)
- hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
+ notify_evt = HCI_NOTIFY_ENABLE_SCO_CVSD;
break;
case 0x03:
- if (hdev->notify)
- hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
+ notify_evt = HCI_NOTIFY_ENABLE_SCO_TRANSP;
break;
}
+ /* Notify only for SCO over the HCI transport data path (zero); any
+ * non-zero value indicates a non-HCI transport data path
+ */
+ if (conn->codec.data_path == 0) {
+ if (hdev->notify)
+ hdev->notify(hdev, notify_evt);
+ }
+
hci_connect_cfm(conn, ev->status);
if (ev->status)
hci_conn_del(conn);
@@ -5282,22 +5337,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
conn->dst_type = irk->addr_type;
}
- /* When using controller based address resolution, then the new
- * address types 0x02 and 0x03 are used. These types need to be
- * converted back into either public address or random address type
- */
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
- hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
- switch (conn->dst_type) {
- case ADDR_LE_DEV_PUBLIC_RESOLVED:
- conn->dst_type = ADDR_LE_DEV_PUBLIC;
- break;
- case ADDR_LE_DEV_RANDOM_RESOLVED:
- conn->dst_type = ADDR_LE_DEV_RANDOM;
- break;
- }
- }
+ conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
if (status) {
hci_le_conn_failed(conn, status);
@@ -5479,8 +5519,8 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
/* This function requires the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
bdaddr_t *addr,
- u8 addr_type, u8 adv_type,
- bdaddr_t *direct_rpa)
+ u8 addr_type, bool addr_resolved,
+ u8 adv_type, bdaddr_t *direct_rpa)
{
struct hci_conn *conn;
struct hci_conn_params *params;
@@ -5532,9 +5572,9 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
}
}
- conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
- hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
- direct_rpa);
+ conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
+ BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
+ HCI_ROLE_MASTER, direct_rpa);
if (!IS_ERR(conn)) {
/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
* by higher layer that tried to connect, if no then
@@ -5575,7 +5615,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
struct discovery_state *d = &hdev->discovery;
struct smp_irk *irk;
struct hci_conn *conn;
- bool match;
+ bool match, bdaddr_resolved;
u32 flags;
u8 *ptr;
@@ -5619,6 +5659,9 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
* controller address.
*/
if (direct_addr) {
+ direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
+ &bdaddr_resolved);
+
/* Only resolvable random addresses are valid for these
* kind of reports and others can be ignored.
*/
@@ -5646,13 +5689,15 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
bdaddr_type = irk->addr_type;
}
+ bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
+
/* Check if we have been requested to connect to this device.
*
* direct_addr is set only for directed advertising reports (it is NULL
* for advertising reports) and is already verified to be RPA above.
*/
- conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
- direct_addr);
+ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
+ type, direct_addr);
if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
/* Store report for later inclusion by
* mgmt_device_connected
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index f15626607b2d..92611bfc0b9e 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -30,6 +30,7 @@
#include "smp.h"
#include "hci_request.h"
#include "msft.h"
+#include "eir.h"
#define HCI_REQ_DONE 0
#define HCI_REQ_PEND 1
@@ -521,164 +522,6 @@ void __hci_req_update_name(struct hci_request *req)
hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
-#define PNP_INFO_SVCLASS_ID 0x1200
-
-static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
-{
- u8 *ptr = data, *uuids_start = NULL;
- struct bt_uuid *uuid;
-
- if (len < 4)
- return ptr;
-
- list_for_each_entry(uuid, &hdev->uuids, list) {
- u16 uuid16;
-
- if (uuid->size != 16)
- continue;
-
- uuid16 = get_unaligned_le16(&uuid->uuid[12]);
- if (uuid16 < 0x1100)
- continue;
-
- if (uuid16 == PNP_INFO_SVCLASS_ID)
- continue;
-
- if (!uuids_start) {
- uuids_start = ptr;
- uuids_start[0] = 1;
- uuids_start[1] = EIR_UUID16_ALL;
- ptr += 2;
- }
-
- /* Stop if not enough space to put next UUID */
- if ((ptr - data) + sizeof(u16) > len) {
- uuids_start[1] = EIR_UUID16_SOME;
- break;
- }
-
- *ptr++ = (uuid16 & 0x00ff);
- *ptr++ = (uuid16 & 0xff00) >> 8;
- uuids_start[0] += sizeof(uuid16);
- }
-
- return ptr;
-}
-
-static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
-{
- u8 *ptr = data, *uuids_start = NULL;
- struct bt_uuid *uuid;
-
- if (len < 6)
- return ptr;
-
- list_for_each_entry(uuid, &hdev->uuids, list) {
- if (uuid->size != 32)
- continue;
-
- if (!uuids_start) {
- uuids_start = ptr;
- uuids_start[0] = 1;
- uuids_start[1] = EIR_UUID32_ALL;
- ptr += 2;
- }
-
- /* Stop if not enough space to put next UUID */
- if ((ptr - data) + sizeof(u32) > len) {
- uuids_start[1] = EIR_UUID32_SOME;
- break;
- }
-
- memcpy(ptr, &uuid->uuid[12], sizeof(u32));
- ptr += sizeof(u32);
- uuids_start[0] += sizeof(u32);
- }
-
- return ptr;
-}
-
-static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
-{
- u8 *ptr = data, *uuids_start = NULL;
- struct bt_uuid *uuid;
-
- if (len < 18)
- return ptr;
-
- list_for_each_entry(uuid, &hdev->uuids, list) {
- if (uuid->size != 128)
- continue;
-
- if (!uuids_start) {
- uuids_start = ptr;
- uuids_start[0] = 1;
- uuids_start[1] = EIR_UUID128_ALL;
- ptr += 2;
- }
-
- /* Stop if not enough space to put next UUID */
- if ((ptr - data) + 16 > len) {
- uuids_start[1] = EIR_UUID128_SOME;
- break;
- }
-
- memcpy(ptr, uuid->uuid, 16);
- ptr += 16;
- uuids_start[0] += 16;
- }
-
- return ptr;
-}
-
-static void create_eir(struct hci_dev *hdev, u8 *data)
-{
- u8 *ptr = data;
- size_t name_len;
-
- name_len = strlen(hdev->dev_name);
-
- if (name_len > 0) {
- /* EIR Data type */
- if (name_len > 48) {
- name_len = 48;
- ptr[1] = EIR_NAME_SHORT;
- } else
- ptr[1] = EIR_NAME_COMPLETE;
-
- /* EIR Data length */
- ptr[0] = name_len + 1;
-
- memcpy(ptr + 2, hdev->dev_name, name_len);
-
- ptr += (name_len + 2);
- }
-
- if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
- ptr[0] = 2;
- ptr[1] = EIR_TX_POWER;
- ptr[2] = (u8) hdev->inq_tx_power;
-
- ptr += 3;
- }
-
- if (hdev->devid_source > 0) {
- ptr[0] = 9;
- ptr[1] = EIR_DEVICE_ID;
-
- put_unaligned_le16(hdev->devid_source, ptr + 2);
- put_unaligned_le16(hdev->devid_vendor, ptr + 4);
- put_unaligned_le16(hdev->devid_product, ptr + 6);
- put_unaligned_le16(hdev->devid_version, ptr + 8);
-
- ptr += 10;
- }
-
- ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
- ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
- ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
-}
-
void __hci_req_update_eir(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
@@ -698,7 +541,7 @@ void __hci_req_update_eir(struct hci_request *req)
memset(&cp, 0, sizeof(cp));
- create_eir(hdev, cp.data);
+ eir_create(hdev, cp.data);
if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
return;
@@ -1134,25 +977,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
addr_resolv);
}
-static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
-{
- struct adv_info *adv_instance;
-
- /* Instance 0x00 always set local name */
- if (instance == 0x00)
- return true;
-
- adv_instance = hci_find_adv_instance(hdev, instance);
- if (!adv_instance)
- return false;
-
- if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
- adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
- return true;
-
- return adv_instance->scan_rsp_len ? true : false;
-}
-
static void hci_req_clear_event_filter(struct hci_request *req)
{
struct hci_cp_set_event_filter f;
@@ -1281,21 +1105,24 @@ static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
}
}
-static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
- bool enable)
+static void hci_req_prepare_adv_monitor_suspend(struct hci_request *req,
+ bool suspending)
{
struct hci_dev *hdev = req->hdev;
switch (hci_get_adv_monitor_offload_ext(hdev)) {
case HCI_ADV_MONITOR_EXT_MSFT:
- msft_req_add_set_filter_enable(req, enable);
+ if (suspending)
+ msft_suspend(hdev);
+ else
+ msft_resume(hdev);
break;
default:
return;
}
/* No need to block when enabling since it's on resume path */
- if (hdev->suspended && !enable)
+ if (hdev->suspended && suspending)
set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
}
@@ -1362,7 +1189,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
}
/* Disable advertisement filters */
- hci_req_add_set_adv_filter_enable(&req, false);
+ hci_req_prepare_adv_monitor_suspend(&req, true);
/* Prevent disconnects from causing scanning to be re-enabled */
hdev->scanning_paused = true;
@@ -1404,7 +1231,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
/* Reset passive/background scanning to normal */
__hci_update_background_scan(&req);
/* Enable all of the advertisement filters */
- hci_req_add_set_adv_filter_enable(&req, true);
+ hci_req_prepare_adv_monitor_suspend(&req, false);
/* Unpause directed advertising */
hdev->advertising_paused = false;
@@ -1442,7 +1269,7 @@ done:
static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
- return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
+ return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}
void __hci_req_disable_advertising(struct hci_request *req)
@@ -1457,40 +1284,6 @@ void __hci_req_disable_advertising(struct hci_request *req)
}
}
-static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
-{
- u32 flags;
- struct adv_info *adv_instance;
-
- if (instance == 0x00) {
- /* Instance 0 always manages the "Tx Power" and "Flags"
- * fields
- */
- flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
-
- /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
- * corresponds to the "connectable" instance flag.
- */
- if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
- flags |= MGMT_ADV_FLAG_CONNECTABLE;
-
- if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
- flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
- else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
- flags |= MGMT_ADV_FLAG_DISCOV;
-
- return flags;
- }
-
- adv_instance = hci_find_adv_instance(hdev, instance);
-
- /* Return 0 when we got an invalid instance identifier. */
- if (!adv_instance)
- return 0;
-
- return adv_instance->flags;
-}
-
static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
/* If privacy is not enabled don't use RPA */
@@ -1555,15 +1348,15 @@ static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
void __hci_req_enable_advertising(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
- struct adv_info *adv_instance;
+ struct adv_info *adv;
struct hci_cp_le_set_adv_param cp;
u8 own_addr_type, enable = 0x01;
bool connectable;
u16 adv_min_interval, adv_max_interval;
u32 flags;
- flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
- adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
+ flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
+ adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
/* If the "connectable" instance flag was not set, then choose between
* ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
@@ -1595,9 +1388,9 @@ void __hci_req_enable_advertising(struct hci_request *req)
memset(&cp, 0, sizeof(cp));
- if (adv_instance) {
- adv_min_interval = adv_instance->min_interval;
- adv_max_interval = adv_instance->max_interval;
+ if (adv) {
+ adv_min_interval = adv->min_interval;
+ adv_max_interval = adv->max_interval;
} else {
adv_min_interval = hdev->le_adv_min_interval;
adv_max_interval = hdev->le_adv_max_interval;
@@ -1628,85 +1421,6 @@ void __hci_req_enable_advertising(struct hci_request *req)
hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
-u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
-{
- size_t short_len;
- size_t complete_len;
-
- /* no space left for name (+ NULL + type + len) */
- if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
- return ad_len;
-
- /* use complete name if present and fits */
- complete_len = strlen(hdev->dev_name);
- if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
- return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
- hdev->dev_name, complete_len + 1);
-
- /* use short name if present */
- short_len = strlen(hdev->short_name);
- if (short_len)
- return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
- hdev->short_name, short_len + 1);
-
- /* use shortened full name if present, we already know that name
- * is longer then HCI_MAX_SHORT_NAME_LENGTH
- */
- if (complete_len) {
- u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
-
- memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
- name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
-
- return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
- sizeof(name));
- }
-
- return ad_len;
-}
-
-static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
-{
- return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
-}
-
-static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
-{
- u8 scan_rsp_len = 0;
-
- if (hdev->appearance)
- scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
-
- return append_local_name(hdev, ptr, scan_rsp_len);
-}
-
-static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
- u8 *ptr)
-{
- struct adv_info *adv_instance;
- u32 instance_flags;
- u8 scan_rsp_len = 0;
-
- adv_instance = hci_find_adv_instance(hdev, instance);
- if (!adv_instance)
- return 0;
-
- instance_flags = adv_instance->flags;
-
- if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
- scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
-
- memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
- adv_instance->scan_rsp_len);
-
- scan_rsp_len += adv_instance->scan_rsp_len;
-
- if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
- scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
-
- return scan_rsp_len;
-}
-
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
struct hci_dev *hdev = req->hdev;
@@ -1723,11 +1437,7 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
memset(&pdu, 0, sizeof(pdu));
- if (instance)
- len = create_instance_scan_rsp_data(hdev, instance,
- pdu.data);
- else
- len = create_default_scan_rsp_data(hdev, pdu.data);
+ len = eir_create_scan_rsp(hdev, instance, pdu.data);
if (hdev->scan_rsp_data_len == len &&
!memcmp(pdu.data, hdev->scan_rsp_data, len))
@@ -1748,11 +1458,7 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
memset(&cp, 0, sizeof(cp));
- if (instance)
- len = create_instance_scan_rsp_data(hdev, instance,
- cp.data);
- else
- len = create_default_scan_rsp_data(hdev, cp.data);
+ len = eir_create_scan_rsp(hdev, instance, cp.data);
if (hdev->scan_rsp_data_len == len &&
!memcmp(cp.data, hdev->scan_rsp_data, len))
@@ -1767,95 +1473,6 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
}
}
-static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
-{
- struct adv_info *adv_instance = NULL;
- u8 ad_len = 0, flags = 0;
- u32 instance_flags;
-
- /* Return 0 when the current instance identifier is invalid. */
- if (instance) {
- adv_instance = hci_find_adv_instance(hdev, instance);
- if (!adv_instance)
- return 0;
- }
-
- instance_flags = get_adv_instance_flags(hdev, instance);
-
- /* If instance already has the flags set skip adding it once
- * again.
- */
- if (adv_instance && eir_get_data(adv_instance->adv_data,
- adv_instance->adv_data_len, EIR_FLAGS,
- NULL))
- goto skip_flags;
-
- /* The Add Advertising command allows userspace to set both the general
- * and limited discoverable flags.
- */
- if (instance_flags & MGMT_ADV_FLAG_DISCOV)
- flags |= LE_AD_GENERAL;
-
- if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
- flags |= LE_AD_LIMITED;
-
- if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
- flags |= LE_AD_NO_BREDR;
-
- if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
- /* If a discovery flag wasn't provided, simply use the global
- * settings.
- */
- if (!flags)
- flags |= mgmt_get_adv_discov_flags(hdev);
-
- /* If flags would still be empty, then there is no need to
- * include the "Flags" AD field".
- */
- if (flags) {
- ptr[0] = 0x02;
- ptr[1] = EIR_FLAGS;
- ptr[2] = flags;
-
- ad_len += 3;
- ptr += 3;
- }
- }
-
-skip_flags:
- if (adv_instance) {
- memcpy(ptr, adv_instance->adv_data,
- adv_instance->adv_data_len);
- ad_len += adv_instance->adv_data_len;
- ptr += adv_instance->adv_data_len;
- }
-
- if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
- s8 adv_tx_power;
-
- if (ext_adv_capable(hdev)) {
- if (adv_instance)
- adv_tx_power = adv_instance->tx_power;
- else
- adv_tx_power = hdev->adv_tx_power;
- } else {
- adv_tx_power = hdev->adv_tx_power;
- }
-
- /* Provide Tx Power only if we can provide a valid value for it */
- if (adv_tx_power != HCI_TX_POWER_INVALID) {
- ptr[0] = 0x02;
- ptr[1] = EIR_TX_POWER;
- ptr[2] = (u8)adv_tx_power;
-
- ad_len += 3;
- ptr += 3;
- }
- }
-
- return ad_len;
-}
-
void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
struct hci_dev *hdev = req->hdev;
@@ -1872,7 +1489,7 @@ void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
memset(&pdu, 0, sizeof(pdu));
- len = create_instance_adv_data(hdev, instance, pdu.data);
+ len = eir_create_adv_data(hdev, instance, pdu.data);
/* There's nothing to do if the data hasn't changed */
if (hdev->adv_data_len == len &&
@@ -1894,7 +1511,7 @@ void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
memset(&cp, 0, sizeof(cp));
- len = create_instance_adv_data(hdev, instance, cp.data);
+ len = eir_create_adv_data(hdev, instance, cp.data);
/* There's nothing to do if the data hasn't changed */
if (hdev->adv_data_len == len &&
@@ -2183,7 +1800,7 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
adv_instance = NULL;
}
- flags = get_adv_instance_flags(hdev, instance);
+ flags = hci_adv_instance_flags(hdev, instance);
/* If the "connectable" instance flag was not set, then choose between
* ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
@@ -2223,7 +1840,7 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
else
cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
- } else if (adv_instance_is_scannable(hdev, instance) ||
+ } else if (hci_adv_instance_is_scannable(hdev, instance) ||
(flags & MGMT_ADV_PARAM_SCAN_RSP)) {
if (secondary_adv)
cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
@@ -3327,6 +2944,53 @@ bool hci_req_stop_discovery(struct hci_request *req)
return ret;
}
+static void config_data_path_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
+{
+ bt_dev_dbg(hdev, "status %u", status);
+}
+
+int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
+{
+ struct hci_request req;
+ int err;
+ __u8 vnd_len, *vnd_data = NULL;
+ struct hci_op_configure_data_path *cmd = NULL;
+
+ hci_req_init(&req, hdev);
+
+ err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
+ &vnd_data);
+ if (err < 0)
+ goto error;
+
+ cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
+ if (err < 0)
+ goto error;
+
+ cmd->vnd_len = vnd_len;
+ memcpy(cmd->vnd_data, vnd_data, vnd_len);
+
+ cmd->direction = 0x00;
+ hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
+
+ cmd->direction = 0x01;
+ hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
+
+ err = hci_req_run(&req, config_data_path_complete);
+error:
+
+ kfree(cmd);
+ kfree(vnd_data);
+ return err;
+}
+
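A sketch of the driver-side contract this helper relies on; the callback signatures below are inferred from the call sites above and are not spelled out in this hunk:

    /* Inferred from the calls in hci_req_configure_datapath() */
    int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path_id);
    int (*get_codec_config_data)(struct hci_dev *hdev, __u8 link_type,
                                 struct bt_codec *codec, __u8 *vnd_len,
                                 __u8 **vnd_data);

Note that the command is queued twice with the same vendor payload, once per direction (0x00, then 0x01), so both halves of the data path receive identical configuration.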
static int stop_discovery(struct hci_request *req, unsigned long opt)
{
hci_dev_lock(req->hdev);
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index 39ee8a18087a..f31420f58525 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -101,6 +101,8 @@ void __hci_req_update_class(struct hci_request *req);
/* Returns true if HCI commands were queued */
bool hci_req_stop_discovery(struct hci_request *req);
+int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec);
+
static inline void hci_req_update_scan(struct hci_dev *hdev)
{
queue_work(hdev->req_workqueue, &hdev->scan_update);
@@ -122,26 +124,3 @@ static inline void hci_update_background_scan(struct hci_dev *hdev)
void hci_request_setup(struct hci_dev *hdev);
void hci_request_cancel_all(struct hci_dev *hdev);
-
-u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len);
-
-static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
- u8 *data, u8 data_len)
-{
- eir[eir_len++] = sizeof(type) + data_len;
- eir[eir_len++] = type;
- memcpy(&eir[eir_len], data, data_len);
- eir_len += data_len;
-
- return eir_len;
-}
-
-static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data)
-{
- eir[eir_len++] = sizeof(type) + sizeof(data);
- eir[eir_len++] = type;
- put_unaligned_le16(data, &eir[eir_len]);
- eir_len += sizeof(data);
-
- return eir_len;
-}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index f1128c2134f0..d0dad1fafe07 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -57,6 +57,7 @@ struct hci_pinfo {
unsigned long flags;
__u32 cookie;
char comm[TASK_COMM_LEN];
+ __u16 mtu;
};
static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
@@ -1374,6 +1375,10 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
break;
}
+ /* Default MTU to HCI_MAX_FRAME_SIZE if not set */
+ if (!hci_pi(sk)->mtu)
+ hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;
+
sk->sk_state = BT_BOUND;
done:
@@ -1506,9 +1511,8 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
}
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
- struct msghdr *msg, size_t msglen)
+ struct sk_buff *skb)
{
- void *buf;
u8 *cp;
struct mgmt_hdr *hdr;
u16 opcode, index, len;
@@ -1517,40 +1521,31 @@ static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
bool var_len, no_hdev;
int err;
- BT_DBG("got %zu bytes", msglen);
+ BT_DBG("got %d bytes", skb->len);
- if (msglen < sizeof(*hdr))
+ if (skb->len < sizeof(*hdr))
return -EINVAL;
- buf = kmalloc(msglen, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (memcpy_from_msg(buf, msg, msglen)) {
- err = -EFAULT;
- goto done;
- }
-
- hdr = buf;
+ hdr = (void *)skb->data;
opcode = __le16_to_cpu(hdr->opcode);
index = __le16_to_cpu(hdr->index);
len = __le16_to_cpu(hdr->len);
- if (len != msglen - sizeof(*hdr)) {
+ if (len != skb->len - sizeof(*hdr)) {
err = -EINVAL;
goto done;
}
if (chan->channel == HCI_CHANNEL_CONTROL) {
- struct sk_buff *skb;
+ struct sk_buff *cmd;
/* Send event to monitor */
- skb = create_monitor_ctrl_command(sk, index, opcode, len,
- buf + sizeof(*hdr));
- if (skb) {
- hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ cmd = create_monitor_ctrl_command(sk, index, opcode, len,
+ skb->data + sizeof(*hdr));
+ if (cmd) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
HCI_SOCK_TRUSTED, NULL);
- kfree_skb(skb);
+ kfree_skb(cmd);
}
}
@@ -1615,26 +1610,25 @@ static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
if (hdev && chan->hdev_init)
chan->hdev_init(sk, hdev);
- cp = buf + sizeof(*hdr);
+ cp = skb->data + sizeof(*hdr);
err = handler->func(sk, hdev, cp, len);
if (err < 0)
goto done;
- err = msglen;
+ err = skb->len;
done:
if (hdev)
hci_dev_put(hdev);
- kfree(buf);
return err;
}
-static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
+static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
+ unsigned int flags)
{
struct hci_mon_hdr *hdr;
- struct sk_buff *skb;
struct hci_dev *hdev;
u16 index;
int err;
@@ -1643,24 +1637,13 @@ static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
* the priority byte, the ident length byte and at least one string
* terminator NUL byte. Anything shorter are invalid packets.
*/
- if (len < sizeof(*hdr) + 3)
+ if (skb->len < sizeof(*hdr) + 3)
return -EINVAL;
- skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
- if (!skb)
- return err;
-
- if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
- err = -EFAULT;
- goto drop;
- }
-
hdr = (void *)skb->data;
- if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
- err = -EINVAL;
- goto drop;
- }
+ if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
+ return -EINVAL;
if (__le16_to_cpu(hdr->opcode) == 0x0000) {
__u8 priority = skb->data[sizeof(*hdr)];
@@ -1679,25 +1662,20 @@ static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
* The message follows the ident string (if present) and
* must be NUL terminated. Otherwise it is not a valid packet.
*/
- if (priority > 7 || skb->data[len - 1] != 0x00 ||
- ident_len > len - sizeof(*hdr) - 3 ||
- skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
- err = -EINVAL;
- goto drop;
- }
+ if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
+ ident_len > skb->len - sizeof(*hdr) - 3 ||
+ skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
+ return -EINVAL;
} else {
- err = -EINVAL;
- goto drop;
+ return -EINVAL;
}
index = __le16_to_cpu(hdr->index);
if (index != MGMT_INDEX_NONE) {
hdev = hci_dev_get(index);
- if (!hdev) {
- err = -ENODEV;
- goto drop;
- }
+ if (!hdev)
+ return -ENODEV;
} else {
hdev = NULL;
}
@@ -1705,13 +1683,11 @@ static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
- err = len;
+ err = skb->len;
if (hdev)
hci_dev_put(hdev);
-drop:
- kfree_skb(skb);
return err;
}
@@ -1723,19 +1699,23 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
struct hci_dev *hdev;
struct sk_buff *skb;
int err;
+ const unsigned int flags = msg->msg_flags;
BT_DBG("sock %p sk %p", sock, sk);
- if (msg->msg_flags & MSG_OOB)
+ if (flags & MSG_OOB)
return -EOPNOTSUPP;
- if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
- MSG_CMSG_COMPAT))
+ if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
return -EINVAL;
- if (len < 4 || len > HCI_MAX_FRAME_SIZE)
+ if (len < 4 || len > hci_pi(sk)->mtu)
return -EINVAL;
+ skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
lock_sock(sk);
switch (hci_pi(sk)->channel) {
@@ -1744,39 +1724,30 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
break;
case HCI_CHANNEL_MONITOR:
err = -EOPNOTSUPP;
- goto done;
+ goto drop;
case HCI_CHANNEL_LOGGING:
- err = hci_logging_frame(sk, msg, len);
- goto done;
+ err = hci_logging_frame(sk, skb, flags);
+ goto drop;
default:
mutex_lock(&mgmt_chan_list_lock);
chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
if (chan)
- err = hci_mgmt_cmd(chan, sk, msg, len);
+ err = hci_mgmt_cmd(chan, sk, skb);
else
err = -EINVAL;
mutex_unlock(&mgmt_chan_list_lock);
- goto done;
+ goto drop;
}
hdev = hci_hdev_from_sock(sk);
if (IS_ERR(hdev)) {
err = PTR_ERR(hdev);
- goto done;
+ goto drop;
}
if (!test_bit(HCI_UP, &hdev->flags)) {
err = -ENETDOWN;
- goto done;
- }
-
- skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
- if (!skb)
- goto done;
-
- if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
- err = -EFAULT;
goto drop;
}
@@ -1857,8 +1828,8 @@ drop:
goto done;
}
-static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
- sockptr_t optval, unsigned int len)
+static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int len)
{
struct hci_ufilter uf = { .opcode = 0 };
struct sock *sk = sock->sk;
@@ -1866,9 +1837,6 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
BT_DBG("sk %p, opt %d", sk, optname);
- if (level != SOL_HCI)
- return -ENOPROTOOPT;
-
lock_sock(sk);
if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
@@ -1943,18 +1911,63 @@ done:
return err;
}
-static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int __user *optlen)
+static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int len)
{
- struct hci_ufilter uf;
struct sock *sk = sock->sk;
- int len, opt, err = 0;
+ int err = 0, opt = 0;
BT_DBG("sk %p, opt %d", sk, optname);
- if (level != SOL_HCI)
+ if (level == SOL_HCI)
+ return hci_sock_setsockopt_old(sock, level, optname, optval,
+ len);
+
+ if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
+ lock_sock(sk);
+
+ switch (optname) {
+ case BT_SNDMTU:
+ case BT_RCVMTU:
+ switch (hci_pi(sk)->channel) {
+ /* Don't allow changing MTU for channels that are meant for HCI
+ * traffic only.
+ */
+ case HCI_CHANNEL_RAW:
+ case HCI_CHANNEL_USER:
+ err = -ENOPROTOOPT;
+ goto done;
+ }
+
+ if (copy_from_sockptr(&opt, optval, sizeof(u16))) {
+ err = -EFAULT;
+ break;
+ }
+
+ hci_pi(sk)->mtu = opt;
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct hci_ufilter uf;
+ struct sock *sk = sock->sk;
+ int len, opt, err = 0;
+
+ BT_DBG("sk %p, opt %d", sk, optname);
+
if (get_user(len, optlen))
return -EFAULT;
@@ -2012,6 +2025,39 @@ done:
return err;
}
+static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ int err = 0;
+
+ BT_DBG("sk %p, opt %d", sk, optname);
+
+ if (level == SOL_HCI)
+ return hci_sock_getsockopt_old(sock, level, optname, optval,
+ optlen);
+
+ if (level != SOL_BLUETOOTH)
+ return -ENOPROTOOPT;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case BT_SNDMTU:
+ case BT_RCVMTU:
+ if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
+ err = -EFAULT;
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return err;
+}
+
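From userspace, the new option can be exercised roughly as below; this is a hypothetical sketch, and the socket must be bound to a channel other than RAW or USER (for example the control channel) for the option to be accepted:

    #include <stdint.h>
    #include <sys/socket.h>
    #include <bluetooth/bluetooth.h> /* SOL_BLUETOOTH, BT_RCVMTU */

    static int hci_set_mtu(int fd, uint16_t mtu)
    {
            /* the kernel copies sizeof(u16) from optval, see above */
            return setsockopt(fd, SOL_BLUETOOTH, BT_RCVMTU, &mtu, sizeof(mtu));
    }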
static const struct proto_ops hci_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 77ba68209dbd..4f8f37599962 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7902,7 +7902,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
dst_type = ADDR_LE_DEV_RANDOM;
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
- hcon = hci_connect_le(hdev, dst, dst_type,
+ hcon = hci_connect_le(hdev, dst, dst_type, false,
chan->sec_level,
HCI_LE_CONN_TIMEOUT,
HCI_ROLE_SLAVE, NULL);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index c99d65ef13b1..160c016a5dfb 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1508,6 +1508,9 @@ static void l2cap_sock_close_cb(struct l2cap_chan *chan)
{
struct sock *sk = chan->data;
+ if (!sk)
+ return;
+
l2cap_sock_kill(sk);
}
@@ -1516,6 +1519,9 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
struct sock *sk = chan->data;
struct sock *parent;
+ if (!sk)
+ return;
+
BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
/* This callback can be called both for server (BT_LISTEN)
@@ -1707,8 +1713,10 @@ static void l2cap_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
- if (l2cap_pi(sk)->chan)
+ if (l2cap_pi(sk)->chan) {
+ l2cap_pi(sk)->chan->data = NULL;
l2cap_chan_put(l2cap_pi(sk)->chan);
+ }
if (l2cap_pi(sk)->rx_busy_skb) {
kfree_skb(l2cap_pi(sk)->rx_busy_skb);
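
The l2cap_sock hunks are a classic back-pointer invalidation fix: the socket destructor clears chan->data before dropping its channel reference, and the channel callbacks bail out on NULL instead of touching a dead socket. Reduced to its shape (minimal sketch, hypothetical names):

#include <stddef.h>

struct chan { void *data; };	/* back-pointer to the owning socket */

static void chan_close_cb(struct chan *c)
{
	if (!c->data)		/* owner already destructed: nothing to do */
		return;
	/* ... safe to use the socket here ... */
}

static void sock_destruct(struct chan *c)
{
	c->data = NULL;		/* sever the link before the socket is freed */
}
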
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index cea01e275f1e..3e5283607b97 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -38,6 +38,7 @@
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
+#include "eir.h"
#define MGMT_VERSION 1
#define MGMT_REVISION 21
@@ -3791,6 +3792,18 @@ static const u8 debug_uuid[16] = {
};
#endif
+/* 330859bc-7506-492d-9370-9a6f0614037f */
+static const u8 quality_report_uuid[16] = {
+ 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
+ 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
+};
+
+/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
+static const u8 offload_codecs_uuid[16] = {
+ 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
+ 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
+};
+
/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = {
0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
@@ -3806,7 +3819,7 @@ static const u8 rpa_resolution_uuid[16] = {
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len)
{
- char buf[62]; /* Enough space for 3 features */
+ char buf[102]; /* Enough space for 5 features: 2 + 20 * 5 */
struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
u16 idx = 0;
u32 flags;
@@ -3850,6 +3863,28 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
idx++;
}
+ if (hdev && hdev->set_quality_report) {
+ if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
+ flags = BIT(0);
+ else
+ flags = 0;
+
+ memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
+ rp->features[idx].flags = cpu_to_le32(flags);
+ idx++;
+ }
+
+ if (hdev && hdev->get_data_path_id) {
+ if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
+ flags = BIT(0);
+ else
+ flags = 0;
+
+ memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
+ rp->features[idx].flags = cpu_to_le32(flags);
+ idx++;
+ }
+
rp->feature_count = cpu_to_le16(idx);
/* After reading the experimental features information, enable
@@ -3892,150 +3927,341 @@ static int exp_debug_feature_changed(bool enabled, struct sock *skip)
}
#endif
-static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
- void *data, u16 data_len)
+static int exp_quality_report_feature_changed(bool enabled, struct sock *skip)
{
- struct mgmt_cp_set_exp_feature *cp = data;
- struct mgmt_rp_set_exp_feature rp;
+ struct mgmt_ev_exp_feature_changed ev;
- bt_dev_dbg(hdev, "sock %p", sk);
+ memset(&ev, 0, sizeof(ev));
+ memcpy(ev.uuid, quality_report_uuid, 16);
+ ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
+
+ return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
+ &ev, sizeof(ev),
+ HCI_MGMT_EXP_FEATURE_EVENTS, skip);
+}
+
+#define EXP_FEAT(_uuid, _set_func) \
+{ \
+ .uuid = _uuid, \
+ .set_func = _set_func, \
+}
+
+/* The zero key uuid is special. Multiple exp features are set through it. */
+static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
+ struct mgmt_cp_set_exp_feature *cp, u16 data_len)
+{
+ struct mgmt_rp_set_exp_feature rp;
- if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
- memset(rp.uuid, 0, 16);
- rp.flags = cpu_to_le32(0);
+ memset(rp.uuid, 0, 16);
+ rp.flags = cpu_to_le32(0);
#ifdef CONFIG_BT_FEATURE_DEBUG
- if (!hdev) {
- bool changed = bt_dbg_get();
+ if (!hdev) {
+ bool changed = bt_dbg_get();
- bt_dbg_set(false);
+ bt_dbg_set(false);
- if (changed)
- exp_debug_feature_changed(false, sk);
- }
+ if (changed)
+ exp_debug_feature_changed(false, sk);
+ }
#endif
- if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
- bool changed = hci_dev_test_flag(hdev,
- HCI_ENABLE_LL_PRIVACY);
+ if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
+ bool changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
- hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
+ hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
- if (changed)
- exp_ll_privacy_feature_changed(false, hdev, sk);
- }
+ if (changed)
+ exp_ll_privacy_feature_changed(false, hdev, sk);
+ }
- hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
+ hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
- return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
- MGMT_OP_SET_EXP_FEATURE, 0,
- &rp, sizeof(rp));
- }
+ return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
+ MGMT_OP_SET_EXP_FEATURE, 0,
+ &rp, sizeof(rp));
+}
#ifdef CONFIG_BT_FEATURE_DEBUG
- if (!memcmp(cp->uuid, debug_uuid, 16)) {
- bool val, changed;
- int err;
+static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
+ struct mgmt_cp_set_exp_feature *cp, u16 data_len)
+{
+ struct mgmt_rp_set_exp_feature rp;
- /* Command requires to use the non-controller index */
- if (hdev)
- return mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_SET_EXP_FEATURE,
- MGMT_STATUS_INVALID_INDEX);
+ bool val, changed;
+ int err;
- /* Parameters are limited to a single octet */
- if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
- return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
- MGMT_OP_SET_EXP_FEATURE,
- MGMT_STATUS_INVALID_PARAMS);
+ /* Command requires to use the non-controller index */
+ if (hdev)
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_INDEX);
- /* Only boolean on/off is supported */
- if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
- return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
- MGMT_OP_SET_EXP_FEATURE,
- MGMT_STATUS_INVALID_PARAMS);
+ /* Parameters are limited to a single octet */
+ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
+ return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_PARAMS);
- val = !!cp->param[0];
- changed = val ? !bt_dbg_get() : bt_dbg_get();
- bt_dbg_set(val);
+ /* Only boolean on/off is supported */
+ if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
+ return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_PARAMS);
- memcpy(rp.uuid, debug_uuid, 16);
- rp.flags = cpu_to_le32(val ? BIT(0) : 0);
+ val = !!cp->param[0];
+ changed = val ? !bt_dbg_get() : bt_dbg_get();
+ bt_dbg_set(val);
- hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
+ memcpy(rp.uuid, debug_uuid, 16);
+ rp.flags = cpu_to_le32(val ? BIT(0) : 0);
- err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
- MGMT_OP_SET_EXP_FEATURE, 0,
- &rp, sizeof(rp));
+ hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
- if (changed)
- exp_debug_feature_changed(val, sk);
+ err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
+ MGMT_OP_SET_EXP_FEATURE, 0,
+ &rp, sizeof(rp));
- return err;
- }
+ if (changed)
+ exp_debug_feature_changed(val, sk);
+
+ return err;
+}
#endif
- if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
- bool val, changed;
- int err;
- u32 flags;
+static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
+ struct mgmt_cp_set_exp_feature *cp,
+ u16 data_len)
+{
+ struct mgmt_rp_set_exp_feature rp;
+ bool val, changed;
+ int err;
+ u32 flags;
+
+ /* Command requires to use the controller index */
+ if (!hdev)
+ return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_INDEX);
- /* Command requires to use the controller index */
- if (!hdev)
- return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
- MGMT_OP_SET_EXP_FEATURE,
- MGMT_STATUS_INVALID_INDEX);
+ /* Changes can only be made when controller is powered down */
+ if (hdev_is_powered(hdev))
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_REJECTED);
- /* Changes can only be made when controller is powered down */
- if (hdev_is_powered(hdev))
- return mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_SET_EXP_FEATURE,
- MGMT_STATUS_REJECTED);
+ /* Parameters are limited to a single octet */
+ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_PARAMS);
- /* Parameters are limited to a single octet */
- if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
- return mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_SET_EXP_FEATURE,
- MGMT_STATUS_INVALID_PARAMS);
+ /* Only boolean on/off is supported */
+ if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_PARAMS);
- /* Only boolean on/off is supported */
- if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
- return mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_SET_EXP_FEATURE,
- MGMT_STATUS_INVALID_PARAMS);
+ val = !!cp->param[0];
- val = !!cp->param[0];
+ if (val) {
+ changed = !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
+ hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
+ hci_dev_clear_flag(hdev, HCI_ADVERTISING);
- if (val) {
- changed = !hci_dev_test_flag(hdev,
- HCI_ENABLE_LL_PRIVACY);
- hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
- hci_dev_clear_flag(hdev, HCI_ADVERTISING);
+ /* Enable LL privacy + supported settings changed */
+ flags = BIT(0) | BIT(1);
+ } else {
+ changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
+ hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
- /* Enable LL privacy + supported settings changed */
- flags = BIT(0) | BIT(1);
- } else {
- changed = hci_dev_test_flag(hdev,
- HCI_ENABLE_LL_PRIVACY);
- hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
+ /* Disable LL privacy + supported settings changed */
+ flags = BIT(1);
+ }
- /* Disable LL privacy + supported settings changed */
- flags = BIT(1);
+ memcpy(rp.uuid, rpa_resolution_uuid, 16);
+ rp.flags = cpu_to_le32(flags);
+
+ hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
+
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE, 0,
+ &rp, sizeof(rp));
+
+ if (changed)
+ exp_ll_privacy_feature_changed(val, hdev, sk);
+
+ return err;
+}
+
+static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
+ struct mgmt_cp_set_exp_feature *cp,
+ u16 data_len)
+{
+ struct mgmt_rp_set_exp_feature rp;
+ bool val, changed;
+ int err;
+
+ /* Command requires a valid controller index */
+ if (!hdev)
+ return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_INDEX);
+
+ /* Parameters are limited to a single octet */
+ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ /* Only boolean on/off is supported */
+ if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_req_sync_lock(hdev);
+
+ val = !!cp->param[0];
+ changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
+
+ if (!hdev->set_quality_report) {
+ err = mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_NOT_SUPPORTED);
+ goto unlock_quality_report;
+ }
+
+ if (changed) {
+ err = hdev->set_quality_report(hdev, val);
+ if (err) {
+ err = mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_FAILED);
+ goto unlock_quality_report;
}
+ if (val)
+ hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
+ else
+ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
+ }
- memcpy(rp.uuid, rpa_resolution_uuid, 16);
- rp.flags = cpu_to_le32(flags);
+ bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
- hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
+ memcpy(rp.uuid, quality_report_uuid, 16);
+ rp.flags = cpu_to_le32(val ? BIT(0) : 0);
+ hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE, 0,
+ &rp, sizeof(rp));
- err = mgmt_cmd_complete(sk, hdev->id,
- MGMT_OP_SET_EXP_FEATURE, 0,
- &rp, sizeof(rp));
+ if (changed)
+ exp_quality_report_feature_changed(val, sk);
- if (changed)
- exp_ll_privacy_feature_changed(val, hdev, sk);
+unlock_quality_report:
+ hci_req_sync_unlock(hdev);
+ return err;
+}
- return err;
+static int exp_offload_codec_feature_changed(bool enabled, struct sock *skip)
+{
+ struct mgmt_ev_exp_feature_changed ev;
+
+ memset(&ev, 0, sizeof(ev));
+ memcpy(ev.uuid, offload_codecs_uuid, 16);
+ ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
+
+ return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
+ &ev, sizeof(ev),
+ HCI_MGMT_EXP_FEATURE_EVENTS, skip);
+}
+
+static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
+ struct mgmt_cp_set_exp_feature *cp,
+ u16 data_len)
+{
+ bool val, changed;
+ int err;
+ struct mgmt_rp_set_exp_feature rp;
+
+ /* Command requires a valid controller index */
+ if (!hdev)
+ return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_INDEX);
+
+ /* Parameters are limited to a single octet */
+ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ /* Only boolean on/off is supported */
+ if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ val = !!cp->param[0];
+ changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
+
+ if (!hdev->get_data_path_id) {
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_NOT_SUPPORTED);
+ }
+
+ if (changed) {
+ if (val)
+ hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
+ else
+ hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
+ }
+
+ bt_dev_info(hdev, "offload codecs enable %d changed %d",
+ val, changed);
+
+ memcpy(rp.uuid, offload_codecs_uuid, 16);
+ rp.flags = cpu_to_le32(val ? BIT(0) : 0);
+ hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE, 0,
+ &rp, sizeof(rp));
+
+ if (changed)
+ exp_offload_codec_feature_changed(val, sk);
+
+ return err;
+}
+
+static const struct mgmt_exp_feature {
+ const u8 *uuid;
+ int (*set_func)(struct sock *sk, struct hci_dev *hdev,
+ struct mgmt_cp_set_exp_feature *cp, u16 data_len);
+} exp_features[] = {
+ EXP_FEAT(ZERO_KEY, set_zero_key_func),
+#ifdef CONFIG_BT_FEATURE_DEBUG
+ EXP_FEAT(debug_uuid, set_debug_func),
+#endif
+ EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
+ EXP_FEAT(quality_report_uuid, set_quality_report_func),
+ EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
+
+ /* end with a null feature */
+ EXP_FEAT(NULL, NULL)
+};
+
+static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
+{
+ struct mgmt_cp_set_exp_feature *cp = data;
+ size_t i = 0;
+
+ bt_dev_dbg(hdev, "sock %p", sk);
+
+ for (i = 0; exp_features[i].uuid; i++) {
+ if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
+ return exp_features[i].set_func(sk, hdev, cp, data_len);
}
return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
@@ -7315,6 +7541,11 @@ static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
if (!rp)
return -ENOMEM;
+ if (!status && !lmp_ssp_capable(hdev)) {
+ status = MGMT_STATUS_NOT_SUPPORTED;
+ eir_len = 0;
+ }
+
if (status)
goto complete;
@@ -7526,7 +7757,7 @@ static u8 calculate_name_len(struct hci_dev *hdev)
{
u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
- return append_local_name(hdev, buf, 0);
+ return eir_append_local_name(hdev, buf, 0);
}
static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
@@ -8222,7 +8453,7 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
* advertising.
*/
if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
MGMT_STATUS_NOT_SUPPORTED);
hci_dev_lock(hdev);
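
After this refactor, set_exp_feature() is a sentinel-terminated dispatch table rather than a memcmp ladder; adding an experimental feature means one handler plus one EXP_FEAT() line. The pattern in miniature, as standalone C with hypothetical names:

#include <stdio.h>
#include <string.h>

typedef int (*set_fn)(const unsigned char *param);

struct feature {
	const unsigned char *uuid;	/* 16-byte key; NULL ends the table */
	set_fn set;
};

static const unsigned char uuid_a[16] = { 0x01 };

static int set_a(const unsigned char *param)
{
	return printf("feature a: %u\n", *param) < 0 ? -1 : 0;
}

static const struct feature features[] = {
	{ uuid_a, set_a },
	{ NULL, NULL },			/* sentinel, like EXP_FEAT(NULL, NULL) */
};

static int dispatch(const unsigned char *uuid, const unsigned char *param)
{
	for (size_t i = 0; features[i].uuid; i++)
		if (!memcmp(uuid, features[i].uuid, 16))
			return features[i].set(param);
	return -1;			/* unknown UUID: not supported */
}

int main(void)
{
	unsigned char on = 1;
	return dispatch(uuid_a, &on);
}
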
diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
index b4bfae41e8a5..255cffa554ee 100644
--- a/net/bluetooth/msft.c
+++ b/net/bluetooth/msft.c
@@ -94,11 +94,14 @@ struct msft_data {
__u16 pending_add_handle;
__u16 pending_remove_handle;
__u8 reregistering;
+ __u8 suspending;
__u8 filter_enabled;
};
static int __msft_add_monitor_pattern(struct hci_dev *hdev,
struct adv_monitor *monitor);
+static int __msft_remove_monitor(struct hci_dev *hdev,
+ struct adv_monitor *monitor, u16 handle);
bool msft_monitor_supported(struct hci_dev *hdev)
{
@@ -154,7 +157,7 @@ failed:
}
/* This function requires the caller holds hdev->lock */
-static void reregister_monitor_on_restart(struct hci_dev *hdev, int handle)
+static void reregister_monitor(struct hci_dev *hdev, int handle)
{
struct adv_monitor *monitor;
struct msft_data *msft = hdev->msft_data;
@@ -182,31 +185,102 @@ static void reregister_monitor_on_restart(struct hci_dev *hdev, int handle)
}
}
+/* This function requires the caller holds hdev->lock */
+static void remove_monitor_on_suspend(struct hci_dev *hdev, int handle)
+{
+ struct adv_monitor *monitor;
+ struct msft_data *msft = hdev->msft_data;
+ int err;
+
+ while (1) {
+ monitor = idr_get_next(&hdev->adv_monitors_idr, &handle);
+ if (!monitor) {
+ /* All monitors have been removed */
+ msft->suspending = false;
+ hci_update_background_scan(hdev);
+ return;
+ }
+
+ msft->pending_remove_handle = (u16)handle;
+ err = __msft_remove_monitor(hdev, monitor, handle);
+
+ /* If success, return and wait for monitor removed callback */
+ if (!err)
+ return;
+
+ /* Otherwise free the monitor and keep removing */
+ hci_free_adv_monitor(hdev, monitor);
+ handle++;
+ }
+}
+
+/* This function requires the caller holds hdev->lock */
+void msft_suspend(struct hci_dev *hdev)
+{
+ struct msft_data *msft = hdev->msft_data;
+
+ if (!msft)
+ return;
+
+ if (msft_monitor_supported(hdev)) {
+ msft->suspending = true;
+ /* Quietly remove all monitors on suspend to avoid waking up
+ * the system.
+ */
+ remove_monitor_on_suspend(hdev, 0);
+ }
+}
+
+/* This function requires the caller holds hdev->lock */
+void msft_resume(struct hci_dev *hdev)
+{
+ struct msft_data *msft = hdev->msft_data;
+
+ if (!msft)
+ return;
+
+ if (msft_monitor_supported(hdev)) {
+ msft->reregistering = true;
+ /* Monitors are removed on suspend, so we need to add all
+ * monitors on resume.
+ */
+ reregister_monitor(hdev, 0);
+ }
+}
+
void msft_do_open(struct hci_dev *hdev)
{
- struct msft_data *msft;
+ struct msft_data *msft = hdev->msft_data;
if (hdev->msft_opcode == HCI_OP_NOP)
return;
+ if (!msft) {
+ bt_dev_err(hdev, "MSFT extension not registered");
+ return;
+ }
+
bt_dev_dbg(hdev, "Initialize MSFT extension");
- msft = kzalloc(sizeof(*msft), GFP_KERNEL);
- if (!msft)
- return;
+ /* Reset existing MSFT data before re-reading */
+ kfree(msft->evt_prefix);
+ msft->evt_prefix = NULL;
+ msft->evt_prefix_len = 0;
+ msft->features = 0;
if (!read_supported_features(hdev, msft)) {
+ hdev->msft_data = NULL;
kfree(msft);
return;
}
- INIT_LIST_HEAD(&msft->handle_map);
- hdev->msft_data = msft;
-
if (msft_monitor_supported(hdev)) {
msft->reregistering = true;
msft_set_filter_enable(hdev, true);
- reregister_monitor_on_restart(hdev, 0);
+ /* Monitors get removed on power off, so we need to explicitly
+ * tell the controller to re-monitor.
+ */
+ reregister_monitor(hdev, 0);
}
}
@@ -221,8 +295,9 @@ void msft_do_close(struct hci_dev *hdev)
bt_dev_dbg(hdev, "Cleanup of MSFT extension");
- hdev->msft_data = NULL;
-
+ /* The controller will silently remove all monitors on power off.
+ * Therefore, remove handle_data mapping and reset monitor state.
+ */
list_for_each_entry_safe(handle_data, tmp, &msft->handle_map, list) {
monitor = idr_find(&hdev->adv_monitors_idr,
handle_data->mgmt_handle);
@@ -233,6 +308,34 @@ void msft_do_close(struct hci_dev *hdev)
list_del(&handle_data->list);
kfree(handle_data);
}
+}
+
+void msft_register(struct hci_dev *hdev)
+{
+ struct msft_data *msft = NULL;
+
+ bt_dev_dbg(hdev, "Register MSFT extension");
+
+ msft = kzalloc(sizeof(*msft), GFP_KERNEL);
+ if (!msft) {
+ bt_dev_err(hdev, "Failed to register MSFT extension");
+ return;
+ }
+
+ INIT_LIST_HEAD(&msft->handle_map);
+ hdev->msft_data = msft;
+}
+
+void msft_unregister(struct hci_dev *hdev)
+{
+ struct msft_data *msft = hdev->msft_data;
+
+ if (!msft)
+ return;
+
+ bt_dev_dbg(hdev, "Unregister MSFT extension");
+
+ hdev->msft_data = NULL;
kfree(msft->evt_prefix);
kfree(msft);
@@ -345,8 +448,7 @@ unlock:
/* If in restart/reregister sequence, keep registering. */
if (msft->reregistering)
- reregister_monitor_on_restart(hdev,
- msft->pending_add_handle + 1);
+ reregister_monitor(hdev, msft->pending_add_handle + 1);
hci_dev_unlock(hdev);
@@ -383,13 +485,25 @@ static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
if (handle_data) {
monitor = idr_find(&hdev->adv_monitors_idr,
handle_data->mgmt_handle);
- if (monitor)
+
+ if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED)
+ monitor->state = ADV_MONITOR_STATE_REGISTERED;
+
+ /* Do not free the monitor if it is being removed due to
+ * suspend. It will be re-monitored on resume.
+ */
+ if (monitor && !msft->suspending)
hci_free_adv_monitor(hdev, monitor);
list_del(&handle_data->list);
kfree(handle_data);
}
+ /* If in suspend/remove sequence, keep removing. */
+ if (msft->suspending)
+ remove_monitor_on_suspend(hdev,
+ msft->pending_remove_handle + 1);
+
/* If removing all monitors is required, we need to continue the process
* here because it was paused earlier while waiting for the response
* from the controller.
@@ -408,7 +522,8 @@ static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
hci_dev_unlock(hdev);
done:
- hci_remove_adv_monitor_complete(hdev, status);
+ if (!msft->suspending)
+ hci_remove_adv_monitor_complete(hdev, status);
}
static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev,
@@ -541,15 +656,15 @@ int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor)
if (!msft)
return -EOPNOTSUPP;
- if (msft->reregistering)
+ if (msft->reregistering || msft->suspending)
return -EBUSY;
return __msft_add_monitor_pattern(hdev, monitor);
}
/* This function requires the caller holds hdev->lock */
-int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
- u16 handle)
+static int __msft_remove_monitor(struct hci_dev *hdev,
+ struct adv_monitor *monitor, u16 handle)
{
struct msft_cp_le_cancel_monitor_advertisement cp;
struct msft_monitor_advertisement_handle_data *handle_data;
@@ -557,12 +672,6 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
struct msft_data *msft = hdev->msft_data;
int err = 0;
- if (!msft)
- return -EOPNOTSUPP;
-
- if (msft->reregistering)
- return -EBUSY;
-
handle_data = msft_find_handle_data(hdev, monitor->handle, true);
/* If no matched handle, just remove without telling controller */
@@ -582,6 +691,21 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
return err;
}
+/* This function requires the caller holds hdev->lock */
+int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
+ u16 handle)
+{
+ struct msft_data *msft = hdev->msft_data;
+
+ if (!msft)
+ return -EOPNOTSUPP;
+
+ if (msft->reregistering || msft->suspending)
+ return -EBUSY;
+
+ return __msft_remove_monitor(hdev, monitor, handle);
+}
+
void msft_req_add_set_filter_enable(struct hci_request *req, bool enable)
{
struct hci_dev *hdev = req->hdev;
diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h
index 6e56d94b88d8..59c6e081c789 100644
--- a/net/bluetooth/msft.h
+++ b/net/bluetooth/msft.h
@@ -13,6 +13,8 @@
#if IS_ENABLED(CONFIG_BT_MSFTEXT)
bool msft_monitor_supported(struct hci_dev *hdev);
+void msft_register(struct hci_dev *hdev);
+void msft_unregister(struct hci_dev *hdev);
void msft_do_open(struct hci_dev *hdev);
void msft_do_close(struct hci_dev *hdev);
void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb);
@@ -22,6 +24,8 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
u16 handle);
void msft_req_add_set_filter_enable(struct hci_request *req, bool enable);
int msft_set_filter_enable(struct hci_dev *hdev, bool enable);
+void msft_suspend(struct hci_dev *hdev);
+void msft_resume(struct hci_dev *hdev);
bool msft_curve_validity(struct hci_dev *hdev);
#else
@@ -31,6 +35,8 @@ static inline bool msft_monitor_supported(struct hci_dev *hdev)
return false;
}
+static inline void msft_register(struct hci_dev *hdev) {}
+static inline void msft_unregister(struct hci_dev *hdev) {}
static inline void msft_do_open(struct hci_dev *hdev) {}
static inline void msft_do_close(struct hci_dev *hdev) {}
static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {}
@@ -55,6 +61,9 @@ static inline int msft_set_filter_enable(struct hci_dev *hdev, bool enable)
return -EOPNOTSUPP;
}
+static inline void msft_suspend(struct hci_dev *hdev) {}
+static inline void msft_resume(struct hci_dev *hdev) {}
+
static inline bool msft_curve_validity(struct hci_dev *hdev)
{
return false;
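
msft.h follows the usual kernel convention for optional features: when CONFIG_BT_MSFTEXT is off, each new entry point (msft_register, msft_suspend, and so on) gets a static inline no-op so callers in the HCI core never need an #ifdef. The generic shape of the idiom, as a sketch:

struct dev;	/* opaque to the caller */

#ifdef CONFIG_FEATURE_X
void feature_suspend(struct dev *d);	/* real implementation elsewhere */
#else
static inline void feature_suspend(struct dev *d) { (void)d; }	/* compiles away */
#endif
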
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index f2bacb464ccf..7324764384b6 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -549,22 +549,58 @@ struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel)
return dlc;
}
+static int rfcomm_dlc_send_frag(struct rfcomm_dlc *d, struct sk_buff *frag)
+{
+ int len = frag->len;
+
+ BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len);
+
+ if (len > d->mtu)
+ return -EINVAL;
+
+ rfcomm_make_uih(frag, d->addr);
+ __skb_queue_tail(&d->tx_queue, frag);
+
+ return len;
+}
+
int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
{
- int len = skb->len;
+ unsigned long flags;
+ struct sk_buff *frag, *next;
+ int len;
if (d->state != BT_CONNECTED)
return -ENOTCONN;
- BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len);
+ frag = skb_shinfo(skb)->frag_list;
+ skb_shinfo(skb)->frag_list = NULL;
- if (len > d->mtu)
- return -EINVAL;
+ /* Queue all fragments atomically. */
+ spin_lock_irqsave(&d->tx_queue.lock, flags);
- rfcomm_make_uih(skb, d->addr);
- skb_queue_tail(&d->tx_queue, skb);
+ len = rfcomm_dlc_send_frag(d, skb);
+ if (len < 0 || !frag)
+ goto unlock;
+
+ for (; frag; frag = next) {
+ int ret;
+
+ next = frag->next;
+
+ ret = rfcomm_dlc_send_frag(d, frag);
+ if (ret < 0) {
+ kfree_skb(frag);
+ goto unlock;
+ }
+
+ len += ret;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&d->tx_queue.lock, flags);
- if (!test_bit(RFCOMM_TX_THROTTLED, &d->flags))
+ if (len > 0 && !test_bit(RFCOMM_TX_THROTTLED, &d->flags))
rfcomm_schedule();
return len;
}
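
rfcomm_dlc_send() now detaches the skb's frag_list and queues the head plus all fragments inside a single tx_queue.lock section, so the transmit path never sees a partially queued message; a fragment that fails is freed and only the byte count actually queued is reported. A stripped-down approximation of that loop (plain C, hypothetical types, locking elided):

#include <stdlib.h>

struct frag { struct frag *next; int len; };

static int queue_all(struct frag *head, int mtu)
{
	struct frag *f, *next;
	int sent = 0;

	/* Caller holds the queue lock for the whole walk. */
	for (f = head; f; f = next) {
		next = f->next;
		if (f->len > mtu) {	/* mirrors the -EINVAL fragment path */
			free(f);
			break;
		}
		sent += f->len;		/* fragment queued */
	}
	return sent;
}
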
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 2c95bb58f901..4bf4ea6cbb5e 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -575,46 +575,20 @@ static int rfcomm_sock_sendmsg(struct socket *sock, struct msghdr *msg,
lock_sock(sk);
sent = bt_sock_wait_ready(sk, msg->msg_flags);
- if (sent)
- goto done;
-
- while (len) {
- size_t size = min_t(size_t, len, d->mtu);
- int err;
-
- skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE,
- msg->msg_flags & MSG_DONTWAIT, &err);
- if (!skb) {
- if (sent == 0)
- sent = err;
- break;
- }
- skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE);
-
- err = memcpy_from_msg(skb_put(skb, size), msg, size);
- if (err) {
- kfree_skb(skb);
- if (sent == 0)
- sent = err;
- break;
- }
- skb->priority = sk->sk_priority;
+ release_sock(sk);
- err = rfcomm_dlc_send(d, skb);
- if (err < 0) {
- kfree_skb(skb);
- if (sent == 0)
- sent = err;
- break;
- }
+ if (sent)
+ return sent;
- sent += size;
- len -= size;
- }
+ skb = bt_skb_sendmmsg(sk, msg, len, d->mtu, RFCOMM_SKB_HEAD_RESERVE,
+ RFCOMM_SKB_TAIL_RESERVE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
-done:
- release_sock(sk);
+ sent = rfcomm_dlc_send(d, skb);
+ if (sent < 0)
+ kfree_skb(skb);
return sent;
}
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 98a881586512..8eabf41b2993 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -69,6 +69,7 @@ struct sco_pinfo {
__u32 flags;
__u16 setting;
__u8 cmsg_mask;
+ struct bt_codec codec;
struct sco_conn *conn;
};
@@ -133,6 +134,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
return NULL;
spin_lock_init(&conn->lock);
+ INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout);
hcon->sco_data = conn;
conn->hcon = hcon;
@@ -187,20 +189,21 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
/* Kill socket */
sco_conn_lock(conn);
sk = conn->sk;
+ if (sk)
+ sock_hold(sk);
sco_conn_unlock(conn);
if (sk) {
- sock_hold(sk);
lock_sock(sk);
sco_sock_clear_timer(sk);
sco_chan_del(sk, err);
release_sock(sk);
sock_put(sk);
-
- /* Ensure no more work items will run before freeing conn. */
- cancel_delayed_work_sync(&conn->timeout_work);
}
+ /* Ensure no more work items will run before freeing conn. */
+ cancel_delayed_work_sync(&conn->timeout_work);
+
hcon->sco_data = NULL;
kfree(conn);
}
@@ -213,8 +216,6 @@ static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
sco_pi(sk)->conn = conn;
conn->sk = sk;
- INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout);
-
if (parent)
bt_accept_enqueue(parent, sk, true);
}
@@ -252,7 +253,7 @@ static int sco_connect(struct hci_dev *hdev, struct sock *sk)
return -EOPNOTSUPP;
hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
- sco_pi(sk)->setting);
+ sco_pi(sk)->setting, &sco_pi(sk)->codec);
if (IS_ERR(hcon))
return PTR_ERR(hcon);
@@ -280,11 +281,10 @@ static int sco_connect(struct hci_dev *hdev, struct sock *sk)
return err;
}
-static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
+static int sco_send_frame(struct sock *sk, struct sk_buff *skb)
{
struct sco_conn *conn = sco_pi(sk)->conn;
- struct sk_buff *skb;
- int err;
+ int len = skb->len;
/* Check outgoing MTU */
if (len > conn->mtu)
@@ -292,15 +292,6 @@ static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
BT_DBG("sk %p len %d", sk, len);
- skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
- if (!skb)
- return err;
-
- if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
- kfree_skb(skb);
- return -EFAULT;
- }
-
hci_send_sco(conn->hcon, skb);
return len;
@@ -444,6 +435,7 @@ static void __sco_sock_close(struct sock *sk)
sock_set_flag(sk, SOCK_ZAPPED);
break;
}
+
}
/* Must be called on unlocked socket. */
@@ -504,6 +496,10 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock,
sk->sk_state = BT_OPEN;
sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
+ sco_pi(sk)->codec.id = BT_CODEC_CVSD;
+ sco_pi(sk)->codec.cid = 0xffff;
+ sco_pi(sk)->codec.vid = 0xffff;
+ sco_pi(sk)->codec.data_path = 0x00;
bt_sock_link(&sco_sk_list, sk);
return sk;
@@ -725,6 +721,7 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
+ struct sk_buff *skb;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -736,14 +733,21 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg,
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
+ skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
lock_sock(sk);
if (sk->sk_state == BT_CONNECTED)
- err = sco_send_frame(sk, msg, len);
+ err = sco_send_frame(sk, skb);
else
err = -ENOTCONN;
release_sock(sk);
+
+ if (err < 0)
+ kfree_skb(skb);
return err;
}
@@ -825,6 +829,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
int len, err = 0;
struct bt_voice voice;
u32 opt;
+ struct bt_codecs *codecs;
+ struct hci_dev *hdev;
+ __u8 buffer[255];
BT_DBG("sk %p", sk);
@@ -872,6 +879,16 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
}
sco_pi(sk)->setting = voice.setting;
+ hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src,
+ BDADDR_BREDR);
+ if (!hdev) {
+ err = -EBADFD;
+ break;
+ }
+ if (enhanced_sco_capable(hdev) &&
+ voice.setting == BT_VOICE_TRANSPARENT)
+ sco_pi(sk)->codec.id = BT_CODEC_TRANSPARENT;
+ hci_dev_put(hdev);
break;
case BT_PKT_STATUS:
@@ -886,6 +903,57 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
sco_pi(sk)->cmsg_mask &= SCO_CMSG_PKT_STATUS;
break;
+ case BT_CODEC:
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND &&
+ sk->sk_state != BT_CONNECT2) {
+ err = -EINVAL;
+ break;
+ }
+
+ hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src,
+ BDADDR_BREDR);
+ if (!hdev) {
+ err = -EBADFD;
+ break;
+ }
+
+ if (!hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)) {
+ hci_dev_put(hdev);
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ if (!hdev->get_data_path_id) {
+ hci_dev_put(hdev);
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ if (optlen < sizeof(struct bt_codecs) ||
+ optlen > sizeof(buffer)) {
+ hci_dev_put(hdev);
+ err = -EINVAL;
+ break;
+ }
+
+ if (copy_from_sockptr(buffer, optval, optlen)) {
+ hci_dev_put(hdev);
+ err = -EFAULT;
+ break;
+ }
+
+ codecs = (void *)buffer;
+
+ if (codecs->num_codecs > 1) {
+ hci_dev_put(hdev);
+ err = -EINVAL;
+ break;
+ }
+
+ sco_pi(sk)->codec = codecs->codecs[0];
+ hci_dev_put(hdev);
+ break;
+
default:
err = -ENOPROTOOPT;
break;
@@ -964,6 +1032,12 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
struct bt_voice voice;
u32 phys;
int pkt_status;
+ int buf_len;
+ struct codec_list *c;
+ u8 num_codecs, i, __user *ptr;
+ struct hci_dev *hdev;
+ struct hci_codec_caps *caps;
+ struct bt_codec codec;
BT_DBG("sk %p", sk);
@@ -1028,6 +1102,101 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
err = -EFAULT;
break;
+ case BT_CODEC:
+ num_codecs = 0;
+ buf_len = 0;
+
+ hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR);
+ if (!hdev) {
+ err = -EBADFD;
+ break;
+ }
+
+ if (!hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)) {
+ hci_dev_put(hdev);
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ if (!hdev->get_data_path_id) {
+ hci_dev_put(hdev);
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ /* find total buffer size required to copy codec + caps */
+ hci_dev_lock(hdev);
+ list_for_each_entry(c, &hdev->local_codecs, list) {
+ if (c->transport != HCI_TRANSPORT_SCO_ESCO)
+ continue;
+ num_codecs++;
+ for (i = 0, caps = c->caps; i < c->num_caps; i++) {
+ buf_len += 1 + caps->len;
+ caps = (void *)&caps->data[caps->len];
+ }
+ buf_len += sizeof(struct bt_codec);
+ }
+ hci_dev_unlock(hdev);
+
+ buf_len += sizeof(struct bt_codecs);
+ if (buf_len > len) {
+ hci_dev_put(hdev);
+ err = -ENOBUFS;
+ break;
+ }
+ ptr = optval;
+
+ if (put_user(num_codecs, ptr)) {
+ hci_dev_put(hdev);
+ err = -EFAULT;
+ break;
+ }
+ ptr += sizeof(num_codecs);
+
+ /* Iterate all the codecs supported over SCO and populate
+ * codec data
+ */
+ hci_dev_lock(hdev);
+ list_for_each_entry(c, &hdev->local_codecs, list) {
+ if (c->transport != HCI_TRANSPORT_SCO_ESCO)
+ continue;
+
+ codec.id = c->id;
+ codec.cid = c->cid;
+ codec.vid = c->vid;
+ err = hdev->get_data_path_id(hdev, &codec.data_path);
+ if (err < 0)
+ break;
+ codec.num_caps = c->num_caps;
+ if (copy_to_user(ptr, &codec, sizeof(codec))) {
+ err = -EFAULT;
+ break;
+ }
+ ptr += sizeof(codec);
+
+ /* find codec capabilities data length */
+ len = 0;
+ for (i = 0, caps = c->caps; i < c->num_caps; i++) {
+ len += 1 + caps->len;
+ caps = (void *)&caps->data[caps->len];
+ }
+
+ /* copy codec capabilities data */
+ if (len && copy_to_user(ptr, c->caps, len)) {
+ err = -EFAULT;
+ break;
+ }
+ ptr += len;
+ }
+
+ if (!err && put_user(buf_len, optlen))
+ err = -EFAULT;
+
+ hci_dev_unlock(hdev);
+ hci_dev_put(hdev);
+
+ break;
+
default:
err = -ENOPROTOOPT;
break;
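
The BT_CODEC read side above is a two-pass scheme: first walk hdev->local_codecs to compute exactly how large the reply must be, fail with ENOBUFS if the caller's buffer is smaller, then walk again copying the codec descriptors and their capability TLVs. The sizing arithmetic for one codec, roughly (user-space sketch, hypothetical types):

#include <stddef.h>
#include <stdint.h>

struct cap { uint8_t len; uint8_t data[]; };	/* length-prefixed TLV */

static size_t codec_reply_size(const struct cap *caps, unsigned int num_caps,
			       size_t fixed_desc_size)
{
	size_t need = fixed_desc_size;

	for (unsigned int i = 0; i < num_caps; i++) {
		need += 1 + caps->len;		/* length byte + payload */
		caps = (const void *)&caps->data[caps->len];
	}
	return need;	/* caller sums over codecs, adds the header,
			 * and compares against the user buffer */
}
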
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index fcb2f493f710..072f0c16c779 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -558,6 +558,12 @@ static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}
+static struct proto bpf_dummy_proto = {
+ .name = "bpf_dummy",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct sock),
+};
+
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
@@ -602,20 +608,19 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
break;
}
- sk = kzalloc(sizeof(struct sock), GFP_USER);
+ sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
if (!sk) {
kfree(data);
kfree(ctx);
return -ENOMEM;
}
- sock_net_set(sk, net);
sock_init_data(NULL, sk);
skb = build_skb(data, 0);
if (!skb) {
kfree(data);
kfree(ctx);
- kfree(sk);
+ sk_free(sk);
return -ENOMEM;
}
skb->sk = sk;
@@ -688,8 +693,7 @@ out:
if (dev && dev != net->loopback_dev)
dev_put(dev);
kfree_skb(skb);
- bpf_sk_storage_free(sk);
- kfree(sk);
+ sk_free(sk);
kfree(ctx);
return ret;
}
@@ -803,7 +807,8 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
if (ret)
goto free_data;
- bpf_prog_change_xdp(NULL, prog);
+ if (repeat > 1)
+ bpf_prog_change_xdp(NULL, prog);
ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
/* We convert the xdp_buff back to an xdp_md before checking the return
* code so the reference count of any held netdevice will be decremented
@@ -824,7 +829,8 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
sizeof(struct xdp_md));
out:
- bpf_prog_change_xdp(prog, NULL);
+ if (repeat > 1)
+ bpf_prog_change_xdp(prog, NULL);
free_data:
kfree(data);
free_ctx:
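
Two orthogonal fixes in test_run.c: the dummy socket becomes a real sk_alloc()'d socket with its own struct proto, so sk_free() runs the normal teardown (including BPF socket storage), and bpf_prog_change_xdp() is only toggled when the test actually loops. From user space the repeat knob is set as usual; a rough libbpf sketch, assuming bpf_prog_test_run_opts() from a contemporary libbpf:

#include <bpf/bpf.h>

static int run_xdp_test(int prog_fd, void *pkt, size_t pkt_len)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.repeat = 100,	/* >1: kernel brackets the run with
				 * bpf_prog_change_xdp() */
	);

	return bpf_prog_test_run_opts(prog_fd, &opts);
}
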
diff --git a/net/bridge/br.c b/net/bridge/br.c
index d3a32c6813e0..1fac72cc617f 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -36,7 +36,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
bool changed_addr;
int err;
- if (dev->priv_flags & IFF_EBRIDGE) {
+ if (netif_is_bridge_master(dev)) {
err = br_vlan_bridge_event(dev, event, ptr);
if (err)
return notifier_from_errno(err);
@@ -349,7 +349,7 @@ static void __net_exit br_net_exit(struct net *net)
rtnl_lock();
for_each_netdev(net, dev)
- if (dev->priv_flags & IFF_EBRIDGE)
+ if (netif_is_bridge_master(dev))
br_dev_delete(dev, &list);
unregister_netdevice_many(&list);
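
All the bridge-side `dev->priv_flags & IFF_EBRIDGE` tests in this series become netif_is_bridge_master() calls. The helper already exists in <linux/netdevice.h> and is essentially:

static inline bool netif_is_bridge_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_EBRIDGE;
}
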
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 46812b659710..a6a68e18c70a 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -825,7 +825,7 @@ int br_fdb_dump(struct sk_buff *skb,
struct net_bridge_fdb_entry *f;
int err = 0;
- if (!(dev->priv_flags & IFF_EBRIDGE))
+ if (!netif_is_bridge_master(dev))
return err;
if (!filter_dev) {
@@ -1076,7 +1076,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return -EINVAL;
}
- if (dev->priv_flags & IFF_EBRIDGE) {
+ if (netif_is_bridge_master(dev)) {
br = netdev_priv(dev);
vg = br_vlan_group(br);
} else {
@@ -1173,7 +1173,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_bridge *br;
int err;
- if (dev->priv_flags & IFF_EBRIDGE) {
+ if (netif_is_bridge_master(dev)) {
br = netdev_priv(dev);
vg = br_vlan_group(br);
} else {
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 4a02f8bb278a..c11bba3e7ec0 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -471,7 +471,7 @@ int br_del_bridge(struct net *net, const char *name)
if (dev == NULL)
ret = -ENXIO; /* Could not find device */
- else if (!(dev->priv_flags & IFF_EBRIDGE)) {
+ else if (!netif_is_bridge_master(dev)) {
/* Attempt to delete non bridge device! */
ret = -EPERM;
}
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 793b0db9d9a3..db4ab2c2ce18 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -26,7 +26,7 @@ static int get_bridge_ifindices(struct net *net, int *indices, int num)
for_each_netdev_rcu(net, dev) {
if (i >= num)
break;
- if (dev->priv_flags & IFF_EBRIDGE)
+ if (netif_is_bridge_master(dev))
indices[i++] = dev->ifindex;
}
rcu_read_unlock();
@@ -71,7 +71,8 @@ static int get_fdb_entries(struct net_bridge *br, void __user *userbuf,
num = br_fdb_fillbuf(br, buf, maxnum, offset);
if (num > 0) {
- if (copy_to_user(userbuf, buf, num*sizeof(struct __fdb_entry)))
+ if (copy_to_user(userbuf, buf,
+ array_size(num, sizeof(struct __fdb_entry))))
num = -EFAULT;
}
kfree(buf);
@@ -188,7 +189,7 @@ int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user
return -ENOMEM;
get_port_ifindices(br, indices, num);
- if (copy_to_user(argp, indices, num * sizeof(int)))
+ if (copy_to_user(argp, indices, array_size(num, sizeof(int))))
num = -EFAULT;
kfree(indices);
return num;
@@ -336,7 +337,8 @@ static int old_deviceless(struct net *net, void __user *uarg)
args[2] = get_bridge_ifindices(net, indices, args[2]);
- ret = copy_to_user(uarg, indices, args[2]*sizeof(int))
+ ret = copy_to_user(uarg, indices,
+ array_size(args[2], sizeof(int)))
? -EFAULT : args[2];
kfree(indices);
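
copy_to_user() sizes previously computed as `num * sizeof(...)` now go through array_size(), which saturates rather than wraps on multiplication overflow, so an oversized count fails the copy instead of truncating it. A user-space analogue of the semantics:

#include <stddef.h>
#include <stdint.h>

/* Saturating multiply: SIZE_MAX on overflow, like the kernel's array_size(). */
static size_t array_size(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return SIZE_MAX;
	return n * size;
}
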
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 0281453f7766..61ccf46fcc21 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -422,7 +422,7 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->seq = net->dev_base_seq;
for_each_netdev_rcu(net, dev) {
- if (dev->priv_flags & IFF_EBRIDGE) {
+ if (netif_is_bridge_master(dev)) {
struct net_bridge *br = netdev_priv(dev);
struct br_port_msg *bpm;
@@ -1016,7 +1016,7 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
return -ENODEV;
}
- if (!(dev->priv_flags & IFF_EBRIDGE)) {
+ if (!netif_is_bridge_master(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
return -EOPNOTSUPP;
}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 3523c8c7068f..f3d751105343 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1677,8 +1677,6 @@ static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
int ifindex,
struct br_ip *saddr)
{
- lockdep_assert_held_once(&brmctx->br->multicast_lock);
-
write_seqcount_begin(&querier->seq);
querier->port_ifidx = ifindex;
memcpy(&querier->addr, saddr, sizeof(*saddr));
@@ -3867,13 +3865,13 @@ void br_multicast_ctx_init(struct net_bridge *br,
brmctx->ip4_other_query.delay_time = 0;
brmctx->ip4_querier.port_ifidx = 0;
- seqcount_init(&brmctx->ip4_querier.seq);
+ seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
brmctx->multicast_mld_version = 1;
brmctx->ip6_other_query.delay_time = 0;
brmctx->ip6_querier.port_ifidx = 0;
- seqcount_init(&brmctx->ip6_querier.seq);
+ seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif
timer_setup(&brmctx->ip4_mc_router_timer,
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 8edfb98ae1d5..b5af68c105a8 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -968,7 +968,7 @@ static int brnf_device_event(struct notifier_block *unused, unsigned long event,
struct net *net;
int ret;
- if (event != NETDEV_REGISTER || !(dev->priv_flags & IFF_EBRIDGE))
+ if (event != NETDEV_REGISTER || !netif_is_bridge_master(dev))
return NOTIFY_DONE;
ASSERT_RTNL();
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 6c58fc14d2cb..0c8b5f1a15bc 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -106,7 +106,7 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
p = br_port_get_check_rcu(dev);
if (p)
vg = nbp_vlan_group_rcu(p);
- } else if (dev->priv_flags & IFF_EBRIDGE) {
+ } else if (netif_is_bridge_master(dev)) {
br = netdev_priv(dev);
vg = br_vlan_group_rcu(br);
}
@@ -1050,7 +1050,7 @@ int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
p = br_port_get_rtnl(dev);
/* We want to accept dev as bridge itself as well */
- if (!p && !(dev->priv_flags & IFF_EBRIDGE))
+ if (!p && !netif_is_bridge_master(dev))
return -EINVAL;
err = br_afspec(br, p, afspec, RTM_DELLINK, &changed, NULL);
@@ -1666,7 +1666,8 @@ static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
}
return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
- nla_total_size(sizeof(struct br_mcast_stats)) +
+ nla_total_size_64bit(sizeof(struct br_mcast_stats)) +
+ (p ? nla_total_size_64bit(sizeof(p->stp_xstats)) : 0) +
nla_total_size(0);
}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index b4cef3a97f12..37ca76406f1e 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,7 +82,7 @@ struct bridge_mcast_other_query {
struct bridge_mcast_querier {
struct br_ip addr;
int port_ifidx;
- seqcount_t seq;
+ seqcount_spinlock_t seq;
};
/* IGMP/MLD statistics */
@@ -1125,9 +1125,7 @@ static inline unsigned long br_multicast_lmqt(const struct net_bridge_mcast *brm
static inline unsigned long br_multicast_gmi(const struct net_bridge_mcast *brmctx)
{
- /* use the RFC default of 2 for QRV */
- return 2 * brmctx->multicast_query_interval +
- brmctx->multicast_query_response_interval;
+ return brmctx->multicast_membership_interval;
}
static inline bool
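
Switching the bridge queriers from seqcount_t to seqcount_spinlock_t ties the write side to multicast_lock so lockdep can verify it, which is why the explicit lockdep_assert_held_once() above could be dropped. The read side remains the usual retry loop; a minimal standalone sketch of that pattern using C11 atomics:

#include <stdatomic.h>

static _Atomic unsigned int seq;
static _Atomic int querier_ifidx;

static int read_querier(void)
{
	unsigned int s;
	int v;

	do {
		s = atomic_load(&seq);		/* odd: writer in progress */
		v = atomic_load(&querier_ifidx);
	} while ((s & 1) || atomic_load(&seq) != s);

	return v;
}

/* Writer, under the associated spinlock: seq++ (odd), update, seq++ (even). */
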
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index ba55851fe132..75204d36d7f9 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -233,7 +233,7 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
memcpy(oldaddr, br->bridge_id.addr, ETH_ALEN);
memcpy(br->bridge_id.addr, addr, ETH_ALEN);
- memcpy(br->dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(br->dev, addr);
list_for_each_entry(p, &br->port_list, list) {
if (ether_addr_equal(p->designated_bridge.addr, oldaddr))
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index a7af4eaff17d..1a11064f9990 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -66,7 +66,7 @@ static unsigned int ebt_broute(void *priv, struct sk_buff *skb,
NFPROTO_BRIDGE, s->in, NULL, NULL,
s->net, NULL);
- ret = ebt_do_table(skb, &state, priv);
+ ret = ebt_do_table(priv, skb, &state);
if (ret != NF_DROP)
return ret;
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index c0b121df4a9a..cb949436bc0e 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -58,28 +58,21 @@ static const struct ebt_table frame_filter = {
.me = THIS_MODULE,
};
-static unsigned int
-ebt_filter_hook(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ebt_do_table(skb, state, priv);
-}
-
static const struct nf_hook_ops ebt_ops_filter[] = {
{
- .hook = ebt_filter_hook,
+ .hook = ebt_do_table,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_IN,
.priority = NF_BR_PRI_FILTER_BRIDGED,
},
{
- .hook = ebt_filter_hook,
+ .hook = ebt_do_table,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_FORWARD,
.priority = NF_BR_PRI_FILTER_BRIDGED,
},
{
- .hook = ebt_filter_hook,
+ .hook = ebt_do_table,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_OUT,
.priority = NF_BR_PRI_FILTER_OTHER,
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 4078151c224f..5ee0531ae506 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -58,27 +58,21 @@ static const struct ebt_table frame_nat = {
.me = THIS_MODULE,
};
-static unsigned int ebt_nat_hook(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ebt_do_table(skb, state, priv);
-}
-
static const struct nf_hook_ops ebt_ops_nat[] = {
{
- .hook = ebt_nat_hook,
+ .hook = ebt_do_table,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_OUT,
.priority = NF_BR_PRI_NAT_DST_OTHER,
},
{
- .hook = ebt_nat_hook,
+ .hook = ebt_do_table,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_POST_ROUTING,
.priority = NF_BR_PRI_NAT_SRC,
},
{
- .hook = ebt_nat_hook,
+ .hook = ebt_do_table,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_NAT_DST_BRIDGED,
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 83d1798dfbb4..460d3064dc15 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -189,10 +189,10 @@ ebt_get_target_c(const struct ebt_entry *e)
}
/* Do some firewalling */
-unsigned int ebt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct ebt_table *table)
+unsigned int ebt_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
{
+ struct ebt_table *table = priv;
unsigned int hook = state->hook;
int i, nentries;
struct ebt_entry *point;
@@ -926,7 +926,9 @@ static int translate_table(struct net *net, const char *name,
return -ENOMEM;
for_each_possible_cpu(i) {
newinfo->chainstack[i] =
- vmalloc(array_size(udc_cnt, sizeof(*(newinfo->chainstack[0]))));
+ vmalloc_node(array_size(udc_cnt,
+ sizeof(*(newinfo->chainstack[0]))),
+ cpu_to_node(i));
if (!newinfo->chainstack[i]) {
while (i)
vfree(newinfo->chainstack[--i]);
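
ebt_do_table() now simply adopts the nf_hookfn signature (priv first), so the one-line ebt_filter_hook/ebt_nat_hook trampolines can be deleted and the table passed as the hook's private data. The shape of the change, standalone:

#include <stdio.h>

struct table { const char *name; };

typedef unsigned int (*hook_fn)(void *priv, int pkt);

static unsigned int do_table(void *priv, int pkt)
{
	struct table *t = priv;		/* priv carries the table */

	printf("%s: packet %d accepted\n", t->name, pkt);
	return 1;			/* NF_ACCEPT analogue */
}

int main(void)
{
	struct table filter = { "filter" };
	hook_fn hook = do_table;	/* no wrapper: signatures match */

	return hook(&filter, 42) == 1 ? 0 : 1;
}
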
diff --git a/net/can/isotp.c b/net/can/isotp.c
index caaa532ece94..df6968b28bf4 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -121,7 +121,7 @@ enum {
struct tpcon {
int idx;
int len;
- u8 state;
+ u32 state;
u8 bs;
u8 sn;
u8 ll_dl;
@@ -848,6 +848,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct isotp_sock *so = isotp_sk(sk);
+ u32 old_state = so->tx.state;
struct sk_buff *skb;
struct net_device *dev;
struct canfd_frame *cf;
@@ -860,45 +861,55 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
return -EADDRNOTAVAIL;
/* we do not support multiple buffers - for now */
- if (so->tx.state != ISOTP_IDLE || wq_has_sleeper(&so->wait)) {
- if (msg->msg_flags & MSG_DONTWAIT)
- return -EAGAIN;
+ if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE ||
+ wq_has_sleeper(&so->wait)) {
+ if (msg->msg_flags & MSG_DONTWAIT) {
+ err = -EAGAIN;
+ goto err_out;
+ }
/* wait for complete transmission of current pdu */
- wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+ err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+ if (err)
+ goto err_out;
}
- if (!size || size > MAX_MSG_LENGTH)
- return -EINVAL;
+ if (!size || size > MAX_MSG_LENGTH) {
+ err = -EINVAL;
+ goto err_out;
+ }
/* take care of a potential SF_DL ESC offset for TX_DL > 8 */
off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0;
/* does the given data fit into a single frame for SF_BROADCAST? */
if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) &&
- (size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off))
- return -EINVAL;
+ (size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) {
+ err = -EINVAL;
+ goto err_out;
+ }
err = memcpy_from_msg(so->tx.buf, msg, size);
if (err < 0)
- return err;
+ goto err_out;
dev = dev_get_by_index(sock_net(sk), so->ifindex);
- if (!dev)
- return -ENXIO;
+ if (!dev) {
+ err = -ENXIO;
+ goto err_out;
+ }
skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv),
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb) {
dev_put(dev);
- return err;
+ goto err_out;
}
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
- so->tx.state = ISOTP_SENDING;
so->tx.len = size;
so->tx.idx = 0;
@@ -954,15 +965,25 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (err) {
pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
__func__, ERR_PTR(err));
- return err;
+ goto err_out;
}
if (wait_tx_done) {
/* wait for complete transmission of current pdu */
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+
+ if (sk->sk_err)
+ return -sk->sk_err;
}
return size;
+
+err_out:
+ so->tx.state = old_state;
+ if (so->tx.state == ISOTP_IDLE)
+ wake_up_interruptible(&so->wait);
+
+ return err;
}
static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
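
The isotp_sendmsg() fix replaces a racy `state == ISOTP_IDLE` test with cmpxchg(), so exactly one sender can move tx.state from IDLE to SENDING, and every error path rolls the state back and wakes waiters. The core of the idea in portable C11:

#include <errno.h>
#include <stdatomic.h>

enum { ISOTP_IDLE, ISOTP_SENDING };
static _Atomic int tx_state = ISOTP_IDLE;

static int claim_tx(void)
{
	int expected = ISOTP_IDLE;

	/* Exactly one caller wins the IDLE -> SENDING transition. */
	if (!atomic_compare_exchange_strong(&tx_state, &expected,
					    ISOTP_SENDING))
		return -EAGAIN;
	return 0;
}

static void rollback_tx(void)
{
	atomic_store(&tx_state, ISOTP_IDLE);	/* the err_out path */
	/* ...then wake any sleeper waiting for IDLE... */
}
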
diff --git a/net/can/j1939/j1939-priv.h b/net/can/j1939/j1939-priv.h
index f6df20808f5e..16af1a7f80f6 100644
--- a/net/can/j1939/j1939-priv.h
+++ b/net/can/j1939/j1939-priv.h
@@ -330,6 +330,7 @@ int j1939_session_activate(struct j1939_session *session);
void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec);
void j1939_session_timers_cancel(struct j1939_session *session);
+#define J1939_MIN_TP_PACKET_SIZE 9
#define J1939_MAX_TP_PACKET_SIZE (7 * 0xff)
#define J1939_MAX_ETP_PACKET_SIZE (7 * 0x00ffffff)
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index 08c8606cfd9c..9bc55ecb37f9 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -249,11 +249,14 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
struct j1939_priv *priv, *priv_new;
int ret;
- priv = j1939_priv_get_by_ndev(ndev);
+ spin_lock(&j1939_netdev_lock);
+ priv = j1939_priv_get_by_ndev_locked(ndev);
if (priv) {
kref_get(&priv->rx_kref);
+ spin_unlock(&j1939_netdev_lock);
return priv;
}
+ spin_unlock(&j1939_netdev_lock);
priv = j1939_priv_create(ndev);
if (!priv)
@@ -269,10 +272,10 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
/* Someone was faster than us, use their priv and roll
* back our's.
*/
+ kref_get(&priv_new->rx_kref);
spin_unlock(&j1939_netdev_lock);
dev_put(ndev);
kfree(priv);
- kref_get(&priv_new->rx_kref);
return priv_new;
}
j1939_priv_set(ndev, priv);
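
Both j1939/main.c hunks close the same window: the reference count must be bumped while the lookup lock is still held, otherwise the object can be freed between "found it" and "grabbed it". The correct shape, as a pthread sketch:

#include <pthread.h>
#include <stddef.h>

struct priv { int refs; };

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static struct priv *registered;

static struct priv *priv_get(void)
{
	struct priv *p;

	pthread_mutex_lock(&reg_lock);
	p = registered;
	if (p)
		p->refs++;	/* bump before anyone can free it */
	pthread_mutex_unlock(&reg_lock);
	return p;
}
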
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index bb5c4b8979be..6c0a0ebdd024 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1237,12 +1237,11 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
session->err = -ETIME;
j1939_session_deactivate(session);
} else {
- netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
- __func__, session);
-
j1939_session_list_lock(session->priv);
if (session->state >= J1939_SESSION_ACTIVE &&
session->state < J1939_SESSION_ACTIVE_MAX) {
+ netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
+ __func__, session);
j1939_session_get(session);
hrtimer_start(&session->rxtimer,
ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS),
@@ -1609,6 +1608,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
abort = J1939_XTP_ABORT_FAULT;
else if (len > priv->tp_max_packet_size)
abort = J1939_XTP_ABORT_RESOURCE;
+ else if (len < J1939_MIN_TP_PACKET_SIZE)
+ abort = J1939_XTP_ABORT_FAULT;
}
if (abort != J1939_XTP_NO_ABORT) {
@@ -1789,6 +1790,7 @@ static void j1939_xtp_rx_dpo(struct j1939_priv *priv, struct sk_buff *skb,
static void j1939_xtp_rx_dat_one(struct j1939_session *session,
struct sk_buff *skb)
{
+ enum j1939_xtp_abort abort = J1939_XTP_ABORT_FAULT;
struct j1939_priv *priv = session->priv;
struct j1939_sk_buff_cb *skcb, *se_skcb;
struct sk_buff *se_skb = NULL;
@@ -1803,9 +1805,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
skcb = j1939_skb_to_cb(skb);
dat = skb->data;
- if (skb->len <= 1)
+ if (skb->len != 8) {
/* makes no sense */
+ abort = J1939_XTP_ABORT_UNEXPECTED_DATA;
goto out_session_cancel;
+ }
switch (session->last_cmd) {
case 0xff:
@@ -1904,7 +1908,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
out_session_cancel:
kfree_skb(se_skb);
j1939_session_timers_cancel(session);
- j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
+ j1939_session_cancel(session, abort);
j1939_session_put(session);
}
diff --git a/net/core/Makefile b/net/core/Makefile
index 35ced6201814..4268846f2f47 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -36,3 +36,4 @@ obj-$(CONFIG_FAILOVER) += failover.o
obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
obj-$(CONFIG_BPF_SYSCALL) += sock_map.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
+obj-$(CONFIG_OF) += of_net.o
diff --git a/net/core/dev.c b/net/core/dev.c
index f930329f0dc2..4e3d19a06de4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -140,7 +140,7 @@
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
-#include <linux/netfilter_ingress.h>
+#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
@@ -303,6 +303,12 @@ static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
return NULL;
}
+bool netdev_name_in_use(struct net *net, const char *name)
+{
+ return netdev_name_node_lookup(net, name);
+}
+EXPORT_SYMBOL(netdev_name_in_use);
+
int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
struct netdev_name_node *name_node;
@@ -1133,7 +1139,7 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
}
snprintf(buf, IFNAMSIZ, name, i);
- if (!__dev_get_by_name(net, buf))
+ if (!netdev_name_in_use(net, buf))
return i;
/* It is possible to run out of possible slots
@@ -1187,7 +1193,7 @@ static int dev_get_valid_name(struct net *net, struct net_device *dev,
if (strchr(name, '%'))
return dev_alloc_name_ns(net, dev, name);
- else if (__dev_get_by_name(net, name))
+ else if (netdev_name_in_use(net, name))
return -EEXIST;
else if (dev->name != name)
strlcpy(dev->name, name, IFNAMSIZ);
@@ -1290,8 +1296,8 @@ rollback:
old_assign_type = NET_NAME_RENAMED;
goto rollback;
} else {
- pr_err("%s: name change rollback failed: %d\n",
- dev->name, ret);
+ netdev_err(dev, "name change rollback failed: %d\n",
+ ret);
}
}
@@ -2345,7 +2351,7 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
/* If TC0 is invalidated disable TC mapping */
if (tc->offset + tc->count > txq) {
- pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
+ netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
dev->num_tc = 0;
return;
}
@@ -2356,8 +2362,8 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
tc = &dev->tc_to_txq[q];
if (tc->offset + tc->count > txq) {
- pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
- i, q);
+ netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
+ i, q);
netdev_set_prio_tc_map(dev, i, 0);
}
}
@@ -3410,7 +3416,7 @@ EXPORT_SYMBOL(__skb_gso_segment);
#ifdef CONFIG_BUG
static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
- pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
+ netdev_err(dev, "hw csum failure\n");
skb_dump(KERN_ERR, skb, true);
dump_stack();
}
@@ -3920,6 +3926,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
+#ifdef CONFIG_NET_CLS_ACT
struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
struct tcf_result cl_res;
@@ -3955,6 +3962,7 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
default:
break;
}
+#endif /* CONFIG_NET_CLS_ACT */
return skb;
}
@@ -4148,13 +4156,20 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
qdisc_pkt_len_init(skb);
#ifdef CONFIG_NET_CLS_ACT
skb->tc_at_ingress = 0;
-# ifdef CONFIG_NET_EGRESS
+#endif
+#ifdef CONFIG_NET_EGRESS
if (static_branch_unlikely(&egress_needed_key)) {
+ if (nf_hook_egress_active()) {
+ skb = nf_hook_egress(skb, &rc, dev);
+ if (!skb)
+ goto out;
+ }
+ nf_skip_egress(skb, true);
skb = sch_handle_egress(skb, &rc, dev);
if (!skb)
goto out;
+ nf_skip_egress(skb, false);
}
-# endif
#endif
/* If device/qdisc don't need skb->dst, release it right now while
* its hot in this cpu cache.
@@ -5296,6 +5311,7 @@ skip_taps:
if (static_branch_unlikely(&ingress_needed_key)) {
bool another = false;
+ nf_skip_egress(skb, true);
skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
&another);
if (another)
@@ -5303,6 +5319,7 @@ skip_taps:
if (!skb)
goto out;
+ nf_skip_egress(skb, false);
if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
goto out;
}
@@ -5839,7 +5856,7 @@ static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int se
gro_normal_list(napi);
}
-static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
+static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
struct packet_offload *ptype;
__be16 type = skb->protocol;
@@ -5868,12 +5885,11 @@ static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
if (err) {
WARN_ON(&ptype->list == head);
kfree_skb(skb);
- return NET_RX_SUCCESS;
+ return;
}
out:
gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
- return NET_RX_SUCCESS;
}
static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
@@ -6900,19 +6916,25 @@ EXPORT_SYMBOL(netif_napi_add);
void napi_disable(struct napi_struct *n)
{
+ unsigned long val, new;
+
might_sleep();
set_bit(NAPI_STATE_DISABLE, &n->state);
- while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
- msleep(1);
- while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
- msleep(1);
+ /* Recompute "new" on every pass: a do/while whose "continue" jumps
+ * straight to the cmpxchg would test it with a stale or, on the first
+ * iteration, uninitialized value.
+ */
+ for (;;) {
+ val = READ_ONCE(n->state);
+ if (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
+ usleep_range(20, 200);
+ continue;
+ }
+
+ new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
+ new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
+
+ if (cmpxchg(&n->state, val, new) == val)
+ break;
+ }
hrtimer_cancel(&n->timer);
- clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
clear_bit(NAPI_STATE_DISABLE, &n->state);
- clear_bit(NAPI_STATE_THREADED, &n->state);
}
EXPORT_SYMBOL(napi_disable);
@@ -6925,12 +6947,16 @@ EXPORT_SYMBOL(napi_disable);
*/
void napi_enable(struct napi_struct *n)
{
- BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
- smp_mb__before_atomic();
- clear_bit(NAPI_STATE_SCHED, &n->state);
- clear_bit(NAPI_STATE_NPSVC, &n->state);
- if (n->dev->threaded && n->thread)
- set_bit(NAPI_STATE_THREADED, &n->state);
+ unsigned long val, new;
+
+ do {
+ val = READ_ONCE(n->state);
+ BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
+
+ new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
+ if (n->dev->threaded && n->thread)
+ new |= NAPIF_STATE_THREADED;
+ } while (cmpxchg(&n->state, val, new) != val);
}
EXPORT_SYMBOL(napi_enable);
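
Both helpers above now move NAPI through its state bits with a single compare-and-swap over the whole word, so SCHED, NPSVC, THREADED and PREFER_BUSY_POLL change together or not at all. A minimal userspace analogue of that retry pattern (bit names and values are made up for the sketch):

/* Userspace sketch of the cmpxchg retry loop; ST_* values are
 * illustrative only.
 */
#include <stdatomic.h>

#define ST_SCHED 0x1UL
#define ST_NPSVC 0x2UL

static void demo_state_acquire(atomic_ulong *state)
{
	unsigned long val, new;

	for (;;) {
		val = atomic_load(state);
		if (val & (ST_SCHED | ST_NPSVC))
			continue;	/* busy; the kernel sleeps here instead */

		new = val | ST_SCHED | ST_NPSVC;
		if (atomic_compare_exchange_weak(state, &val, new))
			break;		/* all bits updated atomically */
	}
}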
@@ -6986,8 +7012,8 @@ static int __napi_poll(struct napi_struct *n, bool *repoll)
}
if (unlikely(work > weight))
- pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
- n->poll, work, weight);
+ netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
+ n->poll, work, weight);
if (likely(work < weight))
return work;
@@ -8541,8 +8567,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
dev->flags &= ~IFF_PROMISC;
else {
dev->promiscuity -= inc;
- pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
- dev->name);
+ netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
return -EOVERFLOW;
}
}
@@ -8612,8 +8637,7 @@ static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
dev->flags &= ~IFF_ALLMULTI;
else {
dev->allmulti -= inc;
- pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
- dev->name);
+ netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
return -EOVERFLOW;
}
}
@@ -9150,14 +9174,11 @@ int dev_get_port_parent_id(struct net_device *dev,
}
err = devlink_compat_switch_id_get(dev, ppid);
- if (!err || err != -EOPNOTSUPP)
+ if (!recurse || err != -EOPNOTSUPP)
return err;
- if (!recurse)
- return -EOPNOTSUPP;
-
netdev_for_each_lower_dev(dev, lower_dev, iter) {
- err = dev_get_port_parent_id(lower_dev, ppid, recurse);
+ err = dev_get_port_parent_id(lower_dev, ppid, true);
if (err)
break;
if (!first.id_len)
@@ -10858,7 +10879,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
if (!dev->ethtool_ops)
dev->ethtool_ops = &default_ethtool_ops;
- nf_hook_ingress_init(dev);
+ nf_hook_netdev_init(dev);
return dev;
@@ -11144,7 +11165,7 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
* we can use it in the destination network namespace.
*/
err = -EEXIST;
- if (__dev_get_by_name(net, dev->name)) {
+ if (netdev_name_in_use(net, dev->name)) {
/* We get here if we can't use the current device name */
if (!pat)
goto out;
@@ -11497,7 +11518,7 @@ static void __net_exit default_device_exit(struct net *net)
/* Push remaining network devices to init_net */
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
- if (__dev_get_by_name(&init_net, fb_name))
+ if (netdev_name_in_use(&init_net, fb_name))
snprintf(fb_name, IFNAMSIZ, "dev%%d");
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 8c39283c26ae..f0cb38344126 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -50,6 +50,11 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
if (addr_len > MAX_ADDR_LEN)
return -EINVAL;
+ ha = list_first_entry_or_null(&list->list, struct netdev_hw_addr, list);
+ if (ha && !memcmp(addr, ha->addr, addr_len) &&
+ (!addr_type || addr_type == ha->type))
+ goto found_it;
+
while (*ins_point) {
int diff;
@@ -64,6 +69,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
} else if (diff > 0) {
ins_point = &parent->rb_right;
} else {
+found_it:
if (exclusive)
return -EEXIST;
if (global) {
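
The fast path added to __hw_addr_add_ex() peeks at the head of the address list before walking the rb-tree, since the address being re-added is very often already first in the list; the NULL-safe variant above keeps an empty list from producing a bogus entry pointer. The peek in isolation (types are illustrative):

/* Sketch of the head-of-list fast path; struct demo_addr stands in for
 * struct netdev_hw_addr.
 */
struct demo_addr {
	struct list_head list;
	unsigned char addr[MAX_ADDR_LEN];
};

static struct demo_addr *demo_fast_match(struct list_head *head,
					 const unsigned char *addr,
					 int addr_len)
{
	struct demo_addr *first;

	first = list_first_entry_or_null(head, struct demo_addr, list);
	if (first && !memcmp(first->addr, addr, addr_len))
		return first;
	return NULL;	/* fall back to the rb-tree walk */
}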
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 7d975057c2a9..3464854015a2 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -30,6 +30,63 @@
#define CREATE_TRACE_POINTS
#include <trace/events/devlink.h>
+#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \
+ (__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX)
+
+struct devlink_dev_stats {
+ u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
+ u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
+};
+
+struct devlink {
+ u32 index;
+ struct list_head port_list;
+ struct list_head rate_list;
+ struct list_head sb_list;
+ struct list_head dpipe_table_list;
+ struct list_head resource_list;
+ struct list_head param_list;
+ struct list_head region_list;
+ struct list_head reporter_list;
+ struct mutex reporters_lock; /* protects reporter_list */
+ struct devlink_dpipe_headers *dpipe_headers;
+ struct list_head trap_list;
+ struct list_head trap_group_list;
+ struct list_head trap_policer_list;
+ const struct devlink_ops *ops;
+ u64 features;
+ struct xarray snapshot_ids;
+ struct devlink_dev_stats stats;
+ struct device *dev;
+ possible_net_t _net;
+ /* Serializes access to devlink instance specific objects such as
+ * port, sb, dpipe, resource, params, region, traps and more.
+ */
+ struct mutex lock;
+ u8 reload_failed:1;
+ refcount_t refcount;
+ struct completion comp;
+ char priv[0] __aligned(NETDEV_ALIGN);
+};
+
+void *devlink_priv(struct devlink *devlink)
+{
+ return &devlink->priv;
+}
+EXPORT_SYMBOL_GPL(devlink_priv);
+
+struct devlink *priv_to_devlink(void *priv)
+{
+ return container_of(priv, struct devlink, priv);
+}
+EXPORT_SYMBOL_GPL(priv_to_devlink);
+
+struct device *devlink_to_dev(const struct devlink *devlink)
+{
+ return devlink->dev;
+}
+EXPORT_SYMBOL_GPL(devlink_to_dev);
+
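
With struct devlink now private to devlink.c, drivers can only reach their private area through the accessors above. The round-trip relies on priv being the final member, so simple pointer arithmetic recovers the container; a standalone sketch under that assumption (struct demo stands in for struct devlink):

/* Sketch of the priv <-> container round-trip behind devlink_priv()
 * and priv_to_devlink(); names are illustrative.
 */
#include <stddef.h>

struct demo {
	int index;
	char priv[];	/* driver-private tail storage */
};

static void *demo_priv(struct demo *d)
{
	return &d->priv;
}

static struct demo *priv_to_demo(void *priv)
{
	return (struct demo *)((char *)priv - offsetof(struct demo, priv));
}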
static struct devlink_dpipe_field devlink_dpipe_fields_ethernet[] = {
{
.name = "destination mac",
@@ -95,6 +152,22 @@ static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_
static DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
#define DEVLINK_REGISTERED XA_MARK_1
+/* devlink instances are open to access from user space once
+ * devlink_register() has been called. This logical barrier allows us to
+ * have certain expectations related to locking.
+ *
+ * Before *_register() - we are in the initialization stage and no
+ * parallel access to the devlink instance is possible. All drivers
+ * perform that phase while implicitly holding the device_lock.
+ *
+ * After *_register() - users and the driver can access the devlink
+ * instance at the same time.
+ */
+#define ASSERT_DEVLINK_REGISTERED(d) \
+ WARN_ON_ONCE(!xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
+#define ASSERT_DEVLINK_NOT_REGISTERED(d) \
+ WARN_ON_ONCE(xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
+
/* devlink_mutex
*
* An overall lock guarding every operation coming from userspace.
@@ -742,6 +815,7 @@ static void devlink_notify(struct devlink *devlink, enum devlink_command cmd)
int err;
WARN_ON(cmd != DEVLINK_CMD_NEW && cmd != DEVLINK_CMD_DEL);
+ WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED));
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
@@ -1040,11 +1114,15 @@ nla_put_failure:
static void devlink_port_notify(struct devlink_port *devlink_port,
enum devlink_command cmd)
{
+ struct devlink *devlink = devlink_port->devlink;
struct sk_buff *msg;
int err;
WARN_ON(cmd != DEVLINK_CMD_PORT_NEW && cmd != DEVLINK_CMD_PORT_DEL);
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
+
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return;
@@ -1055,19 +1133,22 @@ static void devlink_port_notify(struct devlink_port *devlink_port,
return;
}
- genlmsg_multicast_netns(&devlink_nl_family,
- devlink_net(devlink_port->devlink), msg, 0,
- DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
+ 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
}
static void devlink_rate_notify(struct devlink_rate *devlink_rate,
enum devlink_command cmd)
{
+ struct devlink *devlink = devlink_rate->devlink;
struct sk_buff *msg;
int err;
WARN_ON(cmd != DEVLINK_CMD_RATE_NEW && cmd != DEVLINK_CMD_RATE_DEL);
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
+
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return;
@@ -1078,9 +1159,8 @@ static void devlink_rate_notify(struct devlink_rate *devlink_rate,
return;
}
- genlmsg_multicast_netns(&devlink_nl_family,
- devlink_net(devlink_rate->devlink), msg, 0,
- DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
+ 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
}
static int devlink_nl_cmd_rate_get_dumpit(struct sk_buff *msg,
@@ -3952,9 +4032,6 @@ static int devlink_reload(struct devlink *devlink, struct net *dest_net,
struct net *curr_net;
int err;
- if (!devlink->reload_enabled)
- return -EOPNOTSUPP;
-
memcpy(remote_reload_stats, devlink->stats.remote_reload_stats,
sizeof(remote_reload_stats));
@@ -4022,7 +4099,7 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
u32 actions_performed;
int err;
- if (!devlink_reload_supported(devlink->ops))
+ if (!(devlink->features & DEVLINK_F_RELOAD))
return -EOPNOTSUPP;
err = devlink_resources_validate(devlink, NULL, info);
@@ -4150,6 +4227,7 @@ static void __devlink_flash_update_notify(struct devlink *devlink,
WARN_ON(cmd != DEVLINK_CMD_FLASH_UPDATE &&
cmd != DEVLINK_CMD_FLASH_UPDATE_END &&
cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS);
+ WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED));
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
@@ -5070,6 +5148,11 @@ static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink,
if (err)
goto nla_put_failure;
+ err = nla_put_u32(msg, DEVLINK_ATTR_REGION_MAX_SNAPSHOTS,
+ region->max_snapshots);
+ if (err)
+ goto nla_put_failure;
+
err = devlink_nl_region_snapshots_id_put(msg, devlink, region);
if (err)
goto nla_put_failure;
@@ -5145,17 +5228,19 @@ static void devlink_nl_region_notify(struct devlink_region *region,
struct devlink_snapshot *snapshot,
enum devlink_command cmd)
{
+ struct devlink *devlink = region->devlink;
struct sk_buff *msg;
WARN_ON(cmd != DEVLINK_CMD_REGION_NEW && cmd != DEVLINK_CMD_REGION_DEL);
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
msg = devlink_nl_region_notify_build(region, snapshot, cmd, 0, 0);
if (IS_ERR(msg))
return;
- genlmsg_multicast_netns(&devlink_nl_family,
- devlink_net(region->devlink), msg, 0,
- DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
+ 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
}
/**
@@ -6920,10 +7005,12 @@ genlmsg_cancel:
static void devlink_recover_notify(struct devlink_health_reporter *reporter,
enum devlink_command cmd)
{
+ struct devlink *devlink = reporter->devlink;
struct sk_buff *msg;
int err;
WARN_ON(cmd != DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
+ WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED));
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
@@ -6935,9 +7022,8 @@ static void devlink_recover_notify(struct devlink_health_reporter *reporter,
return;
}
- genlmsg_multicast_netns(&devlink_nl_family,
- devlink_net(reporter->devlink),
- msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
+ 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
}
void
@@ -8897,6 +8983,25 @@ static bool devlink_reload_actions_valid(const struct devlink_ops *ops)
}
/**
+ * devlink_set_features - Set devlink supported features
+ *
+ * @devlink: devlink
+ * @features: devlink supported features
+ *
+ * This interface allows us to declare supported features (such as
+ * reload) separately from devlink_alloc().
+ */
+void devlink_set_features(struct devlink *devlink, u64 features)
+{
+ ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+
+ WARN_ON(features & DEVLINK_F_RELOAD &&
+ !devlink_reload_supported(devlink->ops));
+ devlink->features = features;
+}
+EXPORT_SYMBOL_GPL(devlink_set_features);
+
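
Given the asserts above, the intended call order is allocate, set features, then register. A hedged probe-path sketch, assuming the three-argument devlink_alloc() of this series; the driver names are hypothetical and error handling is elided:

/* Hypothetical probe path showing where devlink_set_features() slots
 * in; demo_devlink_ops and struct demo_dev are stand-ins.
 */
static int demo_probe(struct device *dev)
{
	struct devlink *devlink;

	devlink = devlink_alloc(&demo_devlink_ops,
				sizeof(struct demo_dev), dev);
	if (!devlink)
		return -ENOMEM;

	/* must happen before devlink_register() */
	devlink_set_features(devlink, DEVLINK_F_RELOAD);
	devlink_register(devlink);
	return 0;
}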
+/**
* devlink_alloc_ns - Allocate new devlink instance resources
* in specific namespace
*
@@ -8955,6 +9060,84 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
}
EXPORT_SYMBOL_GPL(devlink_alloc_ns);
+static void
+devlink_trap_policer_notify(struct devlink *devlink,
+ const struct devlink_trap_policer_item *policer_item,
+ enum devlink_command cmd);
+static void
+devlink_trap_group_notify(struct devlink *devlink,
+ const struct devlink_trap_group_item *group_item,
+ enum devlink_command cmd);
+static void devlink_trap_notify(struct devlink *devlink,
+ const struct devlink_trap_item *trap_item,
+ enum devlink_command cmd);
+
+static void devlink_notify_register(struct devlink *devlink)
+{
+ struct devlink_trap_policer_item *policer_item;
+ struct devlink_trap_group_item *group_item;
+ struct devlink_trap_item *trap_item;
+ struct devlink_port *devlink_port;
+ struct devlink_rate *rate_node;
+ struct devlink_region *region;
+
+ devlink_notify(devlink, DEVLINK_CMD_NEW);
+ list_for_each_entry(devlink_port, &devlink->port_list, list)
+ devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+
+ list_for_each_entry(policer_item, &devlink->trap_policer_list, list)
+ devlink_trap_policer_notify(devlink, policer_item,
+ DEVLINK_CMD_TRAP_POLICER_NEW);
+
+ list_for_each_entry(group_item, &devlink->trap_group_list, list)
+ devlink_trap_group_notify(devlink, group_item,
+ DEVLINK_CMD_TRAP_GROUP_NEW);
+
+ list_for_each_entry(trap_item, &devlink->trap_list, list)
+ devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_NEW);
+
+ list_for_each_entry(rate_node, &devlink->rate_list, list)
+ devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_NEW);
+
+ list_for_each_entry(region, &devlink->region_list, list)
+ devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
+
+ devlink_params_publish(devlink);
+}
+
+static void devlink_notify_unregister(struct devlink *devlink)
+{
+ struct devlink_trap_policer_item *policer_item;
+ struct devlink_trap_group_item *group_item;
+ struct devlink_trap_item *trap_item;
+ struct devlink_port *devlink_port;
+ struct devlink_rate *rate_node;
+ struct devlink_region *region;
+
+ devlink_params_unpublish(devlink);
+
+ list_for_each_entry_reverse(region, &devlink->region_list, list)
+ devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
+
+ list_for_each_entry_reverse(rate_node, &devlink->rate_list, list)
+ devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_DEL);
+
+ list_for_each_entry_reverse(trap_item, &devlink->trap_list, list)
+ devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_DEL);
+
+ list_for_each_entry_reverse(group_item, &devlink->trap_group_list, list)
+ devlink_trap_group_notify(devlink, group_item,
+ DEVLINK_CMD_TRAP_GROUP_DEL);
+ list_for_each_entry_reverse(policer_item, &devlink->trap_policer_list,
+ list)
+ devlink_trap_policer_notify(devlink, policer_item,
+ DEVLINK_CMD_TRAP_POLICER_DEL);
+
+ list_for_each_entry_reverse(devlink_port, &devlink->port_list, list)
+ devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
+ devlink_notify(devlink, DEVLINK_CMD_DEL);
+}
+
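
devlink_notify_unregister() walks the object lists in exactly the reverse order of devlink_notify_register(), so userspace never observes a port, trap or region notification without its owning instance. A toy illustration of that mirrored ordering, purely demonstrative:

/* Toy illustration: objects announced parent-first are withdrawn
 * child-first.
 */
static const char * const demo_objs[] = {
	"instance", "port", "trap", "region",
};

static void demo_announce(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(demo_objs); i++)
		pr_info("NEW %s\n", demo_objs[i]);
}

static void demo_withdraw(void)
{
	size_t i;

	for (i = ARRAY_SIZE(demo_objs); i-- > 0;)
		pr_info("DEL %s\n", demo_objs[i]);
}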
/**
* devlink_register - Register devlink instance
*
@@ -8962,9 +9145,12 @@ EXPORT_SYMBOL_GPL(devlink_alloc_ns);
*/
void devlink_register(struct devlink *devlink)
{
+ ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+ /* Make sure that we are in the .probe() routine */
+
mutex_lock(&devlink_mutex);
xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
- devlink_notify(devlink, DEVLINK_CMD_NEW);
+ devlink_notify_register(devlink);
mutex_unlock(&devlink_mutex);
}
EXPORT_SYMBOL_GPL(devlink_register);
@@ -8976,60 +9162,28 @@ EXPORT_SYMBOL_GPL(devlink_register);
*/
void devlink_unregister(struct devlink *devlink)
{
+ ASSERT_DEVLINK_REGISTERED(devlink);
+ /* Make sure that we are in the .remove() routine */
+
devlink_put(devlink);
wait_for_completion(&devlink->comp);
mutex_lock(&devlink_mutex);
- WARN_ON(devlink_reload_supported(devlink->ops) &&
- devlink->reload_enabled);
- devlink_notify(devlink, DEVLINK_CMD_DEL);
+ devlink_notify_unregister(devlink);
xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
mutex_unlock(&devlink_mutex);
}
EXPORT_SYMBOL_GPL(devlink_unregister);
/**
- * devlink_reload_enable - Enable reload of devlink instance
- *
- * @devlink: devlink
- *
- * Should be called at end of device initialization
- * process when reload operation is supported.
- */
-void devlink_reload_enable(struct devlink *devlink)
-{
- mutex_lock(&devlink_mutex);
- devlink->reload_enabled = true;
- mutex_unlock(&devlink_mutex);
-}
-EXPORT_SYMBOL_GPL(devlink_reload_enable);
-
-/**
- * devlink_reload_disable - Disable reload of devlink instance
- *
- * @devlink: devlink
- *
- * Should be called at the beginning of device cleanup
- * process when reload operation is supported.
- */
-void devlink_reload_disable(struct devlink *devlink)
-{
- mutex_lock(&devlink_mutex);
- /* Mutex is taken which ensures that no reload operation is in
- * progress while setting up forbidded flag.
- */
- devlink->reload_enabled = false;
- mutex_unlock(&devlink_mutex);
-}
-EXPORT_SYMBOL_GPL(devlink_reload_disable);
-
-/**
* devlink_free - Free devlink instance resources
*
* @devlink: devlink
*/
void devlink_free(struct devlink *devlink)
{
+ ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+
mutex_destroy(&devlink->reporters_lock);
mutex_destroy(&devlink->lock);
WARN_ON(!list_empty(&devlink->trap_policer_list));
@@ -10086,6 +10240,9 @@ void devlink_params_publish(struct devlink *devlink)
{
struct devlink_param_item *param_item;
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
+
list_for_each_entry(param_item, &devlink->param_list, list) {
if (param_item->published)
continue;
@@ -10118,54 +10275,25 @@ void devlink_params_unpublish(struct devlink *devlink)
EXPORT_SYMBOL_GPL(devlink_params_unpublish);
/**
- * devlink_port_params_register - register port configuration parameters
- *
- * @devlink_port: devlink port
- * @params: configuration parameters array
- * @params_count: number of parameters provided
+ * devlink_param_driverinit_value_get - get configuration parameter
+ * value for driver initializing
*
- * Register the configuration parameters supported by the port.
- */
-int devlink_port_params_register(struct devlink_port *devlink_port,
- const struct devlink_param *params,
- size_t params_count)
-{
- return __devlink_params_register(devlink_port->devlink,
- devlink_port->index,
- &devlink_port->param_list, params,
- params_count,
- DEVLINK_CMD_PORT_PARAM_NEW,
- DEVLINK_CMD_PORT_PARAM_DEL);
-}
-EXPORT_SYMBOL_GPL(devlink_port_params_register);
-
-/**
- * devlink_port_params_unregister - unregister port configuration
- * parameters
+ * @devlink: devlink
+ * @param_id: parameter ID
+ * @init_val: value of parameter in driverinit configuration mode
*
- * @devlink_port: devlink port
- * @params: configuration parameters array
- * @params_count: number of parameters provided
+ * This function should be used by the driver to get driverinit
+ * configuration for initialization after reload command.
*/
-void devlink_port_params_unregister(struct devlink_port *devlink_port,
- const struct devlink_param *params,
- size_t params_count)
-{
- return __devlink_params_unregister(devlink_port->devlink,
- devlink_port->index,
- &devlink_port->param_list,
- params, params_count,
- DEVLINK_CMD_PORT_PARAM_DEL);
-}
-EXPORT_SYMBOL_GPL(devlink_port_params_unregister);
-
-static int
-__devlink_param_driverinit_value_get(struct list_head *param_list, u32 param_id,
- union devlink_param_value *init_val)
+int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *init_val)
{
struct devlink_param_item *param_item;
- param_item = devlink_param_find_by_id(param_list, param_id);
+ if (!devlink_reload_supported(devlink->ops))
+ return -EOPNOTSUPP;
+
+ param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
if (!param_item)
return -EINVAL;
@@ -10181,17 +10309,26 @@ __devlink_param_driverinit_value_get(struct list_head *param_list, u32 param_id,
return 0;
}
+EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get);
-static int
-__devlink_param_driverinit_value_set(struct devlink *devlink,
- unsigned int port_index,
- struct list_head *param_list, u32 param_id,
- union devlink_param_value init_val,
- enum devlink_command cmd)
+/**
+ * devlink_param_driverinit_value_set - set value of configuration
+ * parameter for driverinit
+ * configuration mode
+ *
+ * @devlink: devlink
+ * @param_id: parameter ID
+ * @init_val: value of parameter to set for driverinit configuration mode
+ *
+ * This function should be used by the driver to set driverinit
+ * configuration mode default value.
+ */
+int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val)
{
struct devlink_param_item *param_item;
- param_item = devlink_param_find_by_id(param_list, param_id);
+ param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
if (!param_item)
return -EINVAL;
@@ -10205,52 +10342,9 @@ __devlink_param_driverinit_value_set(struct devlink *devlink,
param_item->driverinit_value = init_val;
param_item->driverinit_value_valid = true;
- devlink_param_notify(devlink, port_index, param_item, cmd);
+ devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
return 0;
}
-
-/**
- * devlink_param_driverinit_value_get - get configuration parameter
- * value for driver initializing
- *
- * @devlink: devlink
- * @param_id: parameter ID
- * @init_val: value of parameter in driverinit configuration mode
- *
- * This function should be used by the driver to get driverinit
- * configuration for initialization after reload command.
- */
-int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
- union devlink_param_value *init_val)
-{
- if (!devlink_reload_supported(devlink->ops))
- return -EOPNOTSUPP;
-
- return __devlink_param_driverinit_value_get(&devlink->param_list,
- param_id, init_val);
-}
-EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get);
-
-/**
- * devlink_param_driverinit_value_set - set value of configuration
- * parameter for driverinit
- * configuration mode
- *
- * @devlink: devlink
- * @param_id: parameter ID
- * @init_val: value of parameter to set for driverinit configuration mode
- *
- * This function should be used by the driver to set driverinit
- * configuration mode default value.
- */
-int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
- union devlink_param_value init_val)
-{
- return __devlink_param_driverinit_value_set(devlink, 0,
- &devlink->param_list,
- param_id, init_val,
- DEVLINK_CMD_PARAM_NEW);
-}
EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set);
/**
@@ -10694,6 +10788,8 @@ devlink_trap_group_notify(struct devlink *devlink,
WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_GROUP_NEW &&
cmd != DEVLINK_CMD_TRAP_GROUP_DEL);
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
@@ -10735,6 +10831,8 @@ static void devlink_trap_notify(struct devlink *devlink,
WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_NEW &&
cmd != DEVLINK_CMD_TRAP_DEL);
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
@@ -11116,6 +11214,8 @@ devlink_trap_policer_notify(struct devlink *devlink,
WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_POLICER_NEW &&
cmd != DEVLINK_CMD_TRAP_POLICER_DEL);
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
@@ -11284,6 +11384,24 @@ free_msg:
nlmsg_free(msg);
}
+static struct devlink_port *netdev_to_devlink_port(struct net_device *dev)
+{
+ if (!dev->netdev_ops->ndo_get_devlink_port)
+ return NULL;
+
+ return dev->netdev_ops->ndo_get_devlink_port(dev);
+}
+
+static struct devlink *netdev_to_devlink(struct net_device *dev)
+{
+ struct devlink_port *devlink_port = netdev_to_devlink_port(dev);
+
+ if (!devlink_port)
+ return NULL;
+
+ return devlink_port->devlink;
+}
+
void devlink_compat_running_version(struct net_device *dev,
char *buf, size_t len)
{
@@ -11393,7 +11511,7 @@ static void __net_exit devlink_pernet_pre_exit(struct net *net)
if (!net_eq(devlink_net(devlink), net))
goto retry;
- WARN_ON(!devlink_reload_supported(devlink->ops));
+ WARN_ON(!(devlink->features & DEVLINK_F_RELOAD));
err = devlink_reload(devlink, &init_net,
DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
DEVLINK_RELOAD_LIMIT_UNSPEC,
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index bac0184cf3de..7d0a9f84aaf7 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1196,9 +1196,8 @@ proto_again:
break;
}
- proto = hdr->proto;
nhoff += PPPOE_SES_HLEN;
- switch (proto) {
+ switch (hdr->proto) {
case htons(PPP_IP):
proto = htons(ETH_P_IP);
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 8e582e29a41e..4fcbdd71c59f 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -40,10 +40,10 @@
*/
struct net_rate_estimator {
- struct gnet_stats_basic_packed *bstats;
+ struct gnet_stats_basic_sync *bstats;
spinlock_t *stats_lock;
- seqcount_t *running;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ bool running;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
u8 ewma_log;
u8 intvl_log; /* period : (250ms << intvl_log) */
@@ -60,13 +60,13 @@ struct net_rate_estimator {
};
static void est_fetch_counters(struct net_rate_estimator *e,
- struct gnet_stats_basic_packed *b)
+ struct gnet_stats_basic_sync *b)
{
- memset(b, 0, sizeof(*b));
+ gnet_stats_basic_sync_init(b);
if (e->stats_lock)
spin_lock(e->stats_lock);
- __gnet_stats_copy_basic(e->running, b, e->cpu_bstats, e->bstats);
+ gnet_stats_add_basic(b, e->cpu_bstats, e->bstats, e->running);
if (e->stats_lock)
spin_unlock(e->stats_lock);
@@ -76,14 +76,18 @@ static void est_fetch_counters(struct net_rate_estimator *e,
static void est_timer(struct timer_list *t)
{
struct net_rate_estimator *est = from_timer(est, t, timer);
- struct gnet_stats_basic_packed b;
+ struct gnet_stats_basic_sync b;
+ u64 b_bytes, b_packets;
u64 rate, brate;
est_fetch_counters(est, &b);
- brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
+ b_bytes = u64_stats_read(&b.bytes);
+ b_packets = u64_stats_read(&b.packets);
+
+ brate = (b_bytes - est->last_bytes) << (10 - est->intvl_log);
brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);
- rate = (b.packets - est->last_packets) << (10 - est->intvl_log);
+ rate = (b_packets - est->last_packets) << (10 - est->intvl_log);
rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
write_seqcount_begin(&est->seq);
@@ -91,8 +95,8 @@ static void est_timer(struct timer_list *t)
est->avpps += rate;
write_seqcount_end(&est->seq);
- est->last_bytes = b.bytes;
- est->last_packets = b.packets;
+ est->last_bytes = b_bytes;
+ est->last_packets = b_packets;
est->next_jiffies += ((HZ/4) << est->intvl_log);
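
est_timer() above folds each new byte/packet sample into an exponentially weighted moving average: the delta is scaled to a per-second rate and then blended in with weight 2^-ewma_log. The same update in one compact, wrap-safe line:

/* EWMA update equivalent to the avbps/avpps logic above:
 * avg += (sample - avg) >> ewma_log, written so unsigned wrap-around
 * cannot bite when sample < avg.
 */
static u64 demo_ewma(u64 avg, u64 sample, unsigned int ewma_log)
{
	return avg - (avg >> ewma_log) + (sample >> ewma_log);
}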
@@ -109,7 +113,9 @@ static void est_timer(struct timer_list *t)
* @cpu_bstats: bstats per cpu
* @rate_est: rate estimator statistics
* @lock: lock for statistics and control path
- * @running: qdisc running seqcount
+ * @running: true if @bstats represents a running qdisc, thus @bstats'
+ * internal values might change during basic reads. Only used
+ * if @cpu_bstats is NULL
* @opt: rate estimator configuration TLV
*
* Creates a new rate estimator with &bstats as source and &rate_est
@@ -121,16 +127,16 @@ static void est_timer(struct timer_list *t)
* Returns 0 on success or a negative error code.
*
*/
-int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock,
- seqcount_t *running,
+ bool running,
struct nlattr *opt)
{
struct gnet_estimator *parm = nla_data(opt);
struct net_rate_estimator *old, *est;
- struct gnet_stats_basic_packed b;
+ struct gnet_stats_basic_sync b;
int intvl_log;
if (nla_len(opt) < sizeof(*parm))
@@ -164,8 +170,8 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
est_fetch_counters(est, &b);
if (lock)
local_bh_enable();
- est->last_bytes = b.bytes;
- est->last_packets = b.packets;
+ est->last_bytes = u64_stats_read(&b.bytes);
+ est->last_packets = u64_stats_read(&b.packets);
if (lock)
spin_lock_bh(lock);
@@ -214,7 +220,9 @@ EXPORT_SYMBOL(gen_kill_estimator);
* @cpu_bstats: bstats per cpu
* @rate_est: rate estimator statistics
* @lock: lock for statistics and control path
- * @running: qdisc running seqcount (might be NULL)
+ * @running: true if @bstats represents a running qdisc, thus @bstats'
+ * internal values might change during basic reads. Only used
+ * if @cpu_bstats is NULL
* @opt: rate estimator configuration TLV
*
* Replaces the configuration of a rate estimator by calling
@@ -222,11 +230,11 @@ EXPORT_SYMBOL(gen_kill_estimator);
*
* Returns 0 on success or a negative error code.
*/
-int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock,
- seqcount_t *running, struct nlattr *opt)
+ bool running, struct nlattr *opt)
{
return gen_new_estimator(bstats, cpu_bstats, rate_est,
lock, running, opt);
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index e491b083b348..a10335b4ba2d 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -18,7 +18,7 @@
#include <linux/gen_stats.h>
#include <net/netlink.h>
#include <net/gen_stats.h>
-
+#include <net/sch_generic.h>
static inline int
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
@@ -114,63 +114,112 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
}
EXPORT_SYMBOL(gnet_stats_start_copy);
-static void
-__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu)
+/* Must not be inlined, due to u64_stats seqcount_t lockdep key */
+void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b)
{
+ u64_stats_set(&b->bytes, 0);
+ u64_stats_set(&b->packets, 0);
+ u64_stats_init(&b->syncp);
+}
+EXPORT_SYMBOL(gnet_stats_basic_sync_init);
+
+static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu)
+{
+ u64 t_bytes = 0, t_packets = 0;
int i;
for_each_possible_cpu(i) {
- struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
+ struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
unsigned int start;
u64 bytes, packets;
do {
start = u64_stats_fetch_begin_irq(&bcpu->syncp);
- bytes = bcpu->bstats.bytes;
- packets = bcpu->bstats.packets;
+ bytes = u64_stats_read(&bcpu->bytes);
+ packets = u64_stats_read(&bcpu->packets);
} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
- bstats->bytes += bytes;
- bstats->packets += packets;
+ t_bytes += bytes;
+ t_packets += packets;
+ }
+ _bstats_update(bstats, t_bytes, t_packets);
+}
+
+void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running)
+{
+ unsigned int start;
+ u64 bytes = 0;
+ u64 packets = 0;
+
+ WARN_ON_ONCE((cpu || running) && in_hardirq());
+
+ if (cpu) {
+ gnet_stats_add_basic_cpu(bstats, cpu);
+ return;
}
+ do {
+ if (running)
+ start = u64_stats_fetch_begin_irq(&b->syncp);
+ bytes = u64_stats_read(&b->bytes);
+ packets = u64_stats_read(&b->packets);
+ } while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
+
+ _bstats_update(bstats, bytes, packets);
}
+EXPORT_SYMBOL(gnet_stats_add_basic);
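
The seqcount_t parameter is gone: readers now synchronize against writers through each counter set's own u64_stats_sync, retrying the snapshot if a writer raced with them. The reader idiom in isolation (the struct name is illustrative; the calls are the ones used above):

/* Reader-side u64_stats idiom used throughout this file; struct
 * demo_stats stands in for gnet_stats_basic_sync.
 */
struct demo_stats {
	u64_stats_t bytes;
	u64_stats_t packets;
	struct u64_stats_sync syncp;
};

static void demo_read(struct demo_stats *s, u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*bytes = u64_stats_read(&s->bytes);
		*packets = u64_stats_read(&s->packets);
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}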
-void
-__gnet_stats_copy_basic(const seqcount_t *running,
- struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b)
+static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running)
{
- unsigned int seq;
+ unsigned int start;
if (cpu) {
- __gnet_stats_copy_basic_cpu(bstats, cpu);
+ u64 t_bytes = 0, t_packets = 0;
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
+ unsigned int start;
+ u64 bytes, packets;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&bcpu->syncp);
+ bytes = u64_stats_read(&bcpu->bytes);
+ packets = u64_stats_read(&bcpu->packets);
+ } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
+
+ t_bytes += bytes;
+ t_packets += packets;
+ }
+ *ret_bytes = t_bytes;
+ *ret_packets = t_packets;
return;
}
do {
if (running)
- seq = read_seqcount_begin(running);
- bstats->bytes = b->bytes;
- bstats->packets = b->packets;
- } while (running && read_seqcount_retry(running, seq));
+ start = u64_stats_fetch_begin_irq(&b->syncp);
+ *ret_bytes = u64_stats_read(&b->bytes);
+ *ret_packets = u64_stats_read(&b->packets);
+ } while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
}
-EXPORT_SYMBOL(__gnet_stats_copy_basic);
static int
-___gnet_stats_copy_basic(const seqcount_t *running,
- struct gnet_dump *d,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b,
- int type)
+___gnet_stats_copy_basic(struct gnet_dump *d,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b,
+ int type, bool running)
{
- struct gnet_stats_basic_packed bstats = {0};
+ u64 bstats_bytes, bstats_packets;
- __gnet_stats_copy_basic(running, &bstats, cpu, b);
+ gnet_stats_read_basic(&bstats_bytes, &bstats_packets, cpu, b, running);
if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
- d->tc_stats.bytes = bstats.bytes;
- d->tc_stats.packets = bstats.packets;
+ d->tc_stats.bytes = bstats_bytes;
+ d->tc_stats.packets = bstats_packets;
}
if (d->tail) {
@@ -178,24 +227,28 @@ ___gnet_stats_copy_basic(const seqcount_t *running,
int res;
memset(&sb, 0, sizeof(sb));
- sb.bytes = bstats.bytes;
- sb.packets = bstats.packets;
+ sb.bytes = bstats_bytes;
+ sb.packets = bstats_packets;
res = gnet_stats_copy(d, type, &sb, sizeof(sb), TCA_STATS_PAD);
- if (res < 0 || sb.packets == bstats.packets)
+ if (res < 0 || sb.packets == bstats_packets)
return res;
/* emit 64bit stats only if needed */
- return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats.packets,
- sizeof(bstats.packets), TCA_STATS_PAD);
+ return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats_packets,
+ sizeof(bstats_packets), TCA_STATS_PAD);
}
return 0;
}
/**
* gnet_stats_copy_basic - copy basic statistics into statistic TLV
- * @running: seqcount_t pointer
* @d: dumping handle
* @cpu: copy statistic per cpu
* @b: basic statistics
+ * @running: true if @b represents a running qdisc, thus @b's
+ * internal values might change during basic reads.
+ * Only used if @cpu is NULL
+ *
+ * Context: task; must not be run from IRQ or BH contexts
*
* Appends the basic statistics to the top level TLV created by
* gnet_stats_start_copy().
@@ -204,22 +257,25 @@ ___gnet_stats_copy_basic(const seqcount_t *running,
* if the room in the socket buffer was not sufficient.
*/
int
-gnet_stats_copy_basic(const seqcount_t *running,
- struct gnet_dump *d,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b)
+gnet_stats_copy_basic(struct gnet_dump *d,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b,
+ bool running)
{
- return ___gnet_stats_copy_basic(running, d, cpu, b,
- TCA_STATS_BASIC);
+ return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC, running);
}
EXPORT_SYMBOL(gnet_stats_copy_basic);
/**
* gnet_stats_copy_basic_hw - copy basic hw statistics into statistic TLV
- * @running: seqcount_t pointer
* @d: dumping handle
* @cpu: copy statistic per cpu
* @b: basic statistics
+ * @running: true if @b represents a running qdisc, thus @b's
+ * internal values might change during basic reads.
+ * Only used if @cpu is NULL
+ *
+ * Context: task; must not be run from IRQ or BH contexts
*
* Appends the basic statistics to the top level TLV created by
* gnet_stats_start_copy().
@@ -228,13 +284,12 @@ EXPORT_SYMBOL(gnet_stats_copy_basic);
* if the room in the socket buffer was not sufficient.
*/
int
-gnet_stats_copy_basic_hw(const seqcount_t *running,
- struct gnet_dump *d,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b)
+gnet_stats_copy_basic_hw(struct gnet_dump *d,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b,
+ bool running)
{
- return ___gnet_stats_copy_basic(running, d, cpu, b,
- TCA_STATS_BASIC_HW);
+ return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC_HW, running);
}
EXPORT_SYMBOL(gnet_stats_copy_basic_hw);
@@ -282,16 +337,15 @@ gnet_stats_copy_rate_est(struct gnet_dump *d,
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);
-static void
-__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
- const struct gnet_stats_queue __percpu *q)
+static void gnet_stats_add_queue_cpu(struct gnet_stats_queue *qstats,
+ const struct gnet_stats_queue __percpu *q)
{
int i;
for_each_possible_cpu(i) {
const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
- qstats->qlen = 0;
+ qstats->qlen += qcpu->qlen;
qstats->backlog += qcpu->backlog;
qstats->drops += qcpu->drops;
qstats->requeues += qcpu->requeues;
@@ -299,24 +353,21 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
}
}
-void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
- const struct gnet_stats_queue __percpu *cpu,
- const struct gnet_stats_queue *q,
- __u32 qlen)
+void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
+ const struct gnet_stats_queue __percpu *cpu,
+ const struct gnet_stats_queue *q)
{
if (cpu) {
- __gnet_stats_copy_queue_cpu(qstats, cpu);
+ gnet_stats_add_queue_cpu(qstats, cpu);
} else {
- qstats->qlen = q->qlen;
- qstats->backlog = q->backlog;
- qstats->drops = q->drops;
- qstats->requeues = q->requeues;
- qstats->overlimits = q->overlimits;
+ qstats->qlen += q->qlen;
+ qstats->backlog += q->backlog;
+ qstats->drops += q->drops;
+ qstats->requeues += q->requeues;
+ qstats->overlimits += q->overlimits;
}
-
- qstats->qlen = qlen;
}
-EXPORT_SYMBOL(__gnet_stats_copy_queue);
+EXPORT_SYMBOL(gnet_stats_add_queue);
/**
* gnet_stats_copy_queue - copy queue statistics into statistics TLV
@@ -339,7 +390,8 @@ gnet_stats_copy_queue(struct gnet_dump *d,
{
struct gnet_stats_queue qstats = {0};
- __gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);
+ gnet_stats_add_queue(&qstats, cpu_q, q);
+ qstats.qlen = qlen;
if (d->compat_tc_stats) {
d->tc_stats.drops = qstats.drops;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 2d5bc3a75fae..47931c8be04b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -122,6 +122,8 @@ static void neigh_mark_dead(struct neighbour *n)
list_del_init(&n->gc_list);
atomic_dec(&n->tbl->gc_entries);
}
+ if (!list_empty(&n->managed_list))
+ list_del_init(&n->managed_list);
}
static void neigh_update_gc_list(struct neighbour *n)
@@ -130,7 +132,6 @@ static void neigh_update_gc_list(struct neighbour *n)
write_lock_bh(&n->tbl->lock);
write_lock(&n->lock);
-
if (n->dead)
goto out;
@@ -149,32 +150,59 @@ static void neigh_update_gc_list(struct neighbour *n)
list_add_tail(&n->gc_list, &n->tbl->gc_list);
atomic_inc(&n->tbl->gc_entries);
}
+out:
+ write_unlock(&n->lock);
+ write_unlock_bh(&n->tbl->lock);
+}
+static void neigh_update_managed_list(struct neighbour *n)
+{
+ bool on_managed_list, add_to_managed;
+
+ write_lock_bh(&n->tbl->lock);
+ write_lock(&n->lock);
+ if (n->dead)
+ goto out;
+
+ add_to_managed = n->flags & NTF_MANAGED;
+ on_managed_list = !list_empty(&n->managed_list);
+
+ if (!add_to_managed && on_managed_list)
+ list_del_init(&n->managed_list);
+ else if (add_to_managed && !on_managed_list)
+ list_add_tail(&n->managed_list, &n->tbl->managed_list);
out:
write_unlock(&n->lock);
write_unlock_bh(&n->tbl->lock);
}
-static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
- int *notify)
+static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
+ bool *gc_update, bool *managed_update)
{
- bool rc = false;
- u8 ndm_flags;
+ u32 ndm_flags, old_flags = neigh->flags;
if (!(flags & NEIGH_UPDATE_F_ADMIN))
- return rc;
+ return;
+
+ ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
+ ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;
- ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
- if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
+ if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
if (ndm_flags & NTF_EXT_LEARNED)
neigh->flags |= NTF_EXT_LEARNED;
else
neigh->flags &= ~NTF_EXT_LEARNED;
- rc = true;
*notify = 1;
+ *gc_update = true;
+ }
+ if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
+ if (ndm_flags & NTF_MANAGED)
+ neigh->flags |= NTF_MANAGED;
+ else
+ neigh->flags &= ~NTF_MANAGED;
+ *notify = 1;
+ *managed_update = true;
}
-
- return rc;
}
static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
@@ -379,7 +407,7 @@ EXPORT_SYMBOL(neigh_ifdown);
static struct neighbour *neigh_alloc(struct neigh_table *tbl,
struct net_device *dev,
- bool exempt_from_gc)
+ u32 flags, bool exempt_from_gc)
{
struct neighbour *n = NULL;
unsigned long now = jiffies;
@@ -412,6 +440,7 @@ do_alloc:
n->updated = n->used = now;
n->nud_state = NUD_NONE;
n->output = neigh_blackhole;
+ n->flags = flags;
seqlock_init(&n->hh.hh_lock);
n->parms = neigh_parms_clone(&tbl->parms);
timer_setup(&n->timer, neigh_timer_handler, 0);
@@ -421,6 +450,7 @@ do_alloc:
refcount_set(&n->refcnt, 1);
n->dead = 1;
INIT_LIST_HEAD(&n->gc_list);
+ INIT_LIST_HEAD(&n->managed_list);
atomic_inc(&tbl->entries);
out:
@@ -575,19 +605,18 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
}
EXPORT_SYMBOL(neigh_lookup_nodev);
-static struct neighbour *___neigh_create(struct neigh_table *tbl,
- const void *pkey,
- struct net_device *dev,
- bool exempt_from_gc, bool want_ref)
+static struct neighbour *
+___neigh_create(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev, u32 flags,
+ bool exempt_from_gc, bool want_ref)
{
- struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
- u32 hash_val;
- unsigned int key_len = tbl->key_len;
- int error;
+ u32 hash_val, key_len = tbl->key_len;
+ struct neighbour *n1, *rc, *n;
struct neigh_hash_table *nht;
+ int error;
+ n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
-
if (!n) {
rc = ERR_PTR(-ENOBUFS);
goto out;
@@ -650,7 +679,8 @@ static struct neighbour *___neigh_create(struct neigh_table *tbl,
n->dead = 0;
if (!exempt_from_gc)
list_add_tail(&n->gc_list, &n->tbl->gc_list);
-
+ if (n->flags & NTF_MANAGED)
+ list_add_tail(&n->managed_list, &n->tbl->managed_list);
if (want_ref)
neigh_hold(n);
rcu_assign_pointer(n->next,
@@ -674,7 +704,7 @@ out_neigh_release:
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
struct net_device *dev, bool want_ref)
{
- return ___neigh_create(tbl, pkey, dev, false, want_ref);
+ return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);
@@ -1205,8 +1235,6 @@ static void neigh_update_hhs(struct neighbour *neigh)
}
}
-
-
/* Generic update routine.
-- lladdr is new lladdr or NULL, if it is not supplied.
-- new is new state.
@@ -1217,7 +1245,8 @@ static void neigh_update_hhs(struct neighbour *neigh)
lladdr instead of overriding it
if it is different.
NEIGH_UPDATE_F_ADMIN means that the change is administrative.
-
+ NEIGH_UPDATE_F_USE means that the entry is user-triggered.
+ NEIGH_UPDATE_F_MANAGED means that the entry will be auto-refreshed.
NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
NTF_ROUTER flag.
NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as
@@ -1225,17 +1254,15 @@ static void neigh_update_hhs(struct neighbour *neigh)
Caller MUST hold reference count on the entry.
*/
-
static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
u8 new, u32 flags, u32 nlmsg_pid,
struct netlink_ext_ack *extack)
{
- bool ext_learn_change = false;
- u8 old;
- int err;
- int notify = 0;
- struct net_device *dev;
+ bool gc_update = false, managed_update = false;
int update_isrouter = 0;
+ struct net_device *dev;
+ int err, notify = 0;
+ u8 old;
trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
@@ -1254,7 +1281,13 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
(old & (NUD_NOARP | NUD_PERMANENT)))
goto out;
- ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
+ neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
+ if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
+ new = old & ~NUD_PERMANENT;
+ neigh->nud_state = new;
+ err = 0;
+ goto out;
+ }
if (!(new & NUD_VALID)) {
neigh_del_timer(neigh);
@@ -1399,15 +1432,13 @@ out:
if (update_isrouter)
neigh_update_is_router(neigh, flags, &notify);
write_unlock_bh(&neigh->lock);
-
- if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
+ if (((new ^ old) & NUD_PERMANENT) || gc_update)
neigh_update_gc_list(neigh);
-
+ if (managed_update)
+ neigh_update_managed_list(neigh);
if (notify)
neigh_update_notify(neigh, nlmsg_pid);
-
trace_neigh_update_done(neigh, err);
-
return err;
}
@@ -1533,6 +1564,20 @@ int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
}
EXPORT_SYMBOL(neigh_direct_output);
+static void neigh_managed_work(struct work_struct *work)
+{
+ struct neigh_table *tbl = container_of(work, struct neigh_table,
+ managed_work.work);
+ struct neighbour *neigh;
+
+ write_lock_bh(&tbl->lock);
+ list_for_each_entry(neigh, &tbl->managed_list, managed_list)
+ neigh_event_send(neigh, NULL);
+ queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
+ NEIGH_VAR(&tbl->parms, DELAY_PROBE_TIME));
+ write_unlock_bh(&tbl->lock);
+}
+
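
neigh_managed_work() keeps NTF_MANAGED entries resolved by kicking neigh_event_send() for each one and then re-arming itself on the power-efficient workqueue. The self-rearming pattern on its own, with an assumed interval:

/* Self-rearming deferrable work sketch; the 5 s interval is assumed
 * for illustration, the real code above uses DELAY_PROBE_TIME.
 */
static void demo_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* ... refresh entries here ... */

	queue_delayed_work(system_power_efficient_wq, dwork,
			   msecs_to_jiffies(5000));
}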
static void neigh_proxy_process(struct timer_list *t)
{
struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
@@ -1679,6 +1724,8 @@ void neigh_table_init(int index, struct neigh_table *tbl)
INIT_LIST_HEAD(&tbl->parms_list);
INIT_LIST_HEAD(&tbl->gc_list);
+ INIT_LIST_HEAD(&tbl->managed_list);
+
list_add(&tbl->parms.list, &tbl->parms_list);
write_pnet(&tbl->parms.net, &init_net);
refcount_set(&tbl->parms.refcnt, 1);
@@ -1710,9 +1757,13 @@ void neigh_table_init(int index, struct neigh_table *tbl)
WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
rwlock_init(&tbl->lock);
+
INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
tbl->parms.reachable_time);
+ INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
+ queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);
+
timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
skb_queue_head_init_class(&tbl->proxy_queue,
&neigh_table_proxy_queue_class);
@@ -1783,6 +1834,7 @@ const struct nla_policy nda_policy[NDA_MAX+1] = {
[NDA_MASTER] = { .type = NLA_U32 },
[NDA_PROTOCOL] = { .type = NLA_U8 },
[NDA_NH_ID] = { .type = NLA_U32 },
+ [NDA_FLAGS_EXT] = NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
[NDA_FDB_EXT_ATTRS] = { .type = NLA_NESTED },
};
@@ -1855,7 +1907,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
- NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
+ NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
struct net *net = sock_net(skb->sk);
struct ndmsg *ndm;
struct nlattr *tb[NDA_MAX+1];
@@ -1864,6 +1916,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
struct neighbour *neigh;
void *dst, *lladdr;
u8 protocol = 0;
+ u32 ndm_flags;
int err;
ASSERT_RTNL();
@@ -1879,6 +1932,15 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
}
ndm = nlmsg_data(nlh);
+ ndm_flags = ndm->ndm_flags;
+ if (tb[NDA_FLAGS_EXT]) {
+ u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);
+
+ BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
+ (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
+ hweight32(NTF_EXT_MASK)));
+ ndm_flags |= (ext << NTF_EXT_SHIFT);
+ }
if (ndm->ndm_ifindex) {
dev = __dev_get_by_index(net, ndm->ndm_ifindex);
if (dev == NULL) {
@@ -1906,14 +1968,18 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[NDA_PROTOCOL])
protocol = nla_get_u8(tb[NDA_PROTOCOL]);
-
- if (ndm->ndm_flags & NTF_PROXY) {
+ if (ndm_flags & NTF_PROXY) {
struct pneigh_entry *pn;
+ if (ndm_flags & NTF_MANAGED) {
+ NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
+ goto out;
+ }
+
err = -ENOBUFS;
pn = pneigh_lookup(tbl, net, dst, dev, 1);
if (pn) {
- pn->flags = ndm->ndm_flags;
+ pn->flags = ndm_flags;
if (protocol)
pn->protocol = protocol;
err = 0;
@@ -1933,16 +1999,24 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
neigh = neigh_lookup(tbl, dst, dev);
if (neigh == NULL) {
- bool exempt_from_gc;
+ bool ndm_permanent = ndm->ndm_state & NUD_PERMANENT;
+ bool exempt_from_gc = ndm_permanent ||
+ ndm_flags & NTF_EXT_LEARNED;
if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
err = -ENOENT;
goto out;
}
+ if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
+ NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
+ err = -EINVAL;
+ goto out;
+ }
- exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
- ndm->ndm_flags & NTF_EXT_LEARNED;
- neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
+ neigh = ___neigh_create(tbl, dst, dev,
+ ndm_flags &
+ (NTF_EXT_LEARNED | NTF_MANAGED),
+ exempt_from_gc, true);
if (IS_ERR(neigh)) {
err = PTR_ERR(neigh);
goto out;
@@ -1961,22 +2035,22 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
if (protocol)
neigh->protocol = protocol;
-
- if (ndm->ndm_flags & NTF_EXT_LEARNED)
+ if (ndm_flags & NTF_EXT_LEARNED)
flags |= NEIGH_UPDATE_F_EXT_LEARNED;
-
- if (ndm->ndm_flags & NTF_ROUTER)
+ if (ndm_flags & NTF_ROUTER)
flags |= NEIGH_UPDATE_F_ISROUTER;
+ if (ndm_flags & NTF_MANAGED)
+ flags |= NEIGH_UPDATE_F_MANAGED;
+ if (ndm_flags & NTF_USE)
+ flags |= NEIGH_UPDATE_F_USE;
- if (ndm->ndm_flags & NTF_USE) {
+ err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
+ NETLINK_CB(skb).portid, extack);
+ if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
neigh_event_send(neigh, NULL);
err = 0;
- } else
- err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
- NETLINK_CB(skb).portid, extack);
-
+ }
neigh_release(neigh);
-
out:
return err;
}
@@ -2427,6 +2501,7 @@ out:
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
u32 pid, u32 seq, int type, unsigned int flags)
{
+ u32 neigh_flags, neigh_flags_ext;
unsigned long now = jiffies;
struct nda_cacheinfo ci;
struct nlmsghdr *nlh;
@@ -2436,11 +2511,14 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
if (nlh == NULL)
return -EMSGSIZE;
+ neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
+ neigh_flags = neigh->flags & NTF_OLD_MASK;
+
ndm = nlmsg_data(nlh);
ndm->ndm_family = neigh->ops->family;
ndm->ndm_pad1 = 0;
ndm->ndm_pad2 = 0;
- ndm->ndm_flags = neigh->flags;
+ ndm->ndm_flags = neigh_flags;
ndm->ndm_type = neigh->type;
ndm->ndm_ifindex = neigh->dev->ifindex;
@@ -2471,6 +2549,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
goto nla_put_failure;
+ if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
+ goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
@@ -2484,6 +2564,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
u32 pid, u32 seq, int type, unsigned int flags,
struct neigh_table *tbl)
{
+ u32 neigh_flags, neigh_flags_ext;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
@@ -2491,11 +2572,14 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
if (nlh == NULL)
return -EMSGSIZE;
+ neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
+ neigh_flags = pn->flags & NTF_OLD_MASK;
+
ndm = nlmsg_data(nlh);
ndm->ndm_family = tbl->family;
ndm->ndm_pad1 = 0;
ndm->ndm_pad2 = 0;
- ndm->ndm_flags = pn->flags | NTF_PROXY;
+ ndm->ndm_flags = neigh_flags | NTF_PROXY;
ndm->ndm_type = RTN_UNICAST;
ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
ndm->ndm_state = NUD_NONE;
@@ -2505,6 +2589,8 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
goto nla_put_failure;
+ if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
+ goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
@@ -2820,6 +2906,7 @@ static inline size_t neigh_nlmsg_size(void)
+ nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
+ nla_total_size(sizeof(struct nda_cacheinfo))
+ nla_total_size(4) /* NDA_PROBES */
+ + nla_total_size(4) /* NDA_FLAGS_EXT */
+ nla_total_size(1); /* NDA_PROTOCOL */
}
@@ -2848,6 +2935,7 @@ static inline size_t pneigh_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ndmsg))
+ nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
+ + nla_total_size(4) /* NDA_FLAGS_EXT */
+ nla_total_size(1); /* NDA_PROTOCOL */
}
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index eab5fc88a002..d8b9dbabd4a4 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -77,8 +77,8 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
- seq_printf(seq, "%9s: %16llu %12llu %4llu %6llu %4llu %5llu %10llu %9llu "
- "%16llu %12llu %4llu %6llu %4llu %5llu %7llu %10llu\n",
+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
dev->name, stats->rx_bytes, stats->rx_packets,
stats->rx_errors,
stats->rx_dropped + stats->rx_missed_errors,
@@ -103,11 +103,11 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
static int dev_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
- seq_puts(seq, "Interface| Receive "
- " | Transmit\n"
- " | bytes packets errs drop fifo frame "
- "compressed multicast| bytes packets errs "
- " drop fifo colls carrier compressed\n");
+ seq_puts(seq, "Inter-| Receive "
+ " | Transmit\n"
+ " face |bytes packets errs drop fifo frame "
+ "compressed multicast|bytes packets errs "
+ "drop fifo colls carrier compressed\n");
else
dev_seq_printf_stats(seq, v);
return 0;
@@ -259,14 +259,14 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
struct packet_type *pt = v;
if (v == SEQ_START_TOKEN)
- seq_puts(seq, "Type Device Function\n");
+ seq_puts(seq, "Type Device Function\n");
else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
if (pt->type == htons(ETH_P_ALL))
seq_puts(seq, "ALL ");
else
seq_printf(seq, "%04x", ntohs(pt->type));
- seq_printf(seq, " %-9s %ps\n",
+ seq_printf(seq, " %-8s %ps\n",
pt->dev ? pt->dev->name : "", pt->func);
}
@@ -327,14 +327,12 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
struct netdev_hw_addr *ha;
struct net_device *dev = v;
- if (v == SEQ_START_TOKEN) {
- seq_puts(seq, "Ifindex Interface Refcount Global_use Address\n");
+ if (v == SEQ_START_TOKEN)
return 0;
- }
netif_addr_lock_bh(dev);
netdev_for_each_mc_addr(ha, dev) {
- seq_printf(seq, "%-7d %-9s %-8d %-10d %*phN\n",
+ seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
dev->ifindex, dev->name,
ha->refcount, ha->global_use,
(int)dev->addr_len, ha->addr);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index f6197774048b..d6e4e0b43beb 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -175,6 +175,14 @@ static int change_carrier(struct net_device *dev, unsigned long new_carrier)
static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
+ struct net_device *netdev = to_net_dev(dev);
+
+ /* The check is also done in change_carrier; this lets us return early
+ * without hitting the trylock/restart in netdev_store.
+ */
+ if (!netdev->netdev_ops->ndo_change_carrier)
+ return -EOPNOTSUPP;
+
return netdev_store(dev, attr, buf, len, change_carrier);
}
@@ -196,6 +204,12 @@ static ssize_t speed_show(struct device *dev,
struct net_device *netdev = to_net_dev(dev);
int ret = -EINVAL;
+ /* The check is also done in __ethtool_get_link_ksettings; this lets
+ * us return early without hitting the trylock/restart below.
+ */
+ if (!netdev->ethtool_ops->get_link_ksettings)
+ return ret;
+
if (!rtnl_trylock())
return restart_syscall();
@@ -216,6 +230,12 @@ static ssize_t duplex_show(struct device *dev,
struct net_device *netdev = to_net_dev(dev);
int ret = -EINVAL;
+ /* The check is also done in __ethtool_get_link_ksettings; this lets
+ * us return early without hitting the trylock/restart below.
+ */
+ if (!netdev->ethtool_ops->get_link_ksettings)
+ return ret;
+
if (!rtnl_trylock())
return restart_syscall();
@@ -468,6 +488,14 @@ static ssize_t proto_down_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
+ struct net_device *netdev = to_net_dev(dev);
+
+ /* The check is also done in change_proto_down; this lets us return
+ * early without hitting the trylock/restart in netdev_store.
+ */
+ if (!netdev->netdev_ops->ndo_change_proto_down)
+ return -EOPNOTSUPP;
+
return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);
@@ -478,6 +506,12 @@ static ssize_t phys_port_id_show(struct device *dev,
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
+ /* The check is also done in dev_get_phys_port_id; this lets us return
+ * early without hitting the trylock/restart below.
+ */
+ if (!netdev->netdev_ops->ndo_get_phys_port_id)
+ return -EOPNOTSUPP;
+
if (!rtnl_trylock())
return restart_syscall();
@@ -500,6 +534,13 @@ static ssize_t phys_port_name_show(struct device *dev,
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
+ /* The checks are also done in dev_get_phys_port_name; this lets us
+ * return early without hitting the trylock/restart below.
+ */
+ if (!netdev->netdev_ops->ndo_get_phys_port_name &&
+ !netdev->netdev_ops->ndo_get_devlink_port)
+ return -EOPNOTSUPP;
+
if (!rtnl_trylock())
return restart_syscall();
@@ -522,6 +563,14 @@ static ssize_t phys_switch_id_show(struct device *dev,
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
+ /* The checks are also done in dev_get_port_parent_id; this lets us
+ * return early without hitting the trylock/restart below. This works
+ * because recurse is false when calling dev_get_port_parent_id.
+ */
+ if (!netdev->netdev_ops->ndo_get_port_parent_id &&
+ !netdev->netdev_ops->ndo_get_devlink_port)
+ return -EOPNOTSUPP;
+
if (!rtnl_trylock())
return restart_syscall();
@@ -1226,6 +1275,12 @@ static ssize_t tx_maxrate_store(struct netdev_queue *queue,
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ /* The check is also done later; this lets us return early without
+ * hitting the trylock/restart below.
+ */
+ if (!dev->netdev_ops->ndo_set_tx_maxrate)
+ return -EOPNOTSUPP;
+
err = kstrtou32(buf, 10, &rate);
if (err < 0)
return err;
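
All of the net-sysfs hunks above short-circuit the same pattern: without the early capability check, a sysfs access contending on RTNL bounces through restart_syscall() repeatedly, only to fail with the same error once the lock is finally taken. An approximate sketch of the remaining body of speed_show(), i.e. the loop the new checks avoid entering when the operation is missing:

        struct ethtool_link_ksettings cmd;

        if (!rtnl_trylock())
                return restart_syscall();       /* the syscall is retried by the core */

        if (netif_running(netdev) &&
            !__ethtool_get_link_ksettings(netdev, &cmd))
                ret = sysfs_emit(buf, "%d\n", cmd.base.speed);

        rtnl_unlock();
        return ret;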
@@ -1869,7 +1924,7 @@ static struct class net_class __ro_after_init = {
.get_ownership = net_get_ownership,
};
-#ifdef CONFIG_OF_NET
+#ifdef CONFIG_OF
static int of_dev_node_match(struct device *dev, const void *data)
{
for (; dev; dev = dev->parent) {
diff --git a/drivers/of/of_net.c b/net/core/of_net.c
index dbac3a172a11..f1a9bf7578e7 100644
--- a/drivers/of/of_net.c
+++ b/net/core/of_net.c
@@ -143,3 +143,28 @@ int of_get_mac_address(struct device_node *np, u8 *addr)
return of_get_mac_addr_nvmem(np, addr);
}
EXPORT_SYMBOL(of_get_mac_address);
+
+/**
+ * of_get_ethdev_address()
+ * @np: Caller's Device Node
+ * @dev: Pointer to the netdevice whose address will be updated
+ *
+ * Search the device tree for the best MAC address to use.
+ * If found, set @dev->dev_addr to that address.
+ *
+ * See documentation of of_get_mac_address() for more information on how
+ * the best address is determined.
+ *
+ * Return: 0 on success and errno in case of error.
+ */
+int of_get_ethdev_address(struct device_node *np, struct net_device *dev)
+{
+ u8 addr[ETH_ALEN];
+ int ret;
+
+ ret = of_get_mac_address(np, addr);
+ if (!ret)
+ eth_hw_addr_set(dev, addr);
+ return ret;
+}
+EXPORT_SYMBOL(of_get_ethdev_address);
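
A hedged sketch of how an Ethernet driver would consume the new helper from its probe path, falling back to a random address when neither the device tree nor nvmem provides one (the foo_* names are illustrative):

static int foo_probe(struct platform_device *pdev)
{
        struct net_device *ndev = foo_alloc_netdev(pdev);       /* hypothetical */
        int err;

        err = of_get_ethdev_address(pdev->dev.of_node, ndev);
        if (err)
                eth_hw_addr_random(ndev);       /* no usable MAC in DT/nvmem */

        return register_netdev(ndev);
}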
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 1a6978427d6c..9b60e4301a44 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -49,6 +49,12 @@ static int page_pool_init(struct page_pool *pool,
* which is the XDP_TX use-case.
*/
if (pool->p.flags & PP_FLAG_DMA_MAP) {
+ /* DMA mapping is not supported on 32-bit systems whose
+ * dma_addr_t is 64-bit wide.
+ */
+ if (sizeof(dma_addr_t) > sizeof(unsigned long))
+ return -EOPNOTSUPP;
+
if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
(pool->p.dma_dir != DMA_BIDIRECTIONAL))
return -EINVAL;
@@ -69,10 +75,6 @@ static int page_pool_init(struct page_pool *pool,
*/
}
- if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
- pool->p.flags & PP_FLAG_PAGE_FRAG)
- return -EINVAL;
-
if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
return -ENOMEM;
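
The new -EOPNOTSUPP return exists because the pool records DMA addresses in unsigned-long-sized storage inside struct page. A sketch of the invariant being enforced, with 32-bit ARM plus LPAE as the assumed example of a dma_addr_t wider than unsigned long:

/* sketch, not the kernel's actual helper */
static bool dma_addr_fits_in_page(dma_addr_t addr)
{
        /* truncate, then widen back: lossless only when dma_addr_t
         * is no wider than unsigned long
         */
        return (dma_addr_t)(unsigned long)addr == addr;
}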
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 327ca6bc6e6d..79477dcae7c2 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3804,9 +3804,8 @@ struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
struct net *net = dev_net(dev);
struct sk_buff *skb;
int err = -ENOBUFS;
- size_t if_info_size;
- skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
+ skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
if (skb == NULL)
goto errout;
@@ -4384,7 +4383,7 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
continue;
if (br_dev != netdev_master_upper_dev_get(dev) &&
- !(dev->priv_flags & IFF_EBRIDGE))
+ !netif_is_bridge_master(dev))
continue;
cops = ops;
}
@@ -5262,7 +5261,7 @@ nla_put_failure:
static size_t if_nlmsg_stats_size(const struct net_device *dev,
u32 filter_mask)
{
- size_t size = 0;
+ size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
diff --git a/net/core/sock.c b/net/core/sock.c
index 62627e868e03..9862eefce21e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -350,7 +350,7 @@ void sk_error_report(struct sock *sk)
}
EXPORT_SYMBOL(sk_error_report);
-static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
+int sock_get_timeout(long timeo, void *optval, bool old_timeval)
{
struct __kernel_sock_timeval tv;
@@ -379,12 +379,11 @@ static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
*(struct __kernel_sock_timeval *)optval = tv;
return sizeof(tv);
}
+EXPORT_SYMBOL(sock_get_timeout);
-static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
- bool old_timeval)
+int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
+ sockptr_t optval, int optlen, bool old_timeval)
{
- struct __kernel_sock_timeval tv;
-
if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
struct old_timeval32 tv32;
@@ -393,8 +392,8 @@ static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
if (copy_from_sockptr(&tv32, optval, sizeof(tv32)))
return -EFAULT;
- tv.tv_sec = tv32.tv_sec;
- tv.tv_usec = tv32.tv_usec;
+ tv->tv_sec = tv32.tv_sec;
+ tv->tv_usec = tv32.tv_usec;
} else if (old_timeval) {
struct __kernel_old_timeval old_tv;
@@ -402,14 +401,28 @@ static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
return -EINVAL;
if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv)))
return -EFAULT;
- tv.tv_sec = old_tv.tv_sec;
- tv.tv_usec = old_tv.tv_usec;
+ tv->tv_sec = old_tv.tv_sec;
+ tv->tv_usec = old_tv.tv_usec;
} else {
- if (optlen < sizeof(tv))
+ if (optlen < sizeof(*tv))
return -EINVAL;
- if (copy_from_sockptr(&tv, optval, sizeof(tv)))
+ if (copy_from_sockptr(tv, optval, sizeof(*tv)))
return -EFAULT;
}
+
+ return 0;
+}
+EXPORT_SYMBOL(sock_copy_user_timeval);
+
+static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
+ bool old_timeval)
+{
+ struct __kernel_sock_timeval tv;
+ int err = sock_copy_user_timeval(&tv, optval, optlen, old_timeval);
+
+ if (err)
+ return err;
+
if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
return -EDOM;
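
Splitting the copy step into sock_copy_user_timeval() lets callers outside net/core reuse the compat-aware parsing while keeping their own validation; a hedged sketch of such a caller (a protocol's setsockopt path is the assumed consumer):

        struct __kernel_sock_timeval tv;
        int err;

        /* parse the user buffer exactly as SOL_SOCKET would ... */
        err = sock_copy_user_timeval(&tv, optval, optlen, old_timeval);
        if (err)
                return err;

        /* ... then apply caller-specific validation, as above */
        if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
                return -EDOM;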
@@ -947,6 +960,53 @@ void sock_set_mark(struct sock *sk, u32 val)
}
EXPORT_SYMBOL(sock_set_mark);
+static void sock_release_reserved_memory(struct sock *sk, int bytes)
+{
+ /* Round down bytes to a multiple of pages */
+ bytes &= ~(SK_MEM_QUANTUM - 1);
+
+ WARN_ON(bytes > sk->sk_reserved_mem);
+ sk->sk_reserved_mem -= bytes;
+ sk_mem_reclaim(sk);
+}
+
+static int sock_reserve_memory(struct sock *sk, int bytes)
+{
+ long allocated;
+ bool charged;
+ int pages;
+
+ if (!mem_cgroup_sockets_enabled || !sk->sk_memcg)
+ return -EOPNOTSUPP;
+
+ if (!bytes)
+ return 0;
+
+ pages = sk_mem_pages(bytes);
+
+ /* pre-charge to memcg */
+ charged = mem_cgroup_charge_skmem(sk->sk_memcg, pages,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+ if (!charged)
+ return -ENOMEM;
+
+ /* pre-charge to forward_alloc */
+ allocated = sk_memory_allocated_add(sk, pages);
+ /* If the system goes into memory pressure with this
+ * precharge, give up and return an error.
+ */
+ if (allocated > sk_prot_mem_limits(sk, 1)) {
+ sk_memory_allocated_sub(sk, pages);
+ mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
+ return -ENOMEM;
+ }
+ sk->sk_forward_alloc += pages << SK_MEM_QUANTUM_SHIFT;
+
+ sk->sk_reserved_mem += pages << SK_MEM_QUANTUM_SHIFT;
+
+ return 0;
+}
+
/*
* This is meant for all protocols to use and covers goings on
* at the socket level. Everything here is generic.
@@ -1367,6 +1427,23 @@ set_sndbuf:
~SOCK_BUF_LOCK_MASK);
break;
+ case SO_RESERVE_MEM:
+ {
+ int delta;
+
+ if (val < 0) {
+ ret = -EINVAL;
+ break;
+ }
+
+ delta = val - sk->sk_reserved_mem;
+ if (delta < 0)
+ sock_release_reserved_memory(sk, -delta);
+ else
+ ret = sock_reserve_memory(sk, delta);
+ break;
+ }
+
default:
ret = -ENOPROTOOPT;
break;
@@ -1376,6 +1453,16 @@ set_sndbuf:
}
EXPORT_SYMBOL(sock_setsockopt);
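
From userspace, the new option takes a byte count to pre-charge against the socket; a hedged sketch (the SO_RESERVE_MEM constant must come from uapi headers new enough to define it):

        int reserve = 1 << 20;  /* request ~1 MiB of pre-charged memory */

        if (setsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM,
                       &reserve, sizeof(reserve)) < 0)
                perror("SO_RESERVE_MEM");       /* fails without memcg, see sock_reserve_memory() */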
+static const struct cred *sk_get_peer_cred(struct sock *sk)
+{
+ const struct cred *cred;
+
+ spin_lock(&sk->sk_peer_lock);
+ cred = get_cred(sk->sk_peer_cred);
+ spin_unlock(&sk->sk_peer_lock);
+
+ return cred;
+}
static void cred_to_ucred(struct pid *pid, const struct cred *cred,
struct ucred *ucred)
@@ -1552,7 +1639,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
struct ucred peercred;
if (len > sizeof(peercred))
len = sizeof(peercred);
+
+ spin_lock(&sk->sk_peer_lock);
cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
+ spin_unlock(&sk->sk_peer_lock);
+
if (copy_to_user(optval, &peercred, len))
return -EFAULT;
goto lenout;
@@ -1560,20 +1651,23 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
case SO_PEERGROUPS:
{
+ const struct cred *cred;
int ret, n;
- if (!sk->sk_peer_cred)
+ cred = sk_get_peer_cred(sk);
+ if (!cred)
return -ENODATA;
- n = sk->sk_peer_cred->group_info->ngroups;
+ n = cred->group_info->ngroups;
if (len < n * sizeof(gid_t)) {
len = n * sizeof(gid_t);
+ put_cred(cred);
return put_user(len, optlen) ? -EFAULT : -ERANGE;
}
len = n * sizeof(gid_t);
- ret = groups_to_user((gid_t __user *)optval,
- sk->sk_peer_cred->group_info);
+ ret = groups_to_user((gid_t __user *)optval, cred->group_info);
+ put_cred(cred);
if (ret)
return ret;
goto lenout;
@@ -1733,6 +1827,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = sk->sk_userlocks & SOCK_BUF_LOCK_MASK;
break;
+ case SO_RESERVE_MEM:
+ v.val = sk->sk_reserved_mem;
+ break;
+
default:
/* We implement the SO_SNDLOWAT etc to not be settable
* (1003.1g 7).
@@ -1935,9 +2033,10 @@ static void __sk_destruct(struct rcu_head *head)
sk->sk_frag.page = NULL;
}
- if (sk->sk_peer_cred)
- put_cred(sk->sk_peer_cred);
+ /* We do not need to acquire sk->sk_peer_lock; we are the last user. */
+ put_cred(sk->sk_peer_cred);
put_pid(sk->sk_peer_pid);
+
if (likely(sk->sk_net_refcnt))
put_net(sock_net(sk));
sk_prot_free(sk->sk_prot_creator, sk);
@@ -2045,6 +2144,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_dst_pending_confirm = 0;
newsk->sk_wmem_queued = 0;
newsk->sk_forward_alloc = 0;
+ newsk->sk_reserved_mem = 0;
atomic_set(&newsk->sk_drops, 0);
newsk->sk_send_head = NULL;
newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
@@ -3145,6 +3245,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_peer_pid = NULL;
sk->sk_peer_cred = NULL;
+ spin_lock_init(&sk->sk_peer_lock);
+
sk->sk_write_pending = 0;
sk->sk_rcvlowat = 1;
sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
@@ -3179,17 +3281,15 @@ EXPORT_SYMBOL(sock_init_data);
void lock_sock_nested(struct sock *sk, int subclass)
{
+ /* The sk_lock has mutex_lock() semantics here. */
+ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
+
might_sleep();
spin_lock_bh(&sk->sk_lock.slock);
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
- spin_unlock(&sk->sk_lock.slock);
- /*
- * The sk_lock has mutex_lock() semantics here:
- */
- mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
- local_bh_enable();
+ spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(lock_sock_nested);
@@ -3212,42 +3312,37 @@ void release_sock(struct sock *sk)
}
EXPORT_SYMBOL(release_sock);
-/**
- * lock_sock_fast - fast version of lock_sock
- * @sk: socket
- *
- * This version should be used for very small section, where process wont block
- * return false if fast path is taken:
- *
- * sk_lock.slock locked, owned = 0, BH disabled
- *
- * return true if slow path is taken:
- *
- * sk_lock.slock unlocked, owned = 1, BH enabled
- */
-bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
+bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
{
might_sleep();
spin_lock_bh(&sk->sk_lock.slock);
- if (!sk->sk_lock.owned)
+ if (!sk->sk_lock.owned) {
/*
- * Note : We must disable BH
+ * Fast path return with bottom halves disabled and
+ * sock::sk_lock.slock held.
+ *
+ * The 'mutex' is not contended and holding
+ * sock::sk_lock.slock prevents all other lockers from
+ * proceeding, so the corresponding unlock_sock_fast() can
+ * avoid the slow path of release_sock() completely and
+ * just release slock.
+ *
+ * From a semantic POV this is equivalent to 'acquiring'
+ * the 'mutex', hence the corresponding lockdep
+ * mutex_release() has to happen in the fast path of
+ * unlock_sock_fast().
*/
return false;
+ }
__lock_sock(sk);
sk->sk_lock.owned = 1;
- spin_unlock(&sk->sk_lock.slock);
- /*
- * The sk_lock has mutex_lock() semantics here:
- */
- mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
__acquire(&sk->sk_lock.slock);
- local_bh_enable();
+ spin_unlock_bh(&sk->sk_lock.slock);
return true;
}
-EXPORT_SYMBOL(lock_sock_fast);
+EXPORT_SYMBOL(__lock_sock_fast);
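
Callers keep the existing pattern; lock_sock_fast() is assumed to become an inline wrapper that performs the lockdep annotation and then calls __lock_sock_fast(), matching the comment about mutex_release() moving to unlock_sock_fast():

        bool slow = lock_sock_fast(sk);

        /* short critical section; must not sleep when the fast path
         * (slow == false) was taken, since BHs are still disabled
         */
        sk->sk_err = 0;         /* illustrative work */

        unlock_sock_fast(sk, slow);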
int sock_gettstamp(struct socket *sock, void __user *userstamp,
bool timeval, bool time32)
diff --git a/net/core/stream.c b/net/core/stream.c
index 4f1d4aa5fb38..06b36c730ce8 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -195,14 +195,11 @@ void sk_stream_kill_queues(struct sock *sk)
/* First the read buffer. */
__skb_queue_purge(&sk->sk_receive_queue);
- /* Next, the error queue. */
- __skb_queue_purge(&sk->sk_error_queue);
-
/* Next, the write queue. */
WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
/* Account for returned memory. */
- sk_mem_reclaim(sk);
+ sk_mem_reclaim_final(sk);
WARN_ON(sk->sk_wmem_queued);
WARN_ON(sk->sk_forward_alloc);
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index c5c1d2b8045e..5183e627468d 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -48,7 +48,7 @@ extern bool dccp_debug;
extern struct inet_hashinfo dccp_hashinfo;
-extern struct percpu_counter dccp_orphan_count;
+DECLARE_PER_CPU(unsigned int, dccp_orphan_count);
void dccp_time_wait(struct sock *sk, int state, int timeo);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index abb5c596a817..fc44dadc778b 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -42,8 +42,8 @@ DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
EXPORT_SYMBOL_GPL(dccp_statistics);
-struct percpu_counter dccp_orphan_count;
-EXPORT_SYMBOL_GPL(dccp_orphan_count);
+DEFINE_PER_CPU(unsigned int, dccp_orphan_count);
+EXPORT_PER_CPU_SYMBOL_GPL(dccp_orphan_count);
struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);
@@ -1055,7 +1055,7 @@ adjudge_to_death:
bh_lock_sock(sk);
WARN_ON(sock_owned_by_user(sk));
- percpu_counter_inc(sk->sk_prot->orphan_count);
+ this_cpu_inc(dccp_orphan_count);
/* Have we already been destroyed by a softirq or backlog? */
if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
@@ -1115,13 +1115,10 @@ static int __init dccp_init(void)
BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
sizeof_field(struct sk_buff, cb));
- rc = percpu_counter_init(&dccp_orphan_count, 0, GFP_KERNEL);
- if (rc)
- goto out_fail;
inet_hashinfo_init(&dccp_hashinfo);
rc = inet_hashinfo2_init_mod(&dccp_hashinfo);
if (rc)
- goto out_free_percpu;
+ goto out_fail;
rc = -ENOBUFS;
dccp_hashinfo.bind_bucket_cachep =
kmem_cache_create("dccp_bind_bucket",
@@ -1226,8 +1223,6 @@ out_free_bind_bucket_cachep:
kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_hashinfo2:
inet_hashinfo2_free_mod(&dccp_hashinfo);
-out_free_percpu:
- percpu_counter_destroy(&dccp_orphan_count);
out_fail:
dccp_hashinfo.bhash = NULL;
dccp_hashinfo.ehash = NULL;
@@ -1250,7 +1245,6 @@ static void __exit dccp_fini(void)
dccp_ackvec_exit();
dccp_sysctl_exit();
inet_hashinfo2_free_mod(&dccp_hashinfo);
- percpu_counter_destroy(&dccp_orphan_count);
}
module_init(dccp_init);
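
Replacing the percpu_counter with a plain per-CPU unsigned int turns the write side into a single this_cpu_inc() and removes the init/destroy error handling above; readers are assumed to sum the per-CPU slots themselves, roughly:

/* hedged sketch of a reader; the percpu_counter variant used
 * percpu_counter_sum() for the same job
 */
static unsigned int dccp_orphan_sum(void)
{
        unsigned int total = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                total += per_cpu(dccp_orphan_count, cpu);

        return total;
}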
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 548285539752..8cb87b5067ee 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -92,17 +92,8 @@ config NET_DSA_TAG_KSZ
Say Y if you want to enable support for tagging frames for the
Microchip 8795/9477/9893 families of switches.
-config NET_DSA_TAG_RTL4_A
- tristate "Tag driver for Realtek 4 byte protocol A tags"
- help
- Say Y or M if you want to enable support for tagging frames for the
- Realtek switches with 4 byte protocol A tags, sich as found in
- the Realtek RTL8366RB.
-
config NET_DSA_TAG_OCELOT
tristate "Tag driver for Ocelot family of switches, using NPI port"
- depends on MSCC_OCELOT_SWITCH_LIB || \
- (MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST)
select PACKING
help
Say Y or M if you want to enable NPI tagging for the Ocelot switches
@@ -114,8 +105,6 @@ config NET_DSA_TAG_OCELOT
config NET_DSA_TAG_OCELOT_8021Q
tristate "Tag driver for Ocelot family of switches, using VLAN"
- depends on MSCC_OCELOT_SWITCH_LIB || \
- (MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST)
help
Say Y or M if you want to enable support for tagging frames with a
custom VLAN-based header. Frames that require timestamping, such as
@@ -130,6 +119,19 @@ config NET_DSA_TAG_QCA
Say Y or M if you want to enable support for tagging frames for
the Qualcomm Atheros QCA8K switches.
+config NET_DSA_TAG_RTL4_A
+ tristate "Tag driver for Realtek 4 byte protocol A tags"
+ help
+ Say Y or M if you want to enable support for tagging frames for the
+ Realtek switches with 4 byte protocol A tags, such as found in
+ the Realtek RTL8366RB.
+
+config NET_DSA_TAG_RTL8_4
+ tristate "Tag driver for Realtek 8 byte protocol 4 tags"
+ help
+ Say Y or M if you want to enable support for tagging frames for Realtek
+ switches with 8 byte protocol 4 tags, such as the Realtek RTL8365MB-VC.
+
config NET_DSA_TAG_LAN9303
tristate "Tag driver for SMSC/Microchip LAN9303 family of switches"
help
@@ -138,7 +140,6 @@ config NET_DSA_TAG_LAN9303
config NET_DSA_TAG_SJA1105
tristate "Tag driver for NXP SJA1105 switches"
- depends on NET_DSA_SJA1105 || !NET_DSA_SJA1105
select PACKING
help
Say Y or M if you want to enable support for tagging frames with the
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 67ea009f242c..9f75820e7c98 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -10,12 +10,13 @@ obj-$(CONFIG_NET_DSA_TAG_DSA_COMMON) += tag_dsa.o
obj-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o
obj-$(CONFIG_NET_DSA_TAG_HELLCREEK) += tag_hellcreek.o
obj-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o
-obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o
obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
obj-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
obj-$(CONFIG_NET_DSA_TAG_OCELOT) += tag_ocelot.o
obj-$(CONFIG_NET_DSA_TAG_OCELOT_8021Q) += tag_ocelot_8021q.o
obj-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
+obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o
+obj-$(CONFIG_NET_DSA_TAG_RTL8_4) += tag_rtl8_4.o
obj-$(CONFIG_NET_DSA_TAG_SJA1105) += tag_sja1105.o
obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
obj-$(CONFIG_NET_DSA_TAG_XRS700X) += tag_xrs700x.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 41f36ad8b0ec..ea5169e671ae 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -280,23 +280,22 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
}
#ifdef CONFIG_PM_SLEEP
-static bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
+static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
- const struct dsa_port *dp = dsa_to_port(ds, p);
-
return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}
int dsa_switch_suspend(struct dsa_switch *ds)
{
- int i, ret = 0;
+ struct dsa_port *dp;
+ int ret = 0;
/* Suspend slave network devices */
- for (i = 0; i < ds->num_ports; i++) {
- if (!dsa_is_port_initialized(ds, i))
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dsa_port_is_initialized(dp))
continue;
- ret = dsa_slave_suspend(dsa_to_port(ds, i)->slave);
+ ret = dsa_slave_suspend(dp->slave);
if (ret)
return ret;
}
@@ -310,7 +309,8 @@ EXPORT_SYMBOL_GPL(dsa_switch_suspend);
int dsa_switch_resume(struct dsa_switch *ds)
{
- int i, ret = 0;
+ struct dsa_port *dp;
+ int ret = 0;
if (ds->ops->resume)
ret = ds->ops->resume(ds);
@@ -319,11 +319,11 @@ int dsa_switch_resume(struct dsa_switch *ds)
return ret;
/* Resume slave network devices */
- for (i = 0; i < ds->num_ports; i++) {
- if (!dsa_is_port_initialized(ds, i))
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dsa_port_is_initialized(dp))
continue;
- ret = dsa_slave_resume(dsa_to_port(ds, i)->slave);
+ ret = dsa_slave_resume(dp->slave);
if (ret)
return ret;
}
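
The conversions in this file and in the DSA patches below lean on the new dsa_switch_for_each_port() iterator family. Its assumed shape, defined in include/net/dsa.h by this series, filters the tree-wide port list down to the given switch:

/* assumed definition, shown for context */
#define dsa_switch_for_each_port(_dp, _ds) \
        list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
                if ((_dp)->ds == (_ds))

The _safe, _user_port and _cpu_port variants used elsewhere in this diff follow the same filtering pattern.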
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 96f211f52ac3..f5270114dcb8 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -170,7 +170,7 @@ void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num)
/* Check if the bridge is still in use, otherwise it is time
* to clean it up so we can reuse this bridge_num later.
*/
- if (!dsa_bridge_num_find(bridge_dev))
+ if (dsa_bridge_num_find(bridge_dev) < 0)
clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
@@ -399,11 +399,8 @@ static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
if (!dsa_port_is_cpu(cpu_dp))
continue;
- list_for_each_entry(dp, &dst->ports, list) {
- /* Prefer a local CPU port */
- if (dp->ds != cpu_dp->ds)
- continue;
-
+ /* Prefer a local CPU port */
+ dsa_switch_for_each_port(dp, cpu_dp->ds) {
/* Prefer the first local CPU port found */
if (dp->cpu_dp)
continue;
@@ -429,6 +426,7 @@ static int dsa_port_setup(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
bool dsa_port_link_registered = false;
+ struct dsa_switch *ds = dp->ds;
bool dsa_port_enabled = false;
int err = 0;
@@ -438,6 +436,12 @@ static int dsa_port_setup(struct dsa_port *dp)
INIT_LIST_HEAD(&dp->fdbs);
INIT_LIST_HEAD(&dp->mdbs);
+ if (ds->ops->port_setup) {
+ err = ds->ops->port_setup(ds, dp->index);
+ if (err)
+ return err;
+ }
+
switch (dp->type) {
case DSA_PORT_TYPE_UNUSED:
dsa_port_disable(dp);
@@ -480,8 +484,11 @@ static int dsa_port_setup(struct dsa_port *dp)
dsa_port_disable(dp);
if (err && dsa_port_link_registered)
dsa_port_link_unregister_of(dp);
- if (err)
+ if (err) {
+ if (ds->ops->port_teardown)
+ ds->ops->port_teardown(ds, dp->index);
return err;
+ }
dp->setup = true;
@@ -533,11 +540,15 @@ static int dsa_port_devlink_setup(struct dsa_port *dp)
static void dsa_port_teardown(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
+ struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a, *tmp;
if (!dp->setup)
return;
+ if (ds->ops->port_teardown)
+ ds->ops->port_teardown(ds, dp->index);
+
devlink_port_type_clear(dlp);
switch (dp->type) {
@@ -581,6 +592,36 @@ static void dsa_port_devlink_teardown(struct dsa_port *dp)
dp->devlink_port_setup = false;
}
+/* Destroy the current devlink port, and create a new one which has the UNUSED
+ * flavour. At this point, any call to ds->ops->port_setup has been already
+ * balanced out by a call to ds->ops->port_teardown, so we know that any
+ * devlink port regions the driver had are now unregistered. We then call its
+ * ds->ops->port_setup again, in order for the driver to re-create them on the
+ * new devlink port.
+ */
+static int dsa_port_reinit_as_unused(struct dsa_port *dp)
+{
+ struct dsa_switch *ds = dp->ds;
+ int err;
+
+ dsa_port_devlink_teardown(dp);
+ dp->type = DSA_PORT_TYPE_UNUSED;
+ err = dsa_port_devlink_setup(dp);
+ if (err)
+ return err;
+
+ if (ds->ops->port_setup) {
+ /* On error, leave the devlink port registered;
+ * dsa_switch_teardown will clean it up later.
+ */
+ err = ds->ops->port_setup(ds, dp->index);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int dsa_devlink_info_get(struct devlink *dl,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
@@ -758,16 +799,17 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
struct dsa_switch_tree *dst = ds->dst;
- int port, err;
+ struct dsa_port *cpu_dp;
+ int err;
if (tag_ops->proto == dst->default_proto)
return 0;
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
-
- err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ rtnl_lock();
+ err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
+ tag_ops->proto);
+ rtnl_unlock();
if (err) {
dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
tag_ops->name, ERR_PTR(err));
@@ -804,16 +846,13 @@ static int dsa_switch_setup(struct dsa_switch *ds)
dl_priv = devlink_priv(ds->devlink);
dl_priv->ds = ds;
- devlink_register(ds->devlink);
/* Setup devlink port instances now, so that the switch
* setup() can register regions etc, against the ports
*/
- list_for_each_entry(dp, &ds->dst->ports, list) {
- if (dp->ds == ds) {
- err = dsa_port_devlink_setup(dp);
- if (err)
- goto unregister_devlink_ports;
- }
+ dsa_switch_for_each_port(dp, ds) {
+ err = dsa_port_devlink_setup(dp);
+ if (err)
+ goto unregister_devlink_ports;
}
err = dsa_switch_register_notifier(ds);
@@ -830,10 +869,8 @@ static int dsa_switch_setup(struct dsa_switch *ds)
if (err)
goto teardown;
- devlink_params_publish(ds->devlink);
-
if (!ds->slave_mii_bus && ds->ops->phy_read) {
- ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+ ds->slave_mii_bus = mdiobus_alloc();
if (!ds->slave_mii_bus) {
err = -ENOMEM;
goto teardown;
@@ -843,23 +880,24 @@ static int dsa_switch_setup(struct dsa_switch *ds)
err = mdiobus_register(ds->slave_mii_bus);
if (err < 0)
- goto teardown;
+ goto free_slave_mii_bus;
}
ds->setup = true;
-
+ devlink_register(ds->devlink);
return 0;
+free_slave_mii_bus:
+ if (ds->slave_mii_bus && ds->ops->phy_read)
+ mdiobus_free(ds->slave_mii_bus);
teardown:
if (ds->ops->teardown)
ds->ops->teardown(ds);
unregister_notifier:
dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
- list_for_each_entry(dp, &ds->dst->ports, list)
- if (dp->ds == ds)
- dsa_port_devlink_teardown(dp);
- devlink_unregister(ds->devlink);
+ dsa_switch_for_each_port(dp, ds)
+ dsa_port_devlink_teardown(dp);
devlink_free(ds->devlink);
ds->devlink = NULL;
return err;
@@ -872,19 +910,23 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
if (!ds->setup)
return;
- if (ds->slave_mii_bus && ds->ops->phy_read)
- mdiobus_unregister(ds->slave_mii_bus);
+ if (ds->devlink)
+ devlink_unregister(ds->devlink);
- dsa_switch_unregister_notifier(ds);
+ if (ds->slave_mii_bus && ds->ops->phy_read) {
+ mdiobus_unregister(ds->slave_mii_bus);
+ mdiobus_free(ds->slave_mii_bus);
+ ds->slave_mii_bus = NULL;
+ }
if (ds->ops->teardown)
ds->ops->teardown(ds);
+ dsa_switch_unregister_notifier(ds);
+
if (ds->devlink) {
- list_for_each_entry(dp, &ds->dst->ports, list)
- if (dp->ds == ds)
- dsa_port_devlink_teardown(dp);
- devlink_unregister(ds->devlink);
+ dsa_switch_for_each_port(dp, ds)
+ dsa_port_devlink_teardown(dp);
devlink_free(ds->devlink);
ds->devlink = NULL;
}
@@ -933,12 +975,9 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
list_for_each_entry(dp, &dst->ports, list) {
err = dsa_port_setup(dp);
if (err) {
- dsa_port_devlink_teardown(dp);
- dp->type = DSA_PORT_TYPE_UNUSED;
- err = dsa_port_devlink_setup(dp);
+ err = dsa_port_reinit_as_unused(dp);
if (err)
goto teardown;
- continue;
}
}
@@ -1043,6 +1082,7 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
teardown_master:
dsa_tree_teardown_master(dst);
teardown_switches:
+ dsa_tree_teardown_ports(dst);
dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
dsa_tree_teardown_cpu_ports(dst);
@@ -1102,7 +1142,7 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
goto out_unlock;
list_for_each_entry(dp, &dst->ports, list) {
- if (!dsa_is_user_port(dp->ds, dp->index))
+ if (!dsa_port_is_user(dp))
continue;
if (dp->slave->flags & IFF_UP)
@@ -1133,8 +1173,8 @@ static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
struct dsa_switch_tree *dst = ds->dst;
struct dsa_port *dp;
- list_for_each_entry(dp, &dst->ports, list)
- if (dp->ds == ds && dp->index == index)
+ dsa_switch_for_each_port(dp, ds)
+ if (dp->index == index)
return dp;
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
@@ -1319,12 +1359,15 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &reg);
- if (err)
+ if (err) {
+ of_node_put(port);
goto out_put_node;
+ }
if (reg >= ds->num_ports) {
dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
port, reg, ds->num_ports);
+ of_node_put(port);
err = -EINVAL;
goto out_put_node;
}
@@ -1332,8 +1375,10 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
dp = dsa_to_port(ds, reg);
err = dsa_port_parse_of(dp, port);
- if (err)
+ if (err) {
+ of_node_put(port);
goto out_put_node;
+ }
}
out_put_node:
@@ -1475,12 +1520,9 @@ static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
static void dsa_switch_release_ports(struct dsa_switch *ds)
{
- struct dsa_switch_tree *dst = ds->dst;
struct dsa_port *dp, *next;
- list_for_each_entry_safe(dp, next, &dst->ports, list) {
- if (dp->ds != ds)
- continue;
+ dsa_switch_for_each_port_safe(dp, next, ds) {
list_del(&dp->list);
kfree(dp);
}
@@ -1557,3 +1599,47 @@ void dsa_unregister_switch(struct dsa_switch *ds)
mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);
+
+/* If the DSA master chooses to unregister its net_device on .shutdown, DSA
+ * blocks that operation from completing, due to the dev_hold taken inside
+ * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
+ * the DSA master, so that the system can reboot successfully.
+ */
+void dsa_switch_shutdown(struct dsa_switch *ds)
+{
+ struct net_device *master, *slave_dev;
+ LIST_HEAD(unregister_list);
+ struct dsa_port *dp;
+
+ mutex_lock(&dsa2_mutex);
+ rtnl_lock();
+
+ dsa_switch_for_each_user_port(dp, ds) {
+ master = dp->cpu_dp->master;
+ slave_dev = dp->slave;
+
+ netdev_upper_dev_unlink(master, slave_dev);
+ /* Just unlinking ourselves as uppers of the master is not
+ * sufficient. When the master net device unregisters, that will
+ * also call dev_close, which we will catch as NETDEV_GOING_DOWN
+ * and trigger a dev_close on our own devices (dsa_slave_close).
+ * In turn, that will call dev_mc_unsync on the master's net
+ * device. If the master is also a DSA switch port, this will
+ * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
+ * its own master. Lockdep will complain about the fact that
+ * all cascaded masters have the same dsa_master_addr_list_lock_key,
+ * which it normally would not do if the cascaded masters were
+ * in a proper upper/lower relationship, which we've just
+ * destroyed.
+ * To suppress the lockdep warnings, let's actually unregister
+ * the DSA slave interfaces too, to avoid the nonsensical
+ * multicast address list synchronization on shutdown.
+ */
+ unregister_netdevice_queue(slave_dev, &unregister_list);
+ }
+ unregister_netdevice_many(&unregister_list);
+
+ rtnl_unlock();
+ mutex_unlock(&dsa2_mutex);
+}
+EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
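
Drivers wire this into their bus-level .shutdown hook so that a reboot with a live DSA master can complete; a hedged sketch for an MDIO-attached switch (the foo_* names are illustrative):

static void foo_mdio_shutdown(struct mdio_device *mdiodev)
{
        struct foo_priv *priv = dev_get_drvdata(&mdiodev->dev);

        if (!priv)
                return;

        dsa_switch_shutdown(priv->ds);

        /* make a later .remove a no-op after shutdown */
        dev_set_drvdata(&mdiodev->dev, NULL);
}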
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 616330a16d31..bf671306b560 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -515,14 +515,15 @@ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dp->ds;
- int err, i;
+ struct dsa_port *other_dp;
+ int err;
/* VLAN awareness was off, so the question is "can we turn it on".
* We may have had 8021q uppers, those need to go. Make sure we don't
* enter an inconsistent state: deny changing the VLAN awareness state
* as long as we have 8021q uppers.
*/
- if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
+ if (vlan_filtering && dsa_port_is_user(dp)) {
struct net_device *upper_dev, *slave = dp->slave;
struct net_device *br = dp->bridge_dev;
struct list_head *iter;
@@ -557,10 +558,10 @@ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
* different ports of the same switch device and one of them has a
* different setting than what is being requested.
*/
- for (i = 0; i < ds->num_ports; i++) {
+ dsa_switch_for_each_port(other_dp, ds) {
struct net_device *other_bridge;
- other_bridge = dsa_to_port(ds, i)->bridge_dev;
+ other_bridge = other_dp->bridge_dev;
if (!other_bridge)
continue;
/* If it's the same bridge, it also has same
@@ -607,20 +608,16 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
return err;
if (ds->vlan_filtering_is_global) {
- int port;
+ struct dsa_port *other_dp;
ds->vlan_filtering = vlan_filtering;
- for (port = 0; port < ds->num_ports; port++) {
- struct net_device *slave;
-
- if (!dsa_is_user_port(ds, port))
- continue;
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *slave = other_dp->slave;
/* We might be called in the unbind path, so not
* all slave devices might still be registered.
*/
- slave = dsa_to_port(ds, port)->slave;
if (!slave)
continue;
@@ -1041,7 +1038,7 @@ static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
struct phy_device *phydev = NULL;
struct dsa_switch *ds = dp->ds;
- if (dsa_is_user_port(ds, dp->index))
+ if (dsa_port_is_user(dp))
phydev = dp->slave->phydev;
if (!ds->ops->phylink_mac_link_down) {
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a2bf2d8ac65b..9d9fef668eba 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -174,7 +174,7 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
dev_uc_del(master, dev->dev_addr);
out:
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -789,6 +789,37 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
return -EOPNOTSUPP;
}
+static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_eth_phy_stats)
+ ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
+}
+
+static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_eth_mac_stats)
+ ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
+}
+
+static void
+dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_eth_ctrl_stats)
+ ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
+}
+
static void dsa_slave_net_selftest(struct net_device *ndev,
struct ethtool_test *etest, u64 *buf)
{
@@ -1695,6 +1726,9 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_strings = dsa_slave_get_strings,
.get_ethtool_stats = dsa_slave_get_ethtool_stats,
.get_sset_count = dsa_slave_get_sset_count,
+ .get_eth_phy_stats = dsa_slave_get_eth_phy_stats,
+ .get_eth_mac_stats = dsa_slave_get_eth_mac_stats,
+ .get_eth_ctrl_stats = dsa_slave_get_eth_ctrl_stats,
.set_wol = dsa_slave_set_wol,
.get_wol = dsa_slave_get_wol,
.set_eee = dsa_slave_set_eee,
@@ -1954,7 +1988,7 @@ int dsa_slave_create(struct dsa_port *port)
slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
if (!is_zero_ether_addr(port->mac))
- ether_addr_copy(slave_dev->dev_addr, port->mac);
+ eth_hw_addr_set(slave_dev, port->mac);
else
eth_hw_addr_inherit(slave_dev, master);
slave_dev->priv_flags |= IFF_NO_QUEUE;
@@ -2334,7 +2368,7 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
dst = cpu_dp->ds->dst;
list_for_each_entry(dp, &dst->ports, list) {
- if (!dsa_is_user_port(dp->ds, dp->index))
+ if (!dsa_port_is_user(dp))
continue;
list_add(&dp->slave->close_list, &close_list);
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 1c797ec8e2c2..2b1b21bde830 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -17,14 +17,11 @@
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
- int i;
-
- for (i = 0; i < ds->num_ports; ++i) {
- struct dsa_port *dp = dsa_to_port(ds, i);
+ struct dsa_port *dp;
+ dsa_switch_for_each_port(dp, ds)
if (dp->ageing_time && dp->ageing_time < ageing_time)
ageing_time = dp->ageing_time;
- }
return ageing_time;
}
@@ -49,10 +46,10 @@ static int dsa_switch_ageing_time(struct dsa_switch *ds,
return 0;
}
-static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
- struct dsa_notifier_mtu_info *info)
+static bool dsa_port_mtu_match(struct dsa_port *dp,
+ struct dsa_notifier_mtu_info *info)
{
- if (ds->index == info->sw_index && port == info->port)
+ if (dp->ds->index == info->sw_index && dp->index == info->port)
return true;
/* Do not propagate to other switches in the tree if the notifier was
@@ -61,7 +58,7 @@ static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
if (info->targeted_match)
return false;
- if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
return true;
return false;
@@ -70,14 +67,16 @@ static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
static int dsa_switch_mtu(struct dsa_switch *ds,
struct dsa_notifier_mtu_info *info)
{
- int port, ret;
+ struct dsa_port *dp;
+ int ret;
if (!ds->ops->port_change_mtu)
return -EOPNOTSUPP;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_mtu_match(ds, port, info)) {
- ret = ds->ops->port_change_mtu(ds, port, info->mtu);
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_mtu_match(dp, info)) {
+ ret = ds->ops->port_change_mtu(ds, dp->index,
+ info->mtu);
if (ret)
return ret;
}
@@ -120,7 +119,8 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
struct netlink_ext_ack extack = {0};
bool change_vlan_filtering = false;
bool vlan_filtering;
- int err, port;
+ struct dsa_port *dp;
+ int err;
if (dst->index == info->tree_index && ds->index == info->sw_index &&
ds->ops->port_bridge_leave)
@@ -150,10 +150,10 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
* VLAN-aware bridge.
*/
if (change_vlan_filtering && ds->vlan_filtering_is_global) {
- for (port = 0; port < ds->num_ports; port++) {
+ dsa_switch_for_each_port(dp, ds) {
struct net_device *bridge_dev;
- bridge_dev = dsa_to_port(ds, port)->bridge_dev;
+ bridge_dev = dp->bridge_dev;
if (bridge_dev && br_vlan_enabled(bridge_dev)) {
change_vlan_filtering = false;
@@ -168,7 +168,7 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
if (extack._msg)
dev_err(ds->dev, "port %d: %s\n", info->port,
extack._msg);
- if (err && err != EOPNOTSUPP)
+ if (err && err != -EOPNOTSUPP)
return err;
}
@@ -179,19 +179,19 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
* DSA links) that sit between the targeted port on which the notifier was
* emitted and its dedicated CPU port.
*/
-static bool dsa_switch_host_address_match(struct dsa_switch *ds, int port,
- int info_sw_index, int info_port)
+static bool dsa_port_host_address_match(struct dsa_port *dp,
+ int info_sw_index, int info_port)
{
struct dsa_port *targeted_dp, *cpu_dp;
struct dsa_switch *targeted_ds;
- targeted_ds = dsa_switch_find(ds->dst->index, info_sw_index);
+ targeted_ds = dsa_switch_find(dp->ds->dst->index, info_sw_index);
targeted_dp = dsa_to_port(targeted_ds, info_port);
cpu_dp = targeted_dp->cpu_dp;
- if (dsa_switch_is_upstream_of(ds, targeted_ds))
- return port == dsa_towards_port(ds, cpu_dp->ds->index,
- cpu_dp->index);
+ if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
+ return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
+ cpu_dp->index);
return false;
}
@@ -209,11 +209,12 @@ static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
return NULL;
}
-static int dsa_switch_do_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int dsa_port_do_mdb_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a;
+ int port = dp->index;
int err;
/* No need to bother with refcounting for user ports */
@@ -244,11 +245,12 @@ static int dsa_switch_do_mdb_add(struct dsa_switch *ds, int port,
return 0;
}
-static int dsa_switch_do_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int dsa_port_do_mdb_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a;
+ int port = dp->index;
int err;
/* No need to bother with refcounting for user ports */
@@ -274,11 +276,12 @@ static int dsa_switch_do_mdb_del(struct dsa_switch *ds, int port,
return 0;
}
-static int dsa_switch_do_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a;
+ int port = dp->index;
int err;
/* No need to bother with refcounting for user ports */
@@ -309,11 +312,12 @@ static int dsa_switch_do_fdb_add(struct dsa_switch *ds, int port,
return 0;
}
-static int dsa_switch_do_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a;
+ int port = dp->index;
int err;
/* No need to bother with refcounting for user ports */
@@ -342,17 +346,16 @@ static int dsa_switch_do_fdb_del(struct dsa_switch *ds, int port,
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
+ struct dsa_port *dp;
int err = 0;
- int port;
if (!ds->ops->port_fdb_add)
return -EOPNOTSUPP;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_host_address_match(ds, port, info->sw_index,
- info->port)) {
- err = dsa_switch_do_fdb_add(ds, port, info->addr,
- info->vid);
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_host_address_match(dp, info->sw_index,
+ info->port)) {
+ err = dsa_port_do_fdb_add(dp, info->addr, info->vid);
if (err)
break;
}
@@ -364,17 +367,16 @@ static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
+ struct dsa_port *dp;
int err = 0;
- int port;
if (!ds->ops->port_fdb_del)
return -EOPNOTSUPP;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_host_address_match(ds, port, info->sw_index,
- info->port)) {
- err = dsa_switch_do_fdb_del(ds, port, info->addr,
- info->vid);
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_host_address_match(dp, info->sw_index,
+ info->port)) {
+ err = dsa_port_do_fdb_del(dp, info->addr, info->vid);
if (err)
break;
}
@@ -387,22 +389,24 @@ static int dsa_switch_fdb_add(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
int port = dsa_towards_port(ds, info->sw_index, info->port);
+ struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_fdb_add)
return -EOPNOTSUPP;
- return dsa_switch_do_fdb_add(ds, port, info->addr, info->vid);
+ return dsa_port_do_fdb_add(dp, info->addr, info->vid);
}
static int dsa_switch_fdb_del(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
int port = dsa_towards_port(ds, info->sw_index, info->port);
+ struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_fdb_del)
return -EOPNOTSUPP;
- return dsa_switch_do_fdb_del(ds, port, info->addr, info->vid);
+ return dsa_port_do_fdb_del(dp, info->addr, info->vid);
}
static int dsa_switch_hsr_join(struct dsa_switch *ds,
@@ -468,37 +472,39 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds,
struct dsa_notifier_mdb_info *info)
{
int port = dsa_towards_port(ds, info->sw_index, info->port);
+ struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_mdb_add)
return -EOPNOTSUPP;
- return dsa_switch_do_mdb_add(ds, port, info->mdb);
+ return dsa_port_do_mdb_add(dp, info->mdb);
}
static int dsa_switch_mdb_del(struct dsa_switch *ds,
struct dsa_notifier_mdb_info *info)
{
int port = dsa_towards_port(ds, info->sw_index, info->port);
+ struct dsa_port *dp = dsa_to_port(ds, port);
if (!ds->ops->port_mdb_del)
return -EOPNOTSUPP;
- return dsa_switch_do_mdb_del(ds, port, info->mdb);
+ return dsa_port_do_mdb_del(dp, info->mdb);
}
static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
struct dsa_notifier_mdb_info *info)
{
+ struct dsa_port *dp;
int err = 0;
- int port;
if (!ds->ops->port_mdb_add)
return -EOPNOTSUPP;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_host_address_match(ds, port, info->sw_index,
- info->port)) {
- err = dsa_switch_do_mdb_add(ds, port, info->mdb);
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_host_address_match(dp, info->sw_index,
+ info->port)) {
+ err = dsa_port_do_mdb_add(dp, info->mdb);
if (err)
break;
}
@@ -510,16 +516,16 @@ static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
struct dsa_notifier_mdb_info *info)
{
+ struct dsa_port *dp;
int err = 0;
- int port;
if (!ds->ops->port_mdb_del)
return -EOPNOTSUPP;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_host_address_match(ds, port, info->sw_index,
- info->port)) {
- err = dsa_switch_do_mdb_del(ds, port, info->mdb);
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_host_address_match(dp, info->sw_index,
+ info->port)) {
+ err = dsa_port_do_mdb_del(dp, info->mdb);
if (err)
break;
}
@@ -528,13 +534,13 @@ static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
return err;
}
-static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
- struct dsa_notifier_vlan_info *info)
+static bool dsa_port_vlan_match(struct dsa_port *dp,
+ struct dsa_notifier_vlan_info *info)
{
- if (ds->index == info->sw_index && port == info->port)
+ if (dp->ds->index == info->sw_index && dp->index == info->port)
return true;
- if (dsa_is_dsa_port(ds, port))
+ if (dsa_port_is_dsa(dp))
return true;
return false;
@@ -543,14 +549,15 @@ static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
static int dsa_switch_vlan_add(struct dsa_switch *ds,
struct dsa_notifier_vlan_info *info)
{
- int port, err;
+ struct dsa_port *dp;
+ int err;
if (!ds->ops->port_vlan_add)
return -EOPNOTSUPP;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_vlan_match(ds, port, info)) {
- err = ds->ops->port_vlan_add(ds, port, info->vlan,
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_vlan_match(dp, info)) {
+ err = ds->ops->port_vlan_add(ds, dp->index, info->vlan,
info->extack);
if (err)
return err;
@@ -579,38 +586,34 @@ static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
struct dsa_notifier_tag_proto_info *info)
{
const struct dsa_device_ops *tag_ops = info->tag_ops;
- int port, err;
+ struct dsa_port *dp, *cpu_dp;
+ int err;
if (!ds->ops->change_tag_protocol)
return -EOPNOTSUPP;
ASSERT_RTNL();
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
-
- err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
+ tag_ops->proto);
if (err)
return err;
- dsa_port_set_tag_protocol(dsa_to_port(ds, port), tag_ops);
+ dsa_port_set_tag_protocol(cpu_dp, tag_ops);
}
/* Now that changing the tag protocol can no longer fail, let's update
* the remaining bits which are "duplicated for faster access", and the
* bits that depend on the tagger, such as the MTU.
*/
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_user_port(ds, port)) {
- struct net_device *slave;
+ dsa_switch_for_each_user_port(dp, ds) {
+ struct net_device *slave = dp->slave;
- slave = dsa_to_port(ds, port)->slave;
- dsa_slave_setup_tagger(slave);
+ dsa_slave_setup_tagger(slave);
- /* rtnl_mutex is held in dsa_tree_change_tag_proto */
- dsa_slave_change_mtu(slave, slave->mtu);
- }
+ /* rtnl_mutex is held in dsa_tree_change_tag_proto */
+ dsa_slave_change_mtu(slave, slave->mtu);
}
return 0;
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index f8f7b7c34e7d..72cac2c0af7b 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -6,7 +6,6 @@
* dsa_8021q_netdev_ops is registered for API compliance and not used
* directly by callers.
*/
-#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/dsa/8021q.h>
@@ -78,22 +77,22 @@ EXPORT_SYMBOL_GPL(dsa_8021q_bridge_tx_fwd_offload_vid);
/* Returns the VID to be inserted into the frame from xmit for switch steering
* instructions on egress. Encodes switch ID and port ID.
*/
-u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port)
+u16 dsa_tag_8021q_tx_vid(const struct dsa_port *dp)
{
- return DSA_8021Q_DIR_TX | DSA_8021Q_SWITCH_ID(ds->index) |
- DSA_8021Q_PORT(port);
+ return DSA_8021Q_DIR_TX | DSA_8021Q_SWITCH_ID(dp->ds->index) |
+ DSA_8021Q_PORT(dp->index);
}
-EXPORT_SYMBOL_GPL(dsa_8021q_tx_vid);
+EXPORT_SYMBOL_GPL(dsa_tag_8021q_tx_vid);
/* Returns the VID that will be installed as pvid for this switch port, sent as
* tagged egress towards the CPU port and decoded by the rcv function.
*/
-u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port)
+u16 dsa_tag_8021q_rx_vid(const struct dsa_port *dp)
{
- return DSA_8021Q_DIR_RX | DSA_8021Q_SWITCH_ID(ds->index) |
- DSA_8021Q_PORT(port);
+ return DSA_8021Q_DIR_RX | DSA_8021Q_SWITCH_ID(dp->ds->index) |
+ DSA_8021Q_PORT(dp->index);
}
-EXPORT_SYMBOL_GPL(dsa_8021q_rx_vid);
+EXPORT_SYMBOL_GPL(dsa_tag_8021q_rx_vid);
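
Both helpers pack a direction bit, the switch ID and the port number into a single VID, and the decode accessors below recover the fields, so the encoding round-trips (sketch, assuming dp is a valid user port):

        u16 rx_vid = dsa_tag_8021q_rx_vid(dp);

        WARN_ON(dsa_8021q_rx_switch_id(rx_vid) != dp->ds->index);
        WARN_ON(dsa_8021q_rx_source_port(rx_vid) != dp->index);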
/* Returns the decoded switch ID from the RX VID. */
int dsa_8021q_rx_switch_id(u16 vid)
@@ -139,12 +138,13 @@ dsa_tag_8021q_vlan_find(struct dsa_8021q_context *ctx, int port, u16 vid)
return NULL;
}
-static int dsa_switch_do_tag_8021q_vlan_add(struct dsa_switch *ds, int port,
- u16 vid, u16 flags)
+static int dsa_port_do_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid,
+ u16 flags)
{
- struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_8021q_context *ctx = dp->ds->tag_8021q_ctx;
+ struct dsa_switch *ds = dp->ds;
struct dsa_tag_8021q_vlan *v;
+ int port = dp->index;
int err;
/* No need to bother with refcounting for user ports */
@@ -175,12 +175,12 @@ static int dsa_switch_do_tag_8021q_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-static int dsa_switch_do_tag_8021q_vlan_del(struct dsa_switch *ds, int port,
- u16 vid)
+static int dsa_port_do_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid)
{
- struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_8021q_context *ctx = dp->ds->tag_8021q_ctx;
+ struct dsa_switch *ds = dp->ds;
struct dsa_tag_8021q_vlan *v;
+ int port = dp->index;
int err;
/* No need to bother with refcounting for user ports */
@@ -207,14 +207,16 @@ static int dsa_switch_do_tag_8021q_vlan_del(struct dsa_switch *ds, int port,
}
static bool
-dsa_switch_tag_8021q_vlan_match(struct dsa_switch *ds, int port,
- struct dsa_notifier_tag_8021q_vlan_info *info)
+dsa_port_tag_8021q_vlan_match(struct dsa_port *dp,
+ struct dsa_notifier_tag_8021q_vlan_info *info)
{
- if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ struct dsa_switch *ds = dp->ds;
+
+ if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
return true;
if (ds->dst->index == info->tree_index && ds->index == info->sw_index)
- return port == info->port;
+ return dp->index == info->port;
return false;
}
@@ -222,7 +224,8 @@ dsa_switch_tag_8021q_vlan_match(struct dsa_switch *ds, int port,
int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
struct dsa_notifier_tag_8021q_vlan_info *info)
{
- int port, err;
+ struct dsa_port *dp;
+ int err;
/* Since we use dsa_broadcast(), there might be other switches in other
* trees which don't support tag_8021q, so don't return an error.
@@ -232,21 +235,20 @@ int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
if (!ds->ops->tag_8021q_vlan_add || !ds->tag_8021q_ctx)
return 0;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_tag_8021q_vlan_match(ds, port, info)) {
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_tag_8021q_vlan_match(dp, info)) {
u16 flags = 0;
- if (dsa_is_user_port(ds, port))
+ if (dsa_port_is_user(dp))
flags |= BRIDGE_VLAN_INFO_UNTAGGED;
if (vid_is_dsa_8021q_rxvlan(info->vid) &&
dsa_8021q_rx_switch_id(info->vid) == ds->index &&
- dsa_8021q_rx_source_port(info->vid) == port)
+ dsa_8021q_rx_source_port(info->vid) == dp->index)
flags |= BRIDGE_VLAN_INFO_PVID;
- err = dsa_switch_do_tag_8021q_vlan_add(ds, port,
- info->vid,
- flags);
+ err = dsa_port_do_tag_8021q_vlan_add(dp, info->vid,
+ flags);
if (err)
return err;
}
@@ -258,15 +260,15 @@ int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
struct dsa_notifier_tag_8021q_vlan_info *info)
{
- int port, err;
+ struct dsa_port *dp;
+ int err;
if (!ds->ops->tag_8021q_vlan_del || !ds->tag_8021q_ctx)
return 0;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_switch_tag_8021q_vlan_match(ds, port, info)) {
- err = dsa_switch_do_tag_8021q_vlan_del(ds, port,
- info->vid);
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_tag_8021q_vlan_match(dp, info)) {
+ err = dsa_port_do_tag_8021q_vlan_del(dp, info->vid);
if (err)
return err;
}
@@ -322,15 +324,14 @@ int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
* +-+-----+-+-----+-+-----+-+-----+-+ +-+-----+-+-----+-+-----+-+-----+-+
* swp0 swp1 swp2 swp3 swp0 swp1 swp2 swp3
*/
-static bool dsa_tag_8021q_bridge_match(struct dsa_switch *ds, int port,
- struct dsa_notifier_bridge_info *info)
+static bool
+dsa_port_tag_8021q_bridge_match(struct dsa_port *dp,
+ struct dsa_notifier_bridge_info *info)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
-
/* Don't match on self */
- if (ds->dst->index == info->tree_index &&
- ds->index == info->sw_index &&
- port == info->port)
+ if (dp->ds->dst->index == info->tree_index &&
+ dp->ds->index == info->sw_index &&
+ dp->index == info->port)
return false;
if (dsa_port_is_user(dp))
@@ -344,21 +345,21 @@ int dsa_tag_8021q_bridge_join(struct dsa_switch *ds,
{
struct dsa_switch *targeted_ds;
struct dsa_port *targeted_dp;
+ struct dsa_port *dp;
u16 targeted_rx_vid;
- int err, port;
+ int err;
if (!ds->tag_8021q_ctx)
return 0;
targeted_ds = dsa_switch_find(info->tree_index, info->sw_index);
targeted_dp = dsa_to_port(targeted_ds, info->port);
- targeted_rx_vid = dsa_8021q_rx_vid(targeted_ds, info->port);
+ targeted_rx_vid = dsa_tag_8021q_rx_vid(targeted_dp);
- for (port = 0; port < ds->num_ports; port++) {
- struct dsa_port *dp = dsa_to_port(ds, port);
- u16 rx_vid = dsa_8021q_rx_vid(ds, port);
+ dsa_switch_for_each_port(dp, ds) {
+ u16 rx_vid = dsa_tag_8021q_rx_vid(dp);
- if (!dsa_tag_8021q_bridge_match(ds, port, info))
+ if (!dsa_port_tag_8021q_bridge_match(dp, info))
continue;
/* Install the RX VID of the targeted port in our VLAN table */
@@ -380,21 +381,20 @@ int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds,
{
struct dsa_switch *targeted_ds;
struct dsa_port *targeted_dp;
+ struct dsa_port *dp;
u16 targeted_rx_vid;
- int port;
if (!ds->tag_8021q_ctx)
return 0;
targeted_ds = dsa_switch_find(info->tree_index, info->sw_index);
targeted_dp = dsa_to_port(targeted_ds, info->port);
- targeted_rx_vid = dsa_8021q_rx_vid(targeted_ds, info->port);
+ targeted_rx_vid = dsa_tag_8021q_rx_vid(targeted_dp);
- for (port = 0; port < ds->num_ports; port++) {
- struct dsa_port *dp = dsa_to_port(ds, port);
- u16 rx_vid = dsa_8021q_rx_vid(ds, port);
+ dsa_switch_for_each_port(dp, ds) {
+ u16 rx_vid = dsa_tag_8021q_rx_vid(dp);
- if (!dsa_tag_8021q_bridge_match(ds, port, info))
+ if (!dsa_port_tag_8021q_bridge_match(dp, info))
continue;
/* Remove the RX VID of the targeted port from our VLAN table */
@@ -433,8 +433,8 @@ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port)
{
struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
struct dsa_port *dp = dsa_to_port(ds, port);
- u16 rx_vid = dsa_8021q_rx_vid(ds, port);
- u16 tx_vid = dsa_8021q_tx_vid(ds, port);
+ u16 rx_vid = dsa_tag_8021q_rx_vid(dp);
+ u16 tx_vid = dsa_tag_8021q_tx_vid(dp);
struct net_device *master;
int err;
@@ -478,8 +478,8 @@ static void dsa_tag_8021q_port_teardown(struct dsa_switch *ds, int port)
{
struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
struct dsa_port *dp = dsa_to_port(ds, port);
- u16 rx_vid = dsa_8021q_rx_vid(ds, port);
- u16 tx_vid = dsa_8021q_tx_vid(ds, port);
+ u16 rx_vid = dsa_tag_8021q_rx_vid(dp);
+ u16 tx_vid = dsa_tag_8021q_tx_vid(dp);
struct net_device *master;
/* The CPU port is implicitly configured by
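The VID encoding used by dsa_tag_8021q_rx_vid()/dsa_tag_8021q_tx_vid() is easy to exercise standalone. A minimal userspace sketch, assuming the DSA_8021Q_* layout of this kernel (direction in bits 11:10 with RX=1 and TX=2, switch ID in bits 8:6, port in bits 3:0); the macros below are illustrative stand-ins, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define DIR_SHIFT       10      /* bits 11:10: 1 = RX (towards CPU), 2 = TX */
#define SWITCH_SHIFT    6       /* bits 8:6: switch index within the tree */
#define PORT_MASK       0x0f    /* bits 3:0: port index on that switch */

/* Assumed layout; mirrors the DSA_8021Q_* macros, not a kernel API. */
static uint16_t tag_8021q_vid(unsigned int dir, unsigned int sw, unsigned int port)
{
        return (uint16_t)((dir << DIR_SHIFT) | (sw << SWITCH_SHIFT) |
                          (port & PORT_MASK));
}

int main(void)
{
        uint16_t rx_vid = tag_8021q_vid(1, 0, 2);       /* RX VID of sw0p2 */
        uint16_t tx_vid = tag_8021q_vid(2, 0, 2);       /* TX VID of sw0p2 */

        printf("rx_vid=%u tx_vid=%u port=%u\n",
               rx_vid, tx_vid, rx_vid & PORT_MASK);     /* 1026, 2050, 2 */
        return 0;
}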
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 77d0ce89ab77..b3da4b2ea11c 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -45,6 +45,7 @@
* 6 6 2 2 4 2 N
*/
+#include <linux/dsa/mv88e6xxx.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
@@ -129,12 +130,9 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
u8 tag_dev, tag_port;
enum dsa_cmd cmd;
u8 *dsa_header;
- u16 pvid = 0;
- int err;
if (skb->offload_fwd_mark) {
struct dsa_switch_tree *dst = dp->ds->dst;
- struct net_device *br = dp->bridge_dev;
cmd = DSA_CMD_FORWARD;
@@ -144,19 +142,6 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
*/
tag_dev = dst->last_switch + 1 + dp->bridge_num;
tag_port = 0;
-
- /* If we are offloading forwarding for a VLAN-unaware bridge,
- * inject packets to hardware using the bridge's pvid, since
- * that's where the packets ingressed from.
- */
- if (!br_vlan_enabled(br)) {
- /* Safe because __dev_queue_xmit() runs under
- * rcu_read_lock_bh()
- */
- err = br_vlan_get_pvid_rcu(br, &pvid);
- if (err)
- return NULL;
- }
} else {
cmd = DSA_CMD_FROM_CPU;
tag_dev = dp->ds->index;
@@ -180,16 +165,21 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
dsa_header[2] &= ~0x10;
}
} else {
+ struct net_device *br = dp->bridge_dev;
+ u16 vid;
+
+ vid = br ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE;
+
skb_push(skb, DSA_HLEN + extra);
dsa_alloc_etype_header(skb, DSA_HLEN + extra);
- /* Construct untagged DSA tag. */
+ /* Construct DSA header from untagged frame. */
dsa_header = dsa_etype_header_pos_tx(skb) + extra;
dsa_header[0] = (cmd << 6) | tag_dev;
dsa_header[1] = tag_port << 3;
- dsa_header[2] = pvid >> 8;
- dsa_header[3] = pvid & 0xff;
+ dsa_header[2] = vid >> 8;
+ dsa_header[3] = vid & 0xff;
}
return skb;
@@ -210,7 +200,7 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
cmd = dsa_header[0] >> 6;
switch (cmd) {
case DSA_CMD_FORWARD:
- trunk = !!(dsa_header[1] & 7);
+ trunk = !!(dsa_header[1] & 4);
break;
case DSA_CMD_TO_CPU:
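With the bridge pvid lookup gone, FROM_CPU frames are always injected with one of two reserved VIDs. A standalone sketch of how dsa_xmit_ll() packs the 4-byte DSA header; the command encoding is assumed to mirror this tagger's enum dsa_cmd, and 4095 is only a placeholder since the hunk does not show the MV88E6XXX_VID_* values:

#include <stdint.h>
#include <stdio.h>

enum dsa_cmd {  /* assumed to mirror the enum in tag_dsa.c */
        DSA_CMD_TO_CPU = 0, DSA_CMD_FROM_CPU = 1,
        DSA_CMD_TO_SNIFFER = 2, DSA_CMD_FORWARD = 3,
};

static void pack_dsa_header(uint8_t hdr[4], enum dsa_cmd cmd,
                            uint8_t dev, uint8_t port, uint16_t vid)
{
        hdr[0] = (uint8_t)(cmd << 6) | dev;     /* command + device index */
        hdr[1] = (uint8_t)(port << 3);          /* port; tagged bit left clear */
        hdr[2] = vid >> 8;                      /* VID high bits (PRI/CFI zero) */
        hdr[3] = vid & 0xff;                    /* VID low byte */
}

int main(void)
{
        uint8_t hdr[4];

        /* From CPU, switch 0, port 3, placeholder "bridged" VID */
        pack_dsa_header(hdr, DSA_CMD_FROM_CPU, 0, 3, 4095);
        printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
        return 0;
}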
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index fa1d60d13ad9..3509fc967ca9 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -6,7 +6,6 @@
#include <linux/etherdevice.h>
#include <linux/list.h>
-#include <linux/slab.h>
#include <net/dsa.h>
#include "dsa_priv.h"
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index d37ab98e7fe1..cd60b94fc175 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -1,19 +1,55 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2019 NXP Semiconductors
+/* Copyright 2019 NXP
*/
#include <linux/dsa/ocelot.h>
-#include <soc/mscc/ocelot.h>
#include "dsa_priv.h"
+/* If the port is under a VLAN-aware bridge, remove the VLAN header from the
+ * payload and move it into the DSA tag, which will make the switch classify
+ * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
+ * which is the pvid of standalone and VLAN-unaware bridge ports.
+ */
+static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp,
+ u64 *vlan_tci, u64 *tag_type)
+{
+ struct net_device *br = READ_ONCE(dp->bridge_dev);
+ struct vlan_ethhdr *hdr;
+ u16 proto, tci;
+
+ if (!br || !br_vlan_enabled(br)) {
+ *vlan_tci = 0;
+ *tag_type = IFH_TAG_TYPE_C;
+ return;
+ }
+
+ hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
+ br_vlan_get_proto(br, &proto);
+
+ if (ntohs(hdr->h_vlan_proto) == proto) {
+ __skb_vlan_pop(skb, &tci);
+ *vlan_tci = tci;
+ } else {
+ rcu_read_lock();
+ br_vlan_get_pvid_rcu(br, &tci);
+ rcu_read_unlock();
+ *vlan_tci = tci;
+ }
+
+ *tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
+}
+
static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
__be32 ifh_prefix, void **ifh)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
struct dsa_switch *ds = dp->ds;
+ u64 vlan_tci, tag_type;
void *injection;
__be32 *prefix;
u32 rew_op = 0;
+ ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type);
+
injection = skb_push(skb, OCELOT_TAG_LEN);
prefix = skb_push(skb, OCELOT_SHORT_PREFIX_LEN);
@@ -22,6 +58,8 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
ocelot_ifh_set_bypass(injection, 1);
ocelot_ifh_set_src(injection, ds->num_ports);
ocelot_ifh_set_qos_class(injection, skb->priority);
+ ocelot_ifh_set_vlan_tci(injection, vlan_tci);
+ ocelot_ifh_set_tag_type(injection, tag_type);
rew_op = ocelot_ptp_rew_op(skb);
if (rew_op)
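The decision ocelot_xmit_get_vlan_info() makes reduces to a small pure function. A sketch under stated assumptions (VLAN-unaware ports classify to VLAN 0 with a C-tag; under a VLAN-aware bridge the frame's own TCI wins when its protocol matches the bridge's, otherwise the pvid; a non-0x8100 bridge protocol yields an S-tag):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum tag_type { TAG_TYPE_C, TAG_TYPE_S };       /* stand-ins for IFH_TAG_TYPE_* */

struct vlan_info { uint16_t tci; enum tag_type type; };

static struct vlan_info classify(bool under_vlan_aware_bridge,
                                 uint16_t frame_proto, uint16_t bridge_proto,
                                 uint16_t frame_tci, uint16_t pvid)
{
        struct vlan_info v = { .tci = 0, .type = TAG_TYPE_C };

        if (!under_vlan_aware_bridge)
                return v;       /* VLAN 0: standalone/VLAN-unaware pvid */

        /* tag matches the bridge protocol: pop it and classify to it */
        v.tci = (frame_proto == bridge_proto) ? frame_tci : pvid;
        v.type = (bridge_proto != 0x8100) ? TAG_TYPE_S : TAG_TYPE_C;
        return v;
}

int main(void)
{
        struct vlan_info v = classify(true, 0x8100, 0x8100, 100, 1);

        printf("tci=%u type=%d\n", v.tci, v.type);      /* tci=100 type=0 */
        return 0;
}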
diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
index 3038a257ba05..a1919ea5e828 100644
--- a/net/dsa/tag_ocelot_8021q.c
+++ b/net/dsa/tag_ocelot_8021q.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020-2021 NXP Semiconductors
+/* Copyright 2020-2021 NXP
*
* An implementation of the software-defined tag_8021q.c tagger format, which
* also preserves full functionality under a vlan_filtering bridge. It does
@@ -9,29 +9,43 @@
* that on egress
*/
#include <linux/dsa/8021q.h>
-#include <soc/mscc/ocelot.h>
-#include <soc/mscc/ocelot_ptp.h>
+#include <linux/dsa/ocelot.h>
#include "dsa_priv.h"
+static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
+ struct sk_buff *skb)
+{
+ struct felix_deferred_xmit_work *xmit_work;
+ struct felix_port *felix_port = dp->priv;
+
+ xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
+ if (!xmit_work)
+ return NULL;
+
+ /* Calls felix_port_deferred_xmit in felix.c */
+ kthread_init_work(&xmit_work->work, felix_port->xmit_work_fn);
+ /* Increase refcount so the kfree_skb in dsa_slave_xmit
+ * won't really free the packet.
+ */
+ xmit_work->dp = dp;
+ xmit_work->skb = skb_get(skb);
+
+ kthread_queue_work(felix_port->xmit_worker, &xmit_work->work);
+
+ return NULL;
+}
+
static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
- u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
- struct ocelot *ocelot = dp->ds->priv;
- int port = dp->index;
- u32 rew_op = 0;
+ u16 tx_vid = dsa_tag_8021q_tx_vid(dp);
+ struct ethhdr *hdr = eth_hdr(skb);
- rew_op = ocelot_ptp_rew_op(skb);
- if (rew_op) {
- if (!ocelot_can_inject(ocelot, 0))
- return NULL;
-
- ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
- return NULL;
- }
+ if (ocelot_ptp_rew_op(skb) || is_link_local_ether_addr(hdr->h_dest))
+ return ocelot_defer_xmit(dp, skb);
return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q,
((pcp << VLAN_PRIO_SHIFT) | tx_vid));
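ocelot_defer_xmit() is an instance of the stock kthread_worker pattern: allocate a work item with GFP_ATOMIC on the hot path, take an extra skb reference, and let a sleepable worker perform the injection. A minimal kernel-style sketch; only the kthread and skb helpers are real APIs, the rest is illustrative:

#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>

struct deferred_xmit {
        struct kthread_work work;
        struct sk_buff *skb;
};

static void deferred_xmit_fn(struct kthread_work *work)
{
        struct deferred_xmit *x = container_of(work, struct deferred_xmit, work);

        /* ... inject x->skb to the hardware from sleepable context ... */
        kfree_skb(x->skb);
        kfree(x);
}

static int queue_deferred_xmit(struct kthread_worker *worker,
                               struct sk_buff *skb)
{
        struct deferred_xmit *x = kzalloc(sizeof(*x), GFP_ATOMIC);

        if (!x)
                return -ENOMEM;

        kthread_init_work(&x->work, deferred_xmit_fn);
        x->skb = skb_get(skb);  /* survive the caller's kfree_skb() */
        kthread_queue_work(worker, &x->work);
        return 0;
}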
diff --git a/net/dsa/tag_rtl8_4.c b/net/dsa/tag_rtl8_4.c
new file mode 100644
index 000000000000..02686ad4045d
--- /dev/null
+++ b/net/dsa/tag_rtl8_4.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Handler for Realtek 8 byte switch tags
+ *
+ * Copyright (C) 2021 Alvin Šipraga <[email protected]>
+ *
+ * NOTE: Currently only supports protocol "4" found in the RTL8365MB, hence
+ * named tag_rtl8_4.
+ *
+ * This tag header has the following format:
+ *
+ * -------------------------------------------
+ * | MAC DA | MAC SA | 8 byte tag | Type | ...
+ * -------------------------------------------
+ * _______________/ \______________________________________
+ * / \
+ * 0 7|8 15
+ * |-----------------------------------+-----------------------------------|---
+ * | (16-bit) | ^
+ * | Realtek EtherType [0x8899] | |
+ * |-----------------------------------+-----------------------------------| 8
+ * | (8-bit) | (8-bit) |
+ * | Protocol [0x04] | REASON | b
+ * |-----------------------------------+-----------------------------------| y
+ * | (1) | (1) | (2) | (1) | (3) | (1) | (1) | (1) | (5) | t
+ * | FID_EN | X | FID | PRI_EN | PRI | KEEP | X | LEARN_DIS | X | e
+ * |-----------------------------------+-----------------------------------| s
+ * | (1) | (15-bit) | |
+ * | ALLOW | TX/RX | v
+ * |-----------------------------------+-----------------------------------|---
+ *
+ * With the following field descriptions:
+ *
+ * field | description
+ * ------------+-------------
+ * Realtek | 0x8899: indicates that this is a proprietary Realtek tag;
+ * EtherType | note that Realtek uses the same EtherType for
+ * | other incompatible tag formats (e.g. tag_rtl4_a.c)
+ * Protocol | 0x04: indicates that this tag conforms to this format
+ * X | reserved
+ * ------------+-------------
+ * REASON | reason for forwarding packet to CPU
+ * | 0: packet was forwarded or flooded to CPU
+ * | 80: packet was trapped to CPU
+ * FID_EN | 1: packet has an FID
+ * | 0: no FID
+ * FID | FID of packet (if FID_EN=1)
+ * PRI_EN | 1: force priority of packet
+ * | 0: don't force priority
+ * PRI | priority of packet (if PRI_EN=1)
+ * KEEP | preserve packet VLAN tag format
+ * LEARN_DIS | don't learn the source MAC address of the packet
+ * ALLOW | 1: treat TX/RX field as an allowance port mask, meaning the
+ * | packet may only be forwarded to ports specified in the
+ * | mask
+ * | 0: no allowance port mask, TX/RX field is the forwarding
+ * | port mask
+ * TX/RX | TX (switch->CPU): port number the packet was received on
+ * | RX (CPU->switch): forwarding port mask (if ALLOW=0)
+ * | allowance port mask (if ALLOW=1)
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/etherdevice.h>
+
+#include "dsa_priv.h"
+
+/* Protocols supported:
+ *
+ * 0x04 = RTL8365MB DSA protocol
+ */
+
+#define RTL8_4_TAG_LEN 8
+
+#define RTL8_4_PROTOCOL GENMASK(15, 8)
+#define RTL8_4_PROTOCOL_RTL8365MB 0x04
+#define RTL8_4_REASON GENMASK(7, 0)
+#define RTL8_4_REASON_FORWARD 0
+#define RTL8_4_REASON_TRAP 80
+
+#define RTL8_4_LEARN_DIS BIT(5)
+
+#define RTL8_4_TX GENMASK(3, 0)
+#define RTL8_4_RX GENMASK(10, 0)
+
+static struct sk_buff *rtl8_4_tag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ __be16 *tag;
+
+ skb_push(skb, RTL8_4_TAG_LEN);
+
+ dsa_alloc_etype_header(skb, RTL8_4_TAG_LEN);
+ tag = dsa_etype_header_pos_tx(skb);
+
+ /* Set Realtek EtherType */
+ tag[0] = htons(ETH_P_REALTEK);
+
+ /* Set Protocol; zero REASON */
+ tag[1] = htons(FIELD_PREP(RTL8_4_PROTOCOL, RTL8_4_PROTOCOL_RTL8365MB));
+
+ /* Zero FID_EN, FID, PRI_EN, PRI, KEEP; set LEARN_DIS */
+ tag[2] = htons(FIELD_PREP(RTL8_4_LEARN_DIS, 1));
+
+ /* Zero ALLOW; set RX (CPU->switch) forwarding port mask */
+ tag[3] = htons(FIELD_PREP(RTL8_4_RX, BIT(dp->index)));
+
+ return skb;
+}
+
+static struct sk_buff *rtl8_4_tag_rcv(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ __be16 *tag;
+ u16 etype;
+ u8 reason;
+ u8 proto;
+ u8 port;
+
+ if (unlikely(!pskb_may_pull(skb, RTL8_4_TAG_LEN)))
+ return NULL;
+
+ tag = dsa_etype_header_pos_rx(skb);
+
+ /* Parse Realtek EtherType */
+ etype = ntohs(tag[0]);
+ if (unlikely(etype != ETH_P_REALTEK)) {
+ dev_warn_ratelimited(&dev->dev,
+ "non-realtek ethertype 0x%04x\n", etype);
+ return NULL;
+ }
+
+ /* Parse Protocol */
+ proto = FIELD_GET(RTL8_4_PROTOCOL, ntohs(tag[1]));
+ if (unlikely(proto != RTL8_4_PROTOCOL_RTL8365MB)) {
+ dev_warn_ratelimited(&dev->dev,
+ "unknown realtek protocol 0x%02x\n",
+ proto);
+ return NULL;
+ }
+
+ /* Parse REASON */
+ reason = FIELD_GET(RTL8_4_REASON, ntohs(tag[1]));
+
+ /* Parse TX (switch->CPU) */
+ port = FIELD_GET(RTL8_4_TX, ntohs(tag[3]));
+ skb->dev = dsa_master_find_slave(dev, 0, port);
+ if (!skb->dev) {
+ dev_warn_ratelimited(&dev->dev,
+ "could not find slave for port %d\n",
+ port);
+ return NULL;
+ }
+
+ /* Remove tag and recalculate checksum */
+ skb_pull_rcsum(skb, RTL8_4_TAG_LEN);
+
+ dsa_strip_etype_header(skb, RTL8_4_TAG_LEN);
+
+ if (reason != RTL8_4_REASON_TRAP)
+ dsa_default_offload_fwd_mark(skb);
+
+ return skb;
+}
+
+static const struct dsa_device_ops rtl8_4_netdev_ops = {
+ .name = "rtl8_4",
+ .proto = DSA_TAG_PROTO_RTL8_4,
+ .xmit = rtl8_4_tag_xmit,
+ .rcv = rtl8_4_tag_rcv,
+ .needed_headroom = RTL8_4_TAG_LEN,
+};
+module_dsa_tag_driver(rtl8_4_netdev_ops);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_RTL8_4);
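The xmit/rcv handlers above rely on FIELD_PREP()/FIELD_GET() to place values relative to a mask. A standalone sketch of the same mask-relative packing for the tag words defined above, shifting by the mask's lowest set bit the way the kernel macros effectively do:

#include <stdint.h>
#include <stdio.h>

#define PROTOCOL_MASK   0xff00  /* bits 15:8 of word 1 */
#define REASON_MASK     0x00ff  /* bits 7:0 of word 1 */
#define RX_MASK         0x07ff  /* bits 10:0 of word 3: forwarding port mask */
#define TX_MASK         0x000f  /* bits 3:0 of word 3: source port */

static uint16_t field_prep(uint16_t mask, uint16_t val)
{
        return (val * (mask & -mask)) & mask;   /* shift into place */
}

static uint16_t field_get(uint16_t mask, uint16_t word)
{
        return (word & mask) / (mask & -mask);  /* shift back out */
}

int main(void)
{
        uint16_t word1 = field_prep(PROTOCOL_MASK, 0x04); /* proto 4, REASON 0 */
        uint16_t word3 = field_prep(RX_MASK, 1 << 2);     /* forward to port 2 */

        printf("proto=0x%02x rx_mask=0x%03x\n",
               field_get(PROTOCOL_MASK, word1), field_get(RX_MASK, word3));
        return 0;
}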
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index c054f48541c8..262c8833a910 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -4,6 +4,7 @@
#include <linux/if_vlan.h>
#include <linux/dsa/sja1105.h>
#include <linux/dsa/8021q.h>
+#include <linux/skbuff.h>
#include <linux/packing.h>
#include "dsa_priv.h"
@@ -53,6 +54,11 @@
#define SJA1110_TX_TRAILER_LEN 4
#define SJA1110_MAX_PADDING_LEN 15
+enum sja1110_meta_tstamp {
+ SJA1110_META_TSTAMP_TX = 0,
+ SJA1110_META_TSTAMP_RX = 1,
+};
+
/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
static inline bool sja1105_is_link_local(const struct sk_buff *skb)
{
@@ -152,10 +158,7 @@ static u16 sja1105_xmit_tpid(struct dsa_port *dp)
* we're sure about that). It may not be on this port though, so we
* need to find it.
*/
- list_for_each_entry(other_dp, &ds->dst->ports, list) {
- if (other_dp->ds != ds)
- continue;
-
+ dsa_switch_for_each_port(other_dp, ds) {
if (!other_dp->bridge_dev)
continue;
@@ -232,9 +235,9 @@ static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
- u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
+ u16 tx_vid = dsa_tag_8021q_tx_vid(dp);
if (skb->offload_fwd_mark)
return sja1105_imprecise_xmit(skb, netdev);
@@ -260,9 +263,9 @@ static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
{
struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
struct dsa_port *dp = dsa_slave_to_port(netdev);
- u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
+ u16 tx_vid = dsa_tag_8021q_tx_vid(dp);
__be32 *tx_trailer;
__be16 *tx_header;
int trailer_pos;
@@ -520,6 +523,43 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
is_meta);
}
+static void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port,
+ u8 ts_id, enum sja1110_meta_tstamp dir,
+ u64 tstamp)
+{
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct skb_shared_hwtstamps shwt = {0};
+ struct sja1105_port *sp = dp->priv;
+
+ if (!dsa_port_is_sja1105(dp))
+ return;
+
+ /* We don't care about RX timestamps on the CPU port */
+ if (dir == SJA1110_META_TSTAMP_RX)
+ return;
+
+ spin_lock(&sp->data->skb_txtstamp_queue.lock);
+
+ skb_queue_walk_safe(&sp->data->skb_txtstamp_queue, skb, skb_tmp) {
+ if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
+ continue;
+
+ __skb_unlink(skb, &sp->data->skb_txtstamp_queue);
+ skb_match = skb;
+
+ break;
+ }
+
+ spin_unlock(&sp->data->skb_txtstamp_queue.lock);
+
+ if (WARN_ON(!skb_match))
+ return;
+
+ shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
+ skb_complete_tx_timestamp(skb_match, &shwt);
+}
+
static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
{
u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 73fce9467467..c7d9e08107cb 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -51,6 +51,7 @@
#include <linux/if_ether.h>
#include <linux/of_net.h>
#include <linux/pci.h>
+#include <linux/property.h>
#include <net/dst.h>
#include <net/arp.h>
#include <net/sock.h>
@@ -304,7 +305,7 @@ void eth_commit_mac_addr_change(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
}
EXPORT_SYMBOL(eth_commit_mac_addr_change);
@@ -523,6 +524,26 @@ int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr)
EXPORT_SYMBOL(eth_platform_get_mac_address);
/**
+ * platform_get_ethdev_address - Set netdev's MAC address from a given device
+ * @dev: Pointer to the device
+ * @netdev: Pointer to netdev to write the address to
+ *
+ * Wrapper around eth_platform_get_mac_address() which writes the address
+ * directly to netdev->dev_addr.
+ */
+int platform_get_ethdev_address(struct device *dev, struct net_device *netdev)
+{
+ u8 addr[ETH_ALEN] __aligned(2);
+ int ret;
+
+ ret = eth_platform_get_mac_address(dev, addr);
+ if (!ret)
+ eth_hw_addr_set(netdev, addr);
+ return ret;
+}
+EXPORT_SYMBOL(platform_get_ethdev_address);
+
+/**
* nvmem_get_mac_address - Obtain the MAC address from an nvmem cell named
* 'mac-address' associated with given device.
*
@@ -557,4 +578,81 @@ int nvmem_get_mac_address(struct device *dev, void *addrbuf)
return 0;
}
-EXPORT_SYMBOL(nvmem_get_mac_address);
+
+static int fwnode_get_mac_addr(struct fwnode_handle *fwnode,
+ const char *name, char *addr)
+{
+ int ret;
+
+ ret = fwnode_property_read_u8_array(fwnode, name, addr, ETH_ALEN);
+ if (ret)
+ return ret;
+
+ if (!is_valid_ether_addr(addr))
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * fwnode_get_mac_address - Get the MAC from the firmware node
+ * @fwnode: Pointer to the firmware node
+ * @addr: Address of buffer to store the MAC in
+ *
+ * Search the firmware node for the best MAC address to use. 'mac-address' is
+ * checked first, because that is supposed to contain to "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address. If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the firmware tables, but were not updated by the firmware. For
+ * example, the DTS could define 'mac-address' and 'local-mac-address', with
+ * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'.
+ * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
+ * exists but is all zeros.
+ */
+int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr)
+{
+ if (!fwnode_get_mac_addr(fwnode, "mac-address", addr) ||
+ !fwnode_get_mac_addr(fwnode, "local-mac-address", addr) ||
+ !fwnode_get_mac_addr(fwnode, "address", addr))
+ return 0;
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(fwnode_get_mac_address);
+
+/**
+ * device_get_mac_address - Get the MAC for a given device
+ * @dev: Pointer to the device
+ * @addr: Address of buffer to store the MAC in
+ */
+int device_get_mac_address(struct device *dev, char *addr)
+{
+ return fwnode_get_mac_address(dev_fwnode(dev), addr);
+}
+EXPORT_SYMBOL(device_get_mac_address);
+
+/**
+ * device_get_ethdev_address - Set netdev's MAC address from a given device
+ * @dev: Pointer to the device
+ * @netdev: Pointer to netdev to write the address to
+ *
+ * Wrapper around device_get_mac_address() which writes the address
+ * directly to netdev->dev_addr.
+ */
+int device_get_ethdev_address(struct device *dev, struct net_device *netdev)
+{
+ u8 addr[ETH_ALEN];
+ int ret;
+
+ ret = device_get_mac_address(dev, addr);
+ if (!ret)
+ eth_hw_addr_set(netdev, addr);
+ return ret;
+}
+EXPORT_SYMBOL(device_get_ethdev_address);
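The fallback order documented above is small enough to model directly. A userspace sketch assuming only the three property names and the validity rule (non-zero, unicast) of is_valid_ether_addr(); the callback stands in for fwnode_property_read_u8_array():

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ETH_ALEN 6

typedef int (*read_prop_fn)(const char *name, uint8_t addr[ETH_ALEN]);

static bool addr_valid(const uint8_t addr[ETH_ALEN])
{
        static const uint8_t zero[ETH_ALEN];

        /* like is_valid_ether_addr(): non-zero and unicast (I/G bit clear) */
        return memcmp(addr, zero, ETH_ALEN) != 0 && !(addr[0] & 1);
}

static int get_mac(read_prop_fn read_prop, uint8_t addr[ETH_ALEN])
{
        static const char *const names[] = {
                "mac-address", "local-mac-address", "address",
        };

        for (unsigned int i = 0; i < sizeof(names) / sizeof(names[0]); i++)
                if (!read_prop(names[i], addr) && addr_valid(addr))
                        return 0;
        return -1;      /* -ENOENT in the kernel helper */
}

static int fake_read(const char *name, uint8_t addr[ETH_ALEN])
{
        if (strcmp(name, "local-mac-address"))
                return -1;      /* only this property is populated */
        memcpy(addr, "\x02\x00\x00\x00\x00\x2a", ETH_ALEN);
        return 0;
}

int main(void)
{
        uint8_t addr[ETH_ALEN];

        if (!get_mac(fake_read, addr))
                printf("found %02x:..:%02x\n", addr[0], addr[5]);
        return 0;
}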
diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile
index 0a19470efbfb..b76432e70e6b 100644
--- a/net/ethtool/Makefile
+++ b/net/ethtool/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_ETHTOOL_NETLINK) += ethtool_nl.o
ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o \
linkstate.o debug.o wol.o features.o privflags.o rings.o \
channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \
- tunnels.o fec.o eeprom.o stats.o phc_vclocks.o
+ tunnels.o fec.o eeprom.o stats.o phc_vclocks.o module.o
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 999e2a6bed13..bf6e8c2f9bf7 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -89,7 +89,8 @@ static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
return -EFAULT;
useraddr += sizeof(cmd);
- if (copy_to_user(useraddr, features, copy_size * sizeof(*features)))
+ if (copy_to_user(useraddr, features,
+ array_size(copy_size, sizeof(*features))))
return -EFAULT;
return 0;
@@ -799,7 +800,7 @@ static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
goto out;
useraddr += offsetof(struct ethtool_sset_info, data);
- if (copy_to_user(useraddr, info_buf, idx * sizeof(u32)))
+ if (copy_to_user(useraddr, info_buf, array_size(idx, sizeof(u32))))
goto out;
ret = 0;
@@ -1022,7 +1023,7 @@ static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr,
{
int i;
- if (copy_from_user(indir, useraddr, size * sizeof(indir[0])))
+ if (copy_from_user(indir, useraddr, array_size(size, sizeof(indir[0]))))
return -EFAULT;
/* Validate ring indices */
@@ -1895,7 +1896,7 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
if (copy_to_user(useraddr, &test, sizeof(test)))
goto out;
useraddr += sizeof(test);
- if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
+ if (copy_to_user(useraddr, data, array_size(test.len, sizeof(u64))))
goto out;
ret = 0;
@@ -1937,7 +1938,8 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
goto out;
useraddr += sizeof(gstrings);
if (gstrings.len &&
- copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+ copy_to_user(useraddr, data,
+ array_size(gstrings.len, ETH_GSTRING_LEN)))
goto out;
ret = 0;
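Each conversion above replaces an unchecked `len * size` with array_size(), which saturates on overflow so the copy fails instead of silently truncating. A sketch of the underlying check:

#include <stdint.h>
#include <stdio.h>

static size_t array_size_sketch(size_t n, size_t elem)
{
        size_t bytes;

        if (__builtin_mul_overflow(n, elem, &bytes))
                return SIZE_MAX;        /* saturate; the copy then fails cleanly */
        return bytes;
}

int main(void)
{
        printf("%zu\n", array_size_sketch(4, 8));               /* 32 */
        printf("%zu\n", array_size_sketch(SIZE_MAX / 2, 4));    /* SIZE_MAX */
        return 0;
}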
diff --git a/net/ethtool/module.c b/net/ethtool/module.c
new file mode 100644
index 000000000000..bc2cef11bbda
--- /dev/null
+++ b/net/ethtool/module.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/ethtool.h>
+
+#include "netlink.h"
+#include "common.h"
+#include "bitset.h"
+
+struct module_req_info {
+ struct ethnl_req_info base;
+};
+
+struct module_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_module_power_mode_params power;
+};
+
+#define MODULE_REPDATA(__reply_base) \
+ container_of(__reply_base, struct module_reply_data, base)
+
+/* MODULE_GET */
+
+const struct nla_policy ethnl_module_get_policy[ETHTOOL_A_MODULE_HEADER + 1] = {
+ [ETHTOOL_A_MODULE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+};
+
+static int module_get_power_mode(struct net_device *dev,
+ struct module_reply_data *data,
+ struct netlink_ext_ack *extack)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (!ops->get_module_power_mode)
+ return 0;
+
+ return ops->get_module_power_mode(dev, &data->power, extack);
+}
+
+static int module_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct module_reply_data *data = MODULE_REPDATA(reply_base);
+ struct netlink_ext_ack *extack = info ? info->extack : NULL;
+ struct net_device *dev = reply_base->dev;
+ int ret;
+
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = module_get_power_mode(dev, data, extack);
+ if (ret < 0)
+ goto out_complete;
+
+out_complete:
+ ethnl_ops_complete(dev);
+ return ret;
+}
+
+static int module_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ struct module_reply_data *data = MODULE_REPDATA(reply_base);
+ int len = 0;
+
+ if (data->power.policy)
+ len += nla_total_size(sizeof(u8)); /* _MODULE_POWER_MODE_POLICY */
+
+ if (data->power.mode)
+ len += nla_total_size(sizeof(u8)); /* _MODULE_POWER_MODE */
+
+ return len;
+}
+
+static int module_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct module_reply_data *data = MODULE_REPDATA(reply_base);
+
+ if (data->power.policy &&
+ nla_put_u8(skb, ETHTOOL_A_MODULE_POWER_MODE_POLICY,
+ data->power.policy))
+ return -EMSGSIZE;
+
+ if (data->power.mode &&
+ nla_put_u8(skb, ETHTOOL_A_MODULE_POWER_MODE, data->power.mode))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+const struct ethnl_request_ops ethnl_module_request_ops = {
+ .request_cmd = ETHTOOL_MSG_MODULE_GET,
+ .reply_cmd = ETHTOOL_MSG_MODULE_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_MODULE_HEADER,
+ .req_info_size = sizeof(struct module_req_info),
+ .reply_data_size = sizeof(struct module_reply_data),
+
+ .prepare_data = module_prepare_data,
+ .reply_size = module_reply_size,
+ .fill_reply = module_fill_reply,
+};
+
+/* MODULE_SET */
+
+const struct nla_policy ethnl_module_set_policy[ETHTOOL_A_MODULE_POWER_MODE_POLICY + 1] = {
+ [ETHTOOL_A_MODULE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_MODULE_POWER_MODE_POLICY] =
+ NLA_POLICY_RANGE(NLA_U8, ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH,
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO),
+};
+
+static int module_set_power_mode(struct net_device *dev, struct nlattr **tb,
+ bool *p_mod, struct netlink_ext_ack *extack)
+{
+ struct ethtool_module_power_mode_params power = {};
+ struct ethtool_module_power_mode_params power_new;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ int ret;
+
+ if (!tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY])
+ return 0;
+
+ if (!ops->get_module_power_mode || !ops->set_module_power_mode) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY],
+ "Setting power mode policy is not supported by this device");
+ return -EOPNOTSUPP;
+ }
+
+ power_new.policy = nla_get_u8(tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY]);
+ ret = ops->get_module_power_mode(dev, &power, extack);
+ if (ret < 0)
+ return ret;
+
+ if (power_new.policy == power.policy)
+ return 0;
+ *p_mod = true;
+
+ return ops->set_module_power_mode(dev, &power_new, extack);
+}
+
+int ethnl_set_module(struct sk_buff *skb, struct genl_info *info)
+{
+ struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
+ struct net_device *dev;
+ bool mod = false;
+ int ret;
+
+ ret = ethnl_parse_header_dev_get(&req_info, tb[ETHTOOL_A_MODULE_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+ dev = req_info.dev;
+
+ rtnl_lock();
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out_rtnl;
+
+ ret = module_set_power_mode(dev, tb, &mod, info->extack);
+ if (ret < 0)
+ goto out_ops;
+
+ if (!mod)
+ goto out_ops;
+
+ ethtool_notify(dev, ETHTOOL_MSG_MODULE_NTF, NULL);
+
+out_ops:
+ ethnl_ops_complete(dev);
+out_rtnl:
+ rtnl_unlock();
+ dev_put(dev);
+ return ret;
+}
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index 1797a0a90019..38b44c0291b1 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -282,6 +282,7 @@ ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = {
[ETHTOOL_MSG_MODULE_EEPROM_GET] = &ethnl_module_eeprom_request_ops,
[ETHTOOL_MSG_STATS_GET] = &ethnl_stats_request_ops,
[ETHTOOL_MSG_PHC_VCLOCKS_GET] = &ethnl_phc_vclocks_request_ops,
+ [ETHTOOL_MSG_MODULE_GET] = &ethnl_module_request_ops,
};
static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
@@ -593,6 +594,7 @@ ethnl_default_notify_ops[ETHTOOL_MSG_KERNEL_MAX + 1] = {
[ETHTOOL_MSG_PAUSE_NTF] = &ethnl_pause_request_ops,
[ETHTOOL_MSG_EEE_NTF] = &ethnl_eee_request_ops,
[ETHTOOL_MSG_FEC_NTF] = &ethnl_fec_request_ops,
+ [ETHTOOL_MSG_MODULE_NTF] = &ethnl_module_request_ops,
};
/* default notification handler */
@@ -686,6 +688,7 @@ static const ethnl_notify_handler_t ethnl_notify_handlers[] = {
[ETHTOOL_MSG_PAUSE_NTF] = ethnl_default_notify,
[ETHTOOL_MSG_EEE_NTF] = ethnl_default_notify,
[ETHTOOL_MSG_FEC_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_MODULE_NTF] = ethnl_default_notify,
};
void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data)
@@ -999,6 +1002,22 @@ static const struct genl_ops ethtool_genl_ops[] = {
.policy = ethnl_phc_vclocks_get_policy,
.maxattr = ARRAY_SIZE(ethnl_phc_vclocks_get_policy) - 1,
},
+ {
+ .cmd = ETHTOOL_MSG_MODULE_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ .policy = ethnl_module_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_module_get_policy) - 1,
+ },
+ {
+ .cmd = ETHTOOL_MSG_MODULE_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_set_module,
+ .policy = ethnl_module_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_module_set_policy) - 1,
+ },
};
static const struct genl_multicast_group ethtool_nl_mcgrps[] = {
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index e8987e28036f..836ee7157848 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -337,6 +337,7 @@ extern const struct ethnl_request_ops ethnl_fec_request_ops;
extern const struct ethnl_request_ops ethnl_module_eeprom_request_ops;
extern const struct ethnl_request_ops ethnl_stats_request_ops;
extern const struct ethnl_request_ops ethnl_phc_vclocks_request_ops;
+extern const struct ethnl_request_ops ethnl_module_request_ops;
extern const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_FLAGS + 1];
extern const struct nla_policy ethnl_header_policy_stats[ETHTOOL_A_HEADER_FLAGS + 1];
@@ -373,6 +374,8 @@ extern const struct nla_policy ethnl_fec_set_policy[ETHTOOL_A_FEC_AUTO + 1];
extern const struct nla_policy ethnl_module_eeprom_get_policy[ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS + 1];
extern const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_GROUPS + 1];
extern const struct nla_policy ethnl_phc_vclocks_get_policy[ETHTOOL_A_PHC_VCLOCKS_HEADER + 1];
+extern const struct nla_policy ethnl_module_get_policy[ETHTOOL_A_MODULE_HEADER + 1];
+extern const struct nla_policy ethnl_module_set_policy[ETHTOOL_A_MODULE_POWER_MODE_POLICY + 1];
int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info);
int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info);
@@ -391,6 +394,7 @@ int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info);
int ethnl_tunnel_info_start(struct netlink_callback *cb);
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
int ethnl_set_fec(struct sk_buff *skb, struct genl_info *info);
+int ethnl_set_module(struct sk_buff *skb, struct genl_info *info);
extern const char stats_std_names[__ETHTOOL_STATS_CNT][ETH_GSTRING_LEN];
extern const char stats_eth_phy_names[__ETHTOOL_A_STATS_ETH_PHY_CNT][ETH_GSTRING_LEN];
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 26c32407f029..e00fbb16391f 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -493,7 +493,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
INIT_LIST_HEAD(&hsr->self_node_db);
spin_lock_init(&hsr->list_lock);
- ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);
+ eth_hw_addr_set(hsr_dev, slave[0]->dev_addr);
/* initialize protocol specific functions */
if (protocol_version == PRP_V1) {
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index f7e284f23b1f..b099c3150150 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -75,7 +75,7 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
if (port->type == HSR_PT_SLAVE_A) {
- ether_addr_copy(master->dev->dev_addr, dev->dev_addr);
+ eth_hw_addr_set(master->dev, dev->dev_addr);
call_netdevice_notifiers(NETDEV_CHANGEADDR,
master->dev);
}
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 3297e7fa9945..2cf62718a282 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -157,7 +157,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *ldev,
lowpan_802154_dev(ldev)->wdev = wdev;
/* Set the lowpan hardware address to the wpan hardware address. */
- memcpy(ldev->dev_addr, wdev->dev_addr, IEEE802154_ADDR_LEN);
+ __dev_addr_set(ldev, wdev->dev_addr, IEEE802154_ADDR_LEN);
/* We need headroom for possible wpan_dev_hard_header call and tailroom
* for encryption/fcs handling. The lowpan interface will replace
* the IPv6 header with 6LoWPAN header. At worst case the 6LoWPAN
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1d816a5fd3eb..8eb428387bac 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -133,13 +133,9 @@ void inet_sock_destruct(struct sock *sk)
struct inet_sock *inet = inet_sk(sk);
__skb_queue_purge(&sk->sk_receive_queue);
- if (sk->sk_rx_skb_cache) {
- __kfree_skb(sk->sk_rx_skb_cache);
- sk->sk_rx_skb_cache = NULL;
- }
__skb_queue_purge(&sk->sk_error_queue);
- sk_mem_reclaim(sk);
+ sk_mem_reclaim_final(sk);
if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
pr_err("Attempt to release TCP socket in state %d %p\n",
@@ -1666,12 +1662,6 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
}
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
-u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
-{
- return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
-}
-EXPORT_SYMBOL_GPL(snmp_get_cpu_field);
-
unsigned long snmp_fold_field(void __percpu *mib, int offt)
{
unsigned long res = 0;
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 4a8550c49202..48f337ccf949 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -9,7 +9,6 @@
#include <linux/types.h>
#include <linux/module.h>
-#include <linux/ip.h>
#include <linux/in.h>
#include <net/ip.h>
#include <net/sock.h>
diff --git a/net/ipv4/fib_notifier.c b/net/ipv4/fib_notifier.c
index 0c28bd469a68..0e23ade74493 100644
--- a/net/ipv4/fib_notifier.c
+++ b/net/ipv4/fib_notifier.c
@@ -6,7 +6,6 @@
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/fib_notifier.h>
-#include <net/netns/ipv4.h>
#include <net/ip_fib.h>
int call_fib4_notifier(struct notifier_block *nb,
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index b42c429cebbe..3364cb9c67e0 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1661,7 +1661,7 @@ EXPORT_SYMBOL_GPL(fib_nexthop_info);
#if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
- int nh_weight, u8 rt_family)
+ int nh_weight, u8 rt_family, u32 nh_tclassid)
{
const struct net_device *dev = nhc->nhc_dev;
struct rtnexthop *rtnh;
@@ -1679,6 +1679,9 @@ int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
rtnh->rtnh_flags = flags;
+ if (nh_tclassid && nla_put_u32(skb, RTA_FLOW, nh_tclassid))
+ goto nla_put_failure;
+
/* length of rtnetlink header + attributes */
rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
@@ -1706,14 +1709,13 @@ static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
}
for_nexthops(fi) {
- if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
- AF_INET) < 0)
- goto nla_put_failure;
+ u32 nh_tclassid = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
- if (nh->nh_tclassid &&
- nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
- goto nla_put_failure;
+ nh_tclassid = nh->nh_tclassid;
#endif
+ if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
+ AF_INET, nh_tclassid) < 0)
+ goto nla_put_failure;
} endfor_nexthops(fi);
mp_end:
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 8b30cadff708..b7e277d8a84d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1054,14 +1054,19 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr), &_iio);
if (!ext_hdr || !iio)
goto send_mal_query;
- if (ntohs(iio->extobj_hdr.length) <= sizeof(iio->extobj_hdr))
+ if (ntohs(iio->extobj_hdr.length) <= sizeof(iio->extobj_hdr) ||
+ ntohs(iio->extobj_hdr.length) > sizeof(_iio))
goto send_mal_query;
ident_len = ntohs(iio->extobj_hdr.length) - sizeof(iio->extobj_hdr);
+ iio = skb_header_pointer(skb, sizeof(_ext_hdr),
+ sizeof(iio->extobj_hdr) + ident_len, &_iio);
+ if (!iio)
+ goto send_mal_query;
+
status = 0;
dev = NULL;
switch (iio->extobj_hdr.class_type) {
case ICMP_EXT_ECHO_CTYPE_NAME:
- iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(_iio), &_iio);
if (ident_len >= IFNAMSIZ)
goto send_mal_query;
memset(buff, 0, sizeof(buff));
@@ -1069,30 +1074,24 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
dev = dev_get_by_name(net, buff);
break;
case ICMP_EXT_ECHO_CTYPE_INDEX:
- iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr) +
- sizeof(iio->ident.ifindex), &_iio);
if (ident_len != sizeof(iio->ident.ifindex))
goto send_mal_query;
dev = dev_get_by_index(net, ntohl(iio->ident.ifindex));
break;
case ICMP_EXT_ECHO_CTYPE_ADDR:
- if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
+ if (ident_len < sizeof(iio->ident.addr.ctype3_hdr) ||
+ ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
iio->ident.addr.ctype3_hdr.addrlen)
goto send_mal_query;
switch (ntohs(iio->ident.addr.ctype3_hdr.afi)) {
case ICMP_AFI_IP:
- iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr) +
- sizeof(struct in_addr), &_iio);
- if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
- sizeof(struct in_addr))
+ if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in_addr))
goto send_mal_query;
dev = ip_dev_find(net, iio->ident.addr.ip_addr.ipv4_addr);
break;
#if IS_ENABLED(CONFIG_IPV6)
case ICMP_AFI_IP6:
- iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(_iio), &_iio);
- if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
- sizeof(struct in6_addr))
+ if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in6_addr))
goto send_mal_query;
dev = ipv6_stub->ipv6_dev_find(net, &iio->ident.addr.ip_addr.ipv6_addr, dev);
dev_hold(dev);
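The icmp_build_probe() fix addresses a recurring pattern: a wire-format length field sized a second header read before being fully validated. A generic sketch of the rule (host-order lengths for brevity): bound the claimed length by the minimum header size, the local buffer, and the packet before using it:

#include <stddef.h>
#include <string.h>
#include <stdio.h>

struct obj_hdr {
        unsigned short length;          /* claimed total object length */
        unsigned char class_type;
        unsigned char ctype;
};

/* Returns payload length, or -1 if the header cannot be trusted. */
static int parse_object(const unsigned char *pkt, size_t pkt_len, size_t buf_cap)
{
        struct obj_hdr hdr;

        if (pkt_len < sizeof(hdr))
                return -1;
        memcpy(&hdr, pkt, sizeof(hdr));

        if (hdr.length <= sizeof(hdr) ||        /* must carry a payload */
            hdr.length > buf_cap ||             /* must fit the local buffer */
            hdr.length > pkt_len)               /* must fit the packet */
                return -1;

        return hdr.length - sizeof(hdr);
}

int main(void)
{
        unsigned char pkt[64] = { 0 };
        struct obj_hdr hdr = { .length = 12 };

        memcpy(pkt, &hdr, sizeof(hdr));
        printf("payload=%d\n", parse_object(pkt, sizeof(pkt), 32));  /* 8 */
        return 0;
}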
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f25d02ad4a8a..f7fea3a7c5e6 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -1015,7 +1015,7 @@ void inet_csk_destroy_sock(struct sock *sk)
sk_refcnt_debug_release(sk);
- percpu_counter_dec(sk->sk_prot->orphan_count);
+ this_cpu_dec(*sk->sk_prot->orphan_count);
sock_put(sk);
}
@@ -1074,7 +1074,7 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
sock_orphan(child);
- percpu_counter_inc(sk->sk_prot->orphan_count);
+ this_cpu_inc(*sk->sk_prot->orphan_count);
if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 80aeaf9e6e16..75737267746f 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -242,8 +242,10 @@ static inline int compute_score(struct sock *sk, struct net *net,
if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
return -1;
- score = sk->sk_family == PF_INET ? 2 : 1;
+ score = sk->sk_bound_dev_if ? 2 : 1;
+ if (sk->sk_family == PF_INET)
+ score++;
if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
score++;
}
@@ -596,7 +598,7 @@ bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
if (ok) {
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
} else {
- percpu_counter_inc(sk->sk_prot->orphan_count);
+ this_cpu_inc(*sk->sk_prot->orphan_count);
inet_sk_set_state(sk, TCP_CLOSE);
sock_set_flag(sk, SOCK_DEAD);
inet_csk_destroy_sock(sk);
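The compute_score() change makes binding to a device outrank address-family specificity. A sketch of the new precedence:

#include <stdbool.h>
#include <stdio.h>

static int compute_score(bool bound_to_dev, bool exact_family, bool same_cpu)
{
        int score = bound_to_dev ? 2 : 1;       /* device binding now dominates */

        if (exact_family)
                score++;        /* PF_INET socket serving an IPv4 lookup */
        if (same_cpu)
                score++;        /* sk_incoming_cpu matches this CPU */
        return score;
}

int main(void)
{
        /* A bound v4-mapped IPv6 listener (3) now beats an unbound PF_INET one (2). */
        printf("%d vs %d\n", compute_score(true, false, false),
               compute_score(false, true, false));
        return 0;
}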
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 0fe6c936dc54..2ac2b95c5694 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -986,7 +986,7 @@ static int ipgre_tunnel_init(struct net_device *dev)
__gre_tunnel_init(dev);
- memcpy(dev->dev_addr, &iph->saddr, 4);
+ __dev_addr_set(dev, &iph->saddr, 4);
memcpy(dev->broadcast, &iph->daddr, 4);
dev->flags = IFF_NOARP;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index fe9101d3d69e..5a473319d3a5 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -834,7 +834,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
t->parms.i_key = p->i_key;
t->parms.o_key = p->o_key;
if (dev->type != ARPHRD_ETHER) {
- memcpy(dev->dev_addr, &p->iph.saddr, 4);
+ __dev_addr_set(dev, &p->iph.saddr, 4);
memcpy(dev->broadcast, &p->iph.daddr, 4);
}
ip_tunnel_add(itn, t);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index efe25a0172e6..8c2bd1d9ddce 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -425,7 +425,7 @@ static int vti_tunnel_init(struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
- memcpy(dev->dev_addr, &iph->saddr, 4);
+ __dev_addr_set(dev, &iph->saddr, 4);
memcpy(dev->broadcast, &iph->daddr, 4);
dev->flags = IFF_NOARP;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 3aa78ccbec3e..123ea63a04cb 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -380,7 +380,7 @@ static int ipip_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
+ __dev_addr_set(dev, &tunnel->parms.iph.saddr, 4);
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
tunnel->tun_hlen = 0;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index c53f14b94356..ffc0cab7cf18 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -179,10 +179,11 @@ struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
return (void *)entry + entry->next_offset;
}
-unsigned int arpt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table)
+unsigned int arpt_do_table(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
{
+ const struct xt_table *table = priv;
unsigned int hook = state->hook;
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
unsigned int verdict = NF_DROP;
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index 3de78416ec76..78cd5ee24448 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -26,14 +26,6 @@ static const struct xt_table packet_filter = {
.priority = NF_IP_PRI_FILTER,
};
-/* The work comes in here from netfilter.c */
-static unsigned int
-arptable_filter_hook(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return arpt_do_table(skb, state, priv);
-}
-
static struct nf_hook_ops *arpfilter_ops __read_mostly;
static int arptable_filter_table_init(struct net *net)
@@ -72,7 +64,7 @@ static int __init arptable_filter_init(void)
if (ret < 0)
return ret;
- arpfilter_ops = xt_hook_ops_alloc(&packet_filter, arptable_filter_hook);
+ arpfilter_ops = xt_hook_ops_alloc(&packet_filter, arpt_do_table);
if (IS_ERR(arpfilter_ops)) {
xt_unregister_template(&packet_filter);
return PTR_ERR(arpfilter_ops);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 13acb687c19a..2ed7c58b471a 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -222,10 +222,11 @@ struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
-ipt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table)
+ipt_do_table(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
{
+ const struct xt_table *table = priv;
unsigned int hook = state->hook;
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
const struct iphdr *ip;
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 0eb0e2ab9bfc..b9062f4552ac 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -28,13 +28,6 @@ static const struct xt_table packet_filter = {
.priority = NF_IP_PRI_FILTER,
};
-static unsigned int
-iptable_filter_hook(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ipt_do_table(skb, state, priv);
-}
-
static struct nf_hook_ops *filter_ops __read_mostly;
/* Default to forward because I got too much mail already. */
@@ -90,7 +83,7 @@ static int __init iptable_filter_init(void)
if (ret < 0)
return ret;
- filter_ops = xt_hook_ops_alloc(&packet_filter, iptable_filter_hook);
+ filter_ops = xt_hook_ops_alloc(&packet_filter, ipt_do_table);
if (IS_ERR(filter_ops)) {
xt_unregister_template(&packet_filter);
return PTR_ERR(filter_ops);
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 40417a3f930b..3abb430af9e6 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -34,7 +34,7 @@ static const struct xt_table packet_mangler = {
};
static unsigned int
-ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state, void *priv)
+ipt_mangle_out(void *priv, struct sk_buff *skb, const struct nf_hook_state *state)
{
unsigned int ret;
const struct iphdr *iph;
@@ -50,7 +50,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state, void *pri
daddr = iph->daddr;
tos = iph->tos;
- ret = ipt_do_table(skb, state, priv);
+ ret = ipt_do_table(priv, skb, state);
/* Reroute for ANY change. */
if (ret != NF_DROP && ret != NF_STOLEN) {
iph = ip_hdr(skb);
@@ -75,8 +75,8 @@ iptable_mangle_hook(void *priv,
const struct nf_hook_state *state)
{
if (state->hook == NF_INET_LOCAL_OUT)
- return ipt_mangle_out(skb, state, priv);
- return ipt_do_table(skb, state, priv);
+ return ipt_mangle_out(priv, skb, state);
+ return ipt_do_table(priv, skb, state);
}
static struct nf_hook_ops *mangle_ops __read_mostly;
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index 45d7e072e6a5..56f6ecc43451 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -29,34 +29,27 @@ static const struct xt_table nf_nat_ipv4_table = {
.af = NFPROTO_IPV4,
};
-static unsigned int iptable_nat_do_chain(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ipt_do_table(skb, state, priv);
-}
-
static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
{
- .hook = iptable_nat_do_chain,
+ .hook = ipt_do_table,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_NAT_DST,
},
{
- .hook = iptable_nat_do_chain,
+ .hook = ipt_do_table,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_NAT_SRC,
},
{
- .hook = iptable_nat_do_chain,
+ .hook = ipt_do_table,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_NAT_DST,
},
{
- .hook = iptable_nat_do_chain,
+ .hook = ipt_do_table,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC,
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index b88e0f36cd05..ca5e5b21587c 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -32,17 +32,9 @@ static const struct xt_table packet_raw_before_defrag = {
.priority = NF_IP_PRI_RAW_BEFORE_DEFRAG,
};
-/* The work comes in here from netfilter.c. */
-static unsigned int
-iptable_raw_hook(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ipt_do_table(skb, state, priv);
-}
-
static struct nf_hook_ops *rawtable_ops __read_mostly;
-static int __net_init iptable_raw_table_init(struct net *net)
+static int iptable_raw_table_init(struct net *net)
{
struct ipt_replace *repl;
const struct xt_table *table = &packet_raw;
@@ -90,7 +82,7 @@ static int __init iptable_raw_init(void)
if (ret < 0)
return ret;
- rawtable_ops = xt_hook_ops_alloc(table, iptable_raw_hook);
+ rawtable_ops = xt_hook_ops_alloc(table, ipt_do_table);
if (IS_ERR(rawtable_ops)) {
xt_unregister_template(table);
return PTR_ERR(rawtable_ops);
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index f519162a2fa5..d885443cb267 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -33,13 +33,6 @@ static const struct xt_table security_table = {
.priority = NF_IP_PRI_SECURITY,
};
-static unsigned int
-iptable_security_hook(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ipt_do_table(skb, state, priv);
-}
-
static struct nf_hook_ops *sectbl_ops __read_mostly;
static int iptable_security_table_init(struct net *net)
@@ -78,7 +71,7 @@ static int __init iptable_security_init(void)
if (ret < 0)
return ret;
- sectbl_ops = xt_hook_ops_alloc(&security_table, iptable_security_hook);
+ sectbl_ops = xt_hook_ops_alloc(&security_table, ipt_do_table);
if (IS_ERR(sectbl_ops)) {
xt_unregister_template(&security_table);
return PTR_ERR(sectbl_ops);
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 613432a36f0a..e61ea428ea18 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -20,13 +20,8 @@
#endif
#include <net/netfilter/nf_conntrack_zones.h>
-static unsigned int defrag4_pernet_id __read_mostly;
static DEFINE_MUTEX(defrag4_mutex);
-struct defrag4_pernet {
- unsigned int users;
-};
-
static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
u_int32_t user)
{
@@ -111,19 +106,15 @@ static const struct nf_hook_ops ipv4_defrag_ops[] = {
static void __net_exit defrag4_net_exit(struct net *net)
{
- struct defrag4_pernet *nf_defrag = net_generic(net, defrag4_pernet_id);
-
- if (nf_defrag->users) {
+ if (net->nf.defrag_ipv4_users) {
nf_unregister_net_hooks(net, ipv4_defrag_ops,
ARRAY_SIZE(ipv4_defrag_ops));
- nf_defrag->users = 0;
+ net->nf.defrag_ipv4_users = 0;
}
}
static struct pernet_operations defrag4_net_ops = {
.exit = defrag4_net_exit,
- .id = &defrag4_pernet_id,
- .size = sizeof(struct defrag4_pernet),
};
static int __init nf_defrag_init(void)
@@ -138,24 +129,23 @@ static void __exit nf_defrag_fini(void)
int nf_defrag_ipv4_enable(struct net *net)
{
- struct defrag4_pernet *nf_defrag = net_generic(net, defrag4_pernet_id);
int err = 0;
mutex_lock(&defrag4_mutex);
- if (nf_defrag->users == UINT_MAX) {
+ if (net->nf.defrag_ipv4_users == UINT_MAX) {
err = -EOVERFLOW;
goto out_unlock;
}
- if (nf_defrag->users) {
- nf_defrag->users++;
+ if (net->nf.defrag_ipv4_users) {
+ net->nf.defrag_ipv4_users++;
goto out_unlock;
}
err = nf_register_net_hooks(net, ipv4_defrag_ops,
ARRAY_SIZE(ipv4_defrag_ops));
if (err == 0)
- nf_defrag->users = 1;
+ net->nf.defrag_ipv4_users = 1;
out_unlock:
mutex_unlock(&defrag4_mutex);
@@ -165,12 +155,10 @@ EXPORT_SYMBOL_GPL(nf_defrag_ipv4_enable);
void nf_defrag_ipv4_disable(struct net *net)
{
- struct defrag4_pernet *nf_defrag = net_generic(net, defrag4_pernet_id);
-
mutex_lock(&defrag4_mutex);
- if (nf_defrag->users) {
- nf_defrag->users--;
- if (nf_defrag->users == 0)
+ if (net->nf.defrag_ipv4_users) {
+ net->nf.defrag_ipv4_users--;
+ if (net->nf.defrag_ipv4_users == 0)
nf_unregister_net_hooks(net, ipv4_defrag_ops,
ARRAY_SIZE(ipv4_defrag_ops));
}
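The defrag user count moved from a pernet allocation into struct net itself, but the enable/disable discipline is unchanged. A kernel-style sketch of that pattern (names outside the mutex API are illustrative): the first user registers the hooks, the last user unregisters them, and the count refuses to wrap:

#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/limits.h>

static DEFINE_MUTEX(example_mutex);
static unsigned int example_users;

static int example_register_hooks(void) { return 0; }   /* stand-in */
static void example_unregister_hooks(void) { }          /* stand-in */

int example_enable(void)
{
        int err = 0;

        mutex_lock(&example_mutex);
        if (example_users == UINT_MAX) {
                err = -EOVERFLOW;               /* refuse to wrap the count */
        } else if (example_users) {
                example_users++;                /* hooks already registered */
        } else {
                err = example_register_hooks(); /* first user */
                if (!err)
                        example_users = 1;
        }
        mutex_unlock(&example_mutex);
        return err;
}

void example_disable(void)
{
        mutex_lock(&example_mutex);
        if (example_users && --example_users == 0)
                example_unregister_hooks();     /* last user */
        mutex_unlock(&example_mutex);
}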
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 75ca4b6e484f..9e8100728d46 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -1982,6 +1982,8 @@ static int replace_nexthop_grp(struct net *net, struct nexthop *old,
rcu_assign_pointer(old->nh_grp, newg);
if (newg->resilient) {
+ /* Make sure concurrent readers are not using 'oldg' anymore. */
+ synchronize_net();
rcu_assign_pointer(oldg->res_table, tmp_table);
rcu_assign_pointer(oldg->spare->res_table, tmp_table);
}
@@ -3565,6 +3567,7 @@ static struct notifier_block nh_netdev_notifier = {
};
static int nexthops_dump(struct net *net, struct notifier_block *nb,
+ enum nexthop_event_type event_type,
struct netlink_ext_ack *extack)
{
struct rb_root *root = &net->nexthop.rb_root;
@@ -3575,8 +3578,7 @@ static int nexthops_dump(struct net *net, struct notifier_block *nb,
struct nexthop *nh;
nh = rb_entry(node, struct nexthop, rb_node);
- err = call_nexthop_notifier(nb, net, NEXTHOP_EVENT_REPLACE, nh,
- extack);
+ err = call_nexthop_notifier(nb, net, event_type, nh, extack);
if (err)
break;
}
@@ -3590,7 +3592,7 @@ int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
int err;
rtnl_lock();
- err = nexthops_dump(net, nb, extack);
+ err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
if (err)
goto unlock;
err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
@@ -3603,8 +3605,17 @@ EXPORT_SYMBOL(register_nexthop_notifier);
int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
- return blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
- nb);
+ int err;
+
+ rtnl_lock();
+ err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
+ nb);
+ if (err)
+ goto unlock;
+ nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
+unlock:
+ rtnl_unlock();
+ return err;
}
EXPORT_SYMBOL(unregister_nexthop_notifier);
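
The nexthop change makes registration and unregistration symmetric: registering replays all existing nexthops to the new listener as REPLACE events, and unregistering (now under RTNL) replays them as DEL so the listener can flush its state. A compact userspace model of that replay idea, with invented types:

#include <stdio.h>

enum nh_event { EVENT_REPLACE, EVENT_DEL };

struct nexthop { int id; };

typedef void (*notifier_fn)(enum nh_event, const struct nexthop *);

/* Replay every existing object to one listener with the given event type. */
static void nexthops_dump(const struct nexthop *tbl, int n,
			  notifier_fn nb, enum nh_event event)
{
	for (int i = 0; i < n; i++)
		nb(event, &tbl[i]);
}

static void listener(enum nh_event ev, const struct nexthop *nh)
{
	printf("%s nexthop %d\n",
	       ev == EVENT_REPLACE ? "add/replace" : "delete", nh->id);
}

int main(void)
{
	struct nexthop tbl[] = { {1}, {7}, {42} };

	/* register: seed the listener with REPLACE events for existing state */
	nexthops_dump(tbl, 3, listener, EVENT_REPLACE);
	/* unregister: flush the listener's state with DEL events */
	nexthops_dump(tbl, 3, listener, EVENT_DEL);
	return 0;
}
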
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index b0d3a09dc84e..f30273afb539 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -53,7 +53,7 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
struct net *net = seq->private;
int orphans, sockets;
- orphans = percpu_counter_sum_positive(&tcp_orphan_count);
+ orphans = tcp_orphan_count_sum();
sockets = proto_sockets_allocated_sum_positive(&tcp_prot);
socket_seq_show(seq);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 4680268f2e59..97eb54774924 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -585,18 +585,6 @@ static struct ctl_table ipv4_table[] = {
.extra1 = &sysctl_fib_sync_mem_min,
.extra2 = &sysctl_fib_sync_mem_max,
},
- {
- .procname = "tcp_rx_skb_cache",
- .data = &tcp_rx_skb_cache_key.key,
- .mode = 0644,
- .proc_handler = proc_do_static_key,
- },
- {
- .procname = "tcp_tx_skb_cache",
- .data = &tcp_tx_skb_cache_key.key,
- .mode = 0644,
- .proc_handler = proc_do_static_key,
- },
{ }
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e8b48df73c85..c2d9830136d2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -287,8 +287,8 @@ enum {
TCP_CMSG_TS = 2
};
-struct percpu_counter tcp_orphan_count;
-EXPORT_SYMBOL_GPL(tcp_orphan_count);
+DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
+EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);
long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);
@@ -325,11 +325,6 @@ struct tcp_splice_state {
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);
-DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
-EXPORT_SYMBOL(tcp_rx_skb_cache_key);
-
-DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
-
void tcp_enter_memory_pressure(struct sock *sk)
{
unsigned long val;
@@ -647,7 +642,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
}
EXPORT_SYMBOL(tcp_ioctl);
-static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
+void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
tp->pushed_seq = tp->write_seq;
@@ -658,7 +653,7 @@ static inline bool forced_push(const struct tcp_sock *tp)
return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
-static void skb_entail(struct sock *sk, struct sk_buff *skb)
+void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
@@ -866,18 +861,6 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
{
struct sk_buff *skb;
- if (likely(!size)) {
- skb = sk->sk_tx_skb_cache;
- if (skb) {
- skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
- sk->sk_tx_skb_cache = NULL;
- pskb_trim(skb, 0);
- INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
- skb_shinfo(skb)->tx_flags = 0;
- memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb));
- return skb;
- }
- }
/* The TCP header must be at least 32-bit aligned. */
size = ALIGN(size, 4);
@@ -963,8 +946,8 @@ void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
}
}
-struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
- struct page *page, int offset, size_t *size)
+static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
+ struct page *page, int offset, size_t *size)
{
struct sk_buff *skb = tcp_write_queue_tail(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -985,7 +968,7 @@ new_segment:
#ifdef CONFIG_TLS_DEVICE
skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
#endif
- skb_entail(sk, skb);
+ tcp_skb_entail(sk, skb);
copy = size_goal;
}
@@ -1314,7 +1297,7 @@ new_segment:
process_backlog++;
skb->ip_summed = CHECKSUM_PARTIAL;
- skb_entail(sk, skb);
+ tcp_skb_entail(sk, skb);
copy = size_goal;
/* All packets are restored as if they have
@@ -2690,11 +2673,36 @@ void tcp_shutdown(struct sock *sk, int how)
}
EXPORT_SYMBOL(tcp_shutdown);
+int tcp_orphan_count_sum(void)
+{
+ int i, total = 0;
+
+ for_each_possible_cpu(i)
+ total += per_cpu(tcp_orphan_count, i);
+
+ return max(total, 0);
+}
+
+static int tcp_orphan_cache;
+static struct timer_list tcp_orphan_timer;
+#define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100)
+
+static void tcp_orphan_update(struct timer_list *unused)
+{
+ WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum());
+ mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
+}
+
+static bool tcp_too_many_orphans(int shift)
+{
+ return READ_ONCE(tcp_orphan_cache) << shift > sysctl_tcp_max_orphans;
+}
+
bool tcp_check_oom(struct sock *sk, int shift)
{
bool too_many_orphans, out_of_socket_memory;
- too_many_orphans = tcp_too_many_orphans(sk, shift);
+ too_many_orphans = tcp_too_many_orphans(shift);
out_of_socket_memory = tcp_out_of_memory(sk);
if (too_many_orphans)
@@ -2803,7 +2811,7 @@ adjudge_to_death:
/* remove backlog if any, without releasing ownership. */
__release_sock(sk);
- percpu_counter_inc(sk->sk_prot->orphan_count);
+ this_cpu_inc(tcp_orphan_count);
/* Have we already been destroyed by a softirq or backlog? */
if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
@@ -2920,11 +2928,6 @@ void tcp_write_queue_purge(struct sock *sk)
sk_wmem_free_skb(sk, skb);
}
tcp_rtx_queue_purge(sk);
- skb = sk->sk_tx_skb_cache;
- if (skb) {
- __kfree_skb(skb);
- sk->sk_tx_skb_cache = NULL;
- }
INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
sk_mem_reclaim(sk);
tcp_clear_all_retrans_hints(tcp_sk(sk));
@@ -2961,10 +2964,6 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
- if (sk->sk_rx_skb_cache) {
- __kfree_skb(sk->sk_rx_skb_cache);
- sk->sk_rx_skb_cache = NULL;
- }
WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
tp->urg_data = 0;
tcp_write_queue_purge(sk);
@@ -4505,7 +4504,10 @@ void __init tcp_init(void)
sizeof_field(struct sk_buff, cb));
percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
- percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
+
+ timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE);
+ mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
+
inet_hashinfo_init(&tcp_hashinfo);
inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
thash_entries, 21, /* one slot per 2 MB*/
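
In the tcp.c hunks above, tcp_orphan_count moves from a percpu_counter to plain per-CPU integers: increments stay cheap and lock-free, the expensive cross-CPU sum runs at most every 100 ms from a deferrable timer, and the OOM fast path only reads the cached sum. Because a socket can be orphaned on one CPU and released on another, individual per-CPU values can go negative, hence the max(total, 0) clamp. A small single-threaded model of the arithmetic, with an array standing in for per-CPU storage:

#include <stdio.h>

#define NR_CPUS 4

/* One counter per CPU; inc and dec may land on different CPUs. */
static int orphan_count[NR_CPUS];
static int orphan_cache;	/* refreshed periodically, read on the fast path */

static int orphan_count_sum(void)
{
	int total = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		total += orphan_count[cpu];

	/* Per-CPU deltas can transiently sum to a negative value. */
	return total > 0 ? total : 0;
}

/* What the 100 ms deferrable timer would do. */
static void orphan_update(void)
{
	orphan_cache = orphan_count_sum();
}

static int too_many_orphans(int shift, int max_orphans)
{
	return (orphan_cache << shift) > max_orphans;
}

int main(void)
{
	orphan_count[0]++;	/* socket orphaned on CPU 0 */
	orphan_count[1]++;	/* another on CPU 1 */
	orphan_count[2]--;	/* un-orphaned on CPU 2: per-CPU value < 0 */
	orphan_update();
	printf("cached orphans: %d, oom? %d\n",
	       orphan_cache, too_many_orphans(0, 1));
	return 0;
}
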
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 141e85e6422b..246ab7b5e857 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -500,8 +500,11 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
+ if (room <= 0)
+ return;
+
/* Check #1 */
- if (room > 0 && !tcp_under_memory_pressure(sk)) {
+ if (!tcp_under_memory_pressure(sk)) {
unsigned int truesize = truesize_adjust(adjust, skb);
int incr;
@@ -518,6 +521,11 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
tp->rcv_ssthresh += min(room, incr);
inet_csk(sk)->icsk_ack.quick |= 1;
}
+ } else {
+ /* Under pressure:
+ * Adjust rcv_ssthresh according to reserved mem
+ */
+ tcp_adjust_rcv_ssthresh(sk);
}
}
@@ -3221,7 +3229,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
long seq_rtt_us = -1L;
long ca_rtt_us = -1L;
u32 pkts_acked = 0;
- u32 last_in_flight = 0;
bool rtt_update;
int flag = 0;
@@ -3257,7 +3264,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
if (!first_ackt)
first_ackt = last_ackt;
- last_in_flight = TCP_SKB_CB(skb)->tx.in_flight;
if (before(start_seq, reord))
reord = start_seq;
if (!after(scb->end_seq, tp->high_seq))
@@ -3323,8 +3329,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
- if (pkts_acked == 1 && last_in_flight < tp->mss_cache &&
- last_in_flight && !prior_sacked && fully_acked &&
+ if (pkts_acked == 1 && fully_acked && !prior_sacked &&
+ (tp->snd_una - prior_snd_una) < tp->mss_cache &&
sack->rate->prior_delivered + 1 == tp->delivered &&
!(flag & (FLAG_CA_ALERT | FLAG_SYN_ACKED))) {
/* Conservatively mark a delayed ACK. It's typically
@@ -3381,9 +3387,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
if (icsk->icsk_ca_ops->pkts_acked) {
struct ack_sample sample = { .pkts_acked = pkts_acked,
- .rtt_us = sack->rate->rtt_us,
- .in_flight = last_in_flight };
+ .rtt_us = sack->rate->rtt_us };
+ sample.in_flight = tp->mss_cache *
+ (tp->delivered - sack->rate->prior_delivered);
icsk->icsk_ca_ops->pkts_acked(sk, &sample);
}
@@ -5346,7 +5353,7 @@ static int tcp_prune_queue(struct sock *sk)
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
tcp_clamp_window(sk);
else if (tcp_under_memory_pressure(sk))
- tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+ tcp_adjust_rcv_ssthresh(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
return 0;
@@ -5381,7 +5388,7 @@ static int tcp_prune_queue(struct sock *sk)
return -1;
}
-static bool tcp_should_expand_sndbuf(const struct sock *sk)
+static bool tcp_should_expand_sndbuf(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -5392,8 +5399,18 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
return false;
/* If we are under global TCP memory pressure, do not expand. */
- if (tcp_under_memory_pressure(sk))
+ if (tcp_under_memory_pressure(sk)) {
+ int unused_mem = sk_unused_reserved_mem(sk);
+
+ /* Adjust sndbuf according to reserved mem. But make sure
+ * it never goes below SOCK_MIN_SNDBUF.
+ * See sk_stream_moderate_sndbuf() for more details.
+ */
+ if (unused_mem > SOCK_MIN_SNDBUF)
+ WRITE_ONCE(sk->sk_sndbuf, unused_mem);
+
return false;
+ }
/* If we are under soft global TCP memory pressure, do not expand. */
if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
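
With tx.in_flight removed from the skb control block, the ACK sample above approximates bytes in flight as the MSS times the delivered-count delta since the rate sample began. A worked example of that arithmetic, with invented values:

#include <stdio.h>

int main(void)
{
	unsigned int mss_cache = 1448;		/* sender MSS, bytes */
	unsigned int delivered = 1012;		/* tp->delivered after this ACK */
	unsigned int prior_delivered = 1000;	/* rate sample's prior_delivered */

	/* Approximate bytes in flight over the interval covered by the ACK:
	 * packets delivered since the sample started, scaled by the MSS.
	 */
	unsigned int in_flight = mss_cache * (delivered - prior_delivered);

	printf("in_flight ~= %u bytes (%u packets)\n",
	       in_flight, delivered - prior_delivered);
	return 0;
}
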
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 2e62e0d6373a..5e6d8cb82ce5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1037,6 +1037,20 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
EXPORT_SYMBOL(tcp_md5_needed);
+static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
+{
+ if (!old)
+ return true;
+
+ /* l3index always overrides non-l3index */
+ if (old->l3index && new->l3index == 0)
+ return false;
+ if (old->l3index == 0 && new->l3index)
+ return true;
+
+ return old->prefixlen < new->prefixlen;
+}
+
/* Find the Key structure for an address. */
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
const union tcp_md5_addr *addr,
@@ -1059,7 +1073,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
lockdep_sock_is_held(sk)) {
if (key->family != family)
continue;
- if (key->l3index && key->l3index != l3index)
+ if (key->flags & TCP_MD5SIG_FLAG_IFINDEX && key->l3index != l3index)
continue;
if (family == AF_INET) {
mask = inet_make_mask(key->prefixlen);
@@ -1074,8 +1088,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
match = false;
}
- if (match && (!best_match ||
- key->prefixlen > best_match->prefixlen))
+ if (match && better_md5_match(best_match, key))
best_match = key;
}
return best_match;
@@ -1085,7 +1098,7 @@ EXPORT_SYMBOL(__tcp_md5_do_lookup);
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
const union tcp_md5_addr *addr,
int family, u8 prefixlen,
- int l3index)
+ int l3index, u8 flags)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_key *key;
@@ -1105,7 +1118,9 @@ static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
lockdep_sock_is_held(sk)) {
if (key->family != family)
continue;
- if (key->l3index && key->l3index != l3index)
+ if ((key->flags & TCP_MD5SIG_FLAG_IFINDEX) != (flags & TCP_MD5SIG_FLAG_IFINDEX))
+ continue;
+ if (key->l3index != l3index)
continue;
if (!memcmp(&key->addr, addr, size) &&
key->prefixlen == prefixlen)
@@ -1129,7 +1144,7 @@ EXPORT_SYMBOL(tcp_v4_md5_lookup);
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
- int family, u8 prefixlen, int l3index,
+ int family, u8 prefixlen, int l3index, u8 flags,
const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
/* Add Key to the list */
@@ -1137,7 +1152,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_info *md5sig;
- key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
+ key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
if (key) {
/* Pre-existing entry - just update that one.
* Note that the key might be used concurrently.
@@ -1182,6 +1197,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
key->family = family;
key->prefixlen = prefixlen;
key->l3index = l3index;
+ key->flags = flags;
memcpy(&key->addr, addr,
(family == AF_INET6) ? sizeof(struct in6_addr) :
sizeof(struct in_addr));
@@ -1191,11 +1207,11 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
- u8 prefixlen, int l3index)
+ u8 prefixlen, int l3index, u8 flags)
{
struct tcp_md5sig_key *key;
- key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
+ key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
if (!key)
return -ENOENT;
hlist_del_rcu(&key->node);
@@ -1229,6 +1245,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
const union tcp_md5_addr *addr;
u8 prefixlen = 32;
int l3index = 0;
+ u8 flags;
if (optlen < sizeof(cmd))
return -EINVAL;
@@ -1239,6 +1256,8 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
if (sin->sin_family != AF_INET)
return -EINVAL;
+ flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
+
if (optname == TCP_MD5SIG_EXT &&
cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
prefixlen = cmd.tcpm_prefixlen;
@@ -1246,7 +1265,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
return -EINVAL;
}
- if (optname == TCP_MD5SIG_EXT &&
+ if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
struct net_device *dev;
@@ -1267,12 +1286,12 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
if (!cmd.tcpm_keylen)
- return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index);
+ return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index, flags);
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
return -EINVAL;
- return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index,
+ return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
@@ -1596,7 +1615,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
* memory, then we end up not copying the key
* across. Shucks.
*/
- tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index,
+ tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index, key->flags,
key->key, key->keylen, GFP_ATOMIC);
sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
}
@@ -1941,7 +1960,6 @@ static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
int tcp_v4_rcv(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
- struct sk_buff *skb_to_free;
int sdif = inet_sdif(skb);
int dif = inet_iif(skb);
const struct iphdr *iph;
@@ -2082,17 +2100,12 @@ process:
tcp_segs_in(tcp_sk(sk), skb);
ret = 0;
if (!sock_owned_by_user(sk)) {
- skb_to_free = sk->sk_rx_skb_cache;
- sk->sk_rx_skb_cache = NULL;
ret = tcp_v4_do_rcv(sk, skb);
} else {
if (tcp_add_backlog(sk, skb))
goto discard_and_relse;
- skb_to_free = NULL;
}
bh_unlock_sock(sk);
- if (skb_to_free)
- __kfree_skb(skb_to_free);
put_and_return:
if (refcounted)
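
better_md5_match() above encodes a two-level ordering for MD5 key lookup: a key bound to an L3 device (l3index != 0) always beats an unbound key, and the longest-prefix rule only breaks ties within the same class. A standalone model of that ordering, with illustrative key data:

#include <stdbool.h>
#include <stdio.h>

struct md5_key {
	int l3index;		/* 0 = not bound to a VRF/L3 device */
	unsigned char prefixlen;
	const char *name;
};

/* Returns true when 'new' should replace 'old' as the best match so far. */
static bool better_md5_match(const struct md5_key *old, const struct md5_key *new)
{
	if (!old)
		return true;

	/* An l3index-bound key always beats an unbound one. */
	if (old->l3index && new->l3index == 0)
		return false;
	if (old->l3index == 0 && new->l3index)
		return true;

	/* Same class: prefer the more specific prefix. */
	return old->prefixlen < new->prefixlen;
}

int main(void)
{
	struct md5_key wide   = { 0, 16, "unbound /16" };
	struct md5_key narrow = { 0, 24, "unbound /24" };
	struct md5_key vrf    = { 3, 8,  "vrf-bound /8" };

	const struct md5_key *keys[] = { &wide, &narrow, &vrf };
	const struct md5_key *best = NULL;

	for (int i = 0; i < 3; i++)
		if (better_md5_match(best, keys[i]))
			best = keys[i];

	printf("best match: %s\n", best->name);	/* vrf-bound wins despite /8 */
	return 0;
}
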
diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c
index 95db7a11ba2a..ab552356bdba 100644
--- a/net/ipv4/tcp_nv.c
+++ b/net/ipv4/tcp_nv.c
@@ -25,7 +25,6 @@
* 1) Add mechanism to deal with reverse congestion.
*/
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <net/tcp.h>
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6d72f3ea48c4..3a01e5593a17 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1256,8 +1256,6 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
if (clone_it) {
- TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
- - tp->snd_una;
oskb = skb;
tcp_skb_tsorted_save(oskb) {
@@ -2969,8 +2967,7 @@ u32 __tcp_select_window(struct sock *sk)
icsk->icsk_ack.quick = 0;
if (tcp_under_memory_pressure(sk))
- tp->rcv_ssthresh = min(tp->rcv_ssthresh,
- 4U * tp->advmss);
+ tcp_adjust_rcv_ssthresh(sk);
/* free_space might become our new window, make sure we don't
* increase it due to wscale.
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index 0de693565963..fbab921670cc 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -65,6 +65,7 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;
TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp;
TCP_SKB_CB(skb)->tx.delivered = tp->delivered;
+ TCP_SKB_CB(skb)->tx.delivered_ce = tp->delivered_ce;
TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0;
}
@@ -86,6 +87,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
if (!rs->prior_delivered ||
after(scb->tx.delivered, rs->prior_delivered)) {
+ rs->prior_delivered_ce = scb->tx.delivered_ce;
rs->prior_delivered = scb->tx.delivered;
rs->prior_mstamp = scb->tx.delivered_mstamp;
rs->is_app_limited = scb->tx.is_app_limited;
@@ -138,6 +140,10 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
}
rs->delivered = tp->delivered - rs->prior_delivered;
+ rs->delivered_ce = tp->delivered_ce - rs->prior_delivered_ce;
+ /* delivered_ce occupies less than 32 bits in the skb control block */
+ rs->delivered_ce &= TCPCB_DELIVERED_CE_MASK;
+
/* Model sending data and receiving ACKs as separate pipeline phases
* for a window. Usually the ACK phase is longer, but with ACK
* compression the send phase can be longer. To be safe we use the
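
With tx.delivered_ce recorded per skb, a rate sample can now report how many of the newly delivered packets carried a CE mark, from which a congestion control module could derive an ECN fraction. The mask matters because the per-skb field is narrower than 32 bits, so the delta must be taken modulo the field width. A small arithmetic sketch; the 20-bit width and values are assumptions for illustration:

#include <stdio.h>

#define DELIVERED_CE_MASK ((1U << 20) - 1)	/* illustrative 20-bit field */

int main(void)
{
	unsigned int delivered    = 5000, prior_delivered    = 4900;
	unsigned int delivered_ce = 730,  prior_delivered_ce = 700;

	unsigned int rs_delivered = delivered - prior_delivered;
	/* The per-skb field is narrower than 32 bits, so mask the delta
	 * to get correct modulo arithmetic across wraparound.
	 */
	unsigned int rs_delivered_ce = (delivered_ce - prior_delivered_ce)
				       & DELIVERED_CE_MASK;

	printf("CE fraction over sample: %u/%u = %.1f%%\n",
	       rs_delivered_ce, rs_delivered,
	       100.0 * rs_delivered_ce / rs_delivered);
	return 0;
}
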
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8851c9463b4b..8536b2a7210b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -390,7 +390,8 @@ static int compute_score(struct sock *sk, struct net *net,
dif, sdif);
if (!dev_match)
return -1;
- score += 4;
+ if (sk->sk_bound_dev_if)
+ score += 4;
if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
score++;
@@ -1053,7 +1054,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
__be16 dport;
u8 tos;
int err, is_udplite = IS_UDPLITE(sk);
- int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
+ int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
struct sk_buff *skb;
struct ip_options_data opt_copy;
@@ -1361,7 +1362,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
}
up->len += size;
- if (!(up->corkflag || (flags&MSG_MORE)))
+ if (!(READ_ONCE(up->corkflag) || (flags&MSG_MORE)))
ret = udp_push_pending_frames(sk);
if (!ret)
ret = size;
@@ -2662,9 +2663,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
switch (optname) {
case UDP_CORK:
if (val != 0) {
- up->corkflag = 1;
+ WRITE_ONCE(up->corkflag, 1);
} else {
- up->corkflag = 0;
+ WRITE_ONCE(up->corkflag, 0);
lock_sock(sk);
push_pending_frames(sk);
release_sock(sk);
@@ -2787,7 +2788,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
switch (optname) {
case UDP_CORK:
- val = up->corkflag;
+ val = READ_ONCE(up->corkflag);
break;
case UDP_ENCAP:
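
The corkflag annotations above exist because UDP_CORK is now read locklessly on the send path: the setter uses WRITE_ONCE() and every reader uses READ_ONCE() so the compiler cannot tear or refetch the access. A userspace analogue using C11 relaxed atomics, which give the same no-tearing guarantee:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for READ_ONCE/WRITE_ONCE: a relaxed atomic forbids the
 * compiler from tearing or fusing the access, which is the property
 * the kernel annotations provide for lockless readers.
 */
static _Atomic unsigned char corkflag;

static void udp_set_cork(int val)
{
	atomic_store_explicit(&corkflag, val ? 1 : 0, memory_order_relaxed);
}

static int udp_sendmsg_would_cork(int msg_more)
{
	/* Lockless read from the transmit path. */
	return atomic_load_explicit(&corkflag, memory_order_relaxed) || msg_more;
}

int main(void)
{
	udp_set_cork(1);
	printf("corked send? %d\n", udp_sendmsg_would_cork(0));	/* 1 */
	udp_set_cork(0);
	printf("corked send? %d\n", udp_sendmsg_would_cork(0));	/* 0 */
	return 0;
}
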
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index e504204bca92..bf2e5e5fe142 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -332,10 +332,10 @@ config IPV6_IOAM6_LWTUNNEL
bool "IPv6: IOAM Pre-allocated Trace insertion support"
depends on IPV6
select LWTUNNEL
+ select DST_CACHE
help
- Support for the inline insertion of IOAM Pre-allocated
- Trace Header (only on locally generated packets), using
- the lightweight tunnels mechanism.
+ Support for the insertion of IOAM Pre-allocated Trace
+ Header using the lightweight tunnels mechanism.
If unsure, say N.
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 1bc7e143217b..3036a45e8a1e 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -5,16 +5,14 @@
obj-$(CONFIG_IPV6) += ipv6.o
-ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
+ipv6-y := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
addrlabel.o \
route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \
raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o ping.o \
exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o \
udp_offload.o seg6.o fib6_notifier.o rpl.o ioam6.o
-ipv6-offload := ip6_offload.o tcpv6_offload.o exthdrs_offload.o
-
-ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o
+ipv6-$(CONFIG_SYSCTL) += sysctl_net_ipv6.o
ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o
ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \
@@ -29,8 +27,6 @@ ipv6-$(CONFIG_IPV6_SEG6_HMAC) += seg6_hmac.o
ipv6-$(CONFIG_IPV6_RPL_LWTUNNEL) += rpl_iptunnel.o
ipv6-$(CONFIG_IPV6_IOAM6_LWTUNNEL) += ioam6_iptunnel.o
-ipv6-objs += $(ipv6-y)
-
obj-$(CONFIG_INET6_AH) += ah6.o
obj-$(CONFIG_INET6_ESP) += esp6.o
obj-$(CONFIG_INET6_ESP_OFFLOAD) += esp6_offload.o
@@ -48,7 +44,8 @@ obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
obj-$(CONFIG_IPV6_FOU) += fou6.o
obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o
-obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
+obj-$(CONFIG_INET) += output_core.o protocol.o \
+ ip6_offload.o tcpv6_offload.o exthdrs_offload.o
obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c6a90b7bbb70..d4fae16deec4 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2237,12 +2237,12 @@ static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
{
- union fwnet_hwaddr *ha;
+ const union fwnet_hwaddr *ha;
if (dev->addr_len != FWNET_ALEN)
return -1;
- ha = (union fwnet_hwaddr *)dev->dev_addr;
+ ha = (const union fwnet_hwaddr *)dev->dev_addr;
memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
eui[0] ^= 2;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 3a871a09f962..38ece3b7b839 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -979,7 +979,7 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
if (!skb_valid_dst(skb))
ip6_route_input(skb);
- ioam6_fill_trace_data(skb, ns, trace);
+ ioam6_fill_trace_data(skb, ns, trace, true);
break;
default:
break;
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index a1ac0e3d8c60..47447f0241df 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -610,7 +610,11 @@ int ila_xlat_init_net(struct net *net)
if (err)
return err;
- rhashtable_init(&ilan->xlat.rhash_table, &rht_params);
+ err = rhashtable_init(&ilan->xlat.rhash_table, &rht_params);
+ if (err) {
+ free_bucket_spinlocks(ilan->xlat.locks);
+ return err;
+ }
return 0;
}
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 55c290d55605..67c9114835c8 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -106,7 +106,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
return -1;
- score = 1;
+ score = sk->sk_bound_dev_if ? 2 : 1;
if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
score++;
}
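
This scoring change (and the matching one in udp.c earlier) awards the device-match bonus only when the socket is actually bound to a device, so a wildcard socket can no longer tie with a device-bound one during lookup. A compact model of the scoring rule, with a simplified match check:

#include <stdio.h>

struct sock { int bound_dev_if; };

/* The device bonus only applies to sockets really bound to a device,
 * so an unbound (wildcard) socket scores lower than a bound one.
 */
static int compute_score(const struct sock *sk, int dif)
{
	if (sk->bound_dev_if && sk->bound_dev_if != dif)
		return -1;			/* bound elsewhere: no match */
	return sk->bound_dev_if ? 2 : 1;
}

int main(void)
{
	struct sock wildcard = { 0 };
	struct sock bound    = { 4 };		/* bound to ifindex 4 */

	printf("wildcard=%d bound=%d\n",
	       compute_score(&wildcard, 4), compute_score(&bound, 4));
	return 0;
}
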
diff --git a/net/ipv6/ioam6.c b/net/ipv6/ioam6.c
index 5e8961004832..122a3d47424c 100644
--- a/net/ipv6/ioam6.c
+++ b/net/ipv6/ioam6.c
@@ -631,7 +631,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
struct ioam6_namespace *ns,
struct ioam6_trace_hdr *trace,
struct ioam6_schema *sc,
- u8 sclen)
+ u8 sclen, bool is_input)
{
struct __kernel_sock_timeval ts;
u64 raw64;
@@ -645,7 +645,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
/* hop_lim and node_id */
if (trace->type.bit0) {
byte = ipv6_hdr(skb)->hop_limit;
- if (skb->dev)
+ if (is_input)
byte--;
raw32 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id;
@@ -730,7 +730,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
/* hop_lim and node_id (wide) */
if (trace->type.bit8) {
byte = ipv6_hdr(skb)->hop_limit;
- if (skb->dev)
+ if (is_input)
byte--;
raw64 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id_wide;
@@ -770,6 +770,66 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
data += sizeof(__be32);
}
+ /* bit12 undefined: filled with empty value */
+ if (trace->type.bit12) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit13 undefined: filled with empty value */
+ if (trace->type.bit13) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit14 undefined: filled with empty value */
+ if (trace->type.bit14) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit15 undefined: filled with empty value */
+ if (trace->type.bit15) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit16 undefined: filled with empty value */
+ if (trace->type.bit16) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit17 undefined: filled with empty value */
+ if (trace->type.bit17) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit18 undefined: filled with empty value */
+ if (trace->type.bit18) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit19 undefined: filled with empty value */
+ if (trace->type.bit19) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit20 undefined: filled with empty value */
+ if (trace->type.bit20) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
+ /* bit21 undefined: filled with empty value */
+ if (trace->type.bit21) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ data += sizeof(__be32);
+ }
+
/* opaque state snapshot */
if (trace->type.bit22) {
if (!sc) {
@@ -786,21 +846,16 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
/* called with rcu_read_lock() */
void ioam6_fill_trace_data(struct sk_buff *skb,
struct ioam6_namespace *ns,
- struct ioam6_trace_hdr *trace)
+ struct ioam6_trace_hdr *trace,
+ bool is_input)
{
struct ioam6_schema *sc;
u8 sclen = 0;
- /* Skip if Overflow flag is set OR
- * if an unknown type (bit 12-21) is set
+ /* Skip if Overflow flag is set
*/
- if (trace->overflow ||
- trace->type.bit12 | trace->type.bit13 | trace->type.bit14 |
- trace->type.bit15 | trace->type.bit16 | trace->type.bit17 |
- trace->type.bit18 | trace->type.bit19 | trace->type.bit20 |
- trace->type.bit21) {
+ if (trace->overflow)
return;
- }
/* NodeLen does not include Opaque State Snapshot length. We need to
* take it into account if the corresponding bit is set (bit 22) and
@@ -822,7 +877,7 @@ void ioam6_fill_trace_data(struct sk_buff *skb,
return;
}
- __ioam6_fill_trace_data(skb, ns, trace, sc, sclen);
+ __ioam6_fill_trace_data(skb, ns, trace, sc, sclen, is_input);
trace->remlen -= trace->nodelen + sclen;
}
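
The ten unrolled branches for the undefined trace bits (12 through 21) above all emit the same 32-bit "unavailable" sentinel. A loop-based sketch of the equivalent fill, with the sentinel value and buffer sizing assumed for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IOAM6_U32_UNAVAILABLE 0xffffffffU

/* Stand-in for *(__be32 *)data = cpu_to_be32(...): write the sentinel
 * in network byte order (a no-op swap for this all-ones value).
 */
static uint8_t *fill_unavailable(uint8_t *data)
{
	uint32_t be = __builtin_bswap32(IOAM6_U32_UNAVAILABLE);

	memcpy(data, &be, sizeof(be));
	return data + sizeof(be);
}

int main(void)
{
	uint8_t node_data[10 * sizeof(uint32_t)];
	uint8_t *data = node_data;

	/* bits 12..21 are undefined: emit the sentinel for each one that
	 * is set in the trace type (all ten set in this example).
	 */
	for (int bit = 12; bit <= 21; bit++)
		data = fill_unavailable(data);

	printf("filled %zu bytes of unavailable fields\n",
	       (size_t)(data - node_data));
	return 0;
}
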
diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
index f9ee04541c17..f90a87389fcc 100644
--- a/net/ipv6/ioam6_iptunnel.c
+++ b/net/ipv6/ioam6_iptunnel.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/net.h>
-#include <linux/netlink.h>
#include <linux/in6.h>
#include <linux/ioam6.h>
#include <linux/ioam6_iptunnel.h>
@@ -17,18 +16,26 @@
#include <net/sock.h>
#include <net/lwtunnel.h>
#include <net/ioam6.h>
+#include <net/netlink.h>
+#include <net/ipv6.h>
+#include <net/dst_cache.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
#define IOAM6_MASK_SHORT_FIELDS 0xff100000
#define IOAM6_MASK_WIDE_FIELDS 0xe00000
struct ioam6_lwt_encap {
- struct ipv6_hopopt_hdr eh;
- u8 pad[2]; /* 2-octet padding for 4n-alignment */
- struct ioam6_hdr ioamh;
- struct ioam6_trace_hdr traceh;
+ struct ipv6_hopopt_hdr eh;
+ u8 pad[2]; /* 2-octet padding for 4n-alignment */
+ struct ioam6_hdr ioamh;
+ struct ioam6_trace_hdr traceh;
} __packed;
struct ioam6_lwt {
+ struct dst_cache cache;
+ u8 mode;
+ struct in6_addr tundst;
struct ioam6_lwt_encap tuninfo;
};
@@ -42,40 +49,29 @@ static struct ioam6_lwt_encap *ioam6_lwt_info(struct lwtunnel_state *lwt)
return &ioam6_lwt_state(lwt)->tuninfo;
}
-static struct ioam6_trace_hdr *ioam6_trace(struct lwtunnel_state *lwt)
+static struct ioam6_trace_hdr *ioam6_lwt_trace(struct lwtunnel_state *lwt)
{
return &(ioam6_lwt_state(lwt)->tuninfo.traceh);
}
static const struct nla_policy ioam6_iptunnel_policy[IOAM6_IPTUNNEL_MAX + 1] = {
+ [IOAM6_IPTUNNEL_MODE] = NLA_POLICY_RANGE(NLA_U8,
+ IOAM6_IPTUNNEL_MODE_MIN,
+ IOAM6_IPTUNNEL_MODE_MAX),
+ [IOAM6_IPTUNNEL_DST] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
[IOAM6_IPTUNNEL_TRACE] = NLA_POLICY_EXACT_LEN(sizeof(struct ioam6_trace_hdr)),
};
-static int nla_put_ioam6_trace(struct sk_buff *skb, int attrtype,
- struct ioam6_trace_hdr *trace)
-{
- struct ioam6_trace_hdr *data;
- struct nlattr *nla;
- int len;
-
- len = sizeof(*trace);
-
- nla = nla_reserve(skb, attrtype, len);
- if (!nla)
- return -EMSGSIZE;
-
- data = nla_data(nla);
- memcpy(data, trace, len);
-
- return 0;
-}
-
static bool ioam6_validate_trace_hdr(struct ioam6_trace_hdr *trace)
{
u32 fields;
if (!trace->type_be32 || !trace->remlen ||
- trace->remlen > IOAM6_TRACE_DATA_SIZE_MAX / 4)
+ trace->remlen > IOAM6_TRACE_DATA_SIZE_MAX / 4 ||
+ trace->type.bit12 | trace->type.bit13 | trace->type.bit14 |
+ trace->type.bit15 | trace->type.bit16 | trace->type.bit17 |
+ trace->type.bit18 | trace->type.bit19 | trace->type.bit20 |
+ trace->type.bit21)
return false;
trace->nodelen = 0;
@@ -97,9 +93,10 @@ static int ioam6_build_state(struct net *net, struct nlattr *nla,
struct nlattr *tb[IOAM6_IPTUNNEL_MAX + 1];
struct ioam6_lwt_encap *tuninfo;
struct ioam6_trace_hdr *trace;
- struct lwtunnel_state *s;
- int len_aligned;
- int len, err;
+ struct lwtunnel_state *lwt;
+ struct ioam6_lwt *ilwt;
+ int len_aligned, err;
+ u8 mode;
if (family != AF_INET6)
return -EINVAL;
@@ -109,6 +106,16 @@ static int ioam6_build_state(struct net *net, struct nlattr *nla,
if (err < 0)
return err;
+ if (!tb[IOAM6_IPTUNNEL_MODE])
+ mode = IOAM6_IPTUNNEL_MODE_INLINE;
+ else
+ mode = nla_get_u8(tb[IOAM6_IPTUNNEL_MODE]);
+
+ if (!tb[IOAM6_IPTUNNEL_DST] && mode != IOAM6_IPTUNNEL_MODE_INLINE) {
+ NL_SET_ERR_MSG(extack, "this mode needs a tunnel destination");
+ return -EINVAL;
+ }
+
if (!tb[IOAM6_IPTUNNEL_TRACE]) {
NL_SET_ERR_MSG(extack, "missing trace");
return -EINVAL;
@@ -121,15 +128,24 @@ static int ioam6_build_state(struct net *net, struct nlattr *nla,
return -EINVAL;
}
- len = sizeof(*tuninfo) + trace->remlen * 4;
- len_aligned = ALIGN(len, 8);
-
- s = lwtunnel_state_alloc(len_aligned);
- if (!s)
+ len_aligned = ALIGN(trace->remlen * 4, 8);
+ lwt = lwtunnel_state_alloc(sizeof(*ilwt) + len_aligned);
+ if (!lwt)
return -ENOMEM;
- tuninfo = ioam6_lwt_info(s);
- tuninfo->eh.hdrlen = (len_aligned >> 3) - 1;
+ ilwt = ioam6_lwt_state(lwt);
+ err = dst_cache_init(&ilwt->cache, GFP_ATOMIC);
+ if (err) {
+ kfree(lwt);
+ return err;
+ }
+
+ ilwt->mode = mode;
+ if (tb[IOAM6_IPTUNNEL_DST])
+ ilwt->tundst = nla_get_in6_addr(tb[IOAM6_IPTUNNEL_DST]);
+
+ tuninfo = ioam6_lwt_info(lwt);
+ tuninfo->eh.hdrlen = ((sizeof(*tuninfo) + len_aligned) >> 3) - 1;
tuninfo->pad[0] = IPV6_TLV_PADN;
tuninfo->ioamh.type = IOAM6_TYPE_PREALLOC;
tuninfo->ioamh.opt_type = IPV6_TLV_IOAM;
@@ -138,27 +154,39 @@ static int ioam6_build_state(struct net *net, struct nlattr *nla,
memcpy(&tuninfo->traceh, trace, sizeof(*trace));
- len = len_aligned - len;
- if (len == 1) {
- tuninfo->traceh.data[trace->remlen * 4] = IPV6_TLV_PAD1;
- } else if (len > 0) {
+ if (len_aligned - trace->remlen * 4) {
tuninfo->traceh.data[trace->remlen * 4] = IPV6_TLV_PADN;
- tuninfo->traceh.data[trace->remlen * 4 + 1] = len - 2;
+ tuninfo->traceh.data[trace->remlen * 4 + 1] = 2;
}
- s->type = LWTUNNEL_ENCAP_IOAM6;
- s->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
+ lwt->type = LWTUNNEL_ENCAP_IOAM6;
+ lwt->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
- *ts = s;
+ *ts = lwt;
return 0;
}
-static int ioam6_do_inline(struct sk_buff *skb, struct ioam6_lwt_encap *tuninfo)
+static int ioam6_do_fill(struct net *net, struct sk_buff *skb)
{
struct ioam6_trace_hdr *trace;
- struct ipv6hdr *oldhdr, *hdr;
struct ioam6_namespace *ns;
+
+ trace = (struct ioam6_trace_hdr *)(skb_transport_header(skb)
+ + sizeof(struct ipv6_hopopt_hdr) + 2
+ + sizeof(struct ioam6_hdr));
+
+ ns = ioam6_namespace(net, trace->namespace_id);
+ if (ns)
+ ioam6_fill_trace_data(skb, ns, trace, false);
+
+ return 0;
+}
+
+static int ioam6_do_inline(struct net *net, struct sk_buff *skb,
+ struct ioam6_lwt_encap *tuninfo)
+{
+ struct ipv6hdr *oldhdr, *hdr;
int hdrlen, err;
hdrlen = (tuninfo->eh.hdrlen + 1) << 3;
@@ -187,80 +215,200 @@ static int ioam6_do_inline(struct sk_buff *skb, struct ioam6_lwt_encap *tuninfo)
hdr->nexthdr = NEXTHDR_HOP;
hdr->payload_len = cpu_to_be16(skb->len - sizeof(*hdr));
- trace = (struct ioam6_trace_hdr *)(skb_transport_header(skb)
- + sizeof(struct ipv6_hopopt_hdr) + 2
- + sizeof(struct ioam6_hdr));
+ return ioam6_do_fill(net, skb);
+}
- ns = ioam6_namespace(dev_net(skb_dst(skb)->dev), trace->namespace_id);
- if (ns)
- ioam6_fill_trace_data(skb, ns, trace);
+static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
+ struct ioam6_lwt_encap *tuninfo,
+ struct in6_addr *tundst)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct ipv6hdr *hdr, *inner_hdr;
+ int hdrlen, len, err;
- return 0;
+ hdrlen = (tuninfo->eh.hdrlen + 1) << 3;
+ len = sizeof(*hdr) + hdrlen;
+
+ err = skb_cow_head(skb, len + skb->mac_len);
+ if (unlikely(err))
+ return err;
+
+ inner_hdr = ipv6_hdr(skb);
+
+ skb_push(skb, len);
+ skb_reset_network_header(skb);
+ skb_mac_header_rebuild(skb);
+ skb_set_transport_header(skb, sizeof(*hdr));
+
+ tuninfo->eh.nexthdr = NEXTHDR_IPV6;
+ memcpy(skb_transport_header(skb), (u8 *)tuninfo, hdrlen);
+
+ hdr = ipv6_hdr(skb);
+ memcpy(hdr, inner_hdr, sizeof(*hdr));
+
+ hdr->nexthdr = NEXTHDR_HOP;
+ hdr->payload_len = cpu_to_be16(skb->len - sizeof(*hdr));
+ hdr->daddr = *tundst;
+ ipv6_dev_get_saddr(net, dst->dev, &hdr->daddr,
+ IPV6_PREFER_SRC_PUBLIC, &hdr->saddr);
+
+ skb_postpush_rcsum(skb, hdr, len);
+
+ return ioam6_do_fill(net, skb);
}
static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct lwtunnel_state *lwt = skb_dst(skb)->lwtstate;
+ struct dst_entry *dst = skb_dst(skb);
+ struct in6_addr orig_daddr;
+ struct ioam6_lwt *ilwt;
int err = -EINVAL;
if (skb->protocol != htons(ETH_P_IPV6))
goto drop;
- /* Only for packets we send and
- * that do not contain a Hop-by-Hop yet
- */
- if (skb->dev || ipv6_hdr(skb)->nexthdr == NEXTHDR_HOP)
- goto out;
-
- err = ioam6_do_inline(skb, ioam6_lwt_info(lwt));
- if (unlikely(err))
+ ilwt = ioam6_lwt_state(dst->lwtstate);
+ orig_daddr = ipv6_hdr(skb)->daddr;
+
+ switch (ilwt->mode) {
+ case IOAM6_IPTUNNEL_MODE_INLINE:
+do_inline:
+ /* Direct insertion - if there is no Hop-by-Hop yet */
+ if (ipv6_hdr(skb)->nexthdr == NEXTHDR_HOP)
+ goto out;
+
+ err = ioam6_do_inline(net, skb, &ilwt->tuninfo);
+ if (unlikely(err))
+ goto drop;
+
+ break;
+ case IOAM6_IPTUNNEL_MODE_ENCAP:
+do_encap:
+ /* Encapsulation (ip6ip6) */
+ err = ioam6_do_encap(net, skb, &ilwt->tuninfo, &ilwt->tundst);
+ if (unlikely(err))
+ goto drop;
+
+ break;
+ case IOAM6_IPTUNNEL_MODE_AUTO:
+ /* Automatic (RFC8200 compliant):
+ * - local packets -> INLINE mode
+ * - in-transit packets -> ENCAP mode
+ */
+ if (!skb->dev)
+ goto do_inline;
+
+ goto do_encap;
+ default:
goto drop;
+ }
- err = skb_cow_head(skb, LL_RESERVED_SPACE(skb_dst(skb)->dev));
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
if (unlikely(err))
goto drop;
+ if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
+ preempt_disable();
+ dst = dst_cache_get(&ilwt->cache);
+ preempt_enable();
+
+ if (unlikely(!dst)) {
+ struct ipv6hdr *hdr = ipv6_hdr(skb);
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.daddr = hdr->daddr;
+ fl6.saddr = hdr->saddr;
+ fl6.flowlabel = ip6_flowinfo(hdr);
+ fl6.flowi6_mark = skb->mark;
+ fl6.flowi6_proto = hdr->nexthdr;
+
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (dst->error) {
+ err = dst->error;
+ dst_release(dst);
+ goto drop;
+ }
+
+ preempt_disable();
+ dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
+ preempt_enable();
+ }
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst);
+
+ return dst_output(net, sk, skb);
+ }
out:
- return lwt->orig_output(net, sk, skb);
-
+ return dst->lwtstate->orig_output(net, sk, skb);
drop:
kfree_skb(skb);
return err;
}
+static void ioam6_destroy_state(struct lwtunnel_state *lwt)
+{
+ dst_cache_destroy(&ioam6_lwt_state(lwt)->cache);
+}
+
static int ioam6_fill_encap_info(struct sk_buff *skb,
struct lwtunnel_state *lwtstate)
{
- struct ioam6_trace_hdr *trace = ioam6_trace(lwtstate);
+ struct ioam6_lwt *ilwt = ioam6_lwt_state(lwtstate);
+ int err;
- if (nla_put_ioam6_trace(skb, IOAM6_IPTUNNEL_TRACE, trace))
- return -EMSGSIZE;
+ err = nla_put_u8(skb, IOAM6_IPTUNNEL_MODE, ilwt->mode);
+ if (err)
+ goto ret;
- return 0;
+ if (ilwt->mode != IOAM6_IPTUNNEL_MODE_INLINE) {
+ err = nla_put_in6_addr(skb, IOAM6_IPTUNNEL_DST, &ilwt->tundst);
+ if (err)
+ goto ret;
+ }
+
+ err = nla_put(skb, IOAM6_IPTUNNEL_TRACE, sizeof(ilwt->tuninfo.traceh),
+ &ilwt->tuninfo.traceh);
+ret:
+ return err;
}
static int ioam6_encap_nlsize(struct lwtunnel_state *lwtstate)
{
- struct ioam6_trace_hdr *trace = ioam6_trace(lwtstate);
+ struct ioam6_lwt *ilwt = ioam6_lwt_state(lwtstate);
+ int nlsize;
+
+ nlsize = nla_total_size(sizeof(ilwt->mode)) +
+ nla_total_size(sizeof(ilwt->tuninfo.traceh));
- return nla_total_size(sizeof(*trace));
+ if (ilwt->mode != IOAM6_IPTUNNEL_MODE_INLINE)
+ nlsize += nla_total_size(sizeof(ilwt->tundst));
+
+ return nlsize;
}
static int ioam6_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
- struct ioam6_trace_hdr *a_hdr = ioam6_trace(a);
- struct ioam6_trace_hdr *b_hdr = ioam6_trace(b);
-
- return (a_hdr->namespace_id != b_hdr->namespace_id);
+ struct ioam6_trace_hdr *trace_a = ioam6_lwt_trace(a);
+ struct ioam6_trace_hdr *trace_b = ioam6_lwt_trace(b);
+ struct ioam6_lwt *ilwt_a = ioam6_lwt_state(a);
+ struct ioam6_lwt *ilwt_b = ioam6_lwt_state(b);
+
+ return (ilwt_a->mode != ilwt_b->mode ||
+ (ilwt_a->mode != IOAM6_IPTUNNEL_MODE_INLINE &&
+ !ipv6_addr_equal(&ilwt_a->tundst, &ilwt_b->tundst)) ||
+ trace_a->namespace_id != trace_b->namespace_id);
}
static const struct lwtunnel_encap_ops ioam6_iptun_ops = {
- .build_state = ioam6_build_state,
+ .build_state = ioam6_build_state,
+ .destroy_state = ioam6_destroy_state,
.output = ioam6_output,
- .fill_encap = ioam6_fill_encap_info,
+ .fill_encap = ioam6_fill_encap_info,
.get_encap_size = ioam6_encap_nlsize,
- .cmp_encap = ioam6_encap_cmp,
- .owner = THIS_MODULE,
+ .cmp_encap = ioam6_encap_cmp,
+ .owner = THIS_MODULE,
};
int __init ioam6_iptunnel_init(void)
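
In the new encap and auto modes the output path rewrites the destination address, which forces a re-route; the result is kept in a per-tunnel dst_cache so the route lookup is only paid on a cache miss. A userspace model of that cache-aside pattern, with invented route types:

#include <stdio.h>
#include <string.h>

struct route { char daddr[40]; int id; };

static struct route cache;	/* per-tunnel dst_cache stand-in */
static int cache_valid;
static int next_route_id = 1;

/* Expensive path: what the full routing lookup would be. */
static struct route route_output(const char *daddr)
{
	struct route rt;

	snprintf(rt.daddr, sizeof(rt.daddr), "%s", daddr);
	rt.id = next_route_id++;
	return rt;
}

static struct route route_for(const char *daddr)
{
	if (cache_valid && strcmp(cache.daddr, daddr) == 0)
		return cache;		/* hit: skip the lookup */
	cache = route_output(daddr);	/* miss: look up and populate */
	cache_valid = 1;
	return cache;
}

int main(void)
{
	printf("route id %d\n", route_for("db01::1").id);	/* miss -> 1 */
	printf("route id %d\n", route_for("db01::1").id);	/* hit  -> 1 */
	return 0;
}
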
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 3ad201d372d8..d831d2439693 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1088,7 +1088,7 @@ static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
struct flowi6 *fl6 = &t->fl.u.ip6;
if (dev->type != ARPHRD_ETHER) {
- memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+ __dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr));
memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
}
@@ -1521,7 +1521,7 @@ static int ip6gre_tunnel_init(struct net_device *dev)
if (tunnel->parms.collect_md)
return 0;
- memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
+ __dev_addr_set(dev, &tunnel->parms.laddr, sizeof(struct in6_addr));
memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
if (ipv6_addr_any(&tunnel->parms.raddr))
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 12f985f43bcc..2f044a49afa8 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -464,13 +464,14 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
int ip6_forward(struct sk_buff *skb)
{
- struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
struct dst_entry *dst = skb_dst(skb);
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct inet6_skb_parm *opt = IP6CB(skb);
struct net *net = dev_net(dst->dev);
+ struct inet6_dev *idev;
u32 mtu;
+ idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
if (net->ipv6.devconf_all->forwarding == 0)
goto error;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 20a67efda47f..484aca492cc0 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1449,7 +1449,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
unsigned int mtu;
int t_hlen;
- memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+ __dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr));
memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
/* Set up flowi template */
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 1d8e3ffa225d..527e9ead7449 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -660,7 +660,7 @@ static void vti6_link_config(struct ip6_tnl *t, bool keep_mtu)
struct net_device *tdev = NULL;
int mtu;
- memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+ __dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr));
memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
p->flags &= ~(IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV |
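
The tunnel drivers above stop writing dev->dev_addr with a raw memcpy() and funnel the write through __dev_addr_set(), so the core can later make the address const or add bookkeeping without touching every driver. A minimal model of hiding the write behind one setter; types and sizes are illustrative:

#include <stdio.h>
#include <string.h>

#define MAX_ADDR_LEN 32

struct net_device {
	unsigned char dev_addr[MAX_ADDR_LEN];
};

/* All writers go through one helper, so the storage or the bookkeeping
 * around it can change in a single place.
 */
static void dev_addr_set(struct net_device *dev, const void *addr, size_t len)
{
	memcpy(dev->dev_addr, addr, len);
}

int main(void)
{
	struct net_device dev = { { 0 } };
	unsigned char v4_saddr[4] = { 192, 0, 2, 1 };

	dev_addr_set(&dev, v4_saddr, sizeof(v4_saddr));
	printf("%u.%u.%u.%u\n", dev.dev_addr[0], dev.dev_addr[1],
	       dev.dev_addr[2], dev.dev_addr[3]);
	return 0;
}
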
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 4b098521a44c..184190b9ea25 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -142,7 +142,7 @@ struct neigh_table nd_tbl = {
};
EXPORT_SYMBOL_GPL(nd_tbl);
-void __ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data,
+void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data,
int data_len, int pad)
{
int space = __ndisc_opt_addr_space(data_len, pad);
@@ -165,7 +165,7 @@ void __ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data,
EXPORT_SYMBOL_GPL(__ndisc_fill_addr_option);
static inline void ndisc_fill_addr_option(struct sk_buff *skb, int type,
- void *data, u8 icmp6_type)
+ const void *data, u8 icmp6_type)
{
__ndisc_fill_addr_option(skb, type, data, skb->dev->addr_len,
ndisc_addr_option_pad(skb->dev->type));
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index de2cf3943b91..2d816277f2c5 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -247,10 +247,10 @@ ip6t_next_entry(const struct ip6t_entry *entry)
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
-ip6t_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table)
+ip6t_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
{
+ const struct xt_table *table = priv;
unsigned int hook = state->hook;
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
/* Initializing verdict to NF_DROP keeps gcc happy. */
@@ -273,6 +273,7 @@ ip6t_do_table(struct sk_buff *skb,
* things we don't know, ie. tcp syn flag or ports). If the
* rule is also a fragment-specific rule, non-fragments won't
* match it. */
+ acpar.fragoff = 0;
acpar.hotdrop = false;
acpar.state = state;
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index 733c83d38b30..4ad8b2032f1f 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -25,12 +25,7 @@ MODULE_AUTHOR("Andras Kis-Szabo <[email protected]>");
static inline bool
segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
{
- bool r;
- pr_debug("segsleft_match:%c 0x%x <= 0x%x <= 0x%x\n",
- invert ? '!' : ' ', min, id, max);
- r = (id >= min && id <= max) ^ invert;
- pr_debug(" result %s\n", r ? "PASS" : "FAILED");
- return r;
+ return (id >= min && id <= max) ^ invert;
}
static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
@@ -65,30 +60,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
return false;
}
- pr_debug("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
- pr_debug("TYPE %04X ", rh->type);
- pr_debug("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
-
- pr_debug("IPv6 RT segsleft %02X ",
- segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
- rh->segments_left,
- !!(rtinfo->invflags & IP6T_RT_INV_SGS)));
- pr_debug("type %02X %02X %02X ",
- rtinfo->rt_type, rh->type,
- (!(rtinfo->flags & IP6T_RT_TYP) ||
- ((rtinfo->rt_type == rh->type) ^
- !!(rtinfo->invflags & IP6T_RT_INV_TYP))));
- pr_debug("len %02X %04X %02X ",
- rtinfo->hdrlen, hdrlen,
- !(rtinfo->flags & IP6T_RT_LEN) ||
- ((rtinfo->hdrlen == hdrlen) ^
- !!(rtinfo->invflags & IP6T_RT_INV_LEN)));
- pr_debug("res %02X %02X %02X ",
- rtinfo->flags & IP6T_RT_RES,
- ((const struct rt0_hdr *)rh)->reserved,
- !((rtinfo->flags & IP6T_RT_RES) &&
- (((const struct rt0_hdr *)rh)->reserved)));
-
ret = (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
rh->segments_left,
!!(rtinfo->invflags & IP6T_RT_INV_SGS))) &&
@@ -107,22 +78,22 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
reserved),
sizeof(_reserved),
&_reserved);
+ if (!rp) {
+ par->hotdrop = true;
+ return false;
+ }
ret = (*rp == 0);
}
- pr_debug("#%d ", rtinfo->addrnr);
if (!(rtinfo->flags & IP6T_RT_FST)) {
return ret;
} else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) {
- pr_debug("Not strict ");
if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
- pr_debug("There isn't enough space\n");
return false;
} else {
unsigned int i = 0;
- pr_debug("#%d ", rtinfo->addrnr);
for (temp = 0;
temp < (unsigned int)((hdrlen - 8) / 16);
temp++) {
@@ -138,26 +109,20 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
return false;
}
- if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) {
- pr_debug("i=%d temp=%d;\n", i, temp);
+ if (ipv6_addr_equal(ap, &rtinfo->addrs[i]))
i++;
- }
if (i == rtinfo->addrnr)
break;
}
- pr_debug("i=%d #%d\n", i, rtinfo->addrnr);
if (i == rtinfo->addrnr)
return ret;
else
return false;
}
} else {
- pr_debug("Strict ");
if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
- pr_debug("There isn't enough space\n");
return false;
} else {
- pr_debug("#%d ", rtinfo->addrnr);
for (temp = 0; temp < rtinfo->addrnr; temp++) {
ap = skb_header_pointer(skb,
ptr
@@ -173,7 +138,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp]))
break;
}
- pr_debug("temp=%d #%d\n", temp, rtinfo->addrnr);
if (temp == rtinfo->addrnr &&
temp == (unsigned int)((hdrlen - 8) / 16))
return ret;
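
Besides stripping the pr_debug() noise, the rt match gains a NULL check on the skb_header_pointer() result: a truncated packet is now dropped (hotdrop) instead of being matched against uninitialized stack memory. A sketch of that safe-read pattern with a plain byte buffer standing in for the skb:

#include <stdio.h>
#include <string.h>

/* Stand-in for skb_header_pointer(): copy 'len' bytes at 'offset' into
 * 'buf' and return it, or NULL when the packet is too short.
 */
static const void *header_pointer(const unsigned char *pkt, size_t pktlen,
				  size_t offset, size_t len, void *buf)
{
	if (offset + len > pktlen)
		return NULL;
	memcpy(buf, pkt + offset, len);
	return buf;
}

int main(void)
{
	unsigned char pkt[40] = { 0 };
	unsigned int reserved, hotdrop = 0;
	const unsigned int *rp;

	rp = header_pointer(pkt, sizeof(pkt), 44, sizeof(reserved), &reserved);
	if (!rp)
		hotdrop = 1;	/* truncated packet: drop, don't guess */

	printf("hotdrop=%u\n", hotdrop);
	return 0;
}
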
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 727ee8097012..df785ebda0ca 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -27,14 +27,6 @@ static const struct xt_table packet_filter = {
.priority = NF_IP6_PRI_FILTER,
};
-/* The work comes in here from netfilter.c. */
-static unsigned int
-ip6table_filter_hook(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ip6t_do_table(skb, state, priv);
-}
-
static struct nf_hook_ops *filter_ops __read_mostly;
/* Default to forward because I got too much mail already. */
@@ -90,7 +82,7 @@ static int __init ip6table_filter_init(void)
if (ret < 0)
return ret;
- filter_ops = xt_hook_ops_alloc(&packet_filter, ip6table_filter_hook);
+ filter_ops = xt_hook_ops_alloc(&packet_filter, ip6t_do_table);
if (IS_ERR(filter_ops)) {
xt_unregister_template(&packet_filter);
return PTR_ERR(filter_ops);
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 9b518ce37d6a..a88b2ce4a3cb 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -29,7 +29,7 @@ static const struct xt_table packet_mangler = {
};
static unsigned int
-ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state, void *priv)
+ip6t_mangle_out(void *priv, struct sk_buff *skb, const struct nf_hook_state *state)
{
unsigned int ret;
struct in6_addr saddr, daddr;
@@ -46,7 +46,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state, void *pr
/* flowlabel and prio (includes version, which shouldn't change either */
flowlabel = *((u_int32_t *)ipv6_hdr(skb));
- ret = ip6t_do_table(skb, state, priv);
+ ret = ip6t_do_table(priv, skb, state);
if (ret != NF_DROP && ret != NF_STOLEN &&
(!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) ||
@@ -68,8 +68,8 @@ ip6table_mangle_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
if (state->hook == NF_INET_LOCAL_OUT)
- return ip6t_mangle_out(skb, state, priv);
- return ip6t_do_table(skb, state, priv);
+ return ip6t_mangle_out(priv, skb, state);
+ return ip6t_do_table(priv, skb, state);
}
static struct nf_hook_ops *mangle_ops __read_mostly;
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 921c1723a01e..bf3cb3a13600 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -31,34 +31,27 @@ static const struct xt_table nf_nat_ipv6_table = {
.af = NFPROTO_IPV6,
};
-static unsigned int ip6table_nat_do_chain(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ip6t_do_table(skb, state, priv);
-}
-
static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
{
- .hook = ip6table_nat_do_chain,
+ .hook = ip6t_do_table,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_NAT_DST,
},
{
- .hook = ip6table_nat_do_chain,
+ .hook = ip6t_do_table,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_NAT_SRC,
},
{
- .hook = ip6table_nat_do_chain,
+ .hook = ip6t_do_table,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_NAT_DST,
},
{
- .hook = ip6table_nat_do_chain,
+ .hook = ip6t_do_table,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_NAT_SRC,
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 4f2a04af71d3..08861d5d1f4d 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -31,14 +31,6 @@ static const struct xt_table packet_raw_before_defrag = {
.priority = NF_IP6_PRI_RAW_BEFORE_DEFRAG,
};
-/* The work comes in here from netfilter.c. */
-static unsigned int
-ip6table_raw_hook(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ip6t_do_table(skb, state, priv);
-}
-
static struct nf_hook_ops *rawtable_ops __read_mostly;
static int ip6table_raw_table_init(struct net *net)
@@ -88,7 +80,7 @@ static int __init ip6table_raw_init(void)
return ret;
/* Register hooks */
- rawtable_ops = xt_hook_ops_alloc(table, ip6table_raw_hook);
+ rawtable_ops = xt_hook_ops_alloc(table, ip6t_do_table);
if (IS_ERR(rawtable_ops)) {
xt_unregister_template(table);
return PTR_ERR(rawtable_ops);
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index 931674034d8b..4df14a9bae78 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -32,13 +32,6 @@ static const struct xt_table security_table = {
.priority = NF_IP6_PRI_SECURITY,
};
-static unsigned int
-ip6table_security_hook(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ip6t_do_table(skb, state, priv);
-}
-
static struct nf_hook_ops *sectbl_ops __read_mostly;
static int ip6table_security_table_init(struct net *net)
@@ -77,7 +70,7 @@ static int __init ip6table_security_init(void)
if (ret < 0)
return ret;
- sectbl_ops = xt_hook_ops_alloc(&security_table, ip6table_security_hook);
+ sectbl_ops = xt_hook_ops_alloc(&security_table, ip6t_do_table);
if (IS_ERR(sectbl_ops)) {
xt_unregister_template(&security_table);
return PTR_ERR(sectbl_ops);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index a0108415275f..5c47be29b9ee 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -33,7 +33,7 @@
static const char nf_frags_cache_name[] = "nf-frags";
-unsigned int nf_frag_pernet_id __read_mostly;
+static unsigned int nf_frag_pernet_id __read_mostly;
static struct inet_frags nf_frags;
static struct nft_ct_frag6_pernet *nf_frag_pernet(struct net *net)
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index e8a59d8bf2ad..cb4eb1d2c620 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -25,8 +25,6 @@
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
-extern unsigned int nf_frag_pernet_id;
-
static DEFINE_MUTEX(defrag6_mutex);
static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
@@ -91,12 +89,10 @@ static const struct nf_hook_ops ipv6_defrag_ops[] = {
static void __net_exit defrag6_net_exit(struct net *net)
{
- struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id);
-
- if (nf_frag->users) {
+ if (net->nf.defrag_ipv6_users) {
nf_unregister_net_hooks(net, ipv6_defrag_ops,
ARRAY_SIZE(ipv6_defrag_ops));
- nf_frag->users = 0;
+ net->nf.defrag_ipv6_users = 0;
}
}
@@ -134,24 +130,23 @@ static void __exit nf_defrag_fini(void)
int nf_defrag_ipv6_enable(struct net *net)
{
- struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id);
int err = 0;
mutex_lock(&defrag6_mutex);
- if (nf_frag->users == UINT_MAX) {
+ if (net->nf.defrag_ipv6_users == UINT_MAX) {
err = -EOVERFLOW;
goto out_unlock;
}
- if (nf_frag->users) {
- nf_frag->users++;
+ if (net->nf.defrag_ipv6_users) {
+ net->nf.defrag_ipv6_users++;
goto out_unlock;
}
err = nf_register_net_hooks(net, ipv6_defrag_ops,
ARRAY_SIZE(ipv6_defrag_ops));
if (err == 0)
- nf_frag->users = 1;
+ net->nf.defrag_ipv6_users = 1;
out_unlock:
mutex_unlock(&defrag6_mutex);
@@ -161,12 +156,10 @@ EXPORT_SYMBOL_GPL(nf_defrag_ipv6_enable);
void nf_defrag_ipv6_disable(struct net *net)
{
- struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id);
-
mutex_lock(&defrag6_mutex);
- if (nf_frag->users) {
- nf_frag->users--;
- if (nf_frag->users == 0)
+ if (net->nf.defrag_ipv6_users) {
+ net->nf.defrag_ipv6_users--;
+ if (net->nf.defrag_ipv6_users == 0)
nf_unregister_net_hooks(net, ipv6_defrag_ops,
ARRAY_SIZE(ipv6_defrag_ops));
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index dbc224023977..9b9ef09382ab 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5681,14 +5681,15 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
- rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
+ rt->fib6_nh->fib_nh_weight, AF_INET6,
+ 0) < 0)
goto nla_put_failure;
list_for_each_entry_safe(sibling, next_sibling,
&rt->fib6_siblings, fib6_siblings) {
if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
sibling->fib6_nh->fib_nh_weight,
- AF_INET6) < 0)
+ AF_INET6, 0) < 0)
goto nla_put_failure;
}
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index e412817fba2f..5daa1c3ed83b 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -374,7 +374,11 @@ static int __net_init seg6_net_init(struct net *net)
net->ipv6.seg6_data = sdata;
#ifdef CONFIG_IPV6_SEG6_HMAC
- seg6_hmac_net_init(net);
+ if (seg6_hmac_net_init(net)) {
+ kfree(rcu_dereference_raw(sdata->tun_src));
+ kfree(sdata);
+ return -ENOMEM;
+ }
#endif
return 0;
@@ -388,7 +392,7 @@ static void __net_exit seg6_net_exit(struct net *net)
seg6_hmac_net_exit(net);
#endif
- kfree(sdata->tun_src);
+ kfree(rcu_dereference_raw(sdata->tun_src));
kfree(sdata);
}
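
seg6_net_init() now checks the seg6_hmac_net_init() return value and unwinds the allocations made earlier in the function instead of ignoring the failure; the exit path also reads tun_src via rcu_dereference_raw() to keep the RCU annotation consistent. A small model of the unwind-on-failure ordering, simulating the failing sub-init:

#include <stdio.h>
#include <stdlib.h>

static int hmac_net_init(void) { return -1; }	/* simulate failure */

static int seg6_net_init_model(void)
{
	char *tun_src = calloc(1, 16);
	char *sdata   = calloc(1, 64);

	if (!tun_src || !sdata)
		goto err;

	if (hmac_net_init())
		goto err;	/* unwind everything set up so far */

	/* success: allocations stay live for the netns lifetime */
	return 0;

err:
	free(tun_src);		/* models kfree(rcu_dereference_raw(...)) */
	free(sdata);
	return -1;
}

int main(void)
{
	printf("seg6_net_init: %d\n", seg6_net_init_model());	/* -1 */
	return 0;
}
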
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index 687d95dce085..29bc4e7c3046 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -405,9 +405,7 @@ int __net_init seg6_hmac_net_init(struct net *net)
{
struct seg6_pernet_data *sdata = seg6_pernet(net);
- rhashtable_init(&sdata->hmac_infos, &rht_params);
-
- return 0;
+ return rhashtable_init(&sdata->hmac_infos, &rht_params);
}
EXPORT_SYMBOL(seg6_hmac_net_init);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index ef0c7a7c18e2..1b57ee36d668 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -204,7 +204,7 @@ static int ipip6_tunnel_create(struct net_device *dev)
struct sit_net *sitn = net_generic(net, sit_net_id);
int err;
- memcpy(dev->dev_addr, &t->parms.iph.saddr, 4);
+ __dev_addr_set(dev, &t->parms.iph.saddr, 4);
memcpy(dev->broadcast, &t->parms.iph.daddr, 4);
if ((__force u16)t->parms.i_flags & SIT_ISATAP)
@@ -1149,7 +1149,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
synchronize_net();
t->parms.iph.saddr = p->iph.saddr;
t->parms.iph.daddr = p->iph.daddr;
- memcpy(t->dev->dev_addr, &p->iph.saddr, 4);
+ __dev_addr_set(t->dev, &p->iph.saddr, 4);
memcpy(t->dev->broadcast, &p->iph.daddr, 4);
ipip6_tunnel_link(sitn, t);
t->parms.iph.ttl = p->iph.ttl;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0ce52d46e4f8..1ef27cd7dbff 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -599,6 +599,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
int l3index = 0;
u8 prefixlen;
+ u8 flags;
if (optlen < sizeof(cmd))
return -EINVAL;
@@ -609,6 +610,8 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
if (sin6->sin6_family != AF_INET6)
return -EINVAL;
+ flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
+
if (optname == TCP_MD5SIG_EXT &&
cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
prefixlen = cmd.tcpm_prefixlen;
@@ -619,7 +622,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
}
- if (optname == TCP_MD5SIG_EXT &&
+ if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
struct net_device *dev;
@@ -640,9 +643,9 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
if (ipv6_addr_v4mapped(&sin6->sin6_addr))
return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
AF_INET, prefixlen,
- l3index);
+ l3index, flags);
return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
- AF_INET6, prefixlen, l3index);
+ AF_INET6, prefixlen, l3index, flags);
}
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
@@ -650,12 +653,12 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
if (ipv6_addr_v4mapped(&sin6->sin6_addr))
return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
- AF_INET, prefixlen, l3index,
+ AF_INET, prefixlen, l3index, flags,
cmd.tcpm_key, cmd.tcpm_keylen,
GFP_KERNEL);
return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
- AF_INET6, prefixlen, l3index,
+ AF_INET6, prefixlen, l3index, flags,
cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
@@ -1404,7 +1407,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
* across. Shucks.
*/
tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
- AF_INET6, 128, l3index, key->key, key->keylen,
+ AF_INET6, 128, l3index, key->flags, key->key, key->keylen,
sk_gfp_mask(sk, GFP_ATOMIC));
}
#endif
@@ -1618,7 +1621,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
{
- struct sk_buff *skb_to_free;
int sdif = inet6_sdif(skb);
int dif = inet6_iif(skb);
const struct tcphdr *th;
@@ -1754,17 +1756,12 @@ process:
tcp_segs_in(tcp_sk(sk), skb);
ret = 0;
if (!sock_owned_by_user(sk)) {
- skb_to_free = sk->sk_rx_skb_cache;
- sk->sk_rx_skb_cache = NULL;
ret = tcp_v6_do_rcv(sk, skb);
} else {
if (tcp_add_backlog(sk, skb))
goto discard_and_relse;
- skb_to_free = NULL;
}
bh_unlock_sock(sk);
- if (skb_to_free)
- __kfree_skb(skb_to_free);
put_and_return:
if (refcounted)
sock_put(sk);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index ea53847b5b7e..8d785232b479 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -133,7 +133,8 @@ static int compute_score(struct sock *sk, struct net *net,
dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
if (!dev_match)
return -1;
- score++;
+ if (sk->sk_bound_dev_if)
+ score++;
if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
score++;
@@ -1303,7 +1304,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
int addr_len = msg->msg_namelen;
bool connected = false;
int ulen = len;
- int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
+ int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
int err;
int is_udplite = IS_UDPLITE(sk);
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
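The corkflag access above is the reader half of a data-race annotation: udpv6_sendmsg() tests the flag locklessly, so it uses READ_ONCE(), and the setsockopt writer is expected to pair it with WRITE_ONCE() so the compiler can neither tear nor re-load the access. The pairing, sketched on a hypothetical field:

	struct example_sock {
		u8 corkflag;
	};

	/* writer side, e.g. under the socket lock */
	static void example_set_cork(struct example_sock *es, bool on)
	{
		WRITE_ONCE(es->corkflag, on);
	}

	/* lockless fast-path reader */
	static bool example_corked(const struct example_sock *es, int msg_flags)
	{
		return READ_ONCE(es->corkflag) || (msg_flags & MSG_MORE);
	}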
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 647c0554d04c..40ca3c1e42a2 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -781,7 +781,7 @@ int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb)
if (nskb) {
struct llc_sap *sap = llc->sap;
- u8 *dmac = llc->daddr.mac;
+ const u8 *dmac = llc->daddr.mac;
if (llc->dev->flags & IFF_LOOPBACK)
dmac = llc->dev->dev_addr;
diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c
index ad6547736c21..dde9bf08a593 100644
--- a/net/llc/llc_if.c
+++ b/net/llc/llc_if.c
@@ -80,7 +80,7 @@ out_free:
* establishment will inform the upper layer by calling its confirm
* function and passing the proper information.
*/
-int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap)
+int llc_establish_connection(struct sock *sk, const u8 *lmac, u8 *dmac, u8 dsap)
{
int rc = -EISCONN;
struct llc_addr laddr, daddr;
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index b9ad087bcbd7..5a6466fc626a 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -56,7 +56,7 @@ int llc_mac_hdr_init(struct sk_buff *skb,
* package primitive as an event and send to SAP event handler
*/
int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
- unsigned char *dmac, unsigned char dsap)
+ const unsigned char *dmac, unsigned char dsap)
{
int rc;
llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap,
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index a4eccb98220a..0ff490a73fae 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -26,7 +26,7 @@
#include <net/llc_c_st.h>
#include <net/llc_conn.h>
-static void llc_ui_format_mac(struct seq_file *seq, u8 *addr)
+static void llc_ui_format_mac(struct seq_file *seq, const u8 *addr)
{
seq_printf(seq, "%pM", addr);
}
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index efbefcbac3ac..7cab1cf09bf1 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -60,7 +60,10 @@ static struct mesh_table *mesh_table_alloc(void)
atomic_set(&newtbl->entries, 0);
spin_lock_init(&newtbl->gates_lock);
spin_lock_init(&newtbl->walk_lock);
- rhashtable_init(&newtbl->rhead, &mesh_rht_params);
+ if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
+ kfree(newtbl);
+ return NULL;
+ }
return newtbl;
}
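mesh_table_alloc() now propagates rhashtable_init() failure (it can return -EINVAL for bad parameters or -ENOMEM for its initial bucket array) instead of returning a half-initialized table. The alloc-then-init unwind, as a sketch against the same mesh_rht_params and eliding the other field setup the real function does:

	static struct mesh_table *table_alloc_checked(void)
	{
		struct mesh_table *tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);

		if (!tbl)
			return NULL;

		/* rhashtable_init() allocates; free tbl if it fails */
		if (rhashtable_init(&tbl->rhead, &mesh_rht_params)) {
			kfree(tbl);
			return NULL;
		}
		return tbl;
	}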
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index 204830a55240..3fbd0b9ff913 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -2,6 +2,7 @@
/*
* Copyright 2012-2013, Marco Porsch <[email protected]>
* Copyright 2012-2013, cozybit Inc.
+ * Copyright (C) 2021 Intel Corporation
*/
#include "mesh.h"
@@ -588,7 +589,7 @@ void ieee80211_mps_frame_release(struct sta_info *sta,
/* only transmit to PS STA with announced, non-zero awake window */
if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
- (!elems->awake_window || !le16_to_cpu(*elems->awake_window)))
+ (!elems->awake_window || !get_unaligned_le16(elems->awake_window)))
return;
if (!test_sta_flag(sta, WLAN_STA_MPSP_OWNER))
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index e5935e3d7a07..8c6416129d5b 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -392,10 +392,6 @@ static bool rate_control_send_low(struct ieee80211_sta *pubsta,
int mcast_rate;
bool use_basicrate = false;
- if (ieee80211_is_tx_data(txrc->skb) &&
- info->flags & IEEE80211_TX_CTL_NO_ACK)
- return false;
-
if (!pubsta || rc_no_data_or_no_ack_use_min(txrc)) {
__rate_control_send_low(txrc->hw, sband, pubsta, info,
txrc->rate_idx_mask);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 8acc743559c3..fc5c608d02e2 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -4121,7 +4121,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
if (!bssid)
return false;
if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
- ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
+ ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
+ !is_valid_ether_addr(hdr->addr2))
return false;
if (ieee80211_is_beacon(hdr->frame_control))
return true;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 36524101d11f..51b49f0d3ad4 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -514,6 +514,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->cparams.target = MS2TIME(20);
sta->cparams.interval = MS2TIME(100);
sta->cparams.ecn = true;
+ sta->cparams.ce_threshold_selector = 0;
+ sta->cparams.ce_threshold_mask = 0;
sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5c426b093ee2..a756a197c770 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2210,7 +2210,11 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
}
vht_mcs = iterator.this_arg[4] >> 4;
+ if (vht_mcs > 11)
+ vht_mcs = 0;
vht_nss = iterator.this_arg[4] & 0xF;
+ if (!vht_nss || vht_nss > 8)
+ vht_nss = 1;
break;
/*
@@ -3381,6 +3385,14 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
goto out;
+ /* If n == 2, the "while (*frag_tail)" loop above didn't execute
+ * and frag_tail should be &skb_shinfo(head)->frag_list.
+ * However, ieee80211_amsdu_prepare_head() can reallocate it.
+ * Reload frag_tail to have it pointing to the correct place.
+ */
+ if (n == 2)
+ frag_tail = &skb_shinfo(head)->frag_list;
+
/*
* Pad out the previous subframe to a multiple of 4 by adding the
* padding to the next one, that's being added. Note that head->len
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index bca47fad5a16..4eed23e27610 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -520,6 +520,9 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
return RX_DROP_UNUSABLE;
}
+ /* reload hdr - skb might have been reallocated */
+ hdr = (void *)rx->skb->data;
+
data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
if (!rx->sta || data_len < 0)
return RX_DROP_UNUSABLE;
@@ -749,6 +752,9 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
return RX_DROP_UNUSABLE;
}
+ /* reload hdr - skb might have been reallocated */
+ hdr = (void *)rx->skb->data;
+
data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;
if (!rx->sta || data_len < 0)
return RX_DROP_UNUSABLE;
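Both decrypt paths above re-derive hdr from rx->skb->data because the preceding ciphertext handling may have reallocated the skb's linear buffer, leaving any cached header pointer dangling. The defensive idiom, sketched with a hypothetical handler:

	static int example_handle(struct sk_buff *skb)
	{
		struct ieee80211_hdr *hdr = (void *)skb->data;

		/* pskb_may_pull() may reallocate skb->data ... */
		if (!pskb_may_pull(skb, sizeof(*hdr) + 8))
			return -EINVAL;

		/* ... so reload the header pointer before using it */
		hdr = (void *)skb->data;
		return ieee80211_is_data(hdr->frame_control) ? 0 : -EINVAL;
	}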
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 323d3d2d986f..500ed1b81250 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -129,15 +129,14 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
if (!ieee802154_is_valid_extended_unicast_addr(extended_addr))
return -EINVAL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ dev_addr_set(dev, addr->sa_data);
sdata->wpan_dev.extended_addr = extended_addr;
/* update lowpan interface mac address when
* wpan mac has been changed
*/
if (sdata->wpan_dev.lowpan_dev)
- memcpy(sdata->wpan_dev.lowpan_dev->dev_addr, dev->dev_addr,
- dev->addr_len);
+ dev_addr_set(sdata->wpan_dev.lowpan_dev, dev->dev_addr);
return mac802154_wpan_update_llsec(dev);
}
@@ -615,6 +614,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
unsigned char name_assign_type, enum nl802154_iftype type,
__le64 extended_addr)
{
+ u8 addr[IEEE802154_EXTENDED_ADDR_LEN];
struct net_device *ndev = NULL;
struct ieee802154_sub_if_data *sdata = NULL;
int ret;
@@ -638,11 +638,12 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
switch (type) {
case NL802154_IFTYPE_NODE:
ndev->type = ARPHRD_IEEE802154;
- if (ieee802154_is_valid_extended_unicast_addr(extended_addr))
- ieee802154_le64_to_be64(ndev->dev_addr, &extended_addr);
- else
- memcpy(ndev->dev_addr, ndev->perm_addr,
- IEEE802154_EXTENDED_ADDR_LEN);
+ if (ieee802154_is_valid_extended_unicast_addr(extended_addr)) {
+ ieee802154_le64_to_be64(addr, &extended_addr);
+ dev_addr_set(ndev, addr);
+ } else {
+ dev_addr_set(ndev, ndev->perm_addr);
+ }
break;
case NL802154_IFTYPE_MONITOR:
ndev->type = ARPHRD_IEEE802154_MONITOR;
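The sit and mac802154 conversions stop writing dev->dev_addr directly and go through dev_addr_set() (addr_len bytes) or __dev_addr_set() (explicit length); routing every write through the helpers is what allows the core to track hardware-address changes centrally, which an open-coded memcpy() would bypass. Call shape, sketched:

	static void example_set_hw_addr(struct net_device *dev, const u8 *addr)
	{
		/* was: memcpy(dev->dev_addr, addr, dev->addr_len); */
		dev_addr_set(dev, addr);
	}

	static void example_set_tunnel_addr(struct net_device *dev,
					    const __be32 *saddr)
	{
		/* explicit-length variant, as in the sit hunks above */
		__dev_addr_set(dev, saddr, 4);
	}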
diff --git a/net/mctp/Kconfig b/net/mctp/Kconfig
index 2cdf3d0a28c9..868c92272cbd 100644
--- a/net/mctp/Kconfig
+++ b/net/mctp/Kconfig
@@ -11,3 +11,8 @@ menuconfig MCTP
This option enables core MCTP support. For communicating with other
devices, you'll want to enable a driver for a specific hardware
channel.
+
+config MCTP_TEST
+ bool "MCTP core tests" if !KUNIT_ALL_TESTS
+ depends on MCTP=y && KUNIT=y
+ default KUNIT_ALL_TESTS
diff --git a/net/mctp/Makefile b/net/mctp/Makefile
index 0171333384d7..6cd55233e685 100644
--- a/net/mctp/Makefile
+++ b/net/mctp/Makefile
@@ -1,3 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MCTP) += mctp.o
mctp-objs := af_mctp.o device.o route.o neigh.o
+
+# tests
+obj-$(CONFIG_MCTP_TEST) += test/utils.o
diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
index a9526ac29dff..66a411d60b6c 100644
--- a/net/mctp/af_mctp.c
+++ b/net/mctp/af_mctp.c
@@ -16,6 +16,9 @@
#include <net/mctpdevice.h>
#include <net/sock.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/mctp.h>
+
/* socket implementation */
static int mctp_release(struct socket *sock)
@@ -223,16 +226,61 @@ static const struct proto_ops mctp_dgram_ops = {
.sendpage = sock_no_sendpage,
};
+static void mctp_sk_expire_keys(struct timer_list *timer)
+{
+ struct mctp_sock *msk = container_of(timer, struct mctp_sock,
+ key_expiry);
+ struct net *net = sock_net(&msk->sk);
+ unsigned long next_expiry, flags;
+ struct mctp_sk_key *key;
+ struct hlist_node *tmp;
+ bool next_expiry_valid = false;
+
+ spin_lock_irqsave(&net->mctp.keys_lock, flags);
+
+ hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
+ spin_lock(&key->lock);
+
+ if (!time_after_eq(key->expiry, jiffies)) {
+ trace_mctp_key_release(key, MCTP_TRACE_KEY_TIMEOUT);
+ key->valid = false;
+ hlist_del_rcu(&key->hlist);
+ hlist_del_rcu(&key->sklist);
+ spin_unlock(&key->lock);
+ mctp_key_unref(key);
+ continue;
+ }
+
+ if (next_expiry_valid) {
+ if (time_before(key->expiry, next_expiry))
+ next_expiry = key->expiry;
+ } else {
+ next_expiry = key->expiry;
+ next_expiry_valid = true;
+ }
+ spin_unlock(&key->lock);
+ }
+
+ spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
+
+ if (next_expiry_valid)
+ mod_timer(timer, next_expiry);
+}
+
static int mctp_sk_init(struct sock *sk)
{
struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
INIT_HLIST_HEAD(&msk->keys);
+ timer_setup(&msk->key_expiry, mctp_sk_expire_keys, 0);
return 0;
}
static void mctp_sk_close(struct sock *sk, long timeout)
{
+ struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
+
+ del_timer_sync(&msk->key_expiry);
sk_common_release(sk);
}
@@ -263,21 +311,23 @@ static void mctp_sk_unhash(struct sock *sk)
/* remove tag allocations */
spin_lock_irqsave(&net->mctp.keys_lock, flags);
hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
- hlist_del_rcu(&key->sklist);
- hlist_del_rcu(&key->hlist);
+ hlist_del(&key->sklist);
+ hlist_del(&key->hlist);
- spin_lock(&key->reasm_lock);
+ trace_mctp_key_release(key, MCTP_TRACE_KEY_CLOSED);
+
+ spin_lock(&key->lock);
if (key->reasm_head)
kfree_skb(key->reasm_head);
key->reasm_head = NULL;
key->reasm_dead = true;
- spin_unlock(&key->reasm_lock);
+ key->valid = false;
+ spin_unlock(&key->lock);
- kfree_rcu(key, rcu);
+ /* key is no longer on the lookup lists, unref */
+ mctp_key_unref(key);
}
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
-
- synchronize_rcu();
}
static struct proto mctp_proto = {
@@ -385,7 +435,7 @@ static __exit void mctp_exit(void)
sock_unregister(PF_MCTP);
}
-module_init(mctp_init);
+subsys_initcall(mctp_init);
module_exit(mctp_exit);
MODULE_DESCRIPTION("MCTP core");
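mctp_sk_expire_keys() above follows a standard single-timer pattern: walk the list under the lock, reap entries whose expiry has passed, track the soonest surviving deadline, and re-arm the timer for it (new keys lower the deadline via timer_reduce() elsewhere in this series). The skeleton, with hypothetical owner/entry types:

	struct entry {
		struct list_head node;
		unsigned long expiry;		/* in jiffies */
	};

	struct owner {
		spinlock_t lock;
		struct list_head entries;
		struct timer_list timer;
	};

	static void expire_scan(struct timer_list *t)
	{
		struct owner *o = from_timer(o, t, timer);
		unsigned long next = 0;
		bool next_valid = false;
		struct entry *e, *tmp;

		spin_lock(&o->lock);
		list_for_each_entry_safe(e, tmp, &o->entries, node) {
			if (!time_after_eq(e->expiry, jiffies)) {
				list_del(&e->node);	/* expired: reap */
				kfree(e);
				continue;
			}
			if (!next_valid || time_before(e->expiry, next)) {
				next = e->expiry;
				next_valid = true;
			}
		}
		spin_unlock(&o->lock);

		if (next_valid)
			mod_timer(t, next);	/* re-arm for the soonest survivor */
	}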
diff --git a/net/mctp/device.c b/net/mctp/device.c
index b9f38e765f61..3827d62f52c9 100644
--- a/net/mctp/device.c
+++ b/net/mctp/device.c
@@ -35,14 +35,6 @@ struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev)
return rtnl_dereference(dev->mctp_ptr);
}
-static void mctp_dev_destroy(struct mctp_dev *mdev)
-{
- struct net_device *dev = mdev->dev;
-
- dev_put(dev);
- kfree_rcu(mdev, rcu);
-}
-
static int mctp_fill_addrinfo(struct sk_buff *skb, struct netlink_callback *cb,
struct mctp_dev *mdev, mctp_eid_t eid)
{
@@ -255,6 +247,19 @@ static int mctp_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
return 0;
}
+void mctp_dev_hold(struct mctp_dev *mdev)
+{
+ refcount_inc(&mdev->refs);
+}
+
+void mctp_dev_put(struct mctp_dev *mdev)
+{
+ if (refcount_dec_and_test(&mdev->refs)) {
+ dev_put(mdev->dev);
+ kfree_rcu(mdev, rcu);
+ }
+}
+
static struct mctp_dev *mctp_add_dev(struct net_device *dev)
{
struct mctp_dev *mdev;
@@ -270,7 +275,9 @@ static struct mctp_dev *mctp_add_dev(struct net_device *dev)
mdev->net = mctp_default_net(dev_net(dev));
/* associate to net_device */
+ refcount_set(&mdev->refs, 1);
rcu_assign_pointer(dev->mctp_ptr, mdev);
+
dev_hold(dev);
mdev->dev = dev;
@@ -330,12 +337,26 @@ static int mctp_set_link_af(struct net_device *dev, const struct nlattr *attr,
return 0;
}
+/* Matches netdev types that should have MCTP handling */
+static bool mctp_known(struct net_device *dev)
+{
+ /* only register specific types (inc. NONE for TUN devices) */
+ return dev->type == ARPHRD_MCTP ||
+ dev->type == ARPHRD_LOOPBACK ||
+ dev->type == ARPHRD_NONE;
+}
+
static void mctp_unregister(struct net_device *dev)
{
struct mctp_dev *mdev;
mdev = mctp_dev_get_rtnl(dev);
-
+ if (mctp_known(dev) != (bool)mdev) {
+ // Sanity check, should match what was set in mctp_register
+ netdev_warn(dev, "%s: mdev pointer %d but type (%d) match is %d",
+ __func__, (bool)mdev, dev->type, mctp_known(dev));
+ return;
+ }
if (!mdev)
return;
@@ -345,7 +366,7 @@ static void mctp_unregister(struct net_device *dev)
mctp_neigh_remove_dev(mdev);
kfree(mdev->addrs);
- mctp_dev_destroy(mdev);
+ mctp_dev_put(mdev);
}
static int mctp_register(struct net_device *dev)
@@ -353,11 +374,17 @@ static int mctp_register(struct net_device *dev)
struct mctp_dev *mdev;
/* Already registered? */
- if (rtnl_dereference(dev->mctp_ptr))
+ mdev = rtnl_dereference(dev->mctp_ptr);
+
+ if (mdev) {
+ if (!mctp_known(dev))
+ netdev_warn(dev, "%s: mctp_dev set for unknown type %d",
+ __func__, dev->type);
return 0;
+ }
- /* only register specific types; MCTP-specific and loopback for now */
- if (dev->type != ARPHRD_MCTP && dev->type != ARPHRD_LOOPBACK)
+ /* only register specific types */
+ if (!mctp_known(dev))
return 0;
mdev = mctp_add_dev(dev);
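mctp_dev_hold()/mctp_dev_put() introduce a refcount on struct mctp_dev itself, so the netdev reference drop and the RCU-deferred free happen exactly once, on the final put; routes and neighbours (below) then pin the mdev instead of keeping raw dev_hold()/dev_put() pairs. The wrapper shape, sketched on a hypothetical type:

	struct wrapped_dev {
		struct net_device *dev;
		refcount_t refs;
		struct rcu_head rcu;
	};

	static void wrapped_dev_hold(struct wrapped_dev *w)
	{
		refcount_inc(&w->refs);
	}

	static void wrapped_dev_put(struct wrapped_dev *w)
	{
		if (refcount_dec_and_test(&w->refs)) {
			dev_put(w->dev);	/* drop the netdev pin */
			kfree_rcu(w, rcu);	/* free once readers finish */
		}
	}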
diff --git a/net/mctp/neigh.c b/net/mctp/neigh.c
index 90ed2f02d1fb..5cc042121493 100644
--- a/net/mctp/neigh.c
+++ b/net/mctp/neigh.c
@@ -47,7 +47,7 @@ static int mctp_neigh_add(struct mctp_dev *mdev, mctp_eid_t eid,
}
INIT_LIST_HEAD(&neigh->list);
neigh->dev = mdev;
- dev_hold(neigh->dev->dev);
+ mctp_dev_hold(neigh->dev);
neigh->eid = eid;
neigh->source = source;
memcpy(neigh->ha, lladdr, lladdr_len);
@@ -63,7 +63,7 @@ static void __mctp_neigh_free(struct rcu_head *rcu)
{
struct mctp_neigh *neigh = container_of(rcu, struct mctp_neigh, rcu);
- dev_put(neigh->dev->dev);
+ mctp_dev_put(neigh->dev);
kfree(neigh);
}
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 5ca186d53cb0..82fb5ae524f6 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -11,6 +11,7 @@
*/
#include <linux/idr.h>
+#include <linux/kconfig.h>
#include <linux/mctp.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
@@ -23,7 +24,10 @@
#include <net/netlink.h>
#include <net/sock.h>
+#include <trace/events/mctp.h>
+
static const unsigned int mctp_message_maxlen = 64 * 1024;
+static const unsigned long mctp_key_lifetime = 6 * CONFIG_HZ;
/* route output callbacks */
static int mctp_route_discard(struct mctp_route *route, struct sk_buff *skb)
@@ -83,25 +87,43 @@ static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local,
return true;
}
+/* returns a key (with key->lock held, and refcounted), or NULL if no such
+ * key exists.
+ */
static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
- mctp_eid_t peer)
+ mctp_eid_t peer,
+ unsigned long *irqflags)
+ __acquires(&key->lock)
{
struct mctp_sk_key *key, *ret;
+ unsigned long flags;
struct mctp_hdr *mh;
u8 tag;
- WARN_ON(!rcu_read_lock_held());
-
mh = mctp_hdr(skb);
tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);
ret = NULL;
+ spin_lock_irqsave(&net->mctp.keys_lock, flags);
+
+ hlist_for_each_entry(key, &net->mctp.keys, hlist) {
+ if (!mctp_key_match(key, mh->dest, peer, tag))
+ continue;
- hlist_for_each_entry_rcu(key, &net->mctp.keys, hlist) {
- if (mctp_key_match(key, mh->dest, peer, tag)) {
+ spin_lock(&key->lock);
+ if (key->valid) {
+ refcount_inc(&key->refs);
ret = key;
break;
}
+ spin_unlock(&key->lock);
+ }
+
+ if (ret) {
+ spin_unlock(&net->mctp.keys_lock);
+ *irqflags = flags;
+ } else {
+ spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
}
return ret;
@@ -121,11 +143,19 @@ static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
key->local_addr = local;
key->tag = tag;
key->sk = &msk->sk;
- spin_lock_init(&key->reasm_lock);
+ key->valid = true;
+ spin_lock_init(&key->lock);
+ refcount_set(&key->refs, 1);
return key;
}
+void mctp_key_unref(struct mctp_sk_key *key)
+{
+ if (refcount_dec_and_test(&key->refs))
+ kfree(key);
+}
+
static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
{
struct net *net = sock_net(&msk->sk);
@@ -138,12 +168,20 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
key->tag)) {
- rc = -EEXIST;
- break;
+ spin_lock(&tmp->lock);
+ if (tmp->valid)
+ rc = -EEXIST;
+ spin_unlock(&tmp->lock);
+ if (rc)
+ break;
}
}
if (!rc) {
+ refcount_inc(&key->refs);
+ key->expiry = jiffies + mctp_key_lifetime;
+ timer_reduce(&msk->key_expiry, key->expiry);
+
hlist_add_head(&key->hlist, &net->mctp.keys);
hlist_add_head(&key->sklist, &msk->keys);
}
@@ -153,28 +191,35 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
return rc;
}
-/* Must be called with key->reasm_lock, which it will release. Will schedule
- * the key for an RCU free.
+/* We're done with the key; unset valid and remove from lists. There may still
+ * be outstanding refs on the key though...
*/
static void __mctp_key_unlock_drop(struct mctp_sk_key *key, struct net *net,
unsigned long flags)
- __releases(&key->reasm_lock)
+ __releases(&key->lock)
{
struct sk_buff *skb;
skb = key->reasm_head;
key->reasm_head = NULL;
key->reasm_dead = true;
- spin_unlock_irqrestore(&key->reasm_lock, flags);
+ key->valid = false;
+ spin_unlock_irqrestore(&key->lock, flags);
spin_lock_irqsave(&net->mctp.keys_lock, flags);
- hlist_del_rcu(&key->hlist);
- hlist_del_rcu(&key->sklist);
+ hlist_del(&key->hlist);
+ hlist_del(&key->sklist);
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
- kfree_rcu(key, rcu);
+
+ /* one unref for the lists */
+ mctp_key_unref(key);
+
+ /* and one for the local reference */
+ mctp_key_unref(key);
if (skb)
kfree_skb(skb);
}
static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
@@ -248,8 +293,10 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
rcu_read_lock();
- /* lookup socket / reasm context, exactly matching (src,dest,tag) */
- key = mctp_lookup_key(net, skb, mh->src);
+ /* lookup socket / reasm context, exactly matching (src,dest,tag).
+ * on success, we hold a ref on the key, with key->lock held.
+ */
+ key = mctp_lookup_key(net, skb, mh->src, &f);
if (flags & MCTP_HDR_FLAG_SOM) {
if (key) {
@@ -260,10 +307,12 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* key for reassembly - we'll create a more specific
* one for future packets if required (ie, !EOM).
*/
- key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY);
+ key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
if (key) {
msk = container_of(key->sk,
struct mctp_sock, sk);
+ spin_unlock_irqrestore(&key->lock, f);
+ mctp_key_unref(key);
key = NULL;
}
}
@@ -282,11 +331,13 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
if (flags & MCTP_HDR_FLAG_EOM) {
sock_queue_rcv_skb(&msk->sk, skb);
if (key) {
- spin_lock_irqsave(&key->reasm_lock, f);
/* we've hit a pending reassembly; not much we
* can do but drop it
*/
+ trace_mctp_key_release(key,
+ MCTP_TRACE_KEY_REPLIED);
__mctp_key_unlock_drop(key, net, f);
+ key = NULL;
}
rc = 0;
goto out_unlock;
@@ -303,7 +354,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
goto out_unlock;
}
- /* we can queue without the reasm lock here, as the
+ /* we can queue without the key lock here, as the
* key isn't observable yet
*/
mctp_frag_queue(key, skb);
@@ -318,17 +369,22 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
if (rc)
kfree(key);
- } else {
- /* existing key: start reassembly */
- spin_lock_irqsave(&key->reasm_lock, f);
+ trace_mctp_key_acquire(key);
+ /* we don't need to release key->lock on exit */
+ mctp_key_unref(key);
+ key = NULL;
+
+ } else {
if (key->reasm_head || key->reasm_dead) {
/* duplicate start? drop everything */
+ trace_mctp_key_release(key,
+ MCTP_TRACE_KEY_INVALIDATED);
__mctp_key_unlock_drop(key, net, f);
rc = -EEXIST;
+ key = NULL;
} else {
rc = mctp_frag_queue(key, skb);
- spin_unlock_irqrestore(&key->reasm_lock, f);
}
}
@@ -337,8 +393,6 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* using the message-specific key
*/
- spin_lock_irqsave(&key->reasm_lock, f);
-
/* we need to be continuing an existing reassembly... */
if (!key->reasm_head)
rc = -EINVAL;
@@ -351,9 +405,9 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
if (!rc && flags & MCTP_HDR_FLAG_EOM) {
sock_queue_rcv_skb(key->sk, key->reasm_head);
key->reasm_head = NULL;
+ trace_mctp_key_release(key, MCTP_TRACE_KEY_REPLIED);
__mctp_key_unlock_drop(key, net, f);
- } else {
- spin_unlock_irqrestore(&key->reasm_lock, f);
+ key = NULL;
}
} else {
@@ -363,6 +417,10 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
out_unlock:
rcu_read_unlock();
+ if (key) {
+ spin_unlock_irqrestore(&key->lock, f);
+ mctp_key_unref(key);
+ }
out:
if (rc)
kfree_skb(skb);
@@ -412,7 +470,7 @@ static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
static void mctp_route_release(struct mctp_route *rt)
{
if (refcount_dec_and_test(&rt->refs)) {
- dev_put(rt->dev->dev);
+ mctp_dev_put(rt->dev);
kfree_rcu(rt, rcu);
}
}
@@ -454,11 +512,15 @@ static void mctp_reserve_tag(struct net *net, struct mctp_sk_key *key,
lockdep_assert_held(&mns->keys_lock);
+ key->expiry = jiffies + mctp_key_lifetime;
+ timer_reduce(&msk->key_expiry, key->expiry);
+
/* we hold the net->key_lock here, allowing updates to both
* the net and sk
*/
hlist_add_head_rcu(&key->hlist, &mns->keys);
hlist_add_head_rcu(&key->sklist, &msk->keys);
+ refcount_inc(&key->refs);
}
/* Allocate a locally-owned tag value for (saddr, daddr), and reserve
@@ -474,6 +536,10 @@ static int mctp_alloc_local_tag(struct mctp_sock *msk,
int rc = -EAGAIN;
u8 tagbits;
+ /* for NULL destination EIDs, we may get a response from any peer */
+ if (daddr == MCTP_ADDR_NULL)
+ daddr = MCTP_ADDR_ANY;
+
/* be optimistic, alloc now */
key = mctp_key_alloc(msk, saddr, daddr, 0, GFP_KERNEL);
if (!key)
@@ -488,14 +554,26 @@ static int mctp_alloc_local_tag(struct mctp_sock *msk,
* tags. If we find a conflict, clear that bit from tagbits
*/
hlist_for_each_entry(tmp, &mns->keys, hlist) {
+ /* We can check the lookup fields (*_addr, tag) without the
+ * lock held, they don't change over the lifetime of the key.
+ */
+
/* if we don't own the tag, it can't conflict */
if (tmp->tag & MCTP_HDR_FLAG_TO)
continue;
- if ((tmp->peer_addr == daddr ||
- tmp->peer_addr == MCTP_ADDR_ANY) &&
- tmp->local_addr == saddr)
+ if (!((tmp->peer_addr == daddr ||
+ tmp->peer_addr == MCTP_ADDR_ANY) &&
+ tmp->local_addr == saddr))
+ continue;
+
+ spin_lock(&tmp->lock);
+ /* key must still be valid. If we find a match, clear the
+ * potential tag value
+ */
+ if (tmp->valid)
tagbits &= ~(1 << tmp->tag);
+ spin_unlock(&tmp->lock);
if (!tagbits)
break;
@@ -504,7 +582,12 @@ static int mctp_alloc_local_tag(struct mctp_sock *msk,
if (tagbits) {
key->tag = __ffs(tagbits);
mctp_reserve_tag(net, key, msk);
+ trace_mctp_key_acquire(key);
+
*tagp = key->tag;
+ /* done with the key in this scope */
+ mctp_key_unref(key);
+ key = NULL;
rc = 0;
}
@@ -552,6 +635,20 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
return rt;
}
+static struct mctp_route *mctp_route_lookup_null(struct net *net,
+ struct net_device *dev)
+{
+ struct mctp_route *rt;
+
+ list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
+ if (rt->dev->dev == dev && rt->type == RTN_LOCAL &&
+ refcount_inc_not_zero(&rt->refs))
+ return rt;
+ }
+
+ return NULL;
+}
+
/* sends a skb to rt and releases the route. */
int mctp_do_route(struct mctp_route *rt, struct sk_buff *skb)
{
@@ -741,7 +838,7 @@ static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start,
rt->max = daddr_start + daddr_extent;
rt->mtu = mtu;
rt->dev = mdev;
- dev_hold(rt->dev->dev);
+ mctp_dev_hold(rt->dev);
rt->type = type;
rt->output = rtfn;
@@ -821,13 +918,18 @@ static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
struct net_device *orig_dev)
{
struct net *net = dev_net(dev);
+ struct mctp_dev *mdev;
struct mctp_skb_cb *cb;
struct mctp_route *rt;
struct mctp_hdr *mh;
- /* basic non-data sanity checks */
- if (dev->type != ARPHRD_MCTP)
+ rcu_read_lock();
+ mdev = __mctp_dev_get(dev);
+ rcu_read_unlock();
+ if (!mdev) {
+ /* basic non-data sanity checks */
goto err_drop;
+ }
if (!pskb_may_pull(skb, sizeof(struct mctp_hdr)))
goto err_drop;
@@ -841,11 +943,14 @@ static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
goto err_drop;
cb = __mctp_cb(skb);
- rcu_read_lock();
- cb->net = READ_ONCE(__mctp_dev_get(dev)->net);
- rcu_read_unlock();
+ cb->net = READ_ONCE(mdev->net);
rt = mctp_route_lookup(net, cb->net, mh->dest);
+
+ /* NULL EID, but addressed to our physical address */
+ if (!rt && mh->dest == MCTP_ADDR_NULL && skb->pkt_type == PACKET_HOST)
+ rt = mctp_route_lookup_null(net, dev);
+
if (!rt)
goto err_drop;
@@ -926,10 +1031,15 @@ static int mctp_route_nlparse(struct sk_buff *skb, struct nlmsghdr *nlh,
return 0;
}
+static const struct nla_policy rta_metrics_policy[RTAX_MAX + 1] = {
+ [RTAX_MTU] = { .type = NLA_U32 },
+};
+
static int mctp_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[RTA_MAX + 1];
+ struct nlattr *tbx[RTAX_MAX + 1];
mctp_eid_t daddr_start;
struct mctp_dev *mdev;
struct rtmsg *rtm;
@@ -946,8 +1056,15 @@ static int mctp_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
}
- /* TODO: parse mtu from nlparse */
mtu = 0;
+ if (tb[RTA_METRICS]) {
+ rc = nla_parse_nested(tbx, RTAX_MAX, tb[RTA_METRICS],
+ rta_metrics_policy, NULL);
+ if (rc < 0)
+ return rc;
+ if (tbx[RTAX_MTU])
+ mtu = nla_get_u32(tbx[RTAX_MTU]);
+ }
if (rtm->rtm_type != RTN_UNICAST)
return -EINVAL;
@@ -1116,3 +1233,7 @@ void __exit mctp_routes_exit(void)
rtnl_unregister(PF_MCTP, RTM_GETROUTE);
dev_remove_pack(&mctp_packet_type);
}
+
+#if IS_ENABLED(CONFIG_MCTP_TEST)
+#include "test/route-test.c"
+#endif
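The tag allocator in the route.c hunks treats the 3-bit MCTP tag space as a bitmask: every locally owned, still-valid key for the same (local, peer) pair clears its tag's bit, and the lowest bit left standing (found with __ffs()) becomes the new tag. In isolation, assuming a caller-supplied bitmap of tags in use:

	static int example_alloc_tag(const unsigned long *tags_in_use, u8 *tagp)
	{
		u8 tagbits = 0xff;	/* 3-bit tag field: 8 values */
		int t;

		for (t = 0; t < 8; t++)
			if (test_bit(t, tags_in_use))
				tagbits &= ~(1 << t);	/* conflict: clear */

		if (!tagbits)
			return -EAGAIN;		/* all eight tags in use */

		*tagp = __ffs(tagbits);		/* lowest free tag */
		return 0;
	}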
diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
new file mode 100644
index 000000000000..36fac3daf86a
--- /dev/null
+++ b/net/mctp/test/route-test.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/test.h>
+
+#include "utils.h"
+
+struct mctp_test_route {
+ struct mctp_route rt;
+ struct sk_buff_head pkts;
+};
+
+static int mctp_test_route_output(struct mctp_route *rt, struct sk_buff *skb)
+{
+ struct mctp_test_route *test_rt = container_of(rt, struct mctp_test_route, rt);
+
+ skb_queue_tail(&test_rt->pkts, skb);
+
+ return 0;
+}
+
+/* local version of mctp_route_alloc() */
+static struct mctp_test_route *mctp_route_test_alloc(void)
+{
+ struct mctp_test_route *rt;
+
+ rt = kzalloc(sizeof(*rt), GFP_KERNEL);
+ if (!rt)
+ return NULL;
+
+ INIT_LIST_HEAD(&rt->rt.list);
+ refcount_set(&rt->rt.refs, 1);
+ rt->rt.output = mctp_test_route_output;
+
+ skb_queue_head_init(&rt->pkts);
+
+ return rt;
+}
+
+static struct mctp_test_route *mctp_test_create_route(struct net *net,
+ struct mctp_dev *dev,
+ mctp_eid_t eid,
+ unsigned int mtu)
+{
+ struct mctp_test_route *rt;
+
+ rt = mctp_route_test_alloc();
+ if (!rt)
+ return NULL;
+
+ rt->rt.min = eid;
+ rt->rt.max = eid;
+ rt->rt.mtu = mtu;
+ rt->rt.type = RTN_UNSPEC;
+ if (dev)
+ mctp_dev_hold(dev);
+ rt->rt.dev = dev;
+
+ list_add_rcu(&rt->rt.list, &net->mctp.routes);
+
+ return rt;
+}
+
+static void mctp_test_route_destroy(struct kunit *test,
+ struct mctp_test_route *rt)
+{
+ unsigned int refs;
+
+ rtnl_lock();
+ list_del_rcu(&rt->rt.list);
+ rtnl_unlock();
+
+ skb_queue_purge(&rt->pkts);
+ if (rt->rt.dev)
+ mctp_dev_put(rt->rt.dev);
+
+ refs = refcount_read(&rt->rt.refs);
+ KUNIT_ASSERT_EQ_MSG(test, refs, 1, "route ref imbalance");
+
+ kfree_rcu(&rt->rt, rcu);
+}
+
+static struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr,
+ unsigned int data_len)
+{
+ size_t hdr_len = sizeof(*hdr);
+ struct sk_buff *skb;
+ unsigned int i;
+ u8 *buf;
+
+ skb = alloc_skb(hdr_len + data_len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
+
+ buf = skb_put(skb, data_len);
+ for (i = 0; i < data_len; i++)
+ buf[i] = i & 0xff;
+
+ return skb;
+}
+
+static struct sk_buff *__mctp_test_create_skb_data(const struct mctp_hdr *hdr,
+ const void *data,
+ size_t data_len)
+{
+ size_t hdr_len = sizeof(*hdr);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(hdr_len + data_len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
+ memcpy(skb_put(skb, data_len), data, data_len);
+
+ return skb;
+}
+
+#define mctp_test_create_skb_data(h, d) \
+ __mctp_test_create_skb_data(h, d, sizeof(*d))
+
+struct mctp_frag_test {
+ unsigned int mtu;
+ unsigned int msgsize;
+ unsigned int n_frags;
+};
+
+static void mctp_test_fragment(struct kunit *test)
+{
+ const struct mctp_frag_test *params;
+ int rc, i, n, mtu, msgsize;
+ struct mctp_test_route *rt;
+ struct sk_buff *skb;
+ struct mctp_hdr hdr;
+ u8 seq;
+
+ params = test->param_value;
+ mtu = params->mtu;
+ msgsize = params->msgsize;
+
+ hdr.ver = 1;
+ hdr.src = 8;
+ hdr.dest = 10;
+ hdr.flags_seq_tag = MCTP_HDR_FLAG_TO;
+
+ skb = mctp_test_create_skb(&hdr, msgsize);
+ KUNIT_ASSERT_TRUE(test, skb);
+
+ rt = mctp_test_create_route(&init_net, NULL, 10, mtu);
+ KUNIT_ASSERT_TRUE(test, rt);
+
+ /* The refcount would usually be incremented as part of a route lookup,
+ * but we're setting the route directly here.
+ */
+ refcount_inc(&rt->rt.refs);
+
+ rc = mctp_do_fragment_route(&rt->rt, skb, mtu, MCTP_TAG_OWNER);
+ KUNIT_EXPECT_FALSE(test, rc);
+
+ n = rt->pkts.qlen;
+
+ KUNIT_EXPECT_EQ(test, n, params->n_frags);
+
+ for (i = 0;; i++) {
+ struct mctp_hdr *hdr2;
+ struct sk_buff *skb2;
+ u8 tag_mask, seq2;
+ bool first, last;
+
+ first = i == 0;
+ last = i == (n - 1);
+
+ skb2 = skb_dequeue(&rt->pkts);
+
+ if (!skb2)
+ break;
+
+ hdr2 = mctp_hdr(skb2);
+
+ tag_mask = MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO;
+
+ KUNIT_EXPECT_EQ(test, hdr2->ver, hdr.ver);
+ KUNIT_EXPECT_EQ(test, hdr2->src, hdr.src);
+ KUNIT_EXPECT_EQ(test, hdr2->dest, hdr.dest);
+ KUNIT_EXPECT_EQ(test, hdr2->flags_seq_tag & tag_mask,
+ hdr.flags_seq_tag & tag_mask);
+
+ KUNIT_EXPECT_EQ(test,
+ !!(hdr2->flags_seq_tag & MCTP_HDR_FLAG_SOM), first);
+ KUNIT_EXPECT_EQ(test,
+ !!(hdr2->flags_seq_tag & MCTP_HDR_FLAG_EOM), last);
+
+ seq2 = (hdr2->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT) &
+ MCTP_HDR_SEQ_MASK;
+
+ if (first) {
+ seq = seq2;
+ } else {
+ seq++;
+ KUNIT_EXPECT_EQ(test, seq2, seq & MCTP_HDR_SEQ_MASK);
+ }
+
+ if (!last)
+ KUNIT_EXPECT_EQ(test, skb2->len, mtu);
+ else
+ KUNIT_EXPECT_LE(test, skb2->len, mtu);
+
+ kfree_skb(skb2);
+ }
+
+ mctp_test_route_destroy(test, rt);
+}
+
+static const struct mctp_frag_test mctp_frag_tests[] = {
+ {.mtu = 68, .msgsize = 63, .n_frags = 1},
+ {.mtu = 68, .msgsize = 64, .n_frags = 1},
+ {.mtu = 68, .msgsize = 65, .n_frags = 2},
+ {.mtu = 68, .msgsize = 66, .n_frags = 2},
+ {.mtu = 68, .msgsize = 127, .n_frags = 2},
+ {.mtu = 68, .msgsize = 128, .n_frags = 2},
+ {.mtu = 68, .msgsize = 129, .n_frags = 3},
+ {.mtu = 68, .msgsize = 130, .n_frags = 3},
+};
+
+static void mctp_frag_test_to_desc(const struct mctp_frag_test *t, char *desc)
+{
+ sprintf(desc, "mtu %d len %d -> %d frags",
+ t->mtu, t->msgsize, t->n_frags);
+}
+
+KUNIT_ARRAY_PARAM(mctp_frag, mctp_frag_tests, mctp_frag_test_to_desc);
+
+struct mctp_rx_input_test {
+ struct mctp_hdr hdr;
+ bool input;
+};
+
+static void mctp_test_rx_input(struct kunit *test)
+{
+ const struct mctp_rx_input_test *params;
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct sk_buff *skb;
+
+ params = test->param_value;
+
+ dev = mctp_test_create_dev();
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
+
+ skb = mctp_test_create_skb(&params->hdr, 1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+
+ __mctp_cb(skb);
+
+ mctp_pkttype_receive(skb, dev->ndev, &mctp_packet_type, NULL);
+
+ KUNIT_EXPECT_EQ(test, !!rt->pkts.qlen, params->input);
+
+ mctp_test_route_destroy(test, rt);
+ mctp_test_destroy_dev(dev);
+}
+
+#define RX_HDR(_ver, _src, _dest, _fst) \
+ { .ver = _ver, .src = _src, .dest = _dest, .flags_seq_tag = _fst }
+
+/* we have a route for EID 8 only */
+static const struct mctp_rx_input_test mctp_rx_input_tests[] = {
+ { .hdr = RX_HDR(1, 10, 8, 0), .input = true },
+ { .hdr = RX_HDR(1, 10, 9, 0), .input = false }, /* no input route */
+ { .hdr = RX_HDR(2, 10, 8, 0), .input = false }, /* invalid version */
+};
+
+static void mctp_rx_input_test_to_desc(const struct mctp_rx_input_test *t,
+ char *desc)
+{
+ sprintf(desc, "{%x,%x,%x,%x}", t->hdr.ver, t->hdr.src, t->hdr.dest,
+ t->hdr.flags_seq_tag);
+}
+
+KUNIT_ARRAY_PARAM(mctp_rx_input, mctp_rx_input_tests,
+ mctp_rx_input_test_to_desc);
+
+/* set up a local dev, route on EID 8, and a socket listening on type 0 */
+static void __mctp_route_test_init(struct kunit *test,
+ struct mctp_test_dev **devp,
+ struct mctp_test_route **rtp,
+ struct socket **sockp)
+{
+ struct sockaddr_mctp addr;
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct socket *sock;
+ int rc;
+
+ dev = mctp_test_create_dev();
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
+
+ rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ addr.smctp_family = AF_MCTP;
+ addr.smctp_network = MCTP_NET_ANY;
+ addr.smctp_addr.s_addr = 8;
+ addr.smctp_type = 0;
+ rc = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr));
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ *rtp = rt;
+ *devp = dev;
+ *sockp = sock;
+}
+
+static void __mctp_route_test_fini(struct kunit *test,
+ struct mctp_test_dev *dev,
+ struct mctp_test_route *rt,
+ struct socket *sock)
+{
+ sock_release(sock);
+ mctp_test_route_destroy(test, rt);
+ mctp_test_destroy_dev(dev);
+}
+
+struct mctp_route_input_sk_test {
+ struct mctp_hdr hdr;
+ u8 type;
+ bool deliver;
+};
+
+static void mctp_test_route_input_sk(struct kunit *test)
+{
+ const struct mctp_route_input_sk_test *params;
+ struct sk_buff *skb, *skb2;
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct socket *sock;
+ int rc;
+
+ params = test->param_value;
+
+ __mctp_route_test_init(test, &dev, &rt, &sock);
+
+ skb = mctp_test_create_skb_data(&params->hdr, &params->type);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+
+ skb->dev = dev->ndev;
+ __mctp_cb(skb);
+
+ rc = mctp_route_input(&rt->rt, skb);
+
+ if (params->deliver) {
+ KUNIT_EXPECT_EQ(test, rc, 0);
+
+ skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
+ KUNIT_EXPECT_EQ(test, skb2->len, 1);
+
+ skb_free_datagram(sock->sk, skb2);
+
+ } else {
+ KUNIT_EXPECT_NE(test, rc, 0);
+ skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc);
+ KUNIT_EXPECT_PTR_EQ(test, skb2, NULL);
+ }
+
+ __mctp_route_test_fini(test, dev, rt, sock);
+}
+
+#define FL_S (MCTP_HDR_FLAG_SOM)
+#define FL_E (MCTP_HDR_FLAG_EOM)
+#define FL_T (MCTP_HDR_FLAG_TO)
+
+static const struct mctp_route_input_sk_test mctp_route_input_sk_tests[] = {
+ { .hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_T), .type = 0, .deliver = true },
+ { .hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_T), .type = 1, .deliver = false },
+ { .hdr = RX_HDR(1, 10, 8, FL_S | FL_E), .type = 0, .deliver = false },
+ { .hdr = RX_HDR(1, 10, 8, FL_E | FL_T), .type = 0, .deliver = false },
+ { .hdr = RX_HDR(1, 10, 8, FL_T), .type = 0, .deliver = false },
+ { .hdr = RX_HDR(1, 10, 8, 0), .type = 0, .deliver = false },
+};
+
+static void mctp_route_input_sk_to_desc(const struct mctp_route_input_sk_test *t,
+ char *desc)
+{
+ sprintf(desc, "{%x,%x,%x,%x} type %d", t->hdr.ver, t->hdr.src,
+ t->hdr.dest, t->hdr.flags_seq_tag, t->type);
+}
+
+KUNIT_ARRAY_PARAM(mctp_route_input_sk, mctp_route_input_sk_tests,
+ mctp_route_input_sk_to_desc);
+
+struct mctp_route_input_sk_reasm_test {
+ const char *name;
+ struct mctp_hdr hdrs[4];
+ int n_hdrs;
+ int rx_len;
+};
+
+static void mctp_test_route_input_sk_reasm(struct kunit *test)
+{
+ const struct mctp_route_input_sk_reasm_test *params;
+ struct sk_buff *skb, *skb2;
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct socket *sock;
+ int i, rc;
+ u8 c;
+
+ params = test->param_value;
+
+ __mctp_route_test_init(test, &dev, &rt, &sock);
+
+ for (i = 0; i < params->n_hdrs; i++) {
+ c = i;
+ skb = mctp_test_create_skb_data(&params->hdrs[i], &c);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+
+ skb->dev = dev->ndev;
+ __mctp_cb(skb);
+
+ rc = mctp_route_input(&rt->rt, skb);
+ }
+
+ skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc);
+
+ if (params->rx_len) {
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
+ KUNIT_EXPECT_EQ(test, skb2->len, params->rx_len);
+ skb_free_datagram(sock->sk, skb2);
+
+ } else {
+ KUNIT_EXPECT_PTR_EQ(test, skb2, NULL);
+ }
+
+ __mctp_route_test_fini(test, dev, rt, sock);
+}
+
+#define RX_FRAG(f, s) RX_HDR(1, 10, 8, FL_T | (f) | ((s) << MCTP_HDR_SEQ_SHIFT))
+
+static const struct mctp_route_input_sk_reasm_test mctp_route_input_sk_reasm_tests[] = {
+ {
+ .name = "single packet",
+ .hdrs = {
+ RX_FRAG(FL_S | FL_E, 0),
+ },
+ .n_hdrs = 1,
+ .rx_len = 1,
+ },
+ {
+ .name = "single packet, offset seq",
+ .hdrs = {
+ RX_FRAG(FL_S | FL_E, 1),
+ },
+ .n_hdrs = 1,
+ .rx_len = 1,
+ },
+ {
+ .name = "start & end packets",
+ .hdrs = {
+ RX_FRAG(FL_S, 0),
+ RX_FRAG(FL_E, 1),
+ },
+ .n_hdrs = 2,
+ .rx_len = 2,
+ },
+ {
+ .name = "start & end packets, offset seq",
+ .hdrs = {
+ RX_FRAG(FL_S, 1),
+ RX_FRAG(FL_E, 2),
+ },
+ .n_hdrs = 2,
+ .rx_len = 2,
+ },
+ {
+ .name = "start & end packets, out of order",
+ .hdrs = {
+ RX_FRAG(FL_E, 1),
+ RX_FRAG(FL_S, 0),
+ },
+ .n_hdrs = 2,
+ .rx_len = 0,
+ },
+ {
+ .name = "start, middle & end packets",
+ .hdrs = {
+ RX_FRAG(FL_S, 0),
+ RX_FRAG(0, 1),
+ RX_FRAG(FL_E, 2),
+ },
+ .n_hdrs = 3,
+ .rx_len = 3,
+ },
+ {
+ .name = "missing seq",
+ .hdrs = {
+ RX_FRAG(FL_S, 0),
+ RX_FRAG(FL_E, 2),
+ },
+ .n_hdrs = 2,
+ .rx_len = 0,
+ },
+ {
+ .name = "seq wrap",
+ .hdrs = {
+ RX_FRAG(FL_S, 3),
+ RX_FRAG(FL_E, 0),
+ },
+ .n_hdrs = 2,
+ .rx_len = 2,
+ },
+};
+
+static void mctp_route_input_sk_reasm_to_desc(
+ const struct mctp_route_input_sk_reasm_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->name);
+}
+
+KUNIT_ARRAY_PARAM(mctp_route_input_sk_reasm, mctp_route_input_sk_reasm_tests,
+ mctp_route_input_sk_reasm_to_desc);
+
+static struct kunit_case mctp_test_cases[] = {
+ KUNIT_CASE_PARAM(mctp_test_fragment, mctp_frag_gen_params),
+ KUNIT_CASE_PARAM(mctp_test_rx_input, mctp_rx_input_gen_params),
+ KUNIT_CASE_PARAM(mctp_test_route_input_sk, mctp_route_input_sk_gen_params),
+ KUNIT_CASE_PARAM(mctp_test_route_input_sk_reasm,
+ mctp_route_input_sk_reasm_gen_params),
+ {}
+};
+
+static struct kunit_suite mctp_test_suite = {
+ .name = "mctp",
+ .test_cases = mctp_test_cases,
+};
+
+kunit_test_suite(mctp_test_suite);
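For readers new to KUnit: KUNIT_ARRAY_PARAM(name, array, desc_fn) generates a name##_gen_params iterator from a plain array plus a describe callback, and each run of a KUNIT_CASE_PARAM case reads its parameters from test->param_value, exactly as the fragmentation and rx tests above do. A self-contained skeleton of that machinery (all names hypothetical):

	#include <kunit/test.h>

	struct my_param { int in, out; };

	static const struct my_param my_params[] = {
		{ .in = 1, .out = 2 },
		{ .in = 3, .out = 6 },
	};

	static void my_param_to_desc(const struct my_param *p, char *desc)
	{
		snprintf(desc, KUNIT_PARAM_DESC_SIZE, "in=%d", p->in);
	}

	KUNIT_ARRAY_PARAM(my, my_params, my_param_to_desc);

	static void my_test(struct kunit *test)
	{
		const struct my_param *p = test->param_value;

		KUNIT_EXPECT_EQ(test, p->in * 2, p->out);
	}

	static struct kunit_case my_cases[] = {
		KUNIT_CASE_PARAM(my_test, my_gen_params),
		{}
	};

	static struct kunit_suite my_suite = {
		.name = "example-param",
		.test_cases = my_cases,
	};
	kunit_test_suite(my_suite);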
diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c
new file mode 100644
index 000000000000..cc6b8803aa9d
--- /dev/null
+++ b/net/mctp/test/utils.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/netdevice.h>
+#include <linux/mctp.h>
+#include <linux/if_arp.h>
+
+#include <net/mctpdevice.h>
+#include <net/pkt_sched.h>
+
+#include "utils.h"
+
+static netdev_tx_t mctp_test_dev_tx(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops mctp_test_netdev_ops = {
+ .ndo_start_xmit = mctp_test_dev_tx,
+};
+
+static void mctp_test_dev_setup(struct net_device *ndev)
+{
+ ndev->type = ARPHRD_MCTP;
+ ndev->mtu = MCTP_DEV_TEST_MTU;
+ ndev->hard_header_len = 0;
+ ndev->addr_len = 0;
+ ndev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+ ndev->flags = IFF_NOARP;
+ ndev->netdev_ops = &mctp_test_netdev_ops;
+ ndev->needs_free_netdev = true;
+}
+
+struct mctp_test_dev *mctp_test_create_dev(void)
+{
+ struct mctp_test_dev *dev;
+ struct net_device *ndev;
+ int rc;
+
+ ndev = alloc_netdev(sizeof(*dev), "mctptest%d", NET_NAME_ENUM,
+ mctp_test_dev_setup);
+ if (!ndev)
+ return NULL;
+
+ dev = netdev_priv(ndev);
+ dev->ndev = ndev;
+
+ rc = register_netdev(ndev);
+ if (rc) {
+ free_netdev(ndev);
+ return NULL;
+ }
+
+ rcu_read_lock();
+ dev->mdev = __mctp_dev_get(ndev);
+ mctp_dev_hold(dev->mdev);
+ rcu_read_unlock();
+
+ return dev;
+}
+
+void mctp_test_destroy_dev(struct mctp_test_dev *dev)
+{
+ mctp_dev_put(dev->mdev);
+ unregister_netdev(dev->ndev);
+}
diff --git a/net/mctp/test/utils.h b/net/mctp/test/utils.h
new file mode 100644
index 000000000000..df6aa1c03440
--- /dev/null
+++ b/net/mctp/test/utils.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NET_MCTP_TEST_UTILS_H
+#define __NET_MCTP_TEST_UTILS_H
+
+#include <kunit/test.h>
+
+#define MCTP_DEV_TEST_MTU 68
+
+struct mctp_test_dev {
+ struct net_device *ndev;
+ struct mctp_dev *mdev;
+};
+
+struct mctp_test_dev *mctp_test_create_dev(void);
+void mctp_test_destroy_dev(struct mctp_test_dev *dev);
+
+#endif /* __NET_MCTP_TEST_UTILS_H */
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index b21ff9be04c6..3240b72271a7 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -72,6 +72,7 @@ bool mptcp_mib_alloc(struct net *net)
void mptcp_seq_show(struct seq_file *seq)
{
+ unsigned long sum[ARRAY_SIZE(mptcp_snmp_list) - 1];
struct net *net = seq->private;
int i;
@@ -81,17 +82,13 @@ void mptcp_seq_show(struct seq_file *seq)
seq_puts(seq, "\nMPTcpExt:");
- if (!net->mib.mptcp_statistics) {
- for (i = 0; mptcp_snmp_list[i].name; i++)
- seq_puts(seq, " 0");
-
- seq_putc(seq, '\n');
- return;
- }
+ memset(sum, 0, sizeof(sum));
+ if (net->mib.mptcp_statistics)
+ snmp_get_cpu_field_batch(sum, mptcp_snmp_list,
+ net->mib.mptcp_statistics);
for (i = 0; mptcp_snmp_list[i].name; i++)
- seq_printf(seq, " %lu",
- snmp_fold_field(net->mib.mptcp_statistics,
- mptcp_snmp_list[i].entry));
+ seq_printf(seq, " %lu", sum[i]);
+
seq_putc(seq, '\n');
}
diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
index fdf0c1f15a65..f44125dd6697 100644
--- a/net/mptcp/mptcp_diag.c
+++ b/net/mptcp/mptcp_diag.c
@@ -36,7 +36,7 @@ static int mptcp_diag_dump_one(struct netlink_callback *cb,
struct sock *sk;
net = sock_net(in_skb->sk);
- msk = mptcp_token_get_sock(req->id.idiag_cookie[0]);
+ msk = mptcp_token_get_sock(net, req->id.idiag_cookie[0]);
if (!msk)
goto out_nosk;
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index c41273cefc51..422f4acfb3e6 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -748,9 +748,7 @@ static bool mptcp_established_options_mp_prio(struct sock *sk,
/* can't send MP_PRIO with MPC, as they share the same option space:
* 'backup'. Also it makes no sense at all
*/
- if (!subflow->send_mp_prio ||
- ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK |
- OPTION_MPTCP_MPC_ACK) & opts->suboptions))
+ if (!subflow->send_mp_prio || (opts->suboptions & OPTIONS_MPTCP_MPC))
return false;
/* account for the trailing 'nop' option */
@@ -1019,11 +1017,9 @@ static void ack_update_msk(struct mptcp_sock *msk,
old_snd_una = msk->snd_una;
new_snd_una = mptcp_expand_seq(old_snd_una, mp_opt->data_ack, mp_opt->ack64);
- /* ACK for data not even sent yet and even above recovery bound? Ignore.*/
- if (unlikely(after64(new_snd_una, snd_nxt))) {
- if (!msk->recovery || after64(new_snd_una, msk->recovery_snd_nxt))
- new_snd_una = old_snd_una;
- }
+ /* ACK for data not even sent yet? Ignore. */
+ if (unlikely(after64(new_snd_una, snd_nxt)))
+ new_snd_una = old_snd_una;
new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd;
@@ -1329,8 +1325,7 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
}
}
- } else if ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK |
- OPTION_MPTCP_MPC_ACK) & opts->suboptions) {
+ } else if (OPTIONS_MPTCP_MPC & opts->suboptions) {
u8 len, flag = MPTCP_CAP_HMAC_SHA256;
if (OPTION_MPTCP_MPC_SYN & opts->suboptions) {
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index c4f9a5ce3815..7b96be1e9f14 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -654,9 +654,9 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
}
}
-int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
- struct mptcp_addr_info *addr,
- u8 bkup)
+static int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+ struct mptcp_addr_info *addr,
+ u8 bkup)
{
struct mptcp_subflow_context *subflow;
@@ -1718,9 +1718,7 @@ static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
list_for_each_entry(entry, &pernet->local_addr_list, list) {
if (addresses_equal(&entry->addr, &addr.addr, true)) {
- ret = mptcp_nl_addr_backup(net, &entry->addr, bkup);
- if (ret)
- return ret;
+ mptcp_nl_addr_backup(net, &entry->addr, bkup);
if (bkup)
entry->flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
@@ -2054,6 +2052,9 @@ static int __net_init pm_nl_init_net(struct net *net)
struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id);
INIT_LIST_HEAD_RCU(&pernet->local_addr_list);
+
+ /* Cit. 2 subflows ought to be enough for anybody. */
+ pernet->subflows_max = 2;
pernet->next_id = 1;
pernet->stale_loss_cnt = 4;
spin_lock_init(&pernet->lock);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 2602f1386160..cd6b11c9b54d 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -528,7 +528,6 @@ static bool mptcp_check_data_fin(struct sock *sk)
sk->sk_shutdown |= RCV_SHUTDOWN;
smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
- set_bit(MPTCP_DATA_READY, &msk->flags);
switch (sk->sk_state) {
case TCP_ESTABLISHED:
@@ -742,10 +741,9 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
/* Wake-up the reader only for in-sequence data */
mptcp_data_lock(sk);
- if (move_skbs_to_msk(msk, ssk)) {
- set_bit(MPTCP_DATA_READY, &msk->flags);
+ if (move_skbs_to_msk(msk, ssk))
sk->sk_data_ready(sk);
- }
+
mptcp_data_unlock(sk);
}
@@ -847,7 +845,6 @@ static void mptcp_check_for_eof(struct mptcp_sock *msk)
sk->sk_shutdown |= RCV_SHUTDOWN;
smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
- set_bit(MPTCP_DATA_READY, &msk->flags);
sk->sk_data_ready(sk);
}
@@ -956,9 +953,7 @@ static void __mptcp_update_wmem(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
-#ifdef CONFIG_LOCKDEP
- WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
-#endif
+ lockdep_assert_held_once(&sk->sk_lock.slock);
if (!msk->wmem_reserved)
return;
@@ -1107,7 +1102,8 @@ out:
if (cleaned && tcp_under_memory_pressure(sk))
__mptcp_mem_reclaim_partial(sk);
- if (snd_una == READ_ONCE(msk->snd_nxt) && !msk->recovery) {
+ if (snd_una == READ_ONCE(msk->snd_nxt) &&
+ snd_una == READ_ONCE(msk->write_seq)) {
if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
mptcp_stop_timer(sk);
} else {
@@ -1117,9 +1113,8 @@ out:
static void __mptcp_clean_una_wakeup(struct sock *sk)
{
-#ifdef CONFIG_LOCKDEP
- WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
-#endif
+ lockdep_assert_held_once(&sk->sk_lock.slock);
+
__mptcp_clean_una(sk);
mptcp_write_space(sk);
}
@@ -1224,6 +1219,7 @@ static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
if (likely(__mptcp_add_ext(skb, gfp))) {
skb_reserve(skb, MAX_TCP_HEADER);
skb->reserved_tailroom = skb->end - skb->tail;
+ INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
return skb;
}
__kfree_skb(skb);
@@ -1233,31 +1229,23 @@ static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
return NULL;
}
-static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
+static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
{
struct sk_buff *skb;
- if (ssk->sk_tx_skb_cache) {
- skb = ssk->sk_tx_skb_cache;
- if (unlikely(!skb_ext_find(skb, SKB_EXT_MPTCP) &&
- !__mptcp_add_ext(skb, gfp)))
- return false;
- return true;
- }
-
skb = __mptcp_do_alloc_tx_skb(sk, gfp);
if (!skb)
- return false;
+ return NULL;
if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
- ssk->sk_tx_skb_cache = skb;
- return true;
+ tcp_skb_entail(ssk, skb);
+ return skb;
}
kfree_skb(skb);
- return false;
+ return NULL;
}
-static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
+static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
{
gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;
@@ -1287,23 +1275,29 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
struct mptcp_sendmsg_info *info)
{
u64 data_seq = dfrag->data_seq + info->sent;
+ int offset = dfrag->offset + info->sent;
struct mptcp_sock *msk = mptcp_sk(sk);
bool zero_window_probe = false;
struct mptcp_ext *mpext = NULL;
- struct sk_buff *skb, *tail;
- bool must_collapse = false;
- int size_bias = 0;
- int avail_size;
- size_t ret = 0;
+ bool can_coalesce = false;
+ bool reuse_skb = true;
+ struct sk_buff *skb;
+ size_t copy;
+ int i;
pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
+ if (WARN_ON_ONCE(info->sent > info->limit ||
+ info->limit > dfrag->data_len))
+ return 0;
+
/* compute send limit */
info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
- avail_size = info->size_goal;
+ copy = info->size_goal;
+
skb = tcp_write_queue_tail(ssk);
- if (skb) {
+ if (skb && copy > skb->len) {
/* Limit the write to the size available in the
* current skb, if any, so that we create at most a new skb.
* Explicitly tells TCP internals to avoid collapsing on later
@@ -1316,62 +1310,80 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
goto alloc_skb;
}
- must_collapse = (info->size_goal - skb->len > 0) &&
- (skb_shinfo(skb)->nr_frags < sysctl_max_skb_frags);
- if (must_collapse) {
- size_bias = skb->len;
- avail_size = info->size_goal - skb->len;
+ i = skb_shinfo(skb)->nr_frags;
+ can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
+ if (!can_coalesce && i >= sysctl_max_skb_frags) {
+ tcp_mark_push(tcp_sk(ssk), skb);
+ goto alloc_skb;
}
- }
+ copy -= skb->len;
+ } else {
alloc_skb:
- if (!must_collapse && !ssk->sk_tx_skb_cache &&
- !mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held))
- return 0;
+ skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held);
+ if (!skb)
+ return -ENOMEM;
+
+ i = skb_shinfo(skb)->nr_frags;
+ reuse_skb = false;
+ mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
+ }
/* Zero window and all data acked? Probe. */
- avail_size = mptcp_check_allowed_size(msk, data_seq, avail_size);
- if (avail_size == 0) {
+ copy = mptcp_check_allowed_size(msk, data_seq, copy);
+ if (copy == 0) {
u64 snd_una = READ_ONCE(msk->snd_una);
- if (skb || snd_una != msk->snd_nxt)
+ if (snd_una != msk->snd_nxt) {
+ tcp_remove_empty_skb(ssk, tcp_write_queue_tail(ssk));
return 0;
+ }
+
zero_window_probe = true;
data_seq = snd_una - 1;
- avail_size = 1;
- }
+ copy = 1;
- if (WARN_ON_ONCE(info->sent > info->limit ||
- info->limit > dfrag->data_len))
- return 0;
+ /* all mptcp-level data is acked, no skbs should be present in the
+ * ssk write queue
+ */
+ WARN_ON_ONCE(reuse_skb);
+ }
- ret = info->limit - info->sent;
- tail = tcp_build_frag(ssk, avail_size + size_bias, info->flags,
- dfrag->page, dfrag->offset + info->sent, &ret);
- if (!tail) {
- tcp_remove_empty_skb(sk, tcp_write_queue_tail(ssk));
+ copy = min_t(size_t, copy, info->limit - info->sent);
+ if (!sk_wmem_schedule(ssk, copy)) {
+ tcp_remove_empty_skb(ssk, tcp_write_queue_tail(ssk));
return -ENOMEM;
}
- /* if the tail skb is still the cached one, collapsing really happened.
- */
- if (skb == tail) {
- TCP_SKB_CB(tail)->tcp_flags &= ~TCPHDR_PSH;
- mpext->data_len += ret;
+ if (can_coalesce) {
+ skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
+ } else {
+ get_page(dfrag->page);
+ skb_fill_page_desc(skb, i, dfrag->page, offset, copy);
+ }
+
+ skb->len += copy;
+ skb->data_len += copy;
+ skb->truesize += copy;
+ sk_wmem_queued_add(ssk, copy);
+ sk_mem_charge(ssk, copy);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy);
+ TCP_SKB_CB(skb)->end_seq += copy;
+ tcp_skb_pcount_set(skb, 0);
+
+ /* on skb reuse we just need to update the DSS len */
+ if (reuse_skb) {
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
+ mpext->data_len += copy;
WARN_ON_ONCE(zero_window_probe);
goto out;
}
- mpext = skb_ext_find(tail, SKB_EXT_MPTCP);
- if (WARN_ON_ONCE(!mpext)) {
- /* should never reach here, stream corrupted */
- return -EINVAL;
- }
-
memset(mpext, 0, sizeof(*mpext));
mpext->data_seq = data_seq;
mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
- mpext->data_len = ret;
+ mpext->data_len = copy;
mpext->use_map = 1;
mpext->dsn64 = 1;
@@ -1380,18 +1392,18 @@ alloc_skb:
mpext->dsn64);
if (zero_window_probe) {
- mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
+ mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
mpext->frozen = 1;
if (READ_ONCE(msk->csum_enabled))
- mptcp_update_data_checksum(tail, ret);
+ mptcp_update_data_checksum(skb, copy);
tcp_push_pending_frames(ssk);
return 0;
}
out:
if (READ_ONCE(msk->csum_enabled))
- mptcp_update_data_checksum(tail, ret);
- mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
- return ret;
+ mptcp_update_data_checksum(skb, copy);
+ mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
+ return copy;
}
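
The rewritten mptcp_sendmsg_frag() first tries to extend the last page
fragment of the tail skb in place, and only falls back to a new fragment
slot, or a fresh skb, when that is impossible. A minimal userspace sketch
of the contiguity test behind skb_can_coalesce(), with struct frag as a
local stand-in for skb_frag_t (illustrative, not the kernel API):

    #include <stdbool.h>
    #include <stddef.h>

    struct frag {                    /* stand-in for skb_frag_t */
            const char *page;
            size_t off, len;
    };

    /* same page and contiguous offset: the new chunk can extend the
     * last fragment in place instead of consuming a new slot */
    static bool can_coalesce(const struct frag *last,
                             const char *page, size_t off)
    {
            return last && last->page == page &&
                   last->off + last->len == off;
    }

When the test fails and the fragment array is already full, the patch
marks the tail skb for push (tcp_mark_push()) and branches to the
alloc_skb path instead.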
#define MPTCP_SEND_BURST_SIZE ((1 << 16) - \
@@ -1508,6 +1520,38 @@ static void mptcp_push_release(struct sock *sk, struct sock *ssk,
release_sock(ssk);
}
+static void mptcp_update_post_push(struct mptcp_sock *msk,
+ struct mptcp_data_frag *dfrag,
+ u32 sent)
+{
+ u64 snd_nxt_new = dfrag->data_seq;
+
+ dfrag->already_sent += sent;
+
+ msk->snd_burst -= sent;
+
+ snd_nxt_new += dfrag->already_sent;
+
+ /* snd_nxt_new can be smaller than snd_nxt in case mptcp
+ * is recovering after a failover. In that event, this re-sends
+ * old segments.
+ *
+ * Thus compute the snd_nxt_new candidate based on
+ * the dfrag->data_seq that was sent and the data
+ * that has been handed to the subflow for transmission,
+ * and skip the update in case it was an old dfrag.
+ */
+ if (likely(after64(snd_nxt_new, msk->snd_nxt)))
+ msk->snd_nxt = snd_nxt_new;
+}
+
+static void mptcp_check_and_set_pending(struct sock *sk)
+{
+ if (mptcp_send_head(sk) &&
+ !test_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags))
+ set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
+}
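
mptcp_update_post_push() above deliberately refuses to move msk->snd_nxt
backwards: after a failover the retransmit path re-injects old dfrags, so
the candidate computed from dfrag->data_seq can be behind the current send
edge. A self-contained sketch of the wrap-safe comparison, with after64()
re-implemented here for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* wrap-safe 64-bit sequence comparison, kernel after64() style:
     * true iff seq1 is logically after seq2 */
    static bool after64(uint64_t seq1, uint64_t seq2)
    {
            return (int64_t)(seq2 - seq1) < 0;
    }

    int main(void)
    {
            uint64_t snd_nxt = 1000;      /* current msk-level send edge */
            uint64_t dfrag_seq = 400;     /* old dfrag re-sent on failover */
            uint64_t already_sent = 100;

            uint64_t cand = dfrag_seq + already_sent;   /* 500 < 1000 */
            if (after64(cand, snd_nxt))
                    snd_nxt = cand;       /* skipped: stale data */
            printf("snd_nxt = %llu\n", (unsigned long long)snd_nxt);
            return 0;
    }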
+
void __mptcp_push_pending(struct sock *sk, unsigned int flags)
{
struct sock *prev_ssk = NULL, *ssk = NULL;
@@ -1551,12 +1595,10 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
}
info.sent += ret;
- dfrag->already_sent += ret;
- msk->snd_nxt += ret;
- msk->snd_burst -= ret;
- msk->tx_pending_data -= ret;
copied += ret;
len -= ret;
+
+ mptcp_update_post_push(msk, dfrag, ret);
}
WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
}
@@ -1609,13 +1651,11 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
goto out;
info.sent += ret;
- dfrag->already_sent += ret;
- msk->snd_nxt += ret;
- msk->snd_burst -= ret;
- msk->tx_pending_data -= ret;
copied += ret;
len -= ret;
first = false;
+
+ mptcp_update_post_push(msk, dfrag, ret);
}
WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
}
@@ -1725,7 +1765,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
frag_truesize += psize;
pfrag->offset += frag_truesize;
WRITE_ONCE(msk->write_seq, msk->write_seq + psize);
- msk->tx_pending_data += psize;
/* charge data on mptcp pending queue to the msk socket
* Note: we charge such data both to sk and ssk
@@ -1759,21 +1798,6 @@ out:
return copied ? : ret;
}
-static void mptcp_wait_data(struct sock *sk, long *timeo)
-{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- struct mptcp_sock *msk = mptcp_sk(sk);
-
- add_wait_queue(sk_sleep(sk), &wait);
- sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-
- sk_wait_event(sk, timeo,
- test_bit(MPTCP_DATA_READY, &msk->flags), &wait);
-
- sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- remove_wait_queue(sk_sleep(sk), &wait);
-}
-
static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
struct msghdr *msg,
size_t len, int flags,
@@ -2077,19 +2101,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}
pr_debug("block timeout %ld", timeo);
- mptcp_wait_data(sk, &timeo);
- }
-
- if (skb_queue_empty_lockless(&sk->sk_receive_queue) &&
- skb_queue_empty(&msk->receive_queue)) {
- /* entire backlog drained, clear DATA_READY. */
- clear_bit(MPTCP_DATA_READY, &msk->flags);
-
- /* .. race-breaker: ssk might have gotten new data
- * after last __mptcp_move_skbs() returned false.
- */
- if (unlikely(__mptcp_move_skbs(msk)))
- set_bit(MPTCP_DATA_READY, &msk->flags);
+ sk_wait_data(sk, &timeo, NULL);
}
out_err:
@@ -2098,9 +2110,9 @@ out_err:
tcp_recv_timestamp(msg, sk, &tss);
}
- pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d",
- msk, test_bit(MPTCP_DATA_READY, &msk->flags),
- skb_queue_empty_lockless(&sk->sk_receive_queue), copied);
+ pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
+ msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
+ skb_queue_empty(&msk->receive_queue), copied);
if (!(flags & MSG_PEEK))
mptcp_rcv_space_adjust(msk, copied);
@@ -2213,15 +2225,11 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
return false;
}
- /* will accept ack for reijected data before re-sending them */
- if (!msk->recovery || after64(msk->snd_nxt, msk->recovery_snd_nxt))
- msk->recovery_snd_nxt = msk->snd_nxt;
+ msk->recovery_snd_nxt = msk->snd_nxt;
msk->recovery = true;
mptcp_data_unlock(sk);
msk->first_pending = rtx_head;
- msk->tx_pending_data += msk->snd_nxt - rtx_head->data_seq;
- msk->snd_nxt = rtx_head->data_seq;
msk->snd_burst = 0;
/* be sure to clear the "sent status" on all re-injected fragments */
@@ -2368,7 +2376,6 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
inet_sk_state_store(sk, TCP_CLOSE);
sk->sk_shutdown = SHUTDOWN_MASK;
smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
- set_bit(MPTCP_DATA_READY, &msk->flags);
set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
mptcp_close_wake_up(sk);
@@ -2384,6 +2391,9 @@ static void __mptcp_retrans(struct sock *sk)
int ret;
mptcp_clean_una_wakeup(sk);
+
+ /* first check ssk: need to kick "stale" logic */
+ ssk = mptcp_subflow_get_retrans(msk);
dfrag = mptcp_rtx_head(sk);
if (!dfrag) {
if (mptcp_data_fin_enabled(msk)) {
@@ -2396,10 +2406,12 @@ static void __mptcp_retrans(struct sock *sk)
goto reset_timer;
}
- return;
+ if (!mptcp_send_head(sk))
+ return;
+
+ goto reset_timer;
}
- ssk = mptcp_subflow_get_retrans(msk);
if (!ssk)
goto reset_timer;
@@ -2426,6 +2438,8 @@ static void __mptcp_retrans(struct sock *sk)
release_sock(ssk);
reset_timer:
+ mptcp_check_and_set_pending(sk);
+
if (!mptcp_timer_pending(sk))
mptcp_reset_timer(sk);
}
@@ -2492,7 +2506,6 @@ static int __mptcp_init_sock(struct sock *sk)
msk->first_pending = NULL;
msk->wmem_reserved = 0;
WRITE_ONCE(msk->rmem_released, 0);
- msk->tx_pending_data = 0;
msk->timer_ival = TCP_RTO_MIN;
msk->first = NULL;
@@ -2735,7 +2748,7 @@ cleanup:
inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- bool slow = lock_sock_fast(ssk);
+ bool slow = lock_sock_fast_nested(ssk);
sock_orphan(ssk);
unlock_sock_fast(ssk, slow);
@@ -3385,8 +3398,14 @@ unlock_fail:
static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
{
- return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM :
- 0;
+ /* Concurrent splices from sk_receive_queue into receive_queue will
+ * always show at least one non-empty queue when checked in this order.
+ */
+ if (skb_queue_empty_lockless(&((struct sock *)msk)->sk_receive_queue) &&
+ skb_queue_empty_lockless(&msk->receive_queue))
+ return 0;
+
+ return EPOLLIN | EPOLLRDNORM;
}
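
The lockless EPOLLIN test works because of the order of the two emptiness
checks: skbs migrate from sk_receive_queue into msk->receive_queue, and,
assuming the mover makes the destination non-empty before the source turns
empty (how the toy model below encodes it), a reader that samples the
source queue first can never see both queues empty while data is in
flight. An exhaustive check of the interleavings, illustrative only:

    #include <stdio.h>

    /* mover timeline: t=0 not started, t=1 dst gained the skbs,
     * t=2 src drained (insertion happens before removal) */
    static int src_has(int t) { return t < 2; }
    static int dst_has(int t) { return t >= 1; }

    int main(void)
    {
            /* reader samples src at t1, then dst at a later time t2 */
            for (int t1 = 0; t1 <= 2; t1++)
                    for (int t2 = t1; t2 <= 2; t2++)
                            if (!src_has(t1) && !dst_has(t2))
                                    printf("both empty: t1=%d t2=%d\n",
                                           t1, t2);
            /* nothing is printed: source-then-destination is safe.
             * Swap the sample order (dst at t1, src at t2) and the
             * pair t1=0, t2=2 would report both queues empty. */
            return 0;
    }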
static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
@@ -3421,7 +3440,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
state = inet_sk_state_load(sk);
pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
if (state == TCP_LISTEN)
- return mptcp_check_readable(msk);
+ return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM : 0;
if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
mask |= mptcp_check_readable(msk);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index d3e6fd1615f1..284fdcec067e 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -254,7 +254,6 @@ struct mptcp_sock {
struct sk_buff *ooo_last_skb;
struct rb_root out_of_order_queue;
struct sk_buff_head receive_queue;
- int tx_pending_data;
struct list_head conn_list;
struct list_head rtx_queue;
struct mptcp_data_frag *first_pending;
@@ -709,7 +708,7 @@ int mptcp_token_new_connect(struct sock *sk);
void mptcp_token_accept(struct mptcp_subflow_request_sock *r,
struct mptcp_sock *msk);
bool mptcp_token_exists(u32 token);
-struct mptcp_sock *mptcp_token_get_sock(u32 token);
+struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token);
struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,
long *s_num);
void mptcp_token_destroy(struct mptcp_sock *msk);
@@ -737,9 +736,6 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk);
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
const struct mptcp_rm_list *rm_list);
void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup);
-int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
- struct mptcp_addr_info *addr,
- u8 bkup);
void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq);
void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk);
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 8137cc3a4296..0f1e661c2032 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -861,6 +861,9 @@ static void mptcp_get_sub_addrs(const struct sock *sk, struct mptcp_subflow_addr
} else if (sk->sk_family == AF_INET6) {
const struct ipv6_pinfo *np = inet6_sk(sk);
+ if (WARN_ON_ONCE(!np))
+ return;
+
a->sin6_local.sin6_family = AF_INET6;
a->sin6_local.sin6_port = inet->inet_sport;
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 1de7ce883c37..6172f380dfb7 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -86,7 +86,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
struct mptcp_sock *msk;
int local_id;
- msk = mptcp_token_get_sock(subflow_req->token);
+ msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
if (!msk) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
return NULL;
diff --git a/net/mptcp/syncookies.c b/net/mptcp/syncookies.c
index 37127781aee9..7f22526346a7 100644
--- a/net/mptcp/syncookies.c
+++ b/net/mptcp/syncookies.c
@@ -108,18 +108,12 @@ bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subfl
e->valid = 0;
- msk = mptcp_token_get_sock(e->token);
+ msk = mptcp_token_get_sock(net, e->token);
if (!msk) {
spin_unlock_bh(&join_entry_locks[i]);
return false;
}
- /* If this fails, the token got re-used in the mean time by another
- * mptcp socket in a different netns, i.e. entry is outdated.
- */
- if (!net_eq(sock_net((struct sock *)msk), net))
- goto err_put;
-
subflow_req->remote_nonce = e->remote_nonce;
subflow_req->local_nonce = e->local_nonce;
subflow_req->backup = e->backup;
@@ -128,11 +122,6 @@ bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subfl
subflow_req->msk = msk;
spin_unlock_bh(&join_entry_locks[i]);
return true;
-
-err_put:
- spin_unlock_bh(&join_entry_locks[i]);
- sock_put((struct sock *)msk);
- return false;
}
void __init mptcp_join_cookie_init(void)
diff --git a/net/mptcp/token.c b/net/mptcp/token.c
index a98e554b034f..e581b341c5be 100644
--- a/net/mptcp/token.c
+++ b/net/mptcp/token.c
@@ -231,6 +231,7 @@ found:
/**
* mptcp_token_get_sock - retrieve mptcp connection sock using its token
+ * @net: restrict to this namespace
* @token: token of the mptcp connection to retrieve
*
* This function returns the mptcp connection structure with the given token.
@@ -238,7 +239,7 @@ found:
*
* returns NULL if no connection with the given token value exists.
*/
-struct mptcp_sock *mptcp_token_get_sock(u32 token)
+struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token)
{
struct hlist_nulls_node *pos;
struct token_bucket *bucket;
@@ -251,11 +252,15 @@ struct mptcp_sock *mptcp_token_get_sock(u32 token)
again:
sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) {
msk = mptcp_sk(sk);
- if (READ_ONCE(msk->token) != token)
+ if (READ_ONCE(msk->token) != token ||
+ !net_eq(sock_net(sk), net))
continue;
+
if (!refcount_inc_not_zero(&sk->sk_refcnt))
goto not_found;
- if (READ_ONCE(msk->token) != token) {
+
+ if (READ_ONCE(msk->token) != token ||
+ !net_eq(sock_net(sk), net)) {
sock_put(sk);
goto again;
}
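
With the namespace folded into the lookup key, both the pre-reference and
the post-reference identity tests must include net_eq(): the socket can be
freed and its hash slot reused between the initial match and the
refcount_inc_not_zero(), so every field that identifies it has to be
re-validated once the reference is held. A userspace sketch of that
acquire-then-recheck pattern, with get_ref() standing in for
refcount_inc_not_zero():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct obj {
            atomic_int refcnt;        /* 0 means being freed */
            unsigned int token;
            const void *net;
    };

    /* take a reference only if the object is still live */
    static bool get_ref(struct obj *o)
    {
            int c = atomic_load(&o->refcnt);
            while (c != 0)
                    if (atomic_compare_exchange_weak(&o->refcnt, &c, c + 1))
                            return true;
            return false;
    }

    static struct obj *lookup(struct obj *o, unsigned int token,
                              const void *net)
    {
            if (o->token != token || o->net != net)
                    return NULL;      /* cheap pre-filter */
            if (!get_ref(o))
                    return NULL;      /* object already dying */
            if (o->token != token || o->net != net) {
                    /* slot was reused while unreferenced: drop it */
                    atomic_fetch_sub(&o->refcnt, 1);
                    return NULL;
            }
            return o;
    }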
diff --git a/net/mptcp/token_test.c b/net/mptcp/token_test.c
index e1bd6f0a0676..5d984bec1cd8 100644
--- a/net/mptcp/token_test.c
+++ b/net/mptcp/token_test.c
@@ -11,6 +11,7 @@ static struct mptcp_subflow_request_sock *build_req_sock(struct kunit *test)
GFP_USER);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, req);
mptcp_token_init_request((struct request_sock *)req);
+ sock_net_set((struct sock *)req, &init_net);
return req;
}
@@ -22,7 +23,7 @@ static void mptcp_token_test_req_basic(struct kunit *test)
KUNIT_ASSERT_EQ(test, 0,
mptcp_token_new_request((struct request_sock *)req));
KUNIT_EXPECT_NE(test, 0, (int)req->token);
- KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(req->token));
+ KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, req->token));
/* cleanup */
mptcp_token_destroy_request((struct request_sock *)req);
@@ -55,6 +56,7 @@ static struct mptcp_sock *build_msk(struct kunit *test)
msk = kunit_kzalloc(test, sizeof(struct mptcp_sock), GFP_USER);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, msk);
refcount_set(&((struct sock *)msk)->sk_refcnt, 1);
+ sock_net_set((struct sock *)msk, &init_net);
return msk;
}
@@ -74,11 +76,11 @@ static void mptcp_token_test_msk_basic(struct kunit *test)
mptcp_token_new_connect((struct sock *)icsk));
KUNIT_EXPECT_NE(test, 0, (int)ctx->token);
KUNIT_EXPECT_EQ(test, ctx->token, msk->token);
- KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(ctx->token));
+ KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, ctx->token));
KUNIT_EXPECT_EQ(test, 2, (int)refcount_read(&sk->sk_refcnt));
mptcp_token_destroy(msk);
- KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(ctx->token));
+ KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, ctx->token));
}
static void mptcp_token_test_accept(struct kunit *test)
@@ -90,11 +92,11 @@ static void mptcp_token_test_accept(struct kunit *test)
mptcp_token_new_request((struct request_sock *)req));
msk->token = req->token;
mptcp_token_accept(req, msk);
- KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token));
+ KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, msk->token));
/* this is now a no-op */
mptcp_token_destroy_request((struct request_sock *)req);
- KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token));
+ KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, msk->token));
/* cleanup */
mptcp_token_destroy(msk);
@@ -116,7 +118,7 @@ static void mptcp_token_test_destroyed(struct kunit *test)
/* simulate race on removal */
refcount_set(&sk->sk_refcnt, 0);
- KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(msk->token));
+ KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, msk->token));
/* cleanup */
mptcp_token_destroy(msk);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 54395266339d..3646fc195e7d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -10,6 +10,17 @@ config NETFILTER_INGRESS
This allows you to classify packets from ingress using the Netfilter
infrastructure.
+config NETFILTER_EGRESS
+ bool "Netfilter egress support"
+ default y
+ select NET_EGRESS
+ help
+ This allows you to classify packets before transmission using the
+ Netfilter infrastructure.
+
+config NETFILTER_SKIP_EGRESS
+ def_bool NETFILTER_EGRESS && (NET_CLS_ACT || IFB)
+
config NETFILTER_NETLINK
tristate
@@ -109,7 +120,7 @@ config NF_CONNTRACK_MARK
config NF_CONNTRACK_SECMARK
bool 'Connection tracking security mark support'
depends on NETWORK_SECMARK
- default m if NETFILTER_ADVANCED=n
+ default y if NETFILTER_ADVANCED=n
help
This option enables security markings to be applied to
connections. Typically they are copied to connections from
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 63d032191e62..6dec9cd395f1 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -317,6 +317,12 @@ nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
return &dev->nf_hooks_ingress;
}
#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ if (hooknum == NF_NETDEV_EGRESS) {
+ if (dev && dev_net(dev) == net)
+ return &dev->nf_hooks_egress;
+ }
+#endif
WARN_ON_ONCE(1);
return NULL;
}
@@ -335,7 +341,8 @@ static int nf_ingress_check(struct net *net, const struct nf_hook_ops *reg,
return 0;
}
-static inline bool nf_ingress_hook(const struct nf_hook_ops *reg, int pf)
+static inline bool __maybe_unused nf_ingress_hook(const struct nf_hook_ops *reg,
+ int pf)
{
if ((pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) ||
(pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS))
@@ -344,6 +351,12 @@ static inline bool nf_ingress_hook(const struct nf_hook_ops *reg, int pf)
return false;
}
+static inline bool __maybe_unused nf_egress_hook(const struct nf_hook_ops *reg,
+ int pf)
+{
+ return pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_EGRESS;
+}
+
static void nf_static_key_inc(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
@@ -383,9 +396,18 @@ static int __nf_register_net_hook(struct net *net, int pf,
switch (pf) {
case NFPROTO_NETDEV:
- err = nf_ingress_check(net, reg, NF_NETDEV_INGRESS);
- if (err < 0)
- return err;
+#ifndef CONFIG_NETFILTER_INGRESS
+ if (reg->hooknum == NF_NETDEV_INGRESS)
+ return -EOPNOTSUPP;
+#endif
+#ifndef CONFIG_NETFILTER_EGRESS
+ if (reg->hooknum == NF_NETDEV_EGRESS)
+ return -EOPNOTSUPP;
+#endif
+ if ((reg->hooknum != NF_NETDEV_INGRESS &&
+ reg->hooknum != NF_NETDEV_EGRESS) ||
+ !reg->dev || dev_net(reg->dev) != net)
+ return -EINVAL;
break;
case NFPROTO_INET:
if (reg->hooknum != NF_INET_INGRESS)
@@ -418,6 +440,10 @@ static int __nf_register_net_hook(struct net *net, int pf,
if (nf_ingress_hook(reg, pf))
net_inc_ingress_queue();
#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ if (nf_egress_hook(reg, pf))
+ net_inc_egress_queue();
+#endif
nf_static_key_inc(reg, pf);
BUG_ON(p == new_hooks);
@@ -475,6 +501,10 @@ static void __nf_unregister_net_hook(struct net *net, int pf,
if (nf_ingress_hook(reg, pf))
net_dec_ingress_queue();
#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ if (nf_egress_hook(reg, pf))
+ net_dec_egress_queue();
+#endif
nf_static_key_dec(reg, pf);
} else {
WARN_ONCE(1, "hook not found, pf %d num %d", pf, reg->hooknum);
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 6186358eac7c..6e391308431d 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -130,11 +130,11 @@ htable_size(u8 hbits)
{
size_t hsize;
- /* We must fit both into u32 in jhash and size_t */
+ /* We must fit both into u32 in jhash and INT_MAX in kvmalloc_node() */
if (hbits > 31)
return 0;
hsize = jhash_size(hbits);
- if ((((size_t)-1) - sizeof(struct htable)) / sizeof(struct hbucket *)
+ if ((INT_MAX - sizeof(struct htable)) / sizeof(struct hbucket *)
< hsize)
return 0;
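
The tighter bound exists because kvmalloc-style allocators reject sizes
above INT_MAX, so the old (size_t)-1 ceiling let htable_size() report a
size the allocation could never satisfy. The overflow-safe shape of the
check as a small standalone helper (naming is illustrative):

    #include <limits.h>
    #include <stddef.h>

    /* returns hdr + n * elem, or 0 if that would exceed INT_MAX;
     * dividing first avoids overflowing the multiplication itself */
    static size_t bounded_table_bytes(size_t hdr, size_t elem, size_t n)
    {
            if (((size_t)INT_MAX - hdr) / elem < n)
                    return 0;
            return hdr + n * elem;
    }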
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index c100c6b112c8..2c467c422dc6 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1468,6 +1468,10 @@ int __init ip_vs_conn_init(void)
int idx;
/* Compute size and mask */
+ if (ip_vs_conn_tab_bits < 8 || ip_vs_conn_tab_bits > 20) {
+ pr_info("conn_tab_bits not in [8, 20]. Using default value\n");
+ ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
+ }
ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
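
ip_vs_conn_tab_bits is a module parameter and arrives unvalidated, so it
is clamped before being used as a shift amount; that keeps the shift
well-defined and the table size sane. A hedged sketch of the
clamp-before-shift idiom:

    /* sketch: sanitize an untrusted size exponent before shifting */
    static unsigned int conn_tab_size(int bits, int default_bits)
    {
            if (bits < 8 || bits > 20)
                    bits = default_bits; /* CONFIG_IP_VS_TAB_BITS here */
            return 1u << bits;
    }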
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 128690c512df..e93c937a8bf0 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1330,12 +1330,15 @@ drop:
* Check if outgoing packet belongs to the established ip_vs_conn.
*/
static unsigned int
-ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
+ip_vs_out_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state)
{
+ struct netns_ipvs *ipvs = net_ipvs(state->net);
+ unsigned int hooknum = state->hook;
struct ip_vs_iphdr iph;
struct ip_vs_protocol *pp;
struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
+ int af = state->pf;
struct sock *sk;
EnterFunction(11);
@@ -1468,56 +1471,6 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
return NF_ACCEPT;
}
-/*
- * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
- * used only for VS/NAT.
- * Check if packet is reply for established ip_vs_conn.
- */
-static unsigned int
-ip_vs_reply4(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
-}
-
-/*
- * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
- * Check if packet is reply for established ip_vs_conn.
- */
-static unsigned int
-ip_vs_local_reply4(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
-}
-
-#ifdef CONFIG_IP_VS_IPV6
-
-/*
- * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
- * used only for VS/NAT.
- * Check if packet is reply for established ip_vs_conn.
- */
-static unsigned int
-ip_vs_reply6(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
-}
-
-/*
- * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
- * Check if packet is reply for established ip_vs_conn.
- */
-static unsigned int
-ip_vs_local_reply6(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
-}
-
-#endif
-
static unsigned int
ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
struct ip_vs_proto_data *pd,
@@ -1957,8 +1910,10 @@ out:
* and send it on its way...
*/
static unsigned int
-ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
+ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state)
{
+ struct netns_ipvs *ipvs = net_ipvs(state->net);
+ unsigned int hooknum = state->hook;
struct ip_vs_iphdr iph;
struct ip_vs_protocol *pp;
struct ip_vs_proto_data *pd;
@@ -1966,6 +1921,7 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
int ret, pkts;
int conn_reuse_mode;
struct sock *sk;
+ int af = state->pf;
/* Already marked as IPVS request or reply? */
if (skb->ipvs_property)
@@ -2138,55 +2094,6 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
}
/*
- * AF_INET handler in NF_INET_LOCAL_IN chain
- * Schedule and forward packets from remote clients
- */
-static unsigned int
-ip_vs_remote_request4(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
-}
-
-/*
- * AF_INET handler in NF_INET_LOCAL_OUT chain
- * Schedule and forward packets from local clients
- */
-static unsigned int
-ip_vs_local_request4(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
-}
-
-#ifdef CONFIG_IP_VS_IPV6
-
-/*
- * AF_INET6 handler in NF_INET_LOCAL_IN chain
- * Schedule and forward packets from remote clients
- */
-static unsigned int
-ip_vs_remote_request6(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
-}
-
-/*
- * AF_INET6 handler in NF_INET_LOCAL_OUT chain
- * Schedule and forward packets from local clients
- */
-static unsigned int
-ip_vs_local_request6(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
-}
-
-#endif
-
-
-/*
* It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP
* related packets destined for 0.0.0.0/0.
* When fwmark-based virtual service is used, such as transparent
@@ -2199,45 +2106,36 @@ static unsigned int
ip_vs_forward_icmp(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
- int r;
struct netns_ipvs *ipvs = net_ipvs(state->net);
-
- if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
- return NF_ACCEPT;
+ int r;
/* ipvs enabled in this netns ? */
if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
return NF_ACCEPT;
- return ip_vs_in_icmp(ipvs, skb, &r, state->hook);
-}
-
+ if (state->pf == NFPROTO_IPV4) {
+ if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
+ return NF_ACCEPT;
#ifdef CONFIG_IP_VS_IPV6
-static unsigned int
-ip_vs_forward_icmp_v6(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- int r;
- struct netns_ipvs *ipvs = net_ipvs(state->net);
- struct ip_vs_iphdr iphdr;
+ } else {
+ struct ip_vs_iphdr iphdr;
- ip_vs_fill_iph_skb(AF_INET6, skb, false, &iphdr);
- if (iphdr.protocol != IPPROTO_ICMPV6)
- return NF_ACCEPT;
+ ip_vs_fill_iph_skb(AF_INET6, skb, false, &iphdr);
- /* ipvs enabled in this netns ? */
- if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
- return NF_ACCEPT;
+ if (iphdr.protocol != IPPROTO_ICMPV6)
+ return NF_ACCEPT;
- return ip_vs_in_icmp_v6(ipvs, skb, &r, state->hook, &iphdr);
-}
+ return ip_vs_in_icmp_v6(ipvs, skb, &r, state->hook, &iphdr);
#endif
+ }
+ return ip_vs_in_icmp(ipvs, skb, &r, state->hook);
+}
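
Collapsing the per-family wrappers works because the nf_hook_state already
carries both the netns and the protocol family, so a single hook body can
branch on state->pf. A stripped-down model of that dispatch, with all
types local to the sketch:

    enum family { FAM_IPV4, FAM_IPV6 };

    struct hook_state {              /* stand-in for nf_hook_state */
            enum family pf;
            unsigned int hook;
    };

    static unsigned int handle_v4(void *skb) { (void)skb; return 1; }
    static unsigned int handle_v6(void *skb) { (void)skb; return 1; }

    /* one registered function replaces the ip_vs_reply4/6-style
     * wrappers; the family baked into each wrapper now comes
     * from the hook state instead */
    static unsigned int unified_hook(void *priv, void *skb,
                                     const struct hook_state *state)
    {
            (void)priv;
            if (state->pf == FAM_IPV4)
                    return handle_v4(skb);
            return handle_v6(skb);
    }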
static const struct nf_hook_ops ip_vs_ops4[] = {
/* After packet filtering, change source only for VS/NAT */
{
- .hook = ip_vs_reply4,
+ .hook = ip_vs_out_hook,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC - 2,
@@ -2246,21 +2144,21 @@ static const struct nf_hook_ops ip_vs_ops4[] = {
* or VS/NAT(change destination), so that filtering rules can be
* applied to IPVS. */
{
- .hook = ip_vs_remote_request4,
+ .hook = ip_vs_in_hook,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC - 1,
},
/* Before ip_vs_in, change source only for VS/NAT */
{
- .hook = ip_vs_local_reply4,
+ .hook = ip_vs_out_hook,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_NAT_DST + 1,
},
/* After mangle, schedule and forward local requests */
{
- .hook = ip_vs_local_request4,
+ .hook = ip_vs_in_hook,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_NAT_DST + 2,
@@ -2275,7 +2173,7 @@ static const struct nf_hook_ops ip_vs_ops4[] = {
},
/* After packet filtering, change source only for VS/NAT */
{
- .hook = ip_vs_reply4,
+ .hook = ip_vs_out_hook,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_FORWARD,
.priority = 100,
@@ -2286,7 +2184,7 @@ static const struct nf_hook_ops ip_vs_ops4[] = {
static const struct nf_hook_ops ip_vs_ops6[] = {
/* After packet filtering, change source only for VS/NAT */
{
- .hook = ip_vs_reply6,
+ .hook = ip_vs_out_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_NAT_SRC - 2,
@@ -2295,21 +2193,21 @@ static const struct nf_hook_ops ip_vs_ops6[] = {
* or VS/NAT(change destination), so that filtering rules can be
* applied to IPVS. */
{
- .hook = ip_vs_remote_request6,
+ .hook = ip_vs_in_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_NAT_SRC - 1,
},
/* Before ip_vs_in, change source only for VS/NAT */
{
- .hook = ip_vs_local_reply6,
+ .hook = ip_vs_out_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_NAT_DST + 1,
},
/* After mangle, schedule and forward local requests */
{
- .hook = ip_vs_local_request6,
+ .hook = ip_vs_in_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_NAT_DST + 2,
@@ -2317,14 +2215,14 @@ static const struct nf_hook_ops ip_vs_ops6[] = {
/* After packet filtering (but before ip_vs_out_icmp), catch icmp
* destined for 0.0.0.0/0, which is for incoming IPVS connections */
{
- .hook = ip_vs_forward_icmp_v6,
+ .hook = ip_vs_forward_icmp,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = 99,
},
/* After packet filtering, change source only for VS/NAT */
{
- .hook = ip_vs_reply6,
+ .hook = ip_vs_out_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = 100,
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c25097092a06..e62b40bd349e 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2017,6 +2017,12 @@ static struct ctl_table vs_vars[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "run_estimation",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
#ifdef CONFIG_IP_VS_DEBUG
{
.procname = "debug_level",
@@ -4090,6 +4096,13 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
+ ipvs->sysctl_run_estimation = 1;
+ tbl[idx++].data = &ipvs->sysctl_run_estimation;
+#ifdef CONFIG_IP_VS_DEBUG
+ /* Global sysctls must be ro in non-init netns */
+ if (!net_eq(net, &init_net))
+ tbl[idx++].mode = 0444;
+#endif
ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
if (ipvs->sysctl_hdr == NULL) {
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index 05b8112ffb37..9a1a7af6a186 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -100,6 +100,9 @@ static void estimation_timer(struct timer_list *t)
u64 rate;
struct netns_ipvs *ipvs = from_timer(ipvs, t, est_timer);
+ if (!sysctl_run_estimation(ipvs))
+ goto skip;
+
spin_lock(&ipvs->est_lock);
list_for_each_entry(e, &ipvs->est_list, list) {
s = container_of(e, struct ip_vs_stats, est);
@@ -131,6 +134,8 @@ static void estimation_timer(struct timer_list *t)
spin_unlock(&s->lock);
}
spin_unlock(&ipvs->est_lock);
+
+skip:
mod_timer(&ipvs->est_timer, jiffies + 2*HZ);
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 94e18fb9690d..770a63103c7a 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -74,10 +74,14 @@ static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;
+/* serialize hash resizes and nf_ct_iterate_cleanup */
+static DEFINE_MUTEX(nf_conntrack_mutex);
+
#define GC_SCAN_INTERVAL (120u * HZ)
#define GC_SCAN_MAX_DURATION msecs_to_jiffies(10)
-#define MAX_CHAINLEN 64u
+#define MIN_CHAINLEN 8u
+#define MAX_CHAINLEN (32u - MIN_CHAINLEN)
static struct conntrack_gc_work conntrack_gc_work;
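
prandom_u32_max(n) returns a value in [0, n), so the per-insert limit
max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN) lands anywhere
in [8, 31]. A randomized threshold makes it harder for a flooder to tune
bucket load to sit just under a fixed cutoff. A userspace model of the
bound, with rand() standing in for the kernel PRNG:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MIN_CHAINLEN 8u
    #define MAX_CHAINLEN (32u - MIN_CHAINLEN)    /* 24 */

    /* stand-in for prandom_u32_max(): roughly uniform in [0, bound) */
    static uint32_t prandom_u32_max(uint32_t bound)
    {
            return (uint32_t)rand() % bound;
    }

    int main(void)
    {
            for (int i = 0; i < 5; i++)
                    printf("max_chainlen = %u\n",
                           MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN));
            return 0;    /* every printed value falls in [8, 31] */
    }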
@@ -188,11 +192,13 @@ seqcount_spinlock_t nf_conntrack_generation __read_mostly;
static siphash_key_t nf_conntrack_hash_rnd __read_mostly;
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
+ unsigned int zoneid,
const struct net *net)
{
struct {
struct nf_conntrack_man src;
union nf_inet_addr dst_addr;
+ unsigned int zone;
u32 net_mix;
u16 dport;
u16 proto;
@@ -205,6 +211,7 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
/* The direction must be ignored, so handle usable members manually. */
combined.src = tuple->src;
combined.dst_addr = tuple->dst.u3;
+ combined.zone = zoneid;
combined.net_mix = net_hash_mix(net);
combined.dport = (__force __u16)tuple->dst.u.all;
combined.proto = tuple->dst.protonum;
@@ -219,15 +226,17 @@ static u32 scale_hash(u32 hash)
static u32 __hash_conntrack(const struct net *net,
const struct nf_conntrack_tuple *tuple,
+ unsigned int zoneid,
unsigned int size)
{
- return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
+ return reciprocal_scale(hash_conntrack_raw(tuple, zoneid, net), size);
}
static u32 hash_conntrack(const struct net *net,
- const struct nf_conntrack_tuple *tuple)
+ const struct nf_conntrack_tuple *tuple,
+ unsigned int zoneid)
{
- return scale_hash(hash_conntrack_raw(tuple, net));
+ return scale_hash(hash_conntrack_raw(tuple, zoneid, net));
}
static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
@@ -650,9 +659,11 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
do {
sequence = read_seqcount_begin(&nf_conntrack_generation);
hash = hash_conntrack(net,
- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));
reply_hash = hash_conntrack(net,
- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
clean_from_lists(ct);
@@ -819,8 +830,20 @@ struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
- return __nf_conntrack_find_get(net, zone, tuple,
- hash_conntrack_raw(tuple, net));
+ unsigned int rid, zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
+ struct nf_conntrack_tuple_hash *thash;
+
+ thash = __nf_conntrack_find_get(net, zone, tuple,
+ hash_conntrack_raw(tuple, zone_id, net));
+
+ if (thash)
+ return thash;
+
+ rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
+ if (rid != zone_id)
+ return __nf_conntrack_find_get(net, zone, tuple,
+ hash_conntrack_raw(tuple, rid, net));
+ return thash;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
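
Folding the zone id into the hash means the same tuple can land in
different buckets depending on which direction's zone id was used, so a
zone configured per-direction needs a second probe. The shape of that
two-pass lookup, with table_find() and tuple_hash() as hypothetical
stand-ins for __nf_conntrack_find_get() and hash_conntrack_raw():

    struct tuple;
    struct conn;

    /* hypothetical helpers; sketch only */
    struct conn *table_find(const struct tuple *t, unsigned int hash);
    unsigned int tuple_hash(const struct tuple *t, unsigned int zone_id);

    static struct conn *zone_aware_find(const struct tuple *t,
                                        unsigned int orig_id,
                                        unsigned int reply_id)
    {
            struct conn *c = table_find(t, tuple_hash(t, orig_id));

            if (c || reply_id == orig_id)
                    return c;    /* common case: one probe suffices */
            return table_find(t, tuple_hash(t, reply_id));
    }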
@@ -842,6 +865,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
unsigned int hash, reply_hash;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
+ unsigned int max_chainlen;
unsigned int chainlen = 0;
unsigned int sequence;
int err = -EEXIST;
@@ -852,18 +876,22 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
do {
sequence = read_seqcount_begin(&nf_conntrack_generation);
hash = hash_conntrack(net,
- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));
reply_hash = hash_conntrack(net,
- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
+ max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
+
/* See if there's one in the list already, including reverse */
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
zone, net))
goto out;
- if (chainlen++ > MAX_CHAINLEN)
+ if (chainlen++ > max_chainlen)
goto chaintoolong;
}
@@ -873,7 +901,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
zone, net))
goto out;
- if (chainlen++ > MAX_CHAINLEN)
+ if (chainlen++ > max_chainlen)
goto chaintoolong;
}
@@ -1103,8 +1131,8 @@ drop:
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
+ unsigned int chainlen = 0, sequence, max_chainlen;
const struct nf_conntrack_zone *zone;
- unsigned int chainlen = 0, sequence;
unsigned int hash, reply_hash;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
@@ -1133,8 +1161,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
hash = scale_hash(hash);
reply_hash = hash_conntrack(net,
- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-
+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
/* We're not in hash table, and we refuse to set up related
@@ -1168,6 +1196,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
goto dying;
}
+ max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
/* See if there's one in the list already, including reverse:
NAT could have grabbed it without realizing, since we're
not in the hash. If there is, we lost race. */
@@ -1175,7 +1204,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
zone, net))
goto out;
- if (chainlen++ > MAX_CHAINLEN)
+ if (chainlen++ > max_chainlen)
goto chaintoolong;
}
@@ -1184,7 +1213,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
zone, net))
goto out;
- if (chainlen++ > MAX_CHAINLEN) {
+ if (chainlen++ > max_chainlen) {
chaintoolong:
nf_ct_add_to_dying_list(ct);
NF_CT_STAT_INC(net, chaintoolong);
@@ -1246,7 +1275,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
rcu_read_lock();
begin:
nf_conntrack_get_ht(&ct_hash, &hsize);
- hash = __hash_conntrack(net, tuple, hsize);
+ hash = __hash_conntrack(net, tuple, nf_ct_zone_id(zone, IP_CT_DIR_REPLY), hsize);
hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
@@ -1687,8 +1716,8 @@ resolve_normal_ct(struct nf_conn *tmpl,
struct nf_conntrack_tuple_hash *h;
enum ip_conntrack_info ctinfo;
struct nf_conntrack_zone tmp;
+ u32 hash, zone_id, rid;
struct nf_conn *ct;
- u32 hash;
if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
dataoff, state->pf, protonum, state->net,
@@ -1699,8 +1728,20 @@ resolve_normal_ct(struct nf_conn *tmpl,
/* look for tuple match */
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
- hash = hash_conntrack_raw(&tuple, state->net);
+
+ zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
+ hash = hash_conntrack_raw(&tuple, zone_id, state->net);
h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
+
+ if (!h) {
+ rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
+ if (zone_id != rid) {
+ u32 tmp = hash_conntrack_raw(&tuple, rid, state->net);
+
+ h = __nf_conntrack_find_get(state->net, zone, &tuple, tmp);
+ }
+ }
+
if (!h) {
h = init_conntrack(state->net, tmpl, &tuple,
skb, dataoff, hash);
@@ -2225,28 +2266,31 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
spinlock_t *lockp;
for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+ struct hlist_nulls_head *hslot = &nf_conntrack_hash[*bucket];
+
+ if (hlist_nulls_empty(hslot))
+ continue;
+
lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
local_bh_disable();
nf_conntrack_lock(lockp);
- if (*bucket < nf_conntrack_htable_size) {
- hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
- if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
- continue;
- /* All nf_conn objects are added to hash table twice, one
- * for original direction tuple, once for the reply tuple.
- *
- * Exception: In the IPS_NAT_CLASH case, only the reply
- * tuple is added (the original tuple already existed for
- * a different object).
- *
- * We only need to call the iterator once for each
- * conntrack, so we just use the 'reply' direction
- * tuple while iterating.
- */
- ct = nf_ct_tuplehash_to_ctrack(h);
- if (iter(ct, data))
- goto found;
- }
+ hlist_nulls_for_each_entry(h, n, hslot, hnnode) {
+ if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
+ continue;
+ /* All nf_conn objects are added to the hash table twice, once
+ * for the original direction tuple, once for the reply tuple.
+ *
+ * Exception: In the IPS_NAT_CLASH case, only the reply
+ * tuple is added (the original tuple already existed for
+ * a different object).
+ *
+ * We only need to call the iterator once for each
+ * conntrack, so we just use the 'reply' direction
+ * tuple while iterating.
+ */
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ if (iter(ct, data))
+ goto found;
}
spin_unlock(lockp);
local_bh_enable();
@@ -2264,26 +2308,20 @@ found:
static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
void *data, u32 portid, int report)
{
- unsigned int bucket = 0, sequence;
+ unsigned int bucket = 0;
struct nf_conn *ct;
might_sleep();
- for (;;) {
- sequence = read_seqcount_begin(&nf_conntrack_generation);
-
- while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
- /* Time to push up daises... */
+ mutex_lock(&nf_conntrack_mutex);
+ while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
+ /* Time to push up daisies... */
- nf_ct_delete(ct, portid, report);
- nf_ct_put(ct);
- cond_resched();
- }
-
- if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
- break;
- bucket = 0;
+ nf_ct_delete(ct, portid, report);
+ nf_ct_put(ct);
+ cond_resched();
}
+ mutex_unlock(&nf_conntrack_mutex);
}
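
Because the resize path below now takes the same nf_conntrack_mutex, the
cleanup walk no longer needs the seqcount retry loop: the table cannot be
swapped out from under it, so one pass over the buckets is complete. The
general trade of an optimistic retry loop for a mutex, sketched with
pthreads:

    #include <pthread.h>

    static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int table_size = 1024;

    /* with walkers and resizers serialized on one mutex, a single
     * pass is enough; no "did a resize race with me?" recheck */
    static void walk_table(void (*cb)(unsigned int bucket))
    {
            pthread_mutex_lock(&table_mutex);
            for (unsigned int b = 0; b < table_size; b++)
                    cb(b);
            pthread_mutex_unlock(&table_mutex);
    }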
struct iter_data {
@@ -2519,8 +2557,10 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
if (!hash)
return -ENOMEM;
+ mutex_lock(&nf_conntrack_mutex);
old_size = nf_conntrack_htable_size;
if (old_size == hashsize) {
+ mutex_unlock(&nf_conntrack_mutex);
kvfree(hash);
return 0;
}
@@ -2537,12 +2577,16 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
for (i = 0; i < nf_conntrack_htable_size; i++) {
while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
+ unsigned int zone_id;
+
h = hlist_nulls_entry(nf_conntrack_hash[i].first,
struct nf_conntrack_tuple_hash, hnnode);
ct = nf_ct_tuplehash_to_ctrack(h);
hlist_nulls_del_rcu(&h->hnnode);
+
+ zone_id = nf_ct_zone_id(nf_ct_zone(ct), NF_CT_DIRECTION(h));
bucket = __hash_conntrack(nf_ct_net(ct),
- &h->tuple, hashsize);
+ &h->tuple, zone_id, hashsize);
hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
}
}
@@ -2556,6 +2600,8 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
nf_conntrack_all_unlock();
local_bh_enable();
+ mutex_unlock(&nf_conntrack_mutex);
+
synchronize_net();
kvfree(old_hash);
return 0;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 7008961f5cb0..273117683922 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -150,13 +150,16 @@ static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
-hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net,
+ const struct nf_conntrack_zone *zone,
+ const struct nf_conntrack_tuple *tuple)
{
unsigned int hash;
struct {
struct nf_conntrack_man src;
u32 net_mix;
u32 protonum;
+ u32 zone;
} __aligned(SIPHASH_ALIGNMENT) combined;
get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
@@ -165,9 +168,13 @@ hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
/* Original src, to ensure we map it consistently if poss. */
combined.src = tuple->src;
- combined.net_mix = net_hash_mix(n);
+ combined.net_mix = net_hash_mix(net);
combined.protonum = tuple->dst.protonum;
+ /* Zone ID can be used provided it's valid for both directions */
+ if (zone->dir == NF_CT_DEFAULT_ZONE_DIR)
+ combined.zone = zone->id;
+
hash = siphash(&combined, sizeof(combined), &nf_nat_hash_rnd);
return reciprocal_scale(hash, nf_nat_htable_size);
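
Mixing the zone into the siphash input follows the same pattern as the
conntrack hash earlier in this patch: build a fully initialized, aligned
struct and hash it in one call, so padding bytes never feed indeterminate
values into the digest. A standalone illustration, with FNV-1a standing in
for siphash():

    #include <stdint.h>
    #include <string.h>

    struct nat_key {                 /* mirrors the combined struct */
            uint32_t src;
            uint32_t net_mix;
            uint32_t protonum;
            uint32_t zone;
    };

    static uint64_t fnv1a(const void *p, size_t len) /* siphash stand-in */
    {
            const unsigned char *b = p;
            uint64_t h = 1469598103934665603ull;

            while (len--)
                    h = (h ^ *b++) * 1099511628211ull;
            return h;
    }

    static uint64_t nat_key_hash(uint32_t src, uint32_t net_mix,
                                 uint32_t protonum, uint32_t zone)
    {
            struct nat_key k;

            memset(&k, 0, sizeof(k)); /* deterministic padding, if any */
            k.src = src;
            k.net_mix = net_mix;
            k.protonum = protonum;
            k.zone = zone;            /* 0 unless valid both directions */
            return fnv1a(&k, sizeof(k));
    }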
@@ -272,7 +279,7 @@ find_appropriate_src(struct net *net,
struct nf_conntrack_tuple *result,
const struct nf_nat_range2 *range)
{
- unsigned int h = hash_by_src(net, tuple);
+ unsigned int h = hash_by_src(net, zone, tuple);
const struct nf_conn *ct;
hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
@@ -619,7 +626,7 @@ nf_nat_setup_info(struct nf_conn *ct,
unsigned int srchash;
spinlock_t *lock;
- srchash = hash_by_src(net,
+ srchash = hash_by_src(net, nf_ct_zone(ct),
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
spin_lock_bh(lock);
@@ -788,7 +795,7 @@ static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
unsigned int h;
- h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ h = hash_by_src(nf_ct_net(ct), nf_ct_zone(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
hlist_del_rcu(&ct->nat_bysource);
spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
diff --git a/net/netfilter/nf_nat_masquerade.c b/net/netfilter/nf_nat_masquerade.c
index 8e8a65d46345..acd73f717a08 100644
--- a/net/netfilter/nf_nat_masquerade.c
+++ b/net/netfilter/nf_nat_masquerade.c
@@ -9,8 +9,19 @@
#include <net/netfilter/nf_nat_masquerade.h>
+struct masq_dev_work {
+ struct work_struct work;
+ struct net *net;
+ union nf_inet_addr addr;
+ int ifindex;
+ int (*iter)(struct nf_conn *i, void *data);
+};
+
+#define MAX_MASQ_WORKER_COUNT 16
+
static DEFINE_MUTEX(masq_mutex);
static unsigned int masq_refcnt __read_mostly;
+static atomic_t masq_worker_count __read_mostly;
unsigned int
nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
@@ -63,13 +74,71 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);
-static int device_cmp(struct nf_conn *i, void *ifindex)
+static void iterate_cleanup_work(struct work_struct *work)
+{
+ struct masq_dev_work *w;
+
+ w = container_of(work, struct masq_dev_work, work);
+
+ nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0);
+
+ put_net(w->net);
+ kfree(w);
+ atomic_dec(&masq_worker_count);
+ module_put(THIS_MODULE);
+}
+
+/* Iterate conntrack table in the background and remove conntrack entries
+ * that use the device/address being removed.
+ *
+ * In case too many work items have been queued already or memory allocation
+ * fails, iteration is skipped; conntrack entries will time out eventually.
+ */
+static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr,
+ int ifindex,
+ int (*iter)(struct nf_conn *i, void *data),
+ gfp_t gfp_flags)
+{
+ struct masq_dev_work *w;
+
+ if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT)
+ return;
+
+ net = maybe_get_net(net);
+ if (!net)
+ return;
+
+ if (!try_module_get(THIS_MODULE))
+ goto err_module;
+
+ w = kzalloc(sizeof(*w), gfp_flags);
+ if (w) {
+ /* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */
+ atomic_inc(&masq_worker_count);
+
+ INIT_WORK(&w->work, iterate_cleanup_work);
+ w->ifindex = ifindex;
+ w->net = net;
+ w->iter = iter;
+ if (addr)
+ w->addr = *addr;
+ schedule_work(&w->work);
+ return;
+ }
+
+ module_put(THIS_MODULE);
+ err_module:
+ put_net(net);
+}
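
The scheduling helper is reachable from atomic notifier context (see the
comment above masq_inet6_event below), so everything that can sleep is
pushed into the work item; the helper itself only pins what the worker
will need, applies a soft cap, and rolls back in reverse order on failure.
A userspace skeleton of that acquire/queue/rollback ordering, all names
local to the sketch:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    #define MAX_WORKERS 16

    static atomic_int worker_count;

    struct cleanup_work { int ifindex; };

    /* stand-ins for maybe_get_net()/try_module_get() and schedule_work() */
    static bool pin_resources(void) { return true; }
    static void unpin_resources(void) { }
    static void queue_it(struct cleanup_work *w) { (void)w; }

    static void schedule_cleanup(int ifindex)
    {
            struct cleanup_work *w;

            if (atomic_load(&worker_count) > MAX_WORKERS)
                    return;      /* skip: stale entries time out anyway */
            if (!pin_resources())
                    return;
            w = calloc(1, sizeof(*w));
            if (!w) {
                    unpin_resources();
                    return;      /* allocation may fail in atomic context */
            }
            w->ifindex = ifindex;
            atomic_fetch_add(&worker_count, 1); /* may overshoot; harmless */
            queue_it(w);
    }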
+
+static int device_cmp(struct nf_conn *i, void *arg)
{
const struct nf_conn_nat *nat = nfct_nat(i);
+ const struct masq_dev_work *w = arg;
if (!nat)
return 0;
- return nat->masq_index == (int)(long)ifindex;
+ return nat->masq_index == w->ifindex;
}
static int masq_device_event(struct notifier_block *this,
@@ -85,8 +154,8 @@ static int masq_device_event(struct notifier_block *this,
* and forget them.
*/
- nf_ct_iterate_cleanup_net(net, device_cmp,
- (void *)(long)dev->ifindex, 0, 0);
+ nf_nat_masq_schedule(net, NULL, dev->ifindex,
+ device_cmp, GFP_KERNEL);
}
return NOTIFY_DONE;
@@ -94,35 +163,45 @@ static int masq_device_event(struct notifier_block *this,
static int inet_cmp(struct nf_conn *ct, void *ptr)
{
- struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
- struct net_device *dev = ifa->ifa_dev->dev;
struct nf_conntrack_tuple *tuple;
+ struct masq_dev_work *w = ptr;
- if (!device_cmp(ct, (void *)(long)dev->ifindex))
+ if (!device_cmp(ct, ptr))
return 0;
tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
- return ifa->ifa_address == tuple->dst.u3.ip;
+ return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3);
}
static int masq_inet_event(struct notifier_block *this,
unsigned long event,
void *ptr)
{
- struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
- struct net *net = dev_net(idev->dev);
+ const struct in_ifaddr *ifa = ptr;
+ const struct in_device *idev;
+ const struct net_device *dev;
+ union nf_inet_addr addr;
+
+ if (event != NETDEV_DOWN)
+ return NOTIFY_DONE;
/* The masq_dev_notifier will catch the case of the device going
* down. So if the inetdev is dead and being destroyed we have
* no work to do. Otherwise this is an individual address removal
* and we have to perform the flush.
*/
+ idev = ifa->ifa_dev;
if (idev->dead)
return NOTIFY_DONE;
- if (event == NETDEV_DOWN)
- nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0);
+ memset(&addr, 0, sizeof(addr));
+
+ addr.ip = ifa->ifa_address;
+
+ dev = idev->dev;
+ nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex,
+ inet_cmp, GFP_KERNEL);
return NOTIFY_DONE;
}
@@ -136,8 +215,6 @@ static struct notifier_block masq_inet_notifier = {
};
#if IS_ENABLED(CONFIG_IPV6)
-static atomic_t v6_worker_count __read_mostly;
-
static int
nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
const struct in6_addr *daddr, unsigned int srcprefs,
@@ -187,40 +264,6 @@ nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);
-struct masq_dev_work {
- struct work_struct work;
- struct net *net;
- struct in6_addr addr;
- int ifindex;
-};
-
-static int inet6_cmp(struct nf_conn *ct, void *work)
-{
- struct masq_dev_work *w = (struct masq_dev_work *)work;
- struct nf_conntrack_tuple *tuple;
-
- if (!device_cmp(ct, (void *)(long)w->ifindex))
- return 0;
-
- tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
-
- return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6);
-}
-
-static void iterate_cleanup_work(struct work_struct *work)
-{
- struct masq_dev_work *w;
-
- w = container_of(work, struct masq_dev_work, work);
-
- nf_ct_iterate_cleanup_net(w->net, inet6_cmp, (void *)w, 0, 0);
-
- put_net(w->net);
- kfree(w);
- atomic_dec(&v6_worker_count);
- module_put(THIS_MODULE);
-}
-
/* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).
*
* Defer it to the system workqueue.
@@ -233,36 +276,19 @@ static int masq_inet6_event(struct notifier_block *this,
{
struct inet6_ifaddr *ifa = ptr;
const struct net_device *dev;
- struct masq_dev_work *w;
- struct net *net;
+ union nf_inet_addr addr;
- if (event != NETDEV_DOWN || atomic_read(&v6_worker_count) >= 16)
+ if (event != NETDEV_DOWN)
return NOTIFY_DONE;
dev = ifa->idev->dev;
- net = maybe_get_net(dev_net(dev));
- if (!net)
- return NOTIFY_DONE;
- if (!try_module_get(THIS_MODULE))
- goto err_module;
+ memset(&addr, 0, sizeof(addr));
- w = kmalloc(sizeof(*w), GFP_ATOMIC);
- if (w) {
- atomic_inc(&v6_worker_count);
-
- INIT_WORK(&w->work, iterate_cleanup_work);
- w->ifindex = dev->ifindex;
- w->net = net;
- w->addr = ifa->addr;
- schedule_work(&w->work);
+ addr.in6 = ifa->addr;
- return NOTIFY_DONE;
- }
-
- module_put(THIS_MODULE);
- err_module:
- put_net(net);
+ nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp,
+ GFP_ATOMIC);
return NOTIFY_DONE;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 081437dd75b7..c0851fec11d4 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -780,6 +780,7 @@ static void nf_tables_table_notify(const struct nft_ctx *ctx, int event)
{
struct nftables_pernet *nft_net;
struct sk_buff *skb;
+ u16 flags = 0;
int err;
if (!ctx->report &&
@@ -790,8 +791,11 @@ static void nf_tables_table_notify(const struct nft_ctx *ctx, int event)
if (skb == NULL)
goto err;
+ if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+ flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
err = nf_tables_fill_table_info(skb, ctx->net, ctx->portid, ctx->seq,
- event, 0, ctx->family, ctx->table);
+ event, flags, ctx->family, ctx->table);
if (err < 0) {
kfree_skb(skb);
goto err;
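
The same two-line pattern recurs across the notifier paths in this patch:
it copies only NLM_F_CREATE and NLM_F_EXCL from the request into the
event, so netlink listeners can tell a create from an update without
seeing unrelated request flags. The guard plus masked OR is equivalent to
a single mask:

    #include <stdint.h>

    #define NLM_F_EXCL   0x200   /* values from <linux/netlink.h> */
    #define NLM_F_CREATE 0x400

    /* echo only the creation-related request flags into the event */
    static uint16_t event_flags(uint16_t request_flags)
    {
            return request_flags & (NLM_F_CREATE | NLM_F_EXCL);
    }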
@@ -1563,6 +1567,7 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
{
struct nftables_pernet *nft_net;
struct sk_buff *skb;
+ u16 flags = 0;
int err;
if (!ctx->report &&
@@ -1573,8 +1578,11 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
if (skb == NULL)
goto err;
+ if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+ flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq,
- event, 0, ctx->family, ctx->table,
+ event, flags, ctx->family, ctx->table,
ctx->chain);
if (err < 0) {
kfree_skb(skb);
@@ -2866,8 +2874,7 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
u32 flags, int family,
const struct nft_table *table,
const struct nft_chain *chain,
- const struct nft_rule *rule,
- const struct nft_rule *prule)
+ const struct nft_rule *rule, u64 handle)
{
struct nlmsghdr *nlh;
const struct nft_expr *expr, *next;
@@ -2887,9 +2894,8 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
NFTA_RULE_PAD))
goto nla_put_failure;
- if (event != NFT_MSG_DELRULE && prule) {
- if (nla_put_be64(skb, NFTA_RULE_POSITION,
- cpu_to_be64(prule->handle),
+ if (event != NFT_MSG_DELRULE && handle) {
+ if (nla_put_be64(skb, NFTA_RULE_POSITION, cpu_to_be64(handle),
NFTA_RULE_PAD))
goto nla_put_failure;
}
@@ -2925,7 +2931,10 @@ static void nf_tables_rule_notify(const struct nft_ctx *ctx,
const struct nft_rule *rule, int event)
{
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
+ const struct nft_rule *prule;
struct sk_buff *skb;
+ u64 handle = 0;
+ u16 flags = 0;
int err;
if (!ctx->report &&
@@ -2936,9 +2945,20 @@ static void nf_tables_rule_notify(const struct nft_ctx *ctx,
if (skb == NULL)
goto err;
+ if (event == NFT_MSG_NEWRULE &&
+ !list_is_first(&rule->list, &ctx->chain->rules) &&
+ !list_is_last(&rule->list, &ctx->chain->rules)) {
+ prule = list_prev_entry(rule, list);
+ handle = prule->handle;
+ }
+ if (ctx->flags & (NLM_F_APPEND | NLM_F_REPLACE))
+ flags |= NLM_F_APPEND;
+ if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+ flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
err = nf_tables_fill_rule_info(skb, ctx->net, ctx->portid, ctx->seq,
- event, 0, ctx->family, ctx->table,
- ctx->chain, rule, NULL);
+ event, flags, ctx->family, ctx->table,
+ ctx->chain, rule, handle);
if (err < 0) {
kfree_skb(skb);
goto err;
@@ -2964,6 +2984,7 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
struct net *net = sock_net(skb->sk);
const struct nft_rule *rule, *prule;
unsigned int s_idx = cb->args[0];
+ u64 handle;
prule = NULL;
list_for_each_entry_rcu(rule, &chain->rules, list) {
@@ -2975,12 +2996,17 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0]));
}
+ if (prule)
+ handle = prule->handle;
+ else
+ handle = 0;
+
if (nf_tables_fill_rule_info(skb, net, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFT_MSG_NEWRULE,
NLM_F_MULTI | NLM_F_APPEND,
table->family,
- table, chain, rule, prule) < 0)
+ table, chain, rule, handle) < 0)
return 1;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -3143,7 +3169,7 @@ static int nf_tables_getrule(struct sk_buff *skb, const struct nfnl_info *info,
err = nf_tables_fill_rule_info(skb2, net, NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
- family, table, chain, rule, NULL);
+ family, table, chain, rule, 0);
if (err < 0)
goto err_fill_rule_info;
@@ -3403,17 +3429,15 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
}
if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
+ err = nft_delrule(&ctx, old_rule);
+ if (err < 0)
+ goto err_destroy_flow_rule;
+
trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
if (trans == NULL) {
err = -ENOMEM;
goto err_destroy_flow_rule;
}
- err = nft_delrule(&ctx, old_rule);
- if (err < 0) {
- nft_trans_destroy(trans);
- goto err_destroy_flow_rule;
- }
-
list_add_tail_rcu(&rule->list, &old_rule->list);
} else {
trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
@@ -3943,8 +3967,9 @@ static void nf_tables_set_notify(const struct nft_ctx *ctx,
gfp_t gfp_flags)
{
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
- struct sk_buff *skb;
u32 portid = ctx->portid;
+ struct sk_buff *skb;
+ u16 flags = 0;
int err;
if (!ctx->report &&
@@ -3955,7 +3980,10 @@ static void nf_tables_set_notify(const struct nft_ctx *ctx,
if (skb == NULL)
goto err;
- err = nf_tables_fill_set(skb, ctx, set, event, 0);
+ if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+ flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
+ err = nf_tables_fill_set(skb, ctx, set, event, flags);
if (err < 0) {
kfree_skb(skb);
goto err;
@@ -4336,7 +4364,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if (ops->privsize != NULL)
size = ops->privsize(nla, &desc);
alloc_size = sizeof(*set) + size + udlen;
- if (alloc_size < size)
+ if (alloc_size < size || alloc_size > INT_MAX)
return -ENOMEM;
set = kvzalloc(alloc_size, GFP_KERNEL);
if (!set)
@@ -5231,12 +5259,13 @@ static int nf_tables_getsetelem(struct sk_buff *skb,
static void nf_tables_setelem_notify(const struct nft_ctx *ctx,
const struct nft_set *set,
const struct nft_set_elem *elem,
- int event, u16 flags)
+ int event)
{
struct nftables_pernet *nft_net;
struct net *net = ctx->net;
u32 portid = ctx->portid;
struct sk_buff *skb;
+ u16 flags = 0;
int err;
if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
@@ -5246,6 +5275,9 @@ static void nf_tables_setelem_notify(const struct nft_ctx *ctx,
if (skb == NULL)
goto err;
+ if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+ flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
err = nf_tables_fill_setelem_info(skb, ctx, 0, portid, event, flags,
set, elem);
if (err < 0) {
@@ -6921,7 +6953,7 @@ static int nf_tables_delobj(struct sk_buff *skb, const struct nfnl_info *info,
void nft_obj_notify(struct net *net, const struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq, int event,
- int family, int report, gfp_t gfp)
+ u16 flags, int family, int report, gfp_t gfp)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct sk_buff *skb;
@@ -6946,8 +6978,9 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
if (skb == NULL)
goto err;
- err = nf_tables_fill_obj_info(skb, net, portid, seq, event, 0, family,
- table, obj, false);
+ err = nf_tables_fill_obj_info(skb, net, portid, seq, event,
+ flags & (NLM_F_CREATE | NLM_F_EXCL),
+ family, table, obj, false);
if (err < 0) {
kfree_skb(skb);
goto err;
@@ -6964,7 +6997,7 @@ static void nf_tables_obj_notify(const struct nft_ctx *ctx,
struct nft_object *obj, int event)
{
nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event,
- ctx->family, ctx->report, GFP_KERNEL);
+ ctx->flags, ctx->family, ctx->report, GFP_KERNEL);
}
/*
@@ -7745,6 +7778,7 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx,
{
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
struct sk_buff *skb;
+ u16 flags = 0;
int err;
if (!ctx->report &&
@@ -7755,8 +7789,11 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx,
if (skb == NULL)
goto err;
+ if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+ flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
err = nf_tables_fill_flowtable_info(skb, ctx->net, ctx->portid,
- ctx->seq, event, 0,
+ ctx->seq, event, flags,
ctx->family, flowtable, hook_list);
if (err < 0) {
kfree_skb(skb);
@@ -8634,7 +8671,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nft_setelem_activate(net, te->set, &te->elem);
nf_tables_setelem_notify(&trans->ctx, te->set,
&te->elem,
- NFT_MSG_NEWSETELEM, 0);
+ NFT_MSG_NEWSETELEM);
nft_trans_destroy(trans);
break;
case NFT_MSG_DELSETELEM:
@@ -8642,7 +8679,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nf_tables_setelem_notify(&trans->ctx, te->set,
&te->elem,
- NFT_MSG_DELSETELEM, 0);
+ NFT_MSG_DELSETELEM);
nft_setelem_remove(net, te->set, &te->elem);
if (!nft_setelem_is_catchall(te->set, &te->elem)) {
atomic_dec(&te->set->nelems);
@@ -9599,7 +9636,6 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
table->use--;
nf_tables_chain_destroy(&ctx);
}
- list_del(&table->list);
nf_tables_table_destroy(&ctx);
}
@@ -9612,6 +9648,8 @@ static void __nft_release_tables(struct net *net)
if (nft_table_has_owner(table))
continue;
+ list_del(&table->list);
+
__nft_release_table(net, table);
}
}
@@ -9619,31 +9657,38 @@ static void __nft_release_tables(struct net *net)
static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
+ struct nft_table *table, *to_delete[8];
struct nftables_pernet *nft_net;
struct netlink_notify *n = ptr;
- struct nft_table *table, *nt;
struct net *net = n->net;
- bool release = false;
+ unsigned int deleted;
+ bool restart = false;
if (event != NETLINK_URELEASE || n->protocol != NETLINK_NETFILTER)
return NOTIFY_DONE;
nft_net = nft_pernet(net);
+ deleted = 0;
mutex_lock(&nft_net->commit_mutex);
+again:
list_for_each_entry(table, &nft_net->tables, list) {
if (nft_table_has_owner(table) &&
n->portid == table->nlpid) {
__nft_release_hook(net, table);
- release = true;
+ list_del_rcu(&table->list);
+ to_delete[deleted++] = table;
+ if (deleted >= ARRAY_SIZE(to_delete))
+ break;
}
}
- if (release) {
+ if (deleted) {
+ restart = deleted >= ARRAY_SIZE(to_delete);
synchronize_rcu();
- list_for_each_entry_safe(table, nt, &nft_net->tables, list) {
- if (nft_table_has_owner(table) &&
- n->portid == table->nlpid)
- __nft_release_table(net, table);
- }
+ while (deleted)
+ __nft_release_table(net, to_delete[--deleted]);
+
+ if (restart)
+ goto again;
}
mutex_unlock(&nft_net->commit_mutex);
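
The nft_rcv_nl_event() rework above trades an unbounded second list walk for a small on-stack batch that is drained under a single synchronize_rcu() and restarted if it fills. A minimal sketch of that pattern, assuming an RCU-protected list; the entry type, lock, and teardown are placeholders, not the nf_tables ones:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct entry {
	struct list_head list;
	u32 owner;
};

static LIST_HEAD(entries);
static DEFINE_MUTEX(entries_lock);

static void release_owned(u32 owner)
{
	struct entry *e, *batch[8];
	unsigned int n;

	mutex_lock(&entries_lock);
again:
	n = 0;
	list_for_each_entry(e, &entries, list) {
		if (e->owner != owner)
			continue;
		/* unlink now; list_del_rcu() keeps e->list.next intact,
		 * so both RCU readers and this walk can continue past e
		 */
		list_del_rcu(&e->list);
		batch[n++] = e;
		if (n == ARRAY_SIZE(batch))
			break;
	}
	if (n) {
		bool restart = (n == ARRAY_SIZE(batch));

		synchronize_rcu();	/* readers of unlinked entries are done */
		while (n)
			kfree(batch[--n]);
		if (restart)
			goto again;	/* there may be another batch */
	}
	mutex_unlock(&entries_lock);
}

The fixed-size batch bounds stack usage; the restart keeps the release complete at the cost of one extra grace period per ARRAY_SIZE(batch) matching entries.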
diff --git a/net/netfilter/nfnetlink_hook.c b/net/netfilter/nfnetlink_hook.c
index f554e2ea32ee..d5c719c9e36c 100644
--- a/net/netfilter/nfnetlink_hook.c
+++ b/net/netfilter/nfnetlink_hook.c
@@ -185,7 +185,7 @@ static const struct nf_hook_entries *
nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *dev)
{
const struct nf_hook_entries *hook_head = NULL;
-#ifdef CONFIG_NETFILTER_INGRESS
+#if defined(CONFIG_NETFILTER_INGRESS) || defined(CONFIG_NETFILTER_EGRESS)
struct net_device *netdev;
#endif
@@ -221,9 +221,9 @@ nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *de
hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
break;
#endif
-#ifdef CONFIG_NETFILTER_INGRESS
+#if defined(CONFIG_NETFILTER_INGRESS) || defined(CONFIG_NETFILTER_EGRESS)
case NFPROTO_NETDEV:
- if (hook != NF_NETDEV_INGRESS)
+ if (hook >= NF_NETDEV_NUMHOOKS)
return ERR_PTR(-EOPNOTSUPP);
if (!dev)
@@ -233,7 +233,15 @@ nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *de
if (!netdev)
return ERR_PTR(-ENODEV);
- return rcu_dereference(netdev->nf_hooks_ingress);
+#ifdef CONFIG_NETFILTER_INGRESS
+ if (hook == NF_NETDEV_INGRESS)
+ return rcu_dereference(netdev->nf_hooks_ingress);
+#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ if (hook == NF_NETDEV_EGRESS)
+ return rcu_dereference(netdev->nf_hooks_egress);
+#endif
+ fallthrough;
#endif
default:
return ERR_PTR(-EPROTONOSUPPORT);
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
index 5b02408a920b..c3563f0be269 100644
--- a/net/netfilter/nft_chain_filter.c
+++ b/net/netfilter/nft_chain_filter.c
@@ -310,9 +310,11 @@ static const struct nft_chain_type nft_chain_filter_netdev = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_NETDEV,
- .hook_mask = (1 << NF_NETDEV_INGRESS),
+ .hook_mask = (1 << NF_NETDEV_INGRESS) |
+ (1 << NF_NETDEV_EGRESS),
.hooks = {
[NF_NETDEV_INGRESS] = nft_do_chain_netdev,
+ [NF_NETDEV_EGRESS] = nft_do_chain_netdev,
},
};
@@ -342,12 +344,6 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
return;
}
- /* UNREGISTER events are also happening on netns exit.
- *
- * Although nf_tables core releases all tables/chains, only this event
- * handler provides guarantee that hook->ops.dev is still accessible,
- * so we cannot skip exiting net namespaces.
- */
__nft_release_basechain(ctx);
}
@@ -366,6 +362,9 @@ static int nf_tables_netdev_event(struct notifier_block *this,
event != NETDEV_CHANGENAME)
return NOTIFY_DONE;
+ if (!check_net(ctx.net))
+ return NOTIFY_DONE;
+
nft_net = nft_pernet(ctx.net);
mutex_lock(&nft_net->commit_mutex);
list_for_each_entry(table, &nft_net->tables, list) {
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 272bcdb1392d..f69cc73c5813 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -19,6 +19,7 @@
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_arp/arp_tables.h>
#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_log.h>
/* Used for matches where *info is larger than X byte */
#define NFT_MATCH_LARGE_THRESH 192
@@ -257,8 +258,22 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
nft_compat_wait_for_destructors();
ret = xt_check_target(&par, size, proto, inv);
- if (ret < 0)
+ if (ret < 0) {
+ if (ret == -ENOENT) {
+ const char *modname = NULL;
+
+ if (strcmp(target->name, "LOG") == 0)
+ modname = "nf_log_syslog";
+ else if (strcmp(target->name, "NFLOG") == 0)
+ modname = "nfnetlink_log";
+
+ if (modname &&
+ nft_request_module(ctx->net, "%s", modname) == -EAGAIN)
+ return -EAGAIN;
+ }
+
return ret;
+ }
/* The standard target cannot be used */
if (!target->target)
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 6ba3256fa844..87f3af4645d9 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -198,17 +198,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
return -EBUSY;
priv->op = ntohl(nla_get_be32(tb[NFTA_DYNSET_OP]));
- switch (priv->op) {
- case NFT_DYNSET_OP_ADD:
- case NFT_DYNSET_OP_DELETE:
- break;
- case NFT_DYNSET_OP_UPDATE:
- if (!(set->flags & NFT_SET_TIMEOUT))
- return -EOPNOTSUPP;
- break;
- default:
+ if (priv->op > NFT_DYNSET_OP_DELETE)
return -EOPNOTSUPP;
- }
timeout = 0;
if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
index 0363f533a42b..c4d1389f7185 100644
--- a/net/netfilter/nft_quota.c
+++ b/net/netfilter/nft_quota.c
@@ -60,7 +60,7 @@ static void nft_quota_obj_eval(struct nft_object *obj,
if (overquota &&
!test_and_set_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags))
nft_obj_notify(nft_net(pkt), obj->key.table, obj, 0, 0,
- NFT_MSG_NEWOBJ, nft_pf(pkt), 0, GFP_ATOMIC);
+ NFT_MSG_NEWOBJ, 0, nft_pf(pkt), 0, GFP_ATOMIC);
}
static int nft_quota_do_init(const struct nlattr * const tb[],
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 7b2f359bfce4..2f7cf5ecebf4 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -137,7 +137,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
{
int ret;
- info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
+ info->timer = kzalloc(sizeof(*info->timer), GFP_KERNEL);
if (!info->timer) {
ret = -ENOMEM;
goto out;
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
index 2ff75f7637b0..f39244f9c0ed 100644
--- a/net/netfilter/xt_LOG.c
+++ b/net/netfilter/xt_LOG.c
@@ -44,6 +44,7 @@ log_tg(struct sk_buff *skb, const struct xt_action_param *par)
static int log_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_log_info *loginfo = par->targinfo;
+ int ret;
if (par->family != NFPROTO_IPV4 && par->family != NFPROTO_IPV6)
return -EINVAL;
@@ -58,7 +59,14 @@ static int log_tg_check(const struct xt_tgchk_param *par)
return -EINVAL;
}
- return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
+ if (ret != 0 && !par->nft_compat) {
+ request_module("%s", "nf_log_syslog");
+
+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
+ }
+
+ return ret;
}
static void log_tg_destroy(const struct xt_tgdtor_param *par)
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index fb5793208059..e660c3710a10 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -42,13 +42,21 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
static int nflog_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_nflog_info *info = par->targinfo;
+ int ret;
if (info->flags & ~XT_NFLOG_MASK)
return -EINVAL;
if (info->prefix[sizeof(info->prefix) - 1] != '\0')
return -EINVAL;
- return nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
+ if (ret != 0 && !par->nft_compat) {
+ request_module("%s", "nfnetlink_log");
+
+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
+ }
+
+ return ret;
}
static void nflog_tg_destroy(const struct xt_tgdtor_param *par)
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 0d5c422f8745..8aec1b529364 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -94,11 +94,11 @@ static unsigned int
xt_rateest_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_rateest_target_info *info = par->targinfo;
- struct gnet_stats_basic_packed *stats = &info->est->bstats;
+ struct gnet_stats_basic_sync *stats = &info->est->bstats;
spin_lock_bh(&info->est->lock);
- stats->bytes += skb->len;
- stats->packets++;
+ u64_stats_add(&stats->bytes, skb->len);
+ u64_stats_inc(&stats->packets);
spin_unlock_bh(&info->est->lock);
return XT_CONTINUE;
@@ -143,6 +143,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
if (!est)
goto err1;
+ gnet_stats_basic_sync_init(&est->bstats);
strlcpy(est->name, info->name, sizeof(est->name));
spin_lock_init(&est->lock);
est->refcnt = 1;
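
The xt_RATEEST hunks above move from plain u64 fields to gnet_stats_basic_sync and the u64_stats accessors. A sketch of the canonical update/read pairing, assuming gnet_stats_basic_sync_init() has run; note xt_RATEEST itself serializes writers with est->lock rather than the syncp, and the wrapper functions here are illustrative:

#include <net/gen_stats.h>

/* Writer side: on 32-bit kernels the syncp seqcount lets readers
 * detect a torn 64-bit update; on 64-bit it compiles away.
 */
static void account_bytes(struct gnet_stats_basic_sync *b,
			  unsigned int len)
{
	u64_stats_update_begin(&b->syncp);
	u64_stats_add(&b->bytes, len);
	u64_stats_inc(&b->packets);
	u64_stats_update_end(&b->syncp);
}

/* Reader side: u64_stats_read() takes a consistent snapshot of one
 * counter without acquiring the writer's lock.
 */
static u64 packets_snapshot(const struct gnet_stats_basic_sync *b)
{
	return u64_stats_read(&b->packets);
}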
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 24b7cf447bc5..4c575324a985 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -594,7 +594,10 @@ static int netlink_insert(struct sock *sk, u32 portid)
/* We need to ensure that the socket is hashed and visible. */
smp_wmb();
- nlk_sk(sk)->bound = portid;
+ /* Paired with lockless reads from netlink_bind(),
+ * netlink_connect() and netlink_sendmsg().
+ */
+ WRITE_ONCE(nlk_sk(sk)->bound, portid);
err:
release_sock(sk);
@@ -1012,7 +1015,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
if (nlk->ngroups < BITS_PER_LONG)
groups &= (1UL << nlk->ngroups) - 1;
- bound = nlk->bound;
+ /* Paired with WRITE_ONCE() in netlink_insert() */
+ bound = READ_ONCE(nlk->bound);
if (bound) {
/* Ensure nlk->portid is up-to-date. */
smp_rmb();
@@ -1098,8 +1102,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
/* No need for barriers here as we return to user-space without
* using any of the bound attributes.
+ * Paired with WRITE_ONCE() in netlink_insert().
*/
- if (!nlk->bound)
+ if (!READ_ONCE(nlk->bound))
err = netlink_autobind(sock);
if (err == 0) {
@@ -1407,8 +1412,6 @@ struct netlink_broadcast_data {
int delivered;
gfp_t allocation;
struct sk_buff *skb, *skb2;
- int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
- void *tx_data;
};
static void do_one_broadcast(struct sock *sk,
@@ -1462,11 +1465,6 @@ static void do_one_broadcast(struct sock *sk,
p->delivery_failure = 1;
goto out;
}
- if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
- kfree_skb(p->skb2);
- p->skb2 = NULL;
- goto out;
- }
if (sk_filter(sk, p->skb2)) {
kfree_skb(p->skb2);
p->skb2 = NULL;
@@ -1489,10 +1487,8 @@ out:
sock_put(sk);
}
-int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
- u32 group, gfp_t allocation,
- int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
- void *filter_data)
+int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
+ u32 group, gfp_t allocation)
{
struct net *net = sock_net(ssk);
struct netlink_broadcast_data info;
@@ -1511,8 +1507,6 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid
info.allocation = allocation;
info.skb = skb;
info.skb2 = NULL;
- info.tx_filter = filter;
- info.tx_data = filter_data;
/* While we sleep in clone, do not allow to change socket list */
@@ -1538,14 +1532,6 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid
}
return -ESRCH;
}
-EXPORT_SYMBOL(netlink_broadcast_filtered);
-
-int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
- u32 group, gfp_t allocation)
-{
- return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
- NULL, NULL);
-}
EXPORT_SYMBOL(netlink_broadcast);
struct netlink_set_err_data {
@@ -1888,7 +1874,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
dst_group = nlk->dst_group;
}
- if (!nlk->bound) {
+ /* Paired with WRITE_ONCE() in netlink_insert() */
+ if (!READ_ONCE(nlk->bound)) {
err = netlink_autobind(sock);
if (err)
goto out;
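
The af_netlink changes above annotate a classic publish/observe of a flag with WRITE_ONCE()/READ_ONCE() around the pre-existing smp_wmb()/smp_rmb() pairing. A compressed sketch of the pattern with illustrative names; nlk->bound plays the role of 'bound' here:

#include <linux/compiler.h>
#include <asm/barrier.h>
#include <linux/types.h>

struct sk_state {
	u32 portid;
	int bound;
};

static void publish(struct sk_state *s, u32 portid)
{
	s->portid = portid;
	smp_wmb();			/* order portid store before the flag */
	WRITE_ONCE(s->bound, 1);	/* paired with READ_ONCE() below;
					 * also prevents store tearing */
}

static u32 observe(struct sk_state *s)
{
	if (READ_ONCE(s->bound)) {	/* lockless read of the flag */
		smp_rmb();		/* portid load sees the earlier store */
		return s->portid;
	}
	return 0;
}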
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 6d16e1ab1a8a..775064cdd0ee 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -633,7 +633,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
struct sockaddr_ax25 *addr = (struct sockaddr_ax25 *)uaddr;
- ax25_address *source = NULL;
+ const ax25_address *source = NULL;
ax25_uid_assoc *user;
struct net_device *dev;
int err = 0;
@@ -673,7 +673,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
err = -ENETUNREACH;
goto out_release;
}
- source = (ax25_address *)dev->dev_addr;
+ source = (const ax25_address *)dev->dev_addr;
user = ax25_findbyuid(current_euid());
if (user) {
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 29e418c8c6c3..3aaac4a22b38 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -108,10 +108,10 @@ static int __must_check nr_set_mac_address(struct net_device *dev, void *addr)
if (err)
return err;
- ax25_listen_release((ax25_address *)dev->dev_addr, NULL);
+ ax25_listen_release((const ax25_address *)dev->dev_addr, NULL);
}
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
@@ -120,7 +120,7 @@ static int nr_open(struct net_device *dev)
{
int err;
- err = ax25_listen_register((ax25_address *)dev->dev_addr, NULL);
+ err = ax25_listen_register((const ax25_address *)dev->dev_addr, NULL);
if (err)
return err;
@@ -131,7 +131,7 @@ static int nr_open(struct net_device *dev)
static int nr_close(struct net_device *dev)
{
- ax25_listen_release((ax25_address *)dev->dev_addr, NULL);
+ ax25_listen_release((const ax25_address *)dev->dev_addr, NULL);
netif_stop_queue(dev);
return 0;
}
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index ddd5cbd455e3..baea3cbd76ca 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -598,7 +598,7 @@ struct net_device *nr_dev_get(ax25_address *addr)
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
- ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
+ ax25cmp(addr, (const ax25_address *)dev->dev_addr) == 0) {
dev_hold(dev);
goto out;
}
@@ -825,7 +825,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
ax25s = nr_neigh->ax25;
nr_neigh->ax25 = ax25_send_frame(skb, 256,
- (ax25_address *)dev->dev_addr,
+ (const ax25_address *)dev->dev_addr,
&nr_neigh->callsign,
nr_neigh->digipeat, nr_neigh->dev);
if (ax25s)
diff --git a/net/nfc/af_nfc.c b/net/nfc/af_nfc.c
index 6024fad905ff..dda323e0a473 100644
--- a/net/nfc/af_nfc.c
+++ b/net/nfc/af_nfc.c
@@ -60,6 +60,9 @@ int nfc_proto_register(const struct nfc_protocol *nfc_proto)
proto_tab[nfc_proto->id] = nfc_proto;
write_unlock(&proto_tab_lock);
+ if (rc)
+ proto_unregister(nfc_proto->proto);
+
return rc;
}
EXPORT_SYMBOL(nfc_proto_register);
diff --git a/net/nfc/digital_core.c b/net/nfc/digital_core.c
index fefc03674f4f..d63d2e5dc60c 100644
--- a/net/nfc/digital_core.c
+++ b/net/nfc/digital_core.c
@@ -277,6 +277,7 @@ int digital_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param)
static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech)
{
struct digital_tg_mdaa_params *params;
+ int rc;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
@@ -291,8 +292,12 @@ static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech)
get_random_bytes(params->nfcid2 + 2, NFC_NFCID2_MAXSIZE - 2);
params->sc = DIGITAL_SENSF_FELICA_SC;
- return digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params,
- 500, digital_tg_recv_atr_req, NULL);
+ rc = digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params,
+ 500, digital_tg_recv_atr_req, NULL);
+ if (rc)
+ kfree(params);
+
+ return rc;
}
static int digital_tg_listen_md(struct nfc_digital_dev *ddev, u8 rf_tech)
diff --git a/net/nfc/digital_technology.c b/net/nfc/digital_technology.c
index 84d2345c75a3..3adf4589852a 100644
--- a/net/nfc/digital_technology.c
+++ b/net/nfc/digital_technology.c
@@ -465,8 +465,12 @@ static int digital_in_send_sdd_req(struct nfc_digital_dev *ddev,
skb_put_u8(skb, sel_cmd);
skb_put_u8(skb, DIGITAL_SDD_REQ_SEL_PAR);
- return digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res,
- target);
+ rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res,
+ target);
+ if (rc)
+ kfree_skb(skb);
+
+ return rc;
}
static void digital_in_recv_sens_res(struct nfc_digital_dev *ddev, void *arg,
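
Both NFC digital fixes above enforce the same ownership rule: a buffer handed to an asynchronous send belongs to the completion path on success, so the caller frees it only when the submit itself fails. A generic sketch with hypothetical types and callback names:

#include <linux/slab.h>

struct params { int rf_tech; };

/* Hypothetical async submit: on success, ownership of 'p' moves to
 * the completion callback; on failure the caller still owns it.
 */
int send_cmd_async(void *dev, struct params *p,
		   void (*done)(void *dev, struct params *p));

static void on_done(void *dev, struct params *p)
{
	kfree(p);	/* success path: completion releases the buffer */
}

static int submit(void *dev)
{
	struct params *p = kzalloc(sizeof(*p), GFP_KERNEL);
	int rc;

	if (!p)
		return -ENOMEM;

	rc = send_cmd_async(dev, p, on_done);
	if (rc)
		kfree(p);	/* failure: on_done() will never run */

	return rc;
}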
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
index 3a89bd9b89fc..af6bacb3ba98 100644
--- a/net/nfc/hci/command.c
+++ b/net/nfc/hci/command.c
@@ -114,8 +114,6 @@ int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
{
u8 pipe;
- pr_debug("\n");
-
pipe = hdev->gate2pipe[gate];
if (pipe == NFC_HCI_INVALID_PIPE)
return -EADDRNOTAVAIL;
@@ -130,8 +128,6 @@ int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
{
u8 pipe;
- pr_debug("\n");
-
pipe = hdev->gate2pipe[gate];
if (pipe == NFC_HCI_INVALID_PIPE)
return -EADDRNOTAVAIL;
@@ -205,8 +201,6 @@ static int nfc_hci_open_pipe(struct nfc_hci_dev *hdev, u8 pipe)
static int nfc_hci_close_pipe(struct nfc_hci_dev *hdev, u8 pipe)
{
- pr_debug("\n");
-
return nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_CLOSE_PIPE,
NULL, 0, NULL);
}
@@ -242,8 +236,6 @@ static u8 nfc_hci_create_pipe(struct nfc_hci_dev *hdev, u8 dest_host,
static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe)
{
- pr_debug("\n");
-
return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
NFC_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL);
}
@@ -256,8 +248,6 @@ static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev)
/* TODO: Find out what the identity reference data is
* and fill param with it. HCI spec 6.1.3.5 */
- pr_debug("\n");
-
if (test_bit(NFC_HCI_QUIRK_SHORT_CLEAR, &hdev->quirks))
param_len = 0;
@@ -271,8 +261,6 @@ int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate)
int r;
u8 pipe = hdev->gate2pipe[gate];
- pr_debug("\n");
-
if (pipe == NFC_HCI_INVALID_PIPE)
return -EADDRNOTAVAIL;
@@ -296,8 +284,6 @@ int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev)
{
int r;
- pr_debug("\n");
-
r = nfc_hci_clear_all_pipes(hdev);
if (r < 0)
return r;
@@ -314,8 +300,6 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate,
bool pipe_created = false;
int r;
- pr_debug("\n");
-
if (pipe == NFC_HCI_DO_NOT_CREATE_PIPE)
return 0;
diff --git a/net/nfc/hci/llc_shdlc.c b/net/nfc/hci/llc_shdlc.c
index 71e10347e6a9..e90f70385813 100644
--- a/net/nfc/hci/llc_shdlc.c
+++ b/net/nfc/hci/llc_shdlc.c
@@ -363,8 +363,6 @@ static int llc_shdlc_connect_initiate(const struct llc_shdlc *shdlc)
{
struct sk_buff *skb;
- pr_debug("\n");
-
skb = llc_shdlc_alloc_skb(shdlc, 2);
if (skb == NULL)
return -ENOMEM;
@@ -379,8 +377,6 @@ static int llc_shdlc_connect_send_ua(const struct llc_shdlc *shdlc)
{
struct sk_buff *skb;
- pr_debug("\n");
-
skb = llc_shdlc_alloc_skb(shdlc, 0);
if (skb == NULL)
return -ENOMEM;
@@ -570,8 +566,6 @@ static void llc_shdlc_connect_timeout(struct timer_list *t)
{
struct llc_shdlc *shdlc = from_timer(shdlc, t, connect_timer);
- pr_debug("\n");
-
schedule_work(&shdlc->sm_work);
}
@@ -598,8 +592,6 @@ static void llc_shdlc_sm_work(struct work_struct *work)
struct llc_shdlc *shdlc = container_of(work, struct llc_shdlc, sm_work);
int r;
- pr_debug("\n");
-
mutex_lock(&shdlc->state_mutex);
switch (shdlc->state) {
@@ -681,8 +673,6 @@ static int llc_shdlc_connect(struct llc_shdlc *shdlc)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq);
- pr_debug("\n");
-
mutex_lock(&shdlc->state_mutex);
shdlc->state = SHDLC_CONNECTING;
@@ -701,8 +691,6 @@ static int llc_shdlc_connect(struct llc_shdlc *shdlc)
static void llc_shdlc_disconnect(struct llc_shdlc *shdlc)
{
- pr_debug("\n");
-
mutex_lock(&shdlc->state_mutex);
shdlc->state = SHDLC_DISCONNECTED;
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index 3c4172a5aeb5..41e3a20c8935 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -337,8 +337,6 @@ int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock)
struct nfc_dev *dev;
struct nfc_llcp_local *local;
- pr_debug("Sending DISC\n");
-
local = sock->local;
if (local == NULL)
return -ENODEV;
@@ -362,8 +360,6 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
struct nfc_llcp_local *local;
u16 size = 0;
- pr_debug("Sending SYMM\n");
-
local = nfc_llcp_find_local(dev);
if (local == NULL)
return -ENODEV;
@@ -399,8 +395,6 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
u16 size = 0;
__be16 miux;
- pr_debug("Sending CONNECT\n");
-
local = sock->local;
if (local == NULL)
return -ENODEV;
@@ -475,8 +469,6 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
u16 size = 0;
__be16 miux;
- pr_debug("Sending CC\n");
-
local = sock->local;
if (local == NULL)
return -ENODEV;
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index eaeb2b1cfa6a..5ad5157aa9c5 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -45,8 +45,6 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock)
struct nfc_llcp_local *local = sock->local;
struct sk_buff *s, *tmp;
- pr_debug("%p\n", &sock->sk);
-
skb_queue_purge(&sock->tx_queue);
skb_queue_purge(&sock->tx_pending_queue);
@@ -1505,9 +1503,8 @@ void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
{
struct nfc_llcp_local *local = (struct nfc_llcp_local *) data;
- pr_debug("Received an LLCP PDU\n");
if (err < 0) {
- pr_err("err %d\n", err);
+ pr_err("LLCP PDU receive err %d\n", err);
return;
}
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 82ab39d80726..6fd873aa86be 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -930,8 +930,6 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev,
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
unsigned long nci_mode = NCI_DEACTIVATE_TYPE_IDLE_MODE;
- pr_debug("entry\n");
-
if (!ndev->target_active_prot) {
pr_err("unable to deactivate target, no active target\n");
return;
@@ -977,8 +975,6 @@ static int nci_dep_link_down(struct nfc_dev *nfc_dev)
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
int rc;
- pr_debug("entry\n");
-
if (nfc_dev->rf_mode == NFC_RF_INITIATOR) {
nci_deactivate_target(nfc_dev, NULL, NCI_DEACTIVATE_TYPE_IDLE_MODE);
} else {
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index e199912ee1e5..19703a649b5a 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -432,8 +432,6 @@ void nci_hci_data_received_cb(void *context,
struct sk_buff *frag_skb;
int msg_len;
- pr_debug("\n");
-
if (err) {
nci_req_complete(ndev, err);
return;
@@ -547,8 +545,6 @@ static u8 nci_hci_create_pipe(struct nci_dev *ndev, u8 dest_host,
static int nci_hci_delete_pipe(struct nci_dev *ndev, u8 pipe)
{
- pr_debug("\n");
-
return nci_hci_send_cmd(ndev, NCI_HCI_ADMIN_GATE,
NCI_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL);
}
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index c5eacaac41ae..282c51051dcc 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -738,8 +738,6 @@ static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
const struct nci_nfcee_discover_ntf *nfcee_ntf =
(struct nci_nfcee_discover_ntf *)skb->data;
- pr_debug("\n");
-
/* NFCForum NCI 9.2.1 HCI Network Specific Handling
* If the NFCC supports the HCI Network, it SHALL return one,
* and only one, NFCEE_DISCOVER_NTF with a Protocol type of
@@ -751,12 +749,6 @@ static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
nci_req_complete(ndev, status);
}
-static void nci_nfcee_action_ntf_packet(struct nci_dev *ndev,
- const struct sk_buff *skb)
-{
- pr_debug("\n");
-}
-
void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
__u16 ntf_opcode = nci_opcode(skb->data);
@@ -813,7 +805,6 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
break;
case NCI_OP_RF_NFCEE_ACTION_NTF:
- nci_nfcee_action_ntf_packet(ndev, skb);
break;
default:
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index a2e72c003805..b911ab78bed9 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -334,6 +334,8 @@ static void nci_core_conn_close_rsp_packet(struct nci_dev *ndev,
ndev->cur_conn_id);
if (conn_info) {
list_del(&conn_info->list);
+ if (conn_info == ndev->rf_conn_info)
+ ndev->rf_conn_info = NULL;
devm_kfree(&ndev->nfc_dev->dev, conn_info);
}
}
diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
index 502e7a3f8948..57500c262cc3 100644
--- a/net/nfc/nci/uart.c
+++ b/net/nfc/nci/uart.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015, Marvell International Ltd.
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- */
-
-/* Inspired (hugely) by HCI LDISC implementation in Bluetooth.
+ * Inspired (hugely) by HCI LDISC implementation in Bluetooth.
*
* Copyright (C) 2000-2001 Qualcomm Incorporated
* Copyright (C) 2002-2003 Maxim Krasnyansky <[email protected]>
(The address above is cgit's email-obfuscation residue; the file credits Maxim Krasnyansky <maxk@qualcomm.com>, as in the Bluetooth HCI LDISC headers this comment cites.)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2a2bc64f75cf..46943a18a10d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -91,6 +91,7 @@
#endif
#include <linux/bpf.h>
#include <net/compat.h>
+#include <linux/netfilter_netdev.h>
#include "internal.h"
@@ -241,8 +242,42 @@ struct packet_skb_cb {
static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);
+#ifdef CONFIG_NETFILTER_EGRESS
+static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb)
+{
+ struct sk_buff *next, *head = NULL, *tail;
+ int rc;
+
+ rcu_read_lock();
+ for (; skb != NULL; skb = next) {
+ next = skb->next;
+ skb_mark_not_on_list(skb);
+
+ if (!nf_hook_egress(skb, &rc, skb->dev))
+ continue;
+
+ if (!head)
+ head = skb;
+ else
+ tail->next = skb;
+
+ tail = skb;
+ }
+ rcu_read_unlock();
+
+ return head;
+}
+#endif
+
static int packet_direct_xmit(struct sk_buff *skb)
{
+#ifdef CONFIG_NETFILTER_EGRESS
+ if (nf_hook_egress_active()) {
+ skb = nf_hook_direct_egress(skb);
+ if (!skb)
+ return NET_XMIT_DROP;
+ }
+#endif
return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}
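
nf_hook_direct_egress() above filters an skb chain in place while preserving order, rebuilding the chain through a head/tail pair. The same loop shape on a plain singly linked list, as a standalone sketch:

#include <stddef.h>

struct node {
	struct node *next;
	int keep;
};

static struct node *filter_keep(struct node *n)
{
	struct node *next, *head = NULL, *tail = NULL;

	for (; n; n = next) {
		next = n->next;
		n->next = NULL;		/* detach, like skb_mark_not_on_list() */

		if (!n->keep)
			continue;	/* dropped nodes are simply not relinked */

		if (!head)
			head = n;	/* first survivor becomes the new head */
		else
			tail->next = n;
		tail = n;
	}
	return head;
}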
diff --git a/net/qrtr/Makefile b/net/qrtr/Makefile
index 1b1411d158a7..8e0605f88a73 100644
--- a/net/qrtr/Makefile
+++ b/net/qrtr/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_QRTR) := qrtr.o ns.o
+obj-$(CONFIG_QRTR) += qrtr.o
+qrtr-y := af_qrtr.o ns.o
obj-$(CONFIG_QRTR_SMD) += qrtr-smd.o
qrtr-smd-y := smd.o
diff --git a/net/qrtr/qrtr.c b/net/qrtr/af_qrtr.c
index ec2322529727..ec2322529727 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/af_qrtr.c
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index cf7d974e0f61..30a1cf4c16c6 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -109,7 +109,7 @@ char *rose2asc(char *buf, const rose_address *addr)
/*
* Compare two ROSE addresses, 0 == equal.
*/
-int rosecmp(rose_address *addr1, rose_address *addr2)
+int rosecmp(const rose_address *addr1, const rose_address *addr2)
{
int i;
@@ -123,7 +123,8 @@ int rosecmp(rose_address *addr1, rose_address *addr2)
/*
* Compare two ROSE addresses for only mask digits, 0 == equal.
*/
-int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
+int rosecmpm(const rose_address *addr1, const rose_address *addr2,
+ unsigned short mask)
{
unsigned int i, j;
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 051804fbee17..f1a76a5820f1 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -66,10 +66,10 @@ static int rose_set_mac_address(struct net_device *dev, void *addr)
if (err)
return err;
- rose_del_loopback_node((rose_address *)dev->dev_addr);
+ rose_del_loopback_node((const rose_address *)dev->dev_addr);
}
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
@@ -78,7 +78,7 @@ static int rose_open(struct net_device *dev)
{
int err;
- err = rose_add_loopback_node((rose_address *)dev->dev_addr);
+ err = rose_add_loopback_node((const rose_address *)dev->dev_addr);
if (err)
return err;
@@ -90,7 +90,7 @@ static int rose_open(struct net_device *dev)
static int rose_close(struct net_device *dev)
{
netif_stop_queue(dev);
- rose_del_loopback_node((rose_address *)dev->dev_addr);
+ rose_del_loopback_node((const rose_address *)dev->dev_addr);
return 0;
}
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index f6102e6f5161..8b96a56d3a49 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -94,11 +94,11 @@ static void rose_t0timer_expiry(struct timer_list *t)
*/
static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
{
- ax25_address *rose_call;
+ const ax25_address *rose_call;
ax25_cb *ax25s;
if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
- rose_call = (ax25_address *)neigh->dev->dev_addr;
+ rose_call = (const ax25_address *)neigh->dev->dev_addr;
else
rose_call = &rose_callsign;
@@ -117,11 +117,11 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
*/
static int rose_link_up(struct rose_neigh *neigh)
{
- ax25_address *rose_call;
+ const ax25_address *rose_call;
ax25_cb *ax25s;
if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
- rose_call = (ax25_address *)neigh->dev->dev_addr;
+ rose_call = (const ax25_address *)neigh->dev->dev_addr;
else
rose_call = &rose_callsign;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index c0e04c261a15..e2e6b6b78578 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -401,7 +401,7 @@ void rose_add_loopback_neigh(void)
/*
* Add a loopback node.
*/
-int rose_add_loopback_node(rose_address *address)
+int rose_add_loopback_node(const rose_address *address)
{
struct rose_node *rose_node;
int err = 0;
@@ -446,7 +446,7 @@ out:
/*
* Delete a loopback node.
*/
-void rose_del_loopback_node(rose_address *address)
+void rose_del_loopback_node(const rose_address *address)
{
struct rose_node *rose_node;
@@ -629,7 +629,8 @@ struct net_device *rose_dev_get(rose_address *addr)
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
- if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) {
+ if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE &&
+ rosecmp(addr, (const rose_address *)dev->dev_addr) == 0) {
dev_hold(dev);
goto out;
}
@@ -646,7 +647,8 @@ static int rose_dev_exists(rose_address *addr)
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
- if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0)
+ if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE &&
+ rosecmp(addr, (const rose_address *)dev->dev_addr) == 0)
goto out;
}
dev = NULL;
diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c
index 4e565eeab426..be61d6f5be8d 100644
--- a/net/rxrpc/rtt.c
+++ b/net/rxrpc/rtt.c
@@ -22,7 +22,7 @@ static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer)
{
- return _usecs_to_jiffies((peer->srtt_us >> 3) + peer->rttvar_us);
+ return usecs_to_jiffies((peer->srtt_us >> 3) + peer->rttvar_us);
}
static u32 rxrpc_bound_rto(u32 rto)
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 7dd3a2dc5fa4..3258da3d5bed 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -480,16 +480,18 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
atomic_set(&p->tcfa_bindcnt, 1);
if (cpustats) {
- p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+ p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
if (!p->cpu_bstats)
goto err1;
- p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+ p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
if (!p->cpu_bstats_hw)
goto err2;
p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
if (!p->cpu_qstats)
goto err3;
}
+ gnet_stats_basic_sync_init(&p->tcfa_bstats);
+ gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
spin_lock_init(&p->tcfa_lock);
p->tcfa_index = index;
p->tcfa_tm.install = jiffies;
@@ -499,7 +501,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
if (est) {
err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
&p->tcfa_rate_est,
- &p->tcfa_lock, NULL, est);
+ &p->tcfa_lock, false, est);
if (err)
goto err4;
}
@@ -1126,13 +1128,13 @@ void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
u64 drops, bool hw)
{
if (a->cpu_bstats) {
- _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
+ _bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
this_cpu_ptr(a->cpu_qstats)->drops += drops;
if (hw)
- _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
- bytes, packets);
+ _bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
+ bytes, packets);
return;
}
@@ -1171,9 +1173,10 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
if (err < 0)
goto errout;
- if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
- gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw,
- &p->tcfa_bstats_hw) < 0 ||
+ if (gnet_stats_copy_basic(&d, p->cpu_bstats,
+ &p->tcfa_bstats, false) < 0 ||
+ gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
+ &p->tcfa_bstats_hw, false) < 0 ||
gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
gnet_stats_copy_queue(&d, p->cpu_qstats,
&p->tcfa_qstats,
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 5c36013339e1..f2bf896331a5 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -41,7 +41,7 @@ static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
int action, filter_res;
tcf_lastuse_update(&prog->tcf_tm);
- bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
filter = rcu_dereference(prog->filter);
if (at_ingress) {
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index ad9df0cb4b98..90866ae45573 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -960,6 +960,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
tmpl = p->tmpl;
tcf_lastuse_update(&c->tcf_tm);
+ tcf_action_update_bstats(&c->common, skb);
if (clear) {
qdisc_skb_cb(skb)->post_ct = false;
@@ -1049,7 +1050,6 @@ out_push:
qdisc_skb_cb(skb)->post_ct = true;
out_clear:
- tcf_action_update_bstats(&c->common, skb);
if (defrag)
qdisc_skb_cb(skb)->pkt_len = skb->len;
return retval;
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 7064a365a1a9..b757f90a2d58 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -718,7 +718,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
u8 *tlv_data;
u16 metalen;
- bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
tcf_lastuse_update(&ife->tcf_tm);
if (skb_at_tc_ingress(skb))
@@ -806,7 +806,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
exceed_mtu = true;
}
- bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
tcf_lastuse_update(&ife->tcf_tm);
if (!metalen) { /* no metadata to send */
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index e4529b428cf4..8faa4c58305e 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -59,7 +59,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
int ret, mac_len;
tcf_lastuse_update(&m->tcf_tm);
- bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(m->common.cpu_bstats), skb);
/* Ensure 'data' points at mac_header prior calling mpls manipulating
* functions.
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 832157a840fc..9e77ba8401e5 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -125,7 +125,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
police->common.cpu_bstats,
&police->tcf_rate_est,
&police->tcf_lock,
- NULL, est);
+ false, est);
if (err)
goto failure;
} else if (tb[TCA_POLICE_AVRATE] &&
@@ -248,7 +248,7 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
int ret;
tcf_lastuse_update(&police->tcf_tm);
- bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(police->common.cpu_bstats), skb);
ret = READ_ONCE(police->tcf_action);
p = rcu_dereference_bh(police->params);
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 230501eb9e06..ce859b0e0deb 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -163,7 +163,7 @@ static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
int retval;
tcf_lastuse_update(&s->tcf_tm);
- bstats_cpu_update(this_cpu_ptr(s->common.cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(s->common.cpu_bstats), skb);
retval = READ_ONCE(s->tcf_action);
psample_group = rcu_dereference_bh(s->psample_group);
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index cbbe1861d3a2..e617ab4505ca 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -36,7 +36,8 @@ static int tcf_simp_act(struct sk_buff *skb, const struct tc_action *a,
* then it would look like "hello_3" (without quotes)
*/
pr_info("simple: %s_%llu\n",
- (char *)d->tcfd_defdata, d->tcf_bstats.packets);
+ (char *)d->tcfd_defdata,
+ u64_stats_read(&d->tcf_bstats.packets));
spin_unlock(&d->tcf_lock);
return d->tcf_action;
}
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 605418538347..d30ecbfc8f84 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -31,7 +31,7 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
int action;
tcf_lastuse_update(&d->tcf_tm);
- bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);
params = rcu_dereference_bh(d->params);
action = READ_ONCE(d->tcf_action);
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index ecb9ee666095..9b6b52c5e24e 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -31,7 +31,7 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
u64 flags;
tcf_lastuse_update(&d->tcf_tm);
- bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);
action = READ_ONCE(d->tcf_action);
if (unlikely(action == TC_ACT_SHOT))
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 23b21253b3c3..eb6345a027e1 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -2188,18 +2188,24 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
arg->count = arg->skip;
+ rcu_read_lock();
idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
/* don't return filters that are being deleted */
if (!refcount_inc_not_zero(&f->refcnt))
continue;
+ rcu_read_unlock();
+
if (arg->fn(tp, f, arg) < 0) {
__fl_put(f);
arg->stop = 1;
+ rcu_read_lock();
break;
}
__fl_put(f);
arg->count++;
+ rcu_read_lock();
}
+ rcu_read_unlock();
arg->cookie = id;
}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 5e90e9b160e3..efcd0b5e9a32 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -507,20 +507,27 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
list_for_each_entry(stab, &qdisc_stab_list, list) {
if (memcmp(&stab->szopts, s, sizeof(*s)))
continue;
- if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
+ if (tsize > 0 &&
+ memcmp(stab->data, tab, flex_array_size(stab, data, tsize)))
continue;
stab->refcnt++;
return stab;
}
- stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
+ if (s->size_log > STAB_SIZE_LOG_MAX ||
+ s->cell_log > STAB_SIZE_LOG_MAX) {
+ NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
+ return ERR_PTR(-EINVAL);
+ }
+
+ stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL);
if (!stab)
return ERR_PTR(-ENOMEM);
stab->refcnt = 1;
stab->szopts = *s;
if (tsize > 0)
- memcpy(stab->data, tab, tsize * sizeof(u16));
+ memcpy(stab->data, tab, flex_array_size(stab, data, tsize));
list_add_tail(&stab->list, &qdisc_stab_list);
@@ -878,7 +885,7 @@ static void qdisc_offload_graft_root(struct net_device *dev,
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
u32 portid, u32 seq, u16 flags, int event)
{
- struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
struct gnet_stats_queue __percpu *cpu_qstats = NULL;
struct tcmsg *tcm;
struct nlmsghdr *nlh;
@@ -936,8 +943,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
cpu_qstats = q->cpu_qstats;
}
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
- &d, cpu_bstats, &q->bstats) < 0 ||
+ if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 ||
gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
goto nla_put_failure;
@@ -1258,26 +1264,17 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
rcu_assign_pointer(sch->stab, stab);
}
if (tca[TCA_RATE]) {
- seqcount_t *running;
-
err = -EOPNOTSUPP;
if (sch->flags & TCQ_F_MQROOT) {
NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
goto err_out4;
}
- if (sch->parent != TC_H_ROOT &&
- !(sch->flags & TCQ_F_INGRESS) &&
- (!p || !(p->flags & TCQ_F_MQROOT)))
- running = qdisc_root_sleeping_running(sch);
- else
- running = &sch->running;
-
err = gen_new_estimator(&sch->bstats,
sch->cpu_bstats,
&sch->rate_est,
NULL,
- running,
+ true,
tca[TCA_RATE]);
if (err) {
NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
@@ -1353,7 +1350,7 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
sch->cpu_bstats,
&sch->rate_est,
NULL,
- qdisc_root_sleeping_running(sch),
+ true,
tca[TCA_RATE]);
}
out:
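
The qdisc_get_stab() hunk above replaces open-coded tsize * sizeof(u16) arithmetic with the overflow-aware helpers from <linux/overflow.h>: struct_size() for the allocation and flex_array_size() for copies into the trailing array. A sketch on an illustrative struct, not the real qdisc_size_table:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct table {
	unsigned int refcnt;
	u16 data[];		/* flexible array member */
};

static struct table *table_dup(const u16 *src, size_t n)
{
	/* struct_size() saturates instead of wrapping on overflow,
	 * so a huge n makes kmalloc() fail rather than under-allocate
	 */
	struct table *t = kmalloc(struct_size(t, data, n), GFP_KERNEL);

	if (!t)
		return NULL;
	t->refcnt = 1;
	memcpy(t->data, src, flex_array_size(t, data, n));
	return t;
}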
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 7d8518176b45..4c8e994cf0a5 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -52,7 +52,7 @@ struct atm_flow_data {
struct atm_qdisc_data *parent; /* parent qdisc */
struct socket *sock; /* for closing */
int ref; /* reference count */
- struct gnet_stats_basic_packed bstats;
+ struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
struct list_head list;
struct atm_flow_data *excess; /* flow for excess traffic;
@@ -548,6 +548,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
INIT_LIST_HEAD(&p->flows);
INIT_LIST_HEAD(&p->link.list);
+ gnet_stats_basic_sync_init(&p->link.bstats);
list_add(&p->link.list, &p->flows);
p->link.q = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, sch->handle, extack);
@@ -652,8 +653,7 @@ atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
{
struct atm_flow_data *flow = (struct atm_flow_data *)arg;
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, NULL, &flow->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &flow->bstats, true) < 0 ||
gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
return -1;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index e0da15530f0e..02d9f0dfe356 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -116,7 +116,7 @@ struct cbq_class {
long avgidle;
long deficit; /* Saved deficit for WRR */
psched_time_t penalized;
- struct gnet_stats_basic_packed bstats;
+ struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
struct net_rate_estimator __rcu *rate_est;
struct tc_cbq_xstats xstats;
@@ -565,8 +565,7 @@ cbq_update(struct cbq_sched_data *q)
long avgidle = cl->avgidle;
long idle;
- cl->bstats.packets++;
- cl->bstats.bytes += len;
+ _bstats_update(&cl->bstats, len, 1);
/*
* (now - last) is total time between packet right edges.
@@ -1384,8 +1383,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (cl->undertime != PSCHED_PASTPERFECT)
cl->xstats.undertime = cl->undertime - q->now;
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, NULL, &cl->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
return -1;
@@ -1519,7 +1517,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
NULL,
- qdisc_root_sleeping_running(sch),
+ true,
tca[TCA_RATE]);
if (err) {
NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
@@ -1611,6 +1609,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (cl == NULL)
goto failure;
+ gnet_stats_basic_sync_init(&cl->bstats);
err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
if (err) {
kfree(cl);
@@ -1619,9 +1618,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
- NULL,
- qdisc_root_sleeping_running(sch),
- tca[TCA_RATE]);
+ NULL, true, tca[TCA_RATE]);
if (err) {
NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
tcf_block_put(cl->block);
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 642cd179b7a7..18e4f7a0b291 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -19,7 +19,7 @@ struct drr_class {
struct Qdisc_class_common common;
unsigned int filter_cnt;
- struct gnet_stats_basic_packed bstats;
+ struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
struct net_rate_estimator __rcu *rate_est;
struct list_head alist;
@@ -85,8 +85,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
- NULL,
- qdisc_root_sleeping_running(sch),
+ NULL, true,
tca[TCA_RATE]);
if (err) {
NL_SET_ERR_MSG(extack, "Failed to replace estimator");
@@ -106,6 +105,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl == NULL)
return -ENOBUFS;
+ gnet_stats_basic_sync_init(&cl->bstats);
cl->common.classid = classid;
cl->quantum = quantum;
cl->qdisc = qdisc_create_dflt(sch->dev_queue,
@@ -118,9 +118,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
- NULL,
- qdisc_root_sleeping_running(sch),
- tca[TCA_RATE]);
+ NULL, true, tca[TCA_RATE]);
if (err) {
NL_SET_ERR_MSG(extack, "Failed to replace estimator");
qdisc_put(cl->qdisc);
@@ -267,8 +265,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (qlen)
xstats.deficit = cl->deficit;
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, NULL, &cl->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
return -1;
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index 1f857ffd1ac2..0eae9ff5edf6 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -41,7 +41,7 @@ struct ets_class {
struct Qdisc *qdisc;
u32 quantum;
u32 deficit;
- struct gnet_stats_basic_packed bstats;
+ struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
};
@@ -325,8 +325,7 @@ static int ets_class_dump_stats(struct Qdisc *sch, unsigned long arg,
struct ets_class *cl = ets_class_from_arg(sch, arg);
struct Qdisc *cl_q = cl->qdisc;
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, NULL, &cl_q->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats, true) < 0 ||
qdisc_qstats_copy(d, cl_q) < 0)
return -1;
@@ -661,7 +660,6 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
q->nbands = nbands;
for (i = nstrict; i < q->nstrict; i++) {
- INIT_LIST_HEAD(&q->classes[i].alist);
if (q->classes[i].qdisc->q.qlen) {
list_add_tail(&q->classes[i].alist, &q->active);
q->classes[i].deficit = quanta[i];
@@ -687,7 +685,11 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
ets_offload_change(sch);
for (i = q->nbands; i < oldbands; i++) {
qdisc_put(q->classes[i].qdisc);
- memset(&q->classes[i], 0, sizeof(q->classes[i]));
+ q->classes[i].qdisc = NULL;
+ q->classes[i].quantum = 0;
+ q->classes[i].deficit = 0;
+ gnet_stats_basic_sync_init(&q->classes[i].bstats);
+ memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats));
}
return 0;
}
@@ -696,7 +698,7 @@ static int ets_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
struct ets_sched *q = qdisc_priv(sch);
- int err;
+ int err, i;
if (!opt)
return -EINVAL;
@@ -706,6 +708,9 @@ static int ets_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
return err;
INIT_LIST_HEAD(&q->active);
+ for (i = 0; i < TCQ_ETS_MAX_BANDS; i++)
+ INIT_LIST_HEAD(&q->classes[i].alist);
+
return ets_qdisc_change(sch, opt, extack);
}
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index a579a4131d22..e1040421b797 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -233,6 +233,9 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
return 0;
+ if (!q->ops->change)
+ return 0;
+
nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
if (nla) {
nla->nla_type = RTM_NEWQDISC;
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index bb0cd6d3d2c2..839e1235db05 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -362,6 +362,8 @@ static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR] = { .type = NLA_U8 },
+ [TCA_FQ_CODEL_CE_THRESHOLD_MASK] = { .type = NLA_U8 },
};
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
@@ -408,6 +410,11 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
}
+ if (tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR])
+ q->cparams.ce_threshold_selector = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]);
+ if (tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK])
+ q->cparams.ce_threshold_mask = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]);
+
if (tb[TCA_FQ_CODEL_INTERVAL]) {
u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
@@ -544,10 +551,15 @@ static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
q->flows_cnt))
goto nla_put_failure;
- if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
- nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
- codel_time_to_us(q->cparams.ce_threshold)))
- goto nla_put_failure;
+ if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD) {
+ if (nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
+ codel_time_to_us(q->cparams.ce_threshold)))
+ goto nla_put_failure;
+ if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR, q->cparams.ce_threshold_selector))
+ goto nla_put_failure;
+ if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK, q->cparams.ce_threshold_mask))
+ goto nla_put_failure;
+ }
return nla_nest_end(skb, opts);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 8c64a552a64f..b0ff0dff2773 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -304,8 +304,8 @@ trace:
/*
* Transmit possibly several skbs, and handle the return status as
- * required. Owning running seqcount bit guarantees that
- * only one CPU can execute this function.
+ * required. Owning qdisc running bit guarantees that only one CPU
+ * can execute this function.
*
* Returns to the caller:
* false - hardware queue frozen backoff
@@ -606,7 +606,6 @@ struct Qdisc noop_qdisc = {
.ops = &noop_qdisc_ops,
.q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
.dev_queue = &noop_netdev_queue,
- .running = SEQCNT_ZERO(noop_qdisc.running),
.busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
.gso_skb = {
.next = (struct sk_buff *)&noop_qdisc.gso_skb,
@@ -867,7 +866,6 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
EXPORT_SYMBOL(pfifo_fast_ops);
static struct lock_class_key qdisc_tx_busylock;
-static struct lock_class_key qdisc_running_key;
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
@@ -892,11 +890,12 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
__skb_queue_head_init(&sch->gso_skb);
__skb_queue_head_init(&sch->skb_bad_txq);
qdisc_skb_head_init(&sch->q);
+ gnet_stats_basic_sync_init(&sch->bstats);
spin_lock_init(&sch->q.lock);
if (ops->static_flags & TCQ_F_CPUSTATS) {
sch->cpu_bstats =
- netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+ netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
if (!sch->cpu_bstats)
goto errout1;
@@ -916,10 +915,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
lockdep_set_class(&sch->seqlock,
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
- seqcount_init(&sch->running);
- lockdep_set_class(&sch->running,
- dev->qdisc_running_key ?: &qdisc_running_key);
-
sch->ops = ops;
sch->flags = ops->static_flags;
sch->enqueue = ops->enqueue;
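The removed seqcount is part of a series-wide switch: byte/packet counters move from one qdisc-wide "running" seqcount to per-counter u64_stats_sync protection (struct gnet_stats_basic_sync). A sketch of the reader side, assuming that layout (simplified; on 64-bit hosts the fetch loop compiles away):

	static void read_bstats(struct gnet_stats_basic_sync *b,
				u64 *bytes, u64 *packets)
	{
		unsigned int start;

		do {	/* retry if a writer raced with us (32-bit only) */
			start = u64_stats_fetch_begin(&b->syncp);
			*bytes = u64_stats_read(&b->bytes);
			*packets = u64_stats_read(&b->packets);
		} while (u64_stats_fetch_retry(&b->syncp, start));
	}

This is also why qdisc_alloc() now calls gnet_stats_basic_sync_init(): the embedded syncp must be initialized before first use.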
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 621dc6afde8f..72de08ef8335 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -353,6 +353,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
{
struct gred_sched *table = qdisc_priv(sch);
struct tc_gred_qopt_offload *hw_stats;
+ u64 bytes = 0, packets = 0;
unsigned int i;
int ret;
@@ -364,9 +365,11 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
hw_stats->handle = sch->handle;
hw_stats->parent = sch->parent;
- for (i = 0; i < MAX_DPs; i++)
+ for (i = 0; i < MAX_DPs; i++) {
+ gnet_stats_basic_sync_init(&hw_stats->stats.bstats[i]);
if (table->tab[i])
hw_stats->stats.xstats[i] = &table->tab[i]->stats;
+ }
ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
/* Even if the driver returns failure, adjust the stats - in case offload
@@ -375,19 +378,19 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
for (i = 0; i < MAX_DPs; i++) {
if (!table->tab[i])
continue;
- table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
- table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
+ table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets);
+ table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
- _bstats_update(&sch->bstats,
- hw_stats->stats.bstats[i].bytes,
- hw_stats->stats.bstats[i].packets);
+ bytes += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
+ packets += u64_stats_read(&hw_stats->stats.bstats[i].packets);
sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
sch->qstats.drops += hw_stats->stats.qstats[i].drops;
sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
}
+ _bstats_update(&sch->bstats, bytes, packets);
kfree(hw_stats);
return ret;
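Note the shape of the gred conversion: reads of the offloaded counters go through u64_stats_read(), and the per-virtual-queue totals are summed into locals so _bstats_update() opens the writer-side critical section once per dump rather than once per queue. The pattern, distilled (hw[] stands in for hw_stats->stats.bstats[]):

	u64 bytes = 0, packets = 0;

	for (i = 0; i < MAX_DPs; i++) {	/* sum outside the write section */
		bytes += u64_stats_read(&hw[i].bytes);
		packets += u64_stats_read(&hw[i].packets);
	}
	_bstats_update(&sch->bstats, bytes, packets);	/* one begin/end pair */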
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index b7ac30cca035..d3979a6000e7 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -111,7 +111,7 @@ enum hfsc_class_flags {
struct hfsc_class {
struct Qdisc_class_common cl_common;
- struct gnet_stats_basic_packed bstats;
+ struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
struct net_rate_estimator __rcu *rate_est;
struct tcf_proto __rcu *filter_list; /* filter list */
@@ -965,7 +965,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
NULL,
- qdisc_root_sleeping_running(sch),
+ true,
tca[TCA_RATE]);
if (err)
return err;
@@ -1033,9 +1033,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
- NULL,
- qdisc_root_sleeping_running(sch),
- tca[TCA_RATE]);
+ NULL, true, tca[TCA_RATE]);
if (err) {
tcf_block_put(cl->block);
kfree(cl);
@@ -1328,7 +1326,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
xstats.work = cl->cl_total;
xstats.rtwork = cl->cl_cumul;
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
return -1;
@@ -1406,6 +1404,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
if (err)
return err;
+ gnet_stats_basic_sync_init(&q->root.bstats);
q->root.cl_common.classid = sch->handle;
q->root.sched = q;
q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 5067a6e5d4fd..cf1d45db4e84 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -113,8 +113,8 @@ struct htb_class {
/*
* Written often fields
*/
- struct gnet_stats_basic_packed bstats;
- struct gnet_stats_basic_packed bstats_bias;
+ struct gnet_stats_basic_sync bstats;
+ struct gnet_stats_basic_sync bstats_bias;
struct tc_htb_xstats xstats; /* our special stats */
/* token bucket parameters */
@@ -1308,10 +1308,11 @@ nla_put_failure:
static void htb_offload_aggregate_stats(struct htb_sched *q,
struct htb_class *cl)
{
+ u64 bytes = 0, packets = 0;
struct htb_class *c;
unsigned int i;
- memset(&cl->bstats, 0, sizeof(cl->bstats));
+ gnet_stats_basic_sync_init(&cl->bstats);
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
@@ -1323,14 +1324,15 @@ static void htb_offload_aggregate_stats(struct htb_sched *q,
if (p != cl)
continue;
- cl->bstats.bytes += c->bstats_bias.bytes;
- cl->bstats.packets += c->bstats_bias.packets;
+ bytes += u64_stats_read(&c->bstats_bias.bytes);
+ packets += u64_stats_read(&c->bstats_bias.packets);
if (c->level == 0) {
- cl->bstats.bytes += c->leaf.q->bstats.bytes;
- cl->bstats.packets += c->leaf.q->bstats.packets;
+ bytes += u64_stats_read(&c->leaf.q->bstats.bytes);
+ packets += u64_stats_read(&c->leaf.q->bstats.packets);
}
}
}
+ _bstats_update(&cl->bstats, bytes, packets);
}
static int
@@ -1357,16 +1359,16 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
if (cl->leaf.q)
cl->bstats = cl->leaf.q->bstats;
else
- memset(&cl->bstats, 0, sizeof(cl->bstats));
- cl->bstats.bytes += cl->bstats_bias.bytes;
- cl->bstats.packets += cl->bstats_bias.packets;
+ gnet_stats_basic_sync_init(&cl->bstats);
+ _bstats_update(&cl->bstats,
+ u64_stats_read(&cl->bstats_bias.bytes),
+ u64_stats_read(&cl->bstats_bias.packets));
} else {
htb_offload_aggregate_stats(q, cl);
}
}
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, NULL, &cl->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
return -1;
@@ -1578,8 +1580,9 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
WARN_ON(old != q);
if (cl->parent) {
- cl->parent->bstats_bias.bytes += q->bstats.bytes;
- cl->parent->bstats_bias.packets += q->bstats.packets;
+ _bstats_update(&cl->parent->bstats_bias,
+ u64_stats_read(&q->bstats.bytes),
+ u64_stats_read(&q->bstats.packets));
}
offload_opt = (struct tc_htb_qopt_offload) {
@@ -1849,6 +1852,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if (!cl)
goto failure;
+ gnet_stats_basic_sync_init(&cl->bstats);
+ gnet_stats_basic_sync_init(&cl->bstats_bias);
+
err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
if (err) {
kfree(cl);
@@ -1858,7 +1864,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
err = gen_new_estimator(&cl->bstats, NULL,
&cl->rate_est,
NULL,
- qdisc_root_sleeping_running(sch),
+ true,
tca[TCA_RATE] ? : &est.nla);
if (err)
goto err_block_put;
@@ -1922,8 +1928,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
htb_graft_helper(dev_queue, old_q);
goto err_kill_estimator;
}
- parent->bstats_bias.bytes += old_q->bstats.bytes;
- parent->bstats_bias.packets += old_q->bstats.packets;
+ _bstats_update(&parent->bstats_bias,
+ u64_stats_read(&old_q->bstats.bytes),
+ u64_stats_read(&old_q->bstats.packets));
qdisc_put(old_q);
}
new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
@@ -1983,7 +1990,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
NULL,
- qdisc_root_sleeping_running(sch),
+ true,
tca[TCA_RATE]);
if (err)
return err;
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index e04f1a87642b..83d2e54bf303 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -130,10 +130,9 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
struct net_device *dev = qdisc_dev(sch);
struct Qdisc *qdisc;
unsigned int ntx;
- __u32 qlen = 0;
sch->q.qlen = 0;
- memset(&sch->bstats, 0, sizeof(sch->bstats));
+ gnet_stats_basic_sync_init(&sch->bstats);
memset(&sch->qstats, 0, sizeof(sch->qstats));
/* MQ supports lockless qdiscs. However, statistics accounting needs
@@ -145,25 +144,11 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
spin_lock_bh(qdisc_lock(qdisc));
- if (qdisc_is_percpu_stats(qdisc)) {
- qlen = qdisc_qlen_sum(qdisc);
- __gnet_stats_copy_basic(NULL, &sch->bstats,
- qdisc->cpu_bstats,
- &qdisc->bstats);
- __gnet_stats_copy_queue(&sch->qstats,
- qdisc->cpu_qstats,
- &qdisc->qstats, qlen);
- sch->q.qlen += qlen;
- } else {
- sch->q.qlen += qdisc->q.qlen;
- sch->bstats.bytes += qdisc->bstats.bytes;
- sch->bstats.packets += qdisc->bstats.packets;
- sch->qstats.qlen += qdisc->qstats.qlen;
- sch->qstats.backlog += qdisc->qstats.backlog;
- sch->qstats.drops += qdisc->qstats.drops;
- sch->qstats.requeues += qdisc->qstats.requeues;
- sch->qstats.overlimits += qdisc->qstats.overlimits;
- }
+ gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
+ &qdisc->bstats, false);
+ gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
+ &qdisc->qstats);
+ sch->q.qlen += qdisc_qlen(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
}
@@ -246,8 +231,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
sch = dev_queue->qdisc_sleeping;
- if (gnet_stats_copy_basic(&sch->running, d, sch->cpu_bstats,
- &sch->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 ||
qdisc_qstats_copy(d, sch) < 0)
return -1;
return 0;
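gnet_stats_add_basic()/gnet_stats_add_queue() absorb the old open-coded percpu-vs-plain branches. A sketch of the basic-stats helper's contract (simplified: the real helper also runs the u64_stats fetch/retry loop when summing per-CPU counters):

	static void add_basic_sketch(struct gnet_stats_basic_sync *dst,
				     struct gnet_stats_basic_sync __percpu *cpu,
				     struct gnet_stats_basic_sync *src)
	{
		u64 bytes = 0, packets = 0;
		int i;

		if (cpu) {	/* lockless qdisc: fold per-CPU counters */
			for_each_possible_cpu(i) {
				struct gnet_stats_basic_sync *b = per_cpu_ptr(cpu, i);

				bytes += u64_stats_read(&b->bytes);
				packets += u64_stats_read(&b->packets);
			}
		} else {	/* classic qdisc: one counter pair */
			bytes = u64_stats_read(&src->bytes);
			packets = u64_stats_read(&src->packets);
		}
		_bstats_update(dst, bytes, packets);
	}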
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 0bc10234e306..b29f3453c6ea 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -390,7 +390,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
unsigned int ntx, tc;
sch->q.qlen = 0;
- memset(&sch->bstats, 0, sizeof(sch->bstats));
+ gnet_stats_basic_sync_init(&sch->bstats);
memset(&sch->qstats, 0, sizeof(sch->qstats));
/* MQ supports lockless qdiscs. However, statistics accounting needs
@@ -402,25 +402,11 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
spin_lock_bh(qdisc_lock(qdisc));
- if (qdisc_is_percpu_stats(qdisc)) {
- __u32 qlen = qdisc_qlen_sum(qdisc);
-
- __gnet_stats_copy_basic(NULL, &sch->bstats,
- qdisc->cpu_bstats,
- &qdisc->bstats);
- __gnet_stats_copy_queue(&sch->qstats,
- qdisc->cpu_qstats,
- &qdisc->qstats, qlen);
- sch->q.qlen += qlen;
- } else {
- sch->q.qlen += qdisc->q.qlen;
- sch->bstats.bytes += qdisc->bstats.bytes;
- sch->bstats.packets += qdisc->bstats.packets;
- sch->qstats.backlog += qdisc->qstats.backlog;
- sch->qstats.drops += qdisc->qstats.drops;
- sch->qstats.requeues += qdisc->qstats.requeues;
- sch->qstats.overlimits += qdisc->qstats.overlimits;
- }
+ gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
+ &qdisc->bstats, false);
+ gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
+ &qdisc->qstats);
+ sch->q.qlen += qdisc_qlen(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
}
@@ -512,12 +498,13 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
{
if (cl >= TC_H_MIN_PRIORITY) {
int i;
- __u32 qlen = 0;
+ __u32 qlen;
struct gnet_stats_queue qstats = {0};
- struct gnet_stats_basic_packed bstats = {0};
+ struct gnet_stats_basic_sync bstats;
struct net_device *dev = qdisc_dev(sch);
struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];
+ gnet_stats_basic_sync_init(&bstats);
/* Drop the lock here; it will be reclaimed before touching
* statistics. This is required because the d->lock we
* hold here is the lock on dev_queue->qdisc_sleeping
@@ -529,37 +516,31 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
for (i = tc.offset; i < tc.offset + tc.count; i++) {
struct netdev_queue *q = netdev_get_tx_queue(dev, i);
struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
- struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
- struct gnet_stats_queue __percpu *cpu_qstats = NULL;
spin_lock_bh(qdisc_lock(qdisc));
- if (qdisc_is_percpu_stats(qdisc)) {
- cpu_bstats = qdisc->cpu_bstats;
- cpu_qstats = qdisc->cpu_qstats;
- }
- qlen = qdisc_qlen_sum(qdisc);
- __gnet_stats_copy_basic(NULL, &sch->bstats,
- cpu_bstats, &qdisc->bstats);
- __gnet_stats_copy_queue(&sch->qstats,
- cpu_qstats,
- &qdisc->qstats,
- qlen);
+ gnet_stats_add_basic(&bstats, qdisc->cpu_bstats,
+ &qdisc->bstats, false);
+ gnet_stats_add_queue(&qstats, qdisc->cpu_qstats,
+ &qdisc->qstats);
+ sch->q.qlen += qdisc_qlen(qdisc);
+
spin_unlock_bh(qdisc_lock(qdisc));
}
+ qlen = qdisc_qlen(sch) + qstats.qlen;
/* Reclaim root sleeping lock before completing stats */
if (d->lock)
spin_lock_bh(d->lock);
- if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &bstats, false) < 0 ||
gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
return -1;
} else {
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
sch = dev_queue->qdisc_sleeping;
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d,
- sch->cpu_bstats, &sch->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, sch->cpu_bstats,
+ &sch->bstats, true) < 0 ||
qdisc_qstats_copy(d, sch) < 0)
return -1;
}
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index e282e7382117..cd8ab90c4765 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -338,8 +338,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct Qdisc *cl_q;
cl_q = q->queues[cl - 1];
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, cl_q->cpu_bstats, &cl_q->bstats, true) < 0 ||
qdisc_qstats_copy(d, cl_q) < 0)
return -1;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 0c345e43a09a..ecbb10db1111 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -785,7 +785,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
if (!n || n > NETEM_DIST_MAX)
return -EINVAL;
- d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
+ d = kvmalloc(struct_size(d, table, n), GFP_KERNEL);
if (!d)
return -ENOMEM;
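struct_size(d, table, n) computes sizeof(*d) + n * sizeof(d->table[0]) with overflow checking, saturating to SIZE_MAX so an absurd n makes kvmalloc() fail cleanly instead of wrapping to a short allocation. Minimal usage against the actual disttable layout:

	struct disttable {
		u32 size;
		s16 table[];	/* flexible array member */
	};

	d = kvmalloc(struct_size(d, table, n), GFP_KERNEL);	/* overflow-safe */

(NETEM_DIST_MAX already bounds n here, so this is hardening and cleanup rather than a fix for a reachable overflow.)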
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 03fdf31ccb6a..3b8d7197c06b 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -361,8 +361,8 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct Qdisc *cl_q;
cl_q = q->queues[cl - 1];
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, cl_q->cpu_bstats,
+ &cl_q->bstats, true) < 0 ||
qdisc_qstats_copy(d, cl_q) < 0)
return -1;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 58a9d42b52b8..0b7f9ba28deb 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -131,7 +131,7 @@ struct qfq_class {
unsigned int filter_cnt;
- struct gnet_stats_basic_packed bstats;
+ struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
struct net_rate_estimator __rcu *rate_est;
struct Qdisc *qdisc;
@@ -451,7 +451,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
NULL,
- qdisc_root_sleeping_running(sch),
+ true,
tca[TCA_RATE]);
if (err)
return err;
@@ -465,6 +465,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl == NULL)
return -ENOBUFS;
+ gnet_stats_basic_sync_init(&cl->bstats);
cl->common.classid = classid;
cl->deficit = lmax;
@@ -477,7 +478,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
err = gen_new_estimator(&cl->bstats, NULL,
&cl->rate_est,
NULL,
- qdisc_root_sleeping_running(sch),
+ true,
tca[TCA_RATE]);
if (err)
goto destroy_class;
@@ -639,8 +640,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
xstats.weight = cl->agg->class_weight;
xstats.lmax = cl->agg->lmax;
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, NULL, &cl->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
qdisc_qstats_copy(d, cl->qdisc) < 0)
return -1;
@@ -1234,8 +1234,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- cl->bstats.bytes += len;
- cl->bstats.packets += gso_segs;
+ _bstats_update(&cl->bstats, len, gso_segs);
sch->qstats.backlog += len;
++sch->q.qlen;
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 1ab2fc933a21..9ab068fa2672 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1641,6 +1641,10 @@ static void taprio_destroy(struct Qdisc *sch)
list_del(&q->taprio_list);
spin_unlock(&taprio_list_lock);
+ /* Note that taprio_reset() might not be called if an error
+ * happens in qdisc_create(), after taprio_init() has been called.
+ */
+ hrtimer_cancel(&q->advance_timer);
taprio_disable_offload(dev, q, NULL);
@@ -1973,7 +1977,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
sch = dev_queue->qdisc_sleeping;
- if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 ||
qdisc_qstats_copy(d, sch) < 0)
return -1;
return 0;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 78e79029dc63..72102277449e 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -184,6 +184,20 @@ static int tbf_offload_dump(struct Qdisc *sch)
return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_TBF, &qopt);
}
+static void tbf_offload_graft(struct Qdisc *sch, struct Qdisc *new,
+ struct Qdisc *old, struct netlink_ext_ack *extack)
+{
+ struct tc_tbf_qopt_offload graft_offload = {
+ .handle = sch->handle,
+ .parent = sch->parent,
+ .child_handle = new->handle,
+ .command = TC_TBF_GRAFT,
+ };
+
+ qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
+ TC_SETUP_QDISC_TBF, &graft_offload, extack);
+}
+
/* GSO packet is too big, segment it so that tbf can transmit
* each segment in time
*/
@@ -547,6 +561,8 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
new = &noop_qdisc;
*old = qdisc_replace(sch, new, &q->qdisc);
+
+ tbf_offload_graft(sch, new, *old, extack);
return 0;
}
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 5ef86fdb1176..1f1786021d9c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -702,7 +702,7 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);
/* Break out if chunk length is less than the minimum. */
- if (ntohs(ch->length) < sizeof(_ch))
+ if (!ch || ntohs(ch->length) < sizeof(_ch))
break;
ch_end = offset + SCTP_PAD4(ntohs(ch->length));
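skb_header_pointer() may return NULL when the requested range runs past the end of the packet, so the result must be checked before any field access; the old code could dereference NULL for a truncated OOTB chunk header. Its contract, for reference:

	/* Returns a pointer into the skb if the bytes are linear, or copies
	 * them into 'buffer' and returns it; returns NULL if
	 * [offset, offset + len) lies beyond the packet. */
	void *skb_header_pointer(const struct sk_buff *skb,
				 int offset, int len, void *buffer);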
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index b8fa8f1a7277..c7503fd64915 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3697,7 +3697,7 @@ struct sctp_chunk *sctp_make_strreset_req(
outlen = (sizeof(outreq) + stream_len) * out;
inlen = (sizeof(inreq) + stream_len) * in;
- retval = sctp_make_reconf(asoc, outlen + inlen);
+ retval = sctp_make_reconf(asoc, SCTP_PAD4(outlen) + SCTP_PAD4(inlen));
if (!retval)
return NULL;
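Reconfiguration request parameters are padded to 4 bytes on the wire, so the chunk must be sized from the padded per-part lengths; summing unpadded lengths can under-allocate whenever stream_len leaves a part length that is not a multiple of 4. SCTP_PAD4() is plain round-up arithmetic; a runnable check (macro as defined in include/linux/sctp.h):

	#include <assert.h>

	#define SCTP_PAD4(s) (((s) + 3) & ~3)

	int main(void)
	{
		assert(SCTP_PAD4(13) == 16);	/* 13-byte part occupies 16 bytes */
		assert(SCTP_PAD4(16) == 16);	/* already aligned: unchanged */
		return 0;
	}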
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index f69ef3f2019f..5e50e007a7da 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -439,6 +439,47 @@ static int smcr_clnt_conf_first_link(struct smc_sock *smc)
return 0;
}
+static bool smc_isascii(char *hostname)
+{
+ int i;
+
+ for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
+ if (!isascii(hostname[i]))
+ return false;
+ return true;
+}
+
+static void smc_conn_save_peer_info_fce(struct smc_sock *smc,
+ struct smc_clc_msg_accept_confirm *clc)
+{
+ struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
+ (struct smc_clc_msg_accept_confirm_v2 *)clc;
+ struct smc_clc_first_contact_ext *fce;
+ int clc_v2_len;
+
+ if (clc->hdr.version == SMC_V1 ||
+ !(clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK))
+ return;
+
+ if (smc->conn.lgr->is_smcd) {
+ memcpy(smc->conn.lgr->negotiated_eid, clc_v2->d1.eid,
+ SMC_MAX_EID_LEN);
+ clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm_v2,
+ d1);
+ } else {
+ memcpy(smc->conn.lgr->negotiated_eid, clc_v2->r1.eid,
+ SMC_MAX_EID_LEN);
+ clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm_v2,
+ r1);
+ }
+ fce = (struct smc_clc_first_contact_ext *)(((u8 *)clc_v2) + clc_v2_len);
+ smc->conn.lgr->peer_os = fce->os_type;
+ smc->conn.lgr->peer_smc_release = fce->release;
+ if (smc_isascii(fce->hostname))
+ memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
+ SMC_MAX_HOSTNAME_LEN);
+}
+
static void smcr_conn_save_peer_info(struct smc_sock *smc,
struct smc_clc_msg_accept_confirm *clc)
{
@@ -451,16 +492,6 @@ static void smcr_conn_save_peer_info(struct smc_sock *smc,
smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}
-static bool smc_isascii(char *hostname)
-{
- int i;
-
- for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
- if (!isascii(hostname[i]))
- return false;
- return true;
-}
-
static void smcd_conn_save_peer_info(struct smc_sock *smc,
struct smc_clc_msg_accept_confirm *clc)
{
@@ -472,22 +503,6 @@ static void smcd_conn_save_peer_info(struct smc_sock *smc,
smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
- if (clc->hdr.version > SMC_V1 &&
- (clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK)) {
- struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
- (struct smc_clc_msg_accept_confirm_v2 *)clc;
- struct smc_clc_first_contact_ext *fce =
- (struct smc_clc_first_contact_ext *)
- (((u8 *)clc_v2) + sizeof(*clc_v2));
-
- memcpy(smc->conn.lgr->negotiated_eid, clc_v2->eid,
- SMC_MAX_EID_LEN);
- smc->conn.lgr->peer_os = fce->os_type;
- smc->conn.lgr->peer_smc_release = fce->release;
- if (smc_isascii(fce->hostname))
- memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
- SMC_MAX_HOSTNAME_LEN);
- }
}
static void smc_conn_save_peer_info(struct smc_sock *smc,
@@ -497,14 +512,16 @@ static void smc_conn_save_peer_info(struct smc_sock *smc,
smcd_conn_save_peer_info(smc, clc);
else
smcr_conn_save_peer_info(smc, clc);
+ smc_conn_save_peer_info_fce(smc, clc);
}
static void smc_link_save_peer_info(struct smc_link *link,
- struct smc_clc_msg_accept_confirm *clc)
+ struct smc_clc_msg_accept_confirm *clc,
+ struct smc_init_info *ini)
{
link->peer_qpn = ntoh24(clc->r0.qpn);
- memcpy(link->peer_gid, clc->r0.lcl.gid, SMC_GID_SIZE);
- memcpy(link->peer_mac, clc->r0.lcl.mac, sizeof(link->peer_mac));
+ memcpy(link->peer_gid, ini->peer_gid, SMC_GID_SIZE);
+ memcpy(link->peer_mac, ini->peer_mac, sizeof(link->peer_mac));
link->peer_psn = ntoh24(clc->r0.psn);
link->peer_mtu = clc->r0.qp_mtu;
}
@@ -608,7 +625,9 @@ static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
* used for the internal TCP socket
*/
smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
- if (!ini->ib_dev)
+ if (!ini->check_smcrv2 && !ini->ib_dev)
+ return SMC_CLC_DECL_NOSMCRDEV;
+ if (ini->check_smcrv2 && !ini->smcrv2.ib_dev_v2)
return SMC_CLC_DECL_NOSMCRDEV;
return 0;
}
@@ -692,27 +711,42 @@ static int smc_find_proposal_devices(struct smc_sock *smc,
int rc = 0;
/* check if there is an ism device available */
- if (ini->smcd_version & SMC_V1) {
- if (smc_find_ism_device(smc, ini) ||
- smc_connect_ism_vlan_setup(smc, ini)) {
- if (ini->smc_type_v1 == SMC_TYPE_B)
- ini->smc_type_v1 = SMC_TYPE_R;
- else
- ini->smc_type_v1 = SMC_TYPE_N;
- } /* else ISM V1 is supported for this connection */
- if (smc_find_rdma_device(smc, ini)) {
- if (ini->smc_type_v1 == SMC_TYPE_B)
- ini->smc_type_v1 = SMC_TYPE_D;
- else
- ini->smc_type_v1 = SMC_TYPE_N;
- } /* else RDMA is supported for this connection */
- }
- if (smc_ism_is_v2_capable() && smc_find_ism_v2_device_clnt(smc, ini))
- ini->smc_type_v2 = SMC_TYPE_N;
+ if (!(ini->smcd_version & SMC_V1) ||
+ smc_find_ism_device(smc, ini) ||
+ smc_connect_ism_vlan_setup(smc, ini))
+ ini->smcd_version &= ~SMC_V1;
+ /* else ISM V1 is supported for this connection */
+
+ /* check if there is an rdma device available */
+ if (!(ini->smcr_version & SMC_V1) ||
+ smc_find_rdma_device(smc, ini))
+ ini->smcr_version &= ~SMC_V1;
+ /* else RDMA is supported for this connection */
+
+ ini->smc_type_v1 = smc_indicated_type(ini->smcd_version & SMC_V1,
+ ini->smcr_version & SMC_V1);
+
+ /* check if there is an ism v2 device available */
+ if (!(ini->smcd_version & SMC_V2) ||
+ !smc_ism_is_v2_capable() ||
+ smc_find_ism_v2_device_clnt(smc, ini))
+ ini->smcd_version &= ~SMC_V2;
+
+ /* check if there is an rdma v2 device available */
+ ini->check_smcrv2 = true;
+ ini->smcrv2.saddr = smc->clcsock->sk->sk_rcv_saddr;
+ if (!(ini->smcr_version & SMC_V2) ||
+ smc->clcsock->sk->sk_family != AF_INET ||
+ !smc_clc_ueid_count() ||
+ smc_find_rdma_device(smc, ini))
+ ini->smcr_version &= ~SMC_V2;
+ ini->check_smcrv2 = false;
+
+ ini->smc_type_v2 = smc_indicated_type(ini->smcd_version & SMC_V2,
+ ini->smcr_version & SMC_V2);
/* if neither ISM nor RDMA are supported, fallback */
- if (!smcr_indicated(ini->smc_type_v1) &&
- ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
+ if (ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
rc = SMC_CLC_DECL_NOSMCDEV;
return rc;
@@ -752,6 +786,64 @@ static int smc_connect_clc(struct smc_sock *smc,
SMC_CLC_ACCEPT, CLC_WAIT_TIME);
}
+void smc_fill_gid_list(struct smc_link_group *lgr,
+ struct smc_gidlist *gidlist,
+ struct smc_ib_device *known_dev, u8 *known_gid)
+{
+ struct smc_init_info *alt_ini = NULL;
+
+ memset(gidlist, 0, sizeof(*gidlist));
+ memcpy(gidlist->list[gidlist->len++], known_gid, SMC_GID_SIZE);
+
+ alt_ini = kzalloc(sizeof(*alt_ini), GFP_KERNEL);
+ if (!alt_ini)
+ goto out;
+
+ alt_ini->vlan_id = lgr->vlan_id;
+ alt_ini->check_smcrv2 = true;
+ alt_ini->smcrv2.saddr = lgr->saddr;
+ smc_pnet_find_alt_roce(lgr, alt_ini, known_dev);
+
+ if (!alt_ini->smcrv2.ib_dev_v2)
+ goto out;
+
+ memcpy(gidlist->list[gidlist->len++], alt_ini->smcrv2.ib_gid_v2,
+ SMC_GID_SIZE);
+
+out:
+ kfree(alt_ini);
+}
+
+static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
+ struct smc_clc_msg_accept_confirm *aclc,
+ struct smc_init_info *ini)
+{
+ struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
+ (struct smc_clc_msg_accept_confirm_v2 *)aclc;
+ struct smc_clc_first_contact_ext *fce =
+ (struct smc_clc_first_contact_ext *)
+ (((u8 *)clc_v2) + sizeof(*clc_v2));
+
+ if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1)
+ return 0;
+
+ if (fce->v2_direct) {
+ memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN);
+ ini->smcrv2.uses_gateway = false;
+ } else {
+ if (smc_ib_find_route(smc->clcsock->sk->sk_rcv_saddr,
+ smc_ib_gid_to_ipv4(aclc->r0.lcl.gid),
+ ini->smcrv2.nexthop_mac,
+ &ini->smcrv2.uses_gateway))
+ return SMC_CLC_DECL_NOROUTE;
+ if (!ini->smcrv2.uses_gateway) {
+ /* mismatch: peer claims indirect, but it's direct */
+ return SMC_CLC_DECL_NOINDIRECT;
+ }
+ }
+ return 0;
+}
+
/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
struct smc_clc_msg_accept_confirm *aclc,
@@ -759,11 +851,18 @@ static int smc_connect_rdma(struct smc_sock *smc,
{
int i, reason_code = 0;
struct smc_link *link;
+ u8 *eid = NULL;
ini->is_smcd = false;
- ini->ib_lcl = &aclc->r0.lcl;
ini->ib_clcqpn = ntoh24(aclc->r0.qpn);
ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
+ memcpy(ini->peer_systemid, aclc->r0.lcl.id_for_peer, SMC_SYSTEMID_LEN);
+ memcpy(ini->peer_gid, aclc->r0.lcl.gid, SMC_GID_SIZE);
+ memcpy(ini->peer_mac, aclc->r0.lcl.mac, ETH_ALEN);
+
+ reason_code = smc_connect_rdma_v2_prepare(smc, aclc, ini);
+ if (reason_code)
+ return reason_code;
mutex_lock(&smc_client_lgr_pending);
reason_code = smc_conn_create(smc, ini);
@@ -785,8 +884,9 @@ static int smc_connect_rdma(struct smc_sock *smc,
if (l->peer_qpn == ntoh24(aclc->r0.qpn) &&
!memcmp(l->peer_gid, &aclc->r0.lcl.gid,
SMC_GID_SIZE) &&
- !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
- sizeof(l->peer_mac))) {
+ (aclc->hdr.version > SMC_V1 ||
+ !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
+ sizeof(l->peer_mac)))) {
link = l;
break;
}
@@ -805,7 +905,7 @@ static int smc_connect_rdma(struct smc_sock *smc,
}
if (ini->first_contact_local)
- smc_link_save_peer_info(link, aclc);
+ smc_link_save_peer_info(link, aclc, ini);
if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
reason_code = SMC_CLC_DECL_ERR_RTOK;
@@ -828,8 +928,18 @@ static int smc_connect_rdma(struct smc_sock *smc,
}
smc_rmb_sync_sg_for_device(&smc->conn);
+ if (aclc->hdr.version > SMC_V1) {
+ struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
+ (struct smc_clc_msg_accept_confirm_v2 *)aclc;
+
+ eid = clc_v2->r1.eid;
+ if (ini->first_contact_local)
+ smc_fill_gid_list(link->lgr, &ini->smcrv2.gidlist,
+ link->smcibdev, link->gid);
+ }
+
reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
- SMC_V1, NULL);
+ aclc->hdr.version, eid, ini);
if (reason_code)
goto connect_abort;
@@ -869,7 +979,7 @@ smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm_v2 *aclc,
int i;
for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
- if (ini->ism_chid[i] == ntohs(aclc->chid)) {
+ if (ini->ism_chid[i] == ntohs(aclc->d1.chid)) {
ini->ism_selected = i;
return 0;
}
@@ -923,11 +1033,11 @@ static int smc_connect_ism(struct smc_sock *smc,
struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
(struct smc_clc_msg_accept_confirm_v2 *)aclc;
- eid = clc_v2->eid;
+ eid = clc_v2->d1.eid;
}
rc = smc_clc_send_confirm(smc, ini->first_contact_local,
- aclc->hdr.version, eid);
+ aclc->hdr.version, eid, NULL);
if (rc)
goto connect_abort;
mutex_unlock(&smc_server_lgr_pending);
@@ -950,17 +1060,24 @@ connect_abort:
static int smc_connect_check_aclc(struct smc_init_info *ini,
struct smc_clc_msg_accept_confirm *aclc)
{
- if ((aclc->hdr.typev1 == SMC_TYPE_R &&
- !smcr_indicated(ini->smc_type_v1)) ||
- (aclc->hdr.typev1 == SMC_TYPE_D &&
- ((!smcd_indicated(ini->smc_type_v1) &&
- !smcd_indicated(ini->smc_type_v2)) ||
- (aclc->hdr.version == SMC_V1 &&
- !smcd_indicated(ini->smc_type_v1)) ||
- (aclc->hdr.version == SMC_V2 &&
- !smcd_indicated(ini->smc_type_v2)))))
+ if (aclc->hdr.typev1 != SMC_TYPE_R &&
+ aclc->hdr.typev1 != SMC_TYPE_D)
return SMC_CLC_DECL_MODEUNSUPP;
+ if (aclc->hdr.version >= SMC_V2) {
+ if ((aclc->hdr.typev1 == SMC_TYPE_R &&
+ !smcr_indicated(ini->smc_type_v2)) ||
+ (aclc->hdr.typev1 == SMC_TYPE_D &&
+ !smcd_indicated(ini->smc_type_v2)))
+ return SMC_CLC_DECL_MODEUNSUPP;
+ } else {
+ if ((aclc->hdr.typev1 == SMC_TYPE_R &&
+ !smcr_indicated(ini->smc_type_v1)) ||
+ (aclc->hdr.typev1 == SMC_TYPE_D &&
+ !smcd_indicated(ini->smc_type_v1)))
+ return SMC_CLC_DECL_MODEUNSUPP;
+ }
+
return 0;
}
@@ -991,14 +1108,15 @@ static int __smc_connect(struct smc_sock *smc)
return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
version);
- ini->smcd_version = SMC_V1;
- ini->smcd_version |= smc_ism_is_v2_capable() ? SMC_V2 : 0;
+ ini->smcd_version = SMC_V1 | SMC_V2;
+ ini->smcr_version = SMC_V1 | SMC_V2;
ini->smc_type_v1 = SMC_TYPE_B;
- ini->smc_type_v2 = smc_ism_is_v2_capable() ? SMC_TYPE_D : SMC_TYPE_N;
+ ini->smc_type_v2 = SMC_TYPE_B;
/* get vlan id from IP device */
if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
ini->smcd_version &= ~SMC_V1;
+ ini->smcr_version = 0;
ini->smc_type_v1 = SMC_TYPE_N;
if (!ini->smcd_version) {
rc = SMC_CLC_DECL_GETVLANERR;
@@ -1026,15 +1144,17 @@ static int __smc_connect(struct smc_sock *smc)
/* check if smc modes and versions of CLC proposal and accept match */
rc = smc_connect_check_aclc(ini, aclc);
version = aclc->hdr.version == SMC_V1 ? SMC_V1 : SMC_V2;
- ini->smcd_version = version;
if (rc)
goto vlan_cleanup;
/* depending on previous steps, connect using rdma or ism */
- if (aclc->hdr.typev1 == SMC_TYPE_R)
+ if (aclc->hdr.typev1 == SMC_TYPE_R) {
+ ini->smcr_version = version;
rc = smc_connect_rdma(smc, aclc, ini);
- else if (aclc->hdr.typev1 == SMC_TYPE_D)
+ } else if (aclc->hdr.typev1 == SMC_TYPE_D) {
+ ini->smcd_version = version;
rc = smc_connect_ism(smc, aclc, ini);
+ }
if (rc)
goto vlan_cleanup;
@@ -1315,7 +1435,7 @@ static int smcr_serv_conf_first_link(struct smc_sock *smc)
smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
/* initial contact - try to establish second link */
- smc_llc_srv_add_link(link);
+ smc_llc_srv_add_link(link, NULL);
return 0;
}
@@ -1395,33 +1515,48 @@ static int smc_listen_v2_check(struct smc_sock *new_smc,
ini->smc_type_v1 = pclc->hdr.typev1;
ini->smc_type_v2 = pclc->hdr.typev2;
- ini->smcd_version = ini->smc_type_v1 != SMC_TYPE_N ? SMC_V1 : 0;
- if (pclc->hdr.version > SMC_V1)
- ini->smcd_version |=
- ini->smc_type_v2 != SMC_TYPE_N ? SMC_V2 : 0;
- if (!(ini->smcd_version & SMC_V2)) {
+ ini->smcd_version = smcd_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
+ ini->smcr_version = smcr_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
+ if (pclc->hdr.version > SMC_V1) {
+ if (smcd_indicated(ini->smc_type_v2))
+ ini->smcd_version |= SMC_V2;
+ if (smcr_indicated(ini->smc_type_v2))
+ ini->smcr_version |= SMC_V2;
+ }
+ if (!(ini->smcd_version & SMC_V2) && !(ini->smcr_version & SMC_V2)) {
rc = SMC_CLC_DECL_PEERNOSMC;
goto out;
}
- if (!smc_ism_is_v2_capable()) {
- ini->smcd_version &= ~SMC_V2;
- rc = SMC_CLC_DECL_NOISM2SUPP;
- goto out;
- }
pclc_v2_ext = smc_get_clc_v2_ext(pclc);
if (!pclc_v2_ext) {
ini->smcd_version &= ~SMC_V2;
+ ini->smcr_version &= ~SMC_V2;
rc = SMC_CLC_DECL_NOV2EXT;
goto out;
}
pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
- if (!pclc_smcd_v2_ext) {
- ini->smcd_version &= ~SMC_V2;
- rc = SMC_CLC_DECL_NOV2DEXT;
+ if (ini->smcd_version & SMC_V2) {
+ if (!smc_ism_is_v2_capable()) {
+ ini->smcd_version &= ~SMC_V2;
+ rc = SMC_CLC_DECL_NOISM2SUPP;
+ } else if (!pclc_smcd_v2_ext) {
+ ini->smcd_version &= ~SMC_V2;
+ rc = SMC_CLC_DECL_NOV2DEXT;
+ } else if (!pclc_v2_ext->hdr.eid_cnt &&
+ !pclc_v2_ext->hdr.flag.seid) {
+ ini->smcd_version &= ~SMC_V2;
+ rc = SMC_CLC_DECL_NOUEID;
+ }
+ }
+ if (ini->smcr_version & SMC_V2) {
+ if (!pclc_v2_ext->hdr.eid_cnt) {
+ ini->smcr_version &= ~SMC_V2;
+ rc = SMC_CLC_DECL_NOUEID;
+ }
}
out:
- if (!ini->smcd_version)
+ if (!ini->smcd_version && !ini->smcr_version)
return rc;
return 0;
@@ -1541,10 +1676,6 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
pclc_smcd = smc_get_clc_msg_smcd(pclc);
smc_v2_ext = smc_get_clc_v2_ext(pclc);
smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
- if (!smcd_v2_ext) {
- smc_find_ism_store_rc(SMC_CLC_DECL_NOV2DEXT, ini);
- goto not_found;
- }
mutex_lock(&smcd_dev_list.mutex);
if (pclc_smcd->ism.chid)
@@ -1562,8 +1693,10 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
}
mutex_unlock(&smcd_dev_list.mutex);
- if (!ini->ism_dev[0])
+ if (!ini->ism_dev[0]) {
+ smc_find_ism_store_rc(SMC_CLC_DECL_NOSMCD2DEV, ini);
goto not_found;
+ }
smc_ism_get_system_eid(&eid);
if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext,
@@ -1616,6 +1749,7 @@ static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
not_found:
smc_find_ism_store_rc(rc, ini);
+ ini->smcd_version &= ~SMC_V1;
ini->ism_dev[0] = NULL;
ini->is_smcd = false;
}
@@ -1634,24 +1768,69 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
return 0;
}
+static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
+ struct smc_clc_msg_proposal *pclc,
+ struct smc_init_info *ini)
+{
+ struct smc_clc_v2_extension *smc_v2_ext;
+ u8 smcr_version;
+ int rc;
+
+ if (!(ini->smcr_version & SMC_V2) || !smcr_indicated(ini->smc_type_v2))
+ goto not_found;
+
+ smc_v2_ext = smc_get_clc_v2_ext(pclc);
+ if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
+ goto not_found;
+
+ /* prepare RDMA check */
+ memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
+ memcpy(ini->peer_gid, smc_v2_ext->roce, SMC_GID_SIZE);
+ memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
+ ini->check_smcrv2 = true;
+ ini->smcrv2.clc_sk = new_smc->clcsock->sk;
+ ini->smcrv2.saddr = new_smc->clcsock->sk->sk_rcv_saddr;
+ ini->smcrv2.daddr = smc_ib_gid_to_ipv4(smc_v2_ext->roce);
+ rc = smc_find_rdma_device(new_smc, ini);
+ if (rc) {
+ smc_find_ism_store_rc(rc, ini);
+ goto not_found;
+ }
+ if (!ini->smcrv2.uses_gateway)
+ memcpy(ini->smcrv2.nexthop_mac, pclc->lcl.mac, ETH_ALEN);
+
+ smcr_version = ini->smcr_version;
+ ini->smcr_version = SMC_V2;
+ rc = smc_listen_rdma_init(new_smc, ini);
+ if (!rc)
+ rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local);
+ if (!rc)
+ return;
+ ini->smcr_version = smcr_version;
+ smc_find_ism_store_rc(rc, ini);
+
+not_found:
+ ini->smcr_version &= ~SMC_V2;
+ ini->check_smcrv2 = false;
+}
+
static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
struct smc_clc_msg_proposal *pclc,
struct smc_init_info *ini)
{
int rc;
- if (!smcr_indicated(ini->smc_type_v1))
+ if (!(ini->smcr_version & SMC_V1) || !smcr_indicated(ini->smc_type_v1))
return SMC_CLC_DECL_NOSMCDEV;
/* prepare RDMA check */
- ini->ib_lcl = &pclc->lcl;
+ memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
+ memcpy(ini->peer_gid, pclc->lcl.gid, SMC_GID_SIZE);
+ memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
rc = smc_find_rdma_device(new_smc, ini);
if (rc) {
/* no RDMA device found */
- if (ini->smc_type_v1 == SMC_TYPE_B)
- /* neither ISM nor RDMA device found */
- rc = SMC_CLC_DECL_NOSMCDEV;
- return rc;
+ return SMC_CLC_DECL_NOSMCDEV;
}
rc = smc_listen_rdma_init(new_smc, ini);
if (rc)
@@ -1664,51 +1843,60 @@ static int smc_listen_find_device(struct smc_sock *new_smc,
struct smc_clc_msg_proposal *pclc,
struct smc_init_info *ini)
{
- int rc;
+ int prfx_rc;
/* check for ISM device matching V2 proposed device */
smc_find_ism_v2_device_serv(new_smc, pclc, ini);
if (ini->ism_dev[0])
return 0;
- if (!(ini->smcd_version & SMC_V1))
- return ini->rc ?: SMC_CLC_DECL_NOSMCD2DEV;
-
- /* check for matching IP prefix and subnet length */
- rc = smc_listen_prfx_check(new_smc, pclc);
- if (rc)
- return ini->rc ?: rc;
+ /* check for matching IP prefix and subnet length (V1) */
+ prfx_rc = smc_listen_prfx_check(new_smc, pclc);
+ if (prfx_rc)
+ smc_find_ism_store_rc(prfx_rc, ini);
/* get vlan id from IP device */
if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
return ini->rc ?: SMC_CLC_DECL_GETVLANERR;
/* check for ISM device matching V1 proposed device */
- smc_find_ism_v1_device_serv(new_smc, pclc, ini);
+ if (!prfx_rc)
+ smc_find_ism_v1_device_serv(new_smc, pclc, ini);
if (ini->ism_dev[0])
return 0;
- if (pclc->hdr.typev1 == SMC_TYPE_D)
+ if (!smcr_indicated(pclc->hdr.typev1) &&
+ !smcr_indicated(pclc->hdr.typev2))
/* skip RDMA and decline */
return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV;
- /* check if RDMA is available */
- rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
- smc_find_ism_store_rc(rc, ini);
+ /* check if RDMA V2 is available */
+ smc_find_rdma_v2_device_serv(new_smc, pclc, ini);
+ if (ini->smcrv2.ib_dev_v2)
+ return 0;
- return (!rc) ? 0 : ini->rc;
+ /* check if RDMA V1 is available */
+ if (!prfx_rc) {
+ int rc;
+
+ rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
+ smc_find_ism_store_rc(rc, ini);
+ return (!rc) ? 0 : ini->rc;
+ }
+ return SMC_CLC_DECL_NOSMCDEV;
}
/* listen worker: finish RDMA setup */
static int smc_listen_rdma_finish(struct smc_sock *new_smc,
struct smc_clc_msg_accept_confirm *cclc,
- bool local_first)
+ bool local_first,
+ struct smc_init_info *ini)
{
struct smc_link *link = new_smc->conn.lnk;
int reason_code = 0;
if (local_first)
- smc_link_save_peer_info(link, cclc);
+ smc_link_save_peer_info(link, cclc, ini);
if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc))
return SMC_CLC_DECL_ERR_RTOK;
@@ -1729,12 +1917,13 @@ static void smc_listen_work(struct work_struct *work)
{
struct smc_sock *new_smc = container_of(work, struct smc_sock,
smc_listen_work);
- u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
struct socket *newclcsock = new_smc->clcsock;
struct smc_clc_msg_accept_confirm *cclc;
struct smc_clc_msg_proposal_area *buf;
struct smc_clc_msg_proposal *pclc;
struct smc_init_info *ini = NULL;
+ u8 proposal_version = SMC_V1;
+ u8 accept_version;
int rc = 0;
if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
@@ -1765,7 +1954,9 @@ static void smc_listen_work(struct work_struct *work)
SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
if (rc)
goto out_decl;
- version = pclc->hdr.version == SMC_V1 ? SMC_V1 : version;
+
+ if (pclc->hdr.version > SMC_V1)
+ proposal_version = SMC_V2;
/* IPSec connections opt out of SMC optimizations */
if (using_ipsec(new_smc)) {
@@ -1795,9 +1986,9 @@ static void smc_listen_work(struct work_struct *work)
goto out_unlock;
/* send SMC Accept CLC message */
+ accept_version = ini->is_smcd ? ini->smcd_version : ini->smcr_version;
rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
- ini->smcd_version == SMC_V2 ? SMC_V2 : SMC_V1,
- ini->negotiated_eid);
+ accept_version, ini->negotiated_eid);
if (rc)
goto out_unlock;
@@ -1819,7 +2010,7 @@ static void smc_listen_work(struct work_struct *work)
/* finish worker */
if (!ini->is_smcd) {
rc = smc_listen_rdma_finish(new_smc, cclc,
- ini->first_contact_local);
+ ini->first_contact_local, ini);
if (rc)
goto out_unlock;
mutex_unlock(&smc_server_lgr_pending);
@@ -1833,7 +2024,7 @@ out_unlock:
mutex_unlock(&smc_server_lgr_pending);
out_decl:
smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
- version);
+ proposal_version);
out_free:
kfree(ini);
kfree(buf);
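The through-line of the af_smc changes: SMC-D (ISM) and SMC-R (RDMA) support are now negotiated per protocol version as independent bitmasks instead of one smcd_version field doing double duty. A condensed sketch of the client-side flow (field and helper names from the patch, control flow simplified):

	ini->smcd_version = SMC_V1 | SMC_V2;	/* ISM candidates */
	ini->smcr_version = SMC_V1 | SMC_V2;	/* RDMA candidates */

	/* each failed device probe clears one bit ... */
	if (smc_find_ism_device(smc, ini))
		ini->smcd_version &= ~SMC_V1;
	if (smc_find_rdma_device(smc, ini))
		ini->smcr_version &= ~SMC_V1;
	/* ... likewise for the V2 probes ... */

	/* ... and the proposed types fall out of what survived */
	ini->smc_type_v1 = smc_indicated_type(ini->smcd_version & SMC_V1,
					      ini->smcr_version & SMC_V1);
	ini->smc_type_v2 = smc_indicated_type(ini->smcd_version & SMC_V2,
					      ini->smcr_version & SMC_V2);

	if (ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
		return SMC_CLC_DECL_NOSMCDEV;	/* decline, fall back to TCP */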
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 5e7def3ab730..f4286ca1f228 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -56,7 +56,20 @@ enum smc_state { /* possible states of an SMC socket */
struct smc_link_group;
struct smc_wr_rx_hdr { /* common prefix part of LLC and CDC to demultiplex */
- u8 type;
+ union {
+ u8 type;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ struct {
+ u8 llc_version:4,
+ llc_type:4;
+ };
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ struct {
+ u8 llc_type:4,
+ llc_version:4;
+ };
+#endif
+ };
} __aligned(1);
struct smc_cdc_conn_state_flags {
@@ -286,7 +299,12 @@ static inline bool using_ipsec(struct smc_sock *smc)
}
#endif
+struct smc_gidlist;
+
struct sock *smc_accept_dequeue(struct sock *parent, struct socket *new_sock);
void smc_close_non_accepted(struct sock *sk);
+void smc_fill_gid_list(struct smc_link_group *lgr,
+ struct smc_gidlist *gidlist,
+ struct smc_ib_device *known_dev, u8 *known_gid);
#endif /* __SMC_H */
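On the wire the LLC version occupies the high-order nibble of the first message byte. The union above keeps .type usable as a whole byte while exposing the nibbles, and the #if blocks flip the declaration order because C allocates bitfields from opposite ends on big- and little-endian targets (per the kernel's bitfield conventions). A hypothetical illustration, not from the patch:

	struct smc_wr_rx_hdr hdr;

	hdr.type = 0x21;	/* wire byte: version 2, LLC type 1 */
	/* on either endianness:
	 *   hdr.llc_version == 0x2   (high nibble)
	 *   hdr.llc_type    == 0x1   (low nibble)  */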
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index f23f558054a7..99acd337ba90 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -150,9 +150,11 @@ static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
again:
link = conn->lnk;
+ if (!smc_wr_tx_link_hold(link))
+ return -ENOLINK;
rc = smc_cdc_get_free_slot(conn, link, &wr_buf, NULL, &pend);
if (rc)
- return rc;
+ goto put_out;
spin_lock_bh(&conn->send_lock);
if (link != conn->lnk) {
@@ -160,6 +162,7 @@ again:
spin_unlock_bh(&conn->send_lock);
smc_wr_tx_put_slot(link,
(struct smc_wr_tx_pend_priv *)pend);
+ smc_wr_tx_link_put(link);
if (again)
return -ENOLINK;
again = true;
@@ -167,6 +170,8 @@ again:
}
rc = smc_cdc_msg_send(conn, wr_buf, pend);
spin_unlock_bh(&conn->send_lock);
+put_out:
+ smc_wr_tx_link_put(link);
return rc;
}
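smc_wr_tx_link_hold()/smc_wr_tx_link_put() bracket every use of conn->lnk so link teardown cannot free the link mid-send; note that the error path funnels through the same put. The shape each converted call site follows:

	if (!smc_wr_tx_link_hold(link))	/* link already being torn down */
		return -ENOLINK;

	rc = smc_cdc_get_free_slot(conn, link, &wr_buf, NULL, &pend);
	if (rc)
		goto put_out;	/* never leak the reference */
	rc = smc_cdc_msg_send(conn, wr_buf, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;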
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 4afd9e71e5c2..8409ab71a5e4 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -31,6 +31,7 @@
#define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
#define SMCD_CLC_ACCEPT_CONFIRM_LEN 48
#define SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 78
+#define SMCR_CLC_ACCEPT_CONFIRM_LEN_V2 108
#define SMC_CLC_RECV_BUF_LEN 100
/* eye catcher "SMCR" EBCDIC for CLC messages */
@@ -114,6 +115,17 @@ err_out:
return rc;
}
+int smc_clc_ueid_count(void)
+{
+ int count;
+
+ read_lock(&smc_clc_eid_table.lock);
+ count = smc_clc_eid_table.ueid_cnt;
+ read_unlock(&smc_clc_eid_table.lock);
+
+ return count;
+}
+
int smc_nl_add_ueid(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *nla_ueid = info->attrs[SMC_NLA_EID_TABLE_ENTRY];
@@ -298,7 +310,8 @@ bool smc_clc_match_eid(u8 *negotiated_eid,
negotiated_eid[0] = 0;
read_lock(&smc_clc_eid_table.lock);
- if (smc_clc_eid_table.seid_enabled &&
+ if (peer_eid && local_eid &&
+ smc_clc_eid_table.seid_enabled &&
smc_v2_ext->hdr.flag.seid &&
!memcmp(peer_eid, local_eid, SMC_MAX_EID_LEN)) {
memcpy(negotiated_eid, peer_eid, SMC_MAX_EID_LEN);
@@ -380,6 +393,27 @@ smc_clc_msg_acc_conf_valid(struct smc_clc_msg_accept_confirm_v2 *clc_v2)
(ntohs(hdr->length) != SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 +
sizeof(struct smc_clc_first_contact_ext)))
return false;
+ if (hdr->typev1 == SMC_TYPE_R &&
+ ntohs(hdr->length) < SMCR_CLC_ACCEPT_CONFIRM_LEN_V2)
+ return false;
+ }
+ return true;
+}
+
+/* check arriving CLC decline */
+static bool
+smc_clc_msg_decl_valid(struct smc_clc_msg_decline *dclc)
+{
+ struct smc_clc_msg_hdr *hdr = &dclc->hdr;
+
+ if (hdr->typev1 != SMC_TYPE_R && hdr->typev1 != SMC_TYPE_D)
+ return false;
+ if (hdr->version == SMC_V1) {
+ if (ntohs(hdr->length) != sizeof(struct smc_clc_msg_decline))
+ return false;
+ } else {
+ if (ntohs(hdr->length) != sizeof(struct smc_clc_msg_decline_v2))
+ return false;
}
return true;
}
@@ -425,9 +459,9 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl)
break;
case SMC_CLC_DECLINE:
dclc = (struct smc_clc_msg_decline *)clcm;
- if (ntohs(dclc->hdr.length) != sizeof(*dclc))
+ if (!smc_clc_msg_decl_valid(dclc))
return false;
- trl = &dclc->trl;
+ check_trl = false;
break;
default:
return false;
@@ -510,7 +544,8 @@ static int smc_clc_prfx_set(struct socket *clcsock,
goto out_rel;
}
/* get address to which the internal TCP socket is bound */
- kernel_getsockname(clcsock, (struct sockaddr *)&addrs);
+ if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0)
+ goto out_rel;
/* analyze IP specific data of net_device belonging to TCP socket */
addr6 = (struct sockaddr_in6 *)&addrs;
rcu_read_lock();
@@ -725,15 +760,16 @@ out:
/* send CLC DECLINE message across internal TCP socket */
int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, u8 version)
{
- struct smc_clc_msg_decline dclc;
+ struct smc_clc_msg_decline *dclc_v1;
+ struct smc_clc_msg_decline_v2 dclc;
struct msghdr msg;
+ int len, send_len;
struct kvec vec;
- int len;
+ dclc_v1 = (struct smc_clc_msg_decline *)&dclc;
memset(&dclc, 0, sizeof(dclc));
memcpy(dclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
dclc.hdr.type = SMC_CLC_DECLINE;
- dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
dclc.hdr.version = version;
dclc.os_type = version == SMC_V1 ? 0 : SMC_CLC_OS_LINUX;
dclc.hdr.typev2 = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ?
@@ -743,14 +779,22 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, u8 version)
memcpy(dclc.id_for_peer, local_systemid,
sizeof(local_systemid));
dclc.peer_diagnosis = htonl(peer_diag_info);
- memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+ if (version == SMC_V1) {
+ memcpy(dclc_v1->trl.eyecatcher, SMC_EYECATCHER,
+ sizeof(SMC_EYECATCHER));
+ send_len = sizeof(*dclc_v1);
+ } else {
+ memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER,
+ sizeof(SMC_EYECATCHER));
+ send_len = sizeof(dclc);
+ }
+ dclc.hdr.length = htons(send_len);
memset(&msg, 0, sizeof(msg));
vec.iov_base = &dclc;
- vec.iov_len = sizeof(struct smc_clc_msg_decline);
- len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
- sizeof(struct smc_clc_msg_decline));
- if (len < 0 || len < sizeof(struct smc_clc_msg_decline))
+ vec.iov_len = send_len;
+ len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, send_len);
+ if (len < 0 || len < send_len)
len = -EPROTO;
return len > 0 ? 0 : len;
}
@@ -832,8 +876,8 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
} else {
struct smc_clc_eid_entry *ueident;
u16 v2_ext_offset;
- u8 *eid = NULL;
+ v2_ext->hdr.flag.release = SMC_RELEASE;
v2_ext_offset = sizeof(*pclc_smcd) -
offsetofend(struct smc_clc_msg_smcd, v2_ext_offset);
if (ini->smc_type_v1 != SMC_TYPE_N)
@@ -841,6 +885,7 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
pclc_prfx->ipv6_prefixes_cnt *
sizeof(ipv6_prfx[0]);
pclc_smcd->v2_ext_offset = htons(v2_ext_offset);
+ plen += sizeof(*v2_ext);
read_lock(&smc_clc_eid_table.lock);
v2_ext->hdr.eid_cnt = smc_clc_eid_table.ueid_cnt;
@@ -850,10 +895,13 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
memcpy(v2_ext->user_eids[i++], ueident->eid,
sizeof(ueident->eid));
}
- v2_ext->hdr.flag.seid = smc_clc_eid_table.seid_enabled;
read_unlock(&smc_clc_eid_table.lock);
+ }
+ if (smcd_indicated(ini->smc_type_v2)) {
+ u8 *eid = NULL;
+
+ v2_ext->hdr.flag.seid = smc_clc_eid_table.seid_enabled;
v2_ext->hdr.ism_gid_cnt = ini->ism_offered_cnt;
- v2_ext->hdr.flag.release = SMC_RELEASE;
v2_ext->hdr.smcd_v2_ext_offset = htons(sizeof(*v2_ext) -
offsetofend(struct smc_clnt_opts_area_hdr,
smcd_v2_ext_offset) +
@@ -861,7 +909,7 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
smc_ism_get_system_eid(&eid);
if (eid && v2_ext->hdr.flag.seid)
memcpy(smcd_v2_ext->system_eid, eid, SMC_MAX_EID_LEN);
- plen += sizeof(*v2_ext) + sizeof(*smcd_v2_ext);
+ plen += sizeof(*smcd_v2_ext);
if (ini->ism_offered_cnt) {
for (i = 1; i <= ini->ism_offered_cnt; i++) {
gidchids[i - 1].gid =
@@ -873,6 +921,9 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
sizeof(struct smc_clc_smcd_gid_chid);
}
}
+ if (smcr_indicated(ini->smc_type_v2))
+ memcpy(v2_ext->roce, ini->smcrv2.ib_gid_v2, SMC_GID_SIZE);
+
pclc_base->hdr.length = htons(plen);
memcpy(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
@@ -896,12 +947,14 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
vec[i].iov_base = v2_ext;
vec[i++].iov_len = sizeof(*v2_ext) +
(v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN);
- vec[i].iov_base = smcd_v2_ext;
- vec[i++].iov_len = sizeof(*smcd_v2_ext);
- if (ini->ism_offered_cnt) {
- vec[i].iov_base = gidchids;
- vec[i++].iov_len = ini->ism_offered_cnt *
+ if (smcd_indicated(ini->smc_type_v2)) {
+ vec[i].iov_base = smcd_v2_ext;
+ vec[i++].iov_len = sizeof(*smcd_v2_ext);
+ if (ini->ism_offered_cnt) {
+ vec[i].iov_base = gidchids;
+ vec[i++].iov_len = ini->ism_offered_cnt *
sizeof(struct smc_clc_smcd_gid_chid);
+ }
}
}
vec[i].iov_base = trl;
@@ -924,13 +977,14 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
static int smc_clc_send_confirm_accept(struct smc_sock *smc,
struct smc_clc_msg_accept_confirm_v2 *clc_v2,
int first_contact, u8 version,
- u8 *eid)
+ u8 *eid, struct smc_init_info *ini)
{
struct smc_connection *conn = &smc->conn;
struct smc_clc_msg_accept_confirm *clc;
struct smc_clc_first_contact_ext fce;
+ struct smc_clc_fce_gid_ext gle;
struct smc_clc_msg_trail trl;
- struct kvec vec[3];
+ struct kvec vec[5];
struct msghdr msg;
int i, len;
@@ -952,9 +1006,10 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
if (version == SMC_V1) {
clc->hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
} else {
- clc_v2->chid = htons(smc_ism_get_chid(conn->lgr->smcd));
- if (eid[0])
- memcpy(clc_v2->eid, eid, SMC_MAX_EID_LEN);
+ clc_v2->d1.chid =
+ htons(smc_ism_get_chid(conn->lgr->smcd));
+ if (eid && eid[0])
+ memcpy(clc_v2->d1.eid, eid, SMC_MAX_EID_LEN);
len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2;
if (first_contact)
smc_clc_fill_fce(&fce, &len);
@@ -993,6 +1048,26 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
clc->r0.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
(conn->rmb_desc->sgt[link->link_idx].sgl));
hton24(clc->r0.psn, link->psn_initial);
+ if (version == SMC_V1) {
+ clc->hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
+ } else {
+ if (eid && eid[0])
+ memcpy(clc_v2->r1.eid, eid, SMC_MAX_EID_LEN);
+ len = SMCR_CLC_ACCEPT_CONFIRM_LEN_V2;
+ if (first_contact) {
+ smc_clc_fill_fce(&fce, &len);
+ fce.v2_direct = !link->lgr->uses_gateway;
+ memset(&gle, 0, sizeof(gle));
+ if (ini && clc->hdr.type == SMC_CLC_CONFIRM) {
+ gle.gid_cnt = ini->smcrv2.gidlist.len;
+ len += sizeof(gle);
+ len += gle.gid_cnt * sizeof(gle.gid[0]);
+ } else {
+ len += sizeof(gle.reserved);
+ }
+ }
+ clc_v2->hdr.length = htons(len);
+ }
memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
}
@@ -1000,7 +1075,10 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
i = 0;
vec[i].iov_base = clc_v2;
if (version > SMC_V1)
- vec[i++].iov_len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 - sizeof(trl);
+ vec[i++].iov_len = (clc->hdr.typev1 == SMC_TYPE_D ?
+ SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 :
+ SMCR_CLC_ACCEPT_CONFIRM_LEN_V2) -
+ sizeof(trl);
else
vec[i++].iov_len = (clc->hdr.typev1 == SMC_TYPE_D ?
SMCD_CLC_ACCEPT_CONFIRM_LEN :
@@ -1009,6 +1087,18 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
if (version > SMC_V1 && first_contact) {
vec[i].iov_base = &fce;
vec[i++].iov_len = sizeof(fce);
+ if (!conn->lgr->is_smcd) {
+ if (clc->hdr.type == SMC_CLC_CONFIRM) {
+ vec[i].iov_base = &gle;
+ vec[i++].iov_len = sizeof(gle);
+ vec[i].iov_base = &ini->smcrv2.gidlist.list;
+ vec[i++].iov_len = gle.gid_cnt *
+ sizeof(gle.gid[0]);
+ } else {
+ vec[i].iov_base = &gle.reserved;
+ vec[i++].iov_len = sizeof(gle.reserved);
+ }
+ }
}
vec[i].iov_base = &trl;
vec[i++].iov_len = sizeof(trl);
@@ -1018,7 +1108,7 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
/* send CLC CONFIRM message across internal TCP socket */
int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact,
- u8 version, u8 *eid)
+ u8 version, u8 *eid, struct smc_init_info *ini)
{
struct smc_clc_msg_accept_confirm_v2 cclc_v2;
int reason_code = 0;
@@ -1028,7 +1118,7 @@ int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact,
memset(&cclc_v2, 0, sizeof(cclc_v2));
cclc_v2.hdr.type = SMC_CLC_CONFIRM;
len = smc_clc_send_confirm_accept(smc, &cclc_v2, clnt_first_contact,
- version, eid);
+ version, eid, ini);
if (len < ntohs(cclc_v2.hdr.length)) {
if (len >= 0) {
reason_code = -ENETUNREACH;
@@ -1051,7 +1141,7 @@ int smc_clc_send_accept(struct smc_sock *new_smc, bool srv_first_contact,
memset(&aclc_v2, 0, sizeof(aclc_v2));
aclc_v2.hdr.type = SMC_CLC_ACCEPT;
len = smc_clc_send_confirm_accept(new_smc, &aclc_v2, srv_first_contact,
- version, negotiated_eid);
+ version, negotiated_eid, NULL);
if (len < ntohs(aclc_v2.hdr.length))
len = len >= 0 ? -EPROTO : -new_smc->clcsock->sk->sk_err;
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 974d01d16bb5..83f02f131fc0 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -44,6 +44,7 @@
#define SMC_CLC_DECL_NOV2DEXT 0x03030005 /* peer sent no clc SMC-Dv2 ext. */
#define SMC_CLC_DECL_NOSEID 0x03030006 /* peer sent no SEID */
#define SMC_CLC_DECL_NOSMCD2DEV 0x03030007 /* no SMC-Dv2 device found */
+#define SMC_CLC_DECL_NOUEID 0x03030008 /* peer sent no UEID */
#define SMC_CLC_DECL_MODEUNSUPP 0x03040000 /* smc modes do not match (R or D)*/
#define SMC_CLC_DECL_RMBE_EC 0x03050000 /* peer has eyecatcher in RMBE */
#define SMC_CLC_DECL_OPTUNSUPP 0x03060000 /* fastopen sockopt not supported */
@@ -54,6 +55,8 @@
#define SMC_CLC_DECL_NOSRVLINK 0x030b0000 /* SMC-R link from srv not found */
#define SMC_CLC_DECL_VERSMISMAT 0x030c0000 /* SMC version mismatch */
#define SMC_CLC_DECL_MAX_DMB 0x030d0000 /* SMC-D DMB limit exceeded */
+#define SMC_CLC_DECL_NOROUTE 0x030e0000 /* SMC-Rv2 conn. no route to peer */
+#define SMC_CLC_DECL_NOINDIRECT 0x030f0000 /* SMC-Rv2 conn. indirect mismatch*/
#define SMC_CLC_DECL_SYNCERR 0x04000000 /* synchronization error */
#define SMC_CLC_DECL_PEERDECL 0x05000000 /* peer declined during handshake */
#define SMC_CLC_DECL_INTERR 0x09990000 /* internal error */
@@ -213,11 +216,14 @@ struct smcd_clc_msg_accept_confirm_common { /* SMCD accept/confirm */
#define SMC_CLC_OS_AIX 3
struct smc_clc_first_contact_ext {
- u8 reserved1;
#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 v2_direct : 1,
+ reserved : 7;
u8 os_type : 4,
release : 4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 reserved : 7,
+ v2_direct : 1;
u8 release : 4,
os_type : 4;
#endif
@@ -225,6 +231,13 @@ struct smc_clc_first_contact_ext {
u8 hostname[SMC_MAX_HOSTNAME_LEN];
};
+struct smc_clc_fce_gid_ext {
+ u8 reserved[16];
+ u8 gid_cnt;
+ u8 reserved2[3];
+ u8 gid[][SMC_GID_SIZE];
+};
+
struct smc_clc_msg_accept_confirm { /* clc accept / confirm message */
struct smc_clc_msg_hdr hdr;
union {
@@ -239,13 +252,17 @@ struct smc_clc_msg_accept_confirm { /* clc accept / confirm message */
struct smc_clc_msg_accept_confirm_v2 { /* clc accept / confirm message */
struct smc_clc_msg_hdr hdr;
union {
- struct smcr_clc_msg_accept_confirm r0; /* SMC-R */
+ struct { /* SMC-R */
+ struct smcr_clc_msg_accept_confirm r0;
+ u8 eid[SMC_MAX_EID_LEN];
+ u8 reserved6[8];
+ } r1;
struct { /* SMC-D */
struct smcd_clc_msg_accept_confirm_common d0;
__be16 chid;
u8 eid[SMC_MAX_EID_LEN];
u8 reserved5[8];
- };
+ } d1;
};
};
@@ -264,6 +281,24 @@ struct smc_clc_msg_decline { /* clc decline message */
struct smc_clc_msg_trail trl; /* eye catcher "SMCD" or "SMCR" EBCDIC */
} __aligned(4);
+#define SMC_DECL_DIAG_COUNT_V2 4 /* no. of additional peer diagnosis codes */
+
+struct smc_clc_msg_decline_v2 { /* clc decline message */
+ struct smc_clc_msg_hdr hdr;
+ u8 id_for_peer[SMC_SYSTEMID_LEN]; /* sender peer_id */
+ __be32 peer_diagnosis; /* diagnosis information */
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 os_type : 4,
+ reserved : 4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 reserved : 4,
+ os_type : 4;
+#endif
+ u8 reserved2[3];
+ __be32 peer_diagnosis_v2[SMC_DECL_DIAG_COUNT_V2];
+ struct smc_clc_msg_trail trl; /* eye catcher "SMCD" or "SMCR" EBCDIC */
+} __aligned(4);
+
/* determine start of the prefix area within the proposal message */
static inline struct smc_clc_msg_proposal_prefix *
smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc)
@@ -282,6 +317,17 @@ static inline bool smcd_indicated(int smc_type)
return smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B;
}
+static inline u8 smc_indicated_type(int is_smcd, int is_smcr)
+{
+ if (is_smcd && is_smcr)
+ return SMC_TYPE_B;
+ if (is_smcd)
+ return SMC_TYPE_D;
+ if (is_smcr)
+ return SMC_TYPE_R;
+ return SMC_TYPE_N;
+}
+
/* get SMC-D info from proposal message */
static inline struct smc_clc_msg_smcd *
smc_get_clc_msg_smcd(struct smc_clc_msg_proposal *prop)
@@ -334,7 +380,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, u8 version);
int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini);
int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact,
- u8 version, u8 *eid);
+ u8 version, u8 *eid, struct smc_init_info *ini);
int smc_clc_send_accept(struct smc_sock *smc, bool srv_first_contact,
u8 version, u8 *negotiated_eid);
void smc_clc_init(void) __init;
@@ -343,6 +389,7 @@ void smc_clc_get_hostname(u8 **host);
bool smc_clc_match_eid(u8 *negotiated_eid,
struct smc_clc_v2_extension *smc_v2_ext,
u8 *peer_eid, u8 *local_eid);
+int smc_clc_ueid_count(void);
int smc_nl_dump_ueid(struct sk_buff *skb, struct netlink_callback *cb);
int smc_nl_add_ueid(struct sk_buff *skb, struct genl_info *info);
int smc_nl_remove_ueid(struct sk_buff *skb, struct genl_info *info);
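
Because smc_clc_fce_gid_ext ends in a flexible array, its on-wire length is the 20-byte fixed part plus one 16-byte entry per GID, which is exactly what the confirm sender computes as gle.gid_cnt * sizeof(gle.gid[0]). A compilable illustration of that sizing, rebuilding the header's struct in user space:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SMC_GID_SIZE 16

struct smc_clc_fce_gid_ext {
	uint8_t reserved[16];
	uint8_t gid_cnt;
	uint8_t reserved2[3];
	uint8_t gid[][SMC_GID_SIZE];	/* flexible array member */
};

static size_t fce_gid_ext_len(uint8_t gid_cnt)
{
	/* fixed header plus gid_cnt trailing GID entries */
	return offsetof(struct smc_clc_fce_gid_ext, gid) +
	       (size_t)gid_cnt * SMC_GID_SIZE;
}

int main(void)
{
	printf("0 gids -> %zu bytes\n", fce_gid_ext_len(0));	/* 20 */
	printf("2 gids -> %zu bytes\n", fce_gid_ext_len(2));	/* 52 */
	return 0;
}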
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 4d0fcd8f8eba..8e642f8f334f 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -244,6 +244,8 @@ int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
goto errattr;
if (nla_put_u8(skb, SMC_NLA_SYS_IS_ISM_V2, smc_ism_is_v2_capable()))
goto errattr;
+ if (nla_put_u8(skb, SMC_NLA_SYS_IS_SMCR_V2, true))
+ goto errattr;
smc_clc_get_hostname(&host);
if (host) {
memcpy(hostname, host, SMC_MAX_HOSTNAME_LEN);
@@ -271,12 +273,65 @@ errmsg:
return skb->len;
}
+/* Fill SMC_NLA_LGR_D_V2_COMMON/SMC_NLA_LGR_R_V2_COMMON nested attributes */
+static int smc_nl_fill_lgr_v2_common(struct smc_link_group *lgr,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct nlattr *v2_attrs)
+{
+ char smc_host[SMC_MAX_HOSTNAME_LEN + 1];
+ char smc_eid[SMC_MAX_EID_LEN + 1];
+
+ if (nla_put_u8(skb, SMC_NLA_LGR_V2_VER, lgr->smc_version))
+ goto errv2attr;
+ if (nla_put_u8(skb, SMC_NLA_LGR_V2_REL, lgr->peer_smc_release))
+ goto errv2attr;
+ if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os))
+ goto errv2attr;
+ memcpy(smc_host, lgr->peer_hostname, SMC_MAX_HOSTNAME_LEN);
+ smc_host[SMC_MAX_HOSTNAME_LEN] = 0;
+ if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host))
+ goto errv2attr;
+ memcpy(smc_eid, lgr->negotiated_eid, SMC_MAX_EID_LEN);
+ smc_eid[SMC_MAX_EID_LEN] = 0;
+ if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid))
+ goto errv2attr;
+
+ nla_nest_end(skb, v2_attrs);
+ return 0;
+
+errv2attr:
+ nla_nest_cancel(skb, v2_attrs);
+ return -EMSGSIZE;
+}
+
+static int smc_nl_fill_smcr_lgr_v2(struct smc_link_group *lgr,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct nlattr *v2_attrs;
+
+ v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2);
+ if (!v2_attrs)
+ goto errattr;
+ if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_DIRECT, !lgr->uses_gateway))
+ goto errv2attr;
+
+ nla_nest_end(skb, v2_attrs);
+ return 0;
+
+errv2attr:
+ nla_nest_cancel(skb, v2_attrs);
+errattr:
+ return -EMSGSIZE;
+}
+
static int smc_nl_fill_lgr(struct smc_link_group *lgr,
struct sk_buff *skb,
struct netlink_callback *cb)
{
char smc_target[SMC_MAX_PNETID_LEN + 1];
- struct nlattr *attrs;
+ struct nlattr *attrs, *v2_attrs;
attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCR);
if (!attrs)
@@ -296,6 +351,15 @@ static int smc_nl_fill_lgr(struct smc_link_group *lgr,
smc_target[SMC_MAX_PNETID_LEN] = 0;
if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target))
goto errattr;
+ if (lgr->smc_version > SMC_V1) {
+ v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2_COMMON);
+ if (!v2_attrs)
+ goto errattr;
+ if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs))
+ goto errattr;
+ if (smc_nl_fill_smcr_lgr_v2(lgr, skb, cb))
+ goto errattr;
+ }
nla_nest_end(skb, attrs);
return 0;
@@ -428,10 +492,7 @@ static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
struct sk_buff *skb,
struct netlink_callback *cb)
{
- char smc_host[SMC_MAX_HOSTNAME_LEN + 1];
char smc_pnet[SMC_MAX_PNETID_LEN + 1];
- char smc_eid[SMC_MAX_EID_LEN + 1];
- struct nlattr *v2_attrs;
struct nlattr *attrs;
void *nlh;
@@ -463,32 +524,19 @@ static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
smc_pnet[SMC_MAX_PNETID_LEN] = 0;
if (nla_put_string(skb, SMC_NLA_LGR_D_PNETID, smc_pnet))
goto errattr;
+ if (lgr->smc_version > SMC_V1) {
+ struct nlattr *v2_attrs;
- v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_V2);
- if (!v2_attrs)
- goto errattr;
- if (nla_put_u8(skb, SMC_NLA_LGR_V2_VER, lgr->smc_version))
- goto errv2attr;
- if (nla_put_u8(skb, SMC_NLA_LGR_V2_REL, lgr->peer_smc_release))
- goto errv2attr;
- if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os))
- goto errv2attr;
- memcpy(smc_host, lgr->peer_hostname, SMC_MAX_HOSTNAME_LEN);
- smc_host[SMC_MAX_HOSTNAME_LEN] = 0;
- if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host))
- goto errv2attr;
- memcpy(smc_eid, lgr->negotiated_eid, SMC_MAX_EID_LEN);
- smc_eid[SMC_MAX_EID_LEN] = 0;
- if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid))
- goto errv2attr;
-
- nla_nest_end(skb, v2_attrs);
+ v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_D_V2_COMMON);
+ if (!v2_attrs)
+ goto errattr;
+ if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs))
+ goto errattr;
+ }
nla_nest_end(skb, attrs);
genlmsg_end(skb, nlh);
return 0;
-errv2attr:
- nla_nest_cancel(skb, v2_attrs);
errattr:
nla_nest_cancel(skb, attrs);
errout:
@@ -684,24 +732,30 @@ static void smcr_copy_dev_info_to_link(struct smc_link *link)
int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
u8 link_idx, struct smc_init_info *ini)
{
+ struct smc_ib_device *smcibdev;
u8 rndvec[3];
int rc;
- get_device(&ini->ib_dev->ibdev->dev);
- atomic_inc(&ini->ib_dev->lnk_cnt);
+ if (lgr->smc_version == SMC_V2) {
+ lnk->smcibdev = ini->smcrv2.ib_dev_v2;
+ lnk->ibport = ini->smcrv2.ib_port_v2;
+ } else {
+ lnk->smcibdev = ini->ib_dev;
+ lnk->ibport = ini->ib_port;
+ }
+ get_device(&lnk->smcibdev->ibdev->dev);
+ atomic_inc(&lnk->smcibdev->lnk_cnt);
+ lnk->path_mtu = lnk->smcibdev->pattr[lnk->ibport - 1].active_mtu;
lnk->link_id = smcr_next_link_id(lgr);
lnk->lgr = lgr;
lnk->link_idx = link_idx;
- lnk->smcibdev = ini->ib_dev;
- lnk->ibport = ini->ib_port;
smc_ibdev_cnt_inc(lnk);
smcr_copy_dev_info_to_link(lnk);
- lnk->path_mtu = ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
atomic_set(&lnk->conn_cnt, 0);
smc_llc_link_set_uid(lnk);
INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
- if (!ini->ib_dev->initialized) {
- rc = (int)smc_ib_setup_per_ibdev(ini->ib_dev);
+ if (!lnk->smcibdev->initialized) {
+ rc = (int)smc_ib_setup_per_ibdev(lnk->smcibdev);
if (rc)
goto out;
}
@@ -709,7 +763,9 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
(rndvec[2] << 16);
rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
- ini->vlan_id, lnk->gid, &lnk->sgid_index);
+ ini->vlan_id, lnk->gid, &lnk->sgid_index,
+ lgr->smc_version == SMC_V2 ?
+ &ini->smcrv2 : NULL);
if (rc)
goto out;
rc = smc_llc_link_init(lnk);
@@ -740,11 +796,12 @@ clear_llc_lnk:
smc_llc_link_clear(lnk, false);
out:
smc_ibdev_cnt_dec(lnk);
- put_device(&ini->ib_dev->ibdev->dev);
+ put_device(&lnk->smcibdev->ibdev->dev);
+ smcibdev = lnk->smcibdev;
memset(lnk, 0, sizeof(struct smc_link));
lnk->state = SMC_LNK_UNUSED;
- if (!atomic_dec_return(&ini->ib_dev->lnk_cnt))
- wake_up(&ini->ib_dev->lnks_deleted);
+ if (!atomic_dec_return(&smcibdev->lnk_cnt))
+ wake_up(&smcibdev->lnks_deleted);
return rc;
}
@@ -808,18 +865,37 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt);
} else {
/* SMC-R specific settings */
+ struct smc_ib_device *ibdev;
+ int ibport;
+
lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
- memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
+ lgr->smc_version = ini->smcr_version;
+ memcpy(lgr->peer_systemid, ini->peer_systemid,
SMC_SYSTEMID_LEN);
- memcpy(lgr->pnet_id, ini->ib_dev->pnetid[ini->ib_port - 1],
+ if (lgr->smc_version == SMC_V2) {
+ ibdev = ini->smcrv2.ib_dev_v2;
+ ibport = ini->smcrv2.ib_port_v2;
+ lgr->saddr = ini->smcrv2.saddr;
+ lgr->uses_gateway = ini->smcrv2.uses_gateway;
+ memcpy(lgr->nexthop_mac, ini->smcrv2.nexthop_mac,
+ ETH_ALEN);
+ } else {
+ ibdev = ini->ib_dev;
+ ibport = ini->ib_port;
+ }
+ memcpy(lgr->pnet_id, ibdev->pnetid[ibport - 1],
SMC_MAX_PNETID_LEN);
+ if (smc_wr_alloc_lgr_mem(lgr))
+ goto free_wq;
smc_llc_lgr_init(lgr, smc);
link_idx = SMC_SINGLE_LINK;
lnk = &lgr->lnk[link_idx];
rc = smcr_link_init(lgr, lnk, link_idx, ini);
- if (rc)
+ if (rc) {
+ smc_wr_free_lgr_mem(lgr);
goto free_wq;
+ }
lgr_list = &smc_lgr_list.list;
lgr_lock = &smc_lgr_list.lock;
atomic_inc(&lgr_cnt);
@@ -943,7 +1019,7 @@ struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
to_lnk = &lgr->lnk[i];
break;
}
- if (!to_lnk) {
+ if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
smc_lgr_terminate_sched(lgr);
return NULL;
}
@@ -975,24 +1051,26 @@ again:
read_unlock_bh(&lgr->conns_lock);
/* pre-fetch buffer outside of send_lock, might sleep */
rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
- if (rc) {
- smcr_link_down_cond_sched(to_lnk);
- return NULL;
- }
+ if (rc)
+ goto err_out;
/* avoid race with smcr_tx_sndbuf_nonempty() */
spin_lock_bh(&conn->send_lock);
smc_switch_link_and_count(conn, to_lnk);
rc = smc_switch_cursor(smc, pend, wr_buf);
spin_unlock_bh(&conn->send_lock);
sock_put(&smc->sk);
- if (rc) {
- smcr_link_down_cond_sched(to_lnk);
- return NULL;
- }
+ if (rc)
+ goto err_out;
goto again;
}
read_unlock_bh(&lgr->conns_lock);
+ smc_wr_tx_link_put(to_lnk);
return to_lnk;
+
+err_out:
+ smcr_link_down_cond_sched(to_lnk);
+ smc_wr_tx_link_put(to_lnk);
+ return NULL;
}
static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
@@ -1224,6 +1302,7 @@ static void smc_lgr_free(struct smc_link_group *lgr)
if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
wake_up(&lgr->smcd->lgrs_deleted);
} else {
+ smc_wr_free_lgr_mem(lgr);
if (!atomic_dec_return(&lgr_cnt))
wake_up(&lgrs_deleted);
}
@@ -1468,7 +1547,9 @@ static void smc_conn_abort_work(struct work_struct *work)
abort_work);
struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+ lock_sock(&smc->sk);
smc_conn_kill(conn, true);
+ release_sock(&smc->sk);
sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
}
@@ -1632,13 +1713,15 @@ out:
return rc;
}
-static bool smcr_lgr_match(struct smc_link_group *lgr,
- struct smc_clc_msg_local *lcl,
+static bool smcr_lgr_match(struct smc_link_group *lgr, u8 smcr_version,
+ u8 peer_systemid[],
+ u8 peer_gid[],
+ u8 peer_mac_v1[],
enum smc_lgr_role role, u32 clcqpn)
{
int i;
- if (memcmp(lgr->peer_systemid, lcl->id_for_peer, SMC_SYSTEMID_LEN) ||
+ if (memcmp(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN) ||
lgr->role != role)
return false;
@@ -1646,8 +1729,9 @@ static bool smcr_lgr_match(struct smc_link_group *lgr,
if (!smc_link_active(&lgr->lnk[i]))
continue;
if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) &&
- !memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) &&
- !memcmp(lgr->lnk[i].peer_mac, lcl->mac, sizeof(lcl->mac)))
+ !memcmp(lgr->lnk[i].peer_gid, peer_gid, SMC_GID_SIZE) &&
+ (smcr_version == SMC_V2 ||
+ !memcmp(lgr->lnk[i].peer_mac, peer_mac_v1, ETH_ALEN)))
return true;
}
return false;
@@ -1686,7 +1770,10 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
if ((ini->is_smcd ?
smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected],
ini->ism_peer_gid[ini->ism_selected]) :
- smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
+ smcr_lgr_match(lgr, ini->smcr_version,
+ ini->peer_systemid,
+ ini->peer_gid, ini->peer_mac, role,
+ ini->ib_clcqpn)) &&
!lgr->sync_err &&
(ini->smcd_version == SMC_V2 ||
lgr->vlan_id == ini->vlan_id) &&
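
The reworked smcr_lgr_match drops the peer-MAC comparison for v2 link groups: once a gateway may sit on the path, MAC addresses are hop-local and cannot identify the peer, so only system ID, GID and (on the client side) the QP number remain part of link identity. A condensed sketch of the predicate, with stub types in place of the kernel structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SMC_V2 2
#define SMC_GID_SIZE 16
#define ETH_ALEN 6

struct peer_link { uint8_t gid[SMC_GID_SIZE]; uint8_t mac[ETH_ALEN]; };

static bool link_matches(const struct peer_link *lnk, uint8_t smcr_version,
			 const uint8_t *peer_gid, const uint8_t *peer_mac_v1)
{
	if (memcmp(lnk->gid, peer_gid, SMC_GID_SIZE))
		return false;
	/* v2 ignores the MAC; v1 still requires it to match */
	return smcr_version == SMC_V2 ||
	       !memcmp(lnk->mac, peer_mac_v1, ETH_ALEN);
}

int main(void)
{
	struct peer_link lnk = { .gid = {1}, .mac = {2} };
	uint8_t gid[SMC_GID_SIZE] = {1}, other_mac[ETH_ALEN] = {9};

	printf("v2: %d, v1: %d\n",
	       link_matches(&lnk, SMC_V2, gid, other_mac),
	       link_matches(&lnk, 1, gid, other_mac));
	return 0;
}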
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 83d30b06016f..59cef3b830d8 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -42,11 +42,16 @@ enum smc_link_state { /* possible states of a link */
};
#define SMC_WR_BUF_SIZE 48 /* size of work request buffer */
+#define SMC_WR_BUF_V2_SIZE 8192 /* size of v2 work request buffer */
struct smc_wr_buf {
u8 raw[SMC_WR_BUF_SIZE];
};
+struct smc_wr_v2_buf {
+ u8 raw[SMC_WR_BUF_V2_SIZE];
+};
+
#define SMC_WR_REG_MR_WAIT_TIME (5 * HZ)/* wait time for ib_wr_reg_mr result */
enum smc_wr_reg_state {
@@ -92,7 +97,11 @@ struct smc_link {
struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */
struct completion *wr_tx_compl; /* WR send CQE completion */
/* above four vectors have wr_tx_cnt elements and use the same index */
+ struct ib_send_wr *wr_tx_v2_ib; /* WR send v2 meta data */
+ struct ib_sge *wr_tx_v2_sge; /* WR send v2 gather meta data */
+ struct smc_wr_tx_pend *wr_tx_v2_pend; /* WR send v2 waiting for CQE */
dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */
+ dma_addr_t wr_tx_v2_dma_addr; /* DMA address of v2 tx buf */
atomic_long_t wr_tx_id; /* seq # of last sent WR */
unsigned long *wr_tx_mask; /* bit mask of used indexes */
u32 wr_tx_cnt; /* number of WR send buffers */
@@ -104,6 +113,7 @@ struct smc_link {
struct ib_sge *wr_rx_sges; /* WR recv scatter meta data */
/* above three vectors have wr_rx_cnt elements and use the same index */
dma_addr_t wr_rx_dma_addr; /* DMA address of wr_rx_bufs */
+ dma_addr_t wr_rx_v2_dma_addr; /* DMA address of v2 rx buf */
u64 wr_rx_id; /* seq # of last recv WR */
u32 wr_rx_cnt; /* number of WR recv buffers */
unsigned long wr_rx_tstamp; /* jiffies when last buf rx */
@@ -208,6 +218,7 @@ enum smc_llc_flowtype {
SMC_LLC_FLOW_NONE = 0,
SMC_LLC_FLOW_ADD_LINK = 2,
SMC_LLC_FLOW_DEL_LINK = 4,
+ SMC_LLC_FLOW_REQ_ADD_LINK = 5,
SMC_LLC_FLOW_RKEY = 6,
};
@@ -250,6 +261,10 @@ struct smc_link_group {
/* client or server */
struct smc_link lnk[SMC_LINKS_PER_LGR_MAX];
/* smc link */
+ struct smc_wr_v2_buf *wr_rx_buf_v2;
+ /* WR v2 recv payload buffer */
+ struct smc_wr_v2_buf *wr_tx_buf_v2;
+ /* WR v2 send payload buffer */
char peer_systemid[SMC_SYSTEMID_LEN];
/* unique system_id of peer */
struct smc_rtoken rtokens[SMC_RMBS_PER_LGR_MAX]
@@ -288,6 +303,9 @@ struct smc_link_group {
/* link keep alive time */
u32 llc_termination_rsn;
/* rsn code for termination */
+ u8 nexthop_mac[ETH_ALEN];
+ u8 uses_gateway;
+ __be32 saddr;
};
struct { /* SMC-D */
u64 peer_gid;
@@ -302,6 +320,31 @@ struct smc_link_group {
struct smc_clc_msg_local;
+#define GID_LIST_SIZE 2
+
+struct smc_gidlist {
+ u8 len;
+ u8 list[GID_LIST_SIZE][SMC_GID_SIZE];
+};
+
+struct smc_init_info_smcrv2 {
+ /* Input fields */
+ __be32 saddr;
+ struct sock *clc_sk;
+ __be32 daddr;
+
+ /* Output fields when saddr is set */
+ struct smc_ib_device *ib_dev_v2;
+ u8 ib_port_v2;
+ u8 ib_gid_v2[SMC_GID_SIZE];
+
+ /* Additional output fields when clc_sk and daddr is set as well */
+ u8 uses_gateway;
+ u8 nexthop_mac[ETH_ALEN];
+
+ struct smc_gidlist gidlist;
+};
+
struct smc_init_info {
u8 is_smcd;
u8 smc_type_v1;
@@ -312,11 +355,16 @@ struct smc_init_info {
u32 rc;
u8 negotiated_eid[SMC_MAX_EID_LEN];
/* SMC-R */
- struct smc_clc_msg_local *ib_lcl;
+ u8 smcr_version;
+ u8 check_smcrv2;
+ u8 peer_gid[SMC_GID_SIZE];
+ u8 peer_mac[ETH_ALEN];
+ u8 peer_systemid[SMC_SYSTEMID_LEN];
struct smc_ib_device *ib_dev;
u8 ib_gid[SMC_GID_SIZE];
u8 ib_port;
u32 ib_clcqpn;
+ struct smc_init_info_smcrv2 smcrv2;
/* SMC-D */
u64 ism_peer_gid[SMC_MAX_ISM_DEVS + 1];
struct smcd_dev *ism_dev[SMC_MAX_ISM_DEVS + 1];
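
struct smc_init_info_smcrv2 works as an in/out parameter block: callers seed saddr (plus clc_sk and daddr when a route check is wanted), and the device scan fills in the chosen v2 device, port, GID and gateway result. A hypothetical caller-side sketch of that contract; find_v2_device() is an invented stand-in, not a kernel function:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SMC_GID_SIZE 16
#define ETH_ALEN 6

struct smcrv2_info {			/* mirrors smc_init_info_smcrv2 */
	uint32_t saddr, daddr;		/* inputs, network byte order */
	uint8_t ib_port_v2;		/* output when saddr is set */
	uint8_t ib_gid_v2[SMC_GID_SIZE];
	uint8_t uses_gateway;		/* extra outputs when daddr is set */
	uint8_t nexthop_mac[ETH_ALEN];
};

static int find_v2_device(struct smcrv2_info *info)	/* hypothetical */
{
	if (!info->saddr)
		return -1;		/* v2 lookup needs a source IP */
	info->ib_port_v2 = 1;		/* pretend port 1 matched the subnet */
	if (info->daddr) {		/* route lookup was also requested */
		info->uses_gateway = 1;
		memset(info->nexthop_mac, 0xaa, ETH_ALEN);
	}
	return 0;
}

int main(void)
{
	struct smcrv2_info info = { .saddr = 1, .daddr = 2 }; /* dummy addrs */

	if (!find_v2_device(&info))
		printf("port %u, uses_gateway %u\n",
		       info.ib_port_v2, info.uses_gateway);
	return 0;
}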
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index a8845343d183..d93055ec17ae 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -17,6 +17,7 @@
#include <linux/scatterlist.h>
#include <linux/wait.h>
#include <linux/mutex.h>
+#include <linux/inetdevice.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
@@ -62,16 +63,23 @@ static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
struct ib_qp_attr qp_attr;
+ u8 hop_lim = 1;
memset(&qp_attr, 0, sizeof(qp_attr));
qp_attr.qp_state = IB_QPS_RTR;
qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
- rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, 1, 0);
+ if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway)
+ hop_lim = IPV6_DEFAULT_HOPLIMIT;
+ rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, hop_lim, 0);
rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
- memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
- sizeof(lnk->peer_mac));
+ if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway)
+ memcpy(&qp_attr.ah_attr.roce.dmac, lnk->lgr->nexthop_mac,
+ sizeof(lnk->lgr->nexthop_mac));
+ else
+ memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
+ sizeof(lnk->peer_mac));
qp_attr.dest_qp_num = lnk->peer_qpn;
qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
@@ -183,9 +191,81 @@ bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}
+int smc_ib_find_route(__be32 saddr, __be32 daddr,
+ u8 nexthop_mac[], u8 *uses_gateway)
+{
+ struct neighbour *neigh = NULL;
+ struct rtable *rt = NULL;
+ struct flowi4 fl4 = {
+ .saddr = saddr,
+ .daddr = daddr
+ };
+
+ if (daddr == cpu_to_be32(INADDR_NONE))
+ goto out;
+ rt = ip_route_output_flow(&init_net, &fl4, NULL);
+ if (IS_ERR(rt))
+ goto out;
+ if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET)
+ goto out;
+ neigh = rt->dst.ops->neigh_lookup(&rt->dst, NULL, &fl4.daddr);
+ if (neigh) {
+ memcpy(nexthop_mac, neigh->ha, ETH_ALEN);
+ *uses_gateway = rt->rt_uses_gateway;
+ return 0;
+ }
+out:
+ return -ENOENT;
+}
+
+static int smc_ib_determine_gid_rcu(const struct net_device *ndev,
+ const struct ib_gid_attr *attr,
+ u8 gid[], u8 *sgid_index,
+ struct smc_init_info_smcrv2 *smcrv2)
+{
+ if (!smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE) {
+ if (gid)
+ memcpy(gid, &attr->gid, SMC_GID_SIZE);
+ if (sgid_index)
+ *sgid_index = attr->index;
+ return 0;
+ }
+ if (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
+ smc_ib_gid_to_ipv4((u8 *)&attr->gid) != cpu_to_be32(INADDR_NONE)) {
+ struct in_device *in_dev = __in_dev_get_rcu(ndev);
+ const struct in_ifaddr *ifa;
+ bool subnet_match = false;
+
+ if (!in_dev)
+ goto out;
+ in_dev_for_each_ifa_rcu(ifa, in_dev) {
+ if (!inet_ifa_match(smcrv2->saddr, ifa))
+ continue;
+ subnet_match = true;
+ break;
+ }
+ if (!subnet_match)
+ goto out;
+ if (smcrv2->daddr && smc_ib_find_route(smcrv2->saddr,
+ smcrv2->daddr,
+ smcrv2->nexthop_mac,
+ &smcrv2->uses_gateway))
+ goto out;
+
+ if (gid)
+ memcpy(gid, &attr->gid, SMC_GID_SIZE);
+ if (sgid_index)
+ *sgid_index = attr->index;
+ return 0;
+ }
+out:
+ return -ENODEV;
+}
+
/* determine the gid for an ib-device port and vlan id */
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
- unsigned short vlan_id, u8 gid[], u8 *sgid_index)
+ unsigned short vlan_id, u8 gid[], u8 *sgid_index,
+ struct smc_init_info_smcrv2 *smcrv2)
{
const struct ib_gid_attr *attr;
const struct net_device *ndev;
@@ -201,15 +281,13 @@ int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
if (!IS_ERR(ndev) &&
((!vlan_id && !is_vlan_dev(ndev)) ||
(vlan_id && is_vlan_dev(ndev) &&
- vlan_dev_vlan_id(ndev) == vlan_id)) &&
- attr->gid_type == IB_GID_TYPE_ROCE) {
- rcu_read_unlock();
- if (gid)
- memcpy(gid, &attr->gid, SMC_GID_SIZE);
- if (sgid_index)
- *sgid_index = attr->index;
- rdma_put_gid_attr(attr);
- return 0;
+ vlan_dev_vlan_id(ndev) == vlan_id))) {
+ if (!smc_ib_determine_gid_rcu(ndev, attr, gid,
+ sgid_index, smcrv2)) {
+ rcu_read_unlock();
+ rdma_put_gid_attr(attr);
+ return 0;
+ }
}
rcu_read_unlock();
rdma_put_gid_attr(attr);
@@ -217,6 +295,58 @@ int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
return -ENODEV;
}
+/* check if gid is still defined on smcibdev */
+static bool smc_ib_check_link_gid(u8 gid[SMC_GID_SIZE], bool smcrv2,
+ struct smc_ib_device *smcibdev, u8 ibport)
+{
+ const struct ib_gid_attr *attr;
+ bool rc = false;
+ int i;
+
+ for (i = 0; !rc && i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
+ attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
+ if (IS_ERR(attr))
+ continue;
+
+ rcu_read_lock();
+ if ((!smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE) ||
+ (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
+ !(ipv6_addr_type((const struct in6_addr *)&attr->gid)
+ & IPV6_ADDR_LINKLOCAL)))
+ if (!memcmp(gid, &attr->gid, SMC_GID_SIZE))
+ rc = true;
+ rcu_read_unlock();
+ rdma_put_gid_attr(attr);
+ }
+ return rc;
+}
+
+/* check all links if the gid is still defined on smcibdev */
+static void smc_ib_gid_check(struct smc_ib_device *smcibdev, u8 ibport)
+{
+ struct smc_link_group *lgr;
+ int i;
+
+ spin_lock_bh(&smc_lgr_list.lock);
+ list_for_each_entry(lgr, &smc_lgr_list.list, list) {
+ if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
+ SMC_MAX_PNETID_LEN))
+ continue; /* lgr is not affected */
+ if (list_empty(&lgr->list))
+ continue;
+ for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+ if (lgr->lnk[i].state == SMC_LNK_UNUSED ||
+ lgr->lnk[i].smcibdev != smcibdev)
+ continue;
+ if (!smc_ib_check_link_gid(lgr->lnk[i].gid,
+ lgr->smc_version == SMC_V2,
+ smcibdev, ibport))
+ smcr_port_err(smcibdev, ibport);
+ }
+ }
+ spin_unlock_bh(&smc_lgr_list.lock);
+}
+
static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
int rc;
@@ -255,6 +385,7 @@ static void smc_ib_port_event_work(struct work_struct *work)
} else {
clear_bit(port_idx, smcibdev->ports_going_away);
smcr_port_add(smcibdev, port_idx + 1);
+ smc_ib_gid_check(smcibdev, port_idx + 1);
}
}
}
@@ -523,6 +654,7 @@ void smc_ib_destroy_queue_pair(struct smc_link *lnk)
/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
+ int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1;
struct ib_qp_init_attr qp_attr = {
.event_handler = smc_ib_qp_event_handler,
.qp_context = lnk,
@@ -536,7 +668,7 @@ int smc_ib_create_queue_pair(struct smc_link *lnk)
.max_send_wr = SMC_WR_BUF_CNT * 3,
.max_recv_wr = SMC_WR_BUF_CNT * 3,
.max_send_sge = SMC_IB_MAX_SEND_SGE,
- .max_recv_sge = 1,
+ .max_recv_sge = sges_per_buf,
},
.sq_sig_type = IB_SIGNAL_REQ_WR,
.qp_type = IB_QPT_RC,
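
Two address-handle parameters flip together for routed v2 links in smc_ib_modify_qp_rtr: the hop limit rises from 1 to IPV6_DEFAULT_HOPLIMIT (64), and the destination MAC becomes the gateway's next-hop MAC rather than the peer's. A reduced sketch of that selection, with a stub struct standing in for ib_qp_attr:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define IPV6_DEFAULT_HOPLIMIT 64
#define SMC_V2 2

struct ah_params { uint8_t hop_lim; uint8_t dmac[ETH_ALEN]; };

static void fill_ah(struct ah_params *ah, uint8_t smc_version,
		    int uses_gateway, const uint8_t *nexthop_mac,
		    const uint8_t *peer_mac)
{
	int routed = (smc_version == SMC_V2) && uses_gateway;

	/* direct links stay single-hop; routed links must survive routers */
	ah->hop_lim = routed ? IPV6_DEFAULT_HOPLIMIT : 1;
	memcpy(ah->dmac, routed ? nexthop_mac : peer_mac, ETH_ALEN);
}

int main(void)
{
	uint8_t gw[ETH_ALEN] = {0xaa}, peer[ETH_ALEN] = {0xbb};
	struct ah_params ah;

	fill_ah(&ah, SMC_V2, 1, gw, peer);
	printf("hop_lim=%u dmac[0]=%02x\n", ah.hop_lim, ah.dmac[0]);
	return 0;
}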
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 3085f5180da7..07585937370e 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -59,6 +59,17 @@ struct smc_ib_device { /* ib-device infos for smc */
int ndev_ifidx[SMC_MAX_PORTS]; /* ndev if indexes */
};
+static inline __be32 smc_ib_gid_to_ipv4(u8 gid[SMC_GID_SIZE])
+{
+ struct in6_addr *addr6 = (struct in6_addr *)gid;
+
+ if (ipv6_addr_v4mapped(addr6) ||
+ !(addr6->s6_addr32[0] | addr6->s6_addr32[1] | addr6->s6_addr32[2]))
+ return addr6->s6_addr32[3];
+ return cpu_to_be32(INADDR_NONE);
+}
+
+struct smc_init_info_smcrv2;
struct smc_buf_desc;
struct smc_link;
@@ -90,7 +101,10 @@ void smc_ib_sync_sg_for_device(struct smc_link *lnk,
struct smc_buf_desc *buf_slot,
enum dma_data_direction data_direction);
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
- unsigned short vlan_id, u8 gid[], u8 *sgid_index);
+ unsigned short vlan_id, u8 gid[], u8 *sgid_index,
+ struct smc_init_info_smcrv2 *smcrv2);
+int smc_ib_find_route(__be32 saddr, __be32 daddr,
+ u8 nexthop_mac[], u8 *uses_gateway);
bool smc_ib_is_valid_local_systemid(void);
int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb);
#endif
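
The new smc_ib_gid_to_ipv4 helper only accepts GIDs that actually encode an IPv4 address, either v4-mapped (::ffff:a.b.c.d) or with an all-zero 96-bit prefix; anything else yields INADDR_NONE. A standalone re-implementation for illustration, using byte comparisons instead of the kernel's s6_addr32 words:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SMC_GID_SIZE 16

static uint32_t gid_to_ipv4(const uint8_t gid[SMC_GID_SIZE])
{
	static const uint8_t v4mapped[12] =
		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff };
	static const uint8_t zeros[12];
	uint32_t v4;

	if (memcmp(gid, v4mapped, 12) && memcmp(gid, zeros, 12))
		return htonl(INADDR_NONE);	/* GID does not carry IPv4 */
	memcpy(&v4, gid + 12, 4);		/* already network order */
	return v4;
}

int main(void)
{
	uint8_t gid[SMC_GID_SIZE] = { [10] = 0xff, [11] = 0xff,
				      [12] = 192, [13] = 0, [14] = 2, [15] = 1 };
	struct in_addr a = { .s_addr = gid_to_ipv4(gid) };

	printf("%s\n", inet_ntoa(a));		/* prints 192.0.2.1 */
	return 0;
}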
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index 2e7560eba981..a9623c952007 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -23,16 +23,24 @@
struct smc_llc_hdr {
struct smc_wr_rx_hdr common;
- u8 length; /* 44 */
-#if defined(__BIG_ENDIAN_BITFIELD)
- u8 reserved:4,
- add_link_rej_rsn:4;
+ union {
+ struct {
+ u8 length; /* 44 */
+ #if defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved:4,
+ add_link_rej_rsn:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u8 add_link_rej_rsn:4,
- reserved:4;
+ u8 add_link_rej_rsn:4,
+ reserved:4;
#endif
+ };
+ u16 length_v2; /* 44 - 8192 */
+ };
u8 flags;
-};
+} __packed; /* format defined in
+ * IBM Shared Memory Communications Version 2
+ * (https://www.ibm.com/support/pages/node/6326337)
+ */
#define SMC_LLC_FLAG_NO_RMBE_EYEC 0x03
@@ -76,6 +84,32 @@ struct smc_llc_msg_add_link_cont_rt {
__be64 rmb_vaddr_new;
};
+struct smc_llc_msg_add_link_v2_ext {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 v2_direct : 1,
+ reserved : 7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 reserved : 7,
+ v2_direct : 1;
+#endif
+ u8 reserved2;
+ u8 client_target_gid[SMC_GID_SIZE];
+ u8 reserved3[8];
+ u16 num_rkeys;
+ struct smc_llc_msg_add_link_cont_rt rt[];
+} __packed; /* format defined in
+ * IBM Shared Memory Communications Version 2
+ * (https://www.ibm.com/support/pages/node/6326337)
+ */
+
+struct smc_llc_msg_req_add_link_v2 {
+ struct smc_llc_hdr hd;
+ u8 reserved[20];
+ u8 gid_cnt;
+ u8 reserved2[3];
+ u8 gid[][SMC_GID_SIZE];
+};
+
#define SMC_LLC_RKEYS_PER_CONT_MSG 2
struct smc_llc_msg_add_link_cont { /* type 0x03 */
@@ -114,7 +148,8 @@ struct smc_rmb_rtoken {
__be64 rmb_vaddr;
} __packed; /* format defined in RFC7609 */
-#define SMC_LLC_RKEYS_PER_MSG 3
+#define SMC_LLC_RKEYS_PER_MSG 3
+#define SMC_LLC_RKEYS_PER_MSG_V2 255
struct smc_llc_msg_confirm_rkey { /* type 0x06 */
struct smc_llc_hdr hd;
@@ -135,9 +170,18 @@ struct smc_llc_msg_delete_rkey { /* type 0x09 */
u8 reserved2[4];
};
+struct smc_llc_msg_delete_rkey_v2 { /* type 0x29 */
+ struct smc_llc_hdr hd;
+ u8 num_rkeys;
+ u8 num_inval_rkeys;
+ u8 reserved[2];
+ __be32 rkey[];
+};
+
union smc_llc_msg {
struct smc_llc_msg_confirm_link confirm_link;
struct smc_llc_msg_add_link add_link;
+ struct smc_llc_msg_req_add_link_v2 req_add_link;
struct smc_llc_msg_add_link_cont add_link_cont;
struct smc_llc_msg_del_link delete_link;
@@ -189,7 +233,7 @@ static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
struct smc_llc_qentry *qentry)
{
- u8 msg_type = qentry->msg.raw.hdr.common.type;
+ u8 msg_type = qentry->msg.raw.hdr.common.llc_type;
if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) &&
flow_type != msg_type && !lgr->delayed_event) {
@@ -219,7 +263,7 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow,
spin_unlock_bh(&lgr->llc_flow_lock);
return false;
}
- switch (qentry->msg.raw.hdr.common.type) {
+ switch (qentry->msg.raw.hdr.common.llc_type) {
case SMC_LLC_ADD_LINK:
flow->type = SMC_LLC_FLOW_ADD_LINK;
break;
@@ -306,7 +350,7 @@ struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
smc_llc_flow_qentry_del(flow);
goto out;
}
- rcv_msg = flow->qentry->msg.raw.hdr.common.type;
+ rcv_msg = flow->qentry->msg.raw.hdr.common.llc_type;
if (exp_msg && rcv_msg != exp_msg) {
if (exp_msg == SMC_LLC_ADD_LINK &&
rcv_msg == SMC_LLC_DELETE_LINK) {
@@ -374,6 +418,30 @@ static int smc_llc_add_pending_send(struct smc_link *link,
return 0;
}
+static int smc_llc_add_pending_send_v2(struct smc_link *link,
+ struct smc_wr_v2_buf **wr_buf,
+ struct smc_wr_tx_pend_priv **pend)
+{
+ int rc;
+
+ rc = smc_wr_tx_get_v2_slot(link, smc_llc_tx_handler, wr_buf, pend);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+
+static void smc_llc_init_msg_hdr(struct smc_llc_hdr *hdr,
+ struct smc_link_group *lgr, size_t len)
+{
+ if (lgr->smc_version == SMC_V2) {
+ hdr->common.llc_version = SMC_V2;
+ hdr->length_v2 = len;
+ } else {
+ hdr->common.llc_version = 0;
+ hdr->length = len;
+ }
+}
+
/* high-level API to send LLC confirm link */
int smc_llc_send_confirm_link(struct smc_link *link,
enum smc_llc_reqresp reqresp)
@@ -383,13 +451,15 @@ int smc_llc_send_confirm_link(struct smc_link *link,
struct smc_wr_buf *wr_buf;
int rc;
+ if (!smc_wr_tx_link_hold(link))
+ return -ENOLINK;
rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
memset(confllc, 0, sizeof(*confllc));
- confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
- confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link);
+ confllc->hd.common.llc_type = SMC_LLC_CONFIRM_LINK;
+ smc_llc_init_msg_hdr(&confllc->hd, link->lgr, sizeof(*confllc));
confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
if (reqresp == SMC_LLC_RESP)
confllc->hd.flags |= SMC_LLC_FLAG_RESP;
@@ -402,6 +472,8 @@ int smc_llc_send_confirm_link(struct smc_link *link,
confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS;
/* send llc message */
rc = smc_wr_tx_send(link, pend);
+put_out:
+ smc_wr_tx_link_put(link);
return rc;
}
@@ -415,13 +487,15 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
struct smc_link *link;
int i, rc, rtok_ix;
+ if (!smc_wr_tx_link_hold(send_link))
+ return -ENOLINK;
rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
memset(rkeyllc, 0, sizeof(*rkeyllc));
- rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY;
- rkeyllc->hd.length = sizeof(struct smc_llc_msg_confirm_rkey);
+ rkeyllc->hd.common.llc_type = SMC_LLC_CONFIRM_RKEY;
+ smc_llc_init_msg_hdr(&rkeyllc->hd, send_link->lgr, sizeof(*rkeyllc));
rtok_ix = 1;
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
@@ -444,6 +518,8 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
(u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl));
/* send llc message */
rc = smc_wr_tx_send(send_link, pend);
+put_out:
+ smc_wr_tx_link_put(send_link);
return rc;
}
@@ -456,38 +532,134 @@ static int smc_llc_send_delete_rkey(struct smc_link *link,
struct smc_wr_buf *wr_buf;
int rc;
+ if (!smc_wr_tx_link_hold(link))
+ return -ENOLINK;
rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
memset(rkeyllc, 0, sizeof(*rkeyllc));
- rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
- rkeyllc->hd.length = sizeof(struct smc_llc_msg_delete_rkey);
+ rkeyllc->hd.common.llc_type = SMC_LLC_DELETE_RKEY;
+ smc_llc_init_msg_hdr(&rkeyllc->hd, link->lgr, sizeof(*rkeyllc));
rkeyllc->num_rkeys = 1;
rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
/* send llc message */
rc = smc_wr_tx_send(link, pend);
+put_out:
+ smc_wr_tx_link_put(link);
return rc;
}
+/* return first buffer from any of the next buf lists */
+static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr,
+ int *buf_lst)
+{
+ struct smc_buf_desc *buf_pos;
+
+ while (*buf_lst < SMC_RMBE_SIZES) {
+ buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst],
+ struct smc_buf_desc, list);
+ if (buf_pos)
+ return buf_pos;
+ (*buf_lst)++;
+ }
+ return NULL;
+}
+
+/* return next rmb from buffer lists */
+static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
+ int *buf_lst,
+ struct smc_buf_desc *buf_pos)
+{
+ struct smc_buf_desc *buf_next;
+
+ if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
+ (*buf_lst)++;
+ return _smc_llc_get_next_rmb(lgr, buf_lst);
+ }
+ buf_next = list_next_entry(buf_pos, list);
+ return buf_next;
+}
+
+static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr,
+ int *buf_lst)
+{
+ *buf_lst = 0;
+ return smc_llc_get_next_rmb(lgr, buf_lst, NULL);
+}
+
+static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
+ struct smc_link *link, struct smc_link *link_new)
+{
+ struct smc_link_group *lgr = link->lgr;
+ struct smc_buf_desc *buf_pos;
+ int prim_lnk_idx, lnk_idx, i;
+ struct smc_buf_desc *rmb;
+ int len = sizeof(*ext);
+ int buf_lst;
+
+ ext->v2_direct = !lgr->uses_gateway;
+ memcpy(ext->client_target_gid, link_new->gid, SMC_GID_SIZE);
+
+ prim_lnk_idx = link->link_idx;
+ lnk_idx = link_new->link_idx;
+ mutex_lock(&lgr->rmbs_lock);
+ ext->num_rkeys = lgr->conns_num;
+ if (!ext->num_rkeys)
+ goto out;
+ buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
+ for (i = 0; i < ext->num_rkeys; i++) {
+ if (!buf_pos)
+ break;
+ rmb = buf_pos;
+ ext->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey);
+ ext->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey);
+ ext->rt[i].rmb_vaddr_new =
+ cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
+ buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
+ while (buf_pos && !(buf_pos)->used)
+ buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
+ }
+ len += i * sizeof(ext->rt[0]);
+out:
+ mutex_unlock(&lgr->rmbs_lock);
+ return len;
+}
+
/* send ADD LINK request or response */
int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
struct smc_link *link_new,
enum smc_llc_reqresp reqresp)
{
+ struct smc_llc_msg_add_link_v2_ext *ext = NULL;
struct smc_llc_msg_add_link *addllc;
struct smc_wr_tx_pend_priv *pend;
- struct smc_wr_buf *wr_buf;
+ int len = sizeof(*addllc);
int rc;
- rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
- if (rc)
- return rc;
- addllc = (struct smc_llc_msg_add_link *)wr_buf;
+ if (!smc_wr_tx_link_hold(link))
+ return -ENOLINK;
+ if (link->lgr->smc_version == SMC_V2) {
+ struct smc_wr_v2_buf *wr_buf;
+
+ rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend);
+ if (rc)
+ goto put_out;
+ addllc = (struct smc_llc_msg_add_link *)wr_buf;
+ ext = (struct smc_llc_msg_add_link_v2_ext *)
+ &wr_buf->raw[sizeof(*addllc)];
+ memset(ext, 0, SMC_WR_TX_SIZE);
+ } else {
+ struct smc_wr_buf *wr_buf;
+
+ rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
+ if (rc)
+ goto put_out;
+ addllc = (struct smc_llc_msg_add_link *)wr_buf;
+ }
memset(addllc, 0, sizeof(*addllc));
- addllc->hd.common.type = SMC_LLC_ADD_LINK;
- addllc->hd.length = sizeof(struct smc_llc_msg_add_link);
+ addllc->hd.common.llc_type = SMC_LLC_ADD_LINK;
if (reqresp == SMC_LLC_RESP)
addllc->hd.flags |= SMC_LLC_FLAG_RESP;
memcpy(addllc->sender_mac, mac, ETH_ALEN);
@@ -502,8 +674,16 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
addllc->qp_mtu = min(link_new->path_mtu,
link_new->peer_mtu);
}
+ if (ext && link_new)
+ len += smc_llc_fill_ext_v2(ext, link, link_new);
+ smc_llc_init_msg_hdr(&addllc->hd, link->lgr, len);
/* send llc message */
- rc = smc_wr_tx_send(link, pend);
+ if (link->lgr->smc_version == SMC_V2)
+ rc = smc_wr_tx_v2_send(link, pend, len);
+ else
+ rc = smc_wr_tx_send(link, pend);
+put_out:
+ smc_wr_tx_link_put(link);
return rc;
}
@@ -517,14 +697,16 @@ int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
struct smc_wr_buf *wr_buf;
int rc;
+ if (!smc_wr_tx_link_hold(link))
+ return -ENOLINK;
rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
delllc = (struct smc_llc_msg_del_link *)wr_buf;
memset(delllc, 0, sizeof(*delllc));
- delllc->hd.common.type = SMC_LLC_DELETE_LINK;
- delllc->hd.length = sizeof(struct smc_llc_msg_del_link);
+ delllc->hd.common.llc_type = SMC_LLC_DELETE_LINK;
+ smc_llc_init_msg_hdr(&delllc->hd, link->lgr, sizeof(*delllc));
if (reqresp == SMC_LLC_RESP)
delllc->hd.flags |= SMC_LLC_FLAG_RESP;
if (orderly)
@@ -536,6 +718,8 @@ int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
delllc->reason = htonl(reason);
/* send llc message */
rc = smc_wr_tx_send(link, pend);
+put_out:
+ smc_wr_tx_link_put(link);
return rc;
}
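
Every sender in this file now follows the same discipline: pin the link with smc_wr_tx_link_hold() before reserving a send slot, and drop the reference on every exit path through a put_out label, so the link cannot be torn down while a send is being staged. The shape of that pattern, reduced to a refcount stub for illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct link { atomic_int refcnt; bool usable; };

static bool link_hold(struct link *l)
{
	if (!l->usable)
		return false;
	atomic_fetch_add(&l->refcnt, 1);
	return true;
}

static void link_put(struct link *l)
{
	atomic_fetch_sub(&l->refcnt, 1);
}

static int send_llc_msg(struct link *l, bool slot_ok)
{
	int rc;

	if (!link_hold(l))
		return -1;		/* -ENOLINK in the kernel */
	if (!slot_ok) {			/* pending-send setup failed */
		rc = -2;
		goto put_out;		/* error paths still drop the hold */
	}
	rc = 0;				/* fill message, post the send */
put_out:
	link_put(l);
	return rc;
}

int main(void)
{
	struct link l = { .usable = true };

	printf("rc=%d refcnt=%d\n", send_llc_msg(&l, true),
	       atomic_load(&l.refcnt));
	return 0;
}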
@@ -547,16 +731,20 @@ static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
struct smc_wr_buf *wr_buf;
int rc;
+ if (!smc_wr_tx_link_hold(link))
+ return -ENOLINK;
rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
testllc = (struct smc_llc_msg_test_link *)wr_buf;
memset(testllc, 0, sizeof(*testllc));
- testllc->hd.common.type = SMC_LLC_TEST_LINK;
- testllc->hd.length = sizeof(struct smc_llc_msg_test_link);
+ testllc->hd.common.llc_type = SMC_LLC_TEST_LINK;
+ smc_llc_init_msg_hdr(&testllc->hd, link->lgr, sizeof(*testllc));
memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
/* send llc message */
rc = smc_wr_tx_send(link, pend);
+put_out:
+ smc_wr_tx_link_put(link);
return rc;
}
@@ -567,13 +755,16 @@ static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
struct smc_wr_buf *wr_buf;
int rc;
- if (!smc_link_usable(link))
+ if (!smc_wr_tx_link_hold(link))
return -ENOLINK;
rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
- return smc_wr_tx_send(link, pend);
+ rc = smc_wr_tx_send(link, pend);
+put_out:
+ smc_wr_tx_link_put(link);
+ return rc;
}
/* schedule an llc send on link, may wait for buffers,
@@ -586,13 +777,16 @@ static int smc_llc_send_message_wait(struct smc_link *link, void *llcbuf)
struct smc_wr_buf *wr_buf;
int rc;
- if (!smc_link_usable(link))
+ if (!smc_wr_tx_link_hold(link))
return -ENOLINK;
rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
- return smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
+ rc = smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
+put_out:
+ smc_wr_tx_link_put(link);
+ return rc;
}
/********************************* receive ***********************************/
@@ -621,44 +815,6 @@ static int smc_llc_alloc_alt_link(struct smc_link_group *lgr,
return -EMLINK;
}
-/* return first buffer from any of the next buf lists */
-static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr,
- int *buf_lst)
-{
- struct smc_buf_desc *buf_pos;
-
- while (*buf_lst < SMC_RMBE_SIZES) {
- buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst],
- struct smc_buf_desc, list);
- if (buf_pos)
- return buf_pos;
- (*buf_lst)++;
- }
- return NULL;
-}
-
-/* return next rmb from buffer lists */
-static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
- int *buf_lst,
- struct smc_buf_desc *buf_pos)
-{
- struct smc_buf_desc *buf_next;
-
- if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
- (*buf_lst)++;
- return _smc_llc_get_next_rmb(lgr, buf_lst);
- }
- buf_next = list_next_entry(buf_pos, list);
- return buf_next;
-}
-
-static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr,
- int *buf_lst)
-{
- *buf_lst = 0;
- return smc_llc_get_next_rmb(lgr, buf_lst, NULL);
-}
-
/* send one add_link_continue msg */
static int smc_llc_add_link_cont(struct smc_link *link,
struct smc_link *link_new, u8 *num_rkeys_todo,
@@ -672,9 +828,11 @@ static int smc_llc_add_link_cont(struct smc_link *link,
struct smc_buf_desc *rmb;
u8 n;
+ if (!smc_wr_tx_link_hold(link))
+ return -ENOLINK;
rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
if (rc)
- return rc;
+ goto put_out;
addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
memset(addc_llc, 0, sizeof(*addc_llc));
@@ -702,11 +860,14 @@ static int smc_llc_add_link_cont(struct smc_link *link,
while (*buf_pos && !(*buf_pos)->used)
*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
}
- addc_llc->hd.common.type = SMC_LLC_ADD_LINK_CONT;
+ addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT;
addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
if (lgr->role == SMC_CLNT)
addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
- return smc_wr_tx_send(link, pend);
+ rc = smc_wr_tx_send(link, pend);
+put_out:
+ smc_wr_tx_link_put(link);
+ return rc;
}
static int smc_llc_cli_rkey_exchange(struct smc_link *link,
@@ -758,6 +919,8 @@ static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry)
qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
+ smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr,
+ sizeof(qentry->msg));
return smc_llc_send_message(qentry->link, &qentry->msg);
}
@@ -778,7 +941,7 @@ static int smc_llc_cli_conf_link(struct smc_link *link,
SMC_LLC_DEL_LOST_PATH);
return -ENOLINK;
}
- if (qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) {
+ if (qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) {
/* received DELETE_LINK instead */
qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
smc_llc_send_message(link, &qentry->msg);
@@ -819,6 +982,26 @@ static int smc_llc_cli_conf_link(struct smc_link *link,
return 0;
}
+static void smc_llc_save_add_link_rkeys(struct smc_link *link,
+ struct smc_link *link_new)
+{
+ struct smc_llc_msg_add_link_v2_ext *ext;
+ struct smc_link_group *lgr = link->lgr;
+ int max, i;
+
+ ext = (struct smc_llc_msg_add_link_v2_ext *)((u8 *)lgr->wr_rx_buf_v2 +
+ SMC_WR_TX_SIZE);
+ max = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
+ mutex_lock(&lgr->rmbs_lock);
+ for (i = 0; i < max; i++) {
+ smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
+ ext->rt[i].rmb_key,
+ ext->rt[i].rmb_vaddr_new,
+ ext->rt[i].rmb_key_new);
+ }
+ mutex_unlock(&lgr->rmbs_lock);
+}
+
static void smc_llc_save_add_link_info(struct smc_link *link,
struct smc_llc_msg_add_link *add_llc)
{
@@ -835,31 +1018,47 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
struct smc_llc_msg_add_link *llc = &qentry->msg.add_link;
enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
struct smc_link_group *lgr = smc_get_lgr(link);
+ struct smc_init_info *ini = NULL;
struct smc_link *lnk_new = NULL;
- struct smc_init_info ini;
int lnk_idx, rc = 0;
if (!llc->qp_mtu)
goto out_reject;
- ini.vlan_id = lgr->vlan_id;
- smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
+ ini = kzalloc(sizeof(*ini), GFP_KERNEL);
+ if (!ini) {
+ rc = -ENOMEM;
+ goto out_reject;
+ }
+
+ ini->vlan_id = lgr->vlan_id;
+ if (lgr->smc_version == SMC_V2) {
+ ini->check_smcrv2 = true;
+ ini->smcrv2.saddr = lgr->saddr;
+ ini->smcrv2.daddr = smc_ib_gid_to_ipv4(llc->sender_gid);
+ }
+ smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
- !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN)) {
- if (!ini.ib_dev)
+ (lgr->smc_version == SMC_V2 ||
+ !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN))) {
+ if (!ini->ib_dev && !ini->smcrv2.ib_dev_v2)
goto out_reject;
lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
}
- if (!ini.ib_dev) {
+ if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) {
lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
- ini.ib_dev = link->smcibdev;
- ini.ib_port = link->ibport;
+ ini->smcrv2.ib_dev_v2 = link->smcibdev;
+ ini->smcrv2.ib_port_v2 = link->ibport;
+ } else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) {
+ lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
+ ini->ib_dev = link->smcibdev;
+ ini->ib_port = link->ibport;
}
lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
if (lnk_idx < 0)
goto out_reject;
lnk_new = &lgr->lnk[lnk_idx];
- rc = smcr_link_init(lgr, lnk_new, lnk_idx, &ini);
+ rc = smcr_link_init(lgr, lnk_new, lnk_idx, ini);
if (rc)
goto out_reject;
smc_llc_save_add_link_info(lnk_new, llc);
@@ -875,16 +1074,20 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
goto out_clear_lnk;
rc = smc_llc_send_add_link(link,
- lnk_new->smcibdev->mac[ini.ib_port - 1],
+ lnk_new->smcibdev->mac[lnk_new->ibport - 1],
lnk_new->gid, lnk_new, SMC_LLC_RESP);
if (rc)
goto out_clear_lnk;
- rc = smc_llc_cli_rkey_exchange(link, lnk_new);
- if (rc) {
- rc = 0;
- goto out_clear_lnk;
+ if (lgr->smc_version == SMC_V2) {
+ smc_llc_save_add_link_rkeys(link, lnk_new);
+ } else {
+ rc = smc_llc_cli_rkey_exchange(link, lnk_new);
+ if (rc) {
+ rc = 0;
+ goto out_clear_lnk;
+ }
}
- rc = smc_llc_cli_conf_link(link, &ini, lnk_new, lgr_new_t);
+ rc = smc_llc_cli_conf_link(link, ini, lnk_new, lgr_new_t);
if (!rc)
goto out;
out_clear_lnk:
@@ -893,29 +1096,78 @@ out_clear_lnk:
out_reject:
smc_llc_cli_add_link_reject(qentry);
out:
+ kfree(ini);
kfree(qentry);
return rc;
}
+static void smc_llc_send_request_add_link(struct smc_link *link)
+{
+ struct smc_llc_msg_req_add_link_v2 *llc;
+ struct smc_wr_tx_pend_priv *pend;
+ struct smc_wr_v2_buf *wr_buf;
+ struct smc_gidlist gidlist;
+ int rc, len, i;
+
+ if (!smc_wr_tx_link_hold(link))
+ return;
+ if (link->lgr->type == SMC_LGR_SYMMETRIC ||
+ link->lgr->type == SMC_LGR_ASYMMETRIC_PEER)
+ goto put_out;
+
+ smc_fill_gid_list(link->lgr, &gidlist, link->smcibdev, link->gid);
+ if (gidlist.len <= 1)
+ goto put_out;
+
+ rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend);
+ if (rc)
+ goto put_out;
+ llc = (struct smc_llc_msg_req_add_link_v2 *)wr_buf;
+ memset(llc, 0, SMC_WR_TX_SIZE);
+
+ llc->hd.common.llc_type = SMC_LLC_REQ_ADD_LINK;
+ for (i = 0; i < gidlist.len; i++)
+ memcpy(llc->gid[i], gidlist.list[i], sizeof(gidlist.list[0]));
+ llc->gid_cnt = gidlist.len;
+ len = sizeof(*llc) + (gidlist.len * sizeof(gidlist.list[0]));
+ smc_llc_init_msg_hdr(&llc->hd, link->lgr, len);
+ rc = smc_wr_tx_v2_send(link, pend, len);
+ if (!rc)
+ /* set REQ_ADD_LINK flow and wait for response from peer */
+ link->lgr->llc_flow_lcl.type = SMC_LLC_FLOW_REQ_ADD_LINK;
+put_out:
+ smc_wr_tx_link_put(link);
+}
+
/* as an SMC client, invite server to start the add_link processing */
static void smc_llc_cli_add_link_invite(struct smc_link *link,
struct smc_llc_qentry *qentry)
{
struct smc_link_group *lgr = smc_get_lgr(link);
- struct smc_init_info ini;
+ struct smc_init_info *ini = NULL;
+
+ if (lgr->smc_version == SMC_V2) {
+ smc_llc_send_request_add_link(link);
+ goto out;
+ }
if (lgr->type == SMC_LGR_SYMMETRIC ||
lgr->type == SMC_LGR_ASYMMETRIC_PEER)
goto out;
- ini.vlan_id = lgr->vlan_id;
- smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
- if (!ini.ib_dev)
+ ini = kzalloc(sizeof(*ini), GFP_KERNEL);
+ if (!ini)
goto out;
- smc_llc_send_add_link(link, ini.ib_dev->mac[ini.ib_port - 1],
- ini.ib_gid, NULL, SMC_LLC_REQ);
+ ini->vlan_id = lgr->vlan_id;
+ smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
+ if (!ini->ib_dev)
+ goto out;
+
+ smc_llc_send_add_link(link, ini->ib_dev->mac[ini->ib_port - 1],
+ ini->ib_gid, NULL, SMC_LLC_REQ);
out:
+ kfree(ini);
kfree(qentry);
}
@@ -931,7 +1183,7 @@ static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc)
static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
{
- if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK &&
+ if (llc->raw.hdr.common.llc_type == SMC_LLC_ADD_LINK &&
smc_llc_is_empty_llc_message(llc))
return true;
return false;
@@ -1098,7 +1350,7 @@ static int smc_llc_srv_conf_link(struct smc_link *link,
/* receive CONFIRM LINK response over the RoCE fabric */
qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0);
if (!qentry ||
- qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) {
+ qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) {
/* send DELETE LINK */
smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
false, SMC_LLC_DEL_LOST_PATH);
@@ -1117,37 +1369,80 @@ static int smc_llc_srv_conf_link(struct smc_link *link,
return 0;
}
-int smc_llc_srv_add_link(struct smc_link *link)
+static void smc_llc_send_req_add_link_response(struct smc_llc_qentry *qentry)
+{
+ qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
+ smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr,
+ sizeof(qentry->msg));
+ memset(&qentry->msg.raw.data, 0, sizeof(qentry->msg.raw.data));
+ smc_llc_send_message(qentry->link, &qentry->msg);
+}
+
+int smc_llc_srv_add_link(struct smc_link *link,
+ struct smc_llc_qentry *req_qentry)
{
enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
struct smc_link_group *lgr = link->lgr;
struct smc_llc_msg_add_link *add_llc;
struct smc_llc_qentry *qentry = NULL;
- struct smc_link *link_new;
- struct smc_init_info ini;
+ bool send_req_add_link_resp = false;
+ struct smc_link *link_new = NULL;
+ struct smc_init_info *ini = NULL;
int lnk_idx, rc = 0;
+ if (req_qentry &&
+ req_qentry->msg.raw.hdr.common.llc_type == SMC_LLC_REQ_ADD_LINK)
+ send_req_add_link_resp = true;
+
+ ini = kzalloc(sizeof(*ini), GFP_KERNEL);
+ if (!ini) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
/* ignore client add link recommendation, start new flow */
- ini.vlan_id = lgr->vlan_id;
- smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
- if (!ini.ib_dev) {
+ ini->vlan_id = lgr->vlan_id;
+ if (lgr->smc_version == SMC_V2) {
+ ini->check_smcrv2 = true;
+ ini->smcrv2.saddr = lgr->saddr;
+ if (send_req_add_link_resp) {
+ struct smc_llc_msg_req_add_link_v2 *req_add =
+ &req_qentry->msg.req_add_link;
+
+ ini->smcrv2.daddr = smc_ib_gid_to_ipv4(req_add->gid[0]);
+ }
+ }
+ smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
+ if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) {
+ lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
+ ini->smcrv2.ib_dev_v2 = link->smcibdev;
+ ini->smcrv2.ib_port_v2 = link->ibport;
+ } else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) {
lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
- ini.ib_dev = link->smcibdev;
- ini.ib_port = link->ibport;
+ ini->ib_dev = link->smcibdev;
+ ini->ib_port = link->ibport;
}
lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
- if (lnk_idx < 0)
- return 0;
+ if (lnk_idx < 0) {
+ rc = 0;
+ goto out;
+ }
- rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, &ini);
+ rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, ini);
if (rc)
- return rc;
+ goto out;
link_new = &lgr->lnk[lnk_idx];
+
+ rc = smcr_buf_map_lgr(link_new);
+ if (rc)
+ goto out_err;
+
rc = smc_llc_send_add_link(link,
- link_new->smcibdev->mac[ini.ib_port - 1],
+ link_new->smcibdev->mac[link_new->ibport - 1],
link_new->gid, link_new, SMC_LLC_REQ);
if (rc)
goto out_err;
+ send_req_add_link_resp = false;
/* receive ADD LINK response over the RoCE fabric */
qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK);
if (!qentry) {
@@ -1162,7 +1457,8 @@ int smc_llc_srv_add_link(struct smc_link *link)
}
if (lgr->type == SMC_LGR_SINGLE &&
(!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
- !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN))) {
+ (lgr->smc_version == SMC_V2 ||
+ !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN)))) {
lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
}
smc_llc_save_add_link_info(link_new, add_llc);
@@ -1171,39 +1467,49 @@ int smc_llc_srv_add_link(struct smc_link *link)
rc = smc_ib_ready_link(link_new);
if (rc)
goto out_err;
- rc = smcr_buf_map_lgr(link_new);
- if (rc)
- goto out_err;
rc = smcr_buf_reg_lgr(link_new);
if (rc)
goto out_err;
- rc = smc_llc_srv_rkey_exchange(link, link_new);
- if (rc)
- goto out_err;
+ if (lgr->smc_version == SMC_V2) {
+ smc_llc_save_add_link_rkeys(link, link_new);
+ } else {
+ rc = smc_llc_srv_rkey_exchange(link, link_new);
+ if (rc)
+ goto out_err;
+ }
rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t);
if (rc)
goto out_err;
+ kfree(ini);
return 0;
out_err:
- link_new->state = SMC_LNK_INACTIVE;
- smcr_link_clear(link_new, false);
+ if (link_new) {
+ link_new->state = SMC_LNK_INACTIVE;
+ smcr_link_clear(link_new, false);
+ }
+out:
+ kfree(ini);
+ if (send_req_add_link_resp)
+ smc_llc_send_req_add_link_response(req_qentry);
return rc;
}
static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
{
struct smc_link *link = lgr->llc_flow_lcl.qentry->link;
+ struct smc_llc_qentry *qentry;
int rc;
- smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
+ qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
mutex_lock(&lgr->llc_conf_mutex);
- rc = smc_llc_srv_add_link(link);
+ rc = smc_llc_srv_add_link(link, qentry);
if (!rc && lgr->type == SMC_LGR_SYMMETRIC) {
/* delete any asymmetric link */
smc_llc_delete_asym_link(lgr);
}
mutex_unlock(&lgr->llc_conf_mutex);
+ kfree(qentry);
}
/* enqueue a local add_link req to trigger a new add_link flow */
@@ -1211,8 +1517,8 @@ void smc_llc_add_link_local(struct smc_link *link)
{
struct smc_llc_msg_add_link add_llc = {};
- add_llc.hd.length = sizeof(add_llc);
- add_llc.hd.common.type = SMC_LLC_ADD_LINK;
+ add_llc.hd.common.llc_type = SMC_LLC_ADD_LINK;
+ smc_llc_init_msg_hdr(&add_llc.hd, link->lgr, sizeof(add_llc));
/* no dev and port needed */
smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc);
}
@@ -1234,7 +1540,8 @@ static void smc_llc_add_link_work(struct work_struct *work)
else
smc_llc_process_srv_add_link(lgr);
out:
- smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
+ if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_REQ_ADD_LINK)
+ smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
}
/* enqueue a local del_link msg to trigger a new del_link flow,
@@ -1244,8 +1551,8 @@ void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id)
{
struct smc_llc_msg_del_link del_llc = {};
- del_llc.hd.length = sizeof(del_llc);
- del_llc.hd.common.type = SMC_LLC_DELETE_LINK;
+ del_llc.hd.common.llc_type = SMC_LLC_DELETE_LINK;
+ smc_llc_init_msg_hdr(&del_llc.hd, link->lgr, sizeof(del_llc));
del_llc.link_num = del_link_id;
del_llc.reason = htonl(SMC_LLC_DEL_LOST_PATH);
del_llc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
@@ -1315,8 +1622,8 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
struct smc_llc_msg_del_link delllc = {};
int i;
- delllc.hd.common.type = SMC_LLC_DELETE_LINK;
- delllc.hd.length = sizeof(delllc);
+ delllc.hd.common.llc_type = SMC_LLC_DELETE_LINK;
+ smc_llc_init_msg_hdr(&delllc.hd, lgr, sizeof(delllc));
if (ord)
delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
@@ -1432,6 +1739,8 @@ static void smc_llc_rmt_conf_rkey(struct smc_link_group *lgr)
link = qentry->link;
num_entries = llc->rtoken[0].num_rkeys;
+ if (num_entries > SMC_LLC_RKEYS_PER_MSG)
+ goto out_err;
/* first rkey entry is for receiving link */
rk_idx = smc_rtoken_add(link,
llc->rtoken[0].rmb_vaddr,
@@ -1450,6 +1759,7 @@ out_err:
llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY;
out:
llc->hd.flags |= SMC_LLC_FLAG_RESP;
+ smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
smc_llc_send_message(link, &qentry->msg);
smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}
@@ -1467,6 +1777,28 @@ static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr)
llc = &qentry->msg.delete_rkey;
link = qentry->link;
+ if (lgr->smc_version == SMC_V2) {
+ struct smc_llc_msg_delete_rkey_v2 *llcv2;
+
+ memcpy(lgr->wr_rx_buf_v2, llc, sizeof(*llc));
+ llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)lgr->wr_rx_buf_v2;
+ llcv2->num_inval_rkeys = 0;
+
+ max = min_t(u8, llcv2->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
+ for (i = 0; i < max; i++) {
+ if (smc_rtoken_delete(link, llcv2->rkey[i]))
+ llcv2->num_inval_rkeys++;
+ }
+ memset(&llc->rkey[0], 0, sizeof(llc->rkey));
+ memset(&llc->reserved2, 0, sizeof(llc->reserved2));
+ smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
+ if (llcv2->num_inval_rkeys) {
+ llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
+ llc->err_mask = llcv2->num_inval_rkeys;
+ }
+ goto finish;
+ }
+
max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
for (i = 0; i < max; i++) {
if (smc_rtoken_delete(link, llc->rkey[i]))
@@ -1476,6 +1808,7 @@ static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr)
llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
llc->err_mask = err_mask;
}
+finish:
llc->hd.flags |= SMC_LLC_FLAG_RESP;
smc_llc_send_message(link, &qentry->msg);
smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
@@ -1511,7 +1844,7 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
if (!smc_link_usable(link))
goto out;
- switch (llc->raw.hdr.common.type) {
+ switch (llc->raw.hdr.common.llc_type) {
case SMC_LLC_TEST_LINK:
llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP;
smc_llc_send_message(link, llc);
@@ -1536,8 +1869,18 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
qentry);
wake_up(&lgr->llc_msg_waiter);
- } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
- qentry)) {
+ return;
+ }
+ if (lgr->llc_flow_lcl.type ==
+ SMC_LLC_FLOW_REQ_ADD_LINK) {
+ /* server started add_link processing */
+ lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
+ smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
+ qentry);
+ schedule_work(&lgr->llc_add_link_work);
+ return;
+ }
+ if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
schedule_work(&lgr->llc_add_link_work);
}
} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
@@ -1585,6 +1928,23 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
}
return;
+ case SMC_LLC_REQ_ADD_LINK:
+ /* handle response here, smc_llc_flow_stop() cannot be called
+ * in tasklet context
+ */
+ if (lgr->role == SMC_CLNT &&
+ lgr->llc_flow_lcl.type == SMC_LLC_FLOW_REQ_ADD_LINK &&
+ (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP)) {
+ smc_llc_flow_stop(link->lgr, &lgr->llc_flow_lcl);
+ } else if (lgr->role == SMC_SERV) {
+ if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
+ /* as smc server, handle client suggestion */
+ lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
+ schedule_work(&lgr->llc_add_link_work);
+ }
+ return;
+ }
+ break;
default:
smc_llc_protocol_violation(lgr, llc->raw.hdr.common.type);
break;
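
The new SMC_LLC_REQ_ADD_LINK case above is a small hand-off: a client that sent the request parks its flow as SMC_LLC_FLOW_REQ_ADD_LINK and is either stopped by the flagged response, or promoted to an ordinary SMC_LLC_FLOW_ADD_LINK when the server's ADD_LINK arrives (see the earlier ADD_LINK hunk). A toy, runnable restatement of the promotion rule; only the flow names are taken from the code, everything else is invented:

#include <stdio.h>

enum flowtype { FLOW_NONE, FLOW_REQ_ADD_LINK, FLOW_ADD_LINK };

/* Promote a parked client flow when the server's ADD_LINK arrives;
 * an idle flow is started fresh, anything else is left alone.
 */
static enum flowtype on_add_link(enum flowtype cur)
{
	if (cur == FLOW_REQ_ADD_LINK)
		return FLOW_ADD_LINK;	/* server picked up our request */
	return cur == FLOW_NONE ? FLOW_ADD_LINK : cur;
}

int main(void)
{
	printf("%d\n", on_add_link(FLOW_REQ_ADD_LINK));	/* 2 == FLOW_ADD_LINK */
	return 0;
}
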
@@ -1628,7 +1988,7 @@ static void smc_llc_rx_response(struct smc_link *link,
{
enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type;
struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl;
- u8 llc_type = qentry->msg.raw.hdr.common.type;
+ u8 llc_type = qentry->msg.raw.hdr.common.llc_type;
switch (llc_type) {
case SMC_LLC_TEST_LINK:
@@ -1654,7 +2014,8 @@ static void smc_llc_rx_response(struct smc_link *link,
/* not used because max links is 3 */
break;
default:
- smc_llc_protocol_violation(link->lgr, llc_type);
+ smc_llc_protocol_violation(link->lgr,
+ qentry->msg.raw.hdr.common.type);
break;
}
kfree(qentry);
@@ -1679,7 +2040,8 @@ static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg));
/* process responses immediately */
- if (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) {
+ if ((llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) &&
+ llc->raw.hdr.common.llc_type != SMC_LLC_REQ_ADD_LINK) {
smc_llc_rx_response(link, qentry);
return;
}
@@ -1699,8 +2061,13 @@ static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
if (wc->byte_len < sizeof(*llc))
return; /* short message */
- if (llc->raw.hdr.length != sizeof(*llc))
- return; /* invalid message */
+ if (!llc->raw.hdr.common.llc_version) {
+ if (llc->raw.hdr.length != sizeof(*llc))
+ return; /* invalid message */
+ } else {
+ if (llc->raw.hdr.length_v2 < sizeof(*llc))
+ return; /* invalid message */
+ }
smc_llc_enqueue(link, llc);
}
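
The hunk above makes the receive-side length check version-dependent: V1 LLC messages have a fixed size, while V2 messages only advertise a lower bound because they may exceed one receive slot. A minimal user-space sketch of that rule; the header layout and field names here are simplified stand-ins, not the real wire format:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the LLC header; the real layout differs. */
struct hdr {
	uint8_t llc_version;	/* 0 means a V1 message */
	uint8_t length;		/* V1: exact message size */
	uint16_t length_v2;	/* V2: lower bound only */
};

static bool msg_len_ok(const struct hdr *h, size_t slot)
{
	if (!h->llc_version)
		return h->length == slot;	/* V1 is fixed-size */
	return h->length_v2 >= slot;		/* V2 may be larger */
}

int main(void)
{
	struct hdr v1 = { .llc_version = 0, .length = 44 };
	struct hdr v2 = { .llc_version = 2, .length_v2 = 1024 };

	printf("v1 ok=%d, v2 ok=%d\n",
	       msg_len_ok(&v1, 44), msg_len_ok(&v2, 44));	/* 1, 1 */
	return 0;
}
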
@@ -1919,6 +2286,35 @@ static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
.handler = smc_llc_rx_handler,
.type = SMC_LLC_DELETE_RKEY
},
+ /* V2 types */
+ {
+ .handler = smc_llc_rx_handler,
+ .type = SMC_LLC_CONFIRM_LINK_V2
+ },
+ {
+ .handler = smc_llc_rx_handler,
+ .type = SMC_LLC_TEST_LINK_V2
+ },
+ {
+ .handler = smc_llc_rx_handler,
+ .type = SMC_LLC_ADD_LINK_V2
+ },
+ {
+ .handler = smc_llc_rx_handler,
+ .type = SMC_LLC_DELETE_LINK_V2
+ },
+ {
+ .handler = smc_llc_rx_handler,
+ .type = SMC_LLC_REQ_ADD_LINK_V2
+ },
+ {
+ .handler = smc_llc_rx_handler,
+ .type = SMC_LLC_CONFIRM_RKEY_V2
+ },
+ {
+ .handler = smc_llc_rx_handler,
+ .type = SMC_LLC_DELETE_RKEY_V2
+ },
{
.handler = NULL,
}
diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h
index cc00a2ec4e92..4404e52b3346 100644
--- a/net/smc/smc_llc.h
+++ b/net/smc/smc_llc.h
@@ -30,10 +30,19 @@ enum smc_llc_msg_type {
SMC_LLC_ADD_LINK = 0x02,
SMC_LLC_ADD_LINK_CONT = 0x03,
SMC_LLC_DELETE_LINK = 0x04,
+ SMC_LLC_REQ_ADD_LINK = 0x05,
SMC_LLC_CONFIRM_RKEY = 0x06,
SMC_LLC_TEST_LINK = 0x07,
SMC_LLC_CONFIRM_RKEY_CONT = 0x08,
SMC_LLC_DELETE_RKEY = 0x09,
+ /* V2 types */
+ SMC_LLC_CONFIRM_LINK_V2 = 0x21,
+ SMC_LLC_ADD_LINK_V2 = 0x22,
+ SMC_LLC_DELETE_LINK_V2 = 0x24,
+ SMC_LLC_REQ_ADD_LINK_V2 = 0x25,
+ SMC_LLC_CONFIRM_RKEY_V2 = 0x26,
+ SMC_LLC_TEST_LINK_V2 = 0x27,
+ SMC_LLC_DELETE_RKEY_V2 = 0x29,
};
#define smc_link_downing(state) \
@@ -102,7 +111,8 @@ void smc_llc_flow_qentry_del(struct smc_llc_flow *flow);
void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord,
u32 rsn);
int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry);
-int smc_llc_srv_add_link(struct smc_link *link);
+int smc_llc_srv_add_link(struct smc_link *link,
+ struct smc_llc_qentry *req_qentry);
void smc_llc_add_link_local(struct smc_link *link);
int smc_llc_init(void) __init;
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 4a964e9190b0..67e9d9fde085 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -953,6 +953,26 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev,
return rc;
}
+static int smc_pnet_determine_gid(struct smc_ib_device *ibdev, int i,
+ struct smc_init_info *ini)
+{
+ if (!ini->check_smcrv2 &&
+ !smc_ib_determine_gid(ibdev, i, ini->vlan_id, ini->ib_gid, NULL,
+ NULL)) {
+ ini->ib_dev = ibdev;
+ ini->ib_port = i;
+ return 0;
+ }
+ if (ini->check_smcrv2 &&
+ !smc_ib_determine_gid(ibdev, i, ini->vlan_id, ini->smcrv2.ib_gid_v2,
+ NULL, &ini->smcrv2)) {
+ ini->smcrv2.ib_dev_v2 = ibdev;
+ ini->smcrv2.ib_port_v2 = i;
+ return 0;
+ }
+ return -ENODEV;
+}
+
/* find a roce device for the given pnetid */
static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id,
struct smc_init_info *ini,
@@ -961,7 +981,6 @@ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id,
struct smc_ib_device *ibdev;
int i;
- ini->ib_dev = NULL;
mutex_lock(&smc_ib_devices.mutex);
list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
if (ibdev == known_dev)
@@ -971,12 +990,9 @@ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id,
continue;
if (smc_pnet_match(ibdev->pnetid[i - 1], pnet_id) &&
smc_ib_port_active(ibdev, i) &&
- !test_bit(i - 1, ibdev->ports_going_away) &&
- !smc_ib_determine_gid(ibdev, i, ini->vlan_id,
- ini->ib_gid, NULL)) {
- ini->ib_dev = ibdev;
- ini->ib_port = i;
- goto out;
+ !test_bit(i - 1, ibdev->ports_going_away)) {
+ if (!smc_pnet_determine_gid(ibdev, i, ini))
+ goto out;
}
}
}
@@ -1016,12 +1032,9 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev,
dev_put(ndev);
if (netdev == ndev &&
smc_ib_port_active(ibdev, i) &&
- !test_bit(i - 1, ibdev->ports_going_away) &&
- !smc_ib_determine_gid(ibdev, i, ini->vlan_id,
- ini->ib_gid, NULL)) {
- ini->ib_dev = ibdev;
- ini->ib_port = i;
- break;
+ !test_bit(i - 1, ibdev->ports_going_away)) {
+ if (!smc_pnet_determine_gid(ibdev, i, ini))
+ break;
}
}
}
@@ -1083,8 +1096,6 @@ void smc_pnet_find_roce_resource(struct sock *sk, struct smc_init_info *ini)
{
struct dst_entry *dst = sk_dst_get(sk);
- ini->ib_dev = NULL;
- ini->ib_port = 0;
if (!dst)
goto out;
if (!dst->dev)
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index c79361dfcdfb..738a4a99c827 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -496,7 +496,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn,
/* Wakeup sndbuf consumers from any context (IRQ or process)
* since there is more data to transmit; usable snd_wnd as max transmit
*/
-static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
struct smc_link *link = conn->lnk;
@@ -505,8 +505,11 @@ static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
struct smc_wr_buf *wr_buf;
int rc;
+ if (!link || !smc_wr_tx_link_hold(link))
+ return -ENOLINK;
rc = smc_cdc_get_free_slot(conn, link, &wr_buf, &wr_rdma_buf, &pend);
if (rc < 0) {
+ smc_wr_tx_link_put(link);
if (rc == -EBUSY) {
struct smc_sock *smc =
container_of(conn, struct smc_sock, conn);
@@ -547,22 +550,7 @@ static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
out_unlock:
spin_unlock_bh(&conn->send_lock);
- return rc;
-}
-
-static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
-{
- struct smc_link *link = conn->lnk;
- int rc = -ENOLINK;
-
- if (!link)
- return rc;
-
- atomic_inc(&link->wr_tx_refcnt);
- if (smc_link_usable(link))
- rc = _smcr_tx_sndbuf_nonempty(conn);
- if (atomic_dec_and_test(&link->wr_tx_refcnt))
- wake_up_all(&link->wr_tx_wait);
+ smc_wr_tx_link_put(link);
return rc;
}
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index a419e9af36b9..600ab5889227 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -101,19 +101,32 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
}
pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);
- if (pnd_snd_idx == link->wr_tx_cnt)
- return;
- link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status;
- if (link->wr_tx_pends[pnd_snd_idx].compl_requested)
- complete(&link->wr_tx_compl[pnd_snd_idx]);
- memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx], sizeof(pnd_snd));
- /* clear the full struct smc_wr_tx_pend including .priv */
- memset(&link->wr_tx_pends[pnd_snd_idx], 0,
- sizeof(link->wr_tx_pends[pnd_snd_idx]));
- memset(&link->wr_tx_bufs[pnd_snd_idx], 0,
- sizeof(link->wr_tx_bufs[pnd_snd_idx]));
- if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
- return;
+ if (pnd_snd_idx == link->wr_tx_cnt) {
+ if (link->lgr->smc_version != SMC_V2 ||
+ link->wr_tx_v2_pend->wr_id != wc->wr_id)
+ return;
+ link->wr_tx_v2_pend->wc_status = wc->status;
+ memcpy(&pnd_snd, link->wr_tx_v2_pend, sizeof(pnd_snd));
+ /* clear the full struct smc_wr_tx_pend including .priv */
+ memset(link->wr_tx_v2_pend, 0,
+ sizeof(*link->wr_tx_v2_pend));
+ memset(link->lgr->wr_tx_buf_v2, 0,
+ sizeof(*link->lgr->wr_tx_buf_v2));
+ } else {
+ link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status;
+ if (link->wr_tx_pends[pnd_snd_idx].compl_requested)
+ complete(&link->wr_tx_compl[pnd_snd_idx]);
+ memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx],
+ sizeof(pnd_snd));
+ /* clear the full struct smc_wr_tx_pend including .priv */
+ memset(&link->wr_tx_pends[pnd_snd_idx], 0,
+ sizeof(link->wr_tx_pends[pnd_snd_idx]));
+ memset(&link->wr_tx_bufs[pnd_snd_idx], 0,
+ sizeof(link->wr_tx_bufs[pnd_snd_idx]));
+ if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
+ return;
+ }
+
if (wc->status) {
for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
/* clear full struct smc_wr_tx_pend including .priv */
@@ -123,6 +136,12 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
sizeof(link->wr_tx_bufs[i]));
clear_bit(i, link->wr_tx_mask);
}
+ if (link->lgr->smc_version == SMC_V2) {
+ memset(link->wr_tx_v2_pend, 0,
+ sizeof(*link->wr_tx_v2_pend));
+ memset(link->lgr->wr_tx_buf_v2, 0,
+ sizeof(*link->lgr->wr_tx_buf_v2));
+ }
/* terminate link */
smcr_link_down_cond_sched(link);
}
@@ -239,6 +258,33 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
return 0;
}
+int smc_wr_tx_get_v2_slot(struct smc_link *link,
+ smc_wr_tx_handler handler,
+ struct smc_wr_v2_buf **wr_buf,
+ struct smc_wr_tx_pend_priv **wr_pend_priv)
+{
+ struct smc_wr_tx_pend *wr_pend;
+ struct ib_send_wr *wr_ib;
+ u64 wr_id;
+
+ if (link->wr_tx_v2_pend->idx == link->wr_tx_cnt)
+ return -EBUSY;
+
+ *wr_buf = NULL;
+ *wr_pend_priv = NULL;
+ wr_id = smc_wr_tx_get_next_wr_id(link);
+ wr_pend = link->wr_tx_v2_pend;
+ wr_pend->wr_id = wr_id;
+ wr_pend->handler = handler;
+ wr_pend->link = link;
+ wr_pend->idx = link->wr_tx_cnt;
+ wr_ib = link->wr_tx_v2_ib;
+ wr_ib->wr_id = wr_id;
+ *wr_buf = link->lgr->wr_tx_buf_v2;
+ *wr_pend_priv = &wr_pend->priv;
+ return 0;
+}
+
int smc_wr_tx_put_slot(struct smc_link *link,
struct smc_wr_tx_pend_priv *wr_pend_priv)
{
@@ -256,6 +302,14 @@ int smc_wr_tx_put_slot(struct smc_link *link,
test_and_clear_bit(idx, link->wr_tx_mask);
wake_up(&link->wr_tx_wait);
return 1;
+ } else if (link->lgr->smc_version == SMC_V2 &&
+ pend->idx == link->wr_tx_cnt) {
+ /* Large v2 buffer */
+ memset(link->wr_tx_v2_pend, 0,
+ sizeof(*link->wr_tx_v2_pend));
+ memset(link->lgr->wr_tx_buf_v2, 0,
+ sizeof(*link->lgr->wr_tx_buf_v2));
+ return 1;
}
return 0;
@@ -280,6 +334,22 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
return rc;
}
+int smc_wr_tx_v2_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
+ int len)
+{
+ int rc;
+
+ link->wr_tx_v2_ib->sg_list[0].length = len;
+ ib_req_notify_cq(link->smcibdev->roce_cq_send,
+ IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
+ rc = ib_post_send(link->roce_qp, link->wr_tx_v2_ib, NULL);
+ if (rc) {
+ smc_wr_tx_put_slot(link, priv);
+ smcr_link_down_cond_sched(link);
+ }
+ return rc;
+}
+
/* Send prepared WR slot via ib_post_send and wait for send completion
* notification.
* @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
@@ -517,6 +587,7 @@ void smc_wr_remember_qp_attr(struct smc_link *lnk)
static void smc_wr_init_sge(struct smc_link *lnk)
{
+ int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1;
u32 i;
for (i = 0; i < lnk->wr_tx_cnt; i++) {
@@ -545,14 +616,44 @@ static void smc_wr_init_sge(struct smc_link *lnk)
lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =
lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge;
}
+
+ if (lnk->lgr->smc_version == SMC_V2) {
+ lnk->wr_tx_v2_sge->addr = lnk->wr_tx_v2_dma_addr;
+ lnk->wr_tx_v2_sge->length = SMC_WR_BUF_V2_SIZE;
+ lnk->wr_tx_v2_sge->lkey = lnk->roce_pd->local_dma_lkey;
+
+ lnk->wr_tx_v2_ib->next = NULL;
+ lnk->wr_tx_v2_ib->sg_list = lnk->wr_tx_v2_sge;
+ lnk->wr_tx_v2_ib->num_sge = 1;
+ lnk->wr_tx_v2_ib->opcode = IB_WR_SEND;
+ lnk->wr_tx_v2_ib->send_flags =
+ IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+ }
+
+ /* With SMC-Rv2 there can be messages larger than SMC_WR_TX_SIZE.
+ * Each ib_recv_wr gets 2 sges; the second one is a spillover buffer
+ * shared by all receive WRs. When a larger message arrives, the
+ * content of the first small sge is copied to the beginning of
+ * the larger spillover buffer, allowing easy data mapping.
+ */
for (i = 0; i < lnk->wr_rx_cnt; i++) {
- lnk->wr_rx_sges[i].addr =
+ int x = i * sges_per_buf;
+
+ lnk->wr_rx_sges[x].addr =
lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE;
- lnk->wr_rx_sges[i].length = SMC_WR_BUF_SIZE;
- lnk->wr_rx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
+ lnk->wr_rx_sges[x].length = SMC_WR_TX_SIZE;
+ lnk->wr_rx_sges[x].lkey = lnk->roce_pd->local_dma_lkey;
+ if (lnk->lgr->smc_version == SMC_V2) {
+ lnk->wr_rx_sges[x + 1].addr =
+ lnk->wr_rx_v2_dma_addr + SMC_WR_TX_SIZE;
+ lnk->wr_rx_sges[x + 1].length =
+ SMC_WR_BUF_V2_SIZE - SMC_WR_TX_SIZE;
+ lnk->wr_rx_sges[x + 1].lkey =
+ lnk->roce_pd->local_dma_lkey;
+ }
lnk->wr_rx_ibs[i].next = NULL;
- lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i];
- lnk->wr_rx_ibs[i].num_sge = 1;
+ lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[x];
+ lnk->wr_rx_ibs[i].num_sge = sges_per_buf;
}
lnk->wr_reg.wr.next = NULL;
lnk->wr_reg.wr.num_sge = 0;
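
The comment above describes the V2 receive layout: every receive WR points its first sge at its own small slot and its second sge at the one shared spillover buffer, offset past the part the first sge already covers. A runnable sketch of the resulting address/length pairs; all constants and base addresses are assumed for illustration, the real values live in the SMC headers:

#include <stdio.h>

#define SMC_WR_BUF_SIZE    48UL		/* per-WR receive slot (assumed) */
#define SMC_WR_TX_SIZE     44UL		/* V1 message size (assumed) */
#define SMC_WR_BUF_V2_SIZE 8192UL	/* shared spillover buffer (assumed) */

int main(void)
{
	unsigned long rx_base = 0x1000, v2_base = 0x8000;

	for (unsigned long i = 0; i < 3; i++) {
		/* sge0: this WR's own small slot */
		unsigned long a0 = rx_base + i * SMC_WR_BUF_SIZE;
		/* sge1: the shared spillover area, identical for every WR */
		unsigned long a1 = v2_base + SMC_WR_TX_SIZE;

		printf("wr%lu: sge0=[%#lx,%lu] sge1=[%#lx,%lu]\n", i,
		       a0, SMC_WR_TX_SIZE,
		       a1, SMC_WR_BUF_V2_SIZE - SMC_WR_TX_SIZE);
	}
	return 0;
}
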
@@ -585,16 +686,45 @@ void smc_wr_free_link(struct smc_link *lnk)
DMA_FROM_DEVICE);
lnk->wr_rx_dma_addr = 0;
}
+ if (lnk->wr_rx_v2_dma_addr) {
+ ib_dma_unmap_single(ibdev, lnk->wr_rx_v2_dma_addr,
+ SMC_WR_BUF_V2_SIZE,
+ DMA_FROM_DEVICE);
+ lnk->wr_rx_v2_dma_addr = 0;
+ }
if (lnk->wr_tx_dma_addr) {
ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr,
SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
DMA_TO_DEVICE);
lnk->wr_tx_dma_addr = 0;
}
+ if (lnk->wr_tx_v2_dma_addr) {
+ ib_dma_unmap_single(ibdev, lnk->wr_tx_v2_dma_addr,
+ SMC_WR_BUF_V2_SIZE,
+ DMA_TO_DEVICE);
+ lnk->wr_tx_v2_dma_addr = 0;
+ }
+}
+
+void smc_wr_free_lgr_mem(struct smc_link_group *lgr)
+{
+ if (lgr->smc_version < SMC_V2)
+ return;
+
+ kfree(lgr->wr_rx_buf_v2);
+ lgr->wr_rx_buf_v2 = NULL;
+ kfree(lgr->wr_tx_buf_v2);
+ lgr->wr_tx_buf_v2 = NULL;
}
void smc_wr_free_link_mem(struct smc_link *lnk)
{
+ kfree(lnk->wr_tx_v2_ib);
+ lnk->wr_tx_v2_ib = NULL;
+ kfree(lnk->wr_tx_v2_sge);
+ lnk->wr_tx_v2_sge = NULL;
+ kfree(lnk->wr_tx_v2_pend);
+ lnk->wr_tx_v2_pend = NULL;
kfree(lnk->wr_tx_compl);
lnk->wr_tx_compl = NULL;
kfree(lnk->wr_tx_pends);
@@ -619,8 +749,26 @@ void smc_wr_free_link_mem(struct smc_link *lnk)
lnk->wr_rx_bufs = NULL;
}
+int smc_wr_alloc_lgr_mem(struct smc_link_group *lgr)
+{
+ if (lgr->smc_version < SMC_V2)
+ return 0;
+
+ lgr->wr_rx_buf_v2 = kzalloc(SMC_WR_BUF_V2_SIZE, GFP_KERNEL);
+ if (!lgr->wr_rx_buf_v2)
+ return -ENOMEM;
+ lgr->wr_tx_buf_v2 = kzalloc(SMC_WR_BUF_V2_SIZE, GFP_KERNEL);
+ if (!lgr->wr_tx_buf_v2) {
+ kfree(lgr->wr_rx_buf_v2);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
int smc_wr_alloc_link_mem(struct smc_link *link)
{
+ int sges_per_buf = link->lgr->smc_version == SMC_V2 ? 2 : 1;
+
/* allocate link related memory */
link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL);
if (!link->wr_tx_bufs)
@@ -653,7 +801,7 @@ int smc_wr_alloc_link_mem(struct smc_link *link)
if (!link->wr_tx_sges)
goto no_mem_wr_tx_rdma_sges;
link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
- sizeof(link->wr_rx_sges[0]),
+ sizeof(link->wr_rx_sges[0]) * sges_per_buf,
GFP_KERNEL);
if (!link->wr_rx_sges)
goto no_mem_wr_tx_sges;
@@ -672,8 +820,29 @@ int smc_wr_alloc_link_mem(struct smc_link *link)
GFP_KERNEL);
if (!link->wr_tx_compl)
goto no_mem_wr_tx_pends;
+
+ if (link->lgr->smc_version == SMC_V2) {
+ link->wr_tx_v2_ib = kzalloc(sizeof(*link->wr_tx_v2_ib),
+ GFP_KERNEL);
+ if (!link->wr_tx_v2_ib)
+ goto no_mem_tx_compl;
+ link->wr_tx_v2_sge = kzalloc(sizeof(*link->wr_tx_v2_sge),
+ GFP_KERNEL);
+ if (!link->wr_tx_v2_sge)
+ goto no_mem_v2_ib;
+ link->wr_tx_v2_pend = kzalloc(sizeof(*link->wr_tx_v2_pend),
+ GFP_KERNEL);
+ if (!link->wr_tx_v2_pend)
+ goto no_mem_v2_sge;
+ }
return 0;
+no_mem_v2_sge:
+ kfree(link->wr_tx_v2_sge);
+no_mem_v2_ib:
+ kfree(link->wr_tx_v2_ib);
+no_mem_tx_compl:
+ kfree(link->wr_tx_compl);
no_mem_wr_tx_pends:
kfree(link->wr_tx_pends);
no_mem_wr_tx_mask:
@@ -725,6 +894,24 @@ int smc_wr_create_link(struct smc_link *lnk)
rc = -EIO;
goto out;
}
+ if (lnk->lgr->smc_version == SMC_V2) {
+ lnk->wr_rx_v2_dma_addr = ib_dma_map_single(ibdev,
+ lnk->lgr->wr_rx_buf_v2, SMC_WR_BUF_V2_SIZE,
+ DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(ibdev, lnk->wr_rx_v2_dma_addr)) {
+ lnk->wr_rx_v2_dma_addr = 0;
+ rc = -EIO;
+ goto dma_unmap;
+ }
+ lnk->wr_tx_v2_dma_addr = ib_dma_map_single(ibdev,
+ lnk->lgr->wr_tx_buf_v2, SMC_WR_BUF_V2_SIZE,
+ DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ibdev, lnk->wr_tx_v2_dma_addr)) {
+ lnk->wr_tx_v2_dma_addr = 0;
+ rc = -EIO;
+ goto dma_unmap;
+ }
+ }
lnk->wr_tx_dma_addr = ib_dma_map_single(
ibdev, lnk->wr_tx_bufs, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
DMA_TO_DEVICE);
@@ -742,6 +929,18 @@ int smc_wr_create_link(struct smc_link *lnk)
return rc;
dma_unmap:
+ if (lnk->wr_rx_v2_dma_addr) {
+ ib_dma_unmap_single(ibdev, lnk->wr_rx_v2_dma_addr,
+ SMC_WR_BUF_V2_SIZE,
+ DMA_FROM_DEVICE);
+ lnk->wr_rx_v2_dma_addr = 0;
+ }
+ if (lnk->wr_tx_v2_dma_addr) {
+ ib_dma_unmap_single(ibdev, lnk->wr_tx_v2_dma_addr,
+ SMC_WR_BUF_V2_SIZE,
+ DMA_TO_DEVICE);
+ lnk->wr_tx_v2_dma_addr = 0;
+ }
ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
DMA_FROM_DEVICE);
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index 423b8709f1c9..f353311e6f84 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -60,6 +60,20 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
atomic_long_set(wr_tx_id, val);
}
+static inline bool smc_wr_tx_link_hold(struct smc_link *link)
+{
+ if (!smc_link_usable(link))
+ return false;
+ atomic_inc(&link->wr_tx_refcnt);
+ return true;
+}
+
+static inline void smc_wr_tx_link_put(struct smc_link *link)
+{
+ if (atomic_dec_and_test(&link->wr_tx_refcnt))
+ wake_up_all(&link->wr_tx_wait);
+}
+
static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk)
{
wake_up_all(&lnk->wr_tx_wait);
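
smc_wr_tx_link_hold()/smc_wr_tx_link_put() package the refcount-and-wake pattern that smc_tx.c open-coded before (see the smcr_tx_sndbuf_nonempty() hunk in net/smc/smc_tx.c above): refuse to pin an unusable link, and wake waiters when the last user drops off. A generic user-space analog of that pattern, with all names invented for illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_link {
	atomic_int wr_tx_refcnt;
	bool usable;
};

static bool link_hold(struct fake_link *l)
{
	if (!l->usable)
		return false;		/* link already going away */
	atomic_fetch_add(&l->wr_tx_refcnt, 1);
	return true;
}

static void link_put(struct fake_link *l)
{
	/* last reference dropped: wake whoever waits to tear the link down */
	if (atomic_fetch_sub(&l->wr_tx_refcnt, 1) == 1)
		printf("wake waiters\n");
}

int main(void)
{
	struct fake_link l = { .wr_tx_refcnt = 0, .usable = true };

	if (!link_hold(&l))
		return 1;
	/* ... send work would run here ... */
	link_put(&l);
	return 0;
}
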
@@ -87,8 +101,10 @@ static inline int smc_wr_rx_post(struct smc_link *link)
int smc_wr_create_link(struct smc_link *lnk);
int smc_wr_alloc_link_mem(struct smc_link *lnk);
+int smc_wr_alloc_lgr_mem(struct smc_link_group *lgr);
void smc_wr_free_link(struct smc_link *lnk);
void smc_wr_free_link_mem(struct smc_link *lnk);
+void smc_wr_free_lgr_mem(struct smc_link_group *lgr);
void smc_wr_remember_qp_attr(struct smc_link *lnk);
void smc_wr_remove_dev(struct smc_ib_device *smcibdev);
void smc_wr_add_dev(struct smc_ib_device *smcibdev);
@@ -97,10 +113,16 @@ int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler,
struct smc_wr_buf **wr_buf,
struct smc_rdma_wr **wrs,
struct smc_wr_tx_pend_priv **wr_pend_priv);
+int smc_wr_tx_get_v2_slot(struct smc_link *link,
+ smc_wr_tx_handler handler,
+ struct smc_wr_v2_buf **wr_buf,
+ struct smc_wr_tx_pend_priv **wr_pend_priv);
int smc_wr_tx_put_slot(struct smc_link *link,
struct smc_wr_tx_pend_priv *wr_pend_priv);
int smc_wr_tx_send(struct smc_link *link,
struct smc_wr_tx_pend_priv *wr_pend_priv);
+int smc_wr_tx_v2_send(struct smc_link *link,
+ struct smc_wr_tx_pend_priv *priv, int len);
int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
unsigned long timeout);
void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context);
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 3e776e3dff91..1f2817195549 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -645,7 +645,7 @@ static bool gss_check_seq_num(const struct svc_rqst *rqstp, struct rsc *rsci,
}
__set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
goto ok;
- } else if (seq_num <= sd->sd_max - GSS_SEQ_WIN) {
+ } else if (seq_num + GSS_SEQ_WIN <= sd->sd_max) {
goto toolow;
}
if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
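
The one-line change above avoids unsigned wrap-around rather than altering the window logic: while fewer than GSS_SEQ_WIN requests have been seen, sd_max - GSS_SEQ_WIN underflows to a huge value and every sequence number tests as too low. A self-contained demonstration (window size assumed for the demo):

#include <stdio.h>

#define GSS_SEQ_WIN 128u	/* window size assumed for the demo */

int main(void)
{
	unsigned int sd_max = 5;	/* only a few requests seen yet */
	unsigned int seq_num = 10;	/* clearly inside the window */

	/* Old test: 5u - 128u wraps to a huge unsigned value, so nearly
	 * every sequence number is misjudged as below the window.
	 */
	printf("old: %d\n", seq_num <= sd_max - GSS_SEQ_WIN);	/* 1, wrong */

	/* New test: the subtraction moves to the other side as an
	 * addition, both operands stay in range, no wrap-around.
	 */
	printf("new: %d\n", seq_num + GSS_SEQ_WIN <= sd_max);	/* 0, right */
	return 0;
}
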
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 443f8e5b9477..60bc74b76adc 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -462,7 +462,7 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
b->bcast_addr.media_id = b->media->type_id;
b->bcast_addr.broadcast = TIPC_BROADCAST_SUPPORT;
b->mtu = dev->mtu;
- b->media->raw2addr(b, &b->addr, (char *)dev->dev_addr);
+ b->media->raw2addr(b, &b->addr, (const char *)dev->dev_addr);
rcu_assign_pointer(dev->tipc_ptr, b);
return 0;
}
@@ -703,7 +703,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
break;
case NETDEV_CHANGEADDR:
b->media->raw2addr(b, &b->addr,
- (char *)dev->dev_addr);
+ (const char *)dev->dev_addr);
tipc_reset_bearer(net, b);
break;
case NETDEV_UNREGISTER:
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 57c6a1a719e2..490ad6e5f7a3 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -117,7 +117,7 @@ struct tipc_media {
char *msg);
int (*raw2addr)(struct tipc_bearer *b,
struct tipc_media_addr *addr,
- char *raw);
+ const char *raw);
u32 priority;
u32 tolerance;
u32 min_win;
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index c68019697cfe..cb0d185e06af 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -60,7 +60,7 @@ static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr)
/* Convert raw mac address format to media addr format */
static int tipc_eth_raw2addr(struct tipc_bearer *b,
struct tipc_media_addr *addr,
- char *msg)
+ const char *msg)
{
memset(addr, 0, sizeof(*addr));
ether_addr_copy(addr->value, msg);
diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c
index 7aa9ff88458d..b9ad0434c3cd 100644
--- a/net/tipc/ib_media.c
+++ b/net/tipc/ib_media.c
@@ -67,7 +67,7 @@ static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr)
/* Convert raw InfiniBand address format to media addr format */
static int tipc_ib_raw2addr(struct tipc_bearer *b,
struct tipc_media_addr *addr,
- char *msg)
+ const char *msg)
{
memset(addr, 0, sizeof(*addr));
memcpy(addr->value, msg, INFINIBAND_ALEN);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 989d1423a245..4147bb2e7057 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -498,9 +498,15 @@ static int tls_do_encryption(struct sock *sk,
int rc, iv_offset = 0;
/* For CCM based ciphers, first byte of IV is a constant */
- if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
+ switch (prot->cipher_type) {
+ case TLS_CIPHER_AES_CCM_128:
rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
iv_offset = 1;
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
+ iv_offset = 1;
+ break;
}
memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
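
Both CCM ciphers share the same constant leading byte, so the switch above only selects which definition to use before the nonce is copied at offset 1. A minimal sketch of the resulting IV layout; the nonce bytes are invented, and the value 2 matches what the removed open-coded assignment used:

#include <stdio.h>
#include <string.h>

#define TLS_CCM_IV_B0_BYTE 2	/* constant flags byte, as in the hunk */

int main(void)
{
	unsigned char iv[16] = { 0 };
	unsigned char nonce[13] = { 0xaa, 0xbb };	/* example nonce */
	int iv_offset = 1;

	/* CCM keeps a fixed flags byte in front of the nonce, which is
	 * why the code above copies the per-record IV at iv_offset.
	 */
	iv[0] = TLS_CCM_IV_B0_BYTE;
	memcpy(&iv[iv_offset], nonce, sizeof(nonce));

	printf("iv[0]=%#x iv[1]=%#x\n", iv[0], iv[1]);	/* 0x2 0xaa */
	return 0;
}
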
@@ -1457,10 +1463,16 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
aad = (u8 *)(sgout + n_sgout);
iv = aad + prot->aad_size;
- /* For CCM based ciphers, first byte of nonce+iv is always '2' */
- if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
- iv[0] = 2;
+ /* For CCM based ciphers, first byte of nonce+iv is a constant */
+ switch (prot->cipher_type) {
+ case TLS_CIPHER_AES_CCM_128:
+ iv[0] = TLS_AES_CCM_IV_B0_BYTE;
iv_offset = 1;
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
+ iv_offset = 1;
+ break;
}
/* Prepare IV */
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 92345c9bb60c..89f9e85ae970 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -608,20 +608,42 @@ static void unix_release_sock(struct sock *sk, int embrion)
static void init_peercred(struct sock *sk)
{
- put_pid(sk->sk_peer_pid);
- if (sk->sk_peer_cred)
- put_cred(sk->sk_peer_cred);
+ const struct cred *old_cred;
+ struct pid *old_pid;
+
+ spin_lock(&sk->sk_peer_lock);
+ old_pid = sk->sk_peer_pid;
+ old_cred = sk->sk_peer_cred;
sk->sk_peer_pid = get_pid(task_tgid(current));
sk->sk_peer_cred = get_current_cred();
+ spin_unlock(&sk->sk_peer_lock);
+
+ put_pid(old_pid);
+ put_cred(old_cred);
}
static void copy_peercred(struct sock *sk, struct sock *peersk)
{
- put_pid(sk->sk_peer_pid);
- if (sk->sk_peer_cred)
- put_cred(sk->sk_peer_cred);
+ const struct cred *old_cred;
+ struct pid *old_pid;
+
+ if (sk < peersk) {
+ spin_lock(&sk->sk_peer_lock);
+ spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+ } else {
+ spin_lock(&peersk->sk_peer_lock);
+ spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+ }
+ old_pid = sk->sk_peer_pid;
+ old_cred = sk->sk_peer_cred;
sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
+
+ spin_unlock(&sk->sk_peer_lock);
+ spin_unlock(&peersk->sk_peer_lock);
+
+ put_pid(old_pid);
+ put_cred(old_cred);
}
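
copy_peercred() above takes the two peer locks in address order, the standard way to give all concurrent lockers one global ordering and rule out ABBA deadlock. A hedged user-space analog with pthreads; comparing unrelated pointers this way is a common systems idiom, though not strictly portable C:

#include <pthread.h>
#include <stdio.h>

struct peer {
	pthread_mutex_t lock;
};

/* Always lock the lower-addressed peer first so two threads copying
 * credentials in opposite directions cannot deadlock on each other.
 */
static void lock_pair(struct peer *a, struct peer *b)
{
	struct peer *first = a < b ? a : b;
	struct peer *second = a < b ? b : a;

	pthread_mutex_lock(&first->lock);
	pthread_mutex_lock(&second->lock);
}

static void unlock_pair(struct peer *a, struct peer *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct peer x = { PTHREAD_MUTEX_INITIALIZER };
	struct peer y = { PTHREAD_MUTEX_INITIALIZER };

	lock_pair(&x, &y);
	printf("both peers locked, copy credentials here\n");
	unlock_pair(&x, &y);
	return 0;
}
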
static int unix_listen(struct socket *sock, int backlog)
@@ -806,7 +828,7 @@ static void unix_unhash(struct sock *sk)
}
struct proto unix_dgram_proto = {
- .name = "UNIX-DGRAM",
+ .name = "UNIX",
.owner = THIS_MODULE,
.obj_size = sizeof(struct unix_sock),
.close = unix_close,
@@ -828,20 +850,25 @@ struct proto unix_stream_proto = {
static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
{
- struct sock *sk = NULL;
struct unix_sock *u;
+ struct sock *sk;
+ int err;
atomic_long_inc(&unix_nr_socks);
- if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
- goto out;
+ if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
+ err = -ENFILE;
+ goto err;
+ }
if (type == SOCK_STREAM)
sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
else /*dgram and seqpacket */
sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
- if (!sk)
- goto out;
+ if (!sk) {
+ err = -ENOMEM;
+ goto err;
+ }
sock_init_data(sock, sk);
@@ -861,20 +888,23 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
memset(&u->scm_stat, 0, sizeof(struct scm_stat));
unix_insert_socket(unix_sockets_unbound(sk), sk);
-out:
- if (sk == NULL)
- atomic_long_dec(&unix_nr_socks);
- else {
- local_bh_disable();
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
- local_bh_enable();
- }
+
+ local_bh_disable();
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ local_bh_enable();
+
return sk;
+
+err:
+ atomic_long_dec(&unix_nr_socks);
+ return ERR_PTR(err);
}
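
unix_create1() now reports why it failed instead of returning a bare NULL, using the kernel's ERR_PTR encoding: the negative errno is stored in the pointer value itself and callers decode it with IS_ERR()/PTR_ERR(). A small user-space re-implementation of the idiom, simplified from the kernel versions in linux/err.h:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095UL

static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	/* error values occupy the top MAX_ERRNO addresses */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *create_obj(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);	/* precise failure reason */
	return malloc(16);
}

int main(void)
{
	void *o = create_obj(1);

	if (IS_ERR(o))
		printf("create failed: %ld\n", PTR_ERR(o));	/* -12 */
	else
		free(o);
	return 0;
}
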
static int unix_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
+ struct sock *sk;
+
if (protocol && protocol != PF_UNIX)
return -EPROTONOSUPPORT;
@@ -901,7 +931,11 @@ static int unix_create(struct net *net, struct socket *sock, int protocol,
return -ESOCKTNOSUPPORT;
}
- return unix_create1(net, sock, kern, sock->type) ? 0 : -ENOMEM;
+ sk = unix_create1(net, sock, kern, sock->type);
+ if (IS_ERR(sk))
+ return PTR_ERR(sk);
+
+ return 0;
}
static int unix_release(struct socket *sock)
@@ -1314,12 +1348,15 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
we will have to recheck all again in any case.
*/
- err = -ENOMEM;
-
/* create new sock for complete connection */
newsk = unix_create1(sock_net(sk), NULL, 0, sock->type);
- if (newsk == NULL)
+ if (IS_ERR(newsk)) {
+ err = PTR_ERR(newsk);
+ newsk = NULL;
goto out;
+ }
+
+ err = -ENOMEM;
/* Allocate skb for sending to listening sock */
skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
@@ -2845,6 +2882,9 @@ static int unix_shutdown(struct socket *sock, int mode)
unix_state_lock(sk);
sk->sk_shutdown |= mode;
+ if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
+ mode == SHUTDOWN_MASK)
+ sk->sk_state = TCP_CLOSE;
other = unix_peer(sk);
if (other)
sock_hold(other);
@@ -2867,12 +2907,10 @@ static int unix_shutdown(struct socket *sock, int mode)
other->sk_shutdown |= peer_mode;
unix_state_unlock(other);
other->sk_state_change(other);
- if (peer_mode == SHUTDOWN_MASK) {
+ if (peer_mode == SHUTDOWN_MASK)
sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
- other->sk_state = TCP_CLOSE;
- } else if (peer_mode & RCV_SHUTDOWN) {
+ else if (peer_mode & RCV_SHUTDOWN)
sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
- }
}
if (other)
sock_put(other);
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index e2c0cfb334d2..7d851eb3a683 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1614,13 +1614,18 @@ static int vsock_connectible_setsockopt(struct socket *sock,
vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
break;
- case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
- struct __kernel_old_timeval tv;
- COPY_IN(tv);
+ case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW:
+ case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: {
+ struct __kernel_sock_timeval tv;
+
+ err = sock_copy_user_timeval(&tv, optval, optlen,
+ optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD);
+ if (err)
+ break;
if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
vsk->connect_timeout = tv.tv_sec * HZ +
- DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ));
+ DIV_ROUND_UP((unsigned long)tv.tv_usec, (USEC_PER_SEC / HZ));
if (vsk->connect_timeout == 0)
vsk->connect_timeout =
VSOCK_DEFAULT_CONNECT_TIMEOUT;
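
The timeout math above converts a user-supplied time value into jiffies, rounding the microsecond part up so a non-zero request never truncates to zero ticks. Worked through with an assumed HZ of 250:

#include <stdio.h>

#define HZ 250UL		/* assumed tick rate for the demo */
#define USEC_PER_SEC 1000000UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long tv_sec = 2, tv_usec = 500000;	/* 2.5 s */
	unsigned long timeout = tv_sec * HZ +
		DIV_ROUND_UP(tv_usec, USEC_PER_SEC / HZ);

	printf("%lu jiffies\n", timeout);	/* 625 at HZ=250 */
	return 0;
}
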
@@ -1648,68 +1653,59 @@ static int vsock_connectible_getsockopt(struct socket *sock,
char __user *optval,
int __user *optlen)
{
- int err;
+ struct sock *sk = sock->sk;
+ struct vsock_sock *vsk = vsock_sk(sk);
+
+ union {
+ u64 val64;
+ struct old_timeval32 tm32;
+ struct __kernel_old_timeval tm;
+ struct __kernel_sock_timeval stm;
+ } v;
+
+ int lv = sizeof(v.val64);
int len;
- struct sock *sk;
- struct vsock_sock *vsk;
- u64 val;
if (level != AF_VSOCK)
return -ENOPROTOOPT;
- err = get_user(len, optlen);
- if (err != 0)
- return err;
-
-#define COPY_OUT(_v) \
- do { \
- if (len < sizeof(_v)) \
- return -EINVAL; \
- \
- len = sizeof(_v); \
- if (copy_to_user(optval, &_v, len) != 0) \
- return -EFAULT; \
- \
- } while (0)
+ if (get_user(len, optlen))
+ return -EFAULT;
- err = 0;
- sk = sock->sk;
- vsk = vsock_sk(sk);
+ memset(&v, 0, sizeof(v));
switch (optname) {
case SO_VM_SOCKETS_BUFFER_SIZE:
- val = vsk->buffer_size;
- COPY_OUT(val);
+ v.val64 = vsk->buffer_size;
break;
case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
- val = vsk->buffer_max_size;
- COPY_OUT(val);
+ v.val64 = vsk->buffer_max_size;
break;
case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
- val = vsk->buffer_min_size;
- COPY_OUT(val);
+ v.val64 = vsk->buffer_min_size;
break;
- case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
- struct __kernel_old_timeval tv;
- tv.tv_sec = vsk->connect_timeout / HZ;
- tv.tv_usec =
- (vsk->connect_timeout -
- tv.tv_sec * HZ) * (1000000 / HZ);
- COPY_OUT(tv);
+ case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW:
+ case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD:
+ lv = sock_get_timeout(vsk->connect_timeout, &v,
+ optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD);
break;
- }
+
default:
return -ENOPROTOOPT;
}
- err = put_user(len, optlen);
- if (err != 0)
+ if (len < lv)
+ return -EINVAL;
+ if (len > lv)
+ len = lv;
+ if (copy_to_user(optval, &v, len))
return -EFAULT;
-#undef COPY_OUT
+ if (put_user(len, optlen))
+ return -EFAULT;
return 0;
}
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index d6b500dc4208..f16074eb53c7 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -134,21 +134,6 @@ int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
return 0;
}
-void xp_release(struct xdp_buff_xsk *xskb)
-{
- xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
-}
-
-static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
-{
- u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
-
- offset += xskb->pool->headroom;
- if (!xskb->pool->unaligned)
- return xskb->orig_addr + offset;
- return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
-}
-
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 8de01aaac4a0..90c4e1e819d3 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -44,12 +44,13 @@ void xp_destroy(struct xsk_buff_pool *pool)
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
struct xdp_umem *umem)
{
+ bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
struct xsk_buff_pool *pool;
struct xdp_buff_xsk *xskb;
- u32 i;
+ u32 i, entries;
- pool = kvzalloc(struct_size(pool, free_heads, umem->chunks),
- GFP_KERNEL);
+ entries = unaligned ? umem->chunks : 0;
+ pool = kvzalloc(struct_size(pool, free_heads, entries), GFP_KERNEL);
if (!pool)
goto out;
@@ -63,7 +64,8 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
pool->free_heads_cnt = umem->chunks;
pool->headroom = umem->headroom;
pool->chunk_size = umem->chunk_size;
- pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
+ pool->chunk_shift = ffs(umem->chunk_size) - 1;
+ pool->unaligned = unaligned;
pool->frame_len = umem->chunk_size - umem->headroom -
XDP_PACKET_HEADROOM;
pool->umem = umem;
@@ -81,7 +83,10 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
xskb = &pool->heads[i];
xskb->pool = pool;
xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
- pool->free_heads[i] = xskb;
+ if (pool->unaligned)
+ pool->free_heads[i] = xskb;
+ else
+ xp_init_xskb_addr(xskb, pool, i * pool->chunk_size);
}
return pool;
@@ -406,6 +411,12 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
if (pool->unaligned)
xp_check_dma_contiguity(dma_map);
+ else
+ for (i = 0; i < pool->heads_cnt; i++) {
+ struct xdp_buff_xsk *xskb = &pool->heads[i];
+
+ xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
+ }
err = xp_init_dma_info(pool, dma_map);
if (err) {
@@ -448,12 +459,9 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
if (pool->free_heads_cnt == 0)
return NULL;
- xskb = pool->free_heads[--pool->free_heads_cnt];
-
for (;;) {
if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
pool->fq->queue_empty_descs++;
- xp_release(xskb);
return NULL;
}
@@ -466,17 +474,17 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
}
break;
}
- xskq_cons_release(pool->fq);
- xskb->orig_addr = addr;
- xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
- if (pool->dma_pages_cnt) {
- xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] &
- ~XSK_NEXT_PG_CONTIG_MASK) +
- (addr & ~PAGE_MASK);
- xskb->dma = xskb->frame_dma + pool->headroom +
- XDP_PACKET_HEADROOM;
+ if (pool->unaligned) {
+ xskb = pool->free_heads[--pool->free_heads_cnt];
+ xp_init_xskb_addr(xskb, pool, addr);
+ if (pool->dma_pages_cnt)
+ xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
+ } else {
+ xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
}
+
+ xskq_cons_release(pool->fq);
return xskb;
}
@@ -507,6 +515,96 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
}
EXPORT_SYMBOL(xp_alloc);
+static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ u32 i, cached_cons, nb_entries;
+
+ if (max > pool->free_heads_cnt)
+ max = pool->free_heads_cnt;
+ max = xskq_cons_nb_entries(pool->fq, max);
+
+ cached_cons = pool->fq->cached_cons;
+ nb_entries = max;
+ i = max;
+ while (i--) {
+ struct xdp_buff_xsk *xskb;
+ u64 addr;
+ bool ok;
+
+ __xskq_cons_read_addr_unchecked(pool->fq, cached_cons++, &addr);
+
+ ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
+ xp_check_aligned(pool, &addr);
+ if (unlikely(!ok)) {
+ pool->fq->invalid_descs++;
+ nb_entries--;
+ continue;
+ }
+
+ if (pool->unaligned) {
+ xskb = pool->free_heads[--pool->free_heads_cnt];
+ xp_init_xskb_addr(xskb, pool, addr);
+ if (pool->dma_pages_cnt)
+ xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
+ } else {
+ xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
+ }
+
+ *xdp = &xskb->xdp;
+ xdp++;
+ }
+
+ xskq_cons_release_n(pool->fq, max);
+ return nb_entries;
+}
+
+static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)
+{
+ struct xdp_buff_xsk *xskb;
+ u32 i;
+
+ nb_entries = min_t(u32, nb_entries, pool->free_list_cnt);
+
+ i = nb_entries;
+ while (i--) {
+ xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node);
+ list_del(&xskb->free_list_node);
+
+ *xdp = &xskb->xdp;
+ xdp++;
+ }
+ pool->free_list_cnt -= nb_entries;
+
+ return nb_entries;
+}
+
+u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ u32 nb_entries1 = 0, nb_entries2;
+
+ if (unlikely(pool->dma_need_sync)) {
+ /* Slow path */
+ *xdp = xp_alloc(pool);
+ return !!*xdp;
+ }
+
+ if (unlikely(pool->free_list_cnt)) {
+ nb_entries1 = xp_alloc_reused(pool, xdp, max);
+ if (nb_entries1 == max)
+ return nb_entries1;
+
+ max -= nb_entries1;
+ xdp += nb_entries1;
+ }
+
+ nb_entries2 = xp_alloc_new_from_fq(pool, xdp, max);
+ if (!nb_entries2)
+ pool->fq->queue_empty_descs++;
+
+ return nb_entries1 + nb_entries2;
+}
+EXPORT_SYMBOL(xp_alloc_batch);
+
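
xp_alloc_batch() above fills the caller's array in two stages: recycled buffers from the pool's free list first, then fresh addresses from the fill queue, returning however many it managed. A toy user-space model of that two-stage accounting, with the counts invented:

#include <stdio.h>

static unsigned int free_list_cnt = 3;	/* recycled buffers */
static unsigned int fill_queue_cnt = 4;	/* fresh fill-queue entries */

static unsigned int alloc_batch(unsigned int max)
{
	unsigned int got = 0, n;

	/* stage 1: drain the free list */
	n = free_list_cnt < max ? free_list_cnt : max;
	free_list_cnt -= n;
	got += n;
	max -= n;

	/* stage 2: top up from the fill queue, possibly short */
	n = fill_queue_cnt < max ? fill_queue_cnt : max;
	fill_queue_cnt -= n;
	got += n;
	return got;
}

int main(void)
{
	printf("got %u of 6\n", alloc_batch(6));	/* got 6: 3 reused + 3 fresh */
	return 0;
}
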
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
if (pool->free_list_cnt >= count)
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 9ae13cccfb28..e9aa2c236356 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -111,14 +111,18 @@ struct xsk_queue {
/* Functions that read and validate content from consumer rings. */
-static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
+static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
{
struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
+ u32 idx = cached_cons & q->ring_mask;
- if (q->cached_cons != q->cached_prod) {
- u32 idx = q->cached_cons & q->ring_mask;
+ *addr = ring->desc[idx];
+}
- *addr = ring->desc[idx];
+static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
+{
+ if (q->cached_cons != q->cached_prod) {
+ __xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
return true;
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 03b66d154b2b..3a3cb09eec12 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1961,24 +1961,65 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
return skb;
}
+static int xfrm_notify_userpolicy(struct net *net)
+{
+ struct xfrm_userpolicy_default *up;
+ int len = NLMSG_ALIGN(sizeof(*up));
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ int err;
+
+ skb = nlmsg_new(len, GFP_ATOMIC);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_GETDEFAULT, sizeof(*up), 0);
+ if (nlh == NULL) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
+
+ up = nlmsg_data(nlh);
+ up->in = net->xfrm.policy_default & XFRM_POL_DEFAULT_IN ?
+ XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+ up->fwd = net->xfrm.policy_default & XFRM_POL_DEFAULT_FWD ?
+ XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+ up->out = net->xfrm.policy_default & XFRM_POL_DEFAULT_OUT ?
+ XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+
+ nlmsg_end(skb, nlh);
+
+ rcu_read_lock();
+ err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
+ rcu_read_unlock();
+
+ return err;
+}
+
static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_userpolicy_default *up = nlmsg_data(nlh);
- u8 dirmask;
- u8 old_default = net->xfrm.policy_default;
- if (up->dirmask >= XFRM_USERPOLICY_DIRMASK_MAX)
- return -EINVAL;
+ if (up->in == XFRM_USERPOLICY_BLOCK)
+ net->xfrm.policy_default |= XFRM_POL_DEFAULT_IN;
+ else if (up->in == XFRM_USERPOLICY_ACCEPT)
+ net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_IN;
- dirmask = (1 << up->dirmask) & XFRM_POL_DEFAULT_MASK;
+ if (up->fwd == XFRM_USERPOLICY_BLOCK)
+ net->xfrm.policy_default |= XFRM_POL_DEFAULT_FWD;
+ else if (up->fwd == XFRM_USERPOLICY_ACCEPT)
+ net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_FWD;
- net->xfrm.policy_default = (old_default & (0xff ^ dirmask))
- | (up->action << up->dirmask);
+ if (up->out == XFRM_USERPOLICY_BLOCK)
+ net->xfrm.policy_default |= XFRM_POL_DEFAULT_OUT;
+ else if (up->out == XFRM_USERPOLICY_ACCEPT)
+ net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_OUT;
rt_genid_bump_all(net);
+ xfrm_notify_userpolicy(net);
return 0;
}
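
xfrm_set_default() now treats each direction independently: XFRM_USERPOLICY_BLOCK sets that direction's bit in policy_default, XFRM_USERPOLICY_ACCEPT clears it, and anything else leaves it untouched. A compact, runnable restatement; the bit values are assumed to mirror the kernel's XFRM_POL_DEFAULT_* flags:

#include <stdio.h>

#define XFRM_POL_DEFAULT_IN  1	/* bit values assumed for the demo */
#define XFRM_POL_DEFAULT_FWD 2
#define XFRM_POL_DEFAULT_OUT 4

#define XFRM_USERPOLICY_BLOCK  1
#define XFRM_USERPOLICY_ACCEPT 2

/* BLOCK sets the direction's bit, ACCEPT clears it, anything else
 * keeps the current setting.
 */
static unsigned char update_dir(unsigned char def, unsigned char bit,
				unsigned char req)
{
	if (req == XFRM_USERPOLICY_BLOCK)
		return def | bit;
	if (req == XFRM_USERPOLICY_ACCEPT)
		return def & ~bit;
	return def;
}

int main(void)
{
	unsigned char def = 0;

	def = update_dir(def, XFRM_POL_DEFAULT_IN, XFRM_USERPOLICY_BLOCK);
	def = update_dir(def, XFRM_POL_DEFAULT_OUT, XFRM_USERPOLICY_ACCEPT);
	printf("policy_default=%#x\n", def);	/* 0x1: only IN blocked */
	return 0;
}
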
@@ -1988,13 +2029,11 @@ static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
struct sk_buff *r_skb;
struct nlmsghdr *r_nlh;
struct net *net = sock_net(skb->sk);
- struct xfrm_userpolicy_default *r_up, *up;
+ struct xfrm_userpolicy_default *r_up;
int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default));
u32 portid = NETLINK_CB(skb).portid;
u32 seq = nlh->nlmsg_seq;
- up = nlmsg_data(nlh);
-
r_skb = nlmsg_new(len, GFP_ATOMIC);
if (!r_skb)
return -ENOMEM;
@@ -2007,8 +2046,12 @@ static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
r_up = nlmsg_data(r_nlh);
- r_up->action = ((net->xfrm.policy_default & (1 << up->dirmask)) >> up->dirmask);
- r_up->dirmask = up->dirmask;
+ r_up->in = net->xfrm.policy_default & XFRM_POL_DEFAULT_IN ?
+ XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+ r_up->fwd = net->xfrm.policy_default & XFRM_POL_DEFAULT_FWD ?
+ XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+ r_up->out = net->xfrm.policy_default & XFRM_POL_DEFAULT_OUT ?
+ XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
nlmsg_end(r_skb, r_nlh);
return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 4dc20be5fb96..5fd48a8d4f10 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -322,17 +322,11 @@ $(obj)/hbm_edt_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
-include $(BPF_SAMPLES_PATH)/Makefile.target
-VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
- $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
- ../../../../vmlinux \
- /sys/kernel/btf/vmlinux \
- /boot/vmlinux-$(shell uname -r)
+VMLINUX_BTF_PATHS ?= $(abspath $(if $(O),$(O)/vmlinux)) \
+ $(abspath $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux)) \
+ $(abspath ./vmlinux)
VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
-ifeq ($(VMLINUX_BTF),)
-$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
-endif
-
$(obj)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL)
ifeq ($(VMLINUX_H),)
$(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
@@ -340,6 +334,11 @@ else
$(Q)cp "$(VMLINUX_H)" $@
endif
+ifeq ($(VMLINUX_BTF),)
+ $(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)",\
+ build the kernel or set VMLINUX_BTF variable)
+endif
+
clean-files += vmlinux.h
# Get Clang's default includes on this system, as opposed to those seen by
diff --git a/samples/bpf/bpf_insn.h b/samples/bpf/bpf_insn.h
index aee04534483a..29c3bb6ad1cd 100644
--- a/samples/bpf/bpf_insn.h
+++ b/samples/bpf/bpf_insn.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* eBPF instruction mini library */
#ifndef __BPF_INSN_H
#define __BPF_INSN_H
diff --git a/samples/bpf/xdp_redirect_map_multi.bpf.c b/samples/bpf/xdp_redirect_map_multi.bpf.c
index 8f59d430cb64..bb0a5a3bfcf0 100644
--- a/samples/bpf/xdp_redirect_map_multi.bpf.c
+++ b/samples/bpf/xdp_redirect_map_multi.bpf.c
@@ -5,11 +5,6 @@
#include "xdp_sample.bpf.h"
#include "xdp_sample_shared.h"
-enum {
- BPF_F_BROADCAST = (1ULL << 3),
- BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
-};
-
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
__uint(key_size, sizeof(int));
diff --git a/samples/bpf/xdp_router_ipv4_user.c b/samples/bpf/xdp_router_ipv4_user.c
index b5f03cb17a3c..cfaf7e50e431 100644
--- a/samples/bpf/xdp_router_ipv4_user.c
+++ b/samples/bpf/xdp_router_ipv4_user.c
@@ -155,7 +155,7 @@ static void read_route(struct nlmsghdr *nh, int nll)
printf("%d\n", nh->nlmsg_type);
memset(&route, 0, sizeof(route));
- printf("Destination\t\tGateway\t\tGenmask\t\tMetric\t\tIface\n");
+ printf("Destination Gateway Genmask Metric Iface\n");
for (; NLMSG_OK(nh, nll); nh = NLMSG_NEXT(nh, nll)) {
rt_msg = (struct rtmsg *)NLMSG_DATA(nh);
rtm_family = rt_msg->rtm_family;
@@ -207,6 +207,7 @@ static void read_route(struct nlmsghdr *nh, int nll)
int metric;
__be32 gw;
} *prefix_value;
+ struct in_addr dst_addr, gw_addr, mask_addr;
prefix_key = alloca(sizeof(*prefix_key) + 3);
prefix_value = alloca(sizeof(*prefix_value));
@@ -234,14 +235,17 @@ static void read_route(struct nlmsghdr *nh, int nll)
for (i = 0; i < 4; i++)
prefix_key->data[i] = (route.dst >> i * 8) & 0xff;
- printf("%3d.%d.%d.%d\t\t%3x\t\t%d\t\t%d\t\t%s\n",
- (int)prefix_key->data[0],
- (int)prefix_key->data[1],
- (int)prefix_key->data[2],
- (int)prefix_key->data[3],
- route.gw, route.dst_len,
+ dst_addr.s_addr = route.dst;
+ printf("%-16s", inet_ntoa(dst_addr));
+
+ gw_addr.s_addr = route.gw;
+ printf("%-16s", inet_ntoa(gw_addr));
+
+ mask_addr.s_addr = htonl(~(0xffffffffU >> route.dst_len));
+ printf("%-16s%-7d%s\n", inet_ntoa(mask_addr),
route.metric,
route.iface_name);
+
if (bpf_map_lookup_elem(lpm_map_fd, prefix_key,
prefix_value) < 0) {
for (i = 0; i < 4; i++)
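
The netmask printed above is derived from the prefix length by shifting out the host bits. A standalone check of that expression; note that a prefix length of 0 would shift a 32-bit value by 32, which C leaves undefined, so a default route needs special-casing:

#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned int dst_len = 24;	/* /24 prefix */
	struct in_addr mask;

	/* shift out the host bits, invert, convert to network order */
	mask.s_addr = htonl(~(0xffffffffU >> dst_len));
	printf("%s\n", inet_ntoa(mask));	/* 255.255.255.0 */
	return 0;
}
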
@@ -393,8 +397,12 @@ static void read_arp(struct nlmsghdr *nh, int nll)
if (nh->nlmsg_type == RTM_GETNEIGH)
printf("READING arp entry\n");
- printf("Address\tHwAddress\n");
+ printf("Address HwAddress\n");
for (; NLMSG_OK(nh, nll); nh = NLMSG_NEXT(nh, nll)) {
+ struct in_addr dst_addr;
+ char mac_str[18];
+ int len = 0, i;
+
rt_msg = (struct ndmsg *)NLMSG_DATA(nh);
rt_attr = (struct rtattr *)RTM_RTA(rt_msg);
ndm_family = rt_msg->ndm_family;
@@ -415,7 +423,14 @@ static void read_arp(struct nlmsghdr *nh, int nll)
}
arp_entry.dst = atoi(dsts);
arp_entry.mac = atol(mac);
- printf("%x\t\t%llx\n", arp_entry.dst, arp_entry.mac);
+
+ dst_addr.s_addr = arp_entry.dst;
+ for (i = 0; i < 6; i++)
+ len += snprintf(mac_str + len, 18 - len, "%02llx%s",
+ ((arp_entry.mac >> i * 8) & 0xff),
+ i < 5 ? ":" : "");
+ printf("%-16s%s\n", inet_ntoa(dst_addr), mac_str);
+
if (ndm_family == AF_INET) {
if (bpf_map_lookup_elem(exact_match_map_fd,
&arp_entry.dst,
@@ -672,7 +687,7 @@ int main(int ac, char **argv)
if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
return 1;
- printf("\n**************loading bpf file*********************\n\n\n");
+ printf("\n******************loading bpf file*********************\n");
if (!prog_fd) {
printf("bpf_prog_load_xattr: %s\n", strerror(errno));
return 1;
@@ -722,9 +737,9 @@ int main(int ac, char **argv)
signal(SIGINT, int_exit);
signal(SIGTERM, int_exit);
- printf("*******************ROUTE TABLE*************************\n\n\n");
+ printf("\n*******************ROUTE TABLE*************************\n");
get_route_table(AF_INET);
- printf("*******************ARP TABLE***************************\n\n\n");
+ printf("\n*******************ARP TABLE***************************\n");
get_arp_table(AF_INET);
if (monitor_route() < 0) {
printf("Error in receiving route update");
diff --git a/scripts/Makefile.clang b/scripts/Makefile.clang
index 4cce8fd0779c..51fc23e2e9e5 100644
--- a/scripts/Makefile.clang
+++ b/scripts/Makefile.clang
@@ -29,7 +29,12 @@ CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
else
CLANG_FLAGS += -fintegrated-as
endif
+# By default, clang only warns when it encounters an unknown warning flag or
+# certain optimization flags it knows it has not implemented.
+# Make it behave more like gcc by erroring when these flags are encountered
+# so they can be implemented or wrapped in cc-option.
CLANG_FLAGS += -Werror=unknown-warning-option
+CLANG_FLAGS += -Werror=ignored-optimization-argument
KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_AFLAGS += $(CLANG_FLAGS)
export CLANG_FLAGS
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index 952e46876329..4aad28480035 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -19,6 +19,10 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF) \
+= -fplugin-arg-structleak_plugin-byref
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL) \
+= -fplugin-arg-structleak_plugin-byref-all
+ifdef CONFIG_GCC_PLUGIN_STRUCTLEAK
+ DISABLE_STRUCTLEAK_PLUGIN += -fplugin-arg-structleak_plugin-disable
+endif
+export DISABLE_STRUCTLEAK_PLUGIN
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK) \
+= -DSTRUCTLEAK_PLUGIN
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 801c415bac59..b9e94c5e7097 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -33,10 +33,11 @@ else
CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
$(call cc-param,asan-globals=1) \
$(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
- $(call cc-param,asan-stack=$(stack_enable)) \
$(call cc-param,asan-instrument-allocas=1)
endif
+CFLAGS_KASAN += $(call cc-param,asan-stack=$(stack_enable))
+
endif # CONFIG_KASAN_GENERIC
ifdef CONFIG_KASAN_SW_TAGS
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index eef56d629799..48585c4d04ad 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -13,7 +13,7 @@
# Stage 2 is handled by this file and does the following
# 1) Find all modules listed in modules.order
# 2) modpost is then used to
-# 3) create one <module>.mod.c file pr. module
+# 3) create one <module>.mod.c file per module
# 4) create one Module.symvers file with CRC for all exported symbols
# Step 3 is used to place certain information in the module's ELF
diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py
index b9b0f15e5880..217d21abc86e 100755
--- a/scripts/checkkconfigsymbols.py
+++ b/scripts/checkkconfigsymbols.py
@@ -34,7 +34,6 @@ REGEX_SOURCE_SYMBOL = re.compile(SOURCE_SYMBOL)
REGEX_KCONFIG_DEF = re.compile(DEF)
REGEX_KCONFIG_EXPR = re.compile(EXPR)
REGEX_KCONFIG_STMT = re.compile(STMT)
-REGEX_KCONFIG_HELP = re.compile(r"^\s+help\s*$")
REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$")
REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
REGEX_QUOTES = re.compile("(\"(.*?)\")")
@@ -102,6 +101,9 @@ def parse_options():
"continue.")
if args.commit:
+ if args.commit.startswith('HEAD'):
+ sys.exit("The --commit option can't use the HEAD ref")
+
args.find = False
if args.ignore:
@@ -432,7 +434,6 @@ def parse_kconfig_file(kfile):
lines = []
defined = []
references = []
- skip = False
if not os.path.exists(kfile):
return defined, references
@@ -448,12 +449,6 @@ def parse_kconfig_file(kfile):
if REGEX_KCONFIG_DEF.match(line):
symbol_def = REGEX_KCONFIG_DEF.findall(line)
defined.append(symbol_def[0])
- skip = False
- elif REGEX_KCONFIG_HELP.match(line):
- skip = True
- elif skip:
- # ignore content of help messages
- pass
elif REGEX_KCONFIG_STMT.match(line):
line = REGEX_QUOTES.sub("", line)
symbols = get_symbols_in_line(line)
diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh
index fd9777f63f14..9dbab13329fa 100755
--- a/scripts/checksyscalls.sh
+++ b/scripts/checksyscalls.sh
@@ -82,10 +82,8 @@ cat << EOF
#define __IGNORE_truncate64
#define __IGNORE_stat64
#define __IGNORE_lstat64
-#define __IGNORE_fstat64
#define __IGNORE_fcntl64
#define __IGNORE_fadvise64_64
-#define __IGNORE_fstatat64
#define __IGNORE_fstatfs64
#define __IGNORE_statfs64
#define __IGNORE_llseek
@@ -253,6 +251,10 @@ cat << EOF
#define __IGNORE_getpmsg
#define __IGNORE_putpmsg
#define __IGNORE_vserver
+
+/* 64-bit ports never needed these, and new 32-bit ports can use statx */
+#define __IGNORE_fstat64
+#define __IGNORE_fstatat64
EOF
}
diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py
index 0033eedce003..1d1bde1fd45e 100755
--- a/scripts/clang-tools/gen_compile_commands.py
+++ b/scripts/clang-tools/gen_compile_commands.py
@@ -13,6 +13,7 @@ import logging
import os
import re
import subprocess
+import sys
_DEFAULT_OUTPUT = 'compile_commands.json'
_DEFAULT_LOG_LEVEL = 'WARNING'
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 8f6b13ae46bf..7d631aaa0ae1 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -189,7 +189,7 @@ if ($arch =~ /(x86(_64)?)|(i386)/) {
$local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\S+)";
$weak_regex = "^[0-9a-fA-F]+\\s+([wW])\\s+(\\S+)";
$section_regex = "Disassembly of section\\s+(\\S+):";
-$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
+$function_regex = "^([0-9a-fA-F]+)\\s+<([^^]*?)>:";
$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s(mcount|__fentry__)\$";
$section_type = '@progbits';
$mcount_adjust = 0;
diff --git a/scripts/sorttable.c b/scripts/sorttable.c
index f355869c65cd..6ee4fa882919 100644
--- a/scripts/sorttable.c
+++ b/scripts/sorttable.c
@@ -54,6 +54,10 @@
#define EM_ARCV2 195
#endif
+#ifndef EM_RISCV
+#define EM_RISCV 243
+#endif
+
static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *);
static uint64_t (*r8)(const uint64_t *);
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index e3d79a7b6db6..b5d5333ab330 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -918,6 +918,13 @@ void key_change_session_keyring(struct callback_head *twork)
return;
}
+ /* If get_ucounts fails more bits are needed in the refcount */
+ if (unlikely(!get_ucounts(old->ucounts))) {
+ WARN_ONCE(1, "In %s get_ucounts failed\n", __func__);
+ put_cred(new);
+ return;
+ }
+
new-> uid = old-> uid;
new-> euid = old-> euid;
new-> suid = old-> suid;
@@ -927,6 +934,7 @@ void key_change_session_keyring(struct callback_head *twork)
new-> sgid = old-> sgid;
new->fsgid = old->fsgid;
new->user = get_uid(old->user);
+ new->ucounts = old->ucounts;
new->user_ns = get_user_ns(old->user_ns);
new->group_info = get_group_info(old->group_info);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 6517f221d52c..e7ebd45ca345 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2157,7 +2157,7 @@ static int selinux_ptrace_access_check(struct task_struct *child,
static int selinux_ptrace_traceme(struct task_struct *parent)
{
return avc_has_perm(&selinux_state,
- task_sid_subj(parent), task_sid_obj(current),
+ task_sid_obj(parent), task_sid_obj(current),
SECCLASS_PROCESS, PROCESS__PTRACE, NULL);
}
@@ -6222,7 +6222,7 @@ static int selinux_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *m
struct ipc_security_struct *isec;
struct msg_security_struct *msec;
struct common_audit_data ad;
- u32 sid = task_sid_subj(target);
+ u32 sid = task_sid_obj(target);
int rc;
isec = selinux_ipc(msq);
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index d59276f48d4f..94ea2a8b2bb7 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -126,6 +126,8 @@ static const struct nlmsg_perm nlmsg_xfrm_perms[] =
{ XFRM_MSG_NEWSPDINFO, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
{ XFRM_MSG_GETSPDINFO, NETLINK_XFRM_SOCKET__NLMSG_READ },
{ XFRM_MSG_MAPPING, NETLINK_XFRM_SOCKET__NLMSG_READ },
+ { XFRM_MSG_SETDEFAULT, NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+ { XFRM_MSG_GETDEFAULT, NETLINK_XFRM_SOCKET__NLMSG_READ },
};
static const struct nlmsg_perm nlmsg_audit_perms[] =
@@ -189,7 +191,7 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
* structures at the top of this file with the new mappings
* before updating the BUILD_BUG_ON() macro!
*/
- BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_MAPPING);
+ BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_GETDEFAULT);
err = nlmsg_perm(nlmsg_type, perm, nlmsg_xfrm_perms,
sizeof(nlmsg_xfrm_perms));
break;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index cacbe7518519..21a0e7c3b8de 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -2016,7 +2016,7 @@ static int smk_curacc_on_task(struct task_struct *p, int access,
const char *caller)
{
struct smk_audit_info ad;
- struct smack_known *skp = smk_of_task_struct_subj(p);
+ struct smack_known *skp = smk_of_task_struct_obj(p);
int rc;
smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
@@ -3480,7 +3480,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
*/
static int smack_getprocattr(struct task_struct *p, char *name, char **value)
{
- struct smack_known *skp = smk_of_task_struct_subj(p);
+ struct smack_known *skp = smk_of_task_struct_obj(p);
char *cp;
int slen;
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index a59de24695ec..dfe5a64e19d2 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -468,6 +468,76 @@ static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream,
}
#endif /* CONFIG_X86_X32 */
+#ifdef __BIG_ENDIAN
+typedef char __pad_before_u32[4];
+typedef char __pad_after_u32[0];
+#else
+typedef char __pad_before_u32[0];
+typedef char __pad_after_u32[4];
+#endif
+
+/* PCM 2.0.15 API definition had a bug in mmap control; it puts the avail_min
+ * at the wrong offset due to a typo in padding type.
+ * The bug hits only 32bit.
+ * A workaround for incorrect read/write is needed only in 32bit compat mode.
+ */
+struct __snd_pcm_mmap_control64_buggy {
+ __pad_before_u32 __pad1;
+ __u32 appl_ptr;
+ __pad_before_u32 __pad2; /* SiC! here is the bug */
+ __pad_before_u32 __pad3;
+ __u32 avail_min;
+ __pad_after_uframe __pad4;
+};
+
+static int snd_pcm_ioctl_sync_ptr_buggy(struct snd_pcm_substream *substream,
+ struct snd_pcm_sync_ptr __user *_sync_ptr)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_pcm_sync_ptr sync_ptr;
+ struct __snd_pcm_mmap_control64_buggy *sync_cp;
+ volatile struct snd_pcm_mmap_status *status;
+ volatile struct snd_pcm_mmap_control *control;
+ int err;
+
+ memset(&sync_ptr, 0, sizeof(sync_ptr));
+ sync_cp = (struct __snd_pcm_mmap_control64_buggy *)&sync_ptr.c.control;
+ if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
+ return -EFAULT;
+ if (copy_from_user(sync_cp, &(_sync_ptr->c.control), sizeof(*sync_cp)))
+ return -EFAULT;
+ status = runtime->status;
+ control = runtime->control;
+ if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
+ err = snd_pcm_hwsync(substream);
+ if (err < 0)
+ return err;
+ }
+ snd_pcm_stream_lock_irq(substream);
+ if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
+ err = pcm_lib_apply_appl_ptr(substream, sync_cp->appl_ptr);
+ if (err < 0) {
+ snd_pcm_stream_unlock_irq(substream);
+ return err;
+ }
+ } else {
+ sync_cp->appl_ptr = control->appl_ptr;
+ }
+ if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
+ control->avail_min = sync_cp->avail_min;
+ else
+ sync_cp->avail_min = control->avail_min;
+ sync_ptr.s.status.state = status->state;
+ sync_ptr.s.status.hw_ptr = status->hw_ptr;
+ sync_ptr.s.status.tstamp = status->tstamp;
+ sync_ptr.s.status.suspended_state = status->suspended_state;
+ sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
+ snd_pcm_stream_unlock_irq(substream);
+ if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
+ return -EFAULT;
+ return 0;
+}
+
/*
*/
enum {
@@ -537,7 +607,7 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
if (in_x32_syscall())
return snd_pcm_ioctl_sync_ptr_x32(substream, argp);
#endif /* CONFIG_X86_X32 */
- return snd_pcm_common_ioctl(file, substream, cmd, argp);
+ return snd_pcm_ioctl_sync_ptr_buggy(substream, argp);
case SNDRV_PCM_IOCTL_HW_REFINE32:
return snd_pcm_ioctl_hw_params_compat(substream, 1, argp);
case SNDRV_PCM_IOCTL_HW_PARAMS32:
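The padding typo described in the comment above is easiest to see with offsetof(). This little-endian user-space sketch (using the GNU zero-length-array extension, as the kernel typedefs do; the kernel's real tail pad is a uframe-sized type) shows avail_min landing one 32-bit word early in the buggy layout:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef char pad_before_u32[0];	/* little-endian: nothing before ... */
typedef char pad_after_u32[4];	/* ... 4 bytes after each __u32 */

struct control64_intended {	/* each u32 padded out to a u64 slot */
	pad_before_u32 p1; uint32_t appl_ptr;  pad_after_u32 p2;
	pad_before_u32 p3; uint32_t avail_min; pad_after_u32 p4;
};

struct control64_buggy {	/* p2 mistakenly a "before" (empty) pad */
	pad_before_u32 p1; uint32_t appl_ptr;  pad_before_u32 p2;
	pad_before_u32 p3; uint32_t avail_min; pad_after_u32 p4;
};

int main(void)
{
	printf("intended avail_min offset: %zu\n",
	       offsetof(struct control64_intended, avail_min));	/* 8 */
	printf("buggy    avail_min offset: %zu\n",
	       offsetof(struct control64_buggy, avail_min));		/* 4 */
	return 0;
}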
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 6c0a4a67ad2e..6f30231bdb88 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -873,12 +873,21 @@ static long snd_rawmidi_ioctl(struct file *file, unsigned int cmd, unsigned long
return -EINVAL;
}
}
+ case SNDRV_RAWMIDI_IOCTL_USER_PVERSION:
+ if (get_user(rfile->user_pversion, (unsigned int __user *)arg))
+ return -EFAULT;
+ return 0;
+
case SNDRV_RAWMIDI_IOCTL_PARAMS:
{
struct snd_rawmidi_params params;
if (copy_from_user(&params, argp, sizeof(struct snd_rawmidi_params)))
return -EFAULT;
+ if (rfile->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 2)) {
+ params.mode = 0;
+ memset(params.reserved, 0, sizeof(params.reserved));
+ }
switch (params.stream) {
case SNDRV_RAWMIDI_STREAM_OUTPUT:
if (rfile->output == NULL)
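The new SNDRV_RAWMIDI_IOCTL_USER_PVERSION handler lets userspace declare which protocol revision it was built against, so the PARAMS handler above can zero the fields (mode, reserved) that pre-2.0.2 callers never initialized. A hedged user-space sketch (device path is illustrative; the ioctl only exists on kernels with this change):

#include <sound/asound.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int fd = open("/dev/snd/midiC0D0", O_RDWR);	/* example device */
	unsigned int ver = SNDRV_PROTOCOL_VERSION(2, 0, 2);

	if (fd < 0)
		return 1;
	/* declare our protocol version before SNDRV_RAWMIDI_IOCTL_PARAMS,
	 * so post-2.0.1 fields are honored rather than cleared */
	if (ioctl(fd, SNDRV_RAWMIDI_IOCTL_USER_PVERSION, &ver) < 0)
		perror("USER_PVERSION");	/* older kernels reject this */
	return 0;
}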
diff --git a/sound/core/seq_device.c b/sound/core/seq_device.c
index 382275c5b193..7f3fd8eb016f 100644
--- a/sound/core/seq_device.c
+++ b/sound/core/seq_device.c
@@ -156,6 +156,8 @@ static int snd_seq_device_dev_free(struct snd_device *device)
struct snd_seq_device *dev = device->device_data;
cancel_autoload_drivers();
+ if (dev->private_free)
+ dev->private_free(dev);
put_device(&dev->dev);
return 0;
}
@@ -183,11 +185,7 @@ static int snd_seq_device_dev_disconnect(struct snd_device *device)
static void snd_seq_dev_release(struct device *dev)
{
- struct snd_seq_device *sdev = to_seq_dev(dev);
-
- if (sdev->private_free)
- sdev->private_free(sdev);
- kfree(sdev);
+ kfree(to_seq_dev(dev));
}
/*
diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c
index ed40d0f7432c..773db4bf0876 100644
--- a/sound/drivers/pcsp/pcsp_lib.c
+++ b/sound/drivers/pcsp/pcsp_lib.c
@@ -143,7 +143,7 @@ enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle)
if (pointer_update)
pcsp_pointer_update(chip);
- hrtimer_forward(handle, hrtimer_get_expires(handle), ns_to_ktime(ns));
+ hrtimer_forward_now(handle, ns_to_ktime(ns));
return HRTIMER_RESTART;
}
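hrtimer_forward_now() advances the expiry relative to the current time rather than the previously programmed expiry, so a callback that fires late cannot reschedule the next expiry into the past. A minimal kernel-style sketch of the pattern (names illustrative):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart periodic_cb(struct hrtimer *t)
{
	ktime_t period = ms_to_ktime(1);	/* 1 ms, arbitrary */

	/* shorthand for hrtimer_forward(t, hrtimer_cb_get_time(t), period) */
	hrtimer_forward_now(t, period);
	return HRTIMER_RESTART;
}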
diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c
index 5388b85fb60e..a18c2c033e83 100644
--- a/sound/firewire/motu/amdtp-motu.c
+++ b/sound/firewire/motu/amdtp-motu.c
@@ -276,10 +276,11 @@ static void __maybe_unused copy_message(u64 *frames, __be32 *buffer,
/* This is just for v2/v3 protocol. */
for (i = 0; i < data_blocks; ++i) {
- *frames = (be32_to_cpu(buffer[1]) << 16) |
- (be32_to_cpu(buffer[2]) >> 16);
+ *frames = be32_to_cpu(buffer[1]);
+ *frames <<= 16;
+ *frames |= be32_to_cpu(buffer[2]) >> 16;
+ ++frames;
buffer += data_block_quadlets;
- frames++;
}
}
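The rewrite matters because be32_to_cpu() yields a 32-bit value: shifting it left by 16 discards the top bits before anything is widened to the 64-bit destination, regardless of architecture. A user-space sketch of the difference:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t hi = 0x12345678, lo = 0x9abc0000;
	uint64_t wrong, right;

	wrong = (hi << 16) | (lo >> 16);	/* hi truncated: 0x56789abc */
	right = hi;				/* widen first ... */
	right <<= 16;				/* ... then shift */
	right |= lo >> 16;			/* 0x123456789abc */

	printf("wrong: 0x%llx\nright: 0x%llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}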
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index cb5b5e3a481b..daf731364695 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -184,13 +184,16 @@ static int detect_quirks(struct snd_oxfw *oxfw, const struct ieee1394_device_id
model = val;
}
- /*
- * Mackie Onyx Satellite with base station has a quirk to report a wrong
- * value in 'dbs' field of CIP header against its format information.
- */
- if (vendor == VENDOR_LOUD && model == MODEL_SATELLITE)
+ if (vendor == VENDOR_LOUD) {
+ // Mackie Onyx Satellite with base station has a quirk to report a wrong
+ // value in 'dbs' field of CIP header against its format information.
oxfw->quirks |= SND_OXFW_QUIRK_WRONG_DBS;
+ // OXFW971-based models may transfer events by blocking method.
+ if (!(oxfw->quirks & SND_OXFW_QUIRK_JUMBO_PAYLOAD))
+ oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION;
+ }
+
return 0;
}
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 062da7a7a586..f7bd6e2db085 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -421,8 +421,9 @@ int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
if (!full_reset)
goto skip_reset;
- /* clear STATESTS */
- snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
+ /* clear STATESTS if not in reset */
+ if (snd_hdac_chip_readb(bus, GCTL) & AZX_GCTL_RESET)
+ snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
/* reset controller */
snd_hdac_bus_enter_link_reset(bus);
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index 2523b23389e9..1c8bffc3eec6 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -298,29 +298,31 @@ int snd_hda_codec_configure(struct hda_codec *codec)
{
int err;
+ if (codec->configured)
+ return 0;
+
if (is_generic_config(codec))
codec->probe_id = HDA_CODEC_ID_GENERIC;
else
codec->probe_id = 0;
- err = snd_hdac_device_register(&codec->core);
- if (err < 0)
- return err;
+ if (!device_is_registered(&codec->core.dev)) {
+ err = snd_hdac_device_register(&codec->core);
+ if (err < 0)
+ return err;
+ }
if (!codec->preset)
codec_bind_module(codec);
if (!codec->preset) {
err = codec_bind_generic(codec);
if (err < 0) {
- codec_err(codec, "Unable to bind the codec\n");
- goto error;
+ codec_dbg(codec, "Unable to bind the codec\n");
+ return err;
}
}
+ codec->configured = 1;
return 0;
-
- error:
- snd_hdac_device_unregister(&codec->core);
- return err;
}
EXPORT_SYMBOL_GPL(snd_hda_codec_configure);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index a9ebefd60cf6..0c4a337c9fc0 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -791,6 +791,7 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec)
snd_array_free(&codec->nids);
remove_conn_list(codec);
snd_hdac_regmap_exit(&codec->core);
+ codec->configured = 0;
}
EXPORT_SYMBOL_GPL(snd_hda_codec_cleanup_for_unbind);
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 7cd452831fd3..930ae4002a81 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -25,6 +25,7 @@
#include <sound/core.h>
#include <sound/initval.h>
#include "hda_controller.h"
+#include "hda_local.h"
#define CREATE_TRACE_POINTS
#include "hda_controller_trace.h"
@@ -1248,17 +1249,24 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs);
int azx_codec_configure(struct azx *chip)
{
struct hda_codec *codec, *next;
+ int success = 0;
- /* use _safe version here since snd_hda_codec_configure() deregisters
- * the device upon error and deletes itself from the bus list.
- */
- list_for_each_codec_safe(codec, next, &chip->bus) {
- snd_hda_codec_configure(codec);
+ list_for_each_codec(codec, &chip->bus) {
+ if (!snd_hda_codec_configure(codec))
+ success++;
}
- if (!azx_bus(chip)->num_codecs)
- return -ENODEV;
- return 0;
+ if (success) {
+ /* unregister failed codecs if any codec has been probed */
+ list_for_each_codec_safe(codec, next, &chip->bus) {
+ if (!codec->configured) {
+ codec_err(codec, "Unable to configure, disabling\n");
+ snd_hdac_device_unregister(&codec->core);
+ }
+ }
+ }
+
+ return success ? 0 : -ENODEV;
}
EXPORT_SYMBOL_GPL(azx_codec_configure);
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index 3062f87380b1..f5bf295eb830 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -41,7 +41,7 @@
/* 24 unused */
#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
-/* 27 unused */
+#define AZX_DCAPS_RETRY_PROBE (1 << 27) /* retry probe if no codec is configured */
#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
#define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 3aa432d814a2..4d22e7adeee8 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -307,7 +307,8 @@ enum {
/* quirks for AMD SB */
#define AZX_DCAPS_PRESET_AMD_SB \
(AZX_DCAPS_NO_TCSEL | AZX_DCAPS_AMD_WORKAROUND |\
- AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
+ AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME |\
+ AZX_DCAPS_RETRY_PROBE)
/* quirks for Nvidia */
#define AZX_DCAPS_PRESET_NVIDIA \
@@ -883,10 +884,11 @@ static unsigned int azx_get_pos_skl(struct azx *chip, struct azx_dev *azx_dev)
return azx_get_pos_posbuf(chip, azx_dev);
}
-static void azx_shutdown_chip(struct azx *chip)
+static void __azx_shutdown_chip(struct azx *chip, bool skip_link_reset)
{
azx_stop_chip(chip);
- azx_enter_link_reset(chip);
+ if (!skip_link_reset)
+ azx_enter_link_reset(chip);
azx_clear_irq_pending(chip);
display_power(chip, false);
}
@@ -895,6 +897,11 @@ static void azx_shutdown_chip(struct azx *chip)
static DEFINE_MUTEX(card_list_lock);
static LIST_HEAD(card_list);
+static void azx_shutdown_chip(struct azx *chip)
+{
+ __azx_shutdown_chip(chip, false);
+}
+
static void azx_add_card_list(struct azx *chip)
{
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
@@ -1717,7 +1724,7 @@ static void azx_check_snoop_available(struct azx *chip)
static void azx_probe_work(struct work_struct *work)
{
- struct hda_intel *hda = container_of(work, struct hda_intel, probe_work);
+ struct hda_intel *hda = container_of(work, struct hda_intel, probe_work.work);
azx_probe_continue(&hda->chip);
}
@@ -1822,7 +1829,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
}
/* continue probing in work context, as it may trigger request_module() */
- INIT_WORK(&hda->probe_work, azx_probe_work);
+ INIT_DELAYED_WORK(&hda->probe_work, azx_probe_work);
*rchip = chip;
@@ -2136,7 +2143,7 @@ static int azx_probe(struct pci_dev *pci,
#endif
if (schedule_probe)
- schedule_work(&hda->probe_work);
+ schedule_delayed_work(&hda->probe_work, 0);
dev++;
if (chip->disabled)
@@ -2222,6 +2229,11 @@ static int azx_probe_continue(struct azx *chip)
int dev = chip->dev_index;
int err;
+ if (chip->disabled || hda->init_failed)
+ return -EIO;
+ if (hda->probe_retry)
+ goto probe_retry;
+
to_hda_bus(bus)->bus_probing = 1;
hda->probe_continued = 1;
@@ -2283,10 +2295,20 @@ static int azx_probe_continue(struct azx *chip)
#endif
}
#endif
+
+ probe_retry:
if (bus->codec_mask && !(probe_only[dev] & 1)) {
err = azx_codec_configure(chip);
- if (err < 0)
+ if (err) {
+ if ((chip->driver_caps & AZX_DCAPS_RETRY_PROBE) &&
+ ++hda->probe_retry < 60) {
+ schedule_delayed_work(&hda->probe_work,
+ msecs_to_jiffies(1000));
+ return 0; /* keep things up */
+ }
+ dev_err(chip->card->dev, "Cannot probe codecs, giving up\n");
goto out_free;
+ }
}
err = snd_card_register(chip->card);
@@ -2316,6 +2338,7 @@ out_free:
display_power(chip, false);
complete_all(&hda->probe_wait);
to_hda_bus(bus)->bus_probing = 0;
+ hda->probe_retry = 0;
return 0;
}
@@ -2341,7 +2364,7 @@ static void azx_remove(struct pci_dev *pci)
* device during cancel_work_sync() call.
*/
device_unlock(&pci->dev);
- cancel_work_sync(&hda->probe_work);
+ cancel_delayed_work_sync(&hda->probe_work);
device_lock(&pci->dev);
snd_card_free(card);
@@ -2357,7 +2380,7 @@ static void azx_shutdown(struct pci_dev *pci)
return;
chip = card->private_data;
if (chip && chip->running)
- azx_shutdown_chip(chip);
+ __azx_shutdown_chip(chip, true);
}
/* PCI IDs */
diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h
index 3fb119f09040..0f39418f9328 100644
--- a/sound/pci/hda/hda_intel.h
+++ b/sound/pci/hda/hda_intel.h
@@ -14,7 +14,7 @@ struct hda_intel {
/* sync probing */
struct completion probe_wait;
- struct work_struct probe_work;
+ struct delayed_work probe_work;
/* card list (for power_save trigger) */
struct list_head list;
@@ -30,6 +30,8 @@ struct hda_intel {
unsigned int freed:1; /* resources already released */
bool need_i915_power:1; /* the hda controller needs i915 power */
+
+ int probe_retry; /* probe retry counter */
};
#endif
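Together, the hda_intel.c and hda_intel.h hunks above convert the one-shot probe work into a bounded retry loop. A condensed sketch of the resulting flow (helper and names hypothetical):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_dev {
	struct delayed_work probe_work;
	int probe_retry;
};

static int try_configure(struct my_dev *md) { return -1; }	/* stand-in */

static void my_probe_work(struct work_struct *work)
{
	/* note: container_of() goes through .work for a delayed_work */
	struct my_dev *md = container_of(work, struct my_dev, probe_work.work);

	if (try_configure(md) == 0) {
		md->probe_retry = 0;
		return;
	}
	if (++md->probe_retry < 60)	/* bounded, about a minute as above */
		schedule_delayed_work(&md->probe_work, msecs_to_jiffies(1000));
}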
diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
index 3c7ef55d016e..31ff11ab868e 100644
--- a/sound/pci/hda/patch_cs8409.c
+++ b/sound/pci/hda/patch_cs8409.c
@@ -1207,6 +1207,9 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac
snd_hda_jack_add_kctl(codec, DOLPHIN_LO_PIN_NID, "Line Out", true,
SND_JACK_HEADPHONE, NULL);
+ snd_hda_jack_add_kctl(codec, DOLPHIN_AMIC_PIN_NID, "Microphone", true,
+ SND_JACK_MICROPHONE, NULL);
+
cs8409_fix_caps(codec, DOLPHIN_HP_PIN_NID);
cs8409_fix_caps(codec, DOLPHIN_LO_PIN_NID);
cs8409_fix_caps(codec, DOLPHIN_AMIC_PIN_NID);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8b7a389b6aed..965b096f416f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -526,6 +526,8 @@ static void alc_shutup_pins(struct hda_codec *codec)
struct alc_spec *spec = codec->spec;
switch (codec->core.vendor_id) {
+ case 0x10ec0236:
+ case 0x10ec0256:
case 0x10ec0283:
case 0x10ec0286:
case 0x10ec0288:
@@ -2533,11 +2535,13 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
- SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170SM", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x7715, "Clevo X170KM-G", ALC1220_FIXUP_CLEVO_PB51ED),
SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x950a, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
@@ -3528,7 +3532,8 @@ static void alc256_shutup(struct hda_codec *codec)
/* If the 3k pulldown control is disabled for ALC257, mic detection will not
 * work correctly when booting with a headset plugged in, so skip setting it
 * for the ALC257 codec
*/
- if (codec->core.vendor_id != 0x10ec0257)
+ if (spec->codec_variant != ALC269_TYPE_ALC257 &&
+ spec->codec_variant != ALC269_TYPE_ALC256)
alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
if (!spec->no_shutup_pins)
@@ -6401,6 +6406,44 @@ static void alc_fixup_no_int_mic(struct hda_codec *codec,
}
}
+/* GPIO1 = amplifier on/off
+ * GPIO3 = mic mute LED
+ */
+static void alc285_fixup_hp_spectre_x360_eb1(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ static const hda_nid_t conn[] = { 0x02 };
+
+ struct alc_spec *spec = codec->spec;
+ static const struct hda_pintbl pincfgs[] = {
+ { 0x14, 0x90170110 }, /* front/high speakers */
+ { 0x17, 0x90170130 }, /* back/bass speakers */
+ { }
+ };
+
+ //enable micmute led
+ alc_fixup_hp_gpio_led(codec, action, 0x00, 0x04);
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ spec->micmute_led_polarity = 1;
+ /* needed for amp of back speakers */
+ spec->gpio_mask |= 0x01;
+ spec->gpio_dir |= 0x01;
+ snd_hda_apply_pincfgs(codec, pincfgs);
+ /* share DAC to have unified volume control */
+ snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn), conn);
+ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
+ break;
+ case HDA_FIXUP_ACT_INIT:
+ /* need to toggle GPIO to enable the amp of back speakers */
+ alc_update_gpio_data(codec, 0x01, true);
+ msleep(100);
+ alc_update_gpio_data(codec, 0x01, false);
+ break;
+ }
+}
+
static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -6429,12 +6472,44 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
hda_fixup_thinkpad_acpi(codec, fix, action);
}
+/* Fixup for Lenovo Legion 15IMHg05 speaker output on headset removal. */
+static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ struct alc_spec *spec = codec->spec;
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ spec->gen.suppress_auto_mute = 1;
+ break;
+ }
+}
+
/* for alc295_fixup_hp_top_speakers */
#include "hp_x360_helper.c"
/* for alc285_fixup_ideapad_s740_coef() */
#include "ideapad_s740_helper.c"
+static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ /*
+ * A certain other OS sets these coeffs to different values. On at least one TongFang
+ * barebone these settings might survive even a cold reboot. So to restore a clean slate the
+ * values are explicitly reset to default here. Without this, the external microphone is
+ * always in a plugged-in state, while the internal microphone is always in an unplugged
+ * state, breaking the ability to use the internal microphone.
+ */
+ alc_write_coef_idx(codec, 0x24, 0x0000);
+ alc_write_coef_idx(codec, 0x26, 0x0000);
+ alc_write_coef_idx(codec, 0x29, 0x3000);
+ alc_write_coef_idx(codec, 0x37, 0xfe05);
+ alc_write_coef_idx(codec, 0x45, 0x5089);
+}
+
enum {
ALC269_FIXUP_GPIO2,
ALC269_FIXUP_SONY_VAIO,
@@ -6521,6 +6596,7 @@ enum {
ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED,
ALC280_FIXUP_HP_9480M,
ALC245_FIXUP_HP_X360_AMP,
+ ALC285_FIXUP_HP_SPECTRE_X360_EB1,
ALC288_FIXUP_DELL_HEADSET_MODE,
ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC288_FIXUP_DELL_XPS_13,
@@ -6646,6 +6722,11 @@ enum {
ALC623_FIXUP_LENOVO_THINKSTATION_P340,
ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST,
+ ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS,
+ ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
+ ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
+ ALC287_FIXUP_13S_GEN2_SPEAKERS,
+ ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -8209,6 +8290,10 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc285_fixup_hp_spectre_x360,
},
+ [ALC285_FIXUP_HP_SPECTRE_X360_EB1] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_hp_spectre_x360_eb1
+ },
[ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc285_fixup_ideapad_s740_coef,
@@ -8236,6 +8321,117 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
},
+ [ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS] = {
+ .type = HDA_FIXUP_VERBS,
+ //.v.verbs = legion_15imhg05_coefs,
+ .v.verbs = (const struct hda_verb[]) {
+ // set left speaker Legion 7i.
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x1a },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ // set right speaker Legion 7i.
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x42 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2a },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
+ },
+ [ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc287_fixup_legion_15imhg05_speakers,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE,
+ },
+ [ALC287_FIXUP_YOGA7_14ITL_SPEAKERS] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ // set left speaker Yoga 7i.
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x1a },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ // set right speaker Yoga 7i.
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x46 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2a },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE,
+ },
+ [ALC287_FIXUP_13S_GEN2_SPEAKERS] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x42 },
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE,
+ },
+ [ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc256_fixup_tongfang_reset_persistent_settings,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8327,6 +8523,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x0a62, "Dell Precision 5560", ALC289_FIXUP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -8429,6 +8628,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+ SND_PCI_QUIRK(0x103c, 0x8812, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
@@ -8630,6 +8831,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
+ SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -8660,6 +8865,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+ SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS),
SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -8845,6 +9051,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
{.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
{.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
+ {.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"},
{.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
{.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
@@ -10037,6 +10244,9 @@ enum {
ALC671_FIXUP_HP_HEADSET_MIC2,
ALC662_FIXUP_ACER_X2660G_HEADSET_MODE,
ALC662_FIXUP_ACER_NITRO_HEADSET_MODE,
+ ALC668_FIXUP_ASUS_NO_HEADSET_MIC,
+ ALC668_FIXUP_HEADSET_MIC,
+ ALC668_FIXUP_MIC_DET_COEF,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -10420,6 +10630,29 @@ static const struct hda_fixup alc662_fixups[] = {
.chained = true,
.chain_id = ALC662_FIXUP_USI_FUNC
},
+ [ALC668_FIXUP_ASUS_NO_HEADSET_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1b, 0x04a1112c },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC668_FIXUP_HEADSET_MIC
+ },
+ [ALC668_FIXUP_HEADSET_MIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_headset_mic,
+ .chained = true,
+ .chain_id = ALC668_FIXUP_MIC_DET_COEF
+ },
+ [ALC668_FIXUP_MIC_DET_COEF] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x15 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0d60 },
+ {}
+ },
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -10455,6 +10688,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
+ SND_PCI_QUIRK(0x1043, 0x185d, "ASUS G551JW", ALC668_FIXUP_ASUS_NO_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1963, "ASUS X71SL", ALC662_FIXUP_ASUS_MODE8),
SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
index 87d24224c042..23f253effb4f 100644
--- a/sound/pci/pcxhr/pcxhr_core.c
+++ b/sound/pci/pcxhr/pcxhr_core.c
@@ -52,7 +52,7 @@
#define PCXHR_DSP 2
#if (PCXHR_DSP_OFFSET_MAX > PCXHR_PLX_OFFSET_MIN)
-#undef PCXHR_REG_TO_PORT(x)
+#error PCXHR_REG_TO_PORT(x)
#else
#define PCXHR_REG_TO_PORT(x) ((x)>PCXHR_DSP_OFFSET_MAX ? PCXHR_PLX : PCXHR_DSP)
#endif
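#undef takes only a bare macro name, so the old `#undef PCXHR_REG_TO_PORT(x)` was not valid #undef syntax (compilers at most warn and ignore the parameter list) and silently left the build without a usable macro. Turning the branch into #error makes the impossible configuration fail loudly at compile time. The guard pattern in isolation, with illustrative values:

#define DSP_OFFSET_MAX	0x20
#define PLX_OFFSET_MIN	0x40

#if (DSP_OFFSET_MAX > PLX_OFFSET_MIN)
#error overlapping ranges, REG_TO_PORT cannot be defined
#else
#define REG_TO_PORT(x)	((x) > DSP_OFFSET_MAX ? 1 : 2)
#endif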
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 82ee233a269d..216cea04ad70 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -1583,6 +1583,7 @@ config SND_SOC_WCD938X_SDW
tristate "WCD9380/WCD9385 Codec - SDW"
select SND_SOC_WCD938X
select SND_SOC_WCD_MBHC
+ select REGMAP_IRQ
depends on SOUNDWIRE
select REGMAP_SOUNDWIRE
help
diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
index fb1e4c33e27d..9a463ab54bdd 100644
--- a/sound/soc/codecs/cs42l42.c
+++ b/sound/soc/codecs/cs42l42.c
@@ -922,7 +922,6 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
struct snd_soc_component *component = dai->component;
struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
unsigned int regval;
- u8 fullScaleVol;
int ret;
if (mute) {
@@ -993,20 +992,11 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
cs42l42->stream_use |= 1 << stream;
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
- /* Read the headphone load */
- regval = snd_soc_component_read(component, CS42L42_LOAD_DET_RCSTAT);
- if (((regval & CS42L42_RLA_STAT_MASK) >> CS42L42_RLA_STAT_SHIFT) ==
- CS42L42_RLA_STAT_15_OHM) {
- fullScaleVol = CS42L42_HP_FULL_SCALE_VOL_MASK;
- } else {
- fullScaleVol = 0;
- }
-
- /* Un-mute the headphone, set the full scale volume flag */
+ /* Un-mute the headphone */
snd_soc_component_update_bits(component, CS42L42_HP_CTL,
CS42L42_HP_ANA_AMUTE_MASK |
- CS42L42_HP_ANA_BMUTE_MASK |
- CS42L42_HP_FULL_SCALE_VOL_MASK, fullScaleVol);
+ CS42L42_HP_ANA_BMUTE_MASK,
+ 0);
}
}
diff --git a/sound/soc/codecs/cs4341.c b/sound/soc/codecs/cs4341.c
index 7d3e54d8eef3..29d05e32d341 100644
--- a/sound/soc/codecs/cs4341.c
+++ b/sound/soc/codecs/cs4341.c
@@ -305,12 +305,19 @@ static int cs4341_spi_probe(struct spi_device *spi)
return cs4341_probe(&spi->dev);
}
+static const struct spi_device_id cs4341_spi_ids[] = {
+ { "cs4341a" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, cs4341_spi_ids);
+
static struct spi_driver cs4341_spi_driver = {
.driver = {
.name = "cs4341-spi",
.of_match_table = of_match_ptr(cs4341_dt_ids),
},
.probe = cs4341_spi_probe,
+ .id_table = cs4341_spi_ids,
};
#endif
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index db88be48c998..f946ef65a4c1 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -867,8 +867,8 @@ static void nau8824_jdet_work(struct work_struct *work)
struct regmap *regmap = nau8824->regmap;
int adc_value, event = 0, event_mask = 0;
- snd_soc_dapm_enable_pin(dapm, "MICBIAS");
- snd_soc_dapm_enable_pin(dapm, "SAR");
+ snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
+ snd_soc_dapm_force_enable_pin(dapm, "SAR");
snd_soc_dapm_sync(dapm);
msleep(100);
diff --git a/sound/soc/codecs/pcm179x-spi.c b/sound/soc/codecs/pcm179x-spi.c
index 0a542924ec5f..ebf63ea90a1c 100644
--- a/sound/soc/codecs/pcm179x-spi.c
+++ b/sound/soc/codecs/pcm179x-spi.c
@@ -36,6 +36,7 @@ static const struct of_device_id pcm179x_of_match[] = {
MODULE_DEVICE_TABLE(of, pcm179x_of_match);
static const struct spi_device_id pcm179x_spi_ids[] = {
+ { "pcm1792a", 0 },
{ "pcm179x", 0 },
{ },
};
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index 4dc844f3c1fc..60dee41816dc 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -116,6 +116,8 @@ static const struct reg_default pcm512x_reg_defaults[] = {
{ PCM512x_FS_SPEED_MODE, 0x00 },
{ PCM512x_IDAC_1, 0x01 },
{ PCM512x_IDAC_2, 0x00 },
+ { PCM512x_I2S_1, 0x02 },
+ { PCM512x_I2S_2, 0x00 },
};
static bool pcm512x_readable(struct device *dev, unsigned int reg)
diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
index f0daf8defcf1..52de7d14b139 100644
--- a/sound/soc/codecs/wcd938x.c
+++ b/sound/soc/codecs/wcd938x.c
@@ -4144,10 +4144,10 @@ static int wcd938x_codec_set_jack(struct snd_soc_component *comp,
{
struct wcd938x_priv *wcd = dev_get_drvdata(comp->dev);
- if (!jack)
+ if (jack)
return wcd_mbhc_start(wcd->wcd_mbhc, &wcd->mbhc_cfg, jack);
-
- wcd_mbhc_stop(wcd->wcd_mbhc);
+ else
+ wcd_mbhc_stop(wcd->wcd_mbhc);
return 0;
}
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 9e621a254392..499604f1e178 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -742,9 +742,16 @@ static int wm8960_configure_clocking(struct snd_soc_component *component)
int i, j, k;
int ret;
- if (!(iface1 & (1<<6))) {
- dev_dbg(component->dev,
- "Codec is slave mode, no need to configure clock\n");
+ /*
+ * Clocking should still be configured in slave mode, so this
+ * if statement should ideally be removed. However, some platforms
+ * may not work if sysclk is left unconfigured; to avoid such
+ * compatibility issues, just add the '!wm8960->sysclk' condition
+ * to this if statement.
+ */
+ if (!(iface1 & (1 << 6)) && !wm8960->sysclk) {
+ dev_warn(component->dev,
+ "slave mode, but proceeding with no clock configuration\n");
return 0;
}
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index a961f837cd09..bda66b30e063 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -1073,6 +1073,16 @@ static int fsl_esai_probe(struct platform_device *pdev)
if (ret < 0)
goto err_pm_get_sync;
+ /*
+ * Register the platform component before the CPU DAI, because
+ * snd_soc_add_pcm_runtime() has no deferred probing for a missing
+ * platform component.
+ */
+ ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
+ goto err_pm_get_sync;
+ }
+
ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
&fsl_esai_dai, 1);
if (ret) {
@@ -1082,12 +1092,6 @@ static int fsl_esai_probe(struct platform_device *pdev)
INIT_WORK(&esai_priv->work, fsl_esai_hw_reset);
- ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
- if (ret) {
- dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
- goto err_pm_get_sync;
- }
-
return ret;
err_pm_get_sync:
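This reordering repeats across the fsl_micfil, fsl_sai, fsl_spdif and fsl_xcvr hunks below. A condensed sketch of the probe shape they all converge on, reusing the esai names from this hunk (register the platform/PCM side first, then the CPU DAI):

#include <linux/platform_device.h>
#include <sound/soc.h>
/* imx_pcm_dma_init(), fsl_esai_component, fsl_esai_dai and
 * IMX_ESAI_DMABUF_SIZE come from the driver being patched. */

static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);	/* platform first */
	if (ret)
		return ret;

	return devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
					       &fsl_esai_dai, 1);	/* then CPU DAI */
}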
diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
index 8c0c75ce9490..9f90989ac59a 100644
--- a/sound/soc/fsl/fsl_micfil.c
+++ b/sound/soc/fsl/fsl_micfil.c
@@ -737,18 +737,23 @@ static int fsl_micfil_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
regcache_cache_only(micfil->regmap, true);
+ /*
+ * Register the platform component before the CPU DAI, because
+ * snd_soc_add_pcm_runtime() has no deferred probing for a missing
+ * platform component.
+ */
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to pcm register\n");
+ return ret;
+ }
+
ret = devm_snd_soc_register_component(&pdev->dev, &fsl_micfil_component,
&fsl_micfil_dai, 1);
if (ret) {
dev_err(&pdev->dev, "failed to register component %s\n",
fsl_micfil_component.name);
- return ret;
}
- ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
- if (ret)
- dev_err(&pdev->dev, "failed to pcm register\n");
-
return ret;
}
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 223fcd15bfcc..38f6362099d5 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -1152,11 +1152,10 @@ static int fsl_sai_probe(struct platform_device *pdev)
if (ret < 0)
goto err_pm_get_sync;
- ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
- &sai->cpu_dai_drv, 1);
- if (ret)
- goto err_pm_get_sync;
-
+ /*
+ * Register the platform component before the CPU DAI, because
+ * snd_soc_add_pcm_runtime() has no deferred probing for a missing
+ * platform component.
+ */
if (sai->soc_data->use_imx_pcm) {
ret = imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
if (ret)
@@ -1167,6 +1166,11 @@ static int fsl_sai_probe(struct platform_device *pdev)
goto err_pm_get_sync;
}
+ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
+ &sai->cpu_dai_drv, 1);
+ if (ret)
+ goto err_pm_get_sync;
+
return ret;
err_pm_get_sync:
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 8ffb1a6048d6..1c53719bb61e 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -1434,16 +1434,20 @@ static int fsl_spdif_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
regcache_cache_only(spdif_priv->regmap, true);
- ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component,
- &spdif_priv->cpu_dai_drv, 1);
+ /*
+ * Register the platform component before the CPU DAI, because
+ * snd_soc_add_pcm_runtime() has no deferred probing for a missing
+ * platform component.
+ */
+ ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
if (ret) {
- dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");
goto err_pm_disable;
}
- ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
+ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component,
+ &spdif_priv->cpu_dai_drv, 1);
if (ret) {
- dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");
+ dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
goto err_pm_disable;
}
diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
index 31c5ee641fe7..d0556c79fdb1 100644
--- a/sound/soc/fsl/fsl_xcvr.c
+++ b/sound/soc/fsl/fsl_xcvr.c
@@ -487,8 +487,9 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
return ret;
}
- /* clear DPATH RESET */
+ /* set DPATH RESET */
m_ctl |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
+ v_ctl |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL, m_ctl, v_ctl);
if (ret < 0) {
dev_err(dai->dev, "Error while setting EXT_CTRL: %d\n", ret);
@@ -590,10 +591,6 @@ static void fsl_xcvr_shutdown(struct snd_pcm_substream *substream,
val |= FSL_XCVR_EXT_CTRL_CMDC_RESET(tx);
}
- /* set DPATH RESET */
- mask |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
- val |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
-
ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL, mask, val);
if (ret < 0) {
dev_err(dai->dev, "Err setting DPATH RESET: %d\n", ret);
@@ -643,6 +640,16 @@ static int fsl_xcvr_trigger(struct snd_pcm_substream *substream, int cmd,
dev_err(dai->dev, "Failed to enable DMA: %d\n", ret);
return ret;
}
+
+ /* clear DPATH RESET */
+ ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL,
+ FSL_XCVR_EXT_CTRL_DPTH_RESET(tx),
+ 0);
+ if (ret < 0) {
+ dev_err(dai->dev, "Failed to clear DPATH RESET: %d\n", ret);
+ return ret;
+ }
+
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
@@ -1215,18 +1222,23 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
regcache_cache_only(xcvr->regmap, true);
+ /*
+ * Register the platform component before the CPU DAI, because
+ * snd_soc_add_pcm_runtime() has no deferred probing for a missing
+ * platform component.
+ */
+ ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to pcm register\n");
+ return ret;
+ }
+
ret = devm_snd_soc_register_component(dev, &fsl_xcvr_comp,
&fsl_xcvr_dai, 1);
if (ret) {
dev_err(dev, "failed to register component %s\n",
fsl_xcvr_comp.name);
- return ret;
}
- ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);
- if (ret)
- dev_err(dev, "failed to pcm register\n");
-
return ret;
}
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index 055248f104b2..4d313d0d0f23 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -456,12 +456,12 @@ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = {
static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
static const char * const mic_name[] = { "in1", "in2" };
+ struct snd_soc_acpi_mach *mach = dev_get_platdata(dev);
struct property_entry props[MAX_NO_PROPS] = {};
struct byt_cht_es8316_private *priv;
const struct dmi_system_id *dmi_id;
- struct device *dev = &pdev->dev;
- struct snd_soc_acpi_mach *mach;
struct fwnode_handle *fwnode;
const char *platform_name;
struct acpi_device *adev;
@@ -476,7 +476,6 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- mach = dev->platform_data;
/* fix index of codec dai */
for (i = 0; i < ARRAY_SIZE(byt_cht_es8316_dais); i++) {
if (!strcmp(byt_cht_es8316_dais[i].codecs->name,
@@ -494,7 +493,7 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
put_device(&adev->dev);
byt_cht_es8316_dais[dai_index].codecs->name = codec_name;
} else {
- dev_err(&pdev->dev, "Error cannot find '%s' dev\n", mach->id);
+ dev_err(dev, "Error cannot find '%s' dev\n", mach->id);
return -ENXIO;
}
@@ -533,11 +532,8 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
/* get the clock */
priv->mclk = devm_clk_get(dev, "pmc_plt_clk_3");
- if (IS_ERR(priv->mclk)) {
- ret = PTR_ERR(priv->mclk);
- dev_err(dev, "clk_get pmc_plt_clk_3 failed: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(priv->mclk))
+ return dev_err_probe(dev, PTR_ERR(priv->mclk), "clk_get pmc_plt_clk_3 failed\n");
/* get speaker enable GPIO */
codec_dev = acpi_get_first_physical_node(adev);
@@ -567,22 +563,13 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
devm_acpi_dev_add_driver_gpios(codec_dev, byt_cht_es8316_gpios);
priv->speaker_en_gpio =
- gpiod_get_index(codec_dev, "speaker-enable", 0,
- /* see comment in byt_cht_es8316_resume */
- GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
-
+ gpiod_get_optional(codec_dev, "speaker-enable",
+ /* see comment in byt_cht_es8316_resume() */
+ GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(priv->speaker_en_gpio)) {
- ret = PTR_ERR(priv->speaker_en_gpio);
- switch (ret) {
- case -ENOENT:
- priv->speaker_en_gpio = NULL;
- break;
- default:
- dev_err(dev, "get speaker GPIO failed: %d\n", ret);
- fallthrough;
- case -EPROBE_DEFER:
- goto err_put_codec;
- }
+ ret = dev_err_probe(dev, PTR_ERR(priv->speaker_en_gpio),
+ "get speaker GPIO failed\n");
+ goto err_put_codec;
}
snprintf(components_string, sizeof(components_string),
@@ -597,7 +584,7 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
byt_cht_es8316_card.long_name = long_name;
#endif
- sof_parent = snd_soc_acpi_sof_parent(&pdev->dev);
+ sof_parent = snd_soc_acpi_sof_parent(dev);
/* set card and driver name */
if (sof_parent) {
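The GPIO conversion above leans on two helpers: gpiod_get_optional() already maps "not present" to NULL, and dev_err_probe() returns the error it is given, logging loudly for real failures while only recording a deferral reason for -EPROBE_DEFER. A rough before/after sketch of the shape ('spk' is illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int get_spk_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *spk = gpiod_get_optional(dev, "speaker-enable",
						   GPIOD_OUT_LOW);

	/* replaces the old switch over the error code: no -ENOENT branch
	 * (optional variant handles it) and no -EPROBE_DEFER special case
	 * (dev_err_probe() stays quiet for it) */
	if (IS_ERR(spk))
		return dev_err_probe(dev, PTR_ERR(spk),
				     "get speaker GPIO failed\n");
	*out = spk;
	return 0;
}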
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index 6602eda89e8e..6b06248a9327 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -929,6 +929,11 @@ static int create_sdw_dailink(struct snd_soc_card *card,
cpus + *cpu_id, cpu_dai_num,
codecs, codec_num,
NULL, &sdw_ops);
+ /*
+ * SoundWire DAILINKs use 'stream' functions and Bank Switch operations
+ * based on wait_for_completion(); tag them as 'nonatomic'.
+ */
+ dai_links[*be_index].nonatomic = true;
ret = set_codec_init_func(card, link, dai_links + (*be_index)++,
playback, group_id);
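Marking the link nonatomic makes the PCM core run trigger and stream callbacks in a sleepable context; without it, a bank switch blocking in wait_for_completion() would sleep under the stream spinlock. The flag in isolation (link definition illustrative):

#include <sound/soc.h>

static struct snd_soc_dai_link sdw_link = {
	.name      = "SDW0-Playback",
	.nonatomic = true,	/* trigger may block on a bank switch */
	.no_pcm    = 1,
};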
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 5a2f4667d50b..81ad2dcee9eb 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config SND_SOC_MEDIATEK
tristate
+ select REGMAP_MMIO
config SND_SOC_MT2701
tristate "ASoC support for Mediatek MT2701 chip"
@@ -188,7 +189,9 @@ config SND_SOC_MT8192_MT6359_RT1015_RT5682
config SND_SOC_MT8195
tristate "ASoC support for Mediatek MT8195 chip"
depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on COMMON_CLK
select SND_SOC_MEDIATEK
+ select MFD_SYSCON if SND_SOC_MT6359
help
This adds ASoC platform driver support for Mediatek MT8195 chip
that can be used with other codecs.
diff --git a/sound/soc/mediatek/common/mtk-afe-fe-dai.c b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
index baaa5881b1d4..e95c7c018e7d 100644
--- a/sound/soc/mediatek/common/mtk-afe-fe-dai.c
+++ b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
@@ -334,9 +334,11 @@ int mtk_afe_suspend(struct snd_soc_component *component)
devm_kcalloc(dev, afe->reg_back_up_list_num,
sizeof(unsigned int), GFP_KERNEL);
- for (i = 0; i < afe->reg_back_up_list_num; i++)
- regmap_read(regmap, afe->reg_back_up_list[i],
- &afe->reg_back_up[i]);
+ if (afe->reg_back_up) {
+ for (i = 0; i < afe->reg_back_up_list_num; i++)
+ regmap_read(regmap, afe->reg_back_up_list[i],
+ &afe->reg_back_up[i]);
+ }
afe->suspended = true;
afe->runtime_suspend(dev);
@@ -356,12 +358,13 @@ int mtk_afe_resume(struct snd_soc_component *component)
afe->runtime_resume(dev);
- if (!afe->reg_back_up)
+ if (!afe->reg_back_up) {
dev_dbg(dev, "%s no reg_backup\n", __func__);
-
- for (i = 0; i < afe->reg_back_up_list_num; i++)
- mtk_regmap_write(regmap, afe->reg_back_up_list[i],
- afe->reg_back_up[i]);
+ } else {
+ for (i = 0; i < afe->reg_back_up_list_num; i++)
+ mtk_regmap_write(regmap, afe->reg_back_up_list[i],
+ afe->reg_back_up[i]);
+ }
afe->suspended = false;
return 0;
diff --git a/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c b/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c
index c97ace7387b4..de09f67c0450 100644
--- a/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c
+++ b/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c
@@ -424,8 +424,8 @@ static int mt8195_hdmi_codec_init(struct snd_soc_pcm_runtime *rtd)
return snd_soc_component_set_jack(cmpnt_codec, &priv->hdmi_jack, NULL);
}
-static int mt8195_hdmitx_dptx_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_hw_params *params)
+static int mt8195_dptx_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
{
/* fix BE i2s format to 32bit, clean param mask first */
@@ -902,7 +902,7 @@ static struct snd_soc_dai_link mt8195_mt6359_rt1019_rt5682_dai_links[] = {
.no_pcm = 1,
.dpcm_playback = 1,
.ops = &mt8195_dptx_ops,
- .be_hw_params_fixup = mt8195_hdmitx_dptx_hw_params_fixup,
+ .be_hw_params_fixup = mt8195_dptx_hw_params_fixup,
SND_SOC_DAILINK_REG(DPTX_BE),
},
[DAI_LINK_ETDM1_IN_BE] = {
@@ -953,7 +953,6 @@ static struct snd_soc_dai_link mt8195_mt6359_rt1019_rt5682_dai_links[] = {
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
.dpcm_playback = 1,
- .be_hw_params_fixup = mt8195_hdmitx_dptx_hw_params_fixup,
SND_SOC_DAILINK_REG(ETDM3_OUT_BE),
},
[DAI_LINK_PCM1_BE] = {
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index c830e96afba2..80ca260595fd 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2599,6 +2599,7 @@ int snd_soc_component_initialize(struct snd_soc_component *component,
INIT_LIST_HEAD(&component->dai_list);
INIT_LIST_HEAD(&component->dobj_list);
INIT_LIST_HEAD(&component->card_list);
+ INIT_LIST_HEAD(&component->list);
mutex_init(&component->io_mutex);
component->name = fmt_single_name(dev, &component->id);
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7b67f1e19ae9..59d07648a7e7 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2561,6 +2561,7 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
const char *pin, int status)
{
struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
+ int ret = 0;
dapm_assert_locked(dapm);
@@ -2573,13 +2574,14 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
dapm_mark_dirty(w, "pin configuration");
dapm_widget_invalidate_input_paths(w);
dapm_widget_invalidate_output_paths(w);
+ ret = 1;
}
w->connected = status;
if (status == 0)
w->force = 0;
- return 0;
+ return ret;
}
/**
@@ -3583,14 +3585,15 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
const char *pin = (const char *)kcontrol->private_value;
+ int ret;
if (ucontrol->value.integer.value[0])
- snd_soc_dapm_enable_pin(&card->dapm, pin);
+ ret = snd_soc_dapm_enable_pin(&card->dapm, pin);
else
- snd_soc_dapm_disable_pin(&card->dapm, pin);
+ ret = snd_soc_dapm_disable_pin(&card->dapm, pin);
snd_soc_dapm_sync(&card->dapm);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_put_pin_switch);
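This hunk and the snd_soc_dapm_dai_link_put change just below restore the ALSA kcontrol contract: a put() handler returns 1 when the value actually changed, which is what makes the core emit a value-change notification to userspace, and 0 when the control was already in the requested state. The contract in miniature (names hypothetical):

#include <sound/core.h>
#include <sound/control.h>

struct my_state { int enabled; };

static int my_put(struct snd_kcontrol *kc, struct snd_ctl_elem_value *uc)
{
	struct my_state *s = snd_kcontrol_chip(kc);
	int val = !!uc->value.integer.value[0];

	if (s->enabled == val)
		return 0;	/* unchanged: no event generated */
	s->enabled = val;
	return 1;		/* changed: core notifies listeners */
}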
@@ -4023,7 +4026,7 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
rtd->params_select = ucontrol->value.enumerated.item[0];
- return 0;
+ return 1;
}
static void
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
index 3e4dd4a86363..59d0d7b2b55c 100644
--- a/sound/soc/sof/core.c
+++ b/sound/soc/sof/core.c
@@ -371,7 +371,6 @@ int snd_sof_device_remove(struct device *dev)
dev_warn(dev, "error: %d failed to prepare DSP for device removal",
ret);
- snd_sof_fw_unload(sdev);
snd_sof_ipc_free(sdev);
snd_sof_free_debug(sdev);
snd_sof_free_trace(sdev);
@@ -394,8 +393,7 @@ int snd_sof_device_remove(struct device *dev)
snd_sof_remove(sdev);
/* release firmware */
- release_firmware(pdata->fw);
- pdata->fw = NULL;
+ snd_sof_fw_unload(sdev);
return 0;
}
diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c
index 12fedf0984bd..7e9723a10d02 100644
--- a/sound/soc/sof/imx/imx8.c
+++ b/sound/soc/sof/imx/imx8.c
@@ -365,7 +365,14 @@ static int imx8_remove(struct snd_sof_dev *sdev)
/* on i.MX8 there is 1 to 1 match between type and BAR idx */
static int imx8_get_bar_index(struct snd_sof_dev *sdev, u32 type)
{
- return type;
+ /* Only IRAM and SRAM bars are valid */
+ switch (type) {
+ case SOF_FW_BLK_TYPE_IRAM:
+ case SOF_FW_BLK_TYPE_SRAM:
+ return type;
+ default:
+ return -EINVAL;
+ }
}
static void imx8_ipc_msg_data(struct snd_sof_dev *sdev,
diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c
index cb822d953767..892e1482f97f 100644
--- a/sound/soc/sof/imx/imx8m.c
+++ b/sound/soc/sof/imx/imx8m.c
@@ -228,7 +228,14 @@ static int imx8m_remove(struct snd_sof_dev *sdev)
/* on i.MX8 there is 1 to 1 match between type and BAR idx */
static int imx8m_get_bar_index(struct snd_sof_dev *sdev, u32 type)
{
- return type;
+ /* Only IRAM and SRAM bars are valid */
+ switch (type) {
+ case SOF_FW_BLK_TYPE_IRAM:
+ case SOF_FW_BLK_TYPE_SRAM:
+ return type;
+ default:
+ return -EINVAL;
+ }
}
static void imx8m_ipc_msg_data(struct snd_sof_dev *sdev,
diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c
index 2b38a77cd594..bb79c77775b3 100644
--- a/sound/soc/sof/loader.c
+++ b/sound/soc/sof/loader.c
@@ -729,10 +729,10 @@ int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
ret = request_firmware(&plat_data->fw, fw_filename, sdev->dev);
if (ret < 0) {
- dev_err(sdev->dev, "error: request firmware %s failed err: %d\n",
- fw_filename, ret);
dev_err(sdev->dev,
- "you may need to download the firmware from https://github.com/thesofproject/sof-bin/\n");
+ "error: sof firmware file is missing, you might need to\n");
+ dev_err(sdev->dev,
+ " download it from https://github.com/thesofproject/sof-bin/\n");
goto err;
} else {
dev_dbg(sdev->dev, "request_firmware %s successful\n",
@@ -880,5 +880,7 @@ EXPORT_SYMBOL(snd_sof_run_firmware);
void snd_sof_fw_unload(struct snd_sof_dev *sdev)
{
/* TODO: support module unloading at runtime */
+ release_firmware(sdev->pdata->fw);
+ sdev->pdata->fw = NULL;
}
EXPORT_SYMBOL(snd_sof_fw_unload);
diff --git a/sound/soc/sof/trace.c b/sound/soc/sof/trace.c
index f72a6e83e6af..58f6ca5cf491 100644
--- a/sound/soc/sof/trace.c
+++ b/sound/soc/sof/trace.c
@@ -530,7 +530,6 @@ void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
return;
if (sdev->dtrace_is_enabled) {
- dev_err(sdev->dev, "error: waking up any trace sleepers\n");
sdev->dtrace_error = true;
wake_up(&sdev->trace_sleep);
}
diff --git a/sound/soc/sof/xtensa/core.c b/sound/soc/sof/xtensa/core.c
index bbb9a2282ed9..f6e3411b33cf 100644
--- a/sound/soc/sof/xtensa/core.c
+++ b/sound/soc/sof/xtensa/core.c
@@ -122,9 +122,9 @@ static void xtensa_stack(struct snd_sof_dev *sdev, void *oops, u32 *stack,
* 0x0049fbb0: 8000f2d0 0049fc00 6f6c6c61 00632e63
*/
for (i = 0; i < stack_words; i += 4) {
- hex_dump_to_buffer(stack + i * 4, 16, 16, 4,
+ hex_dump_to_buffer(stack + i, 16, 16, 4,
buf, sizeof(buf), false);
- dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i, buf);
+ dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i * 4, buf);
}
}
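The stack dump fix is a classic pointer-scaling error: stack is a u32 pointer, so `stack + i * 4` already multiplied by sizeof(u32) and jumped four lines at a time, while the printed address advanced by only one word per line. A user-space illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t stack[16];
	int i = 4;	/* second line of four 32-bit words */

	/* u32 pointer arithmetic already scales by 4 bytes per step */
	printf("stack + i * 4 -> byte offset %td (skips 3 lines)\n",
	       (char *)(stack + i * 4) - (char *)stack);	/* 64 */
	printf("stack + i     -> byte offset %td (the next line)\n",
	       (char *)(stack + i) - (char *)stack);		/* 16 */
	return 0;
}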
diff --git a/sound/usb/card.c b/sound/usb/card.c
index fd570a42f043..1764b9302d46 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -1054,7 +1054,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
return 0;
}
-static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
+static int usb_audio_resume(struct usb_interface *intf)
{
struct snd_usb_audio *chip = usb_get_intfdata(intf);
struct snd_usb_stream *as;
@@ -1080,7 +1080,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
* we just notify and restart the mixers
*/
list_for_each_entry(mixer, &chip->mixer_list, list) {
- err = snd_usb_mixer_resume(mixer, reset_resume);
+ err = snd_usb_mixer_resume(mixer);
if (err < 0)
goto err_out;
}
@@ -1100,20 +1100,10 @@ err_out:
atomic_dec(&chip->active); /* allow autopm after this point */
return err;
}
-
-static int usb_audio_resume(struct usb_interface *intf)
-{
- return __usb_audio_resume(intf, false);
-}
-
-static int usb_audio_reset_resume(struct usb_interface *intf)
-{
- return __usb_audio_resume(intf, true);
-}
#else
#define usb_audio_suspend NULL
#define usb_audio_resume NULL
-#define usb_audio_reset_resume NULL
#endif /* CONFIG_PM */
static const struct usb_device_id usb_audio_ids [] = {
@@ -1135,7 +1125,7 @@ static struct usb_driver usb_audio_driver = {
.disconnect = usb_audio_disconnect,
.suspend = usb_audio_suspend,
.resume = usb_audio_resume,
- .reset_resume = usb_audio_reset_resume,
+ .reset_resume = usb_audio_resume,
.id_table = usb_audio_ids,
.supports_autosuspend = 1,
};
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 43bc59575a6e..8e030b1c061a 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1198,6 +1198,13 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
cval->res = 1;
}
break;
+ case USB_ID(0x1224, 0x2a25): /* Jieli Technology USB PHY 2.0 */
+ if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+ usb_audio_info(chip,
+ "set resolution quirk: cval->res = 16\n");
+ cval->res = 16;
+ }
+ break;
}
}
@@ -3653,33 +3660,16 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
return 0;
}
-static int default_mixer_reset_resume(struct usb_mixer_elem_list *list)
-{
- int err;
-
- if (list->resume) {
- err = list->resume(list);
- if (err < 0)
- return err;
- }
- return restore_mixer_value(list);
-}
-
-int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume)
+int snd_usb_mixer_resume(struct usb_mixer_interface *mixer)
{
struct usb_mixer_elem_list *list;
- usb_mixer_elem_resume_func_t f;
int id, err;
/* restore cached mixer values */
for (id = 0; id < MAX_ID_ELEMS; id++) {
for_each_mixer_elem(list, mixer, id) {
- if (reset_resume)
- f = list->reset_resume;
- else
- f = list->resume;
- if (f) {
- err = f(list);
+ if (list->resume) {
+ err = list->resume(list);
if (err < 0)
return err;
}
@@ -3700,7 +3690,6 @@ void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
list->id = unitid;
list->dump = snd_usb_mixer_dump_cval;
#ifdef CONFIG_PM
- list->resume = NULL;
- list->reset_resume = default_mixer_reset_resume;
+ list->resume = restore_mixer_value;
#endif
}
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index 876bbc9a71ad..98ea24d91d80 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -70,7 +70,6 @@ struct usb_mixer_elem_list {
bool is_std_info;
usb_mixer_elem_dump_func_t dump;
usb_mixer_elem_resume_func_t resume;
- usb_mixer_elem_resume_func_t reset_resume;
};
/* iterate over mixer element list of the given unit id */
@@ -121,7 +120,7 @@ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
#ifdef CONFIG_PM
int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer);
-int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume);
+int snd_usb_mixer_resume(struct usb_mixer_interface *mixer);
#endif
int snd_usb_set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index a66ce0375fd9..46082dc57be0 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -151,7 +151,7 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer,
*listp = list;
list->mixer = mixer;
list->id = id;
- list->reset_resume = resume;
+ list->resume = resume;
kctl = snd_ctl_new1(knew, list);
if (!kctl) {
kfree(list);
diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
index 3d5848d5481b..53ebabf42472 100644
--- a/sound/usb/mixer_scarlett_gen2.c
+++ b/sound/usb/mixer_scarlett_gen2.c
@@ -2450,6 +2450,8 @@ static int scarlett2_update_monitor_other(struct usb_mixer_interface *mixer)
err = scarlett2_usb_get_config(mixer,
SCARLETT2_CONFIG_TALKBACK_MAP,
1, &bitmap);
+ if (err < 0)
+ return err;
for (i = 0; i < num_mixes; i++, bitmap >>= 1)
private->talkback_map[i] = bitmap & 1;
}
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index e03043f7dad3..2af8c68fac27 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -78,6 +78,48 @@
{ USB_DEVICE_VENDOR_SPEC(0x041e, 0x3f19) },
/*
+ * Creative Technology, Ltd Live! Cam Sync HD [VF0770]
+ * The device advertises 8 formats, but only a rate of 48kHz is honored by the
+ * hardware and 24 bits give chopped audio, so only report the one working
+ * combination.
+ */
+{
+ USB_DEVICE(0x041e, 0x4095),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = &(const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ {
+ .ifnum = 3,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .fmt_bits = 16,
+ .iface = 3,
+ .altsetting = 4,
+ .altset_idx = 4,
+ .endpoint = 0x82,
+ .ep_attr = 0x05,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) { 48000 },
+ },
+ },
+ {
+ .ifnum = -1
+ },
+ },
+ },
+},
+
+/*
* HP Wireless Audio
* When not ignored, causes instability issues for some users, forcing them to
* skip the entire module.
@@ -3970,6 +4012,38 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
}
},
+{
+ /*
+ * Sennheiser GSP670
+ * Change order of interfaces loaded
+ */
+ USB_DEVICE(0x1395, 0x0300),
+ .bInterfaceClass = USB_CLASS_PER_INTERFACE,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = &(const struct snd_usb_audio_quirk[]) {
+ // Communication
+ {
+ .ifnum = 3,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ // Recording
+ {
+ .ifnum = 4,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ // Main
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
#undef USB_DEVICE_VENDOR_SPEC
#undef USB_AUDIO_DEVICE
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 6ee6d24c847f..8929d9abe8aa 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1719,6 +1719,11 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip,
*/
fp->attributes &= ~UAC_EP_CS_ATTR_FILL_MAX;
break;
+ case USB_ID(0x1224, 0x2a25): /* Jieli Technology USB PHY 2.0 */
+ /* mic works only when ep packet size is set to wMaxPacketSize */
+ fp->attributes |= UAC_EP_CS_ATTR_FILL_MAX;
+ break;
+
}
}
@@ -1884,10 +1889,14 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */
QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
+ QUIRK_FLAG_IGNORE_CTL_ERROR),
DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x534d, 0x2109, /* MacroSilicon MS2109 */
QUIRK_FLAG_ALIGN_TRANSFER),
+ DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
/* Vendor matches */
VENDOR_FLG(0x045e, /* MS Lifecam */
@@ -1900,6 +1909,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_CTL_MSG_DELAY | QUIRK_FLAG_IFACE_DELAY),
VENDOR_FLG(0x07fd, /* MOTU */
QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1235, /* Focusrite Novation */
+ QUIRK_FLAG_VALIDATE_RATES),
VENDOR_FLG(0x152a, /* Thesycon devices */
QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x1de7, /* Phoenix Audio */
diff --git a/tools/arch/x86/include/asm/unistd_32.h b/tools/arch/x86/include/uapi/asm/unistd_32.h
index 60a89dba01b6..60a89dba01b6 100644
--- a/tools/arch/x86/include/asm/unistd_32.h
+++ b/tools/arch/x86/include/uapi/asm/unistd_32.h
diff --git a/tools/arch/x86/include/asm/unistd_64.h b/tools/arch/x86/include/uapi/asm/unistd_64.h
index 4205ed4158bf..cb52a3a8b8fc 100644
--- a/tools/arch/x86/include/asm/unistd_64.h
+++ b/tools/arch/x86/include/uapi/asm/unistd_64.h
@@ -1,7 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NR_userfaultfd
-#define __NR_userfaultfd 282
-#endif
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
diff --git a/tools/arch/x86/lib/insn.c b/tools/arch/x86/lib/insn.c
index c41f95815480..797699462cd8 100644
--- a/tools/arch/x86/lib/insn.c
+++ b/tools/arch/x86/lib/insn.c
@@ -37,10 +37,10 @@
((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
#define __get_next(t, insn) \
- ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
+ ({ t r; memcpy(&r, insn->next_byte, sizeof(t)); insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
#define __peek_nbyte_next(t, insn, n) \
- ({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); })
+ ({ t r; memcpy(&r, (insn)->next_byte + n, sizeof(t)); leXX_to_cpu(t, r); })
#define get_next(t, insn) \
({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
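The casts that __get_next() and __peek_nbyte_next() used to perform read a multi-byte value through a pointer that is rarely suitably aligned at an arbitrary decode position, which is undefined behavior in C even though x86 hardware tolerates it. The memcpy() form is well-defined for any alignment, and compilers emit the same single load for it on x86. A standalone illustration of the idiom, with arbitrary byte values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t read_u32(const unsigned char *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));	/* defined even if p is misaligned */
	return v;			/* add a byte swap on big-endian */
}

int main(void)
{
	unsigned char insn_bytes[] = { 0x90, 0x78, 0x56, 0x34, 0x12 };

	/* +1 makes the read position deliberately misaligned */
	printf("0x%08x\n", read_u32(insn_bytes + 1));	/* 0x12345678 on LE */
	return 0;
}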
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 7f36385aa9e2..ade44577688e 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -624,6 +624,7 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
*/
switch (id) {
case BPF_FUNC_trace_printk:
+ case BPF_FUNC_trace_vprintk:
case BPF_FUNC_probe_write_user:
if (!full_mode)
continue;
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index e3ec47a6a612..cc835859465b 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -803,7 +803,10 @@ static int do_skeleton(int argc, char **argv)
} \n\
\n\
err = %1$s__create_skeleton(obj); \n\
- err = err ?: bpf_object__open_skeleton(obj->skeleton, opts);\n\
+ if (err) \n\
+ goto err_out; \n\
+ \n\
+ err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
if (err) \n\
goto err_out; \n\
\n\
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 3e9785f1064a..6fc59d61937a 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4046,7 +4046,7 @@ union bpf_attr {
* arguments. The *data* are a **u64** array and corresponding format string
* values are stored in the array. For strings and pointers where pointees
* are accessed, only the pointer values are stored in the *data* array.
- * The *data_len* is the size of *data* in bytes.
+ * The *data_len* is the size of *data* in bytes - must be a multiple of 8.
*
* Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory.
* Reading kernel memory may fail due to either invalid address or
@@ -4751,7 +4751,8 @@ union bpf_attr {
* Each format specifier in **fmt** corresponds to one u64 element
* in the **data** array. For strings and pointers where pointees
* are accessed, only the pointer values are stored in the *data*
- * array. The *data_len* is the size of *data* in bytes.
+ * array. The *data_len* is the size of *data* in bytes - must be
+ * a multiple of 8.
*
* Formats **%s** and **%p{i,I}{4,6}** require to read kernel
* memory. Reading kernel memory may fail due to either invalid
@@ -4898,6 +4899,16 @@ union bpf_attr {
* **-EINVAL** if *flags* is not zero.
*
* **-ENOENT** if architecture does not support branch records.
+ *
+ * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
+ * Description
+ * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64
+ * to format and can handle more format args as a result.
+ *
+ * Arguments are to be used as in **bpf_seq_printf**\ () helper.
+ * Return
+ * The number of bytes written to the buffer, or a negative error
+ * in case of failure.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5077,6 +5088,7 @@ union bpf_attr {
FN(get_attach_cookie), \
FN(task_pt_regs), \
FN(get_branch_snapshot), \
+ FN(trace_vprintk), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
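A hedged sketch of calling the documented helper directly from a BPF program follows; it assumes bpf_helper_defs.h has been regenerated so that bpf_trace_vprintk() is declared, and the tracepoint is only an example. Because *data* is an array of __u64, *data_len* is automatically a multiple of 8 as required:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("tracepoint/syscalls/sys_enter_execve")
int trace_exec(void *ctx)
{
	static const char fmt[] = "pid=%d uid=%d extra=%d more=%d";
	__u64 data[4];

	data[0] = bpf_get_current_pid_tgid() >> 32;
	data[1] = (__u32)bpf_get_current_uid_gid();
	data[2] = 3;
	data[3] = 4;

	/* data_len is 4 * sizeof(__u64) == 32, a multiple of 8 */
	bpf_trace_vprintk(fmt, sizeof(fmt), data, sizeof(data));
	return 0;
}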
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h
index 1d84ec9db93b..5859ca0a1439 100644
--- a/tools/include/uapi/sound/asound.h
+++ b/tools/include/uapi/sound/asound.h
@@ -784,6 +784,7 @@ struct snd_rawmidi_status {
#define SNDRV_RAWMIDI_IOCTL_PVERSION _IOR('W', 0x00, int)
#define SNDRV_RAWMIDI_IOCTL_INFO _IOR('W', 0x01, struct snd_rawmidi_info)
+#define SNDRV_RAWMIDI_IOCTL_USER_PVERSION _IOW('W', 0x02, int)
#define SNDRV_RAWMIDI_IOCTL_PARAMS _IOWR('W', 0x10, struct snd_rawmidi_params)
#define SNDRV_RAWMIDI_IOCTL_STATUS _IOWR('W', 0x20, struct snd_rawmidi_status)
#define SNDRV_RAWMIDI_IOCTL_DROP _IOW('W', 0x30, int)
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index b0bf56c5f120..5a5bd74f55bd 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -742,7 +742,7 @@ class DebugfsProvider(Provider):
The fields are all available KVM debugfs files
"""
- exempt_list = ['halt_poll_fail_ns', 'halt_poll_success_ns']
+ exempt_list = ['halt_poll_fail_ns', 'halt_poll_success_ns', 'halt_wait_ns']
fields = [field for field in self.walkdir(PATH_DEBUGFS_KVM)[2]
if field not in exempt_list]
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index b9987c3efa3c..963b1060d944 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -14,14 +14,6 @@
#define __type(name, val) typeof(val) *name
#define __array(name, val) typeof(val) *name[]
-/* Helper macro to print out debug messages */
-#define bpf_printk(fmt, ...) \
-({ \
- char ____fmt[] = fmt; \
- bpf_trace_printk(____fmt, sizeof(____fmt), \
- ##__VA_ARGS__); \
-})
-
/*
* Helper macro to place programs, maps, license in
* different sections in elf_bpf file. Section names
@@ -224,4 +216,47 @@ enum libbpf_tristate {
___param, sizeof(___param)); \
})
+#ifdef BPF_NO_GLOBAL_DATA
+#define BPF_PRINTK_FMT_MOD
+#else
+#define BPF_PRINTK_FMT_MOD static const
+#endif
+
+#define __bpf_printk(fmt, ...) \
+({ \
+ BPF_PRINTK_FMT_MOD char ____fmt[] = fmt; \
+ bpf_trace_printk(____fmt, sizeof(____fmt), \
+ ##__VA_ARGS__); \
+})
+
+/*
+ * __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments
+ * instead of an array of u64.
+ */
+#define __bpf_vprintk(fmt, args...) \
+({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_trace_vprintk(___fmt, sizeof(___fmt), \
+ ___param, sizeof(___param)); \
+})
+
+/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
+ * Otherwise use __bpf_vprintk
+ */
+#define ___bpf_pick_printk(...) \
+ ___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
+ __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
+ __bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\
+ __bpf_printk /*1*/, __bpf_printk /*0*/)
+
+/* Helper macro to print out debug messages */
+#define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args)
+
#endif
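With this change, existing bpf_printk() call sites compile unchanged: ___bpf_pick_printk() counts the variadic arguments at preprocessing time and dispatches to __bpf_printk (the classic bpf_trace_printk() path, limited to three format arguments) or __bpf_vprintk (the array-based bpf_trace_vprintk() path). A short sketch, assuming a kernel that provides bpf_trace_vprintk for the four-argument call:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("tracepoint/syscalls/sys_enter_openat")
int trace_open(void *ctx)
{
	/* 4 args: expands to __bpf_vprintk -> bpf_trace_vprintk() */
	bpf_printk("a=%d b=%d c=%d d=%d", 1, 2, 3, 4);
	/* 2 args: still expands to __bpf_printk -> bpf_trace_printk() */
	bpf_printk("x=%d y=%d", 5, 6);
	return 0;
}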
diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
index 8df718a6b142..80087b13877f 100644
--- a/tools/lib/bpf/gen_loader.c
+++ b/tools/lib/bpf/gen_loader.c
@@ -5,6 +5,7 @@
#include <string.h>
#include <errno.h>
#include <linux/filter.h>
+#include <sys/param.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
@@ -135,13 +136,17 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level)
static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
{
+ __u32 size8 = roundup(size, 8);
+ __u64 zero = 0;
void *prev;
- if (realloc_data_buf(gen, size))
+ if (realloc_data_buf(gen, size8))
return 0;
prev = gen->data_cur;
memcpy(gen->data_cur, data, size);
gen->data_cur += size;
+ memcpy(gen->data_cur, &zero, size8 - size);
+ gen->data_cur += size8 - size;
return prev - gen->data_start;
}
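add_data() now rounds every appended blob up to an 8-byte boundary and zero-fills the tail, so the offsets it returns into the loader's data area stay 8-byte aligned for subsequent use. The padding arithmetic in runnable form, with arbitrary sizes:

#include <stdio.h>
#include <sys/param.h>	/* roundup() */

int main(void)
{
	unsigned int size;

	for (size = 1; size <= 17; size += 4) {
		unsigned int size8 = roundup(size, 8);

		printf("size=%2u -> padded=%2u (+%u zero bytes)\n",
		       size, size8, size8 - size);
	}
	return 0;
}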
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index da65a1666a5e..8892f2f1bbcc 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -220,17 +220,40 @@ struct reloc_desc {
struct bpf_sec_def;
-typedef struct bpf_link *(*attach_fn_t)(const struct bpf_program *prog);
+typedef int (*init_fn_t)(struct bpf_program *prog, long cookie);
+typedef int (*preload_fn_t)(struct bpf_program *prog, struct bpf_prog_load_params *attr, long cookie);
+typedef struct bpf_link *(*attach_fn_t)(const struct bpf_program *prog, long cookie);
+
+/* stored as sec_def->cookie for all libbpf-supported SEC()s */
+enum sec_def_flags {
+ SEC_NONE = 0,
+ /* expected_attach_type is optional, if kernel doesn't support that */
+ SEC_EXP_ATTACH_OPT = 1,
+ /* legacy, only used by libbpf_get_type_names() and
+ * libbpf_attach_type_by_name(), not used by libbpf itself at all.
+ * This used to be associated with cgroup (and few other) BPF programs
+ * that were attachable through BPF_PROG_ATTACH command. Pretty
+ * meaningless nowadays, though.
+ */
+ SEC_ATTACHABLE = 2,
+ SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
+ /* attachment target is specified through BTF ID in either kernel or
+ * other BPF program's BTF object */
+ SEC_ATTACH_BTF = 4,
+ /* BPF program type allows sleeping/blocking in kernel */
+ SEC_SLEEPABLE = 8,
+ /* allow non-strict prefix matching */
+ SEC_SLOPPY_PFX = 16,
+};
struct bpf_sec_def {
const char *sec;
- size_t len;
enum bpf_prog_type prog_type;
enum bpf_attach_type expected_attach_type;
- bool is_exp_attach_type_optional;
- bool is_attachable;
- bool is_attach_btf;
- bool is_sleepable;
+ long cookie;
+
+ init_fn_t init_fn;
+ preload_fn_t preload_fn;
attach_fn_t attach_fn;
};
@@ -1665,7 +1688,7 @@ static int bpf_object__process_kconfig_line(struct bpf_object *obj,
void *ext_val;
__u64 num;
- if (strncmp(buf, "CONFIG_", 7))
+ if (!str_has_pfx(buf, "CONFIG_"))
return 0;
sep = strchr(buf, '=');
@@ -1845,6 +1868,8 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
continue;
if (sym.st_shndx != obj->efile.maps_shndx)
continue;
+ if (GELF_ST_TYPE(sym.st_info) == STT_SECTION)
+ continue;
nr_maps++;
}
/* Assume equally sized map definitions */
@@ -1869,6 +1894,8 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
continue;
if (sym.st_shndx != obj->efile.maps_shndx)
continue;
+ if (GELF_ST_TYPE(sym.st_info) == STT_SECTION)
+ continue;
map = bpf_object__add_map(obj);
if (IS_ERR(map))
@@ -1881,8 +1908,7 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
return -LIBBPF_ERRNO__FORMAT;
}
- if (GELF_ST_TYPE(sym.st_info) == STT_SECTION
- || GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
+ if (GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
return -ENOTSUP;
}
@@ -2913,7 +2939,7 @@ static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
static bool is_sec_name_dwarf(const char *name)
{
/* approximation, but the actual list is too long */
- return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
+ return str_has_pfx(name, ".debug_");
}
static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
@@ -2935,7 +2961,7 @@ static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
if (is_sec_name_dwarf(name))
return true;
- if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
+ if (str_has_pfx(name, ".rel")) {
name += sizeof(".rel") - 1;
/* DWARF section relocations */
if (is_sec_name_dwarf(name))
@@ -4643,6 +4669,30 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
create_attr.inner_map_fd = map->inner_map_fd;
}
+ switch (def->type) {
+ case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+ case BPF_MAP_TYPE_CGROUP_ARRAY:
+ case BPF_MAP_TYPE_STACK_TRACE:
+ case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+ case BPF_MAP_TYPE_HASH_OF_MAPS:
+ case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_DEVMAP_HASH:
+ case BPF_MAP_TYPE_CPUMAP:
+ case BPF_MAP_TYPE_XSKMAP:
+ case BPF_MAP_TYPE_SOCKMAP:
+ case BPF_MAP_TYPE_SOCKHASH:
+ case BPF_MAP_TYPE_QUEUE:
+ case BPF_MAP_TYPE_STACK:
+ case BPF_MAP_TYPE_RINGBUF:
+ create_attr.btf_fd = 0;
+ create_attr.btf_key_type_id = 0;
+ create_attr.btf_value_type_id = 0;
+ map->btf_key_type_id = 0;
+ map->btf_value_type_id = 0;
+ default:
+ break;
+ }
+
if (obj->gen_loader) {
bpf_gen__map_create(obj->gen_loader, &create_attr, is_inner ? -1 : map - obj->maps);
/* Pretend to have valid FD to pass various fd >= 0 checks.
@@ -6094,6 +6144,48 @@ static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program
return 0;
}
+static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
+ int *btf_obj_fd, int *btf_type_id);
+
+/* this is called as prog->sec_def->preload_fn for libbpf-supported sec_defs */
+static int libbpf_preload_prog(struct bpf_program *prog,
+ struct bpf_prog_load_params *attr, long cookie)
+{
+ enum sec_def_flags def = cookie;
+
+ /* old kernels might not support specifying expected_attach_type */
+ if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
+ attr->expected_attach_type = 0;
+
+ if (def & SEC_SLEEPABLE)
+ attr->prog_flags |= BPF_F_SLEEPABLE;
+
+ if ((prog->type == BPF_PROG_TYPE_TRACING ||
+ prog->type == BPF_PROG_TYPE_LSM ||
+ prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
+ int btf_obj_fd = 0, btf_type_id = 0, err;
+ const char *attach_name;
+
+ attach_name = strchr(prog->sec_name, '/') + 1;
+ err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
+ if (err)
+ return err;
+
+ /* cache resolved BTF FD and BTF type ID in the prog */
+ prog->attach_btf_obj_fd = btf_obj_fd;
+ prog->attach_btf_id = btf_type_id;
+
+ /* but by now libbpf common logic is not utilizing
+ * prog->attach_btf_obj_fd/prog->attach_btf_id anymore because
+ * this callback is called after attrs were populated by
+ * libbpf, so this callback has to update attr explicitly here
+ */
+ attr->attach_btf_obj_fd = btf_obj_fd;
+ attr->attach_btf_id = btf_type_id;
+ }
+ return 0;
+}
+
static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
char *license, __u32 kern_version, int *pfd)
@@ -6102,7 +6194,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
char *cp, errmsg[STRERR_BUFSIZE];
size_t log_buf_size = 0;
char *log_buf = NULL;
- int btf_fd, ret;
+ int btf_fd, ret, err;
if (prog->type == BPF_PROG_TYPE_UNSPEC) {
/*
@@ -6118,22 +6210,15 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
return -EINVAL;
load_attr.prog_type = prog->type;
- /* old kernels might not support specifying expected_attach_type */
- if (!kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
- prog->sec_def->is_exp_attach_type_optional)
- load_attr.expected_attach_type = 0;
- else
- load_attr.expected_attach_type = prog->expected_attach_type;
+ load_attr.expected_attach_type = prog->expected_attach_type;
if (kernel_supports(prog->obj, FEAT_PROG_NAME))
load_attr.name = prog->name;
load_attr.insns = insns;
load_attr.insn_cnt = insns_cnt;
load_attr.license = license;
load_attr.attach_btf_id = prog->attach_btf_id;
- if (prog->attach_prog_fd)
- load_attr.attach_prog_fd = prog->attach_prog_fd;
- else
- load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
+ load_attr.attach_prog_fd = prog->attach_prog_fd;
+ load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
load_attr.attach_btf_id = prog->attach_btf_id;
load_attr.kern_version = kern_version;
load_attr.prog_ifindex = prog->prog_ifindex;
@@ -6152,6 +6237,16 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.log_level = prog->log_level;
load_attr.prog_flags = prog->prog_flags;
+ /* adjust load_attr if sec_def provides custom preload callback */
+ if (prog->sec_def && prog->sec_def->preload_fn) {
+ err = prog->sec_def->preload_fn(prog, &load_attr, prog->sec_def->cookie);
+ if (err < 0) {
+ pr_warn("prog '%s': failed to prepare load attributes: %d\n",
+ prog->name, err);
+ return err;
+ }
+ }
+
if (prog->obj->gen_loader) {
bpf_gen__prog_load(prog->obj->gen_loader, &load_attr,
prog - prog->obj->programs);
@@ -6267,8 +6362,6 @@ static int bpf_program__record_externs(struct bpf_program *prog)
return 0;
}
-static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
-
int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
{
int err = 0, fd, i;
@@ -6278,19 +6371,6 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
return libbpf_err(-EINVAL);
}
- if ((prog->type == BPF_PROG_TYPE_TRACING ||
- prog->type == BPF_PROG_TYPE_LSM ||
- prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
- int btf_obj_fd = 0, btf_type_id = 0;
-
- err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id);
- if (err)
- return libbpf_err(err);
-
- prog->attach_btf_obj_fd = btf_obj_fd;
- prog->attach_btf_id = btf_type_id;
- }
-
if (prog->instances.nr < 0 || !prog->instances.fds) {
if (prog->preprocessor) {
pr_warn("Internal error: can't load program '%s'\n",
@@ -6400,6 +6480,7 @@ static const struct bpf_sec_def *find_sec_def(const char *sec_name);
static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
{
struct bpf_program *prog;
+ int err;
bpf_object__for_each_program(prog, obj) {
prog->sec_def = find_sec_def(prog->sec_name);
@@ -6410,8 +6491,6 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
continue;
}
- if (prog->sec_def->is_sleepable)
- prog->prog_flags |= BPF_F_SLEEPABLE;
bpf_program__set_type(prog, prog->sec_def->prog_type);
bpf_program__set_expected_attach_type(prog, prog->sec_def->expected_attach_type);
@@ -6421,6 +6500,18 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
#pragma GCC diagnostic pop
+
+ /* sec_def can have custom callback which should be called
+ * after bpf_program is initialized to adjust its properties
+ */
+ if (prog->sec_def->init_fn) {
+ err = prog->sec_def->init_fn(prog, prog->sec_def->cookie);
+ if (err < 0) {
+ pr_warn("prog '%s': failed to initialize: %d\n",
+ prog->name, err);
+ return err;
+ }
+ }
}
return 0;
@@ -6847,8 +6938,7 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
if (err)
return err;
pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
- } else if (ext->type == EXT_KCFG &&
- strncmp(ext->name, "CONFIG_", 7) == 0) {
+ } else if (ext->type == EXT_KCFG && str_has_pfx(ext->name, "CONFIG_")) {
need_config = true;
} else if (ext->type == EXT_KSYM) {
if (ext->ksym.type_id)
@@ -6934,7 +7024,8 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
if (obj->gen_loader) {
/* reset FDs */
- btf__set_fd(obj->btf, -1);
+ if (obj->btf)
+ btf__set_fd(obj->btf, -1);
for (i = 0; i < obj->nr_maps; i++)
obj->maps[i].fd = -1;
if (!err)
@@ -7908,217 +7999,143 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
prog->expected_attach_type = type;
}
-#define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional, \
- attachable, attach_btf) \
- { \
- .sec = string, \
- .len = sizeof(string) - 1, \
- .prog_type = ptype, \
- .expected_attach_type = eatype, \
- .is_exp_attach_type_optional = eatype_optional, \
- .is_attachable = attachable, \
- .is_attach_btf = attach_btf, \
- }
-
-/* Programs that can NOT be attached. */
-#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
-
-/* Programs that can be attached. */
-#define BPF_APROG_SEC(string, ptype, atype) \
- BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)
-
-/* Programs that must specify expected attach type at load time. */
-#define BPF_EAPROG_SEC(string, ptype, eatype) \
- BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)
-
-/* Programs that use BTF to identify attach point */
-#define BPF_PROG_BTF(string, ptype, eatype) \
- BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)
-
-/* Programs that can be attached but attach type can't be identified by section
- * name. Kept for backward compatibility.
- */
-#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
-
-#define SEC_DEF(sec_pfx, ptype, ...) { \
+#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \
.sec = sec_pfx, \
- .len = sizeof(sec_pfx) - 1, \
.prog_type = BPF_PROG_TYPE_##ptype, \
+ .expected_attach_type = atype, \
+ .cookie = (long)(flags), \
+ .preload_fn = libbpf_preload_prog, \
__VA_ARGS__ \
}
-static struct bpf_link *attach_kprobe(const struct bpf_program *prog);
-static struct bpf_link *attach_tp(const struct bpf_program *prog);
-static struct bpf_link *attach_raw_tp(const struct bpf_program *prog);
-static struct bpf_link *attach_trace(const struct bpf_program *prog);
-static struct bpf_link *attach_lsm(const struct bpf_program *prog);
-static struct bpf_link *attach_iter(const struct bpf_program *prog);
+static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie);
+static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie);
+static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie);
+static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie);
+static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie);
+static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie);
static const struct bpf_sec_def section_defs[] = {
- BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
- BPF_EAPROG_SEC("sk_reuseport/migrate", BPF_PROG_TYPE_SK_REUSEPORT,
- BPF_SK_REUSEPORT_SELECT_OR_MIGRATE),
- BPF_EAPROG_SEC("sk_reuseport", BPF_PROG_TYPE_SK_REUSEPORT,
- BPF_SK_REUSEPORT_SELECT),
- SEC_DEF("kprobe/", KPROBE,
- .attach_fn = attach_kprobe),
- BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE),
- SEC_DEF("kretprobe/", KPROBE,
- .attach_fn = attach_kprobe),
- BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE),
- BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
- BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
- SEC_DEF("tracepoint/", TRACEPOINT,
- .attach_fn = attach_tp),
- SEC_DEF("tp/", TRACEPOINT,
- .attach_fn = attach_tp),
- SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
- .attach_fn = attach_raw_tp),
- SEC_DEF("raw_tp/", RAW_TRACEPOINT,
- .attach_fn = attach_raw_tp),
- SEC_DEF("tp_btf/", TRACING,
- .expected_attach_type = BPF_TRACE_RAW_TP,
- .is_attach_btf = true,
- .attach_fn = attach_trace),
- SEC_DEF("fentry/", TRACING,
- .expected_attach_type = BPF_TRACE_FENTRY,
- .is_attach_btf = true,
- .attach_fn = attach_trace),
- SEC_DEF("fmod_ret/", TRACING,
- .expected_attach_type = BPF_MODIFY_RETURN,
- .is_attach_btf = true,
- .attach_fn = attach_trace),
- SEC_DEF("fexit/", TRACING,
- .expected_attach_type = BPF_TRACE_FEXIT,
- .is_attach_btf = true,
- .attach_fn = attach_trace),
- SEC_DEF("fentry.s/", TRACING,
- .expected_attach_type = BPF_TRACE_FENTRY,
- .is_attach_btf = true,
- .is_sleepable = true,
- .attach_fn = attach_trace),
- SEC_DEF("fmod_ret.s/", TRACING,
- .expected_attach_type = BPF_MODIFY_RETURN,
- .is_attach_btf = true,
- .is_sleepable = true,
- .attach_fn = attach_trace),
- SEC_DEF("fexit.s/", TRACING,
- .expected_attach_type = BPF_TRACE_FEXIT,
- .is_attach_btf = true,
- .is_sleepable = true,
- .attach_fn = attach_trace),
- SEC_DEF("freplace/", EXT,
- .is_attach_btf = true,
- .attach_fn = attach_trace),
- SEC_DEF("lsm/", LSM,
- .is_attach_btf = true,
- .expected_attach_type = BPF_LSM_MAC,
- .attach_fn = attach_lsm),
- SEC_DEF("lsm.s/", LSM,
- .is_attach_btf = true,
- .is_sleepable = true,
- .expected_attach_type = BPF_LSM_MAC,
- .attach_fn = attach_lsm),
- SEC_DEF("iter/", TRACING,
- .expected_attach_type = BPF_TRACE_ITER,
- .is_attach_btf = true,
- .attach_fn = attach_iter),
- SEC_DEF("syscall", SYSCALL,
- .is_sleepable = true),
- BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP,
- BPF_XDP_DEVMAP),
- BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP,
- BPF_XDP_CPUMAP),
- BPF_APROG_SEC("xdp", BPF_PROG_TYPE_XDP,
- BPF_XDP),
- BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
- BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
- BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
- BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
- BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
- BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
- BPF_CGROUP_INET_INGRESS),
- BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
- BPF_CGROUP_INET_EGRESS),
- BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
- BPF_EAPROG_SEC("cgroup/sock_create", BPF_PROG_TYPE_CGROUP_SOCK,
- BPF_CGROUP_INET_SOCK_CREATE),
- BPF_EAPROG_SEC("cgroup/sock_release", BPF_PROG_TYPE_CGROUP_SOCK,
- BPF_CGROUP_INET_SOCK_RELEASE),
- BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
- BPF_CGROUP_INET_SOCK_CREATE),
- BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
- BPF_CGROUP_INET4_POST_BIND),
- BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
- BPF_CGROUP_INET6_POST_BIND),
- BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
- BPF_CGROUP_DEVICE),
- BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
- BPF_CGROUP_SOCK_OPS),
- BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
- BPF_SK_SKB_STREAM_PARSER),
- BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
- BPF_SK_SKB_STREAM_VERDICT),
- BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
- BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
- BPF_SK_MSG_VERDICT),
- BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
- BPF_LIRC_MODE2),
- BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
- BPF_FLOW_DISSECTOR),
- BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_INET4_BIND),
- BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_INET6_BIND),
- BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_INET4_CONNECT),
- BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_INET6_CONNECT),
- BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_UDP4_SENDMSG),
- BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_UDP6_SENDMSG),
- BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_UDP4_RECVMSG),
- BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_UDP6_RECVMSG),
- BPF_EAPROG_SEC("cgroup/getpeername4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_INET4_GETPEERNAME),
- BPF_EAPROG_SEC("cgroup/getpeername6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_INET6_GETPEERNAME),
- BPF_EAPROG_SEC("cgroup/getsockname4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_INET4_GETSOCKNAME),
- BPF_EAPROG_SEC("cgroup/getsockname6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_INET6_GETSOCKNAME),
- BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
- BPF_CGROUP_SYSCTL),
- BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
- BPF_CGROUP_GETSOCKOPT),
- BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
- BPF_CGROUP_SETSOCKOPT),
- BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS),
- BPF_EAPROG_SEC("sk_lookup/", BPF_PROG_TYPE_SK_LOOKUP,
- BPF_SK_LOOKUP),
+ SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE | SEC_SLOPPY_PFX),
+ SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("kprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
+ SEC_DEF("uprobe/", KPROBE, 0, SEC_NONE),
+ SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
+ SEC_DEF("uretprobe/", KPROBE, 0, SEC_NONE),
+ SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE),
+ SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX),
+ SEC_DEF("action", SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX),
+ SEC_DEF("tracepoint/", TRACEPOINT, 0, SEC_NONE, attach_tp),
+ SEC_DEF("tp/", TRACEPOINT, 0, SEC_NONE, attach_tp),
+ SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
+ SEC_DEF("raw_tp/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
+ SEC_DEF("tp_btf/", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
+ SEC_DEF("fentry/", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
+ SEC_DEF("fmod_ret/", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
+ SEC_DEF("fexit/", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
+ SEC_DEF("fentry.s/", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
+ SEC_DEF("fmod_ret.s/", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
+ SEC_DEF("fexit.s/", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
+ SEC_DEF("freplace/", EXT, 0, SEC_ATTACH_BTF, attach_trace),
+ SEC_DEF("lsm/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
+ SEC_DEF("lsm.s/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
+ SEC_DEF("iter/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
+ SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE),
+ SEC_DEF("xdp_devmap/", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
+ SEC_DEF("xdp_cpumap/", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
+ SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+ SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE | SEC_SLOPPY_PFX),
+ SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE | SEC_SLOPPY_PFX),
+ SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE | SEC_SLOPPY_PFX),
+ SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE | SEC_SLOPPY_PFX),
+ SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+ SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+ SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+ SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+ SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX),
+ SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+ SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+ SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE),
+ SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
};
-#undef BPF_PROG_SEC_IMPL
-#undef BPF_PROG_SEC
-#undef BPF_APROG_SEC
-#undef BPF_EAPROG_SEC
-#undef BPF_APROG_COMPAT
-#undef SEC_DEF
-
#define MAX_TYPE_NAME_SIZE 32
static const struct bpf_sec_def *find_sec_def(const char *sec_name)
{
- int i, n = ARRAY_SIZE(section_defs);
+ const struct bpf_sec_def *sec_def;
+ enum sec_def_flags sec_flags;
+ int i, n = ARRAY_SIZE(section_defs), len;
+ bool strict = libbpf_mode & LIBBPF_STRICT_SEC_NAME;
for (i = 0; i < n; i++) {
- if (strncmp(sec_name,
- section_defs[i].sec, section_defs[i].len))
+ sec_def = &section_defs[i];
+ sec_flags = sec_def->cookie;
+ len = strlen(sec_def->sec);
+
+ /* "type/" always has to have proper SEC("type/extras") form */
+ if (sec_def->sec[len - 1] == '/') {
+ if (str_has_pfx(sec_name, sec_def->sec))
+ return sec_def;
+ continue;
+ }
+
+ /* "type+" means it can be either exact SEC("type") or
+ * well-formed SEC("type/extras") with proper '/' separator
+ */
+ if (sec_def->sec[len - 1] == '+') {
+ len--;
+ /* not even a prefix */
+ if (strncmp(sec_name, sec_def->sec, len) != 0)
+ continue;
+ /* exact match or has '/' separator */
+ if (sec_name[len] == '\0' || sec_name[len] == '/')
+ return sec_def;
+ continue;
+ }
+
+ /* SEC_SLOPPY_PFX definitions are allowed to be just prefix
+ * matches, unless strict section name mode
+ * (LIBBPF_STRICT_SEC_NAME) is enabled, in which case the
+ * match has to be exact.
+ */
+ if ((sec_flags & SEC_SLOPPY_PFX) && !strict) {
+ if (str_has_pfx(sec_name, sec_def->sec))
+ return sec_def;
continue;
- return &section_defs[i];
+ }
+
+ /* Definitions not marked SEC_SLOPPY_PFX (e.g.,
+ * SEC("syscall")) are exact matches in both modes.
+ */
+ if (strcmp(sec_name, sec_def->sec) == 0)
+ return sec_def;
}
return NULL;
}
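In the rewritten matcher, the trailing character of each sec_def name encodes the rule: "type/" requires extras after the slash, "type+" accepts either the bare name or a well-formed "type/extras", and plain names match exactly unless SEC_SLOPPY_PFX still permits a legacy prefix match. A few illustrative SEC() strings; the program bodies are stubs and the tracepoint name is only an example:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("tp/sched/sched_switch")	/* "tp/" form: extras are mandatory */
int h1(void *ctx) { return 0; }

/* "xdp" is SEC_SLOPPY_PFX: legacy mode would also accept e.g. "xdppass",
 * but LIBBPF_STRICT_SEC_NAME narrows it to the exact name.
 */
SEC("xdp")
int h2(struct xdp_md *ctx) { return XDP_PASS; }

SEC("syscall")			/* not sloppy: exact match in both modes */
int h3(void *ctx) { return 0; }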
@@ -8135,8 +8152,15 @@ static char *libbpf_get_type_names(bool attach_type)
buf[0] = '\0';
/* Forge string buf with all available names */
for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
- if (attach_type && !section_defs[i].is_attachable)
- continue;
+ const struct bpf_sec_def *sec_def = &section_defs[i];
+
+ if (attach_type) {
+ if (sec_def->preload_fn != libbpf_preload_prog)
+ continue;
+
+ if (!(sec_def->cookie & SEC_ATTACHABLE))
+ continue;
+ }
if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
free(buf);
@@ -8460,20 +8484,13 @@ static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
return -ESRCH;
}
-static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id)
+static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
+ int *btf_obj_fd, int *btf_type_id)
{
enum bpf_attach_type attach_type = prog->expected_attach_type;
__u32 attach_prog_fd = prog->attach_prog_fd;
- const char *attach_name;
int err = 0;
- if (!prog->sec_def || !prog->sec_def->is_attach_btf) {
- pr_warn("failed to identify BTF ID based on ELF section name '%s'\n",
- prog->sec_name);
- return -ESRCH;
- }
- attach_name = prog->sec_name + prog->sec_def->len;
-
/* BPF program's BTF ID */
if (attach_prog_fd) {
err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
@@ -8523,7 +8540,9 @@ int libbpf_attach_type_by_name(const char *name,
return libbpf_err(-EINVAL);
}
- if (!sec_def->is_attachable)
+ if (sec_def->preload_fn != libbpf_preload_prog)
+ return libbpf_err(-EINVAL);
+ if (!(sec_def->cookie & SEC_ATTACHABLE))
return libbpf_err(-EINVAL);
*attach_type = sec_def->expected_attach_type;
@@ -9011,59 +9030,18 @@ int bpf_link__unpin(struct bpf_link *link)
return 0;
}
-static int poke_kprobe_events(bool add, const char *name, bool retprobe, uint64_t offset)
-{
- int fd, ret = 0;
- pid_t p = getpid();
- char cmd[260], probename[128], probefunc[128];
- const char *file = "/sys/kernel/debug/tracing/kprobe_events";
-
- if (retprobe)
- snprintf(probename, sizeof(probename), "kretprobes/%s_libbpf_%u", name, p);
- else
- snprintf(probename, sizeof(probename), "kprobes/%s_libbpf_%u", name, p);
-
- if (offset)
- snprintf(probefunc, sizeof(probefunc), "%s+%zu", name, (size_t)offset);
-
- if (add) {
- snprintf(cmd, sizeof(cmd), "%c:%s %s",
- retprobe ? 'r' : 'p',
- probename,
- offset ? probefunc : name);
- } else {
- snprintf(cmd, sizeof(cmd), "-:%s", probename);
- }
-
- fd = open(file, O_WRONLY | O_APPEND, 0);
- if (!fd)
- return -errno;
- ret = write(fd, cmd, strlen(cmd));
- if (ret < 0)
- ret = -errno;
- close(fd);
-
- return ret;
-}
-
-static inline int add_kprobe_event_legacy(const char *name, bool retprobe, uint64_t offset)
-{
- return poke_kprobe_events(true, name, retprobe, offset);
-}
-
-static inline int remove_kprobe_event_legacy(const char *name, bool retprobe)
-{
- return poke_kprobe_events(false, name, retprobe, 0);
-}
-
struct bpf_link_perf {
struct bpf_link link;
int perf_event_fd;
/* legacy kprobe support: keep track of probe identifier and type */
char *legacy_probe_name;
+ bool legacy_is_kprobe;
bool legacy_is_retprobe;
};
+static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
+static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);
+
static int bpf_link_perf_detach(struct bpf_link *link)
{
struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
@@ -9076,10 +9054,16 @@ static int bpf_link_perf_detach(struct bpf_link *link)
close(perf_link->perf_event_fd);
close(link->fd);
- /* legacy kprobe needs to be removed after perf event fd closure */
- if (perf_link->legacy_probe_name)
- err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
- perf_link->legacy_is_retprobe);
+ /* legacy uprobe/kprobe needs to be removed after perf event fd closure */
+ if (perf_link->legacy_probe_name) {
+ if (perf_link->legacy_is_kprobe) {
+ err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
+ perf_link->legacy_is_retprobe);
+ } else {
+ err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
+ perf_link->legacy_is_retprobe);
+ }
+ }
return err;
}
@@ -9202,18 +9186,6 @@ static int parse_uint_from_file(const char *file, const char *fmt)
return ret;
}
-static int determine_kprobe_perf_type_legacy(const char *func_name, bool is_retprobe)
-{
- char file[192];
-
- snprintf(file, sizeof(file),
- "/sys/kernel/debug/tracing/events/%s/%s_libbpf_%d/id",
- is_retprobe ? "kretprobes" : "kprobes",
- func_name, getpid());
-
- return parse_uint_from_file(file, "%d\n");
-}
-
static int determine_kprobe_perf_type(void)
{
const char *file = "/sys/bus/event_source/devices/kprobe/type";
@@ -9296,21 +9268,79 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
return pfd;
}
-static int perf_event_kprobe_open_legacy(bool retprobe, const char *name, uint64_t offset, int pid)
+static int append_to_file(const char *file, const char *fmt, ...)
+{
+ int fd, n, err = 0;
+ va_list ap;
+
+ fd = open(file, O_WRONLY | O_APPEND, 0);
+ if (fd < 0)
+ return -errno;
+
+ va_start(ap, fmt);
+ n = vdprintf(fd, fmt, ap);
+ va_end(ap);
+
+ if (n < 0)
+ err = -errno;
+
+ close(fd);
+ return err;
+}
+
+static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
+ const char *kfunc_name, size_t offset)
+{
+ snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), kfunc_name, offset);
+}
+
+static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
+ const char *kfunc_name, size_t offset)
+{
+ const char *file = "/sys/kernel/debug/tracing/kprobe_events";
+
+ return append_to_file(file, "%c:%s/%s %s+0x%zx",
+ retprobe ? 'r' : 'p',
+ retprobe ? "kretprobes" : "kprobes",
+ probe_name, kfunc_name, offset);
+}
+
+static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
+{
+ const char *file = "/sys/kernel/debug/tracing/kprobe_events";
+
+ return append_to_file(file, "-:%s/%s", retprobe ? "kretprobes" : "kprobes", probe_name);
+}
+
+static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
+{
+ char file[256];
+
+ snprintf(file, sizeof(file),
+ "/sys/kernel/debug/tracing/events/%s/%s/id",
+ retprobe ? "kretprobes" : "kprobes", probe_name);
+
+ return parse_uint_from_file(file, "%d\n");
+}
+
+static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
+ const char *kfunc_name, size_t offset, int pid)
{
struct perf_event_attr attr = {};
char errmsg[STRERR_BUFSIZE];
int type, pfd, err;
- err = add_kprobe_event_legacy(name, retprobe, offset);
+ err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
if (err < 0) {
- pr_warn("failed to add legacy kprobe event: %s\n",
+ pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
+ kfunc_name, offset,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return err;
}
- type = determine_kprobe_perf_type_legacy(name, retprobe);
+ type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
if (type < 0) {
- pr_warn("failed to determine legacy kprobe event id: %s\n",
+ pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
+ kfunc_name, offset,
libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
return type;
}
@@ -9340,7 +9370,7 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
char errmsg[STRERR_BUFSIZE];
char *legacy_probe = NULL;
struct bpf_link *link;
- unsigned long offset;
+ size_t offset;
bool retprobe, legacy;
int pfd, err;
@@ -9357,36 +9387,48 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
func_name, offset,
-1 /* pid */, 0 /* ref_ctr_off */);
} else {
+ char probe_name[256];
+
+ gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
+ func_name, offset);
+
+ legacy_probe = strdup(probe_name);
if (!legacy_probe)
return libbpf_err_ptr(-ENOMEM);
- pfd = perf_event_kprobe_open_legacy(retprobe, func_name,
+ pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name,
offset, -1 /* pid */);
}
if (pfd < 0) {
- pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
- prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
- libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
- return libbpf_err_ptr(pfd);
+ err = -errno;
+ pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
+ prog->name, retprobe ? "kretprobe" : "kprobe",
+ func_name, offset,
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ goto err_out;
}
link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
err = libbpf_get_error(link);
if (err) {
close(pfd);
- pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
- prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
+ pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
+ prog->name, retprobe ? "kretprobe" : "kprobe",
+ func_name, offset,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- return libbpf_err_ptr(err);
+ goto err_out;
}
if (legacy) {
struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
perf_link->legacy_probe_name = legacy_probe;
+ perf_link->legacy_is_kprobe = true;
perf_link->legacy_is_retprobe = retprobe;
}
return link;
+err_out:
+ free(legacy_probe);
+ return libbpf_err_ptr(err);
}
struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
@@ -9400,7 +9442,7 @@ struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
}
-static struct bpf_link *attach_kprobe(const struct bpf_program *prog)
+static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie)
{
DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
unsigned long offset = 0;
@@ -9409,8 +9451,11 @@ static struct bpf_link *attach_kprobe(const struct bpf_program *prog)
char *func;
int n, err;
- func_name = prog->sec_name + prog->sec_def->len;
- opts.retprobe = strcmp(prog->sec_def->sec, "kretprobe/") == 0;
+ opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
+ if (opts.retprobe)
+ func_name = prog->sec_name + sizeof("kretprobe/") - 1;
+ else
+ func_name = prog->sec_name + sizeof("kprobe/") - 1;
n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
if (n < 1) {
@@ -9431,17 +9476,96 @@ static struct bpf_link *attach_kprobe(const struct bpf_program *prog)
return link;
}
+static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
+ const char *binary_path, uint64_t offset)
+{
+ int i;
+
+ snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
+
+ /* sanitize binary_path in the probe name */
+ for (i = 0; buf[i]; i++) {
+ if (!isalnum(buf[i]))
+ buf[i] = '_';
+ }
+}
+
+static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
+ const char *binary_path, size_t offset)
+{
+ const char *file = "/sys/kernel/debug/tracing/uprobe_events";
+
+ return append_to_file(file, "%c:%s/%s %s:0x%zx",
+ retprobe ? 'r' : 'p',
+ retprobe ? "uretprobes" : "uprobes",
+ probe_name, binary_path, offset);
+}
+
+static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
+{
+ const char *file = "/sys/kernel/debug/tracing/uprobe_events";
+
+ return append_to_file(file, "-:%s/%s", retprobe ? "uretprobes" : "uprobes", probe_name);
+}
+
+static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
+{
+ char file[512];
+
+ snprintf(file, sizeof(file),
+ "/sys/kernel/debug/tracing/events/%s/%s/id",
+ retprobe ? "uretprobes" : "uprobes", probe_name);
+
+ return parse_uint_from_file(file, "%d\n");
+}
+
+static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
+ const char *binary_path, size_t offset, int pid)
+{
+ struct perf_event_attr attr;
+ int type, pfd, err;
+
+ err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
+ if (err < 0) {
+ pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n",
+ binary_path, (size_t)offset, err);
+ return err;
+ }
+ type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
+ if (type < 0) {
+ pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
+ binary_path, offset, type);
+ return type;
+ }
+
+ memset(&attr, 0, sizeof(attr));
+ attr.size = sizeof(attr);
+ attr.config = type;
+ attr.type = PERF_TYPE_TRACEPOINT;
+
+ pfd = syscall(__NR_perf_event_open, &attr,
+ pid < 0 ? -1 : pid, /* pid */
+ pid == -1 ? 0 : -1, /* cpu */
+ -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
+ if (pfd < 0) {
+ err = -errno;
+ pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
+ return err;
+ }
+ return pfd;
+}
+
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
const char *binary_path, size_t func_offset,
const struct bpf_uprobe_opts *opts)
{
DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
- char errmsg[STRERR_BUFSIZE];
+ char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
struct bpf_link *link;
size_t ref_ctr_off;
int pfd, err;
- bool retprobe;
+ bool retprobe, legacy;
if (!OPTS_VALID(opts, bpf_uprobe_opts))
return libbpf_err_ptr(-EINVAL);
@@ -9450,15 +9574,35 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
- pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
- func_offset, pid, ref_ctr_off);
+ legacy = determine_uprobe_perf_type() < 0;
+ if (!legacy) {
+ pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
+ func_offset, pid, ref_ctr_off);
+ } else {
+ char probe_name[512];
+
+ if (ref_ctr_off)
+ return libbpf_err_ptr(-EINVAL);
+
+ gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
+ binary_path, func_offset);
+
+ legacy_probe = strdup(probe_name);
+ if (!legacy_probe)
+ return libbpf_err_ptr(-ENOMEM);
+
+ pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
+ binary_path, func_offset, pid);
+ }
if (pfd < 0) {
+ err = -errno;
pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
prog->name, retprobe ? "uretprobe" : "uprobe",
binary_path, func_offset,
- libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
- return libbpf_err_ptr(pfd);
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ goto err_out;
}
+
link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
err = libbpf_get_error(link);
if (err) {
@@ -9467,9 +9611,20 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
prog->name, retprobe ? "uretprobe" : "uprobe",
binary_path, func_offset,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- return libbpf_err_ptr(err);
+ goto err_out;
+ }
+ if (legacy) {
+ struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
+
+ perf_link->legacy_probe_name = legacy_probe;
+ perf_link->legacy_is_kprobe = false;
+ perf_link->legacy_is_retprobe = retprobe;
}
return link;
+err_out:
+ free(legacy_probe);
+ return libbpf_err_ptr(err);
+
}
struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
@@ -9573,7 +9728,7 @@ struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
}
-static struct bpf_link *attach_tp(const struct bpf_program *prog)
+static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie)
{
char *sec_name, *tp_cat, *tp_name;
struct bpf_link *link;
@@ -9582,8 +9737,11 @@ static struct bpf_link *attach_tp(const struct bpf_program *prog)
if (!sec_name)
return libbpf_err_ptr(-ENOMEM);
- /* extract "tp/<category>/<name>" */
- tp_cat = sec_name + prog->sec_def->len;
+ /* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
+ if (str_has_pfx(prog->sec_name, "tp/"))
+ tp_cat = sec_name + sizeof("tp/") - 1;
+ else
+ tp_cat = sec_name + sizeof("tracepoint/") - 1;
tp_name = strchr(tp_cat, '/');
if (!tp_name) {
free(sec_name);
@@ -9627,9 +9785,14 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *pr
return link;
}
-static struct bpf_link *attach_raw_tp(const struct bpf_program *prog)
+static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie)
{
- const char *tp_name = prog->sec_name + prog->sec_def->len;
+ const char *tp_name;
+
+ if (str_has_pfx(prog->sec_name, "raw_tp/"))
+ tp_name = prog->sec_name + sizeof("raw_tp/") - 1;
+ else
+ tp_name = prog->sec_name + sizeof("raw_tracepoint/") - 1;
return bpf_program__attach_raw_tracepoint(prog, tp_name);
}
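For reference, a sketch of the section names both parsers now accept (program bodies elided to stubs; tracepoint names are illustrative):

	SEC("tp/sched/sched_switch")	/* equivalently SEC("tracepoint/sched/sched_switch") */
	int handle_switch(void *ctx) { return 0; }

	SEC("raw_tp/task_rename")	/* equivalently SEC("raw_tracepoint/task_rename") */
	int handle_rename(void *ctx) { return 0; }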
@@ -9674,12 +9837,12 @@ struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
return bpf_program__attach_btf_id(prog);
}
-static struct bpf_link *attach_trace(const struct bpf_program *prog)
+static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie)
{
return bpf_program__attach_trace(prog);
}
-static struct bpf_link *attach_lsm(const struct bpf_program *prog)
+static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie)
{
return bpf_program__attach_lsm(prog);
}
@@ -9810,7 +9973,7 @@ bpf_program__attach_iter(const struct bpf_program *prog,
return link;
}
-static struct bpf_link *attach_iter(const struct bpf_program *prog)
+static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie)
{
return bpf_program__attach_iter(prog, NULL);
}
@@ -9820,7 +9983,7 @@ struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
if (!prog->sec_def || !prog->sec_def->attach_fn)
return libbpf_err_ptr(-ESRCH);
- return prog->sec_def->attach_fn(prog);
+ return prog->sec_def->attach_fn(prog, prog->sec_def->cookie);
}
static int bpf_link__detach_struct_ops(struct bpf_link *link)
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index c90e3d79e72c..e35490c54eb3 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -269,7 +269,7 @@ struct bpf_kprobe_opts {
/* custom user-provided value fetchable through bpf_get_attach_cookie() */
__u64 bpf_cookie;
/* function's offset to install kprobe to */
- unsigned long offset;
+ size_t offset;
/* kprobe is return probe */
bool retprobe;
size_t :0;
@@ -481,9 +481,13 @@ struct bpf_map_def {
unsigned int map_flags;
};
-/*
- * The 'struct bpf_map' in include/linux/bpf.h is internal to the kernel,
- * so no need to worry about a name clash.
+/**
+ * @brief **bpf_object__find_map_by_name()** returns the BPF map of
+ * the given name, if it exists within the passed BPF object
+ * @param obj BPF object
+ * @param name name of the BPF map
+ * @return BPF map instance, if such map exists within the BPF object;
+ * or NULL otherwise.
*/
LIBBPF_API struct bpf_map *
bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);
@@ -509,7 +513,12 @@ bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
LIBBPF_API struct bpf_map *
bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);
-/* get/set map FD */
+/**
+ * @brief **bpf_map__fd()** gets the file descriptor of the passed
+ * BPF map
+ * @param map the BPF map instance
+ * @return the file descriptor; or -EINVAL in case of an error
+ */
LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
/* get map definition */
@@ -550,6 +559,14 @@ LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
const void *data, size_t size);
LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
+
+/**
+ * @brief **bpf_map__is_internal()** tells the caller whether or not the
+ * passed map is a special map created by libbpf automatically for things like
+ * global variables, __ksym externs, Kconfig values, etc.
+ * @param map the bpf_map
+ * @return true, if the map is an internal map; false, otherwise
+ */
LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);
@@ -561,6 +578,38 @@ LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);
+/**
+ * @brief **libbpf_get_error()** extracts the error code from the passed
+ * pointer
+ * @param ptr pointer returned from libbpf API function
+ * @return error code; or 0 if no error occurred
+ *
+ * Many libbpf API functions which return pointers have logic to encode error
+ * codes as pointers, and do not return NULL. This means **libbpf_get_error()**
+ * should be used on the return value from these functions immediately after
+ * calling the API function, with no intervening calls that could clobber the
+ * `errno` variable. Consult the individual function's documentation to verify
+ * if this logic applies.
+ *
+ * For these API functions, if `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)`
+ * is enabled, NULL is returned on error instead.
+ *
+ * If ptr is NULL, then errno should be already set by the failing
+ * API, because libbpf never returns NULL on success and it now always
+ * sets errno on error.
+ *
+ * Example usage:
+ *
+ * struct perf_buffer *pb;
+ *
+ * pb = perf_buffer__new(bpf_map__fd(obj->maps.events), PERF_BUFFER_PAGES, &opts);
+ * err = libbpf_get_error(pb);
+ * if (err) {
+ * pb = NULL;
+ * fprintf(stderr, "failed to open perf buffer: %d\n", err);
+ * goto cleanup;
+ * }
+ */
LIBBPF_API long libbpf_get_error(const void *ptr);
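A minimal sketch of the NULL-returning mode mentioned above (same map and opts setup as the doc example):

	libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS);

	pb = perf_buffer__new(bpf_map__fd(obj->maps.events), PERF_BUFFER_PAGES, &opts);
	if (!pb) {
		err = -errno; /* errno was set by the failing call */
		fprintf(stderr, "failed to open perf buffer: %d\n", err);
		goto cleanup;
	}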
struct bpf_prog_load_attr {
@@ -825,9 +874,10 @@ bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);
LIBBPF_API void
bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
-/*
- * A helper function to get the number of possible CPUs before looking up
- * per-CPU maps. Negative errno is returned on failure.
+/**
+ * @brief **libbpf_num_possible_cpus()** is a helper function to get the
+ * number of possible CPUs that the host kernel supports and expects.
+ * @return number of possible CPUs; or error code on failure
*
* Example usage:
*
@@ -837,7 +887,6 @@ bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
* }
* long values[ncpus];
* bpf_map_lookup_elem(per_cpu_map_fd, key, values);
- *
*/
LIBBPF_API int libbpf_num_possible_cpus(void);
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index ceb0c98979bc..ec79400517d4 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -89,6 +89,13 @@
(offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
#endif
+/* Check whether a string `str` has prefix `pfx`, regardless if `pfx` is
+ * a string literal known at compilation time or char * pointer known only at
+ * runtime.
+ */
+#define str_has_pfx(str, pfx) \
+ (strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)
+
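A quick illustration of the macro's dual behavior (variable names hypothetical):

	const char *rt_pfx = "raw_tp/";

	str_has_pfx("tp/sched/sched_switch", "tp/"); /* literal: strncmp over sizeof("tp/") - 1 */
	str_has_pfx(sec_name, rt_pfx);               /* runtime: falls back to strlen(rt_pfx) */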
/* Symbol versioning is different between static and shared library.
* Properly versioned symbols are needed for shared library, but
* only the symbol of the new version is needed for static library.
diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h
index df0d03dcffab..74e6f860f703 100644
--- a/tools/lib/bpf/libbpf_legacy.h
+++ b/tools/lib/bpf/libbpf_legacy.h
@@ -46,6 +46,15 @@ enum libbpf_strict_mode {
*/
LIBBPF_STRICT_DIRECT_ERRS = 0x02,
+ /*
+ * Enforce strict BPF program section (SEC()) names.
+ * E.g., while previously SEC("xdp_whatever") or SEC("perf_event_blah") were
+ * allowed, with LIBBPF_STRICT_SEC_NAME this will become
+ * unrecognized by libbpf and would have to be just SEC("xdp") and
+ * SEC("perf_event").
+ */
+ LIBBPF_STRICT_SEC_NAME = 0x04,
+
__LIBBPF_STRICT_LAST,
};
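Opting in is a one-liner; a minimal sketch (error handling elided):

	/* reject legacy SEC() aliases like SEC("xdp_whatever") up front */
	libbpf_set_strict_mode(LIBBPF_STRICT_SEC_NAME);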
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 10911a8cad0f..2df880cefdae 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -1649,11 +1649,17 @@ static bool btf_is_non_static(const struct btf_type *t)
static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sym_name,
int *out_btf_sec_id, int *out_btf_id)
{
- int i, j, n = btf__get_nr_types(obj->btf), m, btf_id = 0;
+ int i, j, n, m, btf_id = 0;
const struct btf_type *t;
const struct btf_var_secinfo *vi;
const char *name;
+ if (!obj->btf) {
+ pr_warn("failed to find BTF info for object '%s'\n", obj->filename);
+ return -EINVAL;
+ }
+
+ n = btf__get_nr_types(obj->btf);
for (i = 1; i <= n; i++) {
t = btf__type_by_id(obj->btf, i);
diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h
index b22b50c1b173..9cf66702fa8d 100644
--- a/tools/lib/bpf/skel_internal.h
+++ b/tools/lib/bpf/skel_internal.h
@@ -105,10 +105,12 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
err = skel_sys_bpf(BPF_PROG_RUN, &attr, sizeof(attr));
if (err < 0 || (int)attr.test.retval < 0) {
opts->errstr = "failed to execute loader prog";
- if (err < 0)
+ if (err < 0) {
err = -errno;
- else
+ } else {
err = (int)attr.test.retval;
+ errno = -err;
+ }
goto out;
}
err = 0;
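With errno now mirrored from the loader prog's retval, a light-skeleton user can report failures uniformly; a hedged sketch (the skeleton name is hypothetical):

	if (my_skel__load(skel)) {
		fprintf(stderr, "loader prog failed: %s\n", strerror(errno));
		goto cleanup;
	}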
diff --git a/tools/lib/bpf/strset.c b/tools/lib/bpf/strset.c
index 1fb8b49de1d6..ea655318153f 100644
--- a/tools/lib/bpf/strset.c
+++ b/tools/lib/bpf/strset.c
@@ -88,6 +88,7 @@ void strset__free(struct strset *set)
hashmap__free(set->strs_hash);
free(set->strs_data);
+ free(set);
}
size_t strset__data_size(const struct strset *set)
diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
index d8886720e83d..8441e3e1aaac 100644
--- a/tools/lib/perf/evsel.c
+++ b/tools/lib/perf/evsel.c
@@ -43,7 +43,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
free(evsel);
}
-#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
+#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y))
#define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
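Since FD() now yields a pointer rather than a value, every caller gains a NULL check before dereferencing; the new contract, sketched (assuming xyarray__entry() returns NULL for out-of-range indices):

	int *fd = FD(evsel, cpu, thread);

	if (fd == NULL) /* cpu/thread outside the allocated xyarray */
		return -EINVAL;
	/* safe to use *fd from here on */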
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@@ -54,7 +54,10 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
int cpu, thread;
for (cpu = 0; cpu < ncpus; cpu++) {
for (thread = 0; thread < nthreads; thread++) {
- FD(evsel, cpu, thread) = -1;
+ int *fd = FD(evsel, cpu, thread);
+
+ if (fd)
+ *fd = -1;
}
}
}
@@ -80,7 +83,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
{
struct perf_evsel *leader = evsel->leader;
- int fd;
+ int *fd;
if (evsel == leader) {
*group_fd = -1;
@@ -95,10 +98,10 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *grou
return -ENOTCONN;
fd = FD(leader, cpu, thread);
- if (fd == -1)
+ if (fd == NULL || *fd == -1)
return -EBADF;
- *group_fd = fd;
+ *group_fd = *fd;
return 0;
}
@@ -138,7 +141,11 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
for (cpu = 0; cpu < cpus->nr; cpu++) {
for (thread = 0; thread < threads->nr; thread++) {
- int fd, group_fd;
+ int fd, group_fd, *evsel_fd;
+
+ evsel_fd = FD(evsel, cpu, thread);
+ if (evsel_fd == NULL)
+ return -EINVAL;
err = get_group_fd(evsel, cpu, thread, &group_fd);
if (err < 0)
@@ -151,7 +158,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
if (fd < 0)
return -errno;
- FD(evsel, cpu, thread) = fd;
+ *evsel_fd = fd;
}
}
@@ -163,9 +170,12 @@ static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
int thread;
for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
- if (FD(evsel, cpu, thread) >= 0)
- close(FD(evsel, cpu, thread));
- FD(evsel, cpu, thread) = -1;
+ int *fd = FD(evsel, cpu, thread);
+
+ if (fd && *fd >= 0) {
+ close(*fd);
+ *fd = -1;
+ }
}
}
@@ -209,13 +219,12 @@ void perf_evsel__munmap(struct perf_evsel *evsel)
for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
- int fd = FD(evsel, cpu, thread);
- struct perf_mmap *map = MMAP(evsel, cpu, thread);
+ int *fd = FD(evsel, cpu, thread);
- if (fd < 0)
+ if (fd == NULL || *fd < 0)
continue;
- perf_mmap__munmap(map);
+ perf_mmap__munmap(MMAP(evsel, cpu, thread));
}
}
@@ -239,15 +248,16 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
- int fd = FD(evsel, cpu, thread);
- struct perf_mmap *map = MMAP(evsel, cpu, thread);
+ int *fd = FD(evsel, cpu, thread);
+ struct perf_mmap *map;
- if (fd < 0)
+ if (fd == NULL || *fd < 0)
continue;
+ map = MMAP(evsel, cpu, thread);
perf_mmap__init(map, NULL, false, NULL);
- ret = perf_mmap__mmap(map, &mp, fd, cpu);
+ ret = perf_mmap__mmap(map, &mp, *fd, cpu);
if (ret) {
perf_evsel__munmap(evsel);
return ret;
@@ -260,7 +270,9 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
{
- if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
+ int *fd = FD(evsel, cpu, thread);
+
+ if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL)
return NULL;
return MMAP(evsel, cpu, thread)->base;
@@ -295,17 +307,18 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
struct perf_counts_values *count)
{
size_t size = perf_evsel__read_size(evsel);
+ int *fd = FD(evsel, cpu, thread);
memset(count, 0, sizeof(*count));
- if (FD(evsel, cpu, thread) < 0)
+ if (fd == NULL || *fd < 0)
return -EINVAL;
if (MMAP(evsel, cpu, thread) &&
!perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
return 0;
- if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
+ if (readn(*fd, count->values, size) <= 0)
return -errno;
return 0;
@@ -318,8 +331,13 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
int thread;
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
- int fd = FD(evsel, cpu, thread),
- err = ioctl(fd, ioc, arg);
+ int err;
+ int *fd = FD(evsel, cpu, thread);
+
+ if (fd == NULL || *fd < 0)
+ return -1;
+
+ err = ioctl(*fd, ioc, arg);
if (err)
return err;
diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c
index c67c83399170..ce91a582f0e4 100644
--- a/tools/lib/perf/tests/test-evlist.c
+++ b/tools/lib/perf/tests/test-evlist.c
@@ -40,7 +40,7 @@ static int test_stat_cpu(void)
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_TASK_CLOCK,
};
- int err, cpu, tmp;
+ int err, idx;
cpus = perf_cpu_map__new(NULL);
__T("failed to create cpus", cpus);
@@ -70,10 +70,10 @@ static int test_stat_cpu(void)
perf_evlist__for_each_evsel(evlist, evsel) {
cpus = perf_evsel__cpus(evsel);
- perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
+ for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
struct perf_counts_values counts = { .val = 0 };
- perf_evsel__read(evsel, cpu, 0, &counts);
+ perf_evsel__read(evsel, idx, 0, &counts);
__T("failed to read value for evsel", counts.val != 0);
}
}
diff --git a/tools/lib/perf/tests/test-evsel.c b/tools/lib/perf/tests/test-evsel.c
index a184e4861627..33ae9334861a 100644
--- a/tools/lib/perf/tests/test-evsel.c
+++ b/tools/lib/perf/tests/test-evsel.c
@@ -22,7 +22,7 @@ static int test_stat_cpu(void)
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
};
- int err, cpu, tmp;
+ int err, idx;
cpus = perf_cpu_map__new(NULL);
__T("failed to create cpus", cpus);
@@ -33,10 +33,10 @@ static int test_stat_cpu(void)
err = perf_evsel__open(evsel, cpus, NULL);
__T("failed to open evsel", err == 0);
- perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
+ for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
struct perf_counts_values counts = { .val = 0 };
- perf_evsel__read(evsel, cpu, 0, &counts);
+ perf_evsel__read(evsel, idx, 0, &counts);
__T("failed to read value for evsel", counts.val != 0);
}
@@ -148,6 +148,7 @@ static int test_stat_user_read(int event)
__T("failed to mmap evsel", err == 0);
pc = perf_evsel__mmap_base(evsel, 0, 0);
+ __T("failed to get mmapped address", pc);
#if defined(__i386__) || defined(__x86_64__)
__T("userspace counter access not supported", pc->cap_user_rdpmc);
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index bc821056aba9..0893436cc09f 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -684,7 +684,7 @@ static int elf_add_alternative(struct elf *elf,
sec = find_section_by_name(elf, ".altinstructions");
if (!sec) {
sec = elf_create_section(elf, ".altinstructions",
- SHF_ALLOC, size, 0);
+ SHF_ALLOC, 0, 0);
if (!sec) {
WARN_ELF("elf_create_section");
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index e5947fbb9e7a..06b5c164ae93 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -292,7 +292,7 @@ static int decode_instructions(struct objtool_file *file)
!strcmp(sec->name, ".entry.text"))
sec->noinstr = true;
- for (offset = 0; offset < sec->len; offset += insn->len) {
+ for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
insn = malloc(sizeof(*insn));
if (!insn) {
WARN("malloc failed");
@@ -307,7 +307,7 @@ static int decode_instructions(struct objtool_file *file)
insn->offset = offset;
ret = arch_decode_instruction(file->elf, sec, offset,
- sec->len - offset,
+ sec->sh.sh_size - offset,
&insn->len, &insn->type,
&insn->immediate,
&insn->stack_ops);
@@ -349,9 +349,9 @@ static struct instruction *find_last_insn(struct objtool_file *file,
{
struct instruction *insn = NULL;
unsigned int offset;
- unsigned int end = (sec->len > 10) ? sec->len - 10 : 0;
+ unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
- for (offset = sec->len - 1; offset >= end && !insn; offset--)
+ for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
insn = find_insn(file, sec, offset);
return insn;
@@ -389,7 +389,7 @@ static int add_dead_ends(struct objtool_file *file)
insn = find_insn(file, reloc->sym->sec, reloc->addend);
if (insn)
insn = list_prev_entry(insn, list);
- else if (reloc->addend == reloc->sym->sec->len) {
+ else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
insn = find_last_insn(file, reloc->sym->sec);
if (!insn) {
WARN("can't find unreachable insn at %s+0x%x",
@@ -424,7 +424,7 @@ reachable:
insn = find_insn(file, reloc->sym->sec, reloc->addend);
if (insn)
insn = list_prev_entry(insn, list);
- else if (reloc->addend == reloc->sym->sec->len) {
+ else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
insn = find_last_insn(file, reloc->sym->sec);
if (!insn) {
WARN("can't find reachable insn at %s+0x%x",
@@ -1561,14 +1561,14 @@ static int read_unwind_hints(struct objtool_file *file)
return -1;
}
- if (sec->len % sizeof(struct unwind_hint)) {
+ if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
WARN("struct unwind_hint size mismatch");
return -1;
}
file->hints = true;
- for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
+ for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
hint = (struct unwind_hint *)sec->data->d_buf + i;
reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 8676c7598728..fee03b744a6e 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -286,10 +286,9 @@ static int read_sections(struct elf *elf)
return -1;
}
}
- sec->len = sec->sh.sh_size;
if (sec->sh.sh_flags & SHF_EXECINSTR)
- elf->text_size += sec->len;
+ elf->text_size += sec->sh.sh_size;
list_add_tail(&sec->list, &elf->sections);
elf_hash_add(section, &sec->hash, sec->idx);
@@ -509,6 +508,7 @@ int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
list_add_tail(&reloc->list, &sec->reloc->reloc_list);
elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
+ sec->reloc->sh.sh_size += sec->reloc->sh.sh_entsize;
sec->reloc->changed = true;
return 0;
@@ -734,8 +734,8 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
data->d_size = strlen(str) + 1;
data->d_align = 1;
- len = strtab->len;
- strtab->len += data->d_size;
+ len = strtab->sh.sh_size;
+ strtab->sh.sh_size += data->d_size;
strtab->changed = true;
return len;
@@ -790,9 +790,9 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
data->d_align = 1;
data->d_type = ELF_T_SYM;
- sym->idx = symtab->len / sizeof(sym->sym);
+ sym->idx = symtab->sh.sh_size / sizeof(sym->sym);
- symtab->len += data->d_size;
+ symtab->sh.sh_size += data->d_size;
symtab->changed = true;
symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
@@ -814,7 +814,7 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
data->d_align = 4;
data->d_type = ELF_T_WORD;
- symtab_shndx->len += 4;
+ symtab_shndx->sh.sh_size += 4;
symtab_shndx->changed = true;
}
@@ -855,7 +855,6 @@ struct section *elf_create_section(struct elf *elf, const char *name,
}
sec->idx = elf_ndxscn(s);
- sec->len = size;
sec->changed = true;
sec->data = elf_newdata(s);
@@ -979,63 +978,63 @@ static struct section *elf_create_reloc_section(struct elf *elf,
}
}
-static int elf_rebuild_rel_reloc_section(struct section *sec, int nr)
+static int elf_rebuild_rel_reloc_section(struct section *sec)
{
struct reloc *reloc;
- int idx = 0, size;
+ int idx = 0;
void *buf;
/* Allocate a buffer for relocations */
- size = nr * sizeof(GElf_Rel);
- buf = malloc(size);
+ buf = malloc(sec->sh.sh_size);
if (!buf) {
perror("malloc");
return -1;
}
sec->data->d_buf = buf;
- sec->data->d_size = size;
+ sec->data->d_size = sec->sh.sh_size;
sec->data->d_type = ELF_T_REL;
- sec->sh.sh_size = size;
-
idx = 0;
list_for_each_entry(reloc, &sec->reloc_list, list) {
reloc->rel.r_offset = reloc->offset;
reloc->rel.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
- gelf_update_rel(sec->data, idx, &reloc->rel);
+ if (!gelf_update_rel(sec->data, idx, &reloc->rel)) {
+ WARN_ELF("gelf_update_rel");
+ return -1;
+ }
idx++;
}
return 0;
}
-static int elf_rebuild_rela_reloc_section(struct section *sec, int nr)
+static int elf_rebuild_rela_reloc_section(struct section *sec)
{
struct reloc *reloc;
- int idx = 0, size;
+ int idx = 0;
void *buf;
/* Allocate a buffer for relocations with addends */
- size = nr * sizeof(GElf_Rela);
- buf = malloc(size);
+ buf = malloc(sec->sh.sh_size);
if (!buf) {
perror("malloc");
return -1;
}
sec->data->d_buf = buf;
- sec->data->d_size = size;
+ sec->data->d_size = sec->sh.sh_size;
sec->data->d_type = ELF_T_RELA;
- sec->sh.sh_size = size;
-
idx = 0;
list_for_each_entry(reloc, &sec->reloc_list, list) {
reloc->rela.r_offset = reloc->offset;
reloc->rela.r_addend = reloc->addend;
reloc->rela.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
- gelf_update_rela(sec->data, idx, &reloc->rela);
+ if (!gelf_update_rela(sec->data, idx, &reloc->rela)) {
+ WARN_ELF("gelf_update_rela");
+ return -1;
+ }
idx++;
}
@@ -1044,16 +1043,9 @@ static int elf_rebuild_rela_reloc_section(struct section *sec, int nr)
static int elf_rebuild_reloc_section(struct elf *elf, struct section *sec)
{
- struct reloc *reloc;
- int nr;
-
- nr = 0;
- list_for_each_entry(reloc, &sec->reloc_list, list)
- nr++;
-
switch (sec->sh.sh_type) {
- case SHT_REL: return elf_rebuild_rel_reloc_section(sec, nr);
- case SHT_RELA: return elf_rebuild_rela_reloc_section(sec, nr);
+ case SHT_REL: return elf_rebuild_rel_reloc_section(sec);
+ case SHT_RELA: return elf_rebuild_rela_reloc_section(sec);
default: return -1;
}
}
@@ -1113,12 +1105,6 @@ int elf_write(struct elf *elf)
/* Update changed relocation sections and section headers: */
list_for_each_entry(sec, &elf->sections, list) {
if (sec->changed) {
- if (sec->base &&
- elf_rebuild_reloc_section(elf, sec)) {
- WARN("elf_rebuild_reloc_section");
- return -1;
- }
-
s = elf_getscn(elf->elf, sec->idx);
if (!s) {
WARN_ELF("elf_getscn");
@@ -1129,6 +1115,12 @@ int elf_write(struct elf *elf)
return -1;
}
+ if (sec->base &&
+ elf_rebuild_reloc_section(elf, sec)) {
+ WARN("elf_rebuild_reloc_section");
+ return -1;
+ }
+
sec->changed = false;
elf->changed = true;
}
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
index e34395047530..075d8291b854 100644
--- a/tools/objtool/include/objtool/elf.h
+++ b/tools/objtool/include/objtool/elf.h
@@ -38,7 +38,6 @@ struct section {
Elf_Data *data;
char *name;
int idx;
- unsigned int len;
bool changed, text, rodata, noinstr;
};
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index dc9b7dd314b0..b5865e2450cb 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -204,7 +204,7 @@ int orc_create(struct objtool_file *file)
/* Add a section terminator */
if (!empty) {
- orc_list_add(&orc_list, &null, sec, sec->len);
+ orc_list_add(&orc_list, &null, sec, sec->sh.sh_size);
nr++;
}
}
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index bc925cf19e2d..06c3eacab3d5 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -58,6 +58,13 @@ void __weak arch_handle_alternative(unsigned short feature, struct special_alt *
{
}
+static void reloc_to_sec_off(struct reloc *reloc, struct section **sec,
+ unsigned long *off)
+{
+ *sec = reloc->sym->sec;
+ *off = reloc->sym->offset + reloc->addend;
+}
+
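A worked note on the helper (numbers hypothetical): for a reloc against function symbol func at .text+0x40 with addend 0x4, it yields *sec == .text and *off == 0x44; for an STT_SECTION symbol, sym->offset is 0, so *off degenerates to the addend, matching the section-only behavior removed below:

	/* func at .text+0x40, addend 0x4 => sec = .text, off = 0x44 */
	reloc_to_sec_off(reloc, &alt->orig_sec, &alt->orig_off);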
static int get_alt_entry(struct elf *elf, struct special_entry *entry,
struct section *sec, int idx,
struct special_alt *alt)
@@ -91,14 +98,8 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
return -1;
}
- if (orig_reloc->sym->type != STT_SECTION) {
- WARN_FUNC("don't know how to handle non-section reloc symbol %s",
- sec, offset + entry->orig, orig_reloc->sym->name);
- return -1;
- }
- alt->orig_sec = orig_reloc->sym->sec;
- alt->orig_off = orig_reloc->addend;
+ reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);
if (!entry->group || alt->new_len) {
new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
@@ -116,8 +117,7 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
if (arch_is_retpoline(new_reloc->sym))
return 1;
- alt->new_sec = new_reloc->sym->sec;
- alt->new_off = (unsigned int)new_reloc->addend;
+ reloc_to_sec_off(new_reloc, &alt->new_sec, &alt->new_off);
/* _ASM_EXTABLE_EX hack */
if (alt->new_off >= 0x7ffffff0)
@@ -159,13 +159,13 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
if (!sec)
continue;
- if (sec->len % entry->size != 0) {
+ if (sec->sh.sh_size % entry->size != 0) {
WARN("%s size not a multiple of %d",
sec->name, entry->size);
return -1;
}
- nr_entries = sec->len / entry->size;
+ nr_entries = sec->sh.sh_size / entry->size;
for (idx = 0; idx < nr_entries; idx++) {
alt = malloc(sizeof(*alt));
diff --git a/tools/perf/Documentation/jitdump-specification.txt b/tools/perf/Documentation/jitdump-specification.txt
index 52152d156ad9..79936355d819 100644
--- a/tools/perf/Documentation/jitdump-specification.txt
+++ b/tools/perf/Documentation/jitdump-specification.txt
@@ -164,7 +164,7 @@ const char unwinding_data[n]: an array of unwinding data, consisting of the EH F
The EH Frame header follows the Linux Standard Base (LSB) specification as described in the document at https://refspecs.linuxfoundation.org/LSB_1.3.0/gLSB/gLSB/ehframehdr.html
-The EH Frame follows the LSB specicfication as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
+The EH Frame follows the LSB specification as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
NOTE: The mapped_size is generally either the same as unwind_data_size (if the unwinding data was mapped in memory by the running process) or zero (if the unwinding data is not mapped by the process). If the unwinding data was not mapped, then only the EH Frame Header will be read, which can be used to specify FP based unwinding for a function which does not have unwinding information.
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt
index de6beedb7283..3b6a2c84ea02 100644
--- a/tools/perf/Documentation/perf-c2c.txt
+++ b/tools/perf/Documentation/perf-c2c.txt
@@ -261,7 +261,7 @@ COALESCE
User can specify how to sort offsets for cacheline.
Following fields are available and governs the final
-output fields set for caheline offsets output:
+output fields set for cacheline offsets output:
tid - coalesced by process TIDs
pid - coalesced by process PIDs
diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt
index 184ba62420f0..db465fa7ee91 100644
--- a/tools/perf/Documentation/perf-intel-pt.txt
+++ b/tools/perf/Documentation/perf-intel-pt.txt
@@ -883,7 +883,7 @@ and "r" can be combined to get calls and returns.
"Transactions" events correspond to the start or end of transactions. The
'flags' field can be used in perf script to determine whether the event is a
-tranasaction start, commit or abort.
+transaction start, commit or abort.
Note that "instructions", "branches" and "transactions" events depend on code
flow packets which can be disabled by using the config term "branch=0". Refer
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index 74d774592196..1b4d452923d7 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -44,7 +44,7 @@ COMMON OPTIONS
-f::
--force::
- Don't complan, do it.
+ Don't complain, do it.
REPORT OPTIONS
--------------
diff --git a/tools/perf/Documentation/perf-script-perl.txt b/tools/perf/Documentation/perf-script-perl.txt
index 5a1f68122f50..fa4f39d305a7 100644
--- a/tools/perf/Documentation/perf-script-perl.txt
+++ b/tools/perf/Documentation/perf-script-perl.txt
@@ -54,7 +54,7 @@ all sched_wakeup events in the system:
Traces meant to be processed using a script should be recorded with
the above option: -a to enable system-wide collection.
-The format file for the sched_wakep event defines the following fields
+The format file for the sched_wakeup event defines the following fields
(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
----
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index 0250dc61cf98..cf4b7f4b625a 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -448,7 +448,7 @@ all sched_wakeup events in the system:
Traces meant to be processed using a script should be recorded with
the above option: -a to enable system-wide collection.
-The format file for the sched_wakep event defines the following fields
+The format file for the sched_wakeup event defines the following fields
(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
----
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 4c9310be6acc..7e6fb7cbc0f4 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -385,7 +385,7 @@ Aggregate counts per physical processor for system-wide mode measurements.
Print metrics or metricgroups specified in a comma separated list.
For a group all metrics from the group are added.
The events from the metrics are automatically measured.
-See perf list output for the possble metrics and metricgroups.
+See perf list output for the possible metrics and metricgroups.
-A::
--no-aggr::
diff --git a/tools/perf/Documentation/topdown.txt b/tools/perf/Documentation/topdown.txt
index c6302df4cf29..a15b93fdcf50 100644
--- a/tools/perf/Documentation/topdown.txt
+++ b/tools/perf/Documentation/topdown.txt
@@ -2,7 +2,7 @@ Using TopDown metrics in user space
-----------------------------------
Intel CPUs (since Sandy Bridge and Silvermont) support a TopDown
-methology to break down CPU pipeline execution into 4 bottlenecks:
+methodology to break down CPU pipeline execution into 4 bottlenecks:
frontend bound, backend bound, bad speculation, retiring.
For more details on Topdown see [1][5]
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 446180401e26..14e3e8d702a0 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -143,7 +143,7 @@ FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
ifdef CSINCLUDES
LIBOPENCSD_CFLAGS := -I$(CSINCLUDES)
endif
-OPENCSDLIBS := -lopencsd_c_api -lopencsd
+OPENCSDLIBS := -lopencsd_c_api -lopencsd -lstdc++
ifdef CSLIBS
LIBOPENCSD_LDFLAGS := -L$(CSLIBS)
endif
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index e04313c4d840..5cd702062a04 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -802,7 +802,7 @@ endif
$(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
-LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(LDFLAGS)'
+LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(filter-out -static,$(LDFLAGS))'
$(LIBTRACEEVENT): FORCE
$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a
diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c
index c7c7ec0812d5..5fc6a2a3dbc5 100644
--- a/tools/perf/arch/arm/util/auxtrace.c
+++ b/tools/perf/arch/arm/util/auxtrace.c
@@ -8,10 +8,10 @@
#include <linux/coresight-pmu.h>
#include <linux/zalloc.h>
-#include "../../util/auxtrace.h"
-#include "../../util/debug.h"
-#include "../../util/evlist.h"
-#include "../../util/pmu.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/debug.h"
+#include "../../../util/evlist.h"
+#include "../../../util/pmu.h"
#include "cs-etm.h"
#include "arm-spe.h"
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 515aae470e23..293a23bf8be3 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -16,19 +16,19 @@
#include <linux/zalloc.h>
#include "cs-etm.h"
-#include "../../util/debug.h"
-#include "../../util/record.h"
-#include "../../util/auxtrace.h"
-#include "../../util/cpumap.h"
-#include "../../util/event.h"
-#include "../../util/evlist.h"
-#include "../../util/evsel.h"
-#include "../../util/perf_api_probe.h"
-#include "../../util/evsel_config.h"
-#include "../../util/pmu.h"
-#include "../../util/cs-etm.h"
+#include "../../../util/debug.h"
+#include "../../../util/record.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/event.h"
+#include "../../../util/evlist.h"
+#include "../../../util/evsel.h"
+#include "../../../util/perf_api_probe.h"
+#include "../../../util/evsel_config.h"
+#include "../../../util/pmu.h"
+#include "../../../util/cs-etm.h"
#include <internal/lib.h> // page_size
-#include "../../util/session.h"
+#include "../../../util/session.h"
#include <errno.h>
#include <stdlib.h>
diff --git a/tools/perf/arch/arm/util/perf_regs.c b/tools/perf/arch/arm/util/perf_regs.c
index 2864e2e3776d..2833e101a7c6 100644
--- a/tools/perf/arch/arm/util/perf_regs.c
+++ b/tools/perf/arch/arm/util/perf_regs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include "../../util/perf_regs.h"
+#include "../../../util/perf_regs.h"
const struct sample_reg sample_reg_masks[] = {
SMPL_REG_END
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index bbc297a7e2e3..b8b23b9dc598 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -10,7 +10,7 @@
#include <linux/string.h>
#include "arm-spe.h"
-#include "../../util/pmu.h"
+#include "../../../util/pmu.h"
struct perf_event_attr
*perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
diff --git a/tools/perf/arch/arm/util/unwind-libdw.c b/tools/perf/arch/arm/util/unwind-libdw.c
index 36ba4c69c3c5..b7692cb0c733 100644
--- a/tools/perf/arch/arm/util/unwind-libdw.c
+++ b/tools/perf/arch/arm/util/unwind-libdw.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <elfutils/libdwfl.h>
-#include "../../util/unwind-libdw.h"
-#include "../../util/perf_regs.h"
-#include "../../util/event.h"
+#include "../../../util/unwind-libdw.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/event.h"
bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
{
diff --git a/tools/perf/arch/arm/util/unwind-libunwind.c b/tools/perf/arch/arm/util/unwind-libunwind.c
index 3a550225dfaf..438906bf0014 100644
--- a/tools/perf/arch/arm/util/unwind-libunwind.c
+++ b/tools/perf/arch/arm/util/unwind-libunwind.c
@@ -3,8 +3,8 @@
#include <errno.h>
#include <libunwind.h>
#include "perf_regs.h"
-#include "../../util/unwind.h"
-#include "../../util/debug.h"
+#include "../../../util/unwind.h"
+#include "../../../util/debug.h"
int libunwind__arch_reg_id(int regnum)
{
diff --git a/tools/perf/arch/x86/util/iostat.c b/tools/perf/arch/x86/util/iostat.c
index eeafe97b8105..792cd75ade33 100644
--- a/tools/perf/arch/x86/util/iostat.c
+++ b/tools/perf/arch/x86/util/iostat.c
@@ -432,7 +432,7 @@ void iostat_print_metric(struct perf_stat_config *config, struct evsel *evsel,
u8 die = ((struct iio_root_port *)evsel->priv)->die;
struct perf_counts_values *count = perf_counts(evsel->counts, die, 0);
- if (count->run && count->ena) {
+ if (count && count->run && count->ena) {
if (evsel->prev_raw_counts && !out->force_header) {
struct perf_counts_values *prev_count =
perf_counts(evsel->prev_raw_counts, die, 0);
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 0e824f7d8b19..6211d0b84b7a 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -368,16 +368,6 @@ static inline int output_type(unsigned int type)
return OUTPUT_TYPE_OTHER;
}
-static inline unsigned int attr_type(unsigned int type)
-{
- switch (type) {
- case OUTPUT_TYPE_SYNTH:
- return PERF_TYPE_SYNTH;
- default:
- return type;
- }
-}
-
static bool output_set_by_user(void)
{
int j;
@@ -556,6 +546,18 @@ static void set_print_ip_opts(struct perf_event_attr *attr)
output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE;
}
+static struct evsel *find_first_output_type(struct evlist *evlist,
+ unsigned int type)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (output_type(evsel->core.attr.type) == (int)type)
+ return evsel;
+ }
+ return NULL;
+}
+
/*
* verify all user requested events exist and the samples
* have the expected data
@@ -567,7 +569,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
struct evsel *evsel;
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
- evsel = perf_session__find_first_evtype(session, attr_type(j));
+ evsel = find_first_output_type(session->evlist, j);
/*
* even if fields is set to 0 (ie., show nothing) event must
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index f6e87b7be5fa..f0ecfda34ece 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -2408,6 +2408,8 @@ int cmd_stat(int argc, const char **argv)
goto out;
} else if (verbose)
iostat_list(evsel_list, &stat_config);
+ if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
+ target.system_wide = true;
}
if (add_default_attributes())
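Behavioral note: with this change, `perf stat --iostat` without an explicit CPU list now counts system-wide, while an explicit list such as `perf stat --iostat -C 0-1` is still honored.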
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/other.json b/tools/perf/pmu-events/arch/powerpc/power8/other.json
index 84a0cedf1fd9..f1f2965f6775 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/other.json
@@ -1046,7 +1046,7 @@
{
"EventCode": "0x4e010",
"EventName": "PM_GCT_NOSLOT_IC_L3MISS",
- "BriefDescription": "Gct empty for this thread due to icach l3 miss",
+ "BriefDescription": "Gct empty for this thread due to icache l3 miss",
"PublicDescription": ""
},
{
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 6731b3cf0c2f..7c887d37b893 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -1285,6 +1285,7 @@ int main(int argc, char *argv[])
}
free_arch_std_events();
+ free_sys_event_tables();
free(mapfile);
return 0;
@@ -1306,6 +1307,7 @@ err_close_eventsfp:
create_empty_mapping(output_file);
err_out:
free_arch_std_events();
+ free_sys_event_tables();
free(mapfile);
return ret;
}
diff --git a/tools/perf/tests/attr/test-stat-default b/tools/perf/tests/attr/test-stat-default
index d9e99b3f77e6..d8ea6a88163f 100644
--- a/tools/perf/tests/attr/test-stat-default
+++ b/tools/perf/tests/attr/test-stat-default
@@ -68,3 +68,100 @@ fd=10
type=0
config=5
optional=1
+
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
diff --git a/tools/perf/tests/attr/test-stat-detailed-1 b/tools/perf/tests/attr/test-stat-detailed-1
index 8b04a055d154..b656ab93c5bf 100644
--- a/tools/perf/tests/attr/test-stat-detailed-1
+++ b/tools/perf/tests/attr/test-stat-detailed-1
@@ -70,12 +70,109 @@ type=0
config=5
optional=1
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
-[event11:base-stat]
-fd=11
+[event20:base-stat]
+fd=20
type=3
config=0
optional=1
@@ -84,8 +181,8 @@ optional=1
# PERF_COUNT_HW_CACHE_L1D << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
-[event12:base-stat]
-fd=12
+[event21:base-stat]
+fd=21
type=3
config=65536
optional=1
@@ -94,8 +191,8 @@ optional=1
# PERF_COUNT_HW_CACHE_LL << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
-[event13:base-stat]
-fd=13
+[event22:base-stat]
+fd=22
type=3
config=2
optional=1
@@ -104,8 +201,8 @@ optional=1
# PERF_COUNT_HW_CACHE_LL << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
-[event14:base-stat]
-fd=14
+[event23:base-stat]
+fd=23
type=3
config=65538
optional=1
diff --git a/tools/perf/tests/attr/test-stat-detailed-2 b/tools/perf/tests/attr/test-stat-detailed-2
index 4fca9f1bfbf8..97625090a1c4 100644
--- a/tools/perf/tests/attr/test-stat-detailed-2
+++ b/tools/perf/tests/attr/test-stat-detailed-2
@@ -70,12 +70,109 @@ type=0
config=5
optional=1
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
-[event11:base-stat]
-fd=11
+[event20:base-stat]
+fd=20
type=3
config=0
optional=1
@@ -84,8 +181,8 @@ optional=1
# PERF_COUNT_HW_CACHE_L1D << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
-[event12:base-stat]
-fd=12
+[event21:base-stat]
+fd=21
type=3
config=65536
optional=1
@@ -94,8 +191,8 @@ optional=1
# PERF_COUNT_HW_CACHE_LL << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
-[event13:base-stat]
-fd=13
+[event22:base-stat]
+fd=22
type=3
config=2
optional=1
@@ -104,8 +201,8 @@ optional=1
# PERF_COUNT_HW_CACHE_LL << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
-[event14:base-stat]
-fd=14
+[event23:base-stat]
+fd=23
type=3
config=65538
optional=1
@@ -114,8 +211,8 @@ optional=1
# PERF_COUNT_HW_CACHE_L1I << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
-[event15:base-stat]
-fd=15
+[event24:base-stat]
+fd=24
type=3
config=1
optional=1
@@ -124,8 +221,8 @@ optional=1
# PERF_COUNT_HW_CACHE_L1I << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
-[event16:base-stat]
-fd=16
+[event25:base-stat]
+fd=25
type=3
config=65537
optional=1
@@ -134,8 +231,8 @@ optional=1
# PERF_COUNT_HW_CACHE_DTLB << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
-[event17:base-stat]
-fd=17
+[event26:base-stat]
+fd=26
type=3
config=3
optional=1
@@ -144,8 +241,8 @@ optional=1
# PERF_COUNT_HW_CACHE_DTLB << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
-[event18:base-stat]
-fd=18
+[event27:base-stat]
+fd=27
type=3
config=65539
optional=1
@@ -154,8 +251,8 @@ optional=1
# PERF_COUNT_HW_CACHE_ITLB << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
-[event19:base-stat]
-fd=19
+[event28:base-stat]
+fd=28
type=3
config=4
optional=1
@@ -164,8 +261,8 @@ optional=1
# PERF_COUNT_HW_CACHE_ITLB << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
-[event20:base-stat]
-fd=20
+[event29:base-stat]
+fd=29
type=3
config=65540
optional=1
diff --git a/tools/perf/tests/attr/test-stat-detailed-3 b/tools/perf/tests/attr/test-stat-detailed-3
index 4bb58e1c82a6..d555042e3fbf 100644
--- a/tools/perf/tests/attr/test-stat-detailed-3
+++ b/tools/perf/tests/attr/test-stat-detailed-3
@@ -70,12 +70,109 @@ type=0
config=5
optional=1
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
-[event11:base-stat]
-fd=11
+[event20:base-stat]
+fd=20
type=3
config=0
optional=1
@@ -84,8 +181,8 @@ optional=1
# PERF_COUNT_HW_CACHE_L1D << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
-[event12:base-stat]
-fd=12
+[event21:base-stat]
+fd=21
type=3
config=65536
optional=1
@@ -94,8 +191,8 @@ optional=1
# PERF_COUNT_HW_CACHE_LL << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
-[event13:base-stat]
-fd=13
+[event22:base-stat]
+fd=22
type=3
config=2
optional=1
@@ -104,8 +201,8 @@ optional=1
# PERF_COUNT_HW_CACHE_LL << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
-[event14:base-stat]
-fd=14
+[event23:base-stat]
+fd=23
type=3
config=65538
optional=1
@@ -114,8 +211,8 @@ optional=1
# PERF_COUNT_HW_CACHE_L1I << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
-[event15:base-stat]
-fd=15
+[event24:base-stat]
+fd=24
type=3
config=1
optional=1
@@ -124,8 +221,8 @@ optional=1
# PERF_COUNT_HW_CACHE_L1I << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
-[event16:base-stat]
-fd=16
+[event25:base-stat]
+fd=25
type=3
config=65537
optional=1
@@ -134,8 +231,8 @@ optional=1
# PERF_COUNT_HW_CACHE_DTLB << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
-[event17:base-stat]
-fd=17
+[event26:base-stat]
+fd=26
type=3
config=3
optional=1
@@ -144,8 +241,8 @@ optional=1
# PERF_COUNT_HW_CACHE_DTLB << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
-[event18:base-stat]
-fd=18
+[event27:base-stat]
+fd=27
type=3
config=65539
optional=1
@@ -154,8 +251,8 @@ optional=1
# PERF_COUNT_HW_CACHE_ITLB << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
-[event19:base-stat]
-fd=19
+[event28:base-stat]
+fd=28
type=3
config=4
optional=1
@@ -164,8 +261,8 @@ optional=1
# PERF_COUNT_HW_CACHE_ITLB << 0 |
# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
-[event20:base-stat]
-fd=20
+[event29:base-stat]
+fd=29
type=3
config=65540
optional=1
@@ -174,8 +271,8 @@ optional=1
# PERF_COUNT_HW_CACHE_L1D << 0 |
# (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
-[event21:base-stat]
-fd=21
+[event30:base-stat]
+fd=30
type=3
config=512
optional=1
@@ -184,8 +281,8 @@ optional=1
# PERF_COUNT_HW_CACHE_L1D << 0 |
# (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
-[event22:base-stat]
-fd=22
+[event31:base-stat]
+fd=31
type=3
config=66048
optional=1
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 9866cddebf23..9b4a765e4b73 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -229,8 +229,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
struct thread *thread, struct state *state)
{
struct addr_location al;
- unsigned char buf1[BUFSZ];
- unsigned char buf2[BUFSZ];
+ unsigned char buf1[BUFSZ] = {0};
+ unsigned char buf2[BUFSZ] = {0};
size_t ret_len;
u64 objdump_addr;
const char *objdump_name;
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index a288035eb362..c756284b3b13 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -20,6 +20,23 @@
/* For bsearch. We try to unwind functions in shared object. */
#include <stdlib.h>
+/*
+ * The test will assert frames are on the stack but tail call optimizations lose
+ * the frame of the caller. Clang can disable this optimization on a called
+ * function but GCC currently (11/2020) lacks this attribute. The barrier is
+ * used to inhibit tail calls in these cases.
+ */
+#ifdef __has_attribute
+#if __has_attribute(disable_tail_calls)
+#define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls))
+#define NO_TAIL_CALL_BARRIER
+#endif
+#endif
+#ifndef NO_TAIL_CALL_ATTRIBUTE
+#define NO_TAIL_CALL_ATTRIBUTE
+#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");
+#endif
+
static int mmap_handler(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
@@ -91,7 +108,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
return strcmp((const char *) symbol, funcs[idx]);
}
-noinline int test_dwarf_unwind__thread(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thread)
{
struct perf_sample sample;
unsigned long cnt = 0;
@@ -122,7 +139,7 @@ noinline int test_dwarf_unwind__thread(struct thread *thread)
static int global_unwind_retval = -INT_MAX;
-noinline int test_dwarf_unwind__compare(void *p1, void *p2)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__compare(void *p1, void *p2)
{
/* Any possible value should be 'thread' */
struct thread *thread = *(struct thread **)p1;
@@ -141,7 +158,7 @@ noinline int test_dwarf_unwind__compare(void *p1, void *p2)
return p1 - p2;
}
-noinline int test_dwarf_unwind__krava_3(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_3(struct thread *thread)
{
struct thread *array[2] = {thread, thread};
void *fp = &bsearch;
@@ -160,14 +177,22 @@ noinline int test_dwarf_unwind__krava_3(struct thread *thread)
return global_unwind_retval;
}
-noinline int test_dwarf_unwind__krava_2(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_2(struct thread *thread)
{
- return test_dwarf_unwind__krava_3(thread);
+ int ret;
+
+ ret = test_dwarf_unwind__krava_3(thread);
+ NO_TAIL_CALL_BARRIER;
+ return ret;
}
-noinline int test_dwarf_unwind__krava_1(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *thread)
{
- return test_dwarf_unwind__krava_2(thread);
+ int ret;
+
+ ret = test_dwarf_unwind__krava_2(thread);
+ NO_TAIL_CALL_BARRIER;
+ return ret;
}
int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused)
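
A self-contained sketch of the pattern this hunk introduces (my_leaf()/my_caller() are invented names, not part of the perf test): mark functions with the attribute where the compiler supports it, and otherwise place an empty asm memory barrier after the call so the compiler cannot convert the call into a tail jump and drop the caller's frame:

    #ifdef __has_attribute
    #if __has_attribute(disable_tail_calls)
    #define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls))
    #define NO_TAIL_CALL_BARRIER
    #endif
    #endif
    #ifndef NO_TAIL_CALL_ATTRIBUTE
    #define NO_TAIL_CALL_ATTRIBUTE
    #define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");
    #endif

    NO_TAIL_CALL_ATTRIBUTE static int my_leaf(int x)
    {
            return x + 1;
    }

    NO_TAIL_CALL_ATTRIBUTE static int my_caller(int x)
    {
            int ret = my_leaf(x);

            /* Without this, GCC may emit "jmp my_leaf" here and
             * my_caller's frame would vanish from the unwound stack. */
            NO_TAIL_CALL_BARRIER;
            return ret;
    }
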
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 781afe42e90e..fa5bd5c20e96 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -757,25 +757,40 @@ void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
}
void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
- unsigned int row, bool arrow_down)
+ unsigned int row, int diff, bool arrow_down)
{
- unsigned int end_row;
+ int end_row;
- if (row >= browser->top_idx)
- end_row = row - browser->top_idx;
- else
+ if (diff <= 0)
return;
SLsmg_set_char_set(1);
if (arrow_down) {
+ if (row + diff <= browser->top_idx)
+ return;
+
+ end_row = row + diff - browser->top_idx;
ui_browser__gotorc(browser, end_row, column - 1);
- SLsmg_write_char(SLSMG_ULCORN_CHAR);
- ui_browser__gotorc(browser, end_row, column);
- SLsmg_draw_hline(2);
- ui_browser__gotorc(browser, end_row + 1, column - 1);
SLsmg_write_char(SLSMG_LTEE_CHAR);
+
+ while (--end_row >= 0 && end_row > (int)(row - browser->top_idx)) {
+ ui_browser__gotorc(browser, end_row, column - 1);
+ SLsmg_draw_vline(1);
+ }
+
+ end_row = (int)(row - browser->top_idx);
+ if (end_row >= 0) {
+ ui_browser__gotorc(browser, end_row, column - 1);
+ SLsmg_write_char(SLSMG_ULCORN_CHAR);
+ ui_browser__gotorc(browser, end_row, column);
+ SLsmg_draw_hline(2);
+ }
} else {
+ if (row < browser->top_idx)
+ return;
+
+ end_row = row - browser->top_idx;
ui_browser__gotorc(browser, end_row, column - 1);
SLsmg_write_char(SLSMG_LTEE_CHAR);
ui_browser__gotorc(browser, end_row, column);
diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
index 3678eb88f119..510ce4554050 100644
--- a/tools/perf/ui/browser.h
+++ b/tools/perf/ui/browser.h
@@ -51,7 +51,7 @@ void ui_browser__write_graph(struct ui_browser *browser, int graph);
void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
u64 start, u64 end);
void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
- unsigned int row, bool arrow_down);
+ unsigned int row, int diff, bool arrow_down);
void __ui_browser__show_title(struct ui_browser *browser, const char *title);
void ui_browser__show_title(struct ui_browser *browser, const char *title);
int ui_browser__show(struct ui_browser *browser, const char *title,
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index ef4da4295bf7..e81c2493efdf 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -125,13 +125,20 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
ab->selection = al;
}
-static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
+static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
{
struct disasm_line *pos = list_prev_entry(cursor, al.node);
const char *name;
+ int diff = 1;
+
+ while (pos && pos->al.offset == -1) {
+ pos = list_prev_entry(pos, al.node);
+ if (!ab->opts->hide_src_code)
+ diff++;
+ }
if (!pos)
- return false;
+ return 0;
if (ins__is_lock(&pos->ins))
name = pos->ops.locked.ins.name;
@@ -139,9 +146,11 @@ static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
name = pos->ins.name;
if (!name || !cursor->ins.name)
- return false;
+ return 0;
- return ins__is_fused(ab->arch, name, cursor->ins.name);
+ if (ins__is_fused(ab->arch, name, cursor->ins.name))
+ return diff;
+ return 0;
}
static void annotate_browser__draw_current_jump(struct ui_browser *browser)
@@ -155,6 +164,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
struct annotation *notes = symbol__annotation(sym);
u8 pcnt_width = annotation__pcnt_width(notes);
int width;
+ int diff = 0;
/* PLT symbols contain external offsets */
if (strstr(sym->name, "@plt"))
@@ -205,11 +215,11 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
pcnt_width + 2 + notes->widths.addr + width,
from, to);
- if (is_fused(ab, cursor)) {
+ diff = is_fused(ab, cursor);
+ if (diff > 0) {
ui_browser__mark_fused(browser,
pcnt_width + 3 + notes->widths.addr + width,
- from - 1,
- to > from);
+ from - diff, diff, to > from);
}
}
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 683f6d63560e..1a7112a87736 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -24,7 +24,10 @@
struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
{
struct btf *btf;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
int err = btf__get_from_id(id, &btf);
+#pragma GCC diagnostic pop
return err ? ERR_PTR(err) : btf;
}
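
The push/ignored/pop triple above is the standard GCC/Clang idiom for calling a single deprecated function without tripping -Werror; a minimal sketch with a hypothetical deprecated helper:

    /* old_api() is a hypothetical function carrying the deprecated
     * attribute; the pragmas scope the suppression to one call site. */
    __attribute__((deprecated)) int old_api(void) { return 42; }

    int call_old_api(void)
    {
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
            int ret = old_api();
    #pragma GCC diagnostic pop
            return ret;
    }
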
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 4fb5e90d7a57..60ce5908c664 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -801,7 +801,7 @@ int perf_config_set(struct perf_config_set *set,
section->name, item->name);
ret = fn(key, value, data);
if (ret < 0) {
- pr_err("Error: wrong config key-value pair %s=%s\n",
+ pr_err("Error in the given config file: wrong config key-value pair %s=%s\n",
key, value);
/*
* Can't be just a 'break', as perf_config_set__for_each_entry()
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index da19be7da284..44e40bad0e33 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -2149,6 +2149,7 @@ static int add_callchain_ip(struct thread *thread,
al.filtered = 0;
al.sym = NULL;
+ al.srcline = NULL;
if (!cpumode) {
thread__find_cpumode_addr_location(thread, ip, &al);
} else {
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 069c2cfdd3be..352f16076e01 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -2116,7 +2116,7 @@ fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
static int __perf_session__process_decomp_events(struct perf_session *session)
{
s64 skip;
- u64 size, file_pos = 0;
+ u64 size;
struct decomp *decomp = session->decomp_last;
if (!decomp)
@@ -2132,7 +2132,7 @@ static int __perf_session__process_decomp_events(struct perf_session *session)
size = event->header.size;
if (size < sizeof(struct perf_event_header) ||
- (skip = perf_session__process_event(session, event, file_pos)) < 0) {
+ (skip = perf_session__process_event(session, event, decomp->file_pos)) < 0) {
pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
decomp->file_pos + decomp->head, event->header.size, event->header.type);
return -EINVAL;
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 5a931456e718..ac35c61f65f5 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -16,7 +16,7 @@ assert sys.version_info >= (3, 7), "Python version is too old"
from collections import namedtuple
from enum import Enum, auto
-from typing import Iterable
+from typing import Iterable, Sequence
import kunit_config
import kunit_json
@@ -186,6 +186,26 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
exec_result.elapsed_time))
return parse_result
+# Problem:
+# $ kunit.py run --json
+# works as one would expect and prints the parsed test results as JSON.
+# $ kunit.py run --json suite_name
+# would *not* pass suite_name as the filter_glob and print as json.
+# argparse will consider it to be another way of writing
+# $ kunit.py run --json=suite_name
+# i.e. it would run all tests, and dump the json to a `suite_name` file.
+# So we hackily rewrite --json => --json=stdout automatically
+pseudo_bool_flag_defaults = {
+ '--json': 'stdout',
+ '--raw_output': 'kunit',
+}
+def massage_argv(argv: Sequence[str]) -> Sequence[str]:
+ def massage_arg(arg: str) -> str:
+ if arg not in pseudo_bool_flag_defaults:
+ return arg
+ return f'{arg}={pseudo_bool_flag_defaults[arg]}'
+ return list(map(massage_arg, argv))
+
def add_common_opts(parser) -> None:
parser.add_argument('--build_dir',
help='As in the make command, it specifies the build '
@@ -303,7 +323,7 @@ def main(argv, linux=None):
help='Specifies the file to read results from.',
type=str, nargs='?', metavar='input_file')
- cli_args = parser.parse_args(argv)
+ cli_args = parser.parse_args(massage_argv(argv))
if get_kernel_root_path():
os.chdir(get_kernel_root_path())
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 619c4554cbff..1edcc8373b4e 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -408,6 +408,14 @@ class KUnitMainTest(unittest.TestCase):
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ def test_run_raw_output_does_not_take_positional_args(self):
+ # --raw_output is a string flag, but we don't want it to consume
+ # any positional arguments, only ones after an '='
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+ kunit.main(['run', '--raw_output', 'filter_glob'], self.linux_source_mock)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)
+
def test_exec_timeout(self):
timeout = 3453
kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock)
diff --git a/tools/testing/selftests/arm64/signal/test_signals_utils.c b/tools/testing/selftests/arm64/signal/test_signals_utils.c
index 6836510a522f..22722abc9dfa 100644
--- a/tools/testing/selftests/arm64/signal/test_signals_utils.c
+++ b/tools/testing/selftests/arm64/signal/test_signals_utils.c
@@ -266,16 +266,19 @@ int test_init(struct tdescr *td)
td->feats_supported |= FEAT_SSBS;
if (getauxval(AT_HWCAP) & HWCAP_SVE)
td->feats_supported |= FEAT_SVE;
- if (feats_ok(td))
+ if (feats_ok(td)) {
fprintf(stderr,
"Required Features: [%s] supported\n",
feats_to_string(td->feats_required &
td->feats_supported));
- else
+ } else {
fprintf(stderr,
"Required Features: [%s] NOT supported\n",
feats_to_string(td->feats_required &
~td->feats_supported));
+ td->result = KSFT_SKIP;
+ return 0;
+ }
}
/* Perform test specific additional initialization */
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 1a4d30ff3275..aa94739a1835 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -315,7 +315,8 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
linked_vars.skel.h linked_maps.skel.h
LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
- test_ksyms_module.c test_ringbuf.c atomics.c trace_printk.c
+ test_ksyms_module.c test_ringbuf.c atomics.c trace_printk.c \
+ trace_vprintk.c
SKEL_BLACKLIST += $$(LSKELS)
test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o
@@ -375,7 +376,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$(TRUNNER_BPF_PROGS_DIR)/%.c \
$(TRUNNER_BPF_PROGS_DIR)/*.h \
$$(INCLUDE_DIR)/vmlinux.h \
- $(wildcard $(BPFDIR)/bpf_*.h) | $(TRUNNER_OUTPUT)
+ $(wildcard $(BPFDIR)/bpf_*.h) \
+ | $(TRUNNER_OUTPUT) $$(BPFOBJ)
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
$(TRUNNER_BPF_CFLAGS))
diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst
index 8200c0da2769..554553acc6d9 100644
--- a/tools/testing/selftests/bpf/README.rst
+++ b/tools/testing/selftests/bpf/README.rst
@@ -242,3 +242,16 @@ To fix this issue, use a newer libbpf.
.. Links
.. _clang reloc patch: https://reviews.llvm.org/D102712
.. _kernel llvm reloc: /Documentation/bpf/llvm_reloc.rst
+
+Clang dependencies for the u32 spill test (xdpwall)
+===================================================
+The xdpwall selftest requires a change in `Clang 14`__.
+
+Without it, the xdpwall selftest will fail and the error message
+from running test_progs will look like:
+
+.. code-block:: console
+
+ test_xdpwall:FAIL:Does LLVM have https://reviews.llvm.org/D109073? unexpected error: -4007
+
+__ https://reviews.llvm.org/D109073
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index bf307bb9e446..6c511dcd1465 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -14,6 +14,20 @@ void test_attach_probe(void)
struct test_attach_probe* skel;
size_t uprobe_offset;
ssize_t base_addr, ref_ctr_offset;
+ bool legacy;
+
+ /* Check if new-style kprobe/uprobe API is supported.
+ * Kernels that support the new FD-based kprobe and uprobe BPF
+ * attachment through the perf_event_open() syscall expose
+ * /sys/bus/event_source/devices/kprobe/type and
+ * /sys/bus/event_source/devices/uprobe/type files, respectively. They
+ * contain magic numbers that are passed as the "type" field of
+ * perf_event_attr. The lack of such files indicates a legacy kernel
+ * with the old-style kprobe/uprobe attach interface, where per-probe
+ * events are created through tracefs. In that case the ref_ctr_offset
+ * feature is not supported, so we don't test it.
+ */
+ legacy = access("/sys/bus/event_source/devices/kprobe/type", F_OK) != 0;
base_addr = get_base_addr();
if (CHECK(base_addr < 0, "get_base_addr",
@@ -45,10 +59,11 @@ void test_attach_probe(void)
goto cleanup;
skel->links.handle_kretprobe = kretprobe_link;
- ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");
+ if (!legacy)
+ ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");
uprobe_opts.retprobe = false;
- uprobe_opts.ref_ctr_offset = ref_ctr_offset;
+ uprobe_opts.ref_ctr_offset = legacy ? 0 : ref_ctr_offset;
uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
0 /* self pid */,
"/proc/self/exe",
@@ -58,11 +73,12 @@ void test_attach_probe(void)
goto cleanup;
skel->links.handle_uprobe = uprobe_link;
- ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");
+ if (!legacy)
+ ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");
/* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */
uprobe_opts.retprobe = true;
- uprobe_opts.ref_ctr_offset = ref_ctr_offset;
+ uprobe_opts.ref_ctr_offset = legacy ? 0 : ref_ctr_offset;
uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
-1 /* any pid */,
"/proc/self/exe",
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index 52ccf0cf35e1..87f9df653e4e 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -358,12 +358,27 @@ static void test_btf_dump_int_data(struct btf *btf, struct btf_dump *d,
TEST_BTF_DUMP_DATA_OVER(btf, d, NULL, str, int, sizeof(int)-1, "", 1);
#ifdef __SIZEOF_INT128__
- TEST_BTF_DUMP_DATA(btf, d, NULL, str, __int128, BTF_F_COMPACT,
- "(__int128)0xffffffffffffffff",
- 0xffffffffffffffff);
- ASSERT_OK(btf_dump_data(btf, d, "__int128", NULL, 0, &i, 16, str,
- "(__int128)0xfffffffffffffffffffffffffffffffe"),
- "dump __int128");
+	/* GCC encodes the unsigned __int128 type with the name "__int128 unsigned"
+	 * in DWARF, and clang encodes it with the name "unsigned __int128".
+	 * Do an availability test for either variant before running the actual test.
+	 */
+ if (btf__find_by_name(btf, "unsigned __int128") > 0) {
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, unsigned __int128, BTF_F_COMPACT,
+ "(unsigned __int128)0xffffffffffffffff",
+ 0xffffffffffffffff);
+ ASSERT_OK(btf_dump_data(btf, d, "unsigned __int128", NULL, 0, &i, 16, str,
+ "(unsigned __int128)0xfffffffffffffffffffffffffffffffe"),
+ "dump unsigned __int128");
+ } else if (btf__find_by_name(btf, "__int128 unsigned") > 0) {
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, __int128 unsigned, BTF_F_COMPACT,
+ "(__int128 unsigned)0xffffffffffffffff",
+ 0xffffffffffffffff);
+ ASSERT_OK(btf_dump_data(btf, d, "__int128 unsigned", NULL, 0, &i, 16, str,
+ "(__int128 unsigned)0xfffffffffffffffffffffffffffffffe"),
+ "dump unsigned __int128");
+ } else {
+ ASSERT_TRUE(false, "unsigned_int128_not_found");
+ }
#endif
}
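
A minimal sketch of the availability check used above, assuming only libbpf's btf__find_by_name(), which returns a positive type ID when the named type exists in the BTF:

    #include <bpf/btf.h>

    /* Return whichever spelling of the 128-bit unsigned type the producing
     * compiler emitted, or NULL if the BTF contains neither variant. */
    static const char *u128_type_name(const struct btf *btf)
    {
            if (btf__find_by_name(btf, "unsigned __int128") > 0)
                    return "unsigned __int128";     /* clang spelling */
            if (btf__find_by_name(btf, "__int128 unsigned") > 0)
                    return "__int128 unsigned";     /* gcc spelling */
            return NULL;
    }
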
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index 225714f71ac6..ac54e3f91d42 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -458,9 +458,9 @@ static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
return -1;
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "flow_dissector/%i", i);
+ snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (!prog)
return -1;
diff --git a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
index f81db9135ae4..67e86f8d8677 100644
--- a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
@@ -38,10 +38,9 @@ static int create_perf_events(void)
static void close_perf_events(void)
{
- int cpu = 0;
- int fd;
+ int cpu, fd;
- while (cpu++ < cpu_cnt) {
+ for (cpu = 0; cpu < cpu_cnt; cpu++) {
fd = pfd_array[cpu];
if (fd < 0)
break;
diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c
index 95bd12097358..52fe157e2a90 100644
--- a/tools/testing/selftests/bpf/prog_tests/probe_user.c
+++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c
@@ -3,7 +3,7 @@
void test_probe_user(void)
{
- const char *prog_name = "kprobe/__sys_connect";
+ const char *prog_name = "handle_sys_connect";
const char *obj_file = "./test_probe_user.o";
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, );
int err, results_map_fd, sock_fd, duration = 0;
@@ -18,7 +18,7 @@ void test_probe_user(void)
if (!ASSERT_OK_PTR(obj, "obj_open_file"))
return;
- kprobe_prog = bpf_object__find_program_by_title(obj, prog_name);
+ kprobe_prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK(!kprobe_prog, "find_probe",
"prog '%s' not found\n", prog_name))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
index 4e91f4d6466c..873323fb18ba 100644
--- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -1,6 +1,21 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+static void toggle_object_autoload_progs(const struct bpf_object *obj,
+ const char *name_load)
+{
+ struct bpf_program *prog;
+
+ bpf_object__for_each_program(prog, obj) {
+ const char *name = bpf_program__name(prog);
+
+ if (!strcmp(name_load, name))
+ bpf_program__set_autoload(prog, true);
+ else
+ bpf_program__set_autoload(prog, false);
+ }
+}
+
void test_reference_tracking(void)
{
const char *file = "test_sk_lookup_kern.o";
@@ -9,44 +24,49 @@ void test_reference_tracking(void)
.object_name = obj_name,
.relaxed_maps = true,
);
- struct bpf_object *obj;
+ struct bpf_object *obj_iter, *obj = NULL;
struct bpf_program *prog;
__u32 duration = 0;
int err = 0;
- obj = bpf_object__open_file(file, &open_opts);
- if (!ASSERT_OK_PTR(obj, "obj_open_file"))
+ obj_iter = bpf_object__open_file(file, &open_opts);
+ if (!ASSERT_OK_PTR(obj_iter, "obj_iter_open_file"))
return;
- if (CHECK(strcmp(bpf_object__name(obj), obj_name), "obj_name",
+ if (CHECK(strcmp(bpf_object__name(obj_iter), obj_name), "obj_name",
"wrong obj name '%s', expected '%s'\n",
- bpf_object__name(obj), obj_name))
+ bpf_object__name(obj_iter), obj_name))
goto cleanup;
- bpf_object__for_each_program(prog, obj) {
- const char *title;
+ bpf_object__for_each_program(prog, obj_iter) {
+ const char *name;
- /* Ignore .text sections */
- title = bpf_program__section_name(prog);
- if (strstr(title, ".text") != NULL)
+ name = bpf_program__name(prog);
+ if (!test__start_subtest(name))
continue;
- if (!test__start_subtest(title))
- continue;
+ obj = bpf_object__open_file(file, &open_opts);
+ if (!ASSERT_OK_PTR(obj, "obj_open_file"))
+ goto cleanup;
+ toggle_object_autoload_progs(obj, name);
/* Expect verifier failure if test name has 'err' */
- if (strstr(title, "err_") != NULL) {
+ if (strncmp(name, "err_", sizeof("err_") - 1) == 0) {
libbpf_print_fn_t old_print_fn;
old_print_fn = libbpf_set_print(NULL);
- err = !bpf_program__load(prog, "GPL", 0);
+ err = !bpf_object__load(obj);
libbpf_set_print(old_print_fn);
} else {
- err = bpf_program__load(prog, "GPL", 0);
+ err = bpf_object__load(obj);
}
- CHECK(err, title, "\n");
+ ASSERT_OK(err, name);
+
+ bpf_object__close(obj);
+ obj = NULL;
}
cleanup:
bpf_object__close(obj);
+ bpf_object__close(obj_iter);
}
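
The per-subtest open/load above leans on libbpf's autoload control: after bpf_object__open_file(), each program's autoload flag decides whether bpf_object__load() will actually load it. A minimal sketch of loading just one program by name (the helper name is invented):

    #include <string.h>
    #include <bpf/libbpf.h>

    /* Load only the program called @name from an already-opened object;
     * every other program in the object is skipped at load time. */
    static int load_one_prog(struct bpf_object *obj, const char *name)
    {
            struct bpf_program *prog;

            bpf_object__for_each_program(prog, obj)
                    bpf_program__set_autoload(prog,
                            strcmp(bpf_program__name(prog), name) == 0);

            return bpf_object__load(obj);
    }
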
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
index 3a469099f30d..1d272e05188e 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
@@ -48,7 +48,7 @@ configure_stack(void)
return false;
sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf",
"direct-action object-file ./test_sk_assign.o",
- "section classifier/sk_assign_test",
+ "section tc",
(env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "verbose");
if (CHECK(system(tc_cmd), "BPF load failed;",
"run with -vv for more info\n"))
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
index 51fac975b316..bc34f7773444 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
@@ -2,7 +2,7 @@
#include <test_progs.h>
#include "cgroup_helpers.h"
-static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title)
+static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name)
{
enum bpf_attach_type attach_type;
enum bpf_prog_type prog_type;
@@ -15,23 +15,23 @@ static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title)
return -1;
}
- prog = bpf_object__find_program_by_title(obj, title);
+ prog = bpf_object__find_program_by_name(obj, name);
if (!prog) {
- log_err("Failed to find %s BPF program", title);
+ log_err("Failed to find %s BPF program", name);
return -1;
}
err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
attach_type, BPF_F_ALLOW_MULTI);
if (err) {
- log_err("Failed to attach %s BPF program", title);
+ log_err("Failed to attach %s BPF program", name);
return -1;
}
return 0;
}
-static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title)
+static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name)
{
enum bpf_attach_type attach_type;
enum bpf_prog_type prog_type;
@@ -42,7 +42,7 @@ static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title)
if (err)
return -1;
- prog = bpf_object__find_program_by_title(obj, title);
+ prog = bpf_object__find_program_by_name(obj, name);
if (!prog)
return -1;
@@ -89,7 +89,7 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
* - child: 0x80 -> 0x90
*/
- err = prog_attach(obj, cg_child, "cgroup/getsockopt/child");
+ err = prog_attach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child");
if (err)
goto detach;
@@ -113,7 +113,7 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
* - parent: 0x90 -> 0xA0
*/
- err = prog_attach(obj, cg_parent, "cgroup/getsockopt/parent");
+ err = prog_attach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent");
if (err)
goto detach;
@@ -157,7 +157,7 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
* - parent: unexpected 0x40, EPERM
*/
- err = prog_detach(obj, cg_child, "cgroup/getsockopt/child");
+ err = prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child");
if (err) {
log_err("Failed to detach child program");
goto detach;
@@ -198,8 +198,8 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
}
detach:
- prog_detach(obj, cg_child, "cgroup/getsockopt/child");
- prog_detach(obj, cg_parent, "cgroup/getsockopt/parent");
+ prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child");
+ prog_detach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent");
return err;
}
@@ -236,7 +236,7 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
/* Attach child program and make sure it adds 0x10. */
- err = prog_attach(obj, cg_child, "cgroup/setsockopt");
+ err = prog_attach(obj, cg_child, "cgroup/setsockopt", "_setsockopt");
if (err)
goto detach;
@@ -263,7 +263,7 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
/* Attach parent program and make sure it adds another 0x10. */
- err = prog_attach(obj, cg_parent, "cgroup/setsockopt");
+ err = prog_attach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt");
if (err)
goto detach;
@@ -289,8 +289,8 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
}
detach:
- prog_detach(obj, cg_child, "cgroup/setsockopt");
- prog_detach(obj, cg_parent, "cgroup/setsockopt");
+ prog_detach(obj, cg_child, "cgroup/setsockopt", "_setsockopt");
+ prog_detach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt");
return err;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index 7bf3a7a97d7b..9825f1f7bfcc 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -21,7 +21,7 @@ static void test_tailcall_1(void)
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -38,9 +38,9 @@ static void test_tailcall_1(void)
goto out;
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -70,9 +70,9 @@ static void test_tailcall_1(void)
err, errno, retval);
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -92,9 +92,9 @@ static void test_tailcall_1(void)
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
j = bpf_map__def(prog_array)->max_entries - 1 - i;
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", j);
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -159,7 +159,7 @@ static void test_tailcall_2(void)
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -176,9 +176,9 @@ static void test_tailcall_2(void)
goto out;
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -233,7 +233,7 @@ static void test_tailcall_count(const char *which)
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -249,7 +249,7 @@ static void test_tailcall_count(const char *which)
if (CHECK_FAIL(map_fd < 0))
goto out;
- prog = bpf_object__find_program_by_title(obj, "classifier/0");
+ prog = bpf_object__find_program_by_name(obj, "classifier_0");
if (CHECK_FAIL(!prog))
goto out;
@@ -329,7 +329,7 @@ static void test_tailcall_4(void)
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -354,9 +354,9 @@ static void test_tailcall_4(void)
return;
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -417,7 +417,7 @@ static void test_tailcall_5(void)
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -442,9 +442,9 @@ static void test_tailcall_5(void)
return;
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -503,7 +503,7 @@ static void test_tailcall_bpf2bpf_1(void)
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -521,9 +521,9 @@ static void test_tailcall_bpf2bpf_1(void)
/* nop -> jmp */
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -587,7 +587,7 @@ static void test_tailcall_bpf2bpf_2(void)
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -603,7 +603,7 @@ static void test_tailcall_bpf2bpf_2(void)
if (CHECK_FAIL(map_fd < 0))
goto out;
- prog = bpf_object__find_program_by_title(obj, "classifier/0");
+ prog = bpf_object__find_program_by_name(obj, "classifier_0");
if (CHECK_FAIL(!prog))
goto out;
@@ -665,7 +665,7 @@ static void test_tailcall_bpf2bpf_3(void)
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -682,9 +682,9 @@ static void test_tailcall_bpf2bpf_3(void)
goto out;
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -762,7 +762,7 @@ static void test_tailcall_bpf2bpf_4(bool noise)
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -779,9 +779,9 @@ static void test_tailcall_bpf2bpf_4(bool noise)
goto out;
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
diff --git a/tools/testing/selftests/bpf/prog_tests/trace_printk.c b/tools/testing/selftests/bpf/prog_tests/trace_printk.c
index d39bc00feb45..e47835f0a674 100644
--- a/tools/testing/selftests/bpf/prog_tests/trace_printk.c
+++ b/tools/testing/selftests/bpf/prog_tests/trace_printk.c
@@ -10,7 +10,7 @@
void test_trace_printk(void)
{
- int err, iter = 0, duration = 0, found = 0;
+ int err = 0, iter = 0, found = 0;
struct trace_printk__bss *bss;
struct trace_printk *skel;
char *buf = NULL;
@@ -18,25 +18,24 @@ void test_trace_printk(void)
size_t buflen;
skel = trace_printk__open();
- if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ if (!ASSERT_OK_PTR(skel, "trace_printk__open"))
return;
- ASSERT_EQ(skel->rodata->fmt[0], 'T', "invalid printk fmt string");
+ ASSERT_EQ(skel->rodata->fmt[0], 'T', "skel->rodata->fmt[0]");
skel->rodata->fmt[0] = 't';
err = trace_printk__load(skel);
- if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
+ if (!ASSERT_OK(err, "trace_printk__load"))
goto cleanup;
bss = skel->bss;
err = trace_printk__attach(skel);
- if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ if (!ASSERT_OK(err, "trace_printk__attach"))
goto cleanup;
fp = fopen(TRACEBUF, "r");
- if (CHECK(fp == NULL, "could not open trace buffer",
- "error %d opening %s", errno, TRACEBUF))
+ if (!ASSERT_OK_PTR(fp, "fopen(TRACEBUF)"))
goto cleanup;
/* We do not want to wait forever if this test fails... */
@@ -46,14 +45,10 @@ void test_trace_printk(void)
usleep(1);
trace_printk__detach(skel);
- if (CHECK(bss->trace_printk_ran == 0,
- "bpf_trace_printk never ran",
- "ran == %d", bss->trace_printk_ran))
+ if (!ASSERT_GT(bss->trace_printk_ran, 0, "bss->trace_printk_ran"))
goto cleanup;
- if (CHECK(bss->trace_printk_ret <= 0,
- "bpf_trace_printk returned <= 0 value",
- "got %d", bss->trace_printk_ret))
+ if (!ASSERT_GT(bss->trace_printk_ret, 0, "bss->trace_printk_ret"))
goto cleanup;
/* verify our search string is in the trace buffer */
@@ -66,8 +61,7 @@ void test_trace_printk(void)
break;
}
- if (CHECK(!found, "message from bpf_trace_printk not found",
- "no instance of %s in %s", SEARCHMSG, TRACEBUF))
+ if (!ASSERT_EQ(found, bss->trace_printk_ran, "found"))
goto cleanup;
cleanup:
diff --git a/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c
new file mode 100644
index 000000000000..61a24e62e1a0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+
+#include "trace_vprintk.lskel.h"
+
+#define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe"
+#define SEARCHMSG "1,2,3,4,5,6,7,8,9,10"
+
+void test_trace_vprintk(void)
+{
+ int err = 0, iter = 0, found = 0;
+ struct trace_vprintk__bss *bss;
+ struct trace_vprintk *skel;
+ char *buf = NULL;
+ FILE *fp = NULL;
+ size_t buflen;
+
+ skel = trace_vprintk__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load"))
+ goto cleanup;
+
+ bss = skel->bss;
+
+ err = trace_vprintk__attach(skel);
+ if (!ASSERT_OK(err, "trace_vprintk__attach"))
+ goto cleanup;
+
+ fp = fopen(TRACEBUF, "r");
+ if (!ASSERT_OK_PTR(fp, "fopen(TRACEBUF)"))
+ goto cleanup;
+
+ /* We do not want to wait forever if this test fails... */
+ fcntl(fileno(fp), F_SETFL, O_NONBLOCK);
+
+ /* wait for tracepoint to trigger */
+ usleep(1);
+ trace_vprintk__detach(skel);
+
+ if (!ASSERT_GT(bss->trace_vprintk_ran, 0, "bss->trace_vprintk_ran"))
+ goto cleanup;
+
+ if (!ASSERT_GT(bss->trace_vprintk_ret, 0, "bss->trace_vprintk_ret"))
+ goto cleanup;
+
+ /* verify our search string is in the trace buffer */
+ while (getline(&buf, &buflen, fp) >= 0 || errno == EAGAIN) {
+ if (strstr(buf, SEARCHMSG) != NULL)
+ found++;
+ if (found == bss->trace_vprintk_ran)
+ break;
+ if (++iter > 1000)
+ break;
+ }
+
+ if (!ASSERT_EQ(found, bss->trace_vprintk_ran, "found"))
+ goto cleanup;
+
+ if (!ASSERT_LT(bss->null_data_vprintk_ret, 0, "bss->null_data_vprintk_ret"))
+ goto cleanup;
+
+cleanup:
+ trace_vprintk__destroy(skel);
+ free(buf);
+ if (fp)
+ fclose(fp);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdpwall.c b/tools/testing/selftests/bpf/prog_tests/xdpwall.c
new file mode 100644
index 000000000000..f3927829a55a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdpwall.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "test_progs.h"
+#include "xdpwall.skel.h"
+
+void test_xdpwall(void)
+{
+ struct xdpwall *skel;
+
+ skel = xdpwall__open_and_load();
+	ASSERT_OK_PTR(skel, "Does LLVM have https://reviews.llvm.org/D109073?");
+
+ xdpwall__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index 95a5a0778ed7..f266c757b3df 100644
--- a/tools/testing/selftests/bpf/progs/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
@@ -19,9 +19,8 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-int _version SEC("version") = 1;
#define PROG(F) PROG_(F, _##F)
-#define PROG_(NUM, NAME) SEC("flow_dissector/"#NUM) int bpf_func##NAME
+#define PROG_(NUM, NAME) SEC("flow_dissector") int flow_dissector_##NUM
/* These are the identifiers of the BPF programs that will be used in tail
* calls. Name is limited to 16 characters, with the terminating character and
diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c b/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c
index a25373002055..3f81ff92184c 100644
--- a/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c
+++ b/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c
@@ -20,7 +20,7 @@ struct {
__u32 invocations = 0;
-SEC("cgroup_skb/egress/1")
+SEC("cgroup_skb/egress")
int egress1(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
@@ -32,7 +32,7 @@ int egress1(struct __sk_buff *skb)
return 1;
}
-SEC("cgroup_skb/egress/2")
+SEC("cgroup_skb/egress")
int egress2(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c b/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c
index a149f33bc533..d662db27fe4a 100644
--- a/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c
+++ b/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c
@@ -20,7 +20,7 @@ struct {
__u32 invocations = 0;
-SEC("cgroup_skb/egress/1")
+SEC("cgroup_skb/egress")
int egress1(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
@@ -32,7 +32,7 @@ int egress1(struct __sk_buff *skb)
return 1;
}
-SEC("cgroup_skb/egress/2")
+SEC("cgroup_skb/egress")
int egress2(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
diff --git a/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
index 75e8e1069fe7..df918b2469da 100644
--- a/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
+++ b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
@@ -47,7 +47,7 @@ check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
u32 arraymap_output = 0;
-SEC("classifier")
+SEC("tc")
int test_pkt_access(struct __sk_buff *skb)
{
struct callback_ctx data;
diff --git a/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
index 913dd91aafff..276994d5c0c7 100644
--- a/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
+++ b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
@@ -78,7 +78,7 @@ int hashmap_output = 0;
int hashmap_elems = 0;
int percpu_map_elems = 0;
-SEC("classifier")
+SEC("tc")
int test_pkt_access(struct __sk_buff *skb)
{
struct callback_ctx data;
diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c
index 55e283050cab..7236da72ce80 100644
--- a/tools/testing/selftests/bpf/progs/kfree_skb.c
+++ b/tools/testing/selftests/bpf/progs/kfree_skb.c
@@ -9,8 +9,8 @@
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} perf_buf_map SEC(".maps");
#define _(P) (__builtin_preserve_access_index(P))
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
index 470f8723e463..8a8cf59017aa 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
@@ -8,7 +8,7 @@ extern int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym;
extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b,
__u32 c, __u64 d) __ksym;
-SEC("classifier")
+SEC("tc")
int kfunc_call_test2(struct __sk_buff *skb)
{
struct bpf_sock *sk = skb->sk;
@@ -23,7 +23,7 @@ int kfunc_call_test2(struct __sk_buff *skb)
return bpf_kfunc_call_test2((struct sock *)sk, 1, 2);
}
-SEC("classifier")
+SEC("tc")
int kfunc_call_test1(struct __sk_buff *skb)
{
struct bpf_sock *sk = skb->sk;
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
index 5fbd9e232d44..c1fdecabeabf 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
@@ -33,7 +33,7 @@ int __noinline f1(struct __sk_buff *skb)
return (__u32)bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4);
}
-SEC("classifier")
+SEC("tc")
int kfunc_call_test1(struct __sk_buff *skb)
{
return f1(skb);
diff --git a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
index 25467d13c356..b3fcb5274ee0 100644
--- a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
+++ b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
@@ -11,8 +11,8 @@ typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, 16384);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(stack_trace_t));
+ __type(key, __u32);
+ __type(value, stack_trace_t);
} stackmap SEC(".maps");
struct {
diff --git a/tools/testing/selftests/bpf/progs/skb_pkt_end.c b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
index 7f2eaa2f89f8..992b7861003a 100644
--- a/tools/testing/selftests/bpf/progs/skb_pkt_end.c
+++ b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
@@ -25,7 +25,7 @@ out:
return ip;
}
-SEC("classifier/cls")
+SEC("tc")
int main_prog(struct __sk_buff *skb)
{
struct iphdr *ip = NULL;
diff --git a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
index 4797dc985064..73872c535cbb 100644
--- a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
@@ -7,22 +7,22 @@ int _version SEC("version") = 1;
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} sock_map_rx SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} sock_map_tx SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} sock_map_msg SEC(".maps");
struct {
diff --git a/tools/testing/selftests/bpf/progs/sockopt_multi.c b/tools/testing/selftests/bpf/progs/sockopt_multi.c
index 9d8c212dde9f..177a59069dae 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_multi.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_multi.c
@@ -4,9 +4,8 @@
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1;
-SEC("cgroup/getsockopt/child")
+SEC("cgroup/getsockopt")
int _getsockopt_child(struct bpf_sockopt *ctx)
{
__u8 *optval_end = ctx->optval_end;
@@ -29,7 +28,7 @@ int _getsockopt_child(struct bpf_sockopt *ctx)
return 1;
}
-SEC("cgroup/getsockopt/parent")
+SEC("cgroup/getsockopt")
int _getsockopt_parent(struct bpf_sockopt *ctx)
{
__u8 *optval_end = ctx->optval_end;
diff --git a/tools/testing/selftests/bpf/progs/tailcall1.c b/tools/testing/selftests/bpf/progs/tailcall1.c
index 7115bcefbe8a..8159a0b4a69a 100644
--- a/tools/testing/selftests/bpf/progs/tailcall1.c
+++ b/tools/testing/selftests/bpf/progs/tailcall1.c
@@ -11,8 +11,8 @@ struct {
} jmp_table SEC(".maps");
#define TAIL_FUNC(x) \
- SEC("classifier/" #x) \
- int bpf_func_##x(struct __sk_buff *skb) \
+ SEC("tc") \
+ int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
@@ -20,7 +20,7 @@ TAIL_FUNC(0)
TAIL_FUNC(1)
TAIL_FUNC(2)
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
/* Multiple locations to make sure we patch
@@ -45,4 +45,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall2.c b/tools/testing/selftests/bpf/progs/tailcall2.c
index 0431e4fe7efd..a5ff53e61702 100644
--- a/tools/testing/selftests/bpf/progs/tailcall2.c
+++ b/tools/testing/selftests/bpf/progs/tailcall2.c
@@ -10,41 +10,41 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 1);
return 0;
}
-SEC("classifier/1")
-int bpf_func_1(struct __sk_buff *skb)
+SEC("tc")
+int classifier_1(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 2);
return 1;
}
-SEC("classifier/2")
-int bpf_func_2(struct __sk_buff *skb)
+SEC("tc")
+int classifier_2(struct __sk_buff *skb)
{
return 2;
}
-SEC("classifier/3")
-int bpf_func_3(struct __sk_buff *skb)
+SEC("tc")
+int classifier_3(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 4);
return 3;
}
-SEC("classifier/4")
-int bpf_func_4(struct __sk_buff *skb)
+SEC("tc")
+int classifier_4(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 3);
return 4;
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 0);
@@ -56,4 +56,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall3.c b/tools/testing/selftests/bpf/progs/tailcall3.c
index 910858fe078a..f60bcd7b8d4b 100644
--- a/tools/testing/selftests/bpf/progs/tailcall3.c
+++ b/tools/testing/selftests/bpf/progs/tailcall3.c
@@ -12,15 +12,15 @@ struct {
int count = 0;
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
count++;
bpf_tail_call_static(skb, &jmp_table, 0);
return 1;
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 0);
@@ -28,4 +28,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall4.c b/tools/testing/selftests/bpf/progs/tailcall4.c
index bd4be135c39d..a56bbc2313ca 100644
--- a/tools/testing/selftests/bpf/progs/tailcall4.c
+++ b/tools/testing/selftests/bpf/progs/tailcall4.c
@@ -13,8 +13,8 @@ struct {
int selector = 0;
#define TAIL_FUNC(x) \
- SEC("classifier/" #x) \
- int bpf_func_##x(struct __sk_buff *skb) \
+ SEC("tc") \
+ int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
@@ -22,7 +22,7 @@ TAIL_FUNC(0)
TAIL_FUNC(1)
TAIL_FUNC(2)
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call(skb, &jmp_table, selector);
@@ -30,4 +30,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall5.c b/tools/testing/selftests/bpf/progs/tailcall5.c
index adf30a33064e..8d03496eb6ca 100644
--- a/tools/testing/selftests/bpf/progs/tailcall5.c
+++ b/tools/testing/selftests/bpf/progs/tailcall5.c
@@ -13,8 +13,8 @@ struct {
int selector = 0;
#define TAIL_FUNC(x) \
- SEC("classifier/" #x) \
- int bpf_func_##x(struct __sk_buff *skb) \
+ SEC("tc") \
+ int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
@@ -22,7 +22,7 @@ TAIL_FUNC(0)
TAIL_FUNC(1)
TAIL_FUNC(2)
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
int idx = 0;
@@ -37,4 +37,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall6.c b/tools/testing/selftests/bpf/progs/tailcall6.c
index 0f4a811cc028..d77b8abd62f3 100644
--- a/tools/testing/selftests/bpf/progs/tailcall6.c
+++ b/tools/testing/selftests/bpf/progs/tailcall6.c
@@ -12,8 +12,8 @@ struct {
int count, which;
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
count++;
if (__builtin_constant_p(which))
@@ -22,7 +22,7 @@ int bpf_func_0(struct __sk_buff *skb)
return 1;
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
if (__builtin_constant_p(which))
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
index 0103f3dd9f02..8c91428deb90 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
@@ -10,8 +10,8 @@ struct {
} jmp_table SEC(".maps");
#define TAIL_FUNC(x) \
- SEC("classifier/" #x) \
- int bpf_func_##x(struct __sk_buff *skb) \
+ SEC("tc") \
+ int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
@@ -26,7 +26,7 @@ int subprog_tail(struct __sk_buff *skb)
return skb->len * 2;
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 1);
@@ -35,4 +35,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
index 3cc4c12817b5..ce97d141daee 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
@@ -22,14 +22,14 @@ int subprog_tail(struct __sk_buff *skb)
int count = 0;
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
count++;
return subprog_tail(skb);
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 0);
@@ -38,4 +38,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
index 0d5482bea6c9..7fab39a3bb12 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
@@ -33,23 +33,23 @@ int subprog_tail(struct __sk_buff *skb)
return skb->len * 2;
}
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
volatile char arr[128] = {};
return subprog_tail2(skb);
}
-SEC("classifier/1")
-int bpf_func_1(struct __sk_buff *skb)
+SEC("tc")
+int classifier_1(struct __sk_buff *skb)
{
volatile char arr[128] = {};
return skb->len * 3;
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
volatile char arr[128] = {};
@@ -58,4 +58,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
index e89368a50b97..b67e8022d500 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
@@ -50,30 +50,29 @@ int subprog_tail(struct __sk_buff *skb)
return skb->len;
}
-SEC("classifier/1")
-int bpf_func_1(struct __sk_buff *skb)
+SEC("tc")
+int classifier_1(struct __sk_buff *skb)
{
return subprog_tail_2(skb);
}
-SEC("classifier/2")
-int bpf_func_2(struct __sk_buff *skb)
+SEC("tc")
+int classifier_2(struct __sk_buff *skb)
{
count++;
return subprog_tail_2(skb);
}
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
return subprog_tail_1(skb);
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
return subprog_tail(skb);
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c b/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c
index c1e0c8c7c55f..c218cf8989a9 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c
@@ -21,8 +21,8 @@ struct inner_map_sz2 {
struct outer_arr {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 3);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
/* it's possible to use anonymous struct as inner map definition here */
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
@@ -61,8 +61,8 @@ struct inner_map_sz4 {
struct outer_arr_dyn {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 3);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(map_flags, BPF_F_INNER_MAP);
@@ -81,7 +81,7 @@ struct outer_arr_dyn {
struct outer_hash {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, 5);
- __uint(key_size, sizeof(int));
+ __type(key, int);
/* Here everything works flawlessly due to reuse of struct inner_map
* and compiler will complain at the attempt to use non-inner_map
* references below. This is great experience.
@@ -111,8 +111,8 @@ struct sockarr_sz2 {
struct outer_sockarr_sz1 {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
__array(values, struct sockarr_sz1);
} outer_sockarr SEC(".maps") = {
.values = { (void *)&sockarr_sz1 },
diff --git a/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c b/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c
index 9a6b85dd52d2..e2bea4da194b 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c
@@ -145,7 +145,7 @@ release:
return TC_ACT_OK;
}
-SEC("classifier/ingress")
+SEC("tc")
int cls_ingress(struct __sk_buff *skb)
{
struct ipv6hdr *ip6h;
diff --git a/tools/testing/selftests/bpf/progs/test_cgroup_link.c b/tools/testing/selftests/bpf/progs/test_cgroup_link.c
index 77e47b9e4446..4faba88e45a5 100644
--- a/tools/testing/selftests/bpf/progs/test_cgroup_link.c
+++ b/tools/testing/selftests/bpf/progs/test_cgroup_link.c
@@ -6,14 +6,14 @@
int calls = 0;
int alt_calls = 0;
-SEC("cgroup_skb/egress1")
+SEC("cgroup_skb/egress")
int egress(struct __sk_buff *skb)
{
__sync_fetch_and_add(&calls, 1);
return 1;
}
-SEC("cgroup_skb/egress2")
+SEC("cgroup_skb/egress")
int egress_alt(struct __sk_buff *skb)
{
__sync_fetch_and_add(&alt_calls, 1);
diff --git a/tools/testing/selftests/bpf/progs/test_check_mtu.c b/tools/testing/selftests/bpf/progs/test_check_mtu.c
index 71184af57749..2ec1de11a3ae 100644
--- a/tools/testing/selftests/bpf/progs/test_check_mtu.c
+++ b/tools/testing/selftests/bpf/progs/test_check_mtu.c
@@ -153,7 +153,7 @@ int xdp_input_len_exceed(struct xdp_md *ctx)
return retval;
}
-SEC("classifier")
+SEC("tc")
int tc_use_helper(struct __sk_buff *ctx)
{
int retval = BPF_OK; /* Expected retval on successful test */
@@ -172,7 +172,7 @@ out:
return retval;
}
-SEC("classifier")
+SEC("tc")
int tc_exceed_mtu(struct __sk_buff *ctx)
{
__u32 ifindex = GLOBAL_USER_IFINDEX;
@@ -196,7 +196,7 @@ int tc_exceed_mtu(struct __sk_buff *ctx)
return retval;
}
-SEC("classifier")
+SEC("tc")
int tc_exceed_mtu_da(struct __sk_buff *ctx)
{
/* SKB Direct-Access variant */
@@ -223,7 +223,7 @@ int tc_exceed_mtu_da(struct __sk_buff *ctx)
return retval;
}
-SEC("classifier")
+SEC("tc")
int tc_minus_delta(struct __sk_buff *ctx)
{
int retval = BPF_OK; /* Expected retval on successful test */
@@ -245,7 +245,7 @@ int tc_minus_delta(struct __sk_buff *ctx)
return retval;
}
-SEC("classifier")
+SEC("tc")
int tc_input_len(struct __sk_buff *ctx)
{
int retval = BPF_OK; /* Expected retval on successful test */
@@ -265,7 +265,7 @@ int tc_input_len(struct __sk_buff *ctx)
return retval;
}
-SEC("classifier")
+SEC("tc")
int tc_input_len_exceed(struct __sk_buff *ctx)
{
int retval = BPF_DROP; /* Fail */
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
index e2a5acc4785c..2833ad722cb7 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
@@ -928,7 +928,7 @@ static INLINING verdict_t process_ipv6(buf_t *pkt, metrics_t *metrics)
}
}
-SEC("classifier/cls_redirect")
+SEC("tc")
int cls_redirect(struct __sk_buff *skb)
{
metrics_t *metrics = get_global_metrics();
diff --git a/tools/testing/selftests/bpf/progs/test_global_data.c b/tools/testing/selftests/bpf/progs/test_global_data.c
index 1319be1c54ba..719e314ef3e4 100644
--- a/tools/testing/selftests/bpf/progs/test_global_data.c
+++ b/tools/testing/selftests/bpf/progs/test_global_data.c
@@ -68,7 +68,7 @@ static struct foo struct3 = {
bpf_map_update_elem(&result_##map, &key, var, 0); \
} while (0)
-SEC("classifier/static_data_load")
+SEC("tc")
int load_static_data(struct __sk_buff *skb)
{
static const __u64 bar = ~0;
diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c
index 880260f6d536..7b42dad187b8 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func1.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func1.c
@@ -38,7 +38,7 @@ int f3(int val, struct __sk_buff *skb, int var)
return skb->ifindex * val * var;
}
-SEC("classifier/test")
+SEC("tc")
int test_cls(struct __sk_buff *skb)
{
return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4);
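The test_global_func* programs exercise BPF-to-BPF calls into global (non-static, non-inlined) subprograms, which the verifier validates independently of their call sites. A minimal sketch of the pattern, with hypothetical names:

	__noinline int add_one(int v)
	{
		return v + 1;
	}

	SEC("tc")
	int caller(struct __sk_buff *skb)
	{
		return add_one(skb->ifindex);
	}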
diff --git a/tools/testing/selftests/bpf/progs/test_global_func3.c b/tools/testing/selftests/bpf/progs/test_global_func3.c
index 86f0ecb304fc..01bf8275dfd6 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func3.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func3.c
@@ -54,7 +54,7 @@ int f8(struct __sk_buff *skb)
}
#endif
-SEC("classifier/test")
+SEC("tc")
int test_cls(struct __sk_buff *skb)
{
#ifndef NO_FN8
diff --git a/tools/testing/selftests/bpf/progs/test_global_func5.c b/tools/testing/selftests/bpf/progs/test_global_func5.c
index 260c25b827ef..9248d03e0d06 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func5.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func5.c
@@ -24,7 +24,7 @@ int f3(int val, struct __sk_buff *skb)
return skb->ifindex * val;
}
-SEC("classifier/test")
+SEC("tc")
int test_cls(struct __sk_buff *skb)
{
return f1(skb) + f2(2, skb) + f3(3, skb);
diff --git a/tools/testing/selftests/bpf/progs/test_global_func6.c b/tools/testing/selftests/bpf/progs/test_global_func6.c
index 69e19c64e10b..af8c78bdfb25 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func6.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func6.c
@@ -24,7 +24,7 @@ int f3(int val, struct __sk_buff *skb)
return skb->ifindex * val;
}
-SEC("classifier/test")
+SEC("tc")
int test_cls(struct __sk_buff *skb)
{
return f1(skb) + f2(2, skb) + f3(3, skb);
diff --git a/tools/testing/selftests/bpf/progs/test_global_func7.c b/tools/testing/selftests/bpf/progs/test_global_func7.c
index 309b3f6136bd..6cb8e2f5254c 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func7.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func7.c
@@ -10,7 +10,7 @@ void foo(struct __sk_buff *skb)
skb->tc_index = 0;
}
-SEC("classifier/test")
+SEC("tc")
int test_cls(struct __sk_buff *skb)
{
foo(skb);
diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c
index 1cfeb940cf9f..a6d91932dcd5 100644
--- a/tools/testing/selftests/bpf/progs/test_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c
@@ -9,21 +9,19 @@ struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
__uint(map_flags, 0);
- __uint(key_size, sizeof(__u32));
- /* must be sizeof(__u32) for map in map */
- __uint(value_size, sizeof(__u32));
+ __type(key, __u32);
+ __type(value, __u32);
} mim_array SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, 1);
__uint(map_flags, 0);
- __uint(key_size, sizeof(int));
- /* must be sizeof(__u32) for map in map */
- __uint(value_size, sizeof(__u32));
+ __type(key, int);
+ __type(value, __u32);
} mim_hash SEC(".maps");
-SEC("xdp_mimtest")
+SEC("xdp")
int xdp_mimtest0(struct xdp_md *ctx)
{
int value = 123;
diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c b/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c
index 703c08e06442..9c7d75cf0bd6 100644
--- a/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c
@@ -13,7 +13,7 @@ struct inner {
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 0); /* This will make map creation fail */
- __uint(key_size, sizeof(__u32));
+ __type(key, __u32);
__array(values, struct inner);
} mim SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
index 6077a025092c..2c121c5d66a7 100644
--- a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
+++ b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
@@ -293,7 +293,7 @@ static int handle_passive_estab(struct bpf_sock_ops *skops)
return check_active_hdr_in(skops);
}
-SEC("sockops/misc_estab")
+SEC("sockops")
int misc_estab(struct bpf_sock_ops *skops)
{
int true_val = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c b/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c
index fb22de7c365d..1249a945699f 100644
--- a/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c
+++ b/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c
@@ -7,15 +7,15 @@
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(max_entries, 1);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} array_1 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(max_entries, 1);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
__uint(map_flags, BPF_F_PRESERVE_ELEMS);
} array_2 SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
index 8207a2dc2f9d..d37ce29fd393 100644
--- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c
+++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
@@ -8,8 +8,8 @@
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} perf_buf_map SEC(".maps");
SEC("tp/raw_syscalls/sys_enter")
diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c
index 852051064507..3cfd88141ddc 100644
--- a/tools/testing/selftests/bpf/progs/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c
@@ -97,7 +97,7 @@ int test_pkt_write_access_subprog(struct __sk_buff *skb, __u32 off)
return 0;
}
-SEC("classifier/test_pkt_access")
+SEC("tc")
int test_pkt_access(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_pkt_md_access.c b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
index 610c74ea9f64..d1839366f3e1 100644
--- a/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
@@ -7,8 +7,6 @@
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
-int _version SEC("version") = 1;
-
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define TEST_FIELD(TYPE, FIELD, MASK) \
{ \
@@ -27,7 +25,7 @@ int _version SEC("version") = 1;
}
#endif
-SEC("classifier/test_pkt_md_access")
+SEC("tc")
int test_pkt_md_access(struct __sk_buff *skb)
{
TEST_FIELD(__u8, len, 0xFF);
diff --git a/tools/testing/selftests/bpf/progs/test_probe_user.c b/tools/testing/selftests/bpf/progs/test_probe_user.c
index 89b3532ccc75..8812a90da4eb 100644
--- a/tools/testing/selftests/bpf/progs/test_probe_user.c
+++ b/tools/testing/selftests/bpf/progs/test_probe_user.c
@@ -8,13 +8,37 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#if defined(__TARGET_ARCH_x86)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX "__x64_"
+#elif defined(__TARGET_ARCH_s390)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX "__s390x_"
+#elif defined(__TARGET_ARCH_arm64)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX "__arm64_"
+#else
+#define SYSCALL_WRAPPER 0
+#define SYS_PREFIX ""
+#endif
+
static struct sockaddr_in old;
-SEC("kprobe/__sys_connect")
+SEC("kprobe/" SYS_PREFIX "sys_connect")
int BPF_KPROBE(handle_sys_connect)
{
- void *ptr = (void *)PT_REGS_PARM2(ctx);
+#if SYSCALL_WRAPPER == 1
+ struct pt_regs *real_regs;
+#endif
struct sockaddr_in new;
+ void *ptr;
+
+#if SYSCALL_WRAPPER == 0
+ ptr = (void *)PT_REGS_PARM2(ctx);
+#else
+ real_regs = (struct pt_regs *)PT_REGS_PARM1(ctx);
+ bpf_probe_read_kernel(&ptr, sizeof(ptr), &PT_REGS_PARM2(real_regs));
+#endif
bpf_probe_read_user(&old, sizeof(old), ptr);
__builtin_memset(&new, 0xab, sizeof(new));
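On architectures with syscall wrappers (the x86-64, s390x and arm64 cases above), the kprobe'd __<arch>_sys_*() function receives a single struct pt_regs * holding the user-visible arguments, so each argument must be fetched with bpf_probe_read_kernel() from that nested pt_regs rather than read directly from ctx. A hedged sketch of the same idiom for the first argument, assuming the x86-64 prefix and a hypothetical program name:

	SEC("kprobe/__x64_sys_connect")
	int BPF_KPROBE(trace_connect, struct pt_regs *real_regs)
	{
		long fd;

		/* arguments live inside the nested pt_regs, not in ctx */
		bpf_probe_read_kernel(&fd, sizeof(fd), &PT_REGS_PARM1(real_regs));
		return 0;
	}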
diff --git a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
index 26e77dcc7e91..0f9bc258225e 100644
--- a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
@@ -24,8 +24,8 @@ int _version SEC("version") = 1;
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(__u32));
+ __type(key, __u32);
+ __type(value, __u32);
} outer_map SEC(".maps");
struct {
diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c
index 1ecd987005d2..02f79356d5eb 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_assign.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c
@@ -36,7 +36,6 @@ struct {
.pinning = PIN_GLOBAL_NS,
};
-int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
/* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
@@ -159,7 +158,7 @@ assign:
return ret;
}
-SEC("classifier/sk_assign_test")
+SEC("tc")
int bpf_sk_assign_test(struct __sk_buff *skb)
{
struct bpf_sock_tuple *tuple, ln = {0};
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
index ac6f7f205e25..48534d810391 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
@@ -72,32 +72,32 @@ static const __u16 DST_PORT = 7007; /* Host byte order */
static const __u32 DST_IP4 = IP4(127, 0, 0, 1);
static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001);
-SEC("sk_lookup/lookup_pass")
+SEC("sk_lookup")
int lookup_pass(struct bpf_sk_lookup *ctx)
{
return SK_PASS;
}
-SEC("sk_lookup/lookup_drop")
+SEC("sk_lookup")
int lookup_drop(struct bpf_sk_lookup *ctx)
{
return SK_DROP;
}
-SEC("sk_reuseport/reuse_pass")
+SEC("sk_reuseport")
int reuseport_pass(struct sk_reuseport_md *ctx)
{
return SK_PASS;
}
-SEC("sk_reuseport/reuse_drop")
+SEC("sk_reuseport")
int reuseport_drop(struct sk_reuseport_md *ctx)
{
return SK_DROP;
}
/* Redirect packets destined for port DST_PORT to socket at redir_map[0]. */
-SEC("sk_lookup/redir_port")
+SEC("sk_lookup")
int redir_port(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -116,7 +116,7 @@ int redir_port(struct bpf_sk_lookup *ctx)
}
/* Redirect packets destined for DST_IP4 address to socket at redir_map[0]. */
-SEC("sk_lookup/redir_ip4")
+SEC("sk_lookup")
int redir_ip4(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -139,7 +139,7 @@ int redir_ip4(struct bpf_sk_lookup *ctx)
}
/* Redirect packets destined for DST_IP6 address to socket at redir_map[0]. */
-SEC("sk_lookup/redir_ip6")
+SEC("sk_lookup")
int redir_ip6(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -164,7 +164,7 @@ int redir_ip6(struct bpf_sk_lookup *ctx)
return err ? SK_DROP : SK_PASS;
}
-SEC("sk_lookup/select_sock_a")
+SEC("sk_lookup")
int select_sock_a(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -179,7 +179,7 @@ int select_sock_a(struct bpf_sk_lookup *ctx)
return err ? SK_DROP : SK_PASS;
}
-SEC("sk_lookup/select_sock_a_no_reuseport")
+SEC("sk_lookup")
int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -194,7 +194,7 @@ int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx)
return err ? SK_DROP : SK_PASS;
}
-SEC("sk_reuseport/select_sock_b")
+SEC("sk_reuseport")
int select_sock_b(struct sk_reuseport_md *ctx)
{
__u32 key = KEY_SERVER_B;
@@ -205,7 +205,7 @@ int select_sock_b(struct sk_reuseport_md *ctx)
}
/* Check that bpf_sk_assign() returns -EEXIST if socket already selected. */
-SEC("sk_lookup/sk_assign_eexist")
+SEC("sk_lookup")
int sk_assign_eexist(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -238,7 +238,7 @@ out:
}
/* Check that bpf_sk_assign(BPF_SK_LOOKUP_F_REPLACE) can override selection. */
-SEC("sk_lookup/sk_assign_replace_flag")
+SEC("sk_lookup")
int sk_assign_replace_flag(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -270,7 +270,7 @@ out:
}
/* Check that bpf_sk_assign(sk=NULL) is accepted. */
-SEC("sk_lookup/sk_assign_null")
+SEC("sk_lookup")
int sk_assign_null(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk = NULL;
@@ -313,7 +313,7 @@ out:
}
/* Check that selected sk is accessible through context. */
-SEC("sk_lookup/access_ctx_sk")
+SEC("sk_lookup")
int access_ctx_sk(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk1 = NULL, *sk2 = NULL;
@@ -379,7 +379,7 @@ out:
* are not covered because they give bogus results; that is, the
* verifier ignores the offset.
*/
-SEC("sk_lookup/ctx_narrow_access")
+SEC("sk_lookup")
int ctx_narrow_access(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -553,7 +553,7 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
}
/* Check that sk_assign rejects SERVER_A socket with -ESOCKNOSUPPORT */
-SEC("sk_lookup/sk_assign_esocknosupport")
+SEC("sk_lookup")
int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -578,28 +578,28 @@ out:
return ret;
}
-SEC("sk_lookup/multi_prog_pass1")
+SEC("sk_lookup")
int multi_prog_pass1(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
-SEC("sk_lookup/multi_prog_pass2")
+SEC("sk_lookup")
int multi_prog_pass2(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
-SEC("sk_lookup/multi_prog_drop1")
+SEC("sk_lookup")
int multi_prog_drop1(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_DROP;
}
-SEC("sk_lookup/multi_prog_drop2")
+SEC("sk_lookup")
int multi_prog_drop2(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
@@ -623,7 +623,7 @@ static __always_inline int select_server_a(struct bpf_sk_lookup *ctx)
return SK_PASS;
}
-SEC("sk_lookup/multi_prog_redir1")
+SEC("sk_lookup")
int multi_prog_redir1(struct bpf_sk_lookup *ctx)
{
int ret;
@@ -633,7 +633,7 @@ int multi_prog_redir1(struct bpf_sk_lookup *ctx)
return SK_PASS;
}
-SEC("sk_lookup/multi_prog_redir2")
+SEC("sk_lookup")
int multi_prog_redir2(struct bpf_sk_lookup *ctx)
{
int ret;
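With every program collapsed into the bare "sk_lookup" section, the section name no longer identifies which program to attach; callers select by function name and attach each program to a network namespace through a BPF link. A hedged userspace sketch, assuming obj is an already opened and loaded bpf_object:

	int netns_fd = open("/proc/self/ns/net", O_RDONLY);
	struct bpf_program *prog =
		bpf_object__find_program_by_name(obj, "redir_port");
	struct bpf_link *link = bpf_program__attach_netns(prog, netns_fd);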
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
index 8249075f088f..40f161480a2f 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
@@ -15,7 +15,6 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
/* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
@@ -53,8 +52,8 @@ static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
return result;
}
-SEC("classifier/sk_lookup_success")
-int bpf_sk_lookup_test0(struct __sk_buff *skb)
+SEC("tc")
+int sk_lookup_success(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
@@ -79,8 +78,8 @@ int bpf_sk_lookup_test0(struct __sk_buff *skb)
return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
}
-SEC("classifier/sk_lookup_success_simple")
-int bpf_sk_lookup_test1(struct __sk_buff *skb)
+SEC("tc")
+int sk_lookup_success_simple(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
@@ -91,8 +90,8 @@ int bpf_sk_lookup_test1(struct __sk_buff *skb)
return 0;
}
-SEC("classifier/err_use_after_free")
-int bpf_sk_lookup_uaf(struct __sk_buff *skb)
+SEC("tc")
+int err_use_after_free(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
@@ -106,8 +105,8 @@ int bpf_sk_lookup_uaf(struct __sk_buff *skb)
return family;
}
-SEC("classifier/err_modify_sk_pointer")
-int bpf_sk_lookup_modptr(struct __sk_buff *skb)
+SEC("tc")
+int err_modify_sk_pointer(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
@@ -121,8 +120,8 @@ int bpf_sk_lookup_modptr(struct __sk_buff *skb)
return 0;
}
-SEC("classifier/err_modify_sk_or_null_pointer")
-int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
+SEC("tc")
+int err_modify_sk_or_null_pointer(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
@@ -135,8 +134,8 @@ int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
return 0;
}
-SEC("classifier/err_no_release")
-int bpf_sk_lookup_test2(struct __sk_buff *skb)
+SEC("tc")
+int err_no_release(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -144,8 +143,8 @@ int bpf_sk_lookup_test2(struct __sk_buff *skb)
return 0;
}
-SEC("classifier/err_release_twice")
-int bpf_sk_lookup_test3(struct __sk_buff *skb)
+SEC("tc")
+int err_release_twice(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
@@ -156,8 +155,8 @@ int bpf_sk_lookup_test3(struct __sk_buff *skb)
return 0;
}
-SEC("classifier/err_release_unchecked")
-int bpf_sk_lookup_test4(struct __sk_buff *skb)
+SEC("tc")
+int err_release_unchecked(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
@@ -173,8 +172,8 @@ void lookup_no_release(struct __sk_buff *skb)
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
}
-SEC("classifier/err_no_release_subcall")
-int bpf_sk_lookup_test5(struct __sk_buff *skb)
+SEC("tc")
+int err_no_release_subcall(struct __sk_buff *skb)
{
lookup_no_release(skb);
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_skb_helpers.c b/tools/testing/selftests/bpf/progs/test_skb_helpers.c
index bb3fbf1a29e3..507215791c5b 100644
--- a/tools/testing/selftests/bpf/progs/test_skb_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_helpers.c
@@ -14,7 +14,7 @@ struct {
char _license[] SEC("license") = "GPL";
-SEC("classifier/test_skb_helpers")
+SEC("tc")
int test_skb_helpers(struct __sk_buff *skb)
{
struct task_struct *task;
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
index a1cc58b10c7c..00f1456aaeda 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
@@ -56,7 +56,7 @@ int prog_stream_verdict(struct __sk_buff *skb)
return verdict;
}
-SEC("sk_skb/skb_verdict")
+SEC("sk_skb")
int prog_skb_verdict(struct __sk_buff *skb)
{
unsigned int *count;
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c b/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c
index 2d31f66e4f23..3c69aa971738 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c
@@ -9,7 +9,7 @@ struct {
__type(value, __u64);
} sock_map SEC(".maps");
-SEC("sk_skb/skb_verdict")
+SEC("sk_skb")
int prog_skb_verdict(struct __sk_buff *skb)
{
return SK_DROP;
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_update.c b/tools/testing/selftests/bpf/progs/test_sockmap_update.c
index 9d0c9f28cab2..6d64ea536e3d 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_update.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_update.c
@@ -24,7 +24,7 @@ struct {
__type(value, __u64);
} dst_sock_hash SEC(".maps");
-SEC("classifier/copy_sock_map")
+SEC("tc")
int copy_sock_map(void *ctx)
{
struct bpf_sock *sk;
diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
index 0cf0134631b4..7449fdb1763b 100644
--- a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
@@ -28,8 +28,8 @@ struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, 128);
__uint(map_flags, BPF_F_STACK_BUILD_ID);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(stack_trace_t));
+ __type(key, __u32);
+ __type(value, stack_trace_t);
} stackmap SEC(".maps");
struct {
diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
index 00ed48672620..a8233e7f173b 100644
--- a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
@@ -27,8 +27,8 @@ typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, 16384);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(stack_trace_t));
+ __type(key, __u32);
+ __type(value, stack_trace_t);
} stackmap SEC(".maps");
struct {
diff --git a/tools/testing/selftests/bpf/progs/test_tc_bpf.c b/tools/testing/selftests/bpf/progs/test_tc_bpf.c
index 18a3a7ed924a..d28ca8d1f3d0 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_bpf.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_bpf.c
@@ -5,7 +5,7 @@
/* Dummy prog to test TC-BPF API */
-SEC("classifier")
+SEC("tc")
int cls(struct __sk_buff *skb)
{
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh.c b/tools/testing/selftests/bpf/progs/test_tc_neigh.c
index 0c93d326a663..3e32ea375ab4 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_neigh.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_neigh.c
@@ -70,7 +70,7 @@ static __always_inline bool is_remote_ep_v6(struct __sk_buff *skb,
return v6_equal(ip6h->daddr, addr);
}
-SEC("classifier/chk_egress")
+SEC("tc")
int tc_chk(struct __sk_buff *skb)
{
void *data_end = ctx_ptr(skb->data_end);
@@ -83,7 +83,7 @@ int tc_chk(struct __sk_buff *skb)
return !raw[0] && !raw[1] && !raw[2] ? TC_ACT_SHOT : TC_ACT_OK;
}
-SEC("classifier/dst_ingress")
+SEC("tc")
int tc_dst(struct __sk_buff *skb)
{
__u8 zero[ETH_ALEN * 2];
@@ -108,7 +108,7 @@ int tc_dst(struct __sk_buff *skb)
return bpf_redirect_neigh(IFINDEX_SRC, NULL, 0, 0);
}
-SEC("classifier/src_ingress")
+SEC("tc")
int tc_src(struct __sk_buff *skb)
{
__u8 zero[ETH_ALEN * 2];
diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
index f7ab69cf018e..ec4cce19362d 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
@@ -75,7 +75,7 @@ static __always_inline int fill_fib_params_v6(struct __sk_buff *skb,
return 0;
}
-SEC("classifier/chk_egress")
+SEC("tc")
int tc_chk(struct __sk_buff *skb)
{
void *data_end = ctx_ptr(skb->data_end);
@@ -143,13 +143,13 @@ static __always_inline int tc_redir(struct __sk_buff *skb)
/* these are identical, but keep them separate for compatibility with the
* section names expected by test_tc_redirect.sh
*/
-SEC("classifier/dst_ingress")
+SEC("tc")
int tc_dst(struct __sk_buff *skb)
{
return tc_redir(skb);
}
-SEC("classifier/src_ingress")
+SEC("tc")
int tc_src(struct __sk_buff *skb)
{
return tc_redir(skb);
diff --git a/tools/testing/selftests/bpf/progs/test_tc_peer.c b/tools/testing/selftests/bpf/progs/test_tc_peer.c
index fe818cd5f010..365eacb5dc34 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_peer.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_peer.c
@@ -16,31 +16,31 @@ volatile const __u32 IFINDEX_DST;
static const __u8 src_mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
static const __u8 dst_mac[] = {0x00, 0x22, 0x33, 0x44, 0x55, 0x66};
-SEC("classifier/chk_egress")
+SEC("tc")
int tc_chk(struct __sk_buff *skb)
{
return TC_ACT_SHOT;
}
-SEC("classifier/dst_ingress")
+SEC("tc")
int tc_dst(struct __sk_buff *skb)
{
return bpf_redirect_peer(IFINDEX_SRC, 0);
}
-SEC("classifier/src_ingress")
+SEC("tc")
int tc_src(struct __sk_buff *skb)
{
return bpf_redirect_peer(IFINDEX_DST, 0);
}
-SEC("classifier/dst_ingress_l3")
+SEC("tc")
int tc_dst_l3(struct __sk_buff *skb)
{
return bpf_redirect(IFINDEX_SRC, 0);
}
-SEC("classifier/src_ingress_l3")
+SEC("tc")
int tc_src_l3(struct __sk_buff *skb)
{
__u16 proto = skb->protocol;
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c b/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c
index 47cbe2eeae43..cd747cd93dbe 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c
@@ -148,7 +148,7 @@ release:
bpf_sk_release(sk);
}
-SEC("clsact/check_syncookie")
+SEC("tc")
int check_syncookie_clsact(struct __sk_buff *skb)
{
check_syncookie(skb, (void *)(long)skb->data,
@@ -156,7 +156,7 @@ int check_syncookie_clsact(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("xdp/check_syncookie")
+SEC("xdp")
int check_syncookie_xdp(struct xdp_md *ctx)
{
check_syncookie(ctx, (void *)(long)ctx->data,
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
index 678bd0fad29e..5f4e87ee949a 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
@@ -594,7 +594,7 @@ static int handle_parse_hdr(struct bpf_sock_ops *skops)
return CG_OK;
}
-SEC("sockops/estab")
+SEC("sockops")
int estab(struct bpf_sock_ops *skops)
{
int true_val = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
index ac63410bb541..24e9344994ef 100644
--- a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
@@ -24,8 +24,8 @@ struct {
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(max_entries, 2);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(__u32));
+ __type(key, int);
+ __type(value, __u32);
} perf_event_map SEC(".maps");
int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c
index 31f9bce37491..e6aa2fc6ce6b 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp.c
@@ -210,7 +210,7 @@ static __always_inline int handle_ipv6(struct xdp_md *xdp)
return XDP_TX;
}
-SEC("xdp_tx_iptunnel")
+SEC("xdp")
int _xdp_tx_iptunnel(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
index 3d66599eee2e..199c61b7d062 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
@@ -2,7 +2,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-SEC("xdp_adjust_tail_grow")
+SEC("xdp")
int _xdp_adjust_tail_grow(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c
index 22065a9cfb25..b7448253d135 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c
@@ -9,9 +9,7 @@
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
-int _version SEC("version") = 1;
-
-SEC("xdp_adjust_tail_shrink")
+SEC("xdp")
int _xdp_adjust_tail_shrink(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
index a038e827f850..58cf4345f5cc 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
@@ -36,8 +36,8 @@ struct meta {
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} perf_buf_map SEC(".maps");
__u64 test_result_fentry = 0;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c
index b360ba2bd441..807bf895f42c 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c
@@ -5,7 +5,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-SEC("xdp_dm_log")
+SEC("xdp")
int xdpdm_devlog(struct xdp_md *ctx)
{
char fmt[] = "devmap redirect: dev %u -> dev %u len %u\n";
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_link.c b/tools/testing/selftests/bpf/progs/test_xdp_link.c
index eb93ea95d1d8..ee7d6ac0f615 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_link.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_link.c
@@ -5,7 +5,7 @@
char LICENSE[] SEC("license") = "GPL";
-SEC("xdp/handler")
+SEC("xdp")
int xdp_handler(struct xdp_md *xdp)
{
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_loop.c b/tools/testing/selftests/bpf/progs/test_xdp_loop.c
index fcabcda30ba3..27eb52dda92c 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_loop.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_loop.c
@@ -206,7 +206,7 @@ static __always_inline int handle_ipv6(struct xdp_md *xdp)
return XDP_TX;
}
-SEC("xdp_tx_iptunnel")
+SEC("xdp")
int _xdp_tx_iptunnel(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
index 3a67921f62b5..596c4e71bf3a 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
@@ -797,7 +797,7 @@ out:
return XDP_DROP;
}
-SEC("xdp-test-v4")
+SEC("xdp")
int balancer_ingress_v4(struct xdp_md *ctx)
{
void *data = (void *)(long)ctx->data;
@@ -816,7 +816,7 @@ int balancer_ingress_v4(struct xdp_md *ctx)
return XDP_DROP;
}
-SEC("xdp-test-v6")
+SEC("xdp")
int balancer_ingress_v6(struct xdp_md *ctx)
{
void *data = (void *)(long)ctx->data;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
index 59ee4f182ff8..532025057711 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
@@ -12,13 +12,13 @@ struct {
__uint(max_entries, 4);
} cpu_map SEC(".maps");
-SEC("xdp_redir")
+SEC("xdp")
int xdp_redir_prog(struct xdp_md *ctx)
{
return bpf_redirect_map(&cpu_map, 1, 0);
}
-SEC("xdp_dummy")
+SEC("xdp")
int xdp_dummy_prog(struct xdp_md *ctx)
{
return XDP_PASS;
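For the cpumap redirect above, bpf_redirect_map() returns XDP_REDIRECT on a successful lookup, and the lower bits of its flags argument may encode a fallback action to return when the lookup fails. A hedged sketch of that variant:

	SEC("xdp")
	int xdp_redir_or_pass(struct xdp_md *ctx)
	{
		/* fall back to XDP_PASS if entry 1 is unpopulated */
		return bpf_redirect_map(&cpu_map, 1, XDP_PASS);
	}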
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
index 0ac086497722..1e6b9c38ea6d 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
@@ -9,7 +9,7 @@ struct {
__uint(max_entries, 4);
} dm_ports SEC(".maps");
-SEC("xdp_redir")
+SEC("xdp")
int xdp_redir_prog(struct xdp_md *ctx)
{
return bpf_redirect_map(&dm_ports, 1, 0);
@@ -18,7 +18,7 @@ int xdp_redir_prog(struct xdp_md *ctx)
/* invalid program on DEVMAP entry;
 * the SEC name means the expected attach type is not set
*/
-SEC("xdp_dummy")
+SEC("xdp")
int xdp_dummy_prog(struct xdp_md *ctx)
{
return XDP_PASS;
diff --git a/tools/testing/selftests/bpf/progs/trace_vprintk.c b/tools/testing/selftests/bpf/progs/trace_vprintk.c
new file mode 100644
index 000000000000..d327241ba047
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/trace_vprintk.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int null_data_vprintk_ret = 0;
+int trace_vprintk_ret = 0;
+int trace_vprintk_ran = 0;
+
+SEC("fentry/__x64_sys_nanosleep")
+int sys_enter(void *ctx)
+{
+ static const char one[] = "1";
+ static const char three[] = "3";
+ static const char five[] = "5";
+ static const char seven[] = "7";
+ static const char nine[] = "9";
+ static const char f[] = "%pS\n";
+
+ /* runner doesn't search for \t, just ensure it compiles */
+ bpf_printk("\t");
+
+ trace_vprintk_ret = __bpf_vprintk("%s,%d,%s,%d,%s,%d,%s,%d,%s,%d %d\n",
+ one, 2, three, 4, five, 6, seven, 8, nine, 10, ++trace_vprintk_ran);
+
+ /* non-NULL fmt w/ NULL data should result in error */
+ null_data_vprintk_ret = bpf_trace_vprintk(f, sizeof(f), NULL, 0);
+ return 0;
+}
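bpf_trace_vprintk() lifts the three-argument cap of bpf_trace_printk() by taking its format arguments as an array of u64, which is what the __bpf_vprintk() wrapper above packs behind the scenes. A minimal sketch of the raw helper call:

	static const char fmt[] = "%d %d %d %d\n";
	__u64 args[] = { 1, 2, 3, 4 };

	bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));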
diff --git a/tools/testing/selftests/bpf/progs/xdp_dummy.c b/tools/testing/selftests/bpf/progs/xdp_dummy.c
index ea25e8881992..d988b2e0cee8 100644
--- a/tools/testing/selftests/bpf/progs/xdp_dummy.c
+++ b/tools/testing/selftests/bpf/progs/xdp_dummy.c
@@ -4,7 +4,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-SEC("xdp_dummy")
+SEC("xdp")
int xdp_dummy_prog(struct xdp_md *ctx)
{
return XDP_PASS;
diff --git a/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c b/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c
index 880debcbcd65..8395782b6e0a 100644
--- a/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c
@@ -34,7 +34,7 @@ struct {
__uint(max_entries, 128);
} mac_map SEC(".maps");
-SEC("xdp_redirect_map_multi")
+SEC("xdp")
int xdp_redirect_map_multi_prog(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
@@ -63,7 +63,7 @@ int xdp_redirect_map_multi_prog(struct xdp_md *ctx)
}
/* The following 2 progs are for 2nd devmap prog testing */
-SEC("xdp_redirect_map_ingress")
+SEC("xdp")
int xdp_redirect_map_all_prog(struct xdp_md *ctx)
{
return bpf_redirect_map(&map_egress, 0,
diff --git a/tools/testing/selftests/bpf/progs/xdping_kern.c b/tools/testing/selftests/bpf/progs/xdping_kern.c
index 6b9ca40bd1f4..4ad73847b8a5 100644
--- a/tools/testing/selftests/bpf/progs/xdping_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdping_kern.c
@@ -86,7 +86,7 @@ static __always_inline int icmp_check(struct xdp_md *ctx, int type)
return XDP_TX;
}
-SEC("xdpclient")
+SEC("xdp")
int xdping_client(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
@@ -150,7 +150,7 @@ int xdping_client(struct xdp_md *ctx)
return XDP_TX;
}
-SEC("xdpserver")
+SEC("xdp")
int xdping_server(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
diff --git a/tools/testing/selftests/bpf/progs/xdpwall.c b/tools/testing/selftests/bpf/progs/xdpwall.c
new file mode 100644
index 000000000000..7a891a0c3a39
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdpwall.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <stdbool.h>
+#include <stdint.h>
+#include <linux/stddef.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/bpf.h>
+#include <linux/types.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+
+enum pkt_parse_err {
+ NO_ERR,
+ BAD_IP6_HDR,
+ BAD_IP4GUE_HDR,
+ BAD_IP6GUE_HDR,
+};
+
+enum pkt_flag {
+ TUNNEL = 0x1,
+ TCP_SYN = 0x2,
+ QUIC_INITIAL_FLAG = 0x4,
+ TCP_ACK = 0x8,
+ TCP_RST = 0x10
+};
+
+struct v4_lpm_key {
+ __u32 prefixlen;
+ __u32 src;
+};
+
+struct v4_lpm_val {
+ struct v4_lpm_key key;
+ __u8 val;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 16);
+ __type(key, struct in6_addr);
+ __type(value, bool);
+} v6_addr_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 16);
+ __type(key, __u32);
+ __type(value, bool);
+} v4_addr_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LPM_TRIE);
+ __uint(max_entries, 16);
+ __uint(key_size, sizeof(struct v4_lpm_key));
+ __uint(value_size, sizeof(struct v4_lpm_val));
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} v4_lpm_val_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 16);
+ __type(key, int);
+ __type(value, __u8);
+} tcp_port_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 16);
+ __type(key, int);
+ __type(value, __u16);
+} udp_port_map SEC(".maps");
+
+enum ip_type { V4 = 1, V6 = 2 };
+
+struct fw_match_info {
+ __u8 v4_src_ip_match;
+ __u8 v6_src_ip_match;
+ __u8 v4_src_prefix_match;
+ __u8 v4_dst_prefix_match;
+ __u8 tcp_dp_match;
+ __u16 udp_sp_match;
+ __u16 udp_dp_match;
+ bool is_tcp;
+ bool is_tcp_syn;
+};
+
+struct pkt_info {
+ enum ip_type type;
+ union {
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } ip;
+ int sport;
+ int dport;
+ __u16 trans_hdr_offset;
+ __u8 proto;
+ __u8 flags;
+};
+
+static __always_inline struct ethhdr *parse_ethhdr(void *data, void *data_end)
+{
+ struct ethhdr *eth = data;
+
+ if (eth + 1 > data_end)
+ return NULL;
+
+ return eth;
+}
+
+static __always_inline __u8 filter_ipv6_addr(const struct in6_addr *ipv6addr)
+{
+ __u8 *leaf;
+
+ leaf = bpf_map_lookup_elem(&v6_addr_map, ipv6addr);
+
+ return leaf ? *leaf : 0;
+}
+
+static __always_inline __u8 filter_ipv4_addr(const __u32 ipaddr)
+{
+ __u8 *leaf;
+
+ leaf = bpf_map_lookup_elem(&v4_addr_map, &ipaddr);
+
+ return leaf ? *leaf : 0;
+}
+
+static __always_inline __u8 filter_ipv4_lpm(const __u32 ipaddr)
+{
+ struct v4_lpm_key v4_key = {};
+ struct v4_lpm_val *lpm_val;
+
+ v4_key.src = ipaddr;
+ v4_key.prefixlen = 32;
+
+ lpm_val = bpf_map_lookup_elem(&v4_lpm_val_map, &v4_key);
+
+ return lpm_val ? lpm_val->val : 0;
+}
+
+static __always_inline void
+filter_src_dst_ip(struct pkt_info* info, struct fw_match_info* match_info)
+{
+ if (info->type == V6) {
+ match_info->v6_src_ip_match =
+ filter_ipv6_addr(&info->ip.ipv6->saddr);
+ } else if (info->type == V4) {
+ match_info->v4_src_ip_match =
+ filter_ipv4_addr(info->ip.ipv4->saddr);
+ match_info->v4_src_prefix_match =
+ filter_ipv4_lpm(info->ip.ipv4->saddr);
+ match_info->v4_dst_prefix_match =
+ filter_ipv4_lpm(info->ip.ipv4->daddr);
+ }
+}
+
+static __always_inline void *
+get_transport_hdr(__u16 offset, void *data, void *data_end)
+{
+ if (offset > 255 || data + offset > data_end)
+ return NULL;
+
+ return data + offset;
+}
+
+static __always_inline bool tcphdr_only_contains_flag(struct tcphdr *tcp,
+ __u32 FLAG)
+{
+ return (tcp_flag_word(tcp) &
+ (TCP_FLAG_ACK | TCP_FLAG_RST | TCP_FLAG_SYN | TCP_FLAG_FIN)) == FLAG;
+}
+
+static __always_inline void set_tcp_flags(struct pkt_info *info,
+ struct tcphdr *tcp) {
+ if (tcphdr_only_contains_flag(tcp, TCP_FLAG_SYN))
+ info->flags |= TCP_SYN;
+ else if (tcphdr_only_contains_flag(tcp, TCP_FLAG_ACK))
+ info->flags |= TCP_ACK;
+ else if (tcphdr_only_contains_flag(tcp, TCP_FLAG_RST))
+ info->flags |= TCP_RST;
+}
+
+static __always_inline bool
+parse_tcp(struct pkt_info *info, void *transport_hdr, void *data_end)
+{
+ struct tcphdr *tcp = transport_hdr;
+
+ if (tcp + 1 > data_end)
+ return false;
+
+ info->sport = bpf_ntohs(tcp->source);
+ info->dport = bpf_ntohs(tcp->dest);
+ set_tcp_flags(info, tcp);
+
+ return true;
+}
+
+static __always_inline bool
+parse_udp(struct pkt_info *info, void *transport_hdr, void *data_end)
+{
+ struct udphdr *udp = transport_hdr;
+
+ if (udp + 1 > data_end)
+ return false;
+
+ info->sport = bpf_ntohs(udp->source);
+ info->dport = bpf_ntohs(udp->dest);
+
+ return true;
+}
+
+static __always_inline __u8 filter_tcp_port(int port)
+{
+ __u8 *leaf = bpf_map_lookup_elem(&tcp_port_map, &port);
+
+ return leaf ? *leaf : 0;
+}
+
+static __always_inline __u16 filter_udp_port(int port)
+{
+ __u16 *leaf = bpf_map_lookup_elem(&udp_port_map, &port);
+
+ return leaf ? *leaf : 0;
+}
+
+static __always_inline bool
+filter_transport_hdr(void *transport_hdr, void *data_end,
+ struct pkt_info *info, struct fw_match_info *match_info)
+{
+ if (info->proto == IPPROTO_TCP) {
+ if (!parse_tcp(info, transport_hdr, data_end))
+ return false;
+
+ match_info->is_tcp = true;
+ match_info->is_tcp_syn = (info->flags & TCP_SYN) > 0;
+
+ match_info->tcp_dp_match = filter_tcp_port(info->dport);
+ } else if (info->proto == IPPROTO_UDP) {
+ if (!parse_udp(info, transport_hdr, data_end))
+ return false;
+
+ match_info->udp_dp_match = filter_udp_port(info->dport);
+ match_info->udp_sp_match = filter_udp_port(info->sport);
+ }
+
+ return true;
+}
+
+static __always_inline __u8
+parse_gue_v6(struct pkt_info *info, struct ipv6hdr *ip6h, void *data_end)
+{
+ struct udphdr *udp = (struct udphdr *)(ip6h + 1);
+ void *encap_data = udp + 1;
+
+ if (udp + 1 > data_end)
+ return BAD_IP6_HDR;
+
+ if (udp->dest != bpf_htons(6666))
+ return NO_ERR;
+
+ info->flags |= TUNNEL;
+
+ if (encap_data + 1 > data_end)
+ return BAD_IP6GUE_HDR;
+
+ if (*(__u8 *)encap_data & 0x30) {
+ struct ipv6hdr *inner_ip6h = encap_data;
+
+ if (inner_ip6h + 1 > data_end)
+ return BAD_IP6GUE_HDR;
+
+ info->type = V6;
+ info->proto = inner_ip6h->nexthdr;
+ info->ip.ipv6 = inner_ip6h;
+ info->trans_hdr_offset += sizeof(struct ipv6hdr) + sizeof(struct udphdr);
+ } else {
+ struct iphdr *inner_ip4h = encap_data;
+
+ if (inner_ip4h + 1 > data_end)
+ return BAD_IP6GUE_HDR;
+
+ info->type = V4;
+ info->proto = inner_ip4h->protocol;
+ info->ip.ipv4 = inner_ip4h;
+ info->trans_hdr_offset += sizeof(struct iphdr) + sizeof(struct udphdr);
+ }
+
+ return NO_ERR;
+}
+
+static __always_inline __u8 parse_ipv6_gue(struct pkt_info *info,
+ void *data, void *data_end)
+{
+ struct ipv6hdr *ip6h = data + sizeof(struct ethhdr);
+
+ if (ip6h + 1 > data_end)
+ return BAD_IP6_HDR;
+
+ info->proto = ip6h->nexthdr;
+ info->ip.ipv6 = ip6h;
+ info->type = V6;
+ info->trans_hdr_offset = sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
+
+ if (info->proto == IPPROTO_UDP)
+ return parse_gue_v6(info, ip6h, data_end);
+
+ return NO_ERR;
+}
+
+SEC("xdp")
+int edgewall(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)(ctx->data_end);
+ void *data = (void *)(long)(ctx->data);
+ struct fw_match_info match_info = {};
+ struct pkt_info info = {};
+ __u8 parse_err = NO_ERR;
+ void *transport_hdr;
+ struct ethhdr *eth;
+ bool filter_res;
+ __u32 proto;
+
+ eth = parse_ethhdr(data, data_end);
+ if (!eth)
+ return XDP_DROP;
+
+ proto = eth->h_proto;
+ if (proto != bpf_htons(ETH_P_IPV6))
+ return XDP_DROP;
+
+ if (parse_ipv6_gue(&info, data, data_end))
+ return XDP_DROP;
+
+ if (info.proto == IPPROTO_ICMPV6)
+ return XDP_PASS;
+
+ if (info.proto != IPPROTO_TCP && info.proto != IPPROTO_UDP)
+ return XDP_DROP;
+
+ filter_src_dst_ip(&info, &match_info);
+
+ transport_hdr = get_transport_hdr(info.trans_hdr_offset, data,
+ data_end);
+ if (!transport_hdr)
+ return XDP_DROP;
+
+ filter_res = filter_transport_hdr(transport_hdr, data_end,
+ &info, &match_info);
+ if (!filter_res)
+ return XDP_DROP;
+
+ if (match_info.is_tcp && !match_info.is_tcp_syn)
+ return XDP_PASS;
+
+ return XDP_DROP;
+}
+
+char LICENSE[] SEC("license") = "GPL";
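In parse_gue_v6() above, the inner IP version is read from the top nibble of the first encapsulated byte: 0x6? & 0x30 is non-zero (IPv6) while 0x4? & 0x30 is zero (IPv4). A short sketch of that dispatch:

	__u8 first_byte = *(__u8 *)encap_data; /* e.g. 0x60 for v6, 0x45 for v4 */
	bool inner_is_v6 = (first_byte & 0x30) != 0;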
diff --git a/tools/testing/selftests/bpf/test_bpftool.py b/tools/testing/selftests/bpf/test_bpftool.py
index 4fed2dc25c0a..1c2408ee1f5d 100644
--- a/tools/testing/selftests/bpf/test_bpftool.py
+++ b/tools/testing/selftests/bpf/test_bpftool.py
@@ -57,6 +57,11 @@ def default_iface(f):
return f(*args, iface, **kwargs)
return wrapper
+DMESG_EMITTING_HELPERS = [
+ "bpf_probe_write_user",
+ "bpf_trace_printk",
+ "bpf_trace_vprintk",
+ ]
class TestBpftool(unittest.TestCase):
@classmethod
@@ -67,10 +72,7 @@ class TestBpftool(unittest.TestCase):
@default_iface
def test_feature_dev_json(self, iface):
- unexpected_helpers = [
- "bpf_probe_write_user",
- "bpf_trace_printk",
- ]
+ unexpected_helpers = DMESG_EMITTING_HELPERS
expected_keys = [
"syscall_config",
"program_types",
@@ -94,10 +96,7 @@ class TestBpftool(unittest.TestCase):
bpftool_json(["feature", "probe"]),
bpftool_json(["feature"]),
]
- unexpected_helpers = [
- "bpf_probe_write_user",
- "bpf_trace_printk",
- ]
+ unexpected_helpers = DMESG_EMITTING_HELPERS
expected_keys = [
"syscall_config",
"system_config",
@@ -121,10 +120,7 @@ class TestBpftool(unittest.TestCase):
bpftool_json(["feature", "probe", "kernel", "full"]),
bpftool_json(["feature", "probe", "full"]),
]
- expected_helpers = [
- "bpf_probe_write_user",
- "bpf_trace_printk",
- ]
+ expected_helpers = DMESG_EMITTING_HELPERS
for tc in test_cases:
# Check if expected helpers are included at least once in any
@@ -157,7 +153,7 @@ class TestBpftool(unittest.TestCase):
not_full_set.add(helper)
self.assertCountEqual(full_set - not_full_set,
- {"bpf_probe_write_user", "bpf_trace_printk"})
+ set(DMESG_EMITTING_HELPERS))
self.assertCountEqual(not_full_set - full_set, set())
def test_feature_macros(self):
diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
index 59ea56945e6c..b497bb85b667 100755
--- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
+++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
@@ -112,6 +112,14 @@ setup()
ip netns add "${NS2}"
ip netns add "${NS3}"
+ # rp_filter gets confused by what these tests are doing, so disable it
+ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.default.rp_filter=0
+ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0
+ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0
+
ip link add veth1 type veth peer name veth2
ip link add veth3 type veth peer name veth4
ip link add veth5 type veth peer name veth6
@@ -236,11 +244,6 @@ setup()
ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6} ${VRF}
ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8} ${VRF}
- # rp_filter gets confused by what these tests are doing, so disable it
- ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
- ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
- ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
-
TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)
sleep 1 # reduce flakiness
diff --git a/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh b/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh
index 9b3617d770a5..6413c1472554 100755
--- a/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh
+++ b/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh
@@ -76,8 +76,8 @@ DIR=$(dirname $0)
TEST_IF=lo
MAX_PING_TRIES=5
BPF_PROG_OBJ="${DIR}/test_tcp_check_syncookie_kern.o"
-CLSACT_SECTION="clsact/check_syncookie"
-XDP_SECTION="xdp/check_syncookie"
+CLSACT_SECTION="tc"
+XDP_SECTION="xdp"
BPF_PROG_ID=0
PROG="${DIR}/test_tcp_check_syncookie_user"
diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh
index 1ccbe804e8e1..ca1372924023 100755
--- a/tools/testing/selftests/bpf/test_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tunnel.sh
@@ -168,14 +168,15 @@ add_vxlan_tunnel()
ip netns exec at_ns0 \
ip link set dev $DEV_NS address 52:54:00:d9:01:00 up
ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
- ip netns exec at_ns0 arp -s 10.1.1.200 52:54:00:d9:02:00
+ ip netns exec at_ns0 \
+ ip neigh add 10.1.1.200 lladdr 52:54:00:d9:02:00 dev $DEV_NS
ip netns exec at_ns0 iptables -A OUTPUT -j MARK --set-mark 0x800FF
# root namespace
ip link add dev $DEV type $TYPE external gbp dstport 4789
ip link set dev $DEV address 52:54:00:d9:02:00 up
ip addr add dev $DEV 10.1.1.200/24
- arp -s 10.1.1.100 52:54:00:d9:01:00
+ ip neigh add 10.1.1.100 lladdr 52:54:00:d9:01:00 dev $DEV
}
add_ip6vxlan_tunnel()
diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh
index 637fcf4fe4e3..d10cefd6eb09 100755
--- a/tools/testing/selftests/bpf/test_xdp_meta.sh
+++ b/tools/testing/selftests/bpf/test_xdp_meta.sh
@@ -1,5 +1,8 @@
#!/bin/sh
+# Kselftest framework requirement - SKIP code is 4.
+readonly KSFT_SKIP=4
+
cleanup()
{
if [ "$?" = "0" ]; then
@@ -17,7 +20,7 @@ cleanup()
ip link set dev lo xdp off 2>/dev/null > /dev/null
if [ $? -ne 0 ];then
echo "selftests: [SKIP] Could not run test without the ip xdp support"
- exit 0
+ exit $KSFT_SKIP
fi
set -e
diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.sh b/tools/testing/selftests/bpf/test_xdp_redirect.sh
index c033850886f4..57c8db9972a6 100755
--- a/tools/testing/selftests/bpf/test_xdp_redirect.sh
+++ b/tools/testing/selftests/bpf/test_xdp_redirect.sh
@@ -52,8 +52,8 @@ test_xdp_redirect()
return 0
fi
- ip -n ns1 link set veth11 $xdpmode obj xdp_dummy.o sec xdp_dummy &> /dev/null
- ip -n ns2 link set veth22 $xdpmode obj xdp_dummy.o sec xdp_dummy &> /dev/null
+ ip -n ns1 link set veth11 $xdpmode obj xdp_dummy.o sec xdp &> /dev/null
+ ip -n ns2 link set veth22 $xdpmode obj xdp_dummy.o sec xdp &> /dev/null
ip link set dev veth1 $xdpmode obj test_xdp_redirect.o sec redirect_to_222 &> /dev/null
ip link set dev veth2 $xdpmode obj test_xdp_redirect.o sec redirect_to_111 &> /dev/null
diff --git a/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh b/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh
index 1538373157e3..351955c2bdfd 100755
--- a/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh
+++ b/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh
@@ -88,7 +88,7 @@ setup_ns()
# Add a neigh entry for IPv4 ping test
ip -n ns$i neigh add 192.0.2.253 lladdr 00:00:00:00:00:01 dev veth0
ip -n ns$i link set veth0 $mode obj \
- xdp_dummy.o sec xdp_dummy &> /dev/null || \
+ xdp_dummy.o sec xdp &> /dev/null || \
{ test_fail "Unable to load dummy xdp" && exit 1; }
IFACES="$IFACES veth$i"
veth_mac[$i]=$(ip link show veth$i | awk '/link\/ether/ {print $2}')
diff --git a/tools/testing/selftests/bpf/test_xdp_veth.sh b/tools/testing/selftests/bpf/test_xdp_veth.sh
index 995278e684b6..a3a1eaee26ea 100755
--- a/tools/testing/selftests/bpf/test_xdp_veth.sh
+++ b/tools/testing/selftests/bpf/test_xdp_veth.sh
@@ -107,9 +107,9 @@ ip link set dev veth1 xdp pinned $BPF_DIR/progs/redirect_map_0
ip link set dev veth2 xdp pinned $BPF_DIR/progs/redirect_map_1
ip link set dev veth3 xdp pinned $BPF_DIR/progs/redirect_map_2
-ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp_dummy
+ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp
ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec xdp
-ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp_dummy
+ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp
trap cleanup EXIT
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.sh b/tools/testing/selftests/bpf/test_xdp_vlan.sh
index bb8b0da91686..0cbc7604a2f8 100755
--- a/tools/testing/selftests/bpf/test_xdp_vlan.sh
+++ b/tools/testing/selftests/bpf/test_xdp_vlan.sh
@@ -2,6 +2,9 @@
# SPDX-License-Identifier: GPL-2.0
# Author: Jesper Dangaard Brouer <[email protected]>
+# Kselftest framework requirement - SKIP code is 4.
+readonly KSFT_SKIP=4
+
# Allow wrapper scripts to name test
if [ -z "$TESTNAME" ]; then
TESTNAME=xdp_vlan
@@ -94,7 +97,7 @@ while true; do
-h | --help )
usage;
echo "selftests: $TESTNAME [SKIP] usage help info requested"
- exit 0
+ exit $KSFT_SKIP
;;
* )
shift
@@ -117,7 +120,7 @@ fi
ip link set dev lo xdpgeneric off 2>/dev/null > /dev/null
if [ $? -ne 0 ]; then
echo "selftests: $TESTNAME [SKIP] need ip xdp support"
- exit 0
+ exit $KSFT_SKIP
fi
# Interactive mode likely require us to cleanup netns
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
index 0b943897aaf6..c9991c3f3bd2 100644
--- a/tools/testing/selftests/bpf/verifier/spill_fill.c
+++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
@@ -104,3 +104,164 @@
.result = ACCEPT,
.retval = POINTER_VALUE,
},
+{
+ "Spill and refill a u32 const scalar. Offset to skb->data",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ /* r4 = 20 */
+ BPF_MOV32_IMM(BPF_REG_4, 20),
+ /* *(u32 *)(r10 -8) = r4 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
+ /* r4 = *(u32 *)(r10 -8) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
+ /* r0 = r2 */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv20 */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+ /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=inv20 */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ /* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=inv20 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "Spill a u32 const, refill from another half of the uninit u32 from the stack",
+ .insns = {
+ /* r4 = 20 */
+ BPF_MOV32_IMM(BPF_REG_4, 20),
+ /* *(u32 *)(r10 -8) = r4 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
+ /* r4 = *(u32 *)(r10 -4) fp-8=????rrrr */
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid read from stack off -4+0 size 4",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "Spill a u32 const scalar. Refill as u16. Offset to skb->data",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ /* r4 = 20 */
+ BPF_MOV32_IMM(BPF_REG_4, 20),
+ /* *(u32 *)(r10 -8) = r4 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
+ /* r4 = *(u16 *)(r10 -8) */
+ BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
+ /* r0 = r2 */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+ /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ /* r4 = 20 */
+ BPF_MOV32_IMM(BPF_REG_4, 20),
+ /* *(u32 *)(r10 -8) = r4 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
+ /* r4 = *(u16 *)(r10 -6) */
+ BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6),
+ /* r0 = r2 */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+ /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ /* r4 = 20 */
+ BPF_MOV32_IMM(BPF_REG_4, 20),
+ /* *(u32 *)(r10 -8) = r4 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
+ /* *(u32 *)(r10 -4) = r4 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
+ /* r4 = *(u32 *)(r10 -4) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
+ /* r0 = r2 */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=U32_MAX */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+ /* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ /* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "Spill and refill a umax=40 bounded scalar. Offset to skb->data",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* *(u32 *)(r10 -8) = r4 R4=inv,umax=40 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
+ /* r4 = *(u32 *)(r10 - 8) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
+ /* r2 += r4 R2=pkt R4=inv,umax=40 */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4),
+ /* r0 = r2 R2=pkt,umax=40 R4=inv,umax=40 */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ /* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20),
+ /* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 1),
+ /* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c
index 842d9155d36c..79a3453dab25 100644
--- a/tools/testing/selftests/bpf/xdping.c
+++ b/tools/testing/selftests/bpf/xdping.c
@@ -178,9 +178,8 @@ int main(int argc, char **argv)
return 1;
}
- main_prog = bpf_object__find_program_by_title(obj,
- server ? "xdpserver" :
- "xdpclient");
+ main_prog = bpf_object__find_program_by_name(obj,
+ server ? "xdping_server" : "xdping_client");
if (main_prog)
prog_fd = bpf_program__fd(main_prog);
if (!main_prog || prog_fd < 0) {
diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xdpxceiver.c
index 127bcde06c86..6c7cf8aadc79 100644
--- a/tools/testing/selftests/bpf/xdpxceiver.c
+++ b/tools/testing/selftests/bpf/xdpxceiver.c
@@ -384,6 +384,7 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
ifobj->umem = &ifobj->umem_arr[0];
ifobj->xsk = &ifobj->xsk_arr[0];
ifobj->use_poll = false;
+ ifobj->pacing_on = true;
ifobj->pkt_stream = test->pkt_stream_default;
if (i == 0) {
@@ -445,6 +446,12 @@ static void test_spec_set_name(struct test_spec *test, const char *name)
strncpy(test->name, name, MAX_TEST_NAME_SIZE);
}
+static void pkt_stream_reset(struct pkt_stream *pkt_stream)
+{
+ if (pkt_stream)
+ pkt_stream->rx_pkt_nb = 0;
+}
+
static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb)
{
if (pkt_nb >= pkt_stream->nb_pkts)
@@ -507,8 +514,7 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb
pkt_stream->nb_pkts = nb_pkts;
for (i = 0; i < nb_pkts; i++) {
- pkt_stream->pkts[i].addr = (i % umem->num_frames) * umem->frame_size +
- DEFAULT_OFFSET;
+ pkt_stream->pkts[i].addr = (i % umem->num_frames) * umem->frame_size;
pkt_stream->pkts[i].len = pkt_len;
pkt_stream->pkts[i].payload = i;
@@ -536,14 +542,14 @@ static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
test->ifobj_rx->pkt_stream = pkt_stream;
}
-static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, u32 offset)
+static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
{
struct xsk_umem_info *umem = test->ifobj_tx->umem;
struct pkt_stream *pkt_stream;
u32 i;
pkt_stream = pkt_stream_clone(umem, test->pkt_stream_default);
- for (i = 0; i < test->pkt_stream_default->nb_pkts; i += 2) {
+ for (i = 1; i < test->pkt_stream_default->nb_pkts; i += 2) {
pkt_stream->pkts[i].addr = (i % umem->num_frames) * umem->frame_size + offset;
pkt_stream->pkts[i].len = pkt_len;
}
@@ -635,6 +641,25 @@ static void pkt_dump(void *pkt, u32 len)
fprintf(stdout, "---------------------------------------\n");
}
+static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, u64 addr,
+ u64 pkt_stream_addr)
+{
+ u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
+ u32 offset = addr % umem->frame_size, expected_offset = 0;
+
+ if (!pkt_stream->use_addr_for_fill)
+ pkt_stream_addr = 0;
+
+ expected_offset += (pkt_stream_addr + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;
+
+ if (offset == expected_offset)
+ return true;
+
+ ksft_test_result_fail("ERROR: [%s] expected [%u], got [%u]\n", __func__, expected_offset,
+ offset);
+ return false;
+}
+
static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
{
void *data = xsk_umem__get_data(buffer, addr);
@@ -717,13 +742,15 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
struct pollfd *fds)
{
struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream);
+ struct xsk_umem_info *umem = xsk->umem;
u32 idx_rx = 0, idx_fq = 0, rcvd, i;
+ u32 total = 0;
int ret;
while (pkt) {
rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
if (!rcvd) {
- if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
+ if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
ret = poll(fds, 1, POLL_TMOUT);
if (ret < 0)
exit_with_error(-ret);
@@ -731,16 +758,16 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
continue;
}
- ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
+ ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
while (ret != rcvd) {
if (ret < 0)
exit_with_error(-ret);
- if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
+ if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
ret = poll(fds, 1, POLL_TMOUT);
if (ret < 0)
exit_with_error(-ret);
}
- ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
+ ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
}
for (i = 0; i < rcvd; i++) {
@@ -757,15 +784,25 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
orig = xsk_umem__extract_addr(addr);
addr = xsk_umem__add_offset_to_addr(addr);
- if (!is_pkt_valid(pkt, xsk->umem->buffer, addr, desc->len))
+
+ if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len))
+ return;
+ if (!is_offset_correct(umem, pkt_stream, addr, pkt->addr))
return;
- *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig;
+ *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
pkt = pkt_stream_get_next_rx_pkt(pkt_stream);
}
- xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
+ xsk_ring_prod__submit(&umem->fq, rcvd);
xsk_ring_cons__release(&xsk->rx, rcvd);
+
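+	/* RX side of the TX/RX pacing protocol: account for the frames just
+	 * released back to the fill ring and wake the TX thread once enough
+	 * of the umem is free again.
+	 */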
+ pthread_mutex_lock(&pacing_mutex);
+ pkts_in_flight -= rcvd;
+ total += rcvd;
+ if (pkts_in_flight < umem->num_frames)
+ pthread_cond_signal(&pacing_cond);
+ pthread_mutex_unlock(&pacing_mutex);
}
}
@@ -791,10 +828,19 @@ static u32 __send_pkts(struct ifobject *ifobject, u32 pkt_nb)
valid_pkts++;
}
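+	/* TX side of the pacing protocol: when almost all umem frames are in
+	 * flight, kick the kernel to complete transmission and wait for the
+	 * RX thread to signal that frames have been consumed.
+	 */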
+ pthread_mutex_lock(&pacing_mutex);
+ pkts_in_flight += valid_pkts;
+ if (ifobject->pacing_on && pkts_in_flight >= ifobject->umem->num_frames - BATCH_SIZE) {
+ kick_tx(xsk);
+ pthread_cond_wait(&pacing_cond, &pacing_mutex);
+ }
+ pthread_mutex_unlock(&pacing_mutex);
+
xsk_ring_prod__submit(&xsk->tx, i);
xsk->outstanding_tx += valid_pkts;
- complete_pkts(xsk, BATCH_SIZE);
+ complete_pkts(xsk, i);
+ usleep(10);
return i;
}
@@ -813,8 +859,6 @@ static void send_pkts(struct ifobject *ifobject)
fds.events = POLLOUT;
while (pkt_cnt < ifobject->pkt_stream->nb_pkts) {
- u32 sent;
-
if (ifobject->use_poll) {
int ret;
@@ -826,9 +870,7 @@ static void send_pkts(struct ifobject *ifobject)
continue;
}
- sent = __send_pkts(ifobject, pkt_cnt);
- pkt_cnt += sent;
- usleep(10);
+ pkt_cnt += __send_pkts(ifobject, pkt_cnt);
}
wait_for_tx_completion(ifobject->xsk);
@@ -913,18 +955,17 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
u32 ctr = 0;
void *bufs;
+ int ret;
bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
if (bufs == MAP_FAILED)
exit_with_error(errno);
- while (ctr++ < SOCK_RECONF_CTR) {
- int ret;
-
- ret = xsk_configure_umem(&ifobject->umem_arr[i], bufs, umem_sz);
- if (ret)
- exit_with_error(-ret);
+ ret = xsk_configure_umem(&ifobject->umem_arr[i], bufs, umem_sz);
+ if (ret)
+ exit_with_error(-ret);
+ while (ctr++ < SOCK_RECONF_CTR) {
ret = xsk_configure_socket(&ifobject->xsk_arr[i], &ifobject->umem_arr[i],
ifobject, i);
if (!ret)
@@ -971,13 +1012,18 @@ static void *worker_testapp_validate_tx(void *arg)
static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream)
{
- u32 idx = 0, i;
+ u32 idx = 0, i, buffers_to_fill;
int ret;
- ret = xsk_ring_prod__reserve(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS, &idx);
- if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS)
+ if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
+ buffers_to_fill = umem->num_frames;
+ else
+ buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+
+ ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
+ if (ret != buffers_to_fill)
exit_with_error(ENOSPC);
- for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++) {
+ for (i = 0; i < buffers_to_fill; i++) {
u64 addr;
if (pkt_stream->use_addr_for_fill) {
@@ -987,12 +1033,12 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream
break;
addr = pkt->addr;
} else {
- addr = (i % umem->num_frames) * umem->frame_size + DEFAULT_OFFSET;
+ addr = i * umem->frame_size;
}
*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
}
- xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS);
+ xsk_ring_prod__submit(&umem->fq, buffers_to_fill);
}
static void *worker_testapp_validate_rx(void *arg)
@@ -1032,6 +1078,8 @@ static void testapp_validate_traffic(struct test_spec *test)
exit_with_error(errno);
test->current_step++;
+ pkt_stream_reset(ifobj_rx->pkt_stream);
+ pkts_in_flight = 0;
/* Spawn RX thread */
pthread_create(&t0, NULL, ifobj_rx->func_ptr, test);
@@ -1108,6 +1156,13 @@ static void testapp_bpf_res(struct test_spec *test)
testapp_validate_traffic(test);
}
+static void testapp_headroom(struct test_spec *test)
+{
+ test_spec_set_name(test, "UMEM_HEADROOM");
+ test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
+ testapp_validate_traffic(test);
+}
+
static void testapp_stats(struct test_spec *test)
{
int i;
@@ -1115,6 +1170,8 @@ static void testapp_stats(struct test_spec *test)
for (i = 0; i < STAT_TEST_TYPE_MAX; i++) {
test_spec_reset(test);
stat_test_type = i;
+ /* Few or no packets will be received, so packet pacing cannot be used */
+ test->ifobj_tx->pacing_on = false;
switch (stat_test_type) {
case STAT_TEST_RX_DROPPED:
@@ -1181,7 +1238,7 @@ static bool testapp_unaligned(struct test_spec *test)
test->ifobj_tx->umem->unaligned_mode = true;
test->ifobj_rx->umem->unaligned_mode = true;
/* Let half of the packets straddle a buffer boundary */
- pkt_stream_replace_half(test, PKT_SIZE, test->ifobj_tx->umem->frame_size - 32);
+ pkt_stream_replace_half(test, PKT_SIZE, -PKT_SIZE / 2);
test->ifobj_rx->pkt_stream->use_addr_for_fill = true;
testapp_validate_traffic(test);
@@ -1189,6 +1246,15 @@ static bool testapp_unaligned(struct test_spec *test)
return true;
}
+static void testapp_single_pkt(struct test_spec *test)
+{
+ struct pkt pkts[] = {{0x1000, PKT_SIZE, 0, true}};
+
+ pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
+ testapp_validate_traffic(test);
+ pkt_stream_restore_default(test);
+}
+
static void testapp_invalid_desc(struct test_spec *test)
{
struct pkt pkts[] = {
@@ -1270,6 +1336,10 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_
test_spec_set_name(test, "RUN_TO_COMPLETION");
testapp_validate_traffic(test);
break;
+ case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT:
+ test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT");
+ testapp_single_pkt(test);
+ break;
case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME:
test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE");
test->ifobj_tx->umem->frame_size = 2048;
@@ -1305,6 +1375,9 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_
if (!testapp_unaligned(test))
return;
break;
+ case TEST_TYPE_HEADROOM:
+ testapp_headroom(test);
+ break;
default:
break;
}
diff --git a/tools/testing/selftests/bpf/xdpxceiver.h b/tools/testing/selftests/bpf/xdpxceiver.h
index 5ac4a5e64744..2f705f44b748 100644
--- a/tools/testing/selftests/bpf/xdpxceiver.h
+++ b/tools/testing/selftests/bpf/xdpxceiver.h
@@ -35,13 +35,13 @@
#define UDP_PKT_DATA_SIZE (UDP_PKT_SIZE - sizeof(struct udphdr))
#define USLEEP_MAX 10000
#define SOCK_RECONF_CTR 10
-#define BATCH_SIZE 8
+#define BATCH_SIZE 64
#define POLL_TMOUT 1000
#define DEFAULT_PKT_CNT (4 * 1024)
#define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4)
#define UMEM_SIZE (DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE)
#define RX_FULL_RXQSIZE 32
-#define DEFAULT_OFFSET 256
+#define UMEM_HEADROOM_TEST_SIZE 128
#define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1)
#define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0)
@@ -55,11 +55,13 @@ enum test_mode {
enum test_type {
TEST_TYPE_RUN_TO_COMPLETION,
TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME,
+ TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT,
TEST_TYPE_POLL,
TEST_TYPE_UNALIGNED,
TEST_TYPE_ALIGNED_INV_DESC,
TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME,
TEST_TYPE_UNALIGNED_INV_DESC,
+ TEST_TYPE_HEADROOM,
TEST_TYPE_TEARDOWN,
TEST_TYPE_BIDI,
TEST_TYPE_STATS,
@@ -136,6 +138,7 @@ struct ifobject {
bool tx_on;
bool rx_on;
bool use_poll;
+ bool pacing_on;
u8 dst_mac[ETH_ALEN];
u8 src_mac[ETH_ALEN];
};
@@ -151,5 +154,9 @@ struct test_spec {
};
pthread_barrier_t barr;
+pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t pacing_cond = PTHREAD_COND_INITIALIZER;
+
+u32 pkts_in_flight;
#endif /* XDPXCEIVER_H */
diff --git a/tools/testing/selftests/drivers/dma-buf/udmabuf.c b/tools/testing/selftests/drivers/dma-buf/udmabuf.c
index 4de902ea14d8..de1c4e6de0b2 100644
--- a/tools/testing/selftests/drivers/dma-buf/udmabuf.c
+++ b/tools/testing/selftests/drivers/dma-buf/udmabuf.c
@@ -1,10 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#define __EXPORTED_HEADERS__
+
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
-#include <linux/fcntl.h>
+#include <fcntl.h>
#include <malloc.h>
#include <sys/ioctl.h>
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh
index 8817851da7a9..e9a82cae8c9a 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh
@@ -13,7 +13,7 @@
# |
# +-------------------|-----+
# | SW1 | |
-# | $swp1 + |
+# | $swp1 + |
# | 192.0.2.2/28 |
# | |
# | + g1a (gre) |
@@ -27,8 +27,8 @@
# |
# +--|----------------------+
# | | VRF2 |
-# | + $rp2 |
-# | 198.51.100.2/28 |
+# | + $rp2 |
+# | 198.51.100.2/28 |
# +-------------------------+
lib_dir=$(dirname $0)/../../../net/forwarding
@@ -116,12 +116,16 @@ cleanup()
forwarding_restore
}
-ecn_payload_get()
+ipip_payload_get()
{
+ local flags=$1; shift
+ local key=$1; shift
+
p=$(:
- )"0"$( : GRE flags
+ )"$flags"$( : GRE flags
)"0:00:"$( : Reserved + version
)"08:00:"$( : ETH protocol type
+ )"$key"$( : Key
)"4"$( : IP version
)"5:"$( : IHL
)"00:"$( : IP TOS
@@ -137,6 +141,11 @@ ecn_payload_get()
echo $p
}
+ecn_payload_get()
+{
+ echo $(ipip_payload_get "0")
+}
+
ecn_decap_test()
{
local trap_name="decap_error"
@@ -171,31 +180,6 @@ ecn_decap_test()
tc filter del dev $swp1 egress protocol ip pref 1 handle 101 flower
}
-ipip_payload_get()
-{
- local flags=$1; shift
- local key=$1; shift
-
- p=$(:
- )"$flags"$( : GRE flags
- )"0:00:"$( : Reserved + version
- )"08:00:"$( : ETH protocol type
- )"$key"$( : Key
- )"4"$( : IP version
- )"5:"$( : IHL
- )"00:"$( : IP TOS
- )"00:14:"$( : IP total length
- )"00:00:"$( : IP identification
- )"20:00:"$( : IP flags + frag off
- )"30:"$( : IP TTL
- )"01:"$( : IP proto
- )"E7:E6:"$( : IP header csum
- )"C0:00:01:01:"$( : IP saddr : 192.0.1.1
- )"C0:00:02:01:"$( : IP daddr : 192.0.2.1
- )
- echo $p
-}
-
no_matching_tunnel_test()
{
local trap_name="decap_error"
@@ -239,7 +223,8 @@ decap_error_test()
no_matching_tunnel_test "Decap error: Source IP check failed" \
192.0.2.68 "0"
no_matching_tunnel_test \
- "Decap error: Key exists but was not expected" $sip "2" ":E9:"
+ "Decap error: Key exists but was not expected" $sip "2" \
+ "00:00:00:E9:"
# Destroy the tunnel and create new one with key
__addr_add_del g1 del 192.0.2.65/32
@@ -251,7 +236,8 @@ decap_error_test()
no_matching_tunnel_test \
"Decap error: Key does not exist but was expected" $sip "0"
no_matching_tunnel_test \
- "Decap error: Packet has a wrong key field" $sip "2" "E8:"
+ "Decap error: Packet has a wrong key field" $sip "2" \
+ "00:00:00:E8:"
}
trap cleanup EXIT
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_offload.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_offload.sh
new file mode 100755
index 000000000000..ade79ef08de3
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_offload.sh
@@ -0,0 +1,276 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test qdisc offload indication
+
+
+ALL_TESTS="
+ test_root
+ test_etsprio
+"
+NUM_NETIFS=1
+lib_dir=$(dirname $0)/../../../net/forwarding
+source $lib_dir/lib.sh
+
+check_not_offloaded()
+{
+ local handle=$1; shift
+ local h
+ local offloaded
+
+ h=$(qdisc_stats_get $h1 "$handle" .handle)
+ [[ $h == '"'$handle'"' ]]
+ check_err $? "Qdisc with handle $handle does not exist"
+
+ offloaded=$(qdisc_stats_get $h1 "$handle" .offloaded)
+ [[ $offloaded == true ]]
+ check_fail $? "Qdisc with handle $handle offloaded, but should not be"
+}
+
+check_all_offloaded()
+{
+ local handle=$1; shift
+
+ if [[ ! -z $handle ]]; then
+ local offloaded=$(qdisc_stats_get $h1 "$handle" .offloaded)
+ [[ $offloaded == true ]]
+ check_err $? "Qdisc with handle $handle not offloaded"
+ fi
+
+ local unoffloaded=$(tc q sh dev $h1 invisible |
+ grep -v offloaded |
+ sed s/root/parent\ root/ |
+ cut -d' ' -f 5)
+ [[ -z $unoffloaded ]]
+ check_err $? "Qdiscs with following parents not offloaded: $unoffloaded"
+
+ pre_cleanup
+}
+
+with_ets()
+{
+ local handle=$1; shift
+ local locus=$1; shift
+
+ tc qdisc add dev $h1 $locus handle $handle \
+ ets bands 8 priomap 7 6 5 4 3 2 1 0
+ "$@"
+ tc qdisc del dev $h1 $locus
+}
+
+with_prio()
+{
+ local handle=$1; shift
+ local locus=$1; shift
+
+ tc qdisc add dev $h1 $locus handle $handle \
+ prio bands 8 priomap 7 6 5 4 3 2 1 0
+ "$@"
+ tc qdisc del dev $h1 $locus
+}
+
+with_red()
+{
+ local handle=$1; shift
+ local locus=$1; shift
+
+ tc qdisc add dev $h1 $locus handle $handle \
+ red limit 1000000 min 200000 max 300000 probability 0.5 avpkt 1500
+ "$@"
+ tc qdisc del dev $h1 $locus
+}
+
+with_tbf()
+{
+ local handle=$1; shift
+ local locus=$1; shift
+
+ tc qdisc add dev $h1 $locus handle $handle \
+ tbf rate 400Mbit burst 128K limit 1M
+ "$@"
+ tc qdisc del dev $h1 $locus
+}
+
+with_pfifo()
+{
+ local handle=$1; shift
+ local locus=$1; shift
+
+ tc qdisc add dev $h1 $locus handle $handle pfifo limit 100K
+ "$@"
+ tc qdisc del dev $h1 $locus
+}
+
+with_bfifo()
+{
+ local handle=$1; shift
+ local locus=$1; shift
+
+ tc qdisc add dev $h1 $locus handle $handle bfifo limit 100K
+ "$@"
+ tc qdisc del dev $h1 $locus
+}
+
+with_drr()
+{
+ local handle=$1; shift
+ local locus=$1; shift
+
+ tc qdisc add dev $h1 $locus handle $handle drr
+ "$@"
+ tc qdisc del dev $h1 $locus
+}
+
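+# Stack the listed qdisc kinds under one another, each new qdisc attached
+# under class :1 of its parent; once the kind list is exhausted ("--"), run
+# the remaining command against the handle of the innermost qdisc.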
+with_qdiscs()
+{
+ local handle=$1; shift
+ local parent=$1; shift
+ local kind=$1; shift
+ local next_handle=$((handle * 2))
+ local locus;
+
+ if [[ $kind == "--" ]]; then
+ local cmd=$1; shift
+ $cmd $(printf %x: $parent) "$@"
+ else
+ if ((parent == 0)); then
+ locus=root
+ else
+ locus=$(printf "parent %x:1" $parent)
+ fi
+
+ with_$kind $(printf %x: $handle) "$locus" \
+ with_qdiscs $next_handle $handle "$@"
+ fi
+}
+
+get_name()
+{
+ local parent=$1; shift
+ local name=$(echo "" "${@^^}" | tr ' ' -)
+
+ if ((parent != 0)); then
+ kind=$(qdisc_stats_get $h1 $parent: .kind)
+ kind=${kind%\"}
+ kind=${kind#\"}
+ name="-${kind^^}$name"
+ fi
+
+ echo root$name
+}
+
+do_test_offloaded()
+{
+ local handle=$1; shift
+ local parent=$1; shift
+
+ RET=0
+ with_qdiscs $handle $parent "$@" -- check_all_offloaded
+ log_test $(get_name $parent "$@")" offloaded"
+}
+
+do_test_nooffload()
+{
+ local handle=$1; shift
+ local parent=$1; shift
+
+ local name=$(echo "${@^^}" | tr ' ' -)
+ local kind
+
+ RET=0
+ with_qdiscs $handle $parent "$@" -- check_not_offloaded
+ log_test $(get_name $parent "$@")" not offloaded"
+}
+
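+# Walk the combinations that should offload (an optional ets/prio container,
+# an optional red/tbf leaf chain, an optional FIFO) and those that should
+# not (repeated or overly deep red/tbf chains, and drr).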
+do_test_combinations()
+{
+ local handle=$1; shift
+ local parent=$1; shift
+
+ local cont
+ local leaf
+ local fifo
+
+ for cont in "" ets prio; do
+ for leaf in "" red tbf "red tbf" "tbf red"; do
+ for fifo in "" pfifo bfifo; do
+ if [[ -z "$cont$leaf$fifo" ]]; then
+ continue
+ fi
+ do_test_offloaded $handle $parent \
+ $cont $leaf $fifo
+ done
+ done
+ done
+
+ for cont in ets prio; do
+ for leaf in red tbf; do
+ do_test_nooffload $handle $parent $cont red tbf $leaf
+ do_test_nooffload $handle $parent $cont tbf red $leaf
+ done
+ for leaf in "red red" "tbf tbf"; do
+ do_test_nooffload $handle $parent $cont $leaf
+ done
+ done
+
+ do_test_nooffload $handle $parent drr
+}
+
+test_root()
+{
+ do_test_combinations 1 0
+}
+
+do_test_etsprio()
+{
+ local parent=$1; shift
+ local tbfpfx=$1; shift
+ local cont
+
+ for cont in ets prio; do
+ RET=0
+ with_$cont 8: "$parent" \
+ with_red 11: "parent 8:1" \
+ with_red 12: "parent 8:2" \
+ with_tbf 13: "parent 8:3" \
+ with_tbf 14: "parent 8:4" \
+ check_all_offloaded
+ log_test "root$tbfpfx-ETS-{RED,TBF} offloaded"
+
+ RET=0
+ with_$cont 8: "$parent" \
+ with_red 81: "parent 8:1" \
+ with_tbf 811: "parent 81:1" \
+ with_tbf 84: "parent 8:4" \
+ with_red 841: "parent 84:1" \
+ check_all_offloaded
+ log_test "root$tbfpfx-ETS-{RED-TBF,TBF-RED} offloaded"
+
+ RET=0
+ with_$cont 8: "$parent" \
+ with_red 81: "parent 8:1" \
+ with_tbf 811: "parent 81:1" \
+ with_bfifo 8111: "parent 811:1" \
+ with_tbf 82: "parent 8:2" \
+ with_red 821: "parent 82:1" \
+ with_bfifo 8211: "parent 821:1" \
+ check_all_offloaded
+ log_test "root$tbfpfx-ETS-{RED-TBF-bFIFO,TBF-RED-bFIFO} offloaded"
+ done
+}
+
+test_etsprio()
+{
+ do_test_etsprio root ""
+}
+
+cleanup()
+{
+ tc qdisc del dev $h1 root &>/dev/null
+}
+
+trap cleanup EXIT
+h1=${NETIFS[p1]}
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
index 33ddd01689be..dd90cd87d4f9 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
@@ -331,6 +331,14 @@ get_nmarked()
ethtool_stats_get $swp3 ecn_marked
}
+get_qdisc_nmarked()
+{
+ local vlan=$1; shift
+
+ busywait_for_counter 1100 +1 \
+ qdisc_stats_get $swp3 $(get_qdisc_handle $vlan) .marked
+}
+
get_qdisc_npackets()
{
local vlan=$1; shift
@@ -384,14 +392,15 @@ build_backlog()
check_marking()
{
+ local get_nmarked=$1; shift
local vlan=$1; shift
local cond=$1; shift
local npackets_0=$(get_qdisc_npackets $vlan)
- local nmarked_0=$(get_nmarked $vlan)
+ local nmarked_0=$($get_nmarked $vlan)
sleep 5
local npackets_1=$(get_qdisc_npackets $vlan)
- local nmarked_1=$(get_nmarked $vlan)
+ local nmarked_1=$($get_nmarked $vlan)
local nmarked_d=$((nmarked_1 - nmarked_0))
local npackets_d=$((npackets_1 - npackets_0))
@@ -404,6 +413,7 @@ check_marking()
ecn_test_common()
{
local name=$1; shift
+ local get_nmarked=$1; shift
local vlan=$1; shift
local limit=$1; shift
local backlog
@@ -416,7 +426,7 @@ ecn_test_common()
RET=0
backlog=$(build_backlog $vlan $((2 * limit / 3)) udp)
check_err $? "Could not build the requested backlog"
- pct=$(check_marking $vlan "== 0")
+ pct=$(check_marking "$get_nmarked" $vlan "== 0")
check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0."
log_test "TC $((vlan - 10)): $name backlog < limit"
@@ -426,22 +436,23 @@ ecn_test_common()
RET=0
backlog=$(build_backlog $vlan $((3 * limit / 2)) tcp tos=0x01)
check_err $? "Could not build the requested backlog"
- pct=$(check_marking $vlan ">= 95")
+ pct=$(check_marking "$get_nmarked" $vlan ">= 95")
check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected >= 95."
log_test "TC $((vlan - 10)): $name backlog > limit"
}
-do_ecn_test()
+__do_ecn_test()
{
+ local get_nmarked=$1; shift
local vlan=$1; shift
local limit=$1; shift
- local name=ECN
+ local name=${1-ECN}; shift
start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) \
$h3_mac tos=0x01
sleep 1
- ecn_test_common "$name" $vlan $limit
+ ecn_test_common "$name" "$get_nmarked" $vlan $limit
# Up there we saw that UDP gets accepted when backlog is below the
# limit. Now that it is above, it should all get dropped, and backlog
@@ -455,6 +466,26 @@ do_ecn_test()
sleep 1
}
+do_ecn_test()
+{
+ local vlan=$1; shift
+ local limit=$1; shift
+
+ __do_ecn_test get_nmarked "$vlan" "$limit"
+}
+
+do_ecn_test_perband()
+{
+ local vlan=$1; shift
+ local limit=$1; shift
+
+ # Per-band ECN counters are not supported on Spectrum-1 and Spectrum-2.
+ [[ "$DEVLINK_VIDDID" == "15b3:cb84" ||
+ "$DEVLINK_VIDDID" == "15b3:cf6c" ]] && return
+
+ __do_ecn_test get_qdisc_nmarked "$vlan" "$limit" "per-band ECN"
+}
+
do_ecn_nodrop_test()
{
local vlan=$1; shift
@@ -465,7 +496,7 @@ do_ecn_nodrop_test()
$h3_mac tos=0x01
sleep 1
- ecn_test_common "$name" $vlan $limit
+ ecn_test_common "$name" get_nmarked $vlan $limit
# Up there we saw that UDP gets accepted when backlog is below the
# limit. Now that it is above, in nodrop mode, make sure it goes to
@@ -495,7 +526,7 @@ do_red_test()
RET=0
backlog=$(build_backlog $vlan $((2 * limit / 3)) tcp tos=0x01)
check_err $? "Could not build the requested backlog"
- pct=$(check_marking $vlan "== 0")
+ pct=$(check_marking get_nmarked $vlan "== 0")
check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0."
log_test "TC $((vlan - 10)): RED backlog < limit"
@@ -503,7 +534,7 @@ do_red_test()
RET=0
backlog=$(build_backlog $vlan $((3 * limit / 2)) tcp tos=0x01)
check_fail $? "Traffic went into backlog instead of being early-dropped"
- pct=$(check_marking $vlan "== 0")
+ pct=$(check_marking get_nmarked $vlan "== 0")
check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0."
local diff=$((limit - backlog))
pct=$((100 * diff / limit))
@@ -544,6 +575,53 @@ do_mc_backlog_test()
log_test "TC $((vlan - 10)): Qdisc reports MC backlog"
}
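+# Check the RED "mark" qevent: below the limit no packets should be marked,
+# so the qevent counter must stay flat; past the limit, ECN-marked packets
+# should hit the qevent block unless $should_fail says otherwise.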
+do_mark_test()
+{
+ local vlan=$1; shift
+ local limit=$1; shift
+ local subtest=$1; shift
+ local fetch_counter=$1; shift
+ local should_fail=$1; shift
+ local base
+
+ RET=0
+
+ start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) \
+ $h3_mac tos=0x01
+
+ # Create a bit of a backlog and observe no mirroring due to marks.
+ qevent_rule_install_$subtest
+
+ build_backlog $vlan $((2 * limit / 3)) tcp tos=0x01 >/dev/null
+
+ base=$($fetch_counter)
+ count=$(busywait 1100 until_counter_is ">= $((base + 1))" \
+ $fetch_counter)
+ check_fail $? "Spurious packets ($base -> $count) observed without buffer pressure"
+
+ # Above limit, everything should be mirrored, we should see lots of
+ # packets.
+ build_backlog $vlan $((3 * limit / 2)) tcp tos=0x01 >/dev/null
+ busywait_for_counter 1100 +10000 \
+ $fetch_counter > /dev/null
+ check_err_fail "$should_fail" $? "ECN-marked packets $subtest'd"
+
+ # When the rule is uninstalled, there should be no mirroring.
+ qevent_rule_uninstall_$subtest
+ busywait_for_counter 1100 +10 \
+ $fetch_counter > /dev/null
+ check_fail $? "Spurious packets observed after uninstall"
+
+ if ((should_fail)); then
+ log_test "TC $((vlan - 10)): marked packets not $subtest'd"
+ else
+ log_test "TC $((vlan - 10)): marked packets $subtest'd"
+ fi
+
+ stop_traffic
+ sleep 1
+}
+
do_drop_test()
{
local vlan=$1; shift
@@ -551,10 +629,8 @@ do_drop_test()
local trigger=$1; shift
local subtest=$1; shift
local fetch_counter=$1; shift
- local backlog
local base
local now
- local pct
RET=0
@@ -628,6 +704,22 @@ do_drop_mirror_test()
tc filter del dev $h2 ingress pref 1 handle 101 flower
}
+do_mark_mirror_test()
+{
+ local vlan=$1; shift
+ local limit=$1; shift
+
+ tc filter add dev $h2 ingress pref 1 handle 101 prot ip \
+ flower skip_sw ip_proto tcp \
+ action drop
+
+ do_mark_test "$vlan" "$limit" mirror \
+ qevent_counter_fetch_mirror \
+ $(: should_fail=)0
+
+ tc filter del dev $h2 ingress pref 1 handle 101 flower
+}
+
qevent_rule_install_trap()
{
tc filter add block 10 pref 1234 handle 102 matchall skip_sw \
@@ -655,3 +747,14 @@ do_drop_trap_test()
do_drop_test "$vlan" "$limit" "$trap_name" trap \
"qevent_counter_fetch_trap $trap_name"
}
+
+qevent_rule_install_trap_fwd()
+{
+ tc filter add block 10 pref 1234 handle 102 matchall skip_sw \
+ action trap_fwd hw_stats disabled
+}
+
+qevent_rule_uninstall_trap_fwd()
+{
+ tc filter del block 10 pref 1234 handle 102 matchall
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
index f3ef3274f9b3..1e5ad3209436 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
@@ -4,11 +4,13 @@
ALL_TESTS="
ping_ipv4
ecn_test
+ ecn_test_perband
ecn_nodrop_test
red_test
mc_backlog_test
red_mirror_test
red_trap_test
+ ecn_mirror_test
"
: ${QDISC:=ets}
source sch_red_core.sh
@@ -21,28 +23,60 @@ source sch_red_core.sh
BACKLOG1=200000
BACKLOG2=500000
-install_qdisc()
+install_root_qdisc()
{
- local -a args=("$@")
-
tc qdisc add dev $swp3 root handle 10: $QDISC \
bands 8 priomap 7 6 5 4 3 2 1 0
+}
+
+install_qdisc_tc0()
+{
+ local -a args=("$@")
+
tc qdisc add dev $swp3 parent 10:8 handle 108: red \
limit 1000000 min $BACKLOG1 max $((BACKLOG1 + 1)) \
probability 1.0 avpkt 8000 burst 38 "${args[@]}"
+}
+
+install_qdisc_tc1()
+{
+ local -a args=("$@")
+
tc qdisc add dev $swp3 parent 10:7 handle 107: red \
limit 1000000 min $BACKLOG2 max $((BACKLOG2 + 1)) \
probability 1.0 avpkt 8000 burst 63 "${args[@]}"
+}
+
+install_qdisc()
+{
+ install_root_qdisc
+ install_qdisc_tc0 "$@"
+ install_qdisc_tc1 "$@"
sleep 1
}
-uninstall_qdisc()
+uninstall_qdisc_tc0()
{
- tc qdisc del dev $swp3 parent 10:7
tc qdisc del dev $swp3 parent 10:8
+}
+
+uninstall_qdisc_tc1()
+{
+ tc qdisc del dev $swp3 parent 10:7
+}
+
+uninstall_root_qdisc()
+{
tc qdisc del dev $swp3 root
}
+uninstall_qdisc()
+{
+ uninstall_qdisc_tc0
+ uninstall_qdisc_tc1
+ uninstall_root_qdisc
+}
+
ecn_test()
{
install_qdisc ecn
@@ -53,6 +87,16 @@ ecn_test()
uninstall_qdisc
}
+ecn_test_perband()
+{
+ install_qdisc ecn
+
+ do_ecn_test_perband 10 $BACKLOG1
+ do_ecn_test_perband 11 $BACKLOG2
+
+ uninstall_qdisc
+}
+
ecn_nodrop_test()
{
install_qdisc ecn nodrop
@@ -112,6 +156,16 @@ red_trap_test()
uninstall_qdisc
}
+ecn_mirror_test()
+{
+ install_qdisc ecn qevent mark block 10
+
+ do_mark_mirror_test 10 $BACKLOG1
+ do_mark_mirror_test 11 $BACKLOG2
+
+ uninstall_qdisc
+}
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
index ede9c38d3eff..d79a82f317d2 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
@@ -4,6 +4,7 @@
ALL_TESTS="
ping_ipv4
ecn_test
+ ecn_test_perband
ecn_nodrop_test
red_test
mc_backlog_test
@@ -35,6 +36,13 @@ ecn_test()
uninstall_qdisc
}
+ecn_test_perband()
+{
+ install_qdisc ecn
+ do_ecn_test_perband 10 $BACKLOG
+ uninstall_qdisc
+}
+
ecn_nodrop_test()
{
install_qdisc ecn nodrop
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/devlink_trap_tunnel_ipip6.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/devlink_trap_tunnel_ipip6.sh
new file mode 100755
index 000000000000..f62ce479c266
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/devlink_trap_tunnel_ipip6.sh
@@ -0,0 +1,250 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test devlink-trap tunnel exceptions functionality over mlxsw.
+# Check all exception traps to make sure they are triggered under the right
+# conditions.
+
+# +-------------------------+
+# | H1 |
+# | $h1 + |
+# | 2001:db8:1::1/64 | |
+# +-------------------|-----+
+# |
+# +-------------------|-----+
+# | SW1 | |
+# | $swp1 + |
+# | 2001:db8:1::2/64 |
+# | |
+# | + g1 (ip6gre) |
+# | loc=2001:db8:3::1 |
+# | rem=2001:db8:3::2 |
+# | tos=inherit |
+# | |
+# | + $rp1 |
+# | | 2001:db8:10::1/64 |
+# +--|----------------------+
+# |
+# +--|----------------------+
+# | | VRF2 |
+# | + $rp2 |
+# | 2001:db8:10::2/64 |
+# +-------------------------+
+
+lib_dir=$(dirname $0)/../../../../net/forwarding
+
+ALL_TESTS="
+ decap_error_test
+"
+
+NUM_NETIFS=4
+source $lib_dir/lib.sh
+source $lib_dir/tc_common.sh
+source $lib_dir/devlink_lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 2001:db8:1::1/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 2001:db8:1::1/64
+}
+
+vrf2_create()
+{
+ simple_if_init $rp2 2001:db8:10::2/64
+}
+
+vrf2_destroy()
+{
+ simple_if_fini $rp2 2001:db8:10::2/64
+}
+
+switch_create()
+{
+ ip link set dev $swp1 up
+ __addr_add_del $swp1 add 2001:db8:1::2/64
+ tc qdisc add dev $swp1 clsact
+
+ tunnel_create g1 ip6gre 2001:db8:3::1 2001:db8:3::2 tos inherit \
+ ttl inherit
+ ip link set dev g1 up
+ __addr_add_del g1 add 2001:db8:3::1/128
+
+ ip link set dev $rp1 up
+ __addr_add_del $rp1 add 2001:db8:10::1/64
+}
+
+switch_destroy()
+{
+ __addr_add_del $rp1 del 2001:db8:10::1/64
+ ip link set dev $rp1 down
+
+ __addr_add_del g1 del 2001:db8:3::1/128
+ ip link set dev g1 down
+ tunnel_destroy g1
+
+ tc qdisc del dev $swp1 clsact
+ __addr_add_del $swp1 del 2001:db8:1::2/64
+ ip link set dev $swp1 down
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ rp1=${NETIFS[p3]}
+ rp2=${NETIFS[p4]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ switch_create
+ vrf2_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ vrf2_destroy
+ switch_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+ipip_payload_get()
+{
+ local saddr="20:01:0d:b8:00:02:00:00:00:00:00:00:00:00:00:01"
+ local daddr="20:01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:01"
+ local flags=$1; shift
+ local key=$1; shift
+
+ p=$(:
+ )"$flags"$( : GRE flags
+ )"0:00:"$( : Reserved + version
+ )"86:dd:"$( : ETH protocol type
+ )"$key"$( : Key
+ )"6"$( : IP version
+ )"0:0"$( : Traffic class
+ )"0:00:00:"$( : Flow label
+ )"00:00:"$( : Payload length
+ )"3a:"$( : Next header
+ )"04:"$( : Hop limit
+ )"$saddr:"$( : IP saddr
+ )"$daddr:"$( : IP daddr
+ )
+ echo $p
+}
+
+ecn_payload_get()
+{
+ echo $(ipip_payload_get "0")
+}
+
+ecn_decap_test()
+{
+ local trap_name="decap_error"
+ local desc=$1; shift
+ local ecn_desc=$1; shift
+ local outer_tos=$1; shift
+ local mz_pid
+
+ RET=0
+
+ tc filter add dev $swp1 egress protocol ipv6 pref 1 handle 101 \
+ flower src_ip 2001:db8:2::1 dst_ip 2001:db8:1::1 skip_sw \
+ action pass
+
+ rp1_mac=$(mac_get $rp1)
+ rp2_mac=$(mac_get $rp2)
+ payload=$(ecn_payload_get)
+
+ ip vrf exec v$rp2 $MZ -6 $rp2 -c 0 -d 1msec -a $rp2_mac -b $rp1_mac \
+ -A 2001:db8:3::2 -B 2001:db8:3::1 -t ip \
+ tos=$outer_tos,next=47,p=$payload -q &
+ mz_pid=$!
+
+ devlink_trap_exception_test $trap_name
+
+ tc_check_packets "dev $swp1 egress" 101 0
+ check_err $? "Packets were not dropped"
+
+ log_test "$desc: Inner ECN is not ECT and outer is $ecn_desc"
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ tc filter del dev $swp1 egress protocol ipv6 pref 1 handle 101 flower
+}
+
+no_matching_tunnel_test()
+{
+ local trap_name="decap_error"
+ local desc=$1; shift
+ local sip=$1; shift
+ local mz_pid
+
+ RET=0
+
+ tc filter add dev $swp1 egress protocol ipv6 pref 1 handle 101 \
+ flower src_ip 2001:db8:2::1 dst_ip 2001:db8:1::1 action pass
+
+ rp1_mac=$(mac_get $rp1)
+ rp2_mac=$(mac_get $rp2)
+ payload=$(ipip_payload_get "$@")
+
+ ip vrf exec v$rp2 $MZ -6 $rp2 -c 0 -d 1msec -a $rp2_mac -b $rp1_mac \
+ -A $sip -B 2001:db8:3::1 -t ip next=47,p=$payload -q &
+ mz_pid=$!
+
+ devlink_trap_exception_test $trap_name
+
+ tc_check_packets "dev $swp1 egress" 101 0
+ check_err $? "Packets were not dropped"
+
+ log_test "$desc"
+
+ kill $mz_pid && wait $mz_pid &> /dev/null
+ tc filter del dev $swp1 egress protocol ipv6 pref 1 handle 101 flower
+}
+
+decap_error_test()
+{
+ # Correct source IP - the remote address
+ local sip=2001:db8:3::2
+
+ ecn_decap_test "Decap error" "ECT(1)" 01
+ ecn_decap_test "Decap error" "ECT(0)" 02
+ ecn_decap_test "Decap error" "CE" 03
+
+ no_matching_tunnel_test "Decap error: Source IP check failed" \
+ 2001:db8:4::2 "0"
+ no_matching_tunnel_test \
+ "Decap error: Key exists but was not expected" $sip "2" \
+ "00:00:00:E9:"
+
+ # Destroy the tunnel and create new one with key
+ __addr_add_del g1 del 2001:db8:3::1/128
+ tunnel_destroy g1
+
+ tunnel_create g1 ip6gre 2001:db8:3::1 2001:db8:3::2 tos inherit \
+ ttl inherit key 233
+ __addr_add_del g1 add 2001:db8:3::1/128
+
+ no_matching_tunnel_test \
+ "Decap error: Key does not exist but was expected" $sip "0"
+ no_matching_tunnel_test \
+ "Decap error: Packet has a wrong key field" $sip "2" \
+ "00:00:00:E8:"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
index beee0d5646a6..eaf8a04a7ca5 100755
--- a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
+++ b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
@@ -1,6 +1,6 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-# Copyright 2020 NXP Semiconductors
+# Copyright 2020 NXP
WAIT_TIME=1
NUM_NETIFS=4
@@ -156,6 +156,11 @@ create_tcam_skeleton()
setup_prepare()
{
+ ip link set $eth0 up
+ ip link set $eth1 up
+ ip link set $eth2 up
+ ip link set $eth3 up
+
create_tcam_skeleton $eth0
ip link add br0 type bridge
@@ -242,9 +247,9 @@ test_vlan_push()
tcpdump_cleanup
}
-test_vlan_modify()
+test_vlan_ingress_modify()
{
- printf "Testing VLAN modification.. "
+ printf "Testing ingress VLAN modification.. "
ip link set br0 type bridge vlan_filtering 1
bridge vlan add dev $eth0 vid 200
@@ -280,6 +285,44 @@ test_vlan_modify()
ip link set br0 type bridge vlan_filtering 0
}
+test_vlan_egress_modify()
+{
+ printf "Testing egress VLAN modification.. "
+
+ tc qdisc add dev $eth1 clsact
+
+ ip link set br0 type bridge vlan_filtering 1
+ bridge vlan add dev $eth0 vid 200
+ bridge vlan add dev $eth1 vid 200
+
+ tc filter add dev $eth1 egress chain $(ES0) pref 3 \
+ protocol 802.1Q flower skip_sw vlan_id 200 vlan_prio 0 \
+ action vlan modify id 300 priority 7
+
+ tcpdump_start $eth2
+
+ $MZ $eth3.200 -q -c 1 -p 64 -a $eth3_mac -b $eth2_mac -t ip
+
+ sleep 1
+
+ tcpdump_stop
+
+ if tcpdump_show | grep -q "$eth3_mac > $eth2_mac, .* vlan 300"; then
+ echo "OK"
+ else
+ echo "FAIL"
+ fi
+
+ tcpdump_cleanup
+
+ tc filter del dev $eth1 egress chain $(ES0) pref 3
+ tc qdisc del dev $eth1 clsact
+
+ bridge vlan del dev $eth0 vid 200
+ bridge vlan del dev $eth1 vid 200
+ ip link set br0 type bridge vlan_filtering 0
+}
+
test_skbedit_priority()
{
local num_pkts=100
@@ -304,7 +347,8 @@ trap cleanup EXIT
ALL_TESTS="
test_vlan_pop
test_vlan_push
- test_vlan_modify
+ test_vlan_ingress_modify
+ test_vlan_egress_modify
test_skbedit_priority
"
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc
index 5f5b2ba3e557..60c02b482be8 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc
@@ -11,8 +11,8 @@ SYSTEM="syscalls"
EVENT="sys_enter_openat"
FIELD="filename"
EPROBE="eprobe_open"
-
-echo "e:$EPROBE $SYSTEM/$EVENT file=+0(\$filename):ustring" >> dynamic_events
+OPTIONS="file=+0(\$filename):ustring"
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
grep -q "$EPROBE" dynamic_events
test -d events/eprobes/$EPROBE
@@ -37,4 +37,54 @@ echo "-:$EPROBE" >> dynamic_events
! grep -q "$EPROBE" dynamic_events
! test -d events/eprobes/$EPROBE
+# test various ways to remove the probe (already tested with just event name)
+
+# With group name
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+grep -q "$EPROBE" dynamic_events
+test -d events/eprobes/$EPROBE
+echo "-:eprobes/$EPROBE" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
+# With group name and system/event
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+grep -q "$EPROBE" dynamic_events
+test -d events/eprobes/$EPROBE
+echo "-:eprobes/$EPROBE $SYSTEM/$EVENT" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
+# With just event name and system/event
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+grep -q "$EPROBE" dynamic_events
+test -d events/eprobes/$EPROBE
+echo "-:$EPROBE $SYSTEM/$EVENT" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
+# With just event name and system/event and options
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+grep -q "$EPROBE" dynamic_events
+test -d events/eprobes/$EPROBE
+echo "-:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
+# With group name and system/event and options
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+grep -q "$EPROBE" dynamic_events
+test -d events/eprobes/$EPROBE
+echo "-:eprobes/$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
+# Finally make sure what is in the dynamic_events file clears it too
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+LINE=`sed -e "/$EPROBE/s/^e/-/" < dynamic_events`
+test -d events/eprobes/$EPROBE
+echo "$LINE" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
clear_trace
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 98053d3afbda..b8dbabe24ac2 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -24,6 +24,7 @@
/x86_64/smm_test
/x86_64/state_test
/x86_64/svm_vmcall_test
+/x86_64/svm_int_ctl_test
/x86_64/sync_regs_test
/x86_64/tsc_msrs_test
/x86_64/userspace_msr_exit_test
@@ -48,6 +49,7 @@
/kvm_page_table_test
/memslot_modification_stress_test
/memslot_perf_test
+/rseq_test
/set_memory_region_test
/steal_time
/kvm_binary_stats_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 5d05801ab816..d1774f461393 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -56,6 +56,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/smm_test
TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
+TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
@@ -80,6 +81,7 @@ TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
TEST_GEN_PROGS_x86_64 += kvm_page_table_test
TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test
TEST_GEN_PROGS_x86_64 += memslot_perf_test
+TEST_GEN_PROGS_x86_64 += rseq_test
TEST_GEN_PROGS_x86_64 += set_memory_region_test
TEST_GEN_PROGS_x86_64 += steal_time
TEST_GEN_PROGS_x86_64 += kvm_binary_stats_test
@@ -93,6 +95,7 @@ TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
TEST_GEN_PROGS_aarch64 += kvm_page_table_test
+TEST_GEN_PROGS_aarch64 += rseq_test
TEST_GEN_PROGS_aarch64 += set_memory_region_test
TEST_GEN_PROGS_aarch64 += steal_time
TEST_GEN_PROGS_aarch64 += kvm_binary_stats_test
@@ -104,6 +107,7 @@ TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
TEST_GEN_PROGS_s390x += kvm_page_table_test
+TEST_GEN_PROGS_s390x += rseq_test
TEST_GEN_PROGS_s390x += set_memory_region_test
TEST_GEN_PROGS_s390x += kvm_binary_stats_test
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
index 71e277c7c3f3..5d95113c7b7c 100644
--- a/tools/testing/selftests/kvm/access_tracking_perf_test.c
+++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -371,9 +371,7 @@ static void help(char *name)
printf(" -v: specify the number of vCPUs to run.\n");
printf(" -o: Overlap guest memory accesses instead of partitioning\n"
" them into a separate region of memory for each vCPU.\n");
- printf(" -s: specify the type of memory that should be used to\n"
- " back the guest data region.\n\n");
- backing_src_help();
+ backing_src_help("-s");
puts("");
exit(0);
}
@@ -381,7 +379,7 @@ static void help(char *name)
int main(int argc, char *argv[])
{
struct test_params params = {
- .backing_src = VM_MEM_SRC_ANONYMOUS,
+ .backing_src = DEFAULT_VM_MEM_SRC,
.vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
.vcpus = 1,
};
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index e79c1b64977f..1510b21e6306 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -179,7 +179,7 @@ static void *uffd_handler_thread_fn(void *arg)
return NULL;
}
- if (!pollfd[0].revents & POLLIN)
+ if (!(pollfd[0].revents & POLLIN))
continue;
r = read(uffd, &msg, sizeof(msg));
@@ -416,7 +416,7 @@ static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n"
- " [-b memory] [-t type] [-v vcpus] [-o]\n", name);
+ " [-b memory] [-s type] [-v vcpus] [-o]\n", name);
guest_modes_help();
printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"
" UFFD registration mode: 'MISSING' or 'MINOR'.\n");
@@ -426,8 +426,7 @@ static void help(char *name)
printf(" -b: specify the size of the memory region which should be\n"
" demand paged by each vCPU. e.g. 10M or 3G.\n"
" Default: 1G\n");
- printf(" -t: The type of backing memory to use. Default: anonymous\n");
- backing_src_help();
+ backing_src_help("-s");
printf(" -v: specify the number of vCPUs to run.\n");
printf(" -o: Overlap guest memory accesses instead of partitioning\n"
" them into a separate region of memory for each vCPU.\n");
@@ -439,14 +438,14 @@ int main(int argc, char *argv[])
{
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
struct test_params p = {
- .src_type = VM_MEM_SRC_ANONYMOUS,
+ .src_type = DEFAULT_VM_MEM_SRC,
.partition_vcpu_memory_access = true,
};
int opt;
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "hm:u:d:b:t:v:o")) != -1) {
+ while ((opt = getopt(argc, argv, "hm:u:d:b:s:v:o")) != -1) {
switch (opt) {
case 'm':
guest_modes_cmdline(optarg);
@@ -465,7 +464,7 @@ int main(int argc, char *argv[])
case 'b':
guest_percpu_mem_size = parse_size(optarg);
break;
- case 't':
+ case 's':
p.src_type = parse_backing_src_type(optarg);
break;
case 'v':
@@ -485,7 +484,7 @@ int main(int argc, char *argv[])
if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&
!backing_src_is_shared(p.src_type)) {
- TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -t");
+ TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -s");
}
for_each_guest_mode(run_test, &p);
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 479868570d59..7ffab5bd5ce5 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -118,42 +118,64 @@ static inline void disable_dirty_logging(struct kvm_vm *vm, int slots)
toggle_dirty_logging(vm, slots, false);
}
-static void get_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap,
- uint64_t nr_pages)
+static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
{
- uint64_t slot_pages = nr_pages / slots;
int i;
for (i = 0; i < slots; i++) {
int slot = PERF_TEST_MEM_SLOT_INDEX + i;
- unsigned long *slot_bitmap = bitmap + i * slot_pages;
- kvm_vm_get_dirty_log(vm, slot, slot_bitmap);
+ kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
}
}
-static void clear_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap,
- uint64_t nr_pages)
+static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
+ int slots, uint64_t pages_per_slot)
{
- uint64_t slot_pages = nr_pages / slots;
int i;
for (i = 0; i < slots; i++) {
int slot = PERF_TEST_MEM_SLOT_INDEX + i;
- unsigned long *slot_bitmap = bitmap + i * slot_pages;
- kvm_vm_clear_dirty_log(vm, slot, slot_bitmap, 0, slot_pages);
+ kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
}
}
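+/*
+ * With multiple memslots (-x), dirty state is tracked with one bitmap per
+ * slot instead of one bitmap covering all of guest memory.
+ */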
+static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)
+{
+ unsigned long **bitmaps;
+ int i;
+
+ bitmaps = malloc(slots * sizeof(bitmaps[0]));
+ TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");
+
+ for (i = 0; i < slots; i++) {
+ bitmaps[i] = bitmap_zalloc(pages_per_slot);
+ TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
+ }
+
+ return bitmaps;
+}
+
+static void free_bitmaps(unsigned long *bitmaps[], int slots)
+{
+ int i;
+
+ for (i = 0; i < slots; i++)
+ free(bitmaps[i]);
+
+ free(bitmaps);
+}
+
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = arg;
pthread_t *vcpu_threads;
struct kvm_vm *vm;
- unsigned long *bmap;
+ unsigned long **bitmaps;
uint64_t guest_num_pages;
uint64_t host_num_pages;
+ uint64_t pages_per_slot;
int vcpu_id;
struct timespec start;
struct timespec ts_diff;
@@ -171,7 +193,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
- bmap = bitmap_zalloc(host_num_pages);
+ pages_per_slot = host_num_pages / p->slots;
+
+ bitmaps = alloc_bitmaps(p->slots, pages_per_slot);
if (dirty_log_manual_caps) {
cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
@@ -239,7 +263,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
clock_gettime(CLOCK_MONOTONIC, &start);
- get_dirty_log(vm, p->slots, bmap, host_num_pages);
+ get_dirty_log(vm, bitmaps, p->slots);
ts_diff = timespec_elapsed(start);
get_dirty_log_total = timespec_add(get_dirty_log_total,
ts_diff);
@@ -248,7 +272,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
if (dirty_log_manual_caps) {
clock_gettime(CLOCK_MONOTONIC, &start);
- clear_dirty_log(vm, p->slots, bmap, host_num_pages);
+ clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);
ts_diff = timespec_elapsed(start);
clear_dirty_log_total = timespec_add(clear_dirty_log_total,
ts_diff);
@@ -281,7 +305,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}
- free(bmap);
+ free_bitmaps(bitmaps, p->slots);
free(vcpu_threads);
perf_test_destroy_vm(vm);
}
@@ -308,11 +332,9 @@ static void help(char *name)
printf(" -v: specify the number of vCPUs to run.\n");
printf(" -o: Overlap guest memory accesses instead of partitioning\n"
" them into a separate region of memory for each vCPU.\n");
- printf(" -s: specify the type of memory that should be used to\n"
- " back the guest data region.\n\n");
+ backing_src_help("-s");
printf(" -x: Split the memory region into this number of memslots.\n"
- " (default: 1)");
- backing_src_help();
+ " (default: 1)\n");
puts("");
exit(0);
}
@@ -324,7 +346,7 @@ int main(int argc, char *argv[])
.iterations = TEST_HOST_LOOP_N,
.wr_fract = 1,
.partition_vcpu_memory_access = true,
- .backing_src = VM_MEM_SRC_ANONYMOUS,
+ .backing_src = DEFAULT_VM_MEM_SRC,
.slots = 1,
};
int opt;
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index d79be15dd3d2..f8fddc84c0d3 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -90,18 +90,23 @@ enum vm_mem_backing_src_type {
NUM_SRC_TYPES,
};
+#define DEFAULT_VM_MEM_SRC VM_MEM_SRC_ANONYMOUS
+
struct vm_mem_backing_src_alias {
const char *name;
uint32_t flag;
};
+#define MIN_RUN_DELAY_NS 200000UL
+
bool thp_configured(void);
size_t get_trans_hugepagesz(void);
size_t get_def_hugetlb_pagesz(void);
const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i);
size_t get_backing_src_pagesz(uint32_t i);
-void backing_src_help(void);
+void backing_src_help(const char *flag);
enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
+long get_run_delay(void);
/*
* Whether or not the given source type is shared memory (as opposed to
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 242ae8e09a65..05e65ca1c30c 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -312,37 +312,37 @@ static inline void set_xmm(int n, unsigned long val)
}
}
-typedef unsigned long v1di __attribute__ ((vector_size (8)));
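+/*
+ * Read an XMM register through a statement expression: MOVQ copies the low
+ * 64 bits of the named register into a general purpose register, which is
+ * all the callers below care about.
+ */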
+#define GET_XMM(__xmm) \
+({ \
+ unsigned long __val; \
+ asm volatile("movq %%"#__xmm", %0" : "=r"(__val)); \
+ __val; \
+})
+
static inline unsigned long get_xmm(int n)
{
assert(n >= 0 && n <= 7);
- register v1di xmm0 __asm__("%xmm0");
- register v1di xmm1 __asm__("%xmm1");
- register v1di xmm2 __asm__("%xmm2");
- register v1di xmm3 __asm__("%xmm3");
- register v1di xmm4 __asm__("%xmm4");
- register v1di xmm5 __asm__("%xmm5");
- register v1di xmm6 __asm__("%xmm6");
- register v1di xmm7 __asm__("%xmm7");
switch (n) {
case 0:
- return (unsigned long)xmm0;
+ return GET_XMM(xmm0);
case 1:
- return (unsigned long)xmm1;
+ return GET_XMM(xmm1);
case 2:
- return (unsigned long)xmm2;
+ return GET_XMM(xmm2);
case 3:
- return (unsigned long)xmm3;
+ return GET_XMM(xmm3);
case 4:
- return (unsigned long)xmm4;
+ return GET_XMM(xmm4);
case 5:
- return (unsigned long)xmm5;
+ return GET_XMM(xmm5);
case 6:
- return (unsigned long)xmm6;
+ return GET_XMM(xmm6);
case 7:
- return (unsigned long)xmm7;
+ return GET_XMM(xmm7);
}
+
+ /* never reached */
return 0;
}
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index 0d04a7db7f24..36407cb0ec85 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -456,10 +456,7 @@ static void help(char *name)
" (default: 1G)\n");
printf(" -v: specify the number of vCPUs to run\n"
" (default: 1)\n");
- printf(" -s: specify the type of memory that should be used to\n"
- " back the guest data region.\n"
- " (default: anonymous)\n\n");
- backing_src_help();
+ backing_src_help("-s");
puts("");
}
@@ -468,7 +465,7 @@ int main(int argc, char *argv[])
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
struct test_params p = {
.test_mem_size = DEFAULT_TEST_MEM_SIZE,
- .src_type = VM_MEM_SRC_ANONYMOUS,
+ .src_type = DEFAULT_VM_MEM_SRC,
};
int opt;
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index af1031fed97f..b72429108993 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -11,6 +11,7 @@
#include <stdlib.h>
#include <time.h>
#include <sys/stat.h>
+#include <sys/syscall.h>
#include <linux/mman.h>
#include "linux/kernel.h"
@@ -129,13 +130,16 @@ size_t get_trans_hugepagesz(void)
{
size_t size;
FILE *f;
+ int ret;
TEST_ASSERT(thp_configured(), "THP is not configured in host kernel");
f = fopen("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", "r");
TEST_ASSERT(f != NULL, "Error in opening transparent_hugepage/hpage_pmd_size");
- fscanf(f, "%ld", &size);
+ ret = fscanf(f, "%ld", &size);
+ ret = fscanf(f, "%ld", &size);
+ TEST_ASSERT(ret < 1, "Error reading transparent_hugepage/hpage_pmd_size");
fclose(f);
return size;
@@ -279,13 +283,22 @@ size_t get_backing_src_pagesz(uint32_t i)
}
}
-void backing_src_help(void)
+static void print_available_backing_src_types(const char *prefix)
{
int i;
- printf("Available backing src types:\n");
+ printf("%sAvailable backing src types:\n", prefix);
+
for (i = 0; i < NUM_SRC_TYPES; i++)
- printf("\t%s\n", vm_mem_backing_src_alias(i)->name);
+ printf("%s %s\n", prefix, vm_mem_backing_src_alias(i)->name);
+}
+
+void backing_src_help(const char *flag)
+{
+ printf(" %s: specify the type of memory that should be used to\n"
+ " back the guest data region. (default: %s)\n",
+ flag, vm_mem_backing_src_alias(DEFAULT_VM_MEM_SRC)->name);
+ print_available_backing_src_types(" ");
}
enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name)
@@ -296,7 +309,23 @@ enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name)
if (!strcmp(type_name, vm_mem_backing_src_alias(i)->name))
return i;
- backing_src_help();
+ print_available_backing_src_types("");
TEST_FAIL("Unknown backing src type: %s", type_name);
return -1;
}
+
+long get_run_delay(void)
+{
+ char path[64];
+ long val[2];
+ FILE *fp;
+
+ sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
+ fp = fopen(path, "r");
+ /* Return MIN_RUN_DELAY_NS upon failure just to be safe */
+ if (fscanf(fp, "%ld %ld ", &val[0], &val[1]) < 2)
+ val[1] = MIN_RUN_DELAY_NS;
+ fclose(fp);
+
+ return val[1];
+}
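get_run_delay() keys off /proc/<tid>/schedstat, whose three fields are time spent on the CPU (ns), time spent waiting on a runqueue (ns, the "run delay"), and the number of timeslices, per Documentation/scheduler/sched-stats.rst; the helper wants the second field. A stand-alone sketch of the same parsing (the main() harness is illustrative, not part of the patch):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	char path[64];
	long on_cpu, run_delay;
	FILE *fp;

	snprintf(path, sizeof(path), "/proc/%ld/schedstat",
		 syscall(SYS_gettid));
	fp = fopen(path, "r");
	/* First field is on-CPU time, second is run-queue wait time. */
	if (!fp || fscanf(fp, "%ld %ld", &on_cpu, &run_delay) < 2)
		return 1;
	fclose(fp);

	printf("run delay so far: %ld ns\n", run_delay);
	return 0;
}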
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
new file mode 100644
index 000000000000..4158da0da2bb
--- /dev/null
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <syscall.h>
+#include <sys/ioctl.h>
+#include <sys/sysinfo.h>
+#include <asm/barrier.h>
+#include <linux/atomic.h>
+#include <linux/rseq.h>
+#include <linux/unistd.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+
+#define VCPU_ID 0
+
+static __thread volatile struct rseq __rseq = {
+ .cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
+};
+
+/*
+ * Use an arbitrary, bogus signature for configuring rseq, this test does not
+ * actually enter an rseq critical section.
+ */
+#define RSEQ_SIG 0xdeadbeef
+
+/*
+ * Any bug related to task migration is likely to be timing-dependent; perform
+ * a large number of migrations to reduce the odds of a false negative.
+ */
+#define NR_TASK_MIGRATIONS 100000
+
+static pthread_t migration_thread;
+static cpu_set_t possible_mask;
+static int min_cpu, max_cpu;
+static bool done;
+
+static atomic_t seq_cnt;
+
+static void guest_code(void)
+{
+ for (;;)
+ GUEST_SYNC(0);
+}
+
+static void sys_rseq(int flags)
+{
+ int r;
+
+ r = syscall(__NR_rseq, &__rseq, sizeof(__rseq), flags, RSEQ_SIG);
+ TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
+}
+
+static int next_cpu(int cpu)
+{
+ /*
+ * Advance to the next CPU, skipping those that weren't in the original
+ * affinity set. Sadly, there is no CPU_SET_FOR_EACH, and cpu_set_t's
+ * data storage is considered opaque. Note, if this task is pinned
+ * to a small set of discontiguous CPUs, e.g. 2 and 1023, this loop will
+ * burn a lot of cycles and the test will take longer than normal to
+ * complete.
+ */
+ do {
+ cpu++;
+ if (cpu > max_cpu) {
+ cpu = min_cpu;
+ TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),
+ "Min CPU = %d must always be usable", cpu);
+ break;
+ }
+ } while (!CPU_ISSET(cpu, &possible_mask));
+
+ return cpu;
+}
+
+static void *migration_worker(void *ign)
+{
+ cpu_set_t allowed_mask;
+ int r, i, cpu;
+
+ CPU_ZERO(&allowed_mask);
+
+ for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
+ CPU_SET(cpu, &allowed_mask);
+
+ /*
+ * Bump the sequence count twice to allow the reader to detect
+ * that a migration may have occurred in between rseq and sched
+ * CPU ID reads. An odd sequence count indicates a migration
+ * is in-progress, while a completely different count indicates
+ * a migration occurred since the count was last read.
+ */
+ atomic_inc(&seq_cnt);
+
+ /*
+ * Ensure the odd count is visible while sched_getcpu() isn't
+ * stable, i.e. while changing affinity is in-progress.
+ */
+ smp_wmb();
+ r = sched_setaffinity(0, sizeof(allowed_mask), &allowed_mask);
+ TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)",
+ errno, strerror(errno));
+ smp_wmb();
+ atomic_inc(&seq_cnt);
+
+ CPU_CLR(cpu, &allowed_mask);
+
+ /*
+ * Wait 1-10us before proceeding to the next iteration and more
+ * specifically, before bumping seq_cnt again. A delay is
+ * needed on three fronts:
+ *
+ * 1. To allow sched_setaffinity() to prompt migration before
+ * ioctl(KVM_RUN) enters the guest so that TIF_NOTIFY_RESUME
+ * (or TIF_NEED_RESCHED, which indirectly leads to handling
+ * NOTIFY_RESUME) is handled in KVM context.
+ *
+ * If NOTIFY_RESUME/NEED_RESCHED is set after KVM enters
+ * the guest, the guest will trigger an IO/MMIO exit all the
+ * way to userspace and the TIF flags will be handled by
+ * the generic "exit to userspace" logic, not by KVM. The
+ * exit to userspace is necessary to give the test a chance
+ * to check the rseq CPU ID (see #2).
+ *
+ * Alternatively, guest_code() could include an instruction
+ * to trigger an exit that is handled by KVM, but any such
+ * exit requires architecture specific code.
+ *
+ * 2. To let ioctl(KVM_RUN) make its way back to the test
+ * before the next round of migration. The test's check on
+ * the rseq CPU ID must wait for migration to complete in
+ * order to avoid false positives, thus any kernel rseq bug
+ * will be missed if the next migration starts before the
+ * check completes.
+ *
+ * 3. To ensure the read-side makes efficient forward progress,
+ * e.g. if sched_getcpu() involves a syscall. Stalling the
+ * read-side means the test will spend more time waiting for
+ * sched_getcpu() to stabilize and less time trying to hit
+ * the timing-dependent bug.
+ *
+ * Because any bug in this area is likely to be timing-dependent,
+ * run with a range of delays at 1us intervals from 1us to 10us
+ * as a best effort to avoid tuning the test to the point where
+ * it can hit _only_ the original bug and not detect future
+ * regressions.
+ *
+ * The original bug can reproduce with a delay up to ~500us on
+ * x86-64, but starts to require more iterations to reproduce
+ * as the delay creeps above ~10us, and the average runtime of
+ * each iteration obviously increases as well. Cap the delay
+ * at 10us to keep test runtime reasonable while minimizing
+ * potential coverage loss.
+ *
+ * The lower bound for reproducing the bug is likely below 1us,
+ * e.g. failures occur on x86-64 with nanosleep(0), but at that
+ * point the overhead of the syscall likely dominates the delay.
+ * Use usleep() for simplicity and to avoid unnecessary kernel
+ * dependencies.
+ */
+ usleep((i % 10) + 1);
+ }
+ done = true;
+ return NULL;
+}
+
+static int calc_min_max_cpu(void)
+{
+ int i, cnt, nproc;
+
+ if (CPU_COUNT(&possible_mask) < 2)
+ return -EINVAL;
+
+ /*
+ * CPU_SET doesn't provide a FOR_EACH helper; get the min/max CPU that
+ * this task is affined to in order to reduce the time spent querying
+ * unusable CPUs, e.g. if this task is pinned to a small percentage of
+ * total CPUs.
+ */
+ nproc = get_nprocs_conf();
+ min_cpu = -1;
+ max_cpu = -1;
+ cnt = 0;
+
+ for (i = 0; i < nproc; i++) {
+ if (!CPU_ISSET(i, &possible_mask))
+ continue;
+ if (min_cpu == -1)
+ min_cpu = i;
+ max_cpu = i;
+ cnt++;
+ }
+
+ return (cnt < 2) ? -EINVAL : 0;
+}
+
+int main(int argc, char *argv[])
+{
+ int r, i, snapshot;
+ struct kvm_vm *vm;
+ u32 cpu, rseq_cpu;
+
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
+ TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
+ strerror(errno));
+
+ if (calc_min_max_cpu()) {
+ print_skip("Only one usable CPU, task migration not possible");
+ exit(KSFT_SKIP);
+ }
+
+ sys_rseq(0);
+
+ /*
+ * Create and run a dummy VM that immediately exits to userspace via
+ * GUEST_SYNC, while concurrently migrating the process by setting its
+ * CPU affinity.
+ */
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+ ucall_init(vm, NULL);
+
+ pthread_create(&migration_thread, NULL, migration_worker, 0);
+
+ for (i = 0; !done; i++) {
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
+ "Guest failed?");
+
+ /*
+ * Verify rseq's CPU matches sched's CPU. Ensure migration
+ * doesn't occur between sched_getcpu() and reading the rseq
+ * cpu_id by rereading both if the sequence count changes, or
+ * if the count is odd (migration in-progress).
+ */
+ do {
+ /*
+ * Drop bit 0 to force a mismatch if the count is odd,
+ * i.e. if a migration is in-progress.
+ */
+ snapshot = atomic_read(&seq_cnt) & ~1;
+
+ /*
+ * Ensure reading sched_getcpu() and rseq.cpu_id
+ * complete in a single "no migration" window, i.e. are
+ * not reordered across the seq_cnt reads.
+ */
+ smp_rmb();
+ cpu = sched_getcpu();
+ rseq_cpu = READ_ONCE(__rseq.cpu_id);
+ smp_rmb();
+ } while (snapshot != atomic_read(&seq_cnt));
+
+ TEST_ASSERT(rseq_cpu == cpu,
+ "rseq CPU = %d, sched CPU = %d\n", rseq_cpu, cpu);
+ }
+
+ /*
+ * Sanity check that the test was able to enter the guest a reasonable
+ * number of times, e.g. didn't get stalled too often/long waiting for
+ * sched_getcpu() to stabilize. A 2:1 migration:KVM_RUN ratio is a
+ * fairly conservative ratio on x86-64, which can do _more_ KVM_RUNs
+ * than migrations given the 1us+ delay in the migration task.
+ */
+ TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2),
+ "Only performed %d KVM_RUNs, task stalled too much?\n", i);
+
+ pthread_join(migration_thread, NULL);
+
+ kvm_vm_free(vm);
+
+ sys_rseq(RSEQ_FLAG_UNREGISTER);
+
+ return 0;
+}
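The heart of rseq_test is the even/odd sequence-count handshake between migration_worker() and the main loop. Below is a stand-alone sketch of that protocol in its classic seqlock shape; C11 atomics stand in for the test's atomic_inc()/smp_wmb()/smp_rmb(), and the single-threaded main() only shows the shape of the handshake, it is not a concurrency proof:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;
static int data_a, data_b;

static void writer_update(int a, int b)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* odd: in progress */
	data_a = a;
	data_b = b;
	atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* even: complete */
}

static void reader_snapshot(int *a, int *b)
{
	unsigned int snap;

	do {
		/* Drop bit 0 so an odd (in-progress) count never matches. */
		snap = atomic_load_explicit(&seq, memory_order_acquire) & ~1u;
		*a = data_a;
		*b = data_b;
		atomic_thread_fence(memory_order_acquire);
	} while (snap != atomic_load_explicit(&seq, memory_order_relaxed));
}

int main(void)
{
	int a, b;

	writer_update(1, 2);
	reader_snapshot(&a, &b);
	printf("a=%d b=%d\n", a, b);
	return 0;
}

The reader's retry loop is exactly what the test's main loop does around sched_getcpu() and the rseq cpu_id read.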
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index ecec30865a74..62f2eb9ee3d5 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -10,7 +10,6 @@
#include <sched.h>
#include <pthread.h>
#include <linux/kernel.h>
-#include <sys/syscall.h>
#include <asm/kvm.h>
#include <asm/kvm_para.h>
@@ -20,7 +19,6 @@
#define NR_VCPUS 4
#define ST_GPA_BASE (1 << 30)
-#define MIN_RUN_DELAY_NS 200000UL
static void *st_gva[NR_VCPUS];
static uint64_t guest_stolen_time[NR_VCPUS];
@@ -118,12 +116,12 @@ struct st_time {
uint64_t st_time;
};
-static int64_t smccc(uint32_t func, uint32_t arg)
+static int64_t smccc(uint32_t func, uint64_t arg)
{
unsigned long ret;
asm volatile(
- "mov x0, %1\n"
+ "mov w0, %w1\n"
"mov x1, %2\n"
"hvc #0\n"
"mov %0, x0\n"
@@ -217,20 +215,6 @@ static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpuid)
#endif
-static long get_run_delay(void)
-{
- char path[64];
- long val[2];
- FILE *fp;
-
- sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
- fp = fopen(path, "r");
- fscanf(fp, "%ld %ld ", &val[0], &val[1]);
- fclose(fp);
-
- return val[1];
-}
-
static void *do_steal_time(void *arg)
{
struct timespec ts, stop;
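The smccc() tweak above matters because the SMCCC convention passes the 32-bit function ID in w0; "mov w0, %w1" writes exactly that register (zero-extending the upper half), whereas the old "mov x0, %1" assumed a 64-bit source operand. A hedged sketch of the same call done with register-pinned operands instead of explicit moves (assumes an aarch64 compiler; not a drop-in for the test, and the clobber list is trimmed for brevity):

#include <stdint.h>

static int64_t hvc_call(uint32_t func, uint64_t arg)
{
	/* Assigning the 32-bit ID to a 64-bit variable zero-extends it,
	 * so x0's upper half is clear -- same effect as "mov w0, %w1".
	 */
	register uint64_t x0 asm("x0") = func;
	register uint64_t x1 asm("x1") = arg;

	asm volatile("hvc #0"
		     : "+r"(x0)
		     : "r"(x1)
		     : "x2", "x3", "memory");
	return x0;
}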
diff --git a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
index e6480fd5c4bd..8039e1eff938 100644
--- a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
+++ b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
@@ -82,7 +82,8 @@ int get_warnings_count(void)
FILE *f;
f = popen("dmesg | grep \"WARNING:\" | wc -l", "r");
- fscanf(f, "%d", &warnings);
+ if (fscanf(f, "%d", &warnings) < 1)
+ warnings = 0;
fclose(f);
return warnings;
diff --git a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
new file mode 100644
index 000000000000..df04f56ce859
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * svm_int_ctl_test
+ *
+ * Copyright (C) 2021, Red Hat, Inc.
+ *
+ * Nested SVM testing: test simultaneous use of V_IRQ from L1 and L0.
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+#include "apic.h"
+
+#define VCPU_ID 0
+
+static struct kvm_vm *vm;
+
+bool vintr_irq_called;
+bool intr_irq_called;
+
+#define VINTR_IRQ_NUMBER 0x20
+#define INTR_IRQ_NUMBER 0x30
+
+static void vintr_irq_handler(struct ex_regs *regs)
+{
+ vintr_irq_called = true;
+}
+
+static void intr_irq_handler(struct ex_regs *regs)
+{
+ x2apic_write_reg(APIC_EOI, 0x00);
+ intr_irq_called = true;
+}
+
+static void l2_guest_code(struct svm_test_data *svm)
+{
+ /*
+ * This code raises interrupt INTR_IRQ_NUMBER in L1's LAPIC,
+ * and since L1 didn't enable virtual interrupt masking,
+ * L2 should receive it and not L1.
+ *
+ * L2 also has virtual interrupt 'VINTR_IRQ_NUMBER' pending in V_IRQ
+ * so it should also receive it after the following 'sti'.
+ */
+ x2apic_write_reg(APIC_ICR,
+ APIC_DEST_SELF | APIC_INT_ASSERT | INTR_IRQ_NUMBER);
+
+ __asm__ __volatile__(
+ "sti\n"
+ "nop\n"
+ );
+
+ GUEST_ASSERT(vintr_irq_called);
+ GUEST_ASSERT(intr_irq_called);
+
+ __asm__ __volatile__(
+ "vmcall\n"
+ );
+}
+
+static void l1_guest_code(struct svm_test_data *svm)
+{
+ #define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ struct vmcb *vmcb = svm->vmcb;
+
+ x2apic_enable();
+
+ /* Prepare for L2 execution. */
+ generic_svm_setup(svm, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ /* No virtual interrupt masking */
+ vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
+
+ /* No intercepts for real and virtual interrupts */
+ vmcb->control.intercept &= ~((1ULL << INTERCEPT_INTR) |
+ (1ULL << INTERCEPT_VINTR));
+
+ /* Make a virtual interrupt VINTR_IRQ_NUMBER pending */
+ vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT);
+ vmcb->control.int_vector = VINTR_IRQ_NUMBER;
+
+ run_guest(vmcb, svm->vmcb_gpa);
+ GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ vm_vaddr_t svm_gva;
+
+ nested_svm_check_supported();
+
+ vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vm, VCPU_ID);
+
+ vm_install_exception_handler(vm, VINTR_IRQ_NUMBER, vintr_irq_handler);
+ vm_install_exception_handler(vm, INTR_IRQ_NUMBER, intr_irq_handler);
+
+ vcpu_alloc_svm(vm, &svm_gva);
+ vcpu_args_set(vm, VCPU_ID, 1, svm_gva);
+
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct ucall uc;
+
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_ABORT:
+ TEST_FAIL("%s", (const char *)uc.args[0]);
+ break;
+ /* NOT REACHED */
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
+ }
+done:
+ kvm_vm_free(vm);
+ return 0;
+}
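One subtlety in l1_guest_code(): the intercept-clearing statement needs each bit shifted before OR-ing, because '<<' binds tighter than '|'. The tiny demo below shows how the unparenthesized form clears the wrong bits (bit positions picked for illustration; they happen to match INTERCEPT_INTR = 0 and INTERCEPT_VINTR = 4, but treat that as an assumption):

#include <assert.h>

int main(void)
{
	const int INTR = 0, VINTR = 4;
	unsigned long long ok = ~0ULL, buggy = ~0ULL;

	/* Intended: clear bit INTR and bit VINTR. */
	ok &= ~((1ULL << INTR) | (1ULL << VINTR));
	assert(!(ok & (1ULL << INTR)) && !(ok & (1ULL << VINTR)));

	/* (1ULL << INTR | VINTR) parses as (1ULL << INTR) | VINTR == 0x5,
	 * so this clears bits 0 and 2 and leaves bit 4 (VINTR) set.
	 */
	buggy &= ~(1ULL << INTR | VINTR);
	assert(buggy & (1ULL << VINTR));
	return 0;
}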
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index 117bf49a3d79..eda0d2a51224 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -14,7 +14,6 @@
#include <stdint.h>
#include <time.h>
#include <sched.h>
-#include <sys/syscall.h>
#define VCPU_ID 5
@@ -98,20 +97,6 @@ static void guest_code(void)
GUEST_DONE();
}
-static long get_run_delay(void)
-{
- char path[64];
- long val[2];
- FILE *fp;
-
- sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
- fp = fopen(path, "r");
- fscanf(fp, "%ld %ld ", &val[0], &val[1]);
- fclose(fp);
-
- return val[1];
-}
-
static int cmp_timespec(struct timespec *a, struct timespec *b)
{
if (a->tv_sec > b->tv_sec)
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index fa2ac0e56b43..fe7ee2b0f29c 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -48,6 +48,7 @@ ARCH ?= $(SUBARCH)
# When local build is done, headers are installed in the default
# INSTALL_HDR_PATH usr/include.
.PHONY: khdr
+.NOTPARALLEL:
khdr:
ifndef KSFT_KHDR_INSTALL_DONE
ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
diff --git a/tools/testing/selftests/net/af_unix/Makefile b/tools/testing/selftests/net/af_unix/Makefile
index cfc7f4f97fd1..df341648f818 100644
--- a/tools/testing/selftests/net/af_unix/Makefile
+++ b/tools/testing/selftests/net/af_unix/Makefile
@@ -1,5 +1,2 @@
-##TEST_GEN_FILES := test_unix_oob
-TEST_PROGS := test_unix_oob
+TEST_GEN_PROGS := test_unix_oob
include ../../lib.mk
-
-all: $(TEST_PROGS)
diff --git a/tools/testing/selftests/net/af_unix/test_unix_oob.c b/tools/testing/selftests/net/af_unix/test_unix_oob.c
index 0f3e3763f4f8..3dece8b29253 100644
--- a/tools/testing/selftests/net/af_unix/test_unix_oob.c
+++ b/tools/testing/selftests/net/af_unix/test_unix_oob.c
@@ -271,8 +271,9 @@ main(int argc, char **argv)
read_oob(pfd, &oob);
if (!signal_recvd || len != 127 || oob != '%' || atmark != 1) {
- fprintf(stderr, "Test 3 failed, sigurg %d len %d OOB %c ",
- "atmark %d\n", signal_recvd, len, oob, atmark);
+ fprintf(stderr,
+ "Test 3 failed, sigurg %d len %d OOB %c atmark %d\n",
+ signal_recvd, len, oob, atmark);
die(1);
}
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 21b646d10b88..86ab429fe7f3 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -43,3 +43,4 @@ CONFIG_NET_ACT_TUNNEL_KEY=m
CONFIG_NET_ACT_MIRRED=m
CONFIG_BAREUDP=m
CONFIG_IPV6_IOAM6_LWTUNNEL=y
+CONFIG_CRYPTO_SM4=y
diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
index 13350cd5c8ac..8e67a252b672 100755
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -289,6 +289,12 @@ set_sysctl()
run_cmd sysctl -q -w $*
}
+# get sysctl values in NS-A
+get_sysctl()
+{
+ ${NSA_CMD} sysctl -n $*
+}
+
################################################################################
# Setup for tests
@@ -1003,6 +1009,60 @@ ipv4_tcp_md5()
run_cmd nettest -s -I ${NSA_DEV} -M ${MD5_PW} -m ${NS_NET}
log_test $? 1 "MD5: VRF: Device must be a VRF - prefix"
+ test_ipv4_md5_vrf__vrf_server__no_bind_ifindex
+ test_ipv4_md5_vrf__global_server__bind_ifindex0
+}
+
+test_ipv4_md5_vrf__vrf_server__no_bind_ifindex()
+{
+ log_start
+ show_hint "Simulates applications using VRF without TCP_MD5SIG_FLAG_IFINDEX"
+ run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
+ sleep 1
+ run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+ log_test $? 0 "MD5: VRF: VRF-bound server, unbound key accepts connection"
+
+ log_start
+ show_hint "Binding both the socket and the key is not required but it works"
+ run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
+ sleep 1
+ run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+ log_test $? 0 "MD5: VRF: VRF-bound server, bound key accepts connection"
+}
+
+test_ipv4_md5_vrf__global_server__bind_ifindex0()
+{
+ # This particular test needs tcp_l3mdev_accept=1 for Global server to accept VRF connections
+ local old_tcp_l3mdev_accept
+ old_tcp_l3mdev_accept=$(get_sysctl net.ipv4.tcp_l3mdev_accept)
+ set_sysctl net.ipv4.tcp_l3mdev_accept=1
+
+ log_start
+ run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
+ sleep 1
+ run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+ log_test $? 2 "MD5: VRF: Global server, Key bound to ifindex=0 rejects VRF connection"
+
+ log_start
+ run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
+ sleep 1
+ run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW}
+ log_test $? 0 "MD5: VRF: Global server, key bound to ifindex=0 accepts non-VRF connection"
+
+ log_start
+ run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
+ sleep 1
+ run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+ log_test $? 0 "MD5: VRF: Global server, key not bound to ifindex accepts VRF connection"
+
+ log_start
+ run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
+ sleep 1
+ run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW}
+ log_test $? 0 "MD5: VRF: Global server, key not bound to ifindex accepts non-VRF connection"
+
+ # restore value
+ set_sysctl net.ipv4.tcp_l3mdev_accept="$old_tcp_l3mdev_accept"
}
ipv4_tcp_novrf()
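The --force-bind-key-ifindex / --no-bind-key-ifindex switches exercised above map to the TCP_MD5SIG_FLAG_IFINDEX flag of the TCP_MD5SIG_EXT socket option. A minimal sketch of binding an MD5 key to a device (helper name and error handling are illustrative, not nettest's actual code):

#include <linux/tcp.h>		/* struct tcp_md5sig, TCP_MD5SIG_EXT */
#include <net/if.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int md5_key_bind(int fd, const struct sockaddr_in *peer,
			const char *password, const char *dev)
{
	struct tcp_md5sig md5 = {};

	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	/* Assumes strlen(password) <= TCP_MD5SIG_MAXKEYLEN. */
	md5.tcpm_keylen = strlen(password);
	memcpy(md5.tcpm_key, password, md5.tcpm_keylen);

	if (dev) {
		/* Key only matches segments on this ifindex; a key
		 * installed with ifindex 0 matches non-VRF traffic.
		 */
		md5.tcpm_flags |= TCP_MD5SIG_FLAG_IFINDEX;
		md5.tcpm_ifindex = if_nametoindex(dev);
	}
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG_EXT,
			  &md5, sizeof(md5));
}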
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index 0d293391e9a4..b5a69ad191b0 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -2078,6 +2078,7 @@ basic_res()
"id 101 index 0 nhid 2 id 101 index 1 nhid 2 id 101 index 2 nhid 1 id 101 index 3 nhid 1"
log_test $? 0 "Dump all nexthop buckets in a group"
+ sleep 0.1
(( $($IP -j nexthop bucket list id 101 |
jq '[.[] | select(.bucket.idle_time > 0 and
.bucket.idle_time < 2)] | length') == 4 ))
diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
index d97bd6889446..72ee644d47bf 100644
--- a/tools/testing/selftests/net/forwarding/Makefile
+++ b/tools/testing/selftests/net/forwarding/Makefile
@@ -9,6 +9,7 @@ TEST_PROGS = bridge_igmp.sh \
gre_inner_v4_multipath.sh \
gre_inner_v6_multipath.sh \
gre_multipath.sh \
+ ip6_forward_instats_vrf.sh \
ip6gre_inner_v4_multipath.sh \
ip6gre_inner_v6_multipath.sh \
ipip_flat_gre_key.sh \
diff --git a/tools/testing/selftests/net/forwarding/forwarding.config.sample b/tools/testing/selftests/net/forwarding/forwarding.config.sample
index b802c14d2950..bf17e485684f 100644
--- a/tools/testing/selftests/net/forwarding/forwarding.config.sample
+++ b/tools/testing/selftests/net/forwarding/forwarding.config.sample
@@ -39,3 +39,9 @@ NETIF_CREATE=yes
# Timeout (in seconds) before ping exits regardless of how many packets have
# been sent or received
PING_TIMEOUT=5
+# Flag for tc match, either skip_sw or skip_hw, to skip processing the
+# filter in software or in hardware, respectively
+TC_FLAG=skip_hw
+# IPv6 traceroute utility name.
+TROUTE6=traceroute6
+
diff --git a/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
new file mode 100755
index 000000000000..9f5b3e2e5e95
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
@@ -0,0 +1,172 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IPv6 stats on the incoming interface when forwarding with VRF
+
+ALL_TESTS="
+ ipv6_ping
+ ipv6_in_too_big_err
+ ipv6_in_hdr_err
+ ipv6_in_addr_err
+ ipv6_in_discard
+"
+
+NUM_NETIFS=4
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 2001:1:1::2/64
+ ip -6 route add vrf v$h1 2001:1:2::/64 via 2001:1:1::1
+}
+
+h1_destroy()
+{
+ ip -6 route del vrf v$h1 2001:1:2::/64 via 2001:1:1::1
+ simple_if_fini $h1 2001:1:1::2/64
+}
+
+router_create()
+{
+ vrf_create router
+ __simple_if_init $rtr1 router 2001:1:1::1/64
+ __simple_if_init $rtr2 router 2001:1:2::1/64
+ mtu_set $rtr2 1280
+}
+
+router_destroy()
+{
+ mtu_restore $rtr2
+ __simple_if_fini $rtr2 2001:1:2::1/64
+ __simple_if_fini $rtr1 2001:1:1::1/64
+ vrf_destroy router
+}
+
+h2_create()
+{
+ simple_if_init $h2 2001:1:2::2/64
+ ip -6 route add vrf v$h2 2001:1:1::/64 via 2001:1:2::1
+ mtu_set $h2 1280
+}
+
+h2_destroy()
+{
+ mtu_restore $h2
+ ip -6 route del vrf v$h2 2001:1:1::/64 via 2001:1:2::1
+ simple_if_fini $h2 2001:1:2::2/64
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rtr1=${NETIFS[p2]}
+
+ rtr2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+ h1_create
+ router_create
+ h2_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ h2_destroy
+ router_destroy
+ h1_destroy
+ vrf_cleanup
+}
+
+ipv6_in_too_big_err()
+{
+ RET=0
+
+ local t0=$(ipv6_stats_get $rtr1 Ip6InTooBigErrors)
+ local vrf_name=$(master_name_get $h1)
+
+ # Send too big packets
+ ip vrf exec $vrf_name \
+ $PING6 -s 1300 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
+
+ local t1=$(ipv6_stats_get $rtr1 Ip6InTooBigErrors)
+ test "$((t1 - t0))" -ne 0
+ check_err $?
+ log_test "Ip6InTooBigErrors"
+}
+
+ipv6_in_hdr_err()
+{
+ RET=0
+
+ local t0=$(ipv6_stats_get $rtr1 Ip6InHdrErrors)
+ local vrf_name=$(master_name_get $h1)
+
+ # Send packets with hop limit 1; traceroute6 is the easiest way, as some
+ # ping6 implementations don't allow the hop limit to be specified
+ ip vrf exec $vrf_name \
+ $TROUTE6 2001:1:2::2 &> /dev/null
+
+ local t1=$(ipv6_stats_get $rtr1 Ip6InHdrErrors)
+ test "$((t1 - t0))" -ne 0
+ check_err $?
+ log_test "Ip6InHdrErrors"
+}
+
+ipv6_in_addr_err()
+{
+ RET=0
+
+ local t0=$(ipv6_stats_get $rtr1 Ip6InAddrErrors)
+ local vrf_name=$(master_name_get $h1)
+
+ # Disable forwarding temporarily while sending the packet
+ sysctl -qw net.ipv6.conf.all.forwarding=0
+ ip vrf exec $vrf_name \
+ $PING6 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
+ sysctl -qw net.ipv6.conf.all.forwarding=1
+
+ local t1=$(ipv6_stats_get $rtr1 Ip6InAddrErrors)
+ test "$((t1 - t0))" -ne 0
+ check_err $?
+ log_test "Ip6InAddrErrors"
+}
+
+ipv6_in_discard()
+{
+ RET=0
+
+ local t0=$(ipv6_stats_get $rtr1 Ip6InDiscards)
+ local vrf_name=$(master_name_get $h1)
+
+ # Add a policy to discard
+ ip xfrm policy add dst 2001:1:2::2/128 dir fwd action block
+ ip vrf exec $vrf_name \
+ $PING6 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
+ ip xfrm policy del dst 2001:1:2::2/128 dir fwd
+
+ local t1=$(ipv6_stats_get $rtr1 Ip6InDiscards)
+ test "$((t1 - t0))" -ne 0
+ check_err $?
+ log_test "Ip6InDiscards"
+}
+
+ipv6_ping()
+{
+ RET=0
+
+ ping6_test $h1 2001:1:2::2
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_flat.sh b/tools/testing/selftests/net/forwarding/ip6gre_flat.sh
new file mode 100755
index 000000000000..96c97064f2d3
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ip6gre_flat.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel without key.
+# This test uses flat topology for IP tunneling tests. See ip6gre_lib.sh for
+# more details.
+
+ALL_TESTS="
+ gre_flat
+ gre_mtu_change
+"
+
+NUM_NETIFS=6
+source lib.sh
+source ip6gre_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create $ol1 $ul1
+ sw2_flat_create $ol2 $ul2
+}
+
+gre_flat()
+{
+ test_traffic_ip4ip6 "GRE flat IPv4-in-IPv6"
+ test_traffic_ip6ip6 "GRE flat IPv6-in-IPv6"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_flat_key.sh b/tools/testing/selftests/net/forwarding/ip6gre_flat_key.sh
new file mode 100755
index 000000000000..ff9fb0db9bd1
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ip6gre_flat_key.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel with key.
+# This test uses flat topology for IP tunneling tests. See ip6gre_lib.sh for
+# more details.
+
+ALL_TESTS="
+ gre_flat
+ gre_mtu_change
+"
+
+NUM_NETIFS=6
+source lib.sh
+source ip6gre_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create $ol1 $ul1 key 233
+ sw2_flat_create $ol2 $ul2 key 233
+}
+
+gre_flat()
+{
+ test_traffic_ip4ip6 "GRE flat IPv4-in-IPv6 with key"
+ test_traffic_ip6ip6 "GRE flat IPv6-in-IPv6 with key"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_flat_keys.sh b/tools/testing/selftests/net/forwarding/ip6gre_flat_keys.sh
new file mode 100755
index 000000000000..12c138785242
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ip6gre_flat_keys.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel with keys.
+# This test uses flat topology for IP tunneling tests. See ip6gre_lib.sh for
+# more details.
+
+ALL_TESTS="
+ gre_flat
+ gre_mtu_change
+"
+
+NUM_NETIFS=6
+source lib.sh
+source ip6gre_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create $ol1 $ul1 ikey 111 okey 222
+ sw2_flat_create $ol2 $ul2 ikey 222 okey 111
+}
+
+gre_flat()
+{
+ test_traffic_ip4ip6 "GRE flat IPv4-in-IPv6 with ikey/okey"
+ test_traffic_ip6ip6 "GRE flat IPv6-in-IPv6 with ikey/okey"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_hier.sh b/tools/testing/selftests/net/forwarding/ip6gre_hier.sh
new file mode 100755
index 000000000000..83b55c30a5c3
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ip6gre_hier.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnels without key.
+# This test uses hierarchical topology for IP tunneling tests. See
+# ip6gre_lib.sh for more details.
+
+ALL_TESTS="
+ gre_hier
+ gre_mtu_change
+"
+
+NUM_NETIFS=6
+source lib.sh
+source ip6gre_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_hierarchical_create $ol1 $ul1
+ sw2_hierarchical_create $ol2 $ul2
+}
+
+gre_hier()
+{
+ test_traffic_ip4ip6 "GRE hierarchical IPv4-in-IPv6"
+ test_traffic_ip6ip6 "GRE hierarchical IPv6-in-IPv6"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_hierarchical_destroy $ol2 $ul2
+ sw1_hierarchical_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_hier_key.sh b/tools/testing/selftests/net/forwarding/ip6gre_hier_key.sh
new file mode 100755
index 000000000000..256607916d92
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ip6gre_hier_key.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnels with key.
+# This test uses hierarchical topology for IP tunneling tests. See
+# ip6gre_lib.sh for more details.
+
+ALL_TESTS="
+ gre_hier
+ gre_mtu_change
+"
+
+NUM_NETIFS=6
+source lib.sh
+source ip6gre_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_hierarchical_create $ol1 $ul1 key 22
+ sw2_hierarchical_create $ol2 $ul2 key 22
+}
+
+gre_hier()
+{
+ test_traffic_ip4ip6 "GRE hierarchical IPv4-in-IPv6 with key"
+ test_traffic_ip6ip6 "GRE hierarchical IPv6-in-IPv6 with key"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_hierarchical_destroy $ol2 $ul2
+ sw1_hierarchical_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_hier_keys.sh b/tools/testing/selftests/net/forwarding/ip6gre_hier_keys.sh
new file mode 100755
index 000000000000..ad1bcd6334a8
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ip6gre_hier_keys.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnels with keys.
+# This test uses hierarchical topology for IP tunneling tests. See
+# ip6gre_lib.sh for more details.
+
+ALL_TESTS="
+ gre_hier
+ gre_mtu_change
+"
+
+NUM_NETIFS=6
+source lib.sh
+source ip6gre_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_hierarchical_create $ol1 $ul1 ikey 111 okey 222
+ sw2_hierarchical_create $ol2 $ul2 ikey 222 okey 111
+}
+
+gre_hier()
+{
+ test_traffic_ip4ip6 "GRE hierarchical IPv4-in-IPv6 with ikey/okey"
+ test_traffic_ip6ip6 "GRE hierarchical IPv6-in-IPv6 with ikey/okey"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_hierarchical_destroy $ol2 $ul2
+ sw1_hierarchical_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_lib.sh b/tools/testing/selftests/net/forwarding/ip6gre_lib.sh
new file mode 100644
index 000000000000..58a3597037b1
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ip6gre_lib.sh
@@ -0,0 +1,438 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Handles creation and destruction of IP-in-IP or GRE tunnels over the given
+# topology. Supports both flat and hierarchical models.
+#
+# Flat Model:
+# Overlay and underlay share the same VRF.
+# SW1 uses the default VRF, so its tunnel has no bound dev.
+# SW2 uses a non-default VRF, so its tunnel has a bound dev.
+# +--------------------------------+
+# | H1 |
+# | $h1 + |
+# | 198.51.100.1/24 | |
+# | 2001:db8:1::1/64 | |
+# +-------------------------|------+
+# |
+# +-------------------------|-------------------+
+# | SW1 | |
+# | $ol1 + |
+# | 198.51.100.2/24 |
+# | 2001:db8:1::2/64 |
+# | |
+# | + g1a (ip6gre) |
+# | loc=2001:db8:3::1 |
+# | rem=2001:db8:3::2 --. |
+# | tos=inherit | |
+# | . |
+# | .--------------------- |
+# | | |
+# | v |
+# | + $ul1.111 (vlan) |
+# | | 2001:db8:10::1/64 |
+# | \ |
+# | \____________ |
+# | | |
+# | VRF default + $ul1 |
+# +---------------------|-----------------------+
+# |
+# +---------------------|-----------------------+
+# | SW2 | |
+# | $ul2 + |
+# | ___________| |
+# | / |
+# | / |
+# | + $ul2.111 (vlan) |
+# | ^ 2001:db8:10::2/64 |
+# | | |
+# | | |
+# | '----------------------. |
+# | + g2a (ip6gre) | |
+# | loc=2001:db8:3::2 | |
+# | rem=2001:db8:3::1 --' |
+# | tos=inherit |
+# | |
+# | + $ol2 |
+# | | 203.0.113.2/24 |
+# | VRF v$ol2 | 2001:db8:2::2/64 |
+# +---------------------|-----------------------+
+# +---------------------|----------+
+# | H2 | |
+# | $h2 + |
+# | 203.0.113.1/24 |
+# | 2001:db8:2::1/64 |
+# +--------------------------------+
+#
+# Hierarchical model:
+# The tunnel is bound to a device in a different VRF
+#
+# +--------------------------------+
+# | H1 |
+# | $h1 + |
+# | 198.51.100.1/24 | |
+# | 2001:db8:1::1/64 | |
+# +-------------------------|------+
+# |
+# +-------------------------|-------------------+
+# | SW1 | |
+# | +-----------------------|-----------------+ |
+# | | $ol1 + | |
+# | | 198.51.100.2/24 | |
+# | | 2001:db8:1::2/64 | |
+# | | | |
+# | | + g1a (ip6gre) | |
+# | | loc=2001:db8:3::1 | |
+# | | rem=2001:db8:3::2 | |
+# | | tos=inherit | |
+# | | ^ | |
+# | | VRF v$ol1 | | |
+# | +--------------------|--------------------+ |
+# | | |
+# | +--------------------|--------------------+ |
+# | | VRF v$ul1 | | |
+# | | | | |
+# | | v | |
+# | | dummy1 + | |
+# | | 2001:db8:3::1/64 | |
+# | | .-----------' | |
+# | | | | |
+# | | v | |
+# | | + $ul1.111 (vlan) | |
+# | | | 2001:db8:10::1/64 | |
+# | | \ | |
+# | | \__________ | |
+# | | | | |
+# | | + $ul1 | |
+# | +---------------------|-------------------+ |
+# +-----------------------|---------------------+
+# |
+# +-----------------------|---------------------+
+# | SW2 | |
+# | +---------------------|-------------------+ |
+# | | + $ul2 | |
+# | | _____| | |
+# | | / | |
+# | | / | |
+# | | | $ul2.111 (vlan) | |
+# | | + 2001:db8:10::2/64 | |
+# | | ^ | |
+# | | | | |
+# | | '------. | |
+# | | dummy2 + | |
+# | | 2001:db8:3::2/64 | |
+# | | ^ | |
+# | | | | |
+# | | | | |
+# | | VRF v$ul2 | | |
+# | +---------------------|-------------------+ |
+# | | |
+# | +---------------------|-------------------+ |
+# | | VRF v$ol2 | | |
+# | | | | |
+# | | v | |
+# | | g2a (ip6gre) + | |
+# | | loc=2001:db8:3::2 | |
+# | | rem=2001:db8:3::1 | |
+# | | tos=inherit | |
+# | | | |
+# | | $ol2 + | |
+# | | 203.0.113.2/24 | | |
+# | | 2001:db8:2::2/64 | | |
+# | +---------------------|-------------------+ |
+# +-----------------------|---------------------+
+# |
+# +-----------------------|--------+
+# | H2 | |
+# | $h2 + |
+# | 203.0.113.1/24 |
+# | 2001:db8:2::1/64 |
+# +--------------------------------+
+
+source lib.sh
+source tc_common.sh
+
+h1_create()
+{
+ simple_if_init $h1 198.51.100.1/24 2001:db8:1::1/64
+ ip route add vrf v$h1 203.0.113.0/24 via 198.51.100.2
+ ip -6 route add vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+ ip -6 route del vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2
+ ip route del vrf v$h1 203.0.113.0/24 via 198.51.100.2
+ simple_if_fini $h1 198.51.100.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 203.0.113.1/24 2001:db8:2::1/64
+ ip route add vrf v$h2 198.51.100.0/24 via 203.0.113.2
+ ip -6 route add vrf v$h2 2001:db8:1::/64 via 2001:db8:2::2
+}
+
+h2_destroy()
+{
+ ip -6 route del vrf v$h2 2001:db8:1::/64 via 2001:db8:2::2
+ ip route del vrf v$h2 198.51.100.0/24 via 203.0.113.2
+ simple_if_fini $h2 203.0.113.1/24 2001:db8:2::1/64
+}
+
+sw1_flat_create()
+{
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ ip link set dev $ol1 up
+ __addr_add_del $ol1 add 198.51.100.2/24 2001:db8:1::2/64
+
+ ip link set dev $ul1 up
+ vlan_create $ul1 111 "" 2001:db8:10::1/64
+
+ tunnel_create g1a ip6gre 2001:db8:3::1 2001:db8:3::2 tos inherit \
+ ttl inherit "$@"
+ ip link set dev g1a up
+ __addr_add_del g1a add "2001:db8:3::1/128"
+
+ ip -6 route add 2001:db8:3::2/128 via 2001:db8:10::2
+ ip route add 203.0.113.0/24 dev g1a
+ ip -6 route add 2001:db8:2::/64 dev g1a
+}
+
+sw1_flat_destroy()
+{
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ ip -6 route del 2001:db8:2::/64
+ ip route del 203.0.113.0/24
+ ip -6 route del 2001:db8:3::2/128 via 2001:db8:10::2
+
+ __simple_if_fini g1a 2001:db8:3::1/128
+ tunnel_destroy g1a
+
+ vlan_destroy $ul1 111
+ __simple_if_fini $ul1
+ __simple_if_fini $ol1 198.51.100.2/24 2001:db8:1::2/64
+}
+
+sw2_flat_create()
+{
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ simple_if_init $ol2 203.0.113.2/24 2001:db8:2::2/64
+ __simple_if_init $ul2 v$ol2
+ vlan_create $ul2 111 v$ol2 2001:db8:10::2/64
+
+ tunnel_create g2a ip6gre 2001:db8:3::2 2001:db8:3::1 tos inherit \
+ ttl inherit dev v$ol2 "$@"
+ __simple_if_init g2a v$ol2 2001:db8:3::2/128
+
+ # Replace neighbor to avoid 1 dropped packet due to "unresolved neigh"
+ ip neigh replace dev $ol2 203.0.113.1 lladdr $(mac_get $h2)
+ ip -6 neigh replace dev $ol2 2001:db8:2::1 lladdr $(mac_get $h2)
+
+ ip -6 route add vrf v$ol2 2001:db8:3::1/128 via 2001:db8:10::1
+ ip route add vrf v$ol2 198.51.100.0/24 dev g2a
+ ip -6 route add vrf v$ol2 2001:db8:1::/64 dev g2a
+}
+
+sw2_flat_destroy()
+{
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ ip -6 route del vrf v$ol2 2001:db8:2::/64
+ ip route del vrf v$ol2 198.51.100.0/24
+ ip -6 route del vrf v$ol2 2001:db8:3::1/128 via 2001:db8:10::1
+
+ __simple_if_fini g2a 2001:db8:3::2/128
+ tunnel_destroy g2a
+
+ vlan_destroy $ul2 111
+ __simple_if_fini $ul2
+ simple_if_fini $ol2 203.0.113.2/24 2001:db8:2::2/64
+}
+
+sw1_hierarchical_create()
+{
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ simple_if_init $ol1 198.51.100.2/24 2001:db8:1::2/64
+ simple_if_init $ul1
+ ip link add name dummy1 type dummy
+ __simple_if_init dummy1 v$ul1 2001:db8:3::1/64
+
+ vlan_create $ul1 111 v$ul1 2001:db8:10::1/64
+ tunnel_create g1a ip6gre 2001:db8:3::1 2001:db8:3::2 tos inherit \
+ ttl inherit dev dummy1 "$@"
+ ip link set dev g1a master v$ol1
+
+ ip -6 route add vrf v$ul1 2001:db8:3::2/128 via 2001:db8:10::2
+ ip route add vrf v$ol1 203.0.113.0/24 dev g1a
+ ip -6 route add vrf v$ol1 2001:db8:2::/64 dev g1a
+}
+
+sw1_hierarchical_destroy()
+{
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ ip -6 route del vrf v$ol1 2001:db8:2::/64
+ ip route del vrf v$ol1 203.0.113.0/24
+ ip -6 route del vrf v$ul1 2001:db8:3::2/128
+
+ tunnel_destroy g1a
+ vlan_destroy $ul1 111
+
+ __simple_if_fini dummy1 2001:db8:3::1/64
+ ip link del dev dummy1
+
+ simple_if_fini $ul1
+ simple_if_fini $ol1 198.51.100.2/24 2001:db8:1::2/64
+}
+
+sw2_hierarchical_create()
+{
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ simple_if_init $ol2 203.0.113.2/24 2001:db8:2::2/64
+ simple_if_init $ul2
+
+ ip link add name dummy2 type dummy
+ __simple_if_init dummy2 v$ul2 2001:db8:3::2/64
+
+ vlan_create $ul2 111 v$ul2 2001:db8:10::2/64
+ tunnel_create g2a ip6gre 2001:db8:3::2 2001:db8:3::1 tos inherit \
+ ttl inherit dev dummy2 "$@"
+ ip link set dev g2a master v$ol2
+
+ # Replace neighbor to avoid 1 dropped packet due to "unresolved neigh"
+ ip neigh replace dev $ol2 203.0.113.1 lladdr $(mac_get $h2)
+ ip -6 neigh replace dev $ol2 2001:db8:2::1 lladdr $(mac_get $h2)
+
+ ip -6 route add vrf v$ul2 2001:db8:3::1/128 via 2001:db8:10::1
+ ip route add vrf v$ol2 198.51.100.0/24 dev g2a
+ ip -6 route add vrf v$ol2 2001:db8:1::/64 dev g2a
+}
+
+sw2_hierarchical_destroy()
+{
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ ip -6 route del vrf v$ol2 2001:db8:2::/64
+ ip route del vrf v$ol2 198.51.100.0/24
+ ip -6 route del vrf v$ul2 2001:db8:3::1/128
+
+ tunnel_destroy g2a
+ vlan_destroy $ul2 111
+
+ __simple_if_fini dummy2 2001:db8:3::2/64
+ ip link del dev dummy2
+
+ simple_if_fini $ul2
+ simple_if_fini $ol2 203.0.113.2/24 2001:db8:2::2/64
+}
+
+test_traffic_ip4ip6()
+{
+ RET=0
+
+ h1mac=$(mac_get $h1)
+ ol1mac=$(mac_get $ol1)
+
+ tc qdisc add dev $ul1 clsact
+ tc filter add dev $ul1 egress proto all pref 1 handle 101 \
+ flower $TC_FLAG action pass
+
+ tc qdisc add dev $ol2 clsact
+ tc filter add dev $ol2 egress protocol ipv4 pref 1 handle 101 \
+ flower $TC_FLAG dst_ip 203.0.113.1 action pass
+
+ $MZ $h1 -c 1000 -p 64 -a $h1mac -b $ol1mac -A 198.51.100.1 \
+ -B 203.0.113.1 -t ip -q -d 1msec
+
+ # Check ports after encap and after decap.
+ tc_check_at_least_x_packets "dev $ul1 egress" 101 1000
+ check_err $? "Packets did not go through $ul1, tc_flag = $TC_FLAG"
+
+ tc_check_at_least_x_packets "dev $ol2 egress" 101 1000
+ check_err $? "Packets did not go through $ol2, tc_flag = $TC_FLAG"
+
+ log_test "$@"
+
+ tc filter del dev $ol2 egress protocol ipv4 pref 1 handle 101 flower
+ tc qdisc del dev $ol2 clsact
+ tc filter del dev $ul1 egress proto all pref 1 handle 101 flower
+ tc qdisc del dev $ul1 clsact
+}
+
+test_traffic_ip6ip6()
+{
+ RET=0
+
+ h1mac=$(mac_get $h1)
+ ol1mac=$(mac_get $ol1)
+
+ tc qdisc add dev $ul1 clsact
+ tc filter add dev $ul1 egress proto all pref 1 handle 101 \
+ flower $TC_FLAG action pass
+
+ tc qdisc add dev $ol2 clsact
+ tc filter add dev $ol2 egress protocol ipv6 pref 1 handle 101 \
+ flower $TC_FLAG dst_ip 2001:db8:2::1 action pass
+
+ $MZ -6 $h1 -c 1000 -p 64 -a $h1mac -b $ol1mac -A 2001:db8:1::1 \
+ -B 2001:db8:2::1 -t ip -q -d 1msec
+
+ # Check ports after encap and after decap.
+ tc_check_at_least_x_packets "dev $ul1 egress" 101 1000
+ check_err $? "Packets did not go through $ul1, tc_flag = $TC_FLAG"
+
+ tc_check_at_least_x_packets "dev $ol2 egress" 101 1000
+ check_err $? "Packets did not go through $ol2, tc_flag = $TC_FLAG"
+
+ log_test "$@"
+
+ tc filter del dev $ol2 egress protocol ipv6 pref 1 handle 101 flower
+ tc qdisc del dev $ol2 clsact
+ tc filter del dev $ul1 egress proto all pref 1 handle 101 flower
+ tc qdisc del dev $ul1 clsact
+}
+
+topo_mtu_change()
+{
+ local mtu=$1
+
+ ip link set mtu $mtu dev $h1
+ ip link set mtu $mtu dev $ol1
+ ip link set mtu $mtu dev g1a
+ ip link set mtu $mtu dev $ul1
+ ip link set mtu $mtu dev $ul1.111
+ ip link set mtu $mtu dev $h2
+ ip link set mtu $mtu dev $ol2
+ ip link set mtu $mtu dev g2a
+ ip link set mtu $mtu dev $ul2
+ ip link set mtu $mtu dev $ul2.111
+}
+
+test_mtu_change()
+{
+ RET=0
+
+ ping6_do $h1 2001:db8:2::1 "-s 1800 -w 3"
+ check_fail $? "ping GRE IPv6 should not pass with packet size 1800"
+
+ RET=0
+
+ topo_mtu_change 2000
+ ping6_do $h1 2001:db8:2::1 "-s 1800 -w 3"
+ check_err $?
+ log_test "ping GRE IPv6, packet size 1800 after MTU change"
+}
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index e7fc5c35b569..92087d423bcf 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -751,6 +751,14 @@ qdisc_parent_stats_get()
| jq '.[] | select(.parent == "'"$parent"'") | '"$selector"
}
+ipv6_stats_get()
+{
+ local dev=$1; shift
+ local stat=$1; shift
+
+ grep "^$stat" /proc/net/dev_snmp6/$dev | cut -f2
+}
+
humanize()
{
local speed=$1; shift
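ipv6_stats_get() scrapes /proc/net/dev_snmp6/<dev>, which holds one "CounterName value" pair per line. For reference, the same lookup in C (a hypothetical helper, not part of the selftests):

#include <stdio.h>
#include <string.h>

static long ipv6_stat_get(const char *dev, const char *stat)
{
	char path[128], name[64];
	long val = -1, v;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/net/dev_snmp6/%s", dev);
	f = fopen(path, "r");
	if (!f)
		return -1;
	/* Scan "CounterName value" pairs until the requested one. */
	while (fscanf(f, "%63s %ld", name, &v) == 2) {
		if (!strcmp(name, stat)) {
			val = v;
			break;
		}
	}
	fclose(f);
	return val;
}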
diff --git a/tools/testing/selftests/net/forwarding/tc_common.sh b/tools/testing/selftests/net/forwarding/tc_common.sh
index 0e18e8be6e2a..bce8bb8d2b6f 100644
--- a/tools/testing/selftests/net/forwarding/tc_common.sh
+++ b/tools/testing/selftests/net/forwarding/tc_common.sh
@@ -16,6 +16,16 @@ tc_check_packets()
tc_rule_handle_stats_get "$id" "$handle" > /dev/null
}
+tc_check_at_least_x_packets()
+{
+ local id=$1
+ local handle=$2
+ local count=$3
+
+ busywait "$TC_HIT_TIMEOUT" until_counter_is ">= $count" \
+ tc_rule_handle_stats_get "$id" "$handle" > /dev/null
+}
+
tc_check_packets_hitting()
{
local id=$1
diff --git a/tools/testing/selftests/net/ioam6.sh b/tools/testing/selftests/net/ioam6.sh
index 3caf72bb9c6a..a2b9fad5a9a6 100755
--- a/tools/testing/selftests/net/ioam6.sh
+++ b/tools/testing/selftests/net/ioam6.sh
@@ -6,7 +6,7 @@
# This script evaluates the IOAM insertion for IPv6 by checking the IOAM data
# consistency directly inside packets on the receiver side. Tests are divided
# into three categories: OUTPUT (evaluates the IOAM processing by the sender),
-# INPUT (evaluates the IOAM processing by the receiver) and GLOBAL (evaluates
+# INPUT (evaluates the IOAM processing by a receiver) and GLOBAL (evaluates
# wider use cases that do not fall into the other two categories). Both OUTPUT
# and INPUT tests only use a two-node topology (alpha and beta), while GLOBAL
# tests use the entire three-node topology (alpha, beta, gamma). Each test is
@@ -200,7 +200,7 @@ check_kernel_compatibility()
ip -netns ioam-tmp-node link set veth0 up
ip -netns ioam-tmp-node link set veth1 up
- ip -netns ioam-tmp-node ioam namespace add 0 &>/dev/null
+ ip -netns ioam-tmp-node ioam namespace add 0
ns_ad=$?
ip -netns ioam-tmp-node ioam namespace show | grep -q "namespace 0"
@@ -214,11 +214,11 @@ check_kernel_compatibility()
exit 1
fi
- ip -netns ioam-tmp-node route add db02::/64 encap ioam6 trace prealloc \
- type 0x800000 ns 0 size 4 dev veth0 &>/dev/null
+ ip -netns ioam-tmp-node route add db02::/64 encap ioam6 mode inline \
+ trace prealloc type 0x800000 ns 0 size 4 dev veth0
tr_ad=$?
- ip -netns ioam-tmp-node -6 route | grep -q "encap ioam6 trace"
+ ip -netns ioam-tmp-node -6 route | grep -q "encap ioam6"
tr_sh=$?
if [[ $tr_ad != 0 || $tr_sh != 0 ]]
@@ -232,6 +232,30 @@ check_kernel_compatibility()
ip link del veth0 2>/dev/null || true
ip netns del ioam-tmp-node || true
+
+ lsmod | grep -q "ip6_tunnel"
+ ip6tnl_loaded=$?
+
+ if [ $ip6tnl_loaded = 0 ]
+ then
+ encap_tests=0
+ else
+ modprobe ip6_tunnel &>/dev/null
+ lsmod | grep -q "ip6_tunnel"
+ encap_tests=$?
+
+ if [ $encap_tests != 0 ]
+ then
+ ip a | grep -q "ip6tnl0"
+ encap_tests=$?
+
+ if [ $encap_tests != 0 ]
+ then
+ echo "Note: ip6_tunnel not found neither as a module nor inside the" \
+ "kernel, tests that require it (encap mode) will be omitted"
+ fi
+ fi
+ fi
}
cleanup()
@@ -242,6 +266,11 @@ cleanup()
ip netns del ioam-node-alpha || true
ip netns del ioam-node-beta || true
ip netns del ioam-node-gamma || true
+
+ if [ $ip6tnl_loaded != 0 ]
+ then
+ modprobe -r ip6_tunnel 2>/dev/null || true
+ fi
}
setup()
@@ -329,6 +358,12 @@ log_test_failed()
printf "TEST: %-60s [FAIL]\n" "${desc}"
}
+log_results()
+{
+ echo "- Tests passed: ${npassed}"
+ echo "- Tests failed: ${nfailed}"
+}
+
run_test()
{
local name=$1
@@ -349,17 +384,27 @@ run_test()
ip netns exec $node_src ping6 -t 64 -c 1 -W 1 $ip6_dst &>/dev/null
if [ $? != 0 ]
then
+ nfailed=$((nfailed+1))
log_test_failed "${desc}"
kill -2 $spid &>/dev/null
else
wait $spid
- [ $? = 0 ] && log_test_passed "${desc}" || log_test_failed "${desc}"
+ if [ $? = 0 ]
+ then
+ npassed=$((npassed+1))
+ log_test_passed "${desc}"
+ else
+ nfailed=$((nfailed+1))
+ log_test_failed "${desc}"
+ fi
fi
}
run()
{
echo
+ printf "%0.s-" {1..74}
+ echo
echo "OUTPUT tests"
printf "%0.s-" {1..74}
echo
@@ -369,7 +414,8 @@ run()
for t in $TESTS_OUTPUT
do
- $t
+ $t "inline"
+ [ $encap_tests = 0 ] && $t "encap"
done
# clean OUTPUT settings
@@ -378,6 +424,8 @@ run()
echo
+ printf "%0.s-" {1..74}
+ echo
echo "INPUT tests"
printf "%0.s-" {1..74}
echo
@@ -387,7 +435,8 @@ run()
for t in $TESTS_INPUT
do
- $t
+ $t "inline"
+ [ $encap_tests = 0 ] && $t "encap"
done
# clean INPUT settings
@@ -396,7 +445,8 @@ run()
ip -netns ioam-node-alpha ioam namespace set 123 schema ${ALPHA[8]}
ip -netns ioam-node-alpha route change db01::/64 dev veth0
-
+ echo
+ printf "%0.s-" {1..74}
echo
echo "GLOBAL tests"
printf "%0.s-" {1..74}
@@ -404,8 +454,12 @@ run()
for t in $TESTS_GLOBAL
do
- $t
+ $t "inline"
+ [ $encap_tests = 0 ] && $t "encap"
done
+
+ echo
+ log_results
}
bit2type=(
@@ -431,11 +485,16 @@ out_undef_ns()
##############################################################################
local desc="Unknown IOAM namespace"
- ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace prealloc \
- type 0x800000 ns 0 size 4 dev veth0
+ [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1"
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up
+
+ ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \
+ trace prealloc type 0x800000 ns 0 size 4 dev veth0
+
+ run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-beta \
+ db01::2 db01::1 veth0 0x800000 0
- run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-beta db01::2 \
- db01::1 veth0 0x800000 0
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down
}
out_no_room()
@@ -446,11 +505,16 @@ out_no_room()
##############################################################################
local desc="Missing trace room"
- ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace prealloc \
- type 0xc00000 ns 123 size 4 dev veth0
+ [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1"
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up
- run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-beta db01::2 \
- db01::1 veth0 0xc00000 123
+ ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \
+ trace prealloc type 0xc00000 ns 123 size 4 dev veth0
+
+ run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-beta \
+ db01::2 db01::1 veth0 0xc00000 123
+
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down
}
out_bits()
@@ -465,15 +529,36 @@ out_bits()
local tmp=${bit2size[22]}
bit2size[22]=$(( $tmp + ${#ALPHA[9]} + ((4 - (${#ALPHA[9]} % 4)) % 4) ))
+ [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1"
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up
+
for i in {0..22}
do
- ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace \
- prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} dev veth0
-
- run_test "out_bit$i" "${desc/<n>/$i}" ioam-node-alpha ioam-node-beta \
- db01::2 db01::1 veth0 ${bit2type[$i]} 123
+ ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \
+ trace prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} \
+ dev veth0 &>/dev/null
+
+ local cmd_res=$?
+ local descr="${desc/<n>/$i}"
+
+ if [[ $i -ge 12 && $i -le 21 ]]
+ then
+ if [ $cmd_res != 0 ]
+ then
+ npassed=$((npassed+1))
+ log_test_passed "$descr"
+ else
+ nfailed=$((nfailed+1))
+ log_test_failed "$descr"
+ fi
+ else
+ run_test "out_bit$i" "$descr ($1 mode)" ioam-node-alpha \
+ ioam-node-beta db01::2 db01::1 veth0 ${bit2type[$i]} 123
+ fi
done
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down
+
bit2size[22]=$tmp
}
@@ -485,11 +570,16 @@ out_full_supp_trace()
##############################################################################
local desc="Full supported trace"
- ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace prealloc \
- type 0xfff002 ns 123 size 100 dev veth0
+ [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1"
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up
+
+ ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \
+ trace prealloc type 0xfff002 ns 123 size 100 dev veth0
- run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-beta db01::2 \
- db01::1 veth0 0xfff002 123
+ run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-beta \
+ db01::2 db01::1 veth0 0xfff002 123
+
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down
}
@@ -510,11 +600,16 @@ in_undef_ns()
##############################################################################
local desc="Unknown IOAM namespace"
- ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace prealloc \
- type 0x800000 ns 0 size 4 dev veth0
+ [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1"
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up
+
+ ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \
+ trace prealloc type 0x800000 ns 0 size 4 dev veth0
+
+ run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-beta \
+ db01::2 db01::1 veth0 0x800000 0
- run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-beta db01::2 \
- db01::1 veth0 0x800000 0
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down
}
in_no_room()
@@ -525,11 +620,16 @@ in_no_room()
##############################################################################
local desc="Missing trace room"
- ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace prealloc \
- type 0xc00000 ns 123 size 4 dev veth0
+ [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1"
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up
+
+ ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \
+ trace prealloc type 0xc00000 ns 123 size 4 dev veth0
+
+ run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-beta \
+ db01::2 db01::1 veth0 0xc00000 123
- run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-beta db01::2 \
- db01::1 veth0 0xc00000 123
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down
}
in_bits()
@@ -544,15 +644,21 @@ in_bits()
local tmp=${bit2size[22]}
bit2size[22]=$(( $tmp + ${#BETA[9]} + ((4 - (${#BETA[9]} % 4)) % 4) ))
- for i in {0..22}
+ [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1"
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up
+
+ for i in {0..11} {22..22}
do
- ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace \
- prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} dev veth0
+ ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \
+ trace prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} \
+ dev veth0
- run_test "in_bit$i" "${desc/<n>/$i}" ioam-node-alpha ioam-node-beta \
- db01::2 db01::1 veth0 ${bit2type[$i]} 123
+ run_test "in_bit$i" "${desc/<n>/$i} ($1 mode)" ioam-node-alpha \
+ ioam-node-beta db01::2 db01::1 veth0 ${bit2type[$i]} 123
done
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down
+
bit2size[22]=$tmp
}
@@ -569,11 +675,16 @@ in_oflag()
# back the IOAM namespace that was previously configured on the sender.
ip -netns ioam-node-alpha ioam namespace add 123
- ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace prealloc \
- type 0xc00000 ns 123 size 4 dev veth0
+ [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1"
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up
+
+ ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \
+ trace prealloc type 0xc00000 ns 123 size 4 dev veth0
+
+ run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-beta \
+ db01::2 db01::1 veth0 0xc00000 123
- run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-beta db01::2 \
- db01::1 veth0 0xc00000 123
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down
# And we clean the exception for this test to get things back to normal for
# other INPUT tests
@@ -588,11 +699,16 @@ in_full_supp_trace()
##############################################################################
local desc="Full supported trace"
- ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace prealloc \
- type 0xfff002 ns 123 size 80 dev veth0
+ [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1"
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up
- run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-beta db01::2 \
- db01::1 veth0 0xfff002 123
+ ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \
+ trace prealloc type 0xfff002 ns 123 size 80 dev veth0
+
+ run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-beta \
+ db01::2 db01::1 veth0 0xfff002 123
+
+ [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down
}
@@ -611,11 +727,16 @@ fwd_full_supp_trace()
##############################################################################
local desc="Forward - Full supported trace"
- ip -netns ioam-node-alpha route change db02::/64 encap ioam6 trace prealloc \
- type 0xfff002 ns 123 size 244 via db01::1 dev veth0
+ [ "$1" = "encap" ] && mode="$1 tundst db02::2" || mode="$1"
+ [ "$1" = "encap" ] && ip -netns ioam-node-gamma link set ip6tnl0 up
+
+ ip -netns ioam-node-alpha route change db02::/64 encap ioam6 mode $mode \
+ trace prealloc type 0xfff002 ns 123 size 244 via db01::1 dev veth0
- run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-gamma db01::2 \
- db02::2 veth0 0xfff002 123
+ run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-gamma \
+ db01::2 db02::2 veth0 0xfff002 123
+
+ [ "$1" = "encap" ] && ip -netns ioam-node-gamma link set ip6tnl0 down
}
@@ -625,6 +746,9 @@ fwd_full_supp_trace()
# #
################################################################################
+npassed=0
+nfailed=0
+
if [ "$(id -u)" -ne 0 ]
then
echo "SKIP: Need root privileges"
diff --git a/tools/testing/selftests/net/ioam6_parser.c b/tools/testing/selftests/net/ioam6_parser.c
index d376cb2c383c..8f6997d35816 100644
--- a/tools/testing/selftests/net/ioam6_parser.c
+++ b/tools/testing/selftests/net/ioam6_parser.c
@@ -94,16 +94,6 @@ enum {
TEST_OUT_BIT9,
TEST_OUT_BIT10,
TEST_OUT_BIT11,
- TEST_OUT_BIT12,
- TEST_OUT_BIT13,
- TEST_OUT_BIT14,
- TEST_OUT_BIT15,
- TEST_OUT_BIT16,
- TEST_OUT_BIT17,
- TEST_OUT_BIT18,
- TEST_OUT_BIT19,
- TEST_OUT_BIT20,
- TEST_OUT_BIT21,
TEST_OUT_BIT22,
TEST_OUT_FULL_SUPP_TRACE,
@@ -125,16 +115,6 @@ enum {
TEST_IN_BIT9,
TEST_IN_BIT10,
TEST_IN_BIT11,
- TEST_IN_BIT12,
- TEST_IN_BIT13,
- TEST_IN_BIT14,
- TEST_IN_BIT15,
- TEST_IN_BIT16,
- TEST_IN_BIT17,
- TEST_IN_BIT18,
- TEST_IN_BIT19,
- TEST_IN_BIT20,
- TEST_IN_BIT21,
TEST_IN_BIT22,
TEST_IN_FULL_SUPP_TRACE,
@@ -199,30 +179,6 @@ static int check_ioam_header(int tid, struct ioam6_trace_hdr *ioam6h,
ioam6h->nodelen != 2 ||
ioam6h->remlen;
- case TEST_OUT_BIT12:
- case TEST_IN_BIT12:
- case TEST_OUT_BIT13:
- case TEST_IN_BIT13:
- case TEST_OUT_BIT14:
- case TEST_IN_BIT14:
- case TEST_OUT_BIT15:
- case TEST_IN_BIT15:
- case TEST_OUT_BIT16:
- case TEST_IN_BIT16:
- case TEST_OUT_BIT17:
- case TEST_IN_BIT17:
- case TEST_OUT_BIT18:
- case TEST_IN_BIT18:
- case TEST_OUT_BIT19:
- case TEST_IN_BIT19:
- case TEST_OUT_BIT20:
- case TEST_IN_BIT20:
- case TEST_OUT_BIT21:
- case TEST_IN_BIT21:
- return ioam6h->overflow ||
- ioam6h->nodelen ||
- ioam6h->remlen != 1;
-
case TEST_OUT_BIT22:
case TEST_IN_BIT22:
return ioam6h->overflow ||
@@ -326,6 +282,66 @@ static int check_ioam6_data(__u8 **p, struct ioam6_trace_hdr *ioam6h,
*p += sizeof(__u32);
}
+ if (ioam6h->type.bit12) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit13) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit14) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit15) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit16) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit17) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit18) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit19) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit20) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
+ if (ioam6h->type.bit21) {
+ if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+ return 1;
+ *p += sizeof(__u32);
+ }
+
if (ioam6h->type.bit22) {
len = cnf.sc_data ? strlen(cnf.sc_data) : 0;
aligned = cnf.sc_data ? __ALIGN_KERNEL(len, 4) : 0;
@@ -455,26 +471,6 @@ static int str2id(const char *tname)
return TEST_OUT_BIT10;
if (!strcmp("out_bit11", tname))
return TEST_OUT_BIT11;
- if (!strcmp("out_bit12", tname))
- return TEST_OUT_BIT12;
- if (!strcmp("out_bit13", tname))
- return TEST_OUT_BIT13;
- if (!strcmp("out_bit14", tname))
- return TEST_OUT_BIT14;
- if (!strcmp("out_bit15", tname))
- return TEST_OUT_BIT15;
- if (!strcmp("out_bit16", tname))
- return TEST_OUT_BIT16;
- if (!strcmp("out_bit17", tname))
- return TEST_OUT_BIT17;
- if (!strcmp("out_bit18", tname))
- return TEST_OUT_BIT18;
- if (!strcmp("out_bit19", tname))
- return TEST_OUT_BIT19;
- if (!strcmp("out_bit20", tname))
- return TEST_OUT_BIT20;
- if (!strcmp("out_bit21", tname))
- return TEST_OUT_BIT21;
if (!strcmp("out_bit22", tname))
return TEST_OUT_BIT22;
if (!strcmp("out_full_supp_trace", tname))
@@ -509,26 +505,6 @@ static int str2id(const char *tname)
return TEST_IN_BIT10;
if (!strcmp("in_bit11", tname))
return TEST_IN_BIT11;
- if (!strcmp("in_bit12", tname))
- return TEST_IN_BIT12;
- if (!strcmp("in_bit13", tname))
- return TEST_IN_BIT13;
- if (!strcmp("in_bit14", tname))
- return TEST_IN_BIT14;
- if (!strcmp("in_bit15", tname))
- return TEST_IN_BIT15;
- if (!strcmp("in_bit16", tname))
- return TEST_IN_BIT16;
- if (!strcmp("in_bit17", tname))
- return TEST_IN_BIT17;
- if (!strcmp("in_bit18", tname))
- return TEST_IN_BIT18;
- if (!strcmp("in_bit19", tname))
- return TEST_IN_BIT19;
- if (!strcmp("in_bit20", tname))
- return TEST_IN_BIT20;
- if (!strcmp("in_bit21", tname))
- return TEST_IN_BIT21;
if (!strcmp("in_bit22", tname))
return TEST_IN_BIT22;
if (!strcmp("in_full_supp_trace", tname))
@@ -606,16 +582,6 @@ static int (*func[__TEST_MAX])(int, struct ioam6_trace_hdr *, __u32, __u16) = {
[TEST_OUT_BIT9] = check_ioam_header_and_data,
[TEST_OUT_BIT10] = check_ioam_header_and_data,
[TEST_OUT_BIT11] = check_ioam_header_and_data,
- [TEST_OUT_BIT12] = check_ioam_header,
- [TEST_OUT_BIT13] = check_ioam_header,
- [TEST_OUT_BIT14] = check_ioam_header,
- [TEST_OUT_BIT15] = check_ioam_header,
- [TEST_OUT_BIT16] = check_ioam_header,
- [TEST_OUT_BIT17] = check_ioam_header,
- [TEST_OUT_BIT18] = check_ioam_header,
- [TEST_OUT_BIT19] = check_ioam_header,
- [TEST_OUT_BIT20] = check_ioam_header,
- [TEST_OUT_BIT21] = check_ioam_header,
[TEST_OUT_BIT22] = check_ioam_header_and_data,
[TEST_OUT_FULL_SUPP_TRACE] = check_ioam_header_and_data,
[TEST_IN_UNDEF_NS] = check_ioam_header,
@@ -633,16 +599,6 @@ static int (*func[__TEST_MAX])(int, struct ioam6_trace_hdr *, __u32, __u16) = {
[TEST_IN_BIT9] = check_ioam_header_and_data,
[TEST_IN_BIT10] = check_ioam_header_and_data,
[TEST_IN_BIT11] = check_ioam_header_and_data,
- [TEST_IN_BIT12] = check_ioam_header,
- [TEST_IN_BIT13] = check_ioam_header,
- [TEST_IN_BIT14] = check_ioam_header,
- [TEST_IN_BIT15] = check_ioam_header,
- [TEST_IN_BIT16] = check_ioam_header,
- [TEST_IN_BIT17] = check_ioam_header,
- [TEST_IN_BIT18] = check_ioam_header,
- [TEST_IN_BIT19] = check_ioam_header,
- [TEST_IN_BIT20] = check_ioam_header,
- [TEST_IN_BIT21] = check_ioam_header,
[TEST_IN_BIT22] = check_ioam_header_and_data,
[TEST_IN_FULL_SUPP_TRACE] = check_ioam_header_and_data,
[TEST_FWD_FULL_SUPP_TRACE] = check_ioam_header_and_data,
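The ten per-bit cases dropped above were byte-for-byte identical. For reference, a table-driven equivalent becomes possible once the trace type is read as a raw word instead of through the struct's bitfields; a minimal sketch, assuming the 24-bit trace type sits in the upper bits of the big-endian word (as in the header's type_be32 view), with a hypothetical helper name:

#include <stdint.h>
#include <arpa/inet.h>	/* ntohl() */

/*
 * Hypothetical table-driven replacement for the per-bit cases above:
 * bits 12..21 are data fields the kernel does not support, so each
 * field present in the trace must be a 4-byte all-ones filler.
 */
static int check_filler_fields(uint32_t type_be32, uint8_t **p)
{
	uint32_t type = ntohl(type_be32);
	int bit;

	for (bit = 12; bit <= 21; bit++) {
		if (!(type & (1u << (31 - bit))))
			continue;	/* field not present in this trace */
		if (ntohl(*(uint32_t *)*p) != 0xffffffff)
			return 1;	/* unsupported fields must be all ones */
		*p += sizeof(uint32_t);
	}
	return 0;
}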
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 255793c5ac4f..293d349e21fe 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -945,12 +945,15 @@ subflows_tests()
# subflow limited by client
reset
+ ip netns exec $ns1 ./pm_nl_ctl limits 0 0
+ ip netns exec $ns2 ./pm_nl_ctl limits 0 0
ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "single subflow, limited by client" 0 0 0
# subflow limited by server
reset
+ ip netns exec $ns1 ./pm_nl_ctl limits 0 0
ip netns exec $ns2 ./pm_nl_ctl limits 0 1
ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
run_tests $ns1 $ns2 10.0.1.1
@@ -973,7 +976,7 @@ subflows_tests()
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "multiple subflows" 2 2 2
- # multiple subflows limited by serverf
+ # multiple subflows limited by server
reset
ip netns exec $ns1 ./pm_nl_ctl limits 0 1
ip netns exec $ns2 ./pm_nl_ctl limits 0 2
diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh
index 3c741abe034e..cbacf9f6538b 100755
--- a/tools/testing/selftests/net/mptcp/pm_netlink.sh
+++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh
@@ -70,7 +70,7 @@ check()
check "ip netns exec $ns1 ./pm_nl_ctl dump" "" "defaults addr list"
check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0
-subflows 0" "defaults limits"
+subflows 2" "defaults limits"
ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.1
ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.2 flags subflow dev lo
@@ -118,11 +118,11 @@ check "ip netns exec $ns1 ./pm_nl_ctl dump" "" "flush addrs"
ip netns exec $ns1 ./pm_nl_ctl limits 9 1
check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0
-subflows 0" "rcv addrs above hard limit"
+subflows 2" "rcv addrs above hard limit"
ip netns exec $ns1 ./pm_nl_ctl limits 1 9
check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0
-subflows 0" "subflows above hard limit"
+subflows 2" "subflows above hard limit"
ip netns exec $ns1 ./pm_nl_ctl limits 8 8
check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 8
diff --git a/tools/testing/selftests/net/nettest.c b/tools/testing/selftests/net/nettest.c
index bd6288302094..b599003eb5ba 100644
--- a/tools/testing/selftests/net/nettest.c
+++ b/tools/testing/selftests/net/nettest.c
@@ -28,6 +28,7 @@
#include <unistd.h>
#include <time.h>
#include <errno.h>
+#include <getopt.h>
#include <linux/xfrm.h>
#include <linux/ipsec.h>
@@ -101,6 +102,8 @@ struct sock_args {
struct sockaddr_in6 v6;
} md5_prefix;
unsigned int prefix_len;
+ /* 0: default, -1: force off, +1: force on */
+ int bind_key_ifindex;
/* expected addresses and device index for connection */
const char *expected_dev;
@@ -271,11 +274,14 @@ static int tcp_md5sig(int sd, void *addr, socklen_t alen, struct sock_args *args
}
memcpy(&md5sig.tcpm_addr, addr, alen);
- if (args->ifindex) {
+ if ((args->ifindex && args->bind_key_ifindex >= 0) || args->bind_key_ifindex >= 1) {
opt = TCP_MD5SIG_EXT;
md5sig.tcpm_flags |= TCP_MD5SIG_FLAG_IFINDEX;
md5sig.tcpm_ifindex = args->ifindex;
+ log_msg("TCP_MD5SIG_FLAG_IFINDEX set tcpm_ifindex=%d\n", md5sig.tcpm_ifindex);
+ } else {
+ log_msg("TCP_MD5SIG_FLAG_IFINDEX off\n", md5sig.tcpm_ifindex);
}
rc = setsockopt(sd, IPPROTO_TCP, opt, &md5sig, sizeof(md5sig));
@@ -1822,6 +1828,14 @@ static int ipc_parent(int cpid, int fd, struct sock_args *args)
}
#define GETOPT_STR "sr:l:c:p:t:g:P:DRn:M:X:m:d:I:BN:O:SCi6xL:0:1:2:3:Fbq"
+#define OPT_FORCE_BIND_KEY_IFINDEX 1001
+#define OPT_NO_BIND_KEY_IFINDEX 1002
+
+static struct option long_opts[] = {
+ {"force-bind-key-ifindex", 0, 0, OPT_FORCE_BIND_KEY_IFINDEX},
+ {"no-bind-key-ifindex", 0, 0, OPT_NO_BIND_KEY_IFINDEX},
+ {0, 0, 0, 0}
+};
static void print_usage(char *prog)
{
@@ -1858,6 +1872,10 @@ static void print_usage(char *prog)
" -M password use MD5 sum protection\n"
" -X password MD5 password for client mode\n"
" -m prefix/len prefix and length to use for MD5 key\n"
+ " --no-bind-key-ifindex: Force TCP_MD5SIG_FLAG_IFINDEX off\n"
+ " --force-bind-key-ifindex: Force TCP_MD5SIG_FLAG_IFINDEX on\n"
+ " (default: only if -I is passed)\n"
+ "\n"
" -g grp multicast group (e.g., 239.1.1.1)\n"
" -i interactive mode (default is echo and terminate)\n"
"\n"
@@ -1893,7 +1911,7 @@ int main(int argc, char *argv[])
* process input args
*/
- while ((rc = getopt(argc, argv, GETOPT_STR)) != -1) {
+ while ((rc = getopt_long(argc, argv, GETOPT_STR, long_opts, NULL)) != -1) {
switch (rc) {
case 'B':
both_mode = 1;
@@ -1966,6 +1984,12 @@ int main(int argc, char *argv[])
case 'M':
args.password = optarg;
break;
+ case OPT_FORCE_BIND_KEY_IFINDEX:
+ args.bind_key_ifindex = 1;
+ break;
+ case OPT_NO_BIND_KEY_IFINDEX:
+ args.bind_key_ifindex = -1;
+ break;
case 'X':
args.client_pw = optarg;
break;
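For context, the flag the two new long options gate boils down to a single setsockopt(); a hedged sketch with an illustrative helper name, assuming headers that expose TCP_MD5SIG_EXT and struct tcp_md5sig (recent glibc or the kernel UAPI):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>	/* struct tcp_md5sig, TCP_MD5SIG_EXT */
#include <net/if.h>

/* Illustrative helper: bind an MD5 key for `peer` to one device. */
static int md5_key_on_dev(int sd, const struct sockaddr_in *peer,
			  const char *password, const char *dev)
{
	struct tcp_md5sig md5sig = {};

	memcpy(&md5sig.tcpm_addr, peer, sizeof(*peer));
	md5sig.tcpm_keylen = strlen(password);	/* keep <= TCP_MD5SIG_MAXKEYLEN */
	memcpy(md5sig.tcpm_key, password, md5sig.tcpm_keylen);

	/* what --force-bind-key-ifindex turns on even without -I */
	md5sig.tcpm_flags |= TCP_MD5SIG_FLAG_IFINDEX;
	md5sig.tcpm_ifindex = if_nametoindex(dev);

	return setsockopt(sd, IPPROTO_TCP, TCP_MD5SIG_EXT,
			  &md5sig, sizeof(md5sig));
}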
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 97fceb9be9ed..d3047e251fe9 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -29,6 +29,8 @@ struct tls_crypto_info_keys {
union {
struct tls12_crypto_info_aes_gcm_128 aes128;
struct tls12_crypto_info_chacha20_poly1305 chacha20;
+ struct tls12_crypto_info_sm4_gcm sm4gcm;
+ struct tls12_crypto_info_sm4_ccm sm4ccm;
};
size_t len;
};
@@ -49,6 +51,16 @@ static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type,
tls12->aes128.info.version = tls_version;
tls12->aes128.info.cipher_type = cipher_type;
break;
+ case TLS_CIPHER_SM4_GCM:
+ tls12->len = sizeof(struct tls12_crypto_info_sm4_gcm);
+ tls12->sm4gcm.info.version = tls_version;
+ tls12->sm4gcm.info.cipher_type = cipher_type;
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ tls12->len = sizeof(struct tls12_crypto_info_sm4_ccm);
+ tls12->sm4ccm.info.version = tls_version;
+ tls12->sm4ccm.info.cipher_type = cipher_type;
+ break;
default:
break;
}
@@ -148,13 +160,13 @@ FIXTURE_VARIANT(tls)
uint16_t cipher_type;
};
-FIXTURE_VARIANT_ADD(tls, 12_gcm)
+FIXTURE_VARIANT_ADD(tls, 12_aes_gcm)
{
.tls_version = TLS_1_2_VERSION,
.cipher_type = TLS_CIPHER_AES_GCM_128,
};
-FIXTURE_VARIANT_ADD(tls, 13_gcm)
+FIXTURE_VARIANT_ADD(tls, 13_aes_gcm)
{
.tls_version = TLS_1_3_VERSION,
.cipher_type = TLS_CIPHER_AES_GCM_128,
@@ -172,6 +184,18 @@ FIXTURE_VARIANT_ADD(tls, 13_chacha)
.cipher_type = TLS_CIPHER_CHACHA20_POLY1305,
};
+FIXTURE_VARIANT_ADD(tls, 13_sm4_gcm)
+{
+ .tls_version = TLS_1_3_VERSION,
+ .cipher_type = TLS_CIPHER_SM4_GCM,
+};
+
+FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm)
+{
+ .tls_version = TLS_1_3_VERSION,
+ .cipher_type = TLS_CIPHER_SM4_CCM,
+};
+
FIXTURE_SETUP(tls)
{
struct tls_crypto_info_keys tls12;
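The new fixture variants exercise the SM4 ciphers end to end; the per-socket setup looks roughly like this, assuming UAPI headers new enough to define the SM4 structs (the key material is zeroed purely for the sketch):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>		/* IPPROTO_TCP */
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif
#ifndef TCP_ULP
#define TCP_ULP 31
#endif

/* Illustrative: setup matching the new 13_sm4_gcm variant. */
static int enable_sm4_gcm_tx(int sd)
{
	struct tls12_crypto_info_sm4_gcm ci = {};

	ci.info.version = TLS_1_3_VERSION;
	ci.info.cipher_type = TLS_CIPHER_SM4_GCM;
	/* key/iv/salt/rec_seq left zeroed purely for the sketch */

	if (setsockopt(sd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	return setsockopt(sd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}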
diff --git a/tools/testing/selftests/netfilter/nft_flowtable.sh b/tools/testing/selftests/netfilter/nft_flowtable.sh
index 427d94816f2d..d4ffebb989f8 100755
--- a/tools/testing/selftests/netfilter/nft_flowtable.sh
+++ b/tools/testing/selftests/netfilter/nft_flowtable.sh
@@ -199,7 +199,6 @@ fi
# test basic connectivity
if ! ip netns exec ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then
echo "ERROR: ns1 cannot reach ns2" 1>&2
- bash
exit 1
fi
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
index d7e07f4c3d7f..da1c1e4b6c86 100755
--- a/tools/testing/selftests/netfilter/nft_nat.sh
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -741,6 +741,149 @@ EOF
return $lret
}
+# test port shadowing.
+# create two listening services, one on the router (ns0), one on the
+# client (ns2), which is masqueraded from ns1's point of view.
+# ns2 sends a udp packet coming from the service port to ns1, on a highport.
+# Later, if ns1 uses the same highport to connect to ns0:service, the packet
+# might be port-forwarded to ns2 instead.
+
+# second argument tells if we expect the 'fake-entry' to take effect
+# (CLIENT) or not (ROUTER).
+test_port_shadow()
+{
+ local test=$1
+ local expect=$2
+ local daddrc="10.0.1.99"
+ local daddrs="10.0.1.1"
+ local result=""
+ local logmsg=""
+
+ echo ROUTER | ip netns exec "$ns0" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 &
+ nc_r=$!
+
+ echo CLIENT | ip netns exec "$ns2" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 &
+ nc_c=$!
+
+ # make a shadow entry, from the client (ns2), going to ns1, port 41404, sport 1405.
+ echo "fake-entry" | ip netns exec "$ns2" nc -w 1 -p 1405 -u "$daddrc" 41404 > /dev/null
+
+ # ns1 tries to connect to ns0:1405. With default settings this should
+ # connect to the client, since it matches the conntrack entry created above.
+
+ result=$(echo "" | ip netns exec "$ns1" nc -w 1 -p 41404 -u "$daddrs" 1405)
+
+ if [ "$result" = "$expect" ] ;then
+ echo "PASS: portshadow test $test: got reply from ${expect}${logmsg}"
+ else
+ echo "ERROR: portshadow test $test: got reply from \"$result\", not $expect as intended"
+ ret=1
+ fi
+
+ kill $nc_r $nc_c 2>/dev/null
+
+ # flush udp entries for next test round, if any
+ ip netns exec "$ns0" conntrack -F >/dev/null 2>&1
+}
+
+# This prevents port shadow of the router service via packet filter:
+# packets claiming to originate from the service port from the internal
+# network are dropped.
+test_port_shadow_filter()
+{
+ local family=$1
+
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table $family filter {
+ chain forward {
+ type filter hook forward priority 0; policy accept;
+ meta iif veth1 udp sport 1405 drop
+ }
+}
+EOF
+ test_port_shadow "port-filter" "ROUTER"
+
+ ip netns exec "$ns0" nft delete table $family filter
+}
+
+# This prevents port shadow of the router service via notrack.
+test_port_shadow_notrack()
+{
+ local family=$1
+
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table $family raw {
+ chain prerouting {
+ type filter hook prerouting priority -300; policy accept;
+ meta iif veth0 udp dport 1405 notrack
+ udp dport 1405 notrack
+ }
+ chain output {
+ type filter hook output priority -300; policy accept;
+ udp sport 1405 notrack
+ }
+}
+EOF
+ test_port_shadow "port-notrack" "ROUTER"
+
+ ip netns exec "$ns0" nft delete table $family raw
+}
+
+# This prevents port shadow of the router service via sport remap.
+test_port_shadow_pat()
+{
+ local family=$1
+
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table $family pat {
+ chain postrouting {
+ type nat hook postrouting priority -1; policy accept;
+ meta iif veth1 udp sport <= 1405 masquerade to : 1406-65535 random
+ }
+}
+EOF
+ test_port_shadow "pat" "ROUTER"
+
+ ip netns exec "$ns0" nft delete table $family pat
+}
+
+test_port_shadowing()
+{
+ local family="ip"
+
+ ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+ ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table $family nat {
+ chain postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ meta oif veth0 masquerade
+ }
+}
+EOF
+ if [ $? -ne 0 ]; then
+ echo "SKIP: Could not add add $family masquerade hook"
+ return $ksft_skip
+ fi
+
+ # test default behaviour. Packet from ns1 to ns0 is redirected to ns2.
+ test_port_shadow "default" "CLIENT"
+
+ # test packet filter based mitigation: prevent forwarding of
+ # packets claiming to come from the service port.
+ test_port_shadow_filter "$family"
+
+ # test conntrack based mitigation: connections going or coming
+ # from router:service bypass connection tracking.
+ test_port_shadow_notrack "$family"
+
+ # test nat based mitigation: forwarded packets coming from the service
+ # port are masqueraded with a random highport.
+ test_port_shadow_pat "$family"
+
+ ip netns exec "$ns0" nft delete table $family nat
+}
# ip netns exec "$ns0" ping -c 1 -q 10.0.$i.99
for i in 0 1 2; do
@@ -861,6 +1004,8 @@ reset_counters
$test_inet_nat && test_redirect inet
$test_inet_nat && test_redirect6 inet
+test_port_shadowing
+
if [ $ret -ne 0 ];then
echo -n "FAIL: "
nft --version
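The "fake-entry" at the heart of test_port_shadow() is plain UDP with a crafted source port; a rough C equivalent of the `nc -p 1405 -u 10.0.1.99 41404` invocation (hypothetical helper, addresses as in the test):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Roughly what the test's shadow-entry nc command does. */
static int send_shadow_probe(void)
{
	struct sockaddr_in src = { .sin_family = AF_INET,
				   .sin_port = htons(1405) };
	struct sockaddr_in dst = { .sin_family = AF_INET,
				   .sin_port = htons(41404) };
	const char msg[] = "fake-entry";
	int sd = socket(AF_INET, SOCK_DGRAM, 0);

	if (sd < 0)
		return -1;
	inet_pton(AF_INET, "10.0.1.99", &dst.sin_addr);
	if (bind(sd, (struct sockaddr *)&src, sizeof(src)) ||
	    sendto(sd, msg, sizeof(msg) - 1, 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		close(sd);
		return -1;
	}
	close(sd);
	return 0;
}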
diff --git a/tools/testing/selftests/netfilter/nft_nat_zones.sh b/tools/testing/selftests/netfilter/nft_nat_zones.sh
new file mode 100755
index 000000000000..b9ab37380f33
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_nat_zones.sh
@@ -0,0 +1,309 @@
+#!/bin/bash
+#
+# Test connection tracking zone and NAT source port reallocation support.
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+# Don't increase this too much: 2000 clients should work
+# just fine, but the script can then take several minutes with
+# KASAN/debug builds.
+maxclients=100
+
+have_iperf=1
+ret=0
+
+# client1---.
+# veth1-.
+# |
+# NAT Gateway --veth0--> Server
+# | |
+# veth2-' |
+# client2---' |
+# .... |
+# clientX----vethX---'
+
+# All clients share the same IP address.
+# NAT Gateway uses policy routing and conntrack zones to isolate client
+# namespaces. Each client connects to Server, each with colliding tuples:
+# clientsaddr:10000 -> serveraddr:dport
+# NAT Gateway is supposed to do port reallocation for each of the
+# connections.
+
+sfx=$(mktemp -u "XXXXXXXX")
+gw="ns-gw-$sfx"
+cl1="ns-cl1-$sfx"
+cl2="ns-cl2-$sfx"
+srv="ns-srv-$sfx"
+
+v4gc1=$(sysctl -n net.ipv4.neigh.default.gc_thresh1 2>/dev/null)
+v4gc2=$(sysctl -n net.ipv4.neigh.default.gc_thresh2 2>/dev/null)
+v4gc3=$(sysctl -n net.ipv4.neigh.default.gc_thresh3 2>/dev/null)
+v6gc1=$(sysctl -n net.ipv6.neigh.default.gc_thresh1 2>/dev/null)
+v6gc2=$(sysctl -n net.ipv6.neigh.default.gc_thresh2 2>/dev/null)
+v6gc3=$(sysctl -n net.ipv6.neigh.default.gc_thresh3 2>/dev/null)
+
+cleanup()
+{
+ ip netns del $gw
+ ip netns del $srv
+ for i in $(seq 1 $maxclients); do
+ ip netns del ns-cl$i-$sfx 2>/dev/null
+ done
+
+ sysctl -q net.ipv4.neigh.default.gc_thresh1=$v4gc1 2>/dev/null
+ sysctl -q net.ipv4.neigh.default.gc_thresh2=$v4gc2 2>/dev/null
+ sysctl -q net.ipv4.neigh.default.gc_thresh3=$v4gc3 2>/dev/null
+ sysctl -q net.ipv6.neigh.default.gc_thresh1=$v6gc1 2>/dev/null
+ sysctl -q net.ipv6.neigh.default.gc_thresh2=$v6gc2 2>/dev/null
+ sysctl -q net.ipv6.neigh.default.gc_thresh3=$v6gc3 2>/dev/null
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+conntrack -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without conntrack tool"
+ exit $ksft_skip
+fi
+
+iperf3 -v >/dev/null 2>&1
+if [ $? -ne 0 ];then
+ have_iperf=0
+fi
+
+ip netns add "$gw"
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not create net namespace $gw"
+ exit $ksft_skip
+fi
+ip -net "$gw" link set lo up
+
+trap cleanup EXIT
+
+ip netns add "$srv"
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not create server netns $srv"
+ exit $ksft_skip
+fi
+
+ip link add veth0 netns "$gw" type veth peer name eth0 netns "$srv"
+ip -net "$gw" link set veth0 up
+ip -net "$srv" link set lo up
+ip -net "$srv" link set eth0 up
+
+sysctl -q net.ipv6.neigh.default.gc_thresh1=512 2>/dev/null
+sysctl -q net.ipv6.neigh.default.gc_thresh2=1024 2>/dev/null
+sysctl -q net.ipv6.neigh.default.gc_thresh3=4096 2>/dev/null
+sysctl -q net.ipv4.neigh.default.gc_thresh1=512 2>/dev/null
+sysctl -q net.ipv4.neigh.default.gc_thresh2=1024 2>/dev/null
+sysctl -q net.ipv4.neigh.default.gc_thresh3=4096 2>/dev/null
+
+for i in $(seq 1 $maxclients);do
+ cl="ns-cl$i-$sfx"
+
+ ip netns add "$cl"
+ if [ $? -ne 0 ];then
+ echo "SKIP: Could not create client netns $cl"
+ exit $ksft_skip
+ fi
+ ip link add veth$i netns "$gw" type veth peer name eth0 netns "$cl" > /dev/null 2>&1
+ if [ $? -ne 0 ];then
+ echo "SKIP: No virtual ethernet pair device support in kernel"
+ exit $ksft_skip
+ fi
+done
+
+for i in $(seq 1 $maxclients);do
+ cl="ns-cl$i-$sfx"
+ echo netns exec "$cl" ip link set lo up
+ echo netns exec "$cl" ip link set eth0 up
+ echo netns exec "$cl" sysctl -q net.ipv4.tcp_syn_retries=2
+ echo netns exec "$gw" ip link set veth$i up
+ echo netns exec "$gw" sysctl -q net.ipv4.conf.veth$i.arp_ignore=2
+ echo netns exec "$gw" sysctl -q net.ipv4.conf.veth$i.rp_filter=0
+
+ # clients have same IP addresses.
+ echo netns exec "$cl" ip addr add 10.1.0.3/24 dev eth0
+ echo netns exec "$cl" ip addr add dead:1::3/64 dev eth0
+ echo netns exec "$cl" ip route add default via 10.1.0.2 dev eth0
+ echo netns exec "$cl" ip route add default via dead:1::2 dev eth0
+
+ # NB: same addresses on client-facing interfaces.
+ echo netns exec "$gw" ip addr add 10.1.0.2/24 dev veth$i
+ echo netns exec "$gw" ip addr add dead:1::2/64 dev veth$i
+
+ # gw: policy routing
+ echo netns exec "$gw" ip route add 10.1.0.0/24 dev veth$i table $((1000+i))
+ echo netns exec "$gw" ip route add dead:1::0/64 dev veth$i table $((1000+i))
+ echo netns exec "$gw" ip route add 10.3.0.0/24 dev veth0 table $((1000+i))
+ echo netns exec "$gw" ip route add dead:3::0/64 dev veth0 table $((1000+i))
+ echo netns exec "$gw" ip rule add fwmark $i lookup $((1000+i))
+done | ip -batch /dev/stdin
+
+ip -net "$gw" addr add 10.3.0.1/24 dev veth0
+ip -net "$gw" addr add dead:3::1/64 dev veth0
+
+ip -net "$srv" addr add 10.3.0.99/24 dev eth0
+ip -net "$srv" addr add dead:3::99/64 dev eth0
+
+ip netns exec $gw nft -f /dev/stdin<<EOF
+table inet raw {
+ map iiftomark {
+ type ifname : mark
+ }
+
+ map iiftozone {
+ typeof iifname : ct zone
+ }
+
+ set inicmp {
+ flags dynamic
+ type ipv4_addr . ifname . ipv4_addr
+ }
+ set inflows {
+ flags dynamic
+ type ipv4_addr . inet_service . ifname . ipv4_addr . inet_service
+ }
+
+ set inflows6 {
+ flags dynamic
+ type ipv6_addr . inet_service . ifname . ipv6_addr . inet_service
+ }
+
+ chain prerouting {
+ type filter hook prerouting priority -64000; policy accept;
+ ct original zone set meta iifname map @iiftozone
+ meta mark set meta iifname map @iiftomark
+
+ tcp flags & (syn|ack) == ack add @inflows { ip saddr . tcp sport . meta iifname . ip daddr . tcp dport counter }
+ add @inflows6 { ip6 saddr . tcp sport . meta iifname . ip6 daddr . tcp dport counter }
+ ip protocol icmp add @inicmp { ip saddr . meta iifname . ip daddr counter }
+ }
+
+ chain nat_postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ ct mark set meta mark meta oifname veth0 masquerade
+ }
+
+ chain mangle_prerouting {
+ type filter hook prerouting priority -100; policy accept;
+ ct direction reply meta mark set ct mark
+ }
+}
+EOF
+
+( echo add element inet raw iiftomark \{
+ for i in $(seq 1 $((maxclients-1))); do
+ echo \"veth$i\" : $i,
+ done
+ echo \"veth$maxclients\" : $maxclients \}
+ echo add element inet raw iiftozone \{
+ for i in $(seq 1 $((maxclients-1))); do
+ echo \"veth$i\" : $i,
+ done
+ echo \"veth$maxclients\" : $maxclients \}
+) | ip netns exec $gw nft -f /dev/stdin
+
+ip netns exec "$gw" sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
+ip netns exec "$gw" sysctl -q net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec "$gw" sysctl -q net.ipv4.conf.all.rp_filter=0 >/dev/null
+
+# useful for debugging: allows 'ping' from the clients to the gateway.
+ip netns exec "$gw" sysctl -q net.ipv4.fwmark_reflect=1 > /dev/null
+ip netns exec "$gw" sysctl -q net.ipv6.fwmark_reflect=1 > /dev/null
+
+for i in $(seq 1 $maxclients); do
+ cl="ns-cl$i-$sfx"
+ ip netns exec $cl ping -i 0.5 -q -c 3 10.3.0.99 > /dev/null 2>&1 &
+ if [ $? -ne 0 ]; then
+ echo FAIL: Ping failure from $cl 1>&2
+ ret=1
+ break
+ fi
+done
+
+wait
+
+for i in $(seq 1 $maxclients); do
+ ip netns exec $gw nft get element inet raw inicmp "{ 10.1.0.3 . \"veth$i\" . 10.3.0.99 }" | grep -q "{ 10.1.0.3 . \"veth$i\" . 10.3.0.99 counter packets 3 bytes 252 }"
+ if [ $? -ne 0 ];then
+ ret=1
+ echo "FAIL: counter icmp mismatch for veth$i" 1>&2
+ ip netns exec $gw nft get element inet raw inicmp "{ 10.1.0.3 . \"veth$i\" . 10.3.0.99 }" 1>&2
+ break
+ fi
+done
+
+ip netns exec $gw nft get element inet raw inicmp "{ 10.3.0.99 . \"veth0\" . 10.3.0.1 }" | grep -q "{ 10.3.0.99 . \"veth0\" . 10.3.0.1 counter packets $((3 * $maxclients)) bytes $((252 * $maxclients)) }"
+if [ $? -ne 0 ];then
+ ret=1
+ echo "FAIL: counter icmp mismatch for veth0: { 10.3.0.99 . \"veth0\" . 10.3.0.1 counter packets $((3 * $maxclients)) bytes $((252 * $maxclients)) }"
+ ip netns exec $gw nft get element inet raw inicmp "{ 10.3.99 . \"veth0\" . 10.3.0.1 }" 1>&2
+fi
+
+if [ $ret -eq 0 ]; then
+ echo "PASS: ping test from all $maxclients namespaces"
+fi
+
+if [ $have_iperf -eq 0 ];then
+ echo "SKIP: iperf3 not installed"
+ if [ $ret -ne 0 ];then
+ exit $ret
+ fi
+ exit $ksft_skip
+fi
+
+ip netns exec $srv iperf3 -s > /dev/null 2>&1 &
+iperfpid=$!
+sleep 1
+
+for i in $(seq 1 $maxclients); do
+ if [ $ret -ne 0 ]; then
+ break
+ fi
+ cl="ns-cl$i-$sfx"
+ ip netns exec $cl iperf3 -c 10.3.0.99 --cport 10000 -n 1 > /dev/null
+ if [ $? -ne 0 ]; then
+ echo FAIL: Failure to connect for $cl 1>&2
+ ip netns exec $gw conntrack -S 1>&2
+ ret=1
+ fi
+done
+if [ $ret -eq 0 ];then
+ echo "PASS: iperf3 connections for all $maxclients net namespaces"
+fi
+
+kill $iperfpid
+wait
+
+for i in $(seq 1 $maxclients); do
+ ip netns exec $gw nft get element inet raw inflows "{ 10.1.0.3 . 10000 . \"veth$i\" . 10.3.0.99 . 5201 }" > /dev/null
+ if [ $? -ne 0 ];then
+ ret=1
+ echo "FAIL: can't find expected tcp entry for veth$i" 1>&2
+ break
+ fi
+done
+if [ $ret -eq 0 ];then
+ echo "PASS: Found client connection for all $maxclients net namespaces"
+fi
+
+ip netns exec $gw nft get element inet raw inflows "{ 10.3.0.99 . 5201 . \"veth0\" . 10.3.0.1 . 10000 }" > /dev/null
+if [ $? -ne 0 ];then
+ ret=1
+ echo "FAIL: cannot find return entry on veth0" 1>&2
+fi
+
+exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_zones_many.sh b/tools/testing/selftests/netfilter/nft_zones_many.sh
new file mode 100755
index 000000000000..ac646376eb01
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_zones_many.sh
@@ -0,0 +1,156 @@
+#!/bin/bash
+
+# Test insertion speed for packets with identical addresses/ports
+# that are all placed in distinct conntrack zones.
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns="ns-$sfx"
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+zones=20000
+have_ct_tool=0
+ret=0
+
+cleanup()
+{
+ ip netns del $ns
+}
+
+ip netns add $ns
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not create net namespace $gw"
+ exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+conntrack -V > /dev/null 2>&1
+if [ $? -eq 0 ];then
+ have_ct_tool=1
+fi
+
+ip -net "$ns" link set lo up
+
+test_zones() {
+ local max_zones=$1
+
+ip netns exec $ns sysctl -q net.netfilter.nf_conntrack_udp_timeout=3600
+ip netns exec $ns nft -f /dev/stdin<<EOF
+flush ruleset
+table inet raw {
+ map rndzone {
+ typeof numgen inc mod $max_zones : ct zone
+ }
+
+ chain output {
+ type filter hook output priority -64000; policy accept;
+ udp dport 12345 ct zone set numgen inc mod 65536 map @rndzone
+ }
+}
+EOF
+ (
+ echo "add element inet raw rndzone {"
+ for i in $(seq 1 $max_zones);do
+ echo -n "$i : $i"
+ if [ $i -lt $max_zones ]; then
+ echo ","
+ else
+ echo "}"
+ fi
+ done
+ ) | ip netns exec $ns nft -f /dev/stdin
+
+ local i=0
+ local j=0
+ local outerstart=$(date +%s%3N)
+ local stop=$outerstart
+
+ while [ $i -lt $max_zones ]; do
+ local start=$(date +%s%3N)
+ i=$((i + 10000))
+ j=$((j + 1))
+ dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null
+ if [ $? -ne 0 ] ;then
+ ret=1
+ break
+ fi
+
+ stop=$(date +%s%3N)
+ local duration=$((stop-start))
+ echo "PASS: added 10000 entries in $duration ms (now $i total, loop $j)"
+ done
+
+ if [ $have_ct_tool -eq 1 ]; then
+ local count=$(ip netns exec "$ns" conntrack -C)
+ local duration=$((stop-outerstart))
+
+ if [ $count -eq $max_zones ]; then
+ echo "PASS: inserted $count entries from packet path in $duration ms total"
+ else
+ ip netns exec $ns conntrack -S 1>&2
+ echo "FAIL: inserted $count entries from packet path in $duration ms total, expected $max_zones entries"
+ ret=1
+ fi
+ fi
+
+ if [ $ret -ne 0 ];then
+ echo "FAIL: insert $max_zones entries from packet path" 1>&2
+ fi
+}
+
+test_conntrack_tool() {
+ local max_zones=$1
+
+ ip netns exec $ns conntrack -F >/dev/null 2>/dev/null
+
+ local outerstart=$(date +%s%3N)
+ local start=$(date +%s%3N)
+ local stop=$start
+ local i=0
+ while [ $i -lt $max_zones ]; do
+ i=$((i + 1))
+ ip netns exec "$ns" conntrack -I -s 1.1.1.1 -d 2.2.2.2 --protonum 6 \
+ --timeout 3600 --state ESTABLISHED --sport 12345 --dport 1000 --zone $i >/dev/null 2>&1
+ if [ $? -ne 0 ];then
+ ip netns exec "$ns" conntrack -I -s 1.1.1.1 -d 2.2.2.2 --protonum 6 \
+ --timeout 3600 --state ESTABLISHED --sport 12345 --dport 1000 --zone $i > /dev/null
+ echo "FAIL: conntrack -I returned an error"
+ ret=1
+ break
+ fi
+
+ if [ $((i%10000)) -eq 0 ];then
+ stop=$(date +%s%3N)
+
+ local duration=$((stop-start))
+ echo "PASS: added 10000 entries in $duration ms (now $i total)"
+ start=$stop
+ fi
+ done
+
+ local count=$(ip netns exec "$ns" conntrack -C)
+ local duration=$((stop-outerstart))
+
+ if [ $count -eq $max_zones ]; then
+ echo "PASS: inserted $count entries via ctnetlink in $duration ms"
+ else
+ ip netns exec $ns conntrack -S 1>&2
+ echo "FAIL: inserted $count entries via ctnetlink in $duration ms, expected $max_zones entries ($duration ms)"
+ ret=1
+ fi
+}
+
+test_zones $zones
+
+if [ $have_ct_tool -eq 1 ];then
+ test_conntrack_tool $zones
+else
+ echo "SKIP: Could not run ctnetlink insertion test without conntrack tool"
+ if [ $ret -eq 0 ];then
+ exit $ksft_skip
+ fi
+fi
+
+exit $ret
diff --git a/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S b/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
index bd1ca25febe4..aed632d29fff 100644
--- a/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
+++ b/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#include <ppc-asm.h>
+#include <basic_asm.h>
#include <asm/unistd.h>
.text
@@ -26,3 +26,38 @@ FUNC_START(getppid_tm_suspended)
1:
li r3, -1
blr
+
+
+.macro scv level
+ .long (0x44000001 | (\level) << 5)
+.endm
+
+FUNC_START(getppid_scv_tm_active)
+ PUSH_BASIC_STACK(0)
+ tbegin.
+ beq 1f
+ li r0, __NR_getppid
+ scv 0
+ tend.
+ POP_BASIC_STACK(0)
+ blr
+1:
+ li r3, -1
+ POP_BASIC_STACK(0)
+ blr
+
+FUNC_START(getppid_scv_tm_suspended)
+ PUSH_BASIC_STACK(0)
+ tbegin.
+ beq 1f
+ li r0, __NR_getppid
+ tsuspend.
+ scv 0
+ tresume.
+ tend.
+ POP_BASIC_STACK(0)
+ blr
+1:
+ li r3, -1
+ POP_BASIC_STACK(0)
+ blr
diff --git a/tools/testing/selftests/powerpc/tm/tm-syscall.c b/tools/testing/selftests/powerpc/tm/tm-syscall.c
index 467a6b3134b2..b763354c2eb4 100644
--- a/tools/testing/selftests/powerpc/tm/tm-syscall.c
+++ b/tools/testing/selftests/powerpc/tm/tm-syscall.c
@@ -19,23 +19,36 @@
#include "utils.h"
#include "tm.h"
+#ifndef PPC_FEATURE2_SCV
+#define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */
+#endif
+
extern int getppid_tm_active(void);
extern int getppid_tm_suspended(void);
+extern int getppid_scv_tm_active(void);
+extern int getppid_scv_tm_suspended(void);
unsigned retries = 0;
#define TEST_DURATION 10 /* seconds */
-pid_t getppid_tm(bool suspend)
+pid_t getppid_tm(bool scv, bool suspend)
{
int i;
pid_t pid;
for (i = 0; i < TM_RETRIES; i++) {
- if (suspend)
- pid = getppid_tm_suspended();
- else
- pid = getppid_tm_active();
+ if (suspend) {
+ if (scv)
+ pid = getppid_scv_tm_suspended();
+ else
+ pid = getppid_tm_suspended();
+ } else {
+ if (scv)
+ pid = getppid_scv_tm_active();
+ else
+ pid = getppid_tm_active();
+ }
if (pid >= 0)
return pid;
@@ -82,15 +95,24 @@ int tm_syscall(void)
* Test a syscall within a suspended transaction and verify
* that it succeeds.
*/
- FAIL_IF(getppid_tm(true) == -1); /* Should succeed. */
+ FAIL_IF(getppid_tm(false, true) == -1); /* Should succeed. */
/*
* Test a syscall within an active transaction and verify that
* it fails with the correct failure code.
*/
- FAIL_IF(getppid_tm(false) != -1); /* Should fail... */
+ FAIL_IF(getppid_tm(false, false) != -1); /* Should fail... */
FAIL_IF(!failure_is_persistent()); /* ...persistently... */
FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */
+
+ /* Now do it all again with scv if it is available. */
+ if (have_hwcap2(PPC_FEATURE2_SCV)) {
+ FAIL_IF(getppid_tm(true, true) == -1); /* Should succeed. */
+ FAIL_IF(getppid_tm(true, false) != -1); /* Should fail... */
+ FAIL_IF(!failure_is_persistent()); /* ...persistently... */
+ FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */
+ }
+
gettimeofday(&now, 0);
}
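have_hwcap2() here is the harness's wrapper; the underlying check is just an auxiliary-vector read, roughly:

#include <sys/auxv.h>	/* getauxval(), AT_HWCAP2 */

#ifndef PPC_FEATURE2_SCV
#define PPC_FEATURE2_SCV 0x00100000	/* scv syscall */
#endif

/* What have_hwcap2(PPC_FEATURE2_SCV) boils down to. */
static int scv_available(void)
{
	return !!(getauxval(AT_HWCAP2) & PPC_FEATURE2_SCV);
}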
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 10ab56c2484a..60aa1a4fc69b 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -414,9 +414,6 @@ static void uffd_test_ctx_init_ext(uint64_t *features)
uffd_test_ops->allocate_area((void **)&area_src);
uffd_test_ops->allocate_area((void **)&area_dst);
- uffd_test_ops->release_pages(area_src);
- uffd_test_ops->release_pages(area_dst);
-
userfaultfd_open(features);
count_verify = malloc(nr_pages * sizeof(unsigned long long));
@@ -437,6 +434,26 @@ static void uffd_test_ctx_init_ext(uint64_t *features)
*(area_count(area_src, nr) + 1) = 1;
}
+ /*
+ * After initialization of area_src, we must explicitly release pages
+ * for area_dst to make sure it's fully empty. Otherwise we could have
+ * some area_dst pages erroneously initialized with zero pages,
+ * hence we could hit memory corruption later in the test.
+ *
+ * One example is when THP is globally enabled, above allocate_area()
+ * calls could have the two areas merged into a single VMA (as they
+ * will have the same VMA flags so they're mergeable). When we
+ * initialize the area_src above, it's possible that some part of
+ * area_dst could have been faulted in via one huge THP that will be
+ * shared between area_src and area_dst. It could cause some of the
+ * area_dst pages not to be trapped by missing userfaults.
+ *
+ * This release_pages() guarantees that even if that happened, we'll
+ * proactively split the THP and drop any accidentally initialized
+ * pages within area_dst.
+ */
+ uffd_test_ops->release_pages(area_dst);
+
pipefd = malloc(sizeof(int) * nr_cpus * 2);
if (!pipefd)
err("pipefd");
diff --git a/tools/testing/vsock/vsock_diag_test.c b/tools/testing/vsock/vsock_diag_test.c
index cec6f5a738e1..fa927ad16f8a 100644
--- a/tools/testing/vsock/vsock_diag_test.c
+++ b/tools/testing/vsock/vsock_diag_test.c
@@ -332,8 +332,6 @@ static void test_no_sockets(const struct test_opts *opts)
read_vsock_stat(&sockets);
check_no_sockets(&sockets);
-
- free_sock_stat(&sockets);
}
static void test_listen_socket_server(const struct test_opts *opts)
diff --git a/tools/usb/testusb.c b/tools/usb/testusb.c
index ee8208b2f946..69c3ead25313 100644
--- a/tools/usb/testusb.c
+++ b/tools/usb/testusb.c
@@ -265,12 +265,6 @@ nomem:
}
entry->ifnum = ifnum;
-
- /* FIXME update USBDEVFS_CONNECTINFO so it tells about high speed etc */
-
- fprintf(stderr, "%s speed\t%s\t%u\n",
- speed(entry->speed), entry->name, entry->ifnum);
-
entry->next = testdevs;
testdevs = entry;
return 0;
@@ -299,6 +293,14 @@ static void *handle_testdev (void *arg)
return 0;
}
+ status = ioctl(fd, USBDEVFS_GET_SPEED, NULL);
+ if (status < 0)
+ fprintf(stderr, "USBDEVFS_GET_SPEED failed %d\n", status);
+ else
+ dev->speed = status;
+ fprintf(stderr, "%s speed\t%s\t%u\n",
+ speed(dev->speed), dev->name, dev->ifnum);
+
restart:
for (i = 0; i < TEST_CASES; i++) {
if (dev->test != -1 && dev->test != i)
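USBDEVFS_GET_SPEED returns a value from enum usb_device_speed, so decoding it looks roughly like this (a sketch, not testusb's actual speed() table):

#include <linux/usb/ch9.h>	/* enum usb_device_speed */

/* Sketch of decoding the ioctl's return value. */
static const char *speed_name(int s)
{
	switch (s) {
	case USB_SPEED_LOW:	return "low";
	case USB_SPEED_FULL:	return "full";
	case USB_SPEED_HIGH:	return "high";
	case USB_SPEED_SUPER:	return "super";
	default:		return "unknown";
	}
}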
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 0517c744b04e..f62f10c988db 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -1331,7 +1331,7 @@ int main(int argc, char *argv[])
if (opt_list && opt_list_mapcnt)
kpagecount_fd = checked_open(PROC_KPAGECOUNT, O_RDONLY);
- if (opt_mark_idle && opt_file)
+ if (opt_mark_idle)
page_idle_fd = checked_open(SYS_KERNEL_MM_PAGE_IDLE, O_RDWR);
if (opt_list && opt_pid)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 439d3b4cd1a9..7851f3a1b5f7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -235,9 +235,13 @@ static void ack_flush(void *_completed)
{
}
-static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
+static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
{
- if (unlikely(!cpus))
+ const struct cpumask *cpus;
+
+ if (likely(cpumask_available(tmp)))
+ cpus = tmp;
+ else
cpus = cpu_online_mask;
if (cpumask_empty(cpus))
@@ -263,14 +267,34 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
continue;
kvm_make_request(req, vcpu);
- cpu = vcpu->cpu;
if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
continue;
- if (tmp != NULL && cpu != -1 && cpu != me &&
- kvm_request_needs_ipi(vcpu, req))
- __cpumask_set_cpu(cpu, tmp);
+ /*
+ * tmp can be "unavailable" if cpumasks are allocated off stack
+ * as allocation of the mask is deliberately not fatal and is
+ * handled by falling back to kicking all online CPUs.
+ */
+ if (!cpumask_available(tmp))
+ continue;
+
+ /*
+ * Note, the vCPU could get migrated to a different pCPU at any
+ * point after kvm_request_needs_ipi(), which could result in
+ * sending an IPI to the previous pCPU. But, that's ok because
+ * the purpose of the IPI is to ensure the vCPU returns to
+ * OUTSIDE_GUEST_MODE, which is satisfied if the vCPU migrates.
+ * Entering READING_SHADOW_PAGE_TABLES after this point is also
+ * ok, as the requirement is only that KVM wait for vCPUs that
+ * were reading SPTEs _before_ any changes were finalized. See
+ * kvm_vcpu_kick() for more details on handling requests.
+ */
+ if (kvm_request_needs_ipi(vcpu, req)) {
+ cpu = READ_ONCE(vcpu->cpu);
+ if (cpu != -1 && cpu != me)
+ __cpumask_set_cpu(cpu, tmp);
+ }
}
called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
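Restated outside kernel context, the pattern in this hunk is "decide first, then snapshot the CPU exactly once"; a userspace analogue in which a relaxed atomic load stands in for READ_ONCE():

#include <stdatomic.h>

/*
 * Userspace analogue of the hunk above: snapshot the vCPU's pCPU
 * exactly once, and only after deciding that a kick is needed at all,
 * so both comparisons see the same value even if the vCPU migrates.
 */
static int pick_ipi_target(const _Atomic int *vcpu_cpu, int me)
{
	int cpu = atomic_load_explicit(vcpu_cpu, memory_order_relaxed);

	return (cpu != -1 && cpu != me) ? cpu : -1;	/* -1: nothing to kick */
}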
@@ -302,13 +326,8 @@ EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
- /*
- * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
- * kvm_make_all_cpus_request.
- */
- long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
-
++kvm->stat.generic.remote_tlb_flush_requests;
+
/*
* We want to publish modifications to the page tables before reading
* mode. Pairs with a memory barrier in arch-specific code.
@@ -323,7 +342,6 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
if (!kvm_arch_flush_remote_tlb(kvm)
|| kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
++kvm->stat.generic.remote_tlb_flush;
- cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif
@@ -528,7 +546,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
}
}
- if (range->flush_on_ret && (ret || kvm->tlbs_dirty))
+ if (range->flush_on_ret && ret)
kvm_flush_remote_tlbs(kvm);
if (locked)
@@ -3134,15 +3152,19 @@ out:
static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
{
- unsigned int old, val, shrink;
+ unsigned int old, val, shrink, grow_start;
old = val = vcpu->halt_poll_ns;
shrink = READ_ONCE(halt_poll_ns_shrink);
+ grow_start = READ_ONCE(halt_poll_ns_grow_start);
if (shrink == 0)
val = 0;
else
val /= shrink;
+ if (val < grow_start)
+ val = 0;
+
vcpu->halt_poll_ns = val;
trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
}
@@ -3290,16 +3312,24 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
*/
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
- int me;
- int cpu = vcpu->cpu;
+ int me, cpu;
if (kvm_vcpu_wake_up(vcpu))
return;
+ /*
+ * Note, the vCPU could get migrated to a different pCPU at any point
+ * after kvm_arch_vcpu_should_kick(), which could result in sending an
+ * IPI to the previous pCPU. But, that's ok because the purpose of the
+ * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
+ * vCPU also requires it to leave IN_GUEST_MODE.
+ */
me = get_cpu();
- if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
- if (kvm_arch_vcpu_should_kick(vcpu))
+ if (kvm_arch_vcpu_should_kick(vcpu)) {
+ cpu = READ_ONCE(vcpu->cpu);
+ if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
smp_send_reschedule(cpu);
+ }
put_cpu();
}
EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
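Pulled out of context, the new halt-poll shrink policy above is a small pure function; an illustrative restatement (not kernel code):

/*
 * Once a shrink would land below grow_start, snap the window to 0 so
 * the next grow phase starts cleanly from grow_start instead of
 * crawling up from a tiny leftover value.
 */
static unsigned int shrink_poll_window(unsigned int val, unsigned int shrink,
				       unsigned int grow_start)
{
	if (shrink == 0)
		return 0;
	val /= shrink;
	return val < grow_start ? 0 : val;
}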