-rw-r--r--Documentation/ABI/obsolete/sysfs-class-rfkill29
-rw-r--r--Documentation/ABI/removed/sysfs-class-rfkill13
-rw-r--r--Documentation/ABI/stable/sysfs-class-rfkill27
-rw-r--r--Documentation/ABI/testing/sysfs-class-net-batman-adv17
-rw-r--r--Documentation/DocBook/media/v4l/media-types.xml4
-rw-r--r--Documentation/devicetree/bindings/net/can/rcar_can.txt11
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fec.txt2
-rw-r--r--Documentation/devicetree/bindings/net/stmmac.txt54
-rw-r--r--Documentation/devicetree/bindings/regulator/tps65217.txt10
-rw-r--r--Documentation/networking/mac80211-injection.txt17
-rw-r--r--Documentation/networking/rds.txt4
-rw-r--r--Documentation/rfkill.txt2
-rw-r--r--Documentation/watchdog/watchdog-parameters.txt4
-rw-r--r--MAINTAINERS80
-rw-r--r--Makefile2
-rw-r--r--arch/arc/Kconfig18
-rw-r--r--arch/arc/Makefile4
-rw-r--r--arch/arc/configs/axs101_defconfig4
-rw-r--r--arch/arc/configs/axs103_defconfig10
-rw-r--r--arch/arc/configs/axs103_smp_defconfig10
-rw-r--r--arch/arc/configs/nsim_700_defconfig5
-rw-r--r--arch/arc/configs/nsim_hs_defconfig3
-rw-r--r--arch/arc/configs/nsim_hs_smp_defconfig6
-rw-r--r--arch/arc/configs/nsimosci_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig3
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig12
-rw-r--r--arch/arc/configs/tb10x_defconfig18
-rw-r--r--arch/arc/include/asm/arcregs.h32
-rw-r--r--arch/arc/include/asm/irq.h2
-rw-r--r--arch/arc/include/asm/irqflags-arcv2.h11
-rw-r--r--arch/arc/kernel/entry-arcv2.S11
-rw-r--r--arch/arc/kernel/intc-compact.c3
-rw-r--r--arch/arc/kernel/mcip.c60
-rw-r--r--arch/arc/kernel/setup.c80
-rw-r--r--arch/arc/kernel/smp.c3
-rw-r--r--arch/arm/boot/compressed/Makefile2
-rw-r--r--arch/arm/boot/dts/am335x-bone-common.dtsi14
-rw-r--r--arch/arm/boot/dts/am335x-chilisom.dtsi14
-rw-r--r--arch/arm/boot/dts/am335x-nano.dts14
-rw-r--r--arch/arm/boot/dts/am335x-pepper.dts14
-rw-r--r--arch/arm/boot/dts/am335x-shc.dts4
-rw-r--r--arch/arm/boot/dts/am335x-sl50.dts13
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15.dts4
-rw-r--r--arch/arm/boot/dts/am57xx-cl-som-am57x.dts2
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi1
-rw-r--r--arch/arm/boot/dts/kirkwood-ds112.dts2
-rw-r--r--arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts31
-rw-r--r--arch/arm/boot/dts/r8a7791-porter.dts1
-rw-r--r--arch/arm/boot/dts/sama5d2-pinfunc.h2
-rw-r--r--arch/arm/boot/dts/tps65217.dtsi56
-rw-r--r--arch/arm/include/asm/arch_gicv3.h1
-rw-r--r--arch/arm/include/asm/xen/page-coherent.h21
-rw-r--r--arch/arm/kernel/Makefile1
-rw-r--r--arch/arm/kvm/guest.c2
-rw-r--r--arch/arm/kvm/mmio.c3
-rw-r--r--arch/arm/mach-omap2/board-generic.c22
-rw-r--r--arch/arm/mach-omap2/gpmc-onenand.c6
-rw-r--r--arch/arm/mach-omap2/omap_device.c14
-rw-r--r--arch/arm/mach-shmobile/common.h1
-rw-r--r--arch/arm/mach-shmobile/headsmp-scu.S6
-rw-r--r--arch/arm/mach-shmobile/headsmp.S28
-rw-r--r--arch/arm/mach-shmobile/platsmp-apmu.c1
-rw-r--r--arch/arm/mach-shmobile/platsmp-scu.c4
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7779.c2
-rw-r--r--arch/arm/mach-tegra/board-paz00.c17
-rw-r--r--arch/arm/mm/mmap.c2
-rw-r--r--arch/arm/mm/pageattr.c3
-rw-r--r--arch/arm64/include/asm/pgtable.h7
-rw-r--r--arch/arm64/kvm/guest.c2
-rw-r--r--arch/arm64/kvm/hyp/vgic-v3-sr.c20
-rw-r--r--arch/arm64/mm/init.c4
-rw-r--r--arch/arm64/mm/mmap.c4
-rw-r--r--arch/mips/jz4740/gpio.c2
-rw-r--r--arch/mips/kernel/r2300_fpu.S2
-rw-r--r--arch/mips/kernel/r4k_fpu.S2
-rw-r--r--arch/mips/kernel/traps.c13
-rw-r--r--arch/mips/kvm/mips.c4
-rw-r--r--arch/mips/mm/mmap.c4
-rw-r--r--arch/mips/mm/sc-mips.c13
-rw-r--r--arch/parisc/include/asm/floppy.h2
-rw-r--r--arch/parisc/include/uapi/asm/unistd.h3
-rw-r--r--arch/parisc/kernel/ptrace.c16
-rw-r--r--arch/parisc/kernel/syscall.S5
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/powerpc/kernel/eeh_driver.c3
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c3
-rw-r--r--arch/powerpc/kernel/process.c4
-rw-r--r--arch/powerpc/mm/hash64_64k.c8
-rw-r--r--arch/powerpc/mm/hugepage-hash64.c12
-rw-r--r--arch/powerpc/mm/hugetlbpage-book3e.c13
-rw-r--r--arch/powerpc/mm/mmap.c4
-rw-r--r--arch/s390/include/asm/fpu/internal.h2
-rw-r--r--arch/s390/kernel/compat_signal.c2
-rw-r--r--arch/sparc/Makefile6
-rw-r--r--arch/sparc/include/uapi/asm/unistd.h3
-rw-r--r--arch/sparc/kernel/entry.S17
-rw-r--r--arch/sparc/kernel/hvcalls.S3
-rw-r--r--arch/sparc/kernel/signal_64.c2
-rw-r--r--arch/sparc/kernel/sparc_ksyms_64.c1
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c2
-rw-r--r--arch/sparc/kernel/syscalls.S36
-rw-r--r--arch/sparc/kernel/systbls_32.S2
-rw-r--r--arch/sparc/kernel/systbls_64.S4
-rw-r--r--arch/um/kernel/reboot.c1
-rw-r--r--arch/um/kernel/signal.c2
-rw-r--r--arch/x86/entry/entry_32.S1
-rw-r--r--arch/x86/entry/entry_64_compat.S1
-rw-r--r--arch/x86/include/asm/pci_x86.h2
-rw-r--r--arch/x86/include/asm/uaccess_32.h26
-rw-r--r--arch/x86/include/asm/xen/pci.h4
-rw-r--r--arch/x86/kernel/acpi/sleep.c7
-rw-r--r--arch/x86/kvm/emulate.c4
-rw-r--r--arch/x86/kvm/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/vmx.c14
-rw-r--r--arch/x86/kvm/x86.c8
-rw-r--r--arch/x86/mm/mmap.c6
-rw-r--r--arch/x86/mm/mpx.c2
-rw-r--r--arch/x86/mm/pageattr.c14
-rw-r--r--arch/x86/pci/common.c26
-rw-r--r--arch/x86/pci/intel_mid_pci.c9
-rw-r--r--arch/x86/pci/irq.c23
-rw-r--r--arch/x86/pci/xen.c5
-rw-r--r--arch/x86/platform/intel-quark/imr.c4
-rw-r--r--arch/x86/um/os-Linux/task_size.c4
-rw-r--r--block/Kconfig13
-rw-r--r--block/blk-map.c91
-rw-r--r--block/blk-merge.c8
-rw-r--r--drivers/acpi/nfit.c105
-rw-r--r--drivers/acpi/pci_irq.c17
-rw-r--r--drivers/acpi/pci_link.c128
-rw-r--r--drivers/android/binder.c2
-rw-r--r--drivers/ata/ahci.c49
-rw-r--r--drivers/ata/ahci.h5
-rw-r--r--drivers/ata/ahci_xgene.c85
-rw-r--r--drivers/ata/libahci.c63
-rw-r--r--drivers/ata/libata-scsi.c11
-rw-r--r--drivers/ata/pata_rb532_cf.c11
-rw-r--r--drivers/bluetooth/Kconfig11
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/ath3k.c7
-rw-r--r--drivers/bluetooth/btbcm.c3
-rw-r--r--drivers/bluetooth/btusb.c3
-rw-r--r--drivers/bluetooth/hci_ag6xx.c337
-rw-r--r--drivers/bluetooth/hci_bcm.c2
-rw-r--r--drivers/bluetooth/hci_intel.c4
-rw-r--r--drivers/bluetooth/hci_ldisc.c6
-rw-r--r--drivers/bluetooth/hci_uart.h8
-rw-r--r--drivers/char/random.c22
-rw-r--r--drivers/clk/ti/dpll3xxx.c3
-rw-r--r--drivers/cpufreq/Kconfig1
-rw-r--r--drivers/cpufreq/Kconfig.arm4
-rw-r--r--drivers/cpufreq/mt8173-cpufreq.c1
-rw-r--r--drivers/devfreq/tegra-devfreq.c2
-rw-r--r--drivers/dma/pxa_dma.c8
-rw-r--r--drivers/gpio/gpio-rcar.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c2
-rw-r--r--drivers/gpu/drm/ast/ast_main.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c28
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c13
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c112
-rw-r--r--drivers/gpu/drm/i915/intel_display.c86
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c18
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c13
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c14
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c14
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c7
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c153
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h6
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h20
-rw-r--r--drivers/gpu/host1x/bus.c2
-rw-r--r--drivers/gpu/host1x/dev.c7
-rw-r--r--drivers/gpu/host1x/dev.h1
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c3
-rw-r--r--drivers/infiniband/core/device.c1
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c9
-rw-r--r--drivers/infiniband/hw/mlx4/Kconfig1
-rw-r--r--drivers/infiniband/hw/mlx4/main.c7
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c41
-rw-r--r--drivers/iommu/amd_iommu.c4
-rw-r--r--drivers/iommu/amd_iommu_init.c63
-rw-r--r--drivers/iommu/dmar.c5
-rw-r--r--drivers/iommu/intel-iommu.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c18
-rw-r--r--drivers/md/dm.c2
-rw-r--r--drivers/media/i2c/adp1653.c2
-rw-r--r--drivers/media/i2c/adv7604.c3
-rw-r--r--drivers/media/usb/au0828/au0828-video.c3
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c2
-rw-r--r--drivers/mtd/ubi/upd.c2
-rw-r--r--drivers/net/can/rcar_can.c2
-rw-r--r--drivers/net/can/spi/mcp251x.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c8
-rw-r--r--drivers/net/can/usb/gs_usb.c24
-rw-r--r--drivers/net/dsa/mv88e6171.c1
-rw-r--r--drivers/net/dsa/mv88e6352.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx.c441
-rw-r--r--drivers/net/dsa/mv88e6xxx.h6
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c4
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h36
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c171
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h35
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c126
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c63
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h9
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_reg.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_values.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c56
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c55
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c55
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c20
-rw-r--r--drivers/net/ethernet/ethoc.c1
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c222
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.h348
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c4
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c37
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c15
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c5
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c62
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c5
-rw-r--r--drivers/net/ethernet/jme.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c142
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_clock.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c146
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c55
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c276
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c20
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c425
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2195
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c404
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c25
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h60
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h22
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c384
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c18
-rw-r--r--drivers/net/ethernet/renesas/Kconfig4
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c4
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c10
-rw-r--r--drivers/net/ethernet/sfc/tx.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h330
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h77
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c111
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c226
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c150
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c473
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c51
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c45
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/geneve.c10
-rw-r--r--drivers/net/hyperv/hyperv_net.h4
-rw-r--r--drivers/net/hyperv/netvsc_drv.c56
-rw-r--r--drivers/net/hyperv/rndis_filter.c19
-rw-r--r--drivers/net/ieee802154/at86rf230.c25
-rw-r--r--drivers/net/ieee802154/mrf24j40.c1
-rw-r--r--drivers/net/phy/micrel.c28
-rw-r--r--drivers/net/ppp/ppp_generic.c11
-rw-r--r--drivers/net/tun.c17
-rw-r--r--drivers/net/usb/ax88172a.c1
-rw-r--r--drivers/net/usb/cdc_ncm.c26
-rw-r--r--drivers/net/usb/lan78xx.c107
-rw-r--r--drivers/net/usb/lan78xx.h1
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/usb/usbnet.c7
-rw-r--r--drivers/net/veth.c26
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c73
-rw-r--r--drivers/net/vrf.c13
-rw-r--r--drivers/net/vxlan.c53
-rw-r--r--drivers/net/wan/lmc/lmc_main.c27
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c8
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c8
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c11
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c16
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c10
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c19
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c4
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c8
-rw-r--r--drivers/nvdimm/bus.c20
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/nvme/host/core.c111
-rw-r--r--drivers/nvme/host/nvme.h8
-rw-r--r--drivers/nvme/host/pci.c149
-rw-r--r--drivers/of/of_mdio.c5
-rw-r--r--drivers/pci/host/Kconfig1
-rw-r--r--drivers/pci/host/pci-keystone-dw.c11
-rw-r--r--drivers/pci/host/pci-layerscape.c21
-rw-r--r--drivers/pci/xen-pcifront.c10
-rw-r--r--drivers/power/bq27xxx_battery_i2c.c37
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c2
-rw-r--r--drivers/scsi/ipr.c5
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/sh/pm_runtime.c2
-rw-r--r--drivers/ssb/Kconfig1
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c2
-rw-r--r--drivers/staging/vt6655/rxtx.c12
-rw-r--r--drivers/staging/vt6656/rxtx.c12
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c4
-rw-r--r--drivers/usb/chipidea/debug.c3
-rw-r--r--drivers/usb/chipidea/otg.c2
-rw-r--r--drivers/usb/core/hub.c8
-rw-r--r--drivers/usb/dwc2/Kconfig1
-rw-r--r--drivers/usb/dwc2/core.c6
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c23
-rw-r--r--drivers/usb/dwc2/hcd_intr.c8
-rw-r--r--drivers/usb/dwc3/core.h1
-rw-r--r--drivers/usb/dwc3/ep0.c5
-rw-r--r--drivers/usb/dwc3/gadget.c70
-rw-r--r--drivers/usb/gadget/legacy/inode.c7
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/udc/net2280.h15
-rw-r--r--drivers/usb/gadget/udc/udc-core.c3
-rw-r--r--drivers/usb/musb/musb_host.c8
-rw-r--r--drivers/usb/phy/phy-msm-usb.c20
-rw-r--r--drivers/usb/serial/Kconfig16
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/cp210x.c3
-rw-r--r--drivers/usb/serial/mxu11x0.c1006
-rw-r--r--drivers/usb/serial/option.c14
-rw-r--r--drivers/usb/serial/qcserial.c7
-rw-r--r--drivers/vfio/pci/vfio_pci.c9
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c9
-rw-r--r--drivers/vfio/vfio_iommu_type1.c6
-rw-r--r--drivers/vhost/vhost.c15
-rw-r--r--drivers/video/console/fbcon.c2
-rw-r--r--drivers/virtio/virtio_pci_modern.c2
-rw-r--r--drivers/watchdog/Kconfig11
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/sun4v_wdt.c191
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c9
-rw-r--r--drivers/xen/xen-scsiback.c80
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--fs/affs/file.c5
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/block_dev.c19
-rw-r--r--fs/btrfs/root-tree.c10
-rw-r--r--fs/ceph/addr.c4
-rw-r--r--fs/ceph/caps.c27
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/ceph/mds_client.c16
-rw-r--r--fs/ceph/mds_client.h1
-rw-r--r--fs/ceph/super.h1
-rw-r--r--fs/cifs/cifsfs.c1
-rw-r--r--fs/cifs/cifsfs.h12
-rw-r--r--fs/cifs/cifssmb.c21
-rw-r--r--fs/cifs/smb2pdu.c24
-rw-r--r--fs/dax.c21
-rw-r--r--fs/dcache.c20
-rw-r--r--fs/ext2/file.c19
-rw-r--r--fs/ext2/inode.c16
-rw-r--r--fs/ext4/file.c19
-rw-r--r--fs/ext4/inode.c6
-rw-r--r--fs/ext4/ioctl.c5
-rw-r--r--fs/fs-writeback.c54
-rw-r--r--fs/hpfs/namei.c31
-rw-r--r--fs/jffs2/README.Locking5
-rw-r--r--fs/jffs2/build.c75
-rw-r--r--fs/jffs2/file.c39
-rw-r--r--fs/jffs2/gc.c17
-rw-r--r--fs/jffs2/nodelist.h6
-rw-r--r--fs/namei.c22
-rw-r--r--fs/nfs/blocklayout/extent_tree.c10
-rw-r--r--fs/nfs/nfs42proc.c119
-rw-r--r--fs/nfs/nfs4proc.c4
-rw-r--r--fs/nfs/pnfs.c55
-rw-r--r--fs/ocfs2/aops.c1
-rw-r--r--fs/overlayfs/dir.c10
-rw-r--r--fs/overlayfs/inode.c2
-rw-r--r--fs/overlayfs/super.c13
-rw-r--r--fs/pnode.c9
-rw-r--r--fs/read_write.c9
-rw-r--r--fs/super.c1
-rw-r--r--fs/userfaultfd.c6
-rw-r--r--fs/xattr.c6
-rw-r--r--fs/xfs/xfs_aops.c6
-rw-r--r--fs/xfs/xfs_aops.h1
-rw-r--r--fs/xfs/xfs_bmap_util.c3
-rw-r--r--include/linux/ata.h4
-rw-r--r--include/linux/bio.h37
-rw-r--r--include/linux/blkdev.h25
-rw-r--r--include/linux/ceph/ceph_features.h1
-rw-r--r--include/linux/dax.h8
-rw-r--r--include/linux/dcache.h4
-rw-r--r--include/linux/ieee80211.h26
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/libnvdimm.h3
-rw-r--r--include/linux/mlx4/driver.h3
-rw-r--r--include/linux/mlx5/driver.h7
-rw-r--r--include/linux/mlx5/mlx5_ifc.h4
-rw-r--r--include/linux/netdevice.h31
-rw-r--r--include/linux/nfs_fs.h4
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/pci.h17
-rw-r--r--include/linux/perf_event.h7
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/qed/qed_eth_if.h12
-rw-r--r--include/linux/qed/qed_if.h6
-rw-r--r--include/linux/random.h1
-rw-r--r--include/linux/rfkill-gpio.h37
-rw-r--r--include/linux/rfkill.h18
-rw-r--r--include/linux/skbuff.h28
-rw-r--r--include/linux/stmmac.h18
-rw-r--r--include/linux/trace_events.h2
-rw-r--r--include/linux/writeback.h5
-rw-r--r--include/net/6lowpan.h32
-rw-r--r--include/net/bluetooth/hci_core.h3
-rw-r--r--include/net/cfg80211.h8
-rw-r--r--include/net/checksum.h5
-rw-r--r--include/net/codel.h4
-rw-r--r--include/net/devlink.h140
-rw-r--r--include/net/dsa.h2
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/ip_tunnels.h15
-rw-r--r--include/net/iw_handler.h6
-rw-r--r--include/net/mac80211.h198
-rw-r--r--include/net/mac802154.h5
-rw-r--r--include/net/pkt_cls.h17
-rw-r--r--include/net/sch_generic.h26
-rw-r--r--include/net/tc_act/tc_ife.h61
-rw-r--r--include/rxrpc/packet.h15
-rw-r--r--include/sound/hdaudio.h2
-rw-r--r--include/uapi/linux/bpf.h16
-rw-r--r--include/uapi/linux/devlink.h72
-rw-r--r--include/uapi/linux/ethtool.h7
-rw-r--r--include/uapi/linux/if_bridge.h22
-rw-r--r--include/uapi/linux/kernel.h1
-rw-r--r--include/uapi/linux/media.h54
-rw-r--r--include/uapi/linux/mroute6.h9
-rw-r--r--include/uapi/linux/ndctl.h11
-rw-r--r--include/uapi/linux/nl80211.h14
-rw-r--r--include/uapi/linux/pkt_cls.h1
-rw-r--r--include/uapi/linux/rfkill.h2
-rw-r--r--include/uapi/linux/tc_act/tc_ife.h38
-rw-r--r--kernel/events/core.c368
-rw-r--r--kernel/memremap.c4
-rw-r--r--kernel/sched/deadline.c2
-rw-r--r--kernel/trace/trace_events.c17
-rw-r--r--kernel/trace/trace_events_filter.c13
-rw-r--r--kernel/trace/trace_stack.c6
-rw-r--r--mm/filemap.c12
-rw-r--r--mm/huge_memory.c9
-rw-r--r--mm/memory.c14
-rw-r--r--mm/migrate.c2
-rw-r--r--net/6lowpan/core.c39
-rw-r--r--net/6lowpan/debugfs.c247
-rw-r--r--net/6lowpan/iphc.c413
-rw-r--r--net/Kconfig16
-rw-r--r--net/batman-adv/Kconfig14
-rw-r--r--net/batman-adv/Makefile3
-rw-r--r--net/batman-adv/bat_algo.h28
-rw-r--r--net/batman-adv/bat_iv_ogm.c2
-rw-r--r--net/batman-adv/bat_v.c347
-rw-r--r--net/batman-adv/bat_v_elp.c515
-rw-r--r--net/batman-adv/bat_v_elp.h33
-rw-r--r--net/batman-adv/bat_v_ogm.c833
-rw-r--r--net/batman-adv/bat_v_ogm.h36
-rw-r--r--net/batman-adv/distributed-arp-table.c4
-rw-r--r--net/batman-adv/fragmentation.c8
-rw-r--r--net/batman-adv/gateway_common.c4
-rw-r--r--net/batman-adv/gateway_common.h2
-rw-r--r--net/batman-adv/icmp_socket.c2
-rw-r--r--net/batman-adv/main.c7
-rw-r--r--net/batman-adv/main.h12
-rw-r--r--net/batman-adv/network-coding.c22
-rw-r--r--net/batman-adv/packet.h49
-rw-r--r--net/batman-adv/send.c55
-rw-r--r--net/batman-adv/send.h10
-rw-r--r--net/batman-adv/sysfs.c130
-rw-r--r--net/batman-adv/types.h93
-rw-r--r--net/bluetooth/Kconfig9
-rw-r--r--net/bluetooth/Makefile1
-rw-r--r--net/bluetooth/hci_core.c7
-rw-r--r--net/bluetooth/leds.c74
-rw-r--r--net/bluetooth/leds.h16
-rw-r--r--net/bridge/br_fdb.c15
-rw-r--r--net/bridge/br_forward.c1
-rw-r--r--net/bridge/br_if.c37
-rw-r--r--net/bridge/br_mdb.c16
-rw-r--r--net/bridge/br_multicast.c83
-rw-r--r--net/ceph/messenger.c15
-rw-r--r--net/ceph/osd_client.c4
-rw-r--r--net/core/Makefile1
-rw-r--r--net/core/devlink.c738
-rw-r--r--net/core/filter.c123
-rw-r--r--net/core/pktgen.c4
-rw-r--r--net/core/rtnetlink.c15
-rw-r--r--net/core/skbuff.c23
-rw-r--r--net/dsa/slave.c21
-rw-r--r--net/ieee802154/6lowpan/core.c7
-rw-r--r--net/ipv4/af_inet.c26
-rw-r--r--net/ipv4/arp.c35
-rw-r--r--net/ipv4/icmp.c5
-rw-r--r--net/ipv4/igmp.c3
-rw-r--r--net/ipv4/ip_forward.c1
-rw-r--r--net/ipv4/ip_gre.c10
-rw-r--r--net/ipv4/ip_options.c14
-rw-r--r--net/ipv4/ip_output.c5
-rw-r--r--net/ipv4/ip_tunnel.c3
-rw-r--r--net/ipv4/ping.c7
-rw-r--r--net/ipv4/tcp_metrics.c2
-rw-r--r--net/ipv4/tcp_minisocks.c3
-rw-r--r--net/ipv4/tcp_probe.c8
-rw-r--r--net/ipv4/udp_tunnel.c2
-rw-r--r--net/ipv6/addrconf.c34
-rw-r--r--net/ipv6/exthdrs_core.c6
-rw-r--r--net/ipv6/ip6_gre.c2
-rw-r--r--net/ipv6/ip6_output.c1
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/mcast.c3
-rw-r--r--net/ipv6/udp.c6
-rw-r--r--net/mac80211/agg-rx.c52
-rw-r--r--net/mac80211/agg-tx.c53
-rw-r--r--net/mac80211/cfg.c34
-rw-r--r--net/mac80211/chan.c2
-rw-r--r--net/mac80211/debugfs.c1
-rw-r--r--net/mac80211/debugfs_key.c5
-rw-r--r--net/mac80211/driver-ops.c10
-rw-r--r--net/mac80211/driver-ops.h4
-rw-r--r--net/mac80211/ht.c5
-rw-r--r--net/mac80211/ibss.c32
-rw-r--r--net/mac80211/ieee80211_i.h39
-rw-r--r--net/mac80211/iface.c14
-rw-r--r--net/mac80211/key.c86
-rw-r--r--net/mac80211/key.h10
-rw-r--r--net/mac80211/mesh.c9
-rw-r--r--net/mac80211/mesh.h3
-rw-r--r--net/mac80211/mesh_hwmp.c6
-rw-r--r--net/mac80211/mesh_pathtbl.c111
-rw-r--r--net/mac80211/mesh_plink.c10
-rw-r--r--net/mac80211/mlme.c79
-rw-r--r--net/mac80211/rc80211_minstrel.c2
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c16
-rw-r--r--net/mac80211/rx.c178
-rw-r--r--net/mac80211/sta_info.c35
-rw-r--r--net/mac80211/sta_info.h24
-rw-r--r--net/mac80211/status.c2
-rw-r--r--net/mac80211/tkip.c36
-rw-r--r--net/mac80211/tkip.h2
-rw-r--r--net/mac80211/trace.h43
-rw-r--r--net/mac80211/tx.c100
-rw-r--r--net/mac80211/util.c116
-rw-r--r--net/mac80211/vht.c57
-rw-r--r--net/mac80211/wpa.c11
-rw-r--r--net/mac802154/main.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c6
-rw-r--r--net/netfilter/nf_dup_netdev.c1
-rw-r--r--net/netlabel/netlabel_domainhash.c4
-rw-r--r--net/netlabel/netlabel_unlabeled.c6
-rw-r--r--net/openvswitch/datapath.c40
-rw-r--r--net/openvswitch/datapath.h4
-rw-r--r--net/openvswitch/vport-internal_dev.c10
-rw-r--r--net/rds/Kconfig7
-rw-r--r--net/rds/Makefile4
-rw-r--r--net/rds/af_rds.c26
-rw-r--r--net/rds/ib.c47
-rw-r--r--net/rds/ib.h37
-rw-r--r--net/rds/ib_cm.c59
-rw-r--r--net/rds/ib_fmr.c248
-rw-r--r--net/rds/ib_frmr.c376
-rw-r--r--net/rds/ib_mr.h148
-rw-r--r--net/rds/ib_rdma.c495
-rw-r--r--net/rds/ib_send.c6
-rw-r--r--net/rds/ib_stats.c2
-rw-r--r--net/rds/iw.c312
-rw-r--r--net/rds/iw.h398
-rw-r--r--net/rds/iw_cm.c769
-rw-r--r--net/rds/iw_rdma.c837
-rw-r--r--net/rds/iw_recv.c904
-rw-r--r--net/rds/iw_ring.c169
-rw-r--r--net/rds/iw_send.c981
-rw-r--r--net/rds/iw_stats.c95
-rw-r--r--net/rds/iw_sysctl.c123
-rw-r--r--net/rds/rdma_transport.c21
-rw-r--r--net/rds/rdma_transport.h5
-rw-r--r--net/rds/rds.h1
-rw-r--r--net/rds/recv.c20
-rw-r--r--net/rfkill/Kconfig3
-rw-r--r--net/rfkill/core.c172
-rw-r--r--net/rfkill/rfkill-gpio.c24
-rw-r--r--net/rxrpc/af_rxrpc.c39
-rw-r--r--net/rxrpc/ar-accept.c56
-rw-r--r--net/rxrpc/ar-ack.c213
-rw-r--r--net/rxrpc/ar-call.c84
-rw-r--r--net/rxrpc/ar-connection.c83
-rw-r--r--net/rxrpc/ar-connevent.c79
-rw-r--r--net/rxrpc/ar-error.c13
-rw-r--r--net/rxrpc/ar-input.c118
-rw-r--r--net/rxrpc/ar-internal.h196
-rw-r--r--net/rxrpc/ar-local.c29
-rw-r--r--net/rxrpc/ar-output.c73
-rw-r--r--net/rxrpc/ar-peer.c2
-rw-r--r--net/rxrpc/ar-proc.c10
-rw-r--r--net/rxrpc/ar-recvmsg.c20
-rw-r--r--net/rxrpc/ar-security.c6
-rw-r--r--net/rxrpc/ar-skbuff.c7
-rw-r--r--net/rxrpc/ar-transport.c1
-rw-r--r--net/rxrpc/rxkad.c165
-rw-r--r--net/rxrpc/sysctl.c2
-rw-r--r--net/sched/Kconfig22
-rw-r--r--net/sched/Makefile3
-rw-r--r--net/sched/act_ife.c870
-rw-r--r--net/sched/act_ipt.c2
-rw-r--r--net/sched/act_meta_mark.c79
-rw-r--r--net/sched/act_meta_skbprio.c76
-rw-r--r--net/sched/act_mirred.c1
-rw-r--r--net/sched/cls_u32.c37
-rw-r--r--net/sched/sch_api.c8
-rw-r--r--net/sched/sch_cbq.c12
-rw-r--r--net/sched/sch_choke.c6
-rw-r--r--net/sched/sch_codel.c10
-rw-r--r--net/sched/sch_drr.c9
-rw-r--r--net/sched/sch_dsmark.c11
-rw-r--r--net/sched/sch_fq.c4
-rw-r--r--net/sched/sch_fq_codel.c17
-rw-r--r--net/sched/sch_generic.c1
-rw-r--r--net/sched/sch_hfsc.c9
-rw-r--r--net/sched/sch_hhf.c10
-rw-r--r--net/sched/sch_htb.c24
-rw-r--r--net/sched/sch_mq.c2
-rw-r--r--net/sched/sch_mqprio.c5
-rw-r--r--net/sched/sch_multiq.c16
-rw-r--r--net/sched/sch_netem.c13
-rw-r--r--net/sched/sch_pie.c5
-rw-r--r--net/sched/sch_prio.c15
-rw-r--r--net/sched/sch_qfq.c9
-rw-r--r--net/sched/sch_red.c10
-rw-r--r--net/sched/sch_sfb.c10
-rw-r--r--net/sched/sch_sfq.c16
-rw-r--r--net/sched/sch_tbf.c15
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/probe.c10
-rw-r--r--net/sctp/proc.c10
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c2
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c2
-rw-r--r--net/switchdev/switchdev.c5
-rw-r--r--net/tipc/bcast.c5
-rw-r--r--net/tipc/bcast.h1
-rw-r--r--net/tipc/bearer.c18
-rw-r--r--net/tipc/link.c122
-rw-r--r--net/tipc/link.h1
-rw-r--r--net/tipc/name_table.c6
-rw-r--r--net/tipc/net.c7
-rw-r--r--net/tipc/netlink.c69
-rw-r--r--net/tipc/netlink.h11
-rw-r--r--net/tipc/node.c25
-rw-r--r--net/tipc/socket.c42
-rw-r--r--net/tipc/subscr.c3
-rw-r--r--net/tipc/udp_media.c40
-rw-r--r--net/wireless/Kconfig25
-rw-r--r--net/wireless/core.c12
-rw-r--r--net/wireless/mlme.c3
-rw-r--r--net/wireless/nl80211.c31
-rw-r--r--net/wireless/radiotap.c1
-rw-r--r--net/wireless/reg.c122
-rw-r--r--net/wireless/sme.c15
-rw-r--r--net/wireless/util.c277
-rw-r--r--net/wireless/wext-core.c52
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--sound/core/control_compat.c90
-rw-r--r--sound/core/pcm_compat.c177
-rw-r--r--sound/core/rawmidi_compat.c56
-rw-r--r--sound/core/seq/oss/seq_oss.c2
-rw-r--r--sound/core/seq/oss/seq_oss_device.h1
-rw-r--r--sound/core/seq/oss/seq_oss_init.c16
-rw-r--r--sound/core/timer_compat.c18
-rw-r--r--sound/hda/hdac_controller.c7
-rw-r--r--sound/pci/hda/hda_controller.c47
-rw-r--r--sound/pci/hda/hda_intel.c16
-rw-r--r--sound/pci/hda/patch_hdmi.c19
-rw-r--r--sound/pci/hda/patch_realtek.c40
-rw-r--r--sound/pci/rme9652/hdsp.c4
-rw-r--r--sound/pci/rme9652/hdspm.c16
-rw-r--r--sound/usb/quirks.c1
-rw-r--r--tools/testing/nvdimm/test/nfit.c8
-rw-r--r--tools/testing/selftests/ftrace/test.d/instances/instance.tc15
-rw-r--r--virt/kvm/arm/vgic.c4
-rw-r--r--virt/kvm/async_pf.c2
778 files changed, 19981 insertions, 12317 deletions
diff --git a/Documentation/ABI/obsolete/sysfs-class-rfkill b/Documentation/ABI/obsolete/sysfs-class-rfkill
deleted file mode 100644
index ff60ad9eca4c..000000000000
--- a/Documentation/ABI/obsolete/sysfs-class-rfkill
+++ /dev/null
@@ -1,29 +0,0 @@
-rfkill - radio frequency (RF) connector kill switch support
-
-For details to this subsystem look at Documentation/rfkill.txt.
-
-What: /sys/class/rfkill/rfkill[0-9]+/state
-Date: 09-Jul-2007
-KernelVersion v2.6.22
-Description: Current state of the transmitter.
- This file is deprecated and scheduled to be removed in 2014,
- because its not possible to express the 'soft and hard block'
- state of the rfkill driver.
-Values: A numeric value.
- 0: RFKILL_STATE_SOFT_BLOCKED
- transmitter is turned off by software
- 1: RFKILL_STATE_UNBLOCKED
- transmitter is (potentially) active
- 2: RFKILL_STATE_HARD_BLOCKED
- transmitter is forced off by something outside of
- the driver's control.
-
-What: /sys/class/rfkill/rfkill[0-9]+/claim
-Date: 09-Jul-2007
-KernelVersion v2.6.22
-Description: This file is deprecated because there no longer is a way to
- claim just control over a single rfkill instance.
- This file is scheduled to be removed in 2012.
-Values: 0: Kernel handles events
diff --git a/Documentation/ABI/removed/sysfs-class-rfkill b/Documentation/ABI/removed/sysfs-class-rfkill
new file mode 100644
index 000000000000..3ce6231f20b2
--- /dev/null
+++ b/Documentation/ABI/removed/sysfs-class-rfkill
@@ -0,0 +1,13 @@
+rfkill - radio frequency (RF) connector kill switch support
+
+For details to this subsystem look at Documentation/rfkill.txt.
+
+What: /sys/class/rfkill/rfkill[0-9]+/claim
+Date: 09-Jul-2007
+KernelVersion v2.6.22
+Description: This file was deprecated because there no longer was a way to
+	claim control over just a single rfkill instance.
+ This file was scheduled to be removed in 2012, and was removed
+ in 2016.
+Values: 0: Kernel handles events
diff --git a/Documentation/ABI/stable/sysfs-class-rfkill b/Documentation/ABI/stable/sysfs-class-rfkill
index 097f522c33bb..e1ba4a104753 100644
--- a/Documentation/ABI/stable/sysfs-class-rfkill
+++ b/Documentation/ABI/stable/sysfs-class-rfkill
@@ -2,9 +2,8 @@ rfkill - radio frequency (RF) connector kill switch support
For details to this subsystem look at Documentation/rfkill.txt.
-For the deprecated /sys/class/rfkill/*/state and
-/sys/class/rfkill/*/claim knobs of this interface look in
-Documentation/ABI/obsolete/sysfs-class-rfkill.
+For the deprecated /sys/class/rfkill/*/claim knobs of this interface look in
+Documentation/ABI/removed/sysfs-class-rfkill.
What: /sys/class/rfkill
Date: 09-Jul-2007
@@ -42,6 +41,28 @@ Values: A numeric value.
1: true
+What: /sys/class/rfkill/rfkill[0-9]+/state
+Date: 09-Jul-2007
+KernelVersion v2.6.22
+Description: Current state of the transmitter.
+ This file was scheduled to be removed in 2014, but due to its
+ large number of users it will be sticking around for a bit
+	longer. Despite it being marked as stable, the newer "hard" and
+	"soft" interfaces should be preferred, since it is not possible
+ to express the 'soft and hard block' state of the rfkill driver
+ through this interface. There will likely be another attempt to
+ remove it in the future.
+Values: A numeric value.
+ 0: RFKILL_STATE_SOFT_BLOCKED
+ transmitter is turned off by software
+ 1: RFKILL_STATE_UNBLOCKED
+ transmitter is (potentially) active
+ 2: RFKILL_STATE_HARD_BLOCKED
+ transmitter is forced off by something outside of
+ the driver's control.
+
+
What: /sys/class/rfkill/rfkill[0-9]+/hard
Date: 12-March-2010
KernelVersion v2.6.34
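
The three state values above map directly onto what user space reads back
from the file. As a minimal, hedged C sketch (rfkill0 is just an example
instance name, not a fixed one), decoding the value could look like:

    #include <stdio.h>

    /* Decode the numeric rfkill state documented above.
     * "rfkill0" is an example instance name.
     */
    int main(void)
    {
            FILE *f = fopen("/sys/class/rfkill/rfkill0/state", "r");
            int state;

            if (!f || fscanf(f, "%d", &state) != 1)
                    return 1;
            fclose(f);

            switch (state) {
            case 0: puts("soft blocked");  /* RFKILL_STATE_SOFT_BLOCKED */
                    break;
            case 1: puts("unblocked");     /* RFKILL_STATE_UNBLOCKED */
                    break;
            case 2: puts("hard blocked");  /* RFKILL_STATE_HARD_BLOCKED */
                    break;
            default: puts("unknown");
            }
            return 0;
    }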
diff --git a/Documentation/ABI/testing/sysfs-class-net-batman-adv b/Documentation/ABI/testing/sysfs-class-net-batman-adv
index 7f34a95bb963..518f6a1dbc0c 100644
--- a/Documentation/ABI/testing/sysfs-class-net-batman-adv
+++ b/Documentation/ABI/testing/sysfs-class-net-batman-adv
@@ -1,4 +1,20 @@
+What: /sys/class/net/<iface>/batman-adv/throughput_override
+Date: Feb 2014
+Contact: Antonio Quartulli <[email protected]>
+Description:
+ Defines the throughput value to be used by B.A.T.M.A.N. V
+ when estimating the link throughput using this interface.
+ If the value is set to 0 then batman-adv will try to
+ estimate the throughput by itself.
+
+What: /sys/class/net/<iface>/batman-adv/elp_interval
+Date: Feb 2014
+Contact: Linus Lüssing <[email protected]>
+Description:
+ Defines the interval in milliseconds in which batman
+ sends its probing packets for link quality measurements.
+
What: /sys/class/net/<iface>/batman-adv/iface_status
Date: May 2010
Contact: Marek Lindner <[email protected]>
@@ -12,4 +28,3 @@ Description:
The /sys/class/net/<iface>/batman-adv/mesh_iface file
displays the batman mesh interface this <iface>
currently is associated with.
-
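
For illustration only: both knobs above are plain decimal sysfs attributes,
so driving them from C is a single write. In this sketch the interface name
"eth0" is hypothetical and 500 ms is an arbitrary example value:

    #include <stdio.h>

    /* Set the ELP probing interval to 500 ms.
     * "eth0" is a hypothetical interface name.
     */
    int main(void)
    {
            FILE *f = fopen("/sys/class/net/eth0/batman-adv/elp_interval", "w");

            if (!f)
                    return 1;
            fprintf(f, "500\n");
            return fclose(f) ? 1 : 0;
    }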
diff --git a/Documentation/DocBook/media/v4l/media-types.xml b/Documentation/DocBook/media/v4l/media-types.xml
index 1af384250910..0ee0f3386cdf 100644
--- a/Documentation/DocBook/media/v4l/media-types.xml
+++ b/Documentation/DocBook/media/v4l/media-types.xml
@@ -57,10 +57,6 @@
<entry>Connector for a RGB composite signal.</entry>
</row>
<row>
- <entry><constant>MEDIA_ENT_F_CONN_TEST</constant></entry>
- <entry>Connector for a test generator.</entry>
- </row>
- <row>
<entry><constant>MEDIA_ENT_F_CAM_SENSOR</constant></entry>
<entry>Camera video sensor entity.</entry>
</row>
diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt
index 002d8440bf66..65edc055722f 100644
--- a/Documentation/devicetree/bindings/net/can/rcar_can.txt
+++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt
@@ -6,6 +6,15 @@ Required properties:
"renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
"renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC.
"renesas,can-r8a7791" if CAN controller is a part of R8A7791 SoC.
+ "renesas,can-r8a7792" if CAN controller is a part of R8A7792 SoC.
+ "renesas,can-r8a7793" if CAN controller is a part of R8A7793 SoC.
+ "renesas,can-r8a7794" if CAN controller is a part of R8A7794 SoC.
+ "renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device.
+ "renesas,rcar-gen2-can" for a generic R-Car Gen2 compatible device.
+ When compatible with the generic version, nodes must list the
+ SoC-specific version corresponding to the platform first,
+ followed by the generic version.
+
- reg: physical base address and size of the R-Car CAN register map.
- interrupts: interrupt specifier for the sole interrupt.
- clocks: phandles and clock specifiers for 3 CAN clock inputs.
@@ -25,7 +34,7 @@ Example
SoC common .dtsi file:
can0: can@e6e80000 {
- compatible = "renesas,can-r8a7791";
+ compatible = "renesas,can-r8a7791", "renesas,rcar-gen2-can";
reg = <0 0xe6e80000 0 0x1000>;
interrupts = <0 186 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp9_clks R8A7791_CLK_RCAN0>,
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index a4799fff0d1f..b037a9d78d93 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -12,7 +12,7 @@ Optional properties:
only if property "phy-reset-gpios" is available. Missing the property
will have the duration be 1 millisecond. Numbers greater than 1000 are
invalid and 1 millisecond will be used instead.
-- phy-reset-active-low : If present then the reset sequence using the GPIO
+- phy-reset-active-high : If present then the reset sequence using the GPIO
specified in the "phy-reset-gpios" property is reversed (H=reset state,
L=operation state).
- phy-supply : regulator that powers the Ethernet PHY.
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index e862a922bd3f..6605d19601c2 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -17,7 +17,25 @@ Required properties:
The 1st cell is reset pre-delay in micro seconds.
The 2nd cell is reset pulse in micro seconds.
The 3rd cell is reset post-delay in micro seconds.
+
+Optional properties:
+- resets: Should contain a phandle to the STMMAC reset signal, if any
+- reset-names: Should contain the reset signal name "stmmaceth", if a
+ reset phandle is given
+- max-frame-size: See ethernet.txt file in the same directory
+- clocks: If present, the first clock should be the GMAC main clock and
+ the second clock should be peripheral's register interface clock. Further
+ clocks may be specified in derived bindings.
+- clock-names: One name for each entry in the clocks property, the
+ first one should be "stmmaceth" and the second one should be "pclk".
+- clk_ptp_ref: this is the PTP reference clock; if PTP is available, this
+ clock is used for programming the Timestamp Addend Register.
+ If not passed then the system clock will be used and this is fine on some
+ platforms.
+- tx-fifo-depth: See ethernet.txt file in the same directory
+- rx-fifo-depth: See ethernet.txt file in the same directory
- snps,pbl Programmable Burst Length
+- snps,aal Address-Aligned Beats
- snps,fixed-burst Program the DMA to use the fixed burst mode
- snps,mixed-burst Program the DMA to use the mixed burst mode
- snps,force_thresh_dma_mode Force DMA to use the threshold mode for
@@ -29,27 +47,28 @@ Required properties:
supported by this device instance
- snps,perfect-filter-entries: Number of perfect filter entries supported
by this device instance
-
-Optional properties:
-- resets: Should contain a phandle to the STMMAC reset signal, if any
-- reset-names: Should contain the reset signal name "stmmaceth", if a
- reset phandle is given
-- max-frame-size: See ethernet.txt file in the same directory
-- clocks: If present, the first clock should be the GMAC main clock
- The optional second clock should be peripheral's register interface clock.
- The third optional clock should be the ptp reference clock.
- Further clocks may be specified in derived bindings.
-- clock-names: One name for each entry in the clocks property.
- The first one should be "stmmaceth".
- The optional second one should be "pclk".
- The optional third one should be "clk_ptp_ref".
-- snps,burst_len: The AXI burst lenth value of the AXI BUS MODE register.
-- tx-fifo-depth: See ethernet.txt file in the same directory
-- rx-fifo-depth: See ethernet.txt file in the same directory
+- AXI BUS Mode parameters: below is the list of all the parameters to program
+ the AXI register inside the DMA module:
+ - snps,lpi_en: enable Low Power Interface
+ - snps,xit_frm: unlock on WoL
+ - snps,wr_osr_lmt: max write outstanding req. limit
+ - snps,rd_osr_lmt: max read outstanding req. limit
+ - snps,kbbe: do not cross 1KiB boundary.
+ - snps,axi_all: align address
+ - snps,blen: this is a vector of supported burst lengths.
+ - snps,fb: fixed-burst
+ - snps,mb: mixed-burst
+ - snps,rb: rebuild INCRx Burst
- mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
Examples:
+ stmmac_axi_setup: stmmac-axi-config {
+ snps,wr_osr_lmt = <0xf>;
+ snps,rd_osr_lmt = <0xf>;
+ snps,blen = <256 128 64 32 0 0 0>;
+ };
+
gmac0: ethernet@e0800000 {
compatible = "st,spear600-gmac";
reg = <0xe0800000 0x8000>;
@@ -65,6 +84,7 @@ Examples:
tx-fifo-depth = <16384>;
clocks = <&clock>;
clock-names = "stmmaceth";
+ snps,axi-config = <&stmmac_axi_setup>;
mdio0 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/regulator/tps65217.txt b/Documentation/devicetree/bindings/regulator/tps65217.txt
index d18109657da6..4f05d208c95c 100644
--- a/Documentation/devicetree/bindings/regulator/tps65217.txt
+++ b/Documentation/devicetree/bindings/regulator/tps65217.txt
@@ -26,11 +26,7 @@ Example:
ti,pmic-shutdown-controller;
regulators {
- #address-cells = <1>;
- #size-cells = <0>;
-
dcdc1_reg: dcdc1 {
- reg = <0>;
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
@@ -38,7 +34,6 @@ Example:
};
dcdc2_reg: dcdc2 {
- reg = <1>;
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
@@ -46,7 +41,6 @@ Example:
};
dcdc3_reg: dcc3 {
- reg = <2>;
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1500000>;
regulator-boot-on;
@@ -54,7 +48,6 @@ Example:
};
ldo1_reg: ldo1 {
- reg = <3>;
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
@@ -62,7 +55,6 @@ Example:
};
ldo2_reg: ldo2 {
- reg = <4>;
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
@@ -70,7 +62,6 @@ Example:
};
ldo3_reg: ldo3 {
- reg = <5>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
@@ -78,7 +69,6 @@ Example:
};
ldo4_reg: ldo4 {
- reg = <6>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
diff --git a/Documentation/networking/mac80211-injection.txt b/Documentation/networking/mac80211-injection.txt
index 3a930072b161..ec8f934c2eb2 100644
--- a/Documentation/networking/mac80211-injection.txt
+++ b/Documentation/networking/mac80211-injection.txt
@@ -28,6 +28,23 @@ radiotap headers and used to control injection:
IEEE80211_RADIOTAP_F_TX_NOACK: frame should be sent without waiting for
an ACK even if it is a unicast frame
+ * IEEE80211_RADIOTAP_RATE
+
+	legacy rate for the transmission (only for devices without their own rate control)
+
+ * IEEE80211_RADIOTAP_MCS
+
+	HT rate for the transmission (only for devices without their own rate
+	control). Some flags are parsed as well:
+
+ IEEE80211_TX_RC_SHORT_GI: use short guard interval
+ IEEE80211_TX_RC_40_MHZ_WIDTH: send in HT40 mode
+
+ * IEEE80211_RADIOTAP_DATA_RETRIES
+
+ number of retries when either IEEE80211_RADIOTAP_RATE or
+ IEEE80211_RADIOTAP_MCS was used
+
The injection code can also skip all other currently defined radiotap fields
facilitating replay of captured radiotap headers directly.
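
To make the field layout concrete, here is a minimal C sketch of a radiotap
header requesting a fixed legacy rate and a retry count. It assumes a
little-endian host and uses the radiotap bit numbers RATE = 2 and
DATA_RETRIES = 17; field bodies follow the present bitmap in bit order:

    #include <stdint.h>
    #include <string.h>

    /* Build an 8-byte radiotap header followed by two 1-byte fields:
     * rate (bit 2, in 500 kb/s units) and data retries (bit 17).
     * Assumes a little-endian host; a portable version would byte-swap.
     */
    static size_t build_radiotap(uint8_t *buf)
    {
            uint32_t present = (1U << 2) | (1U << 17);
            uint16_t len = 10;              /* header (8) + rate (1) + retries (1) */

            buf[0] = 0;                     /* it_version */
            buf[1] = 0;                     /* it_pad */
            memcpy(&buf[2], &len, 2);       /* it_len */
            memcpy(&buf[4], &present, 4);   /* it_present */
            buf[8] = 108;                   /* 54 Mb/s in 500 kb/s units */
            buf[9] = 3;                     /* IEEE80211_RADIOTAP_DATA_RETRIES */
            return len;
    }

The injected 802.11 frame then follows immediately at buf[len].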
diff --git a/Documentation/networking/rds.txt b/Documentation/networking/rds.txt
index e1a3d59bbe0f..9d219d856d46 100644
--- a/Documentation/networking/rds.txt
+++ b/Documentation/networking/rds.txt
@@ -19,9 +19,7 @@ to N*N if you use a connection-oriented socket transport like TCP.
RDS is not Infiniband-specific; it was designed to support different
transports. The current implementation used to support RDS over TCP as well
-as IB. Work is in progress to support RDS over iWARP, and using DCE to
-guarantee no dropped packets on Ethernet, it may be possible to use RDS over
-UDP in the future.
+as IB.
The high-level semantics of RDS from the application's point of view are
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index 2ee6ef9a6554..1f0c27049340 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -83,6 +83,8 @@ rfkill drivers that control devices that can be hard-blocked unless they also
assign the poll_hw_block() callback (then the rfkill core will poll the
device). Don't do this unless you cannot get the event in any other way.
+RFKill provides per-switch LED triggers, which can be used to drive LEDs
+according to the switch state (LED_FULL when blocked, LED_OFF otherwise).
5. Userspace support
diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt
index 9f9ec9f76039..4e4b6f10d841 100644
--- a/Documentation/watchdog/watchdog-parameters.txt
+++ b/Documentation/watchdog/watchdog-parameters.txt
@@ -400,3 +400,7 @@ wm8350_wdt:
nowayout: Watchdog cannot be stopped once started
(default=kernel config parameter)
-------------------------------------------------
+sun4v_wdt:
+timeout_ms: Watchdog timeout in milliseconds (1..180000, default=60000)
+nowayout: Watchdog cannot be stopped once started
+-------------------------------------------------
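
As a hedged usage sketch, the generic /dev/watchdog chardev API applies to
this driver as to any other; note that WDIOC_GETTIMEOUT reports seconds,
whereas the timeout_ms module parameter above is in milliseconds:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/watchdog.h>

    /* Ping the watchdog and query its timeout via the generic chardev API. */
    int main(void)
    {
            int timeout, fd = open("/dev/watchdog", O_WRONLY);

            if (fd < 0)
                    return 1;
            ioctl(fd, WDIOC_KEEPALIVE, 0);
            if (ioctl(fd, WDIOC_GETTIMEOUT, &timeout) == 0)
                    printf("timeout: %d s\n", timeout);
            write(fd, "V", 1);      /* magic close; a no-op when nowayout is set */
            close(fd);
            return 0;
    }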
diff --git a/MAINTAINERS b/MAINTAINERS
index 27393cff1707..2132c99f7fcd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -151,7 +151,7 @@ S: Maintained
F: drivers/scsi/53c700*
6LOWPAN GENERIC (BTLE/IEEE 802.15.4)
-M: Alexander Aring <[email protected]>
+M: Alexander Aring <[email protected]>
M: Jukka Rissanen <[email protected]>
@@ -920,17 +920,24 @@ M: Emilio López <[email protected]>
S: Maintained
F: drivers/clk/sunxi/
-ARM/Amlogic MesonX SoC support
+ARM/Amlogic Meson SoC support
M: Carlo Caione <[email protected]>
L: [email protected] (moderated for non-subscribers)
+W: http://linux-meson.com/
S: Maintained
-F: drivers/media/rc/meson-ir.c
-N: meson[x68]
+F: arch/arm/mach-meson/
+F: arch/arm/boot/dts/meson*
+N: meson
ARM/Annapurna Labs ALPINE ARCHITECTURE
M: Tsahee Zidenberg <[email protected]>
+M: Antoine Tenart <[email protected]>
S: Maintained
F: arch/arm/mach-alpine/
+F: arch/arm/boot/dts/alpine*
+F: arch/arm64/boot/dts/al/
+F: drivers/*/*alpine*
ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
M: Nicolas Ferre <[email protected]>
@@ -2152,6 +2159,7 @@ M: Simon Wunderlich <[email protected]>
M: Antonio Quartulli <[email protected]>
W: https://www.open-mesh.org/
+Q: https://patchwork.open-mesh.org/project/batman/list/
S: Maintained
F: net/batman-adv/
@@ -3445,7 +3453,6 @@ F: drivers/usb/dwc2/
DESIGNWARE USB3 DRD IP DRIVER
M: Felipe Balbi <[email protected]>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
S: Maintained
F: drivers/usb/dwc3/
@@ -3499,6 +3506,14 @@ F: include/linux/device-mapper.h
F: include/linux/dm-*.h
F: include/uapi/linux/dm-*.h
+DEVLINK
+M: Jiri Pirko <[email protected]>
+S: Supported
+F: net/core/devlink.c
+F: include/net/devlink.h
+F: include/uapi/linux/devlink.h
+
DIALOG SEMICONDUCTOR DRIVERS
M: Support Opensource <[email protected]>
W: http://www.dialog-semiconductor.com/products
@@ -4513,6 +4528,12 @@ L: [email protected]
S: Maintained
F: drivers/dma/fsldma.*
+FREESCALE GPMI NAND DRIVER
+M: Han Xu <[email protected]>
+S: Maintained
+F: drivers/mtd/nand/gpmi-nand/*
+
FREESCALE I2C CPM DRIVER
M: Jochen Friedrich <[email protected]>
@@ -4529,7 +4550,7 @@ F: include/linux/platform_data/video-imxfb.h
F: drivers/video/fbdev/imxfb.c
FREESCALE QUAD SPI DRIVER
-M: Han Xu <[email protected]>
+M: Han Xu <[email protected]>
S: Maintained
F: drivers/mtd/spi-nor/fsl-quadspi.c
@@ -4543,6 +4564,15 @@ S: Maintained
F: drivers/net/ethernet/freescale/fs_enet/
F: include/linux/fs_enet_pd.h
+FREESCALE IMX / MXC FEC DRIVER
+M: Fugang Duan <[email protected]>
+S: Maintained
+F: drivers/net/ethernet/freescale/fec_main.c
+F: drivers/net/ethernet/freescale/fec_ptp.c
+F: drivers/net/ethernet/freescale/fec.h
+F: Documentation/devicetree/bindings/net/fsl-fec.txt
+
FREESCALE QUICC ENGINE LIBRARY
S: Orphan
@@ -5415,10 +5445,11 @@ S: Supported
F: drivers/idle/i7300_idle.c
IEEE 802.15.4 SUBSYSTEM
-M: Alexander Aring <[email protected]>
+M: Alexander Aring <[email protected]>
-W: https://github.com/linux-wpan
-T: git git://github.com/linux-wpan/linux-wpan-next.git
+W: http://wpan.cakelab.org/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
S: Maintained
F: net/ieee802154/
F: net/mac802154/
@@ -6759,6 +6790,7 @@ S: Maintained
F: Documentation/networking/mac80211-injection.txt
F: include/net/mac80211.h
F: net/mac80211/
+F: drivers/net/wireless/mac80211_hwsim.[ch]
MACVLAN DRIVER
M: Patrick McHardy <[email protected]>
@@ -7355,7 +7387,7 @@ F: drivers/tty/isicom.c
F: include/linux/isicom.h
MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
-M: Felipe Balbi <[email protected]>
+M: Bin Liu <[email protected]>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
S: Maintained
@@ -7491,7 +7523,6 @@ F: net/netrom/
NETRONOME ETHERNET DRIVERS
M: Jakub Kicinski <[email protected]>
-M: Rolf Neugebauer <[email protected]>
S: Maintained
F: drivers/net/ethernet/netronome/
@@ -7687,13 +7718,13 @@ S: Maintained
F: arch/nios2/
NOKIA N900 POWER SUPPLY DRIVERS
-M: Pali Rohár <[email protected]>
-S: Maintained
+R: Pali Rohár <[email protected]>
F: include/linux/power/bq2415x_charger.h
F: include/linux/power/bq27xxx_battery.h
F: include/linux/power/isp1704_charger.h
F: drivers/power/bq2415x_charger.c
F: drivers/power/bq27xxx_battery.c
+F: drivers/power/bq27xxx_battery_i2c.c
F: drivers/power/isp1704_charger.c
F: drivers/power/rx51_battery.c
@@ -7924,11 +7955,9 @@ F: drivers/media/platform/omap3isp/
F: drivers/staging/media/omap4iss/
OMAP USB SUPPORT
-M: Felipe Balbi <[email protected]>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
-S: Maintained
+S: Orphan
F: drivers/usb/*/*omap*
F: arch/arm/*omap*/usb*
@@ -9067,10 +9096,14 @@ S: Maintained
F: drivers/net/ethernet/rdc/r6040.c
RDS - RELIABLE DATAGRAM SOCKETS
-M: Chien Yen <[email protected]>
+M: Santosh Shilimkar <[email protected]>
L: [email protected] (moderated for non-subscribers)
+W: https://oss.oracle.com/projects/rds/
S: Supported
F: net/rds/
+F: Documentation/networking/rds.txt
READ-COPY UPDATE (RCU)
M: "Paul E. McKenney" <[email protected]>
@@ -9559,6 +9592,12 @@ M: Andreas Noever <[email protected]>
S: Maintained
F: drivers/thunderbolt/
+TI BQ27XXX POWER SUPPLY DRIVER
+R: Andrew F. Davis <[email protected]>
+F: include/linux/power/bq27xxx_battery.h
+F: drivers/power/bq27xxx_battery.c
+F: drivers/power/bq27xxx_battery_i2c.c
+
TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
M: John Stultz <[email protected]>
M: Thomas Gleixner <[email protected]>
@@ -11332,6 +11371,13 @@ S: Maintained
F: drivers/usb/host/isp116x*
F: include/linux/usb/isp116x.h
+USB LAN78XX ETHERNET DRIVER
+M: Woojung Huh <[email protected]>
+M: Microchip Linux Driver Support <[email protected]>
+S: Maintained
+F: drivers/net/usb/lan78xx.*
+
USB MASS STORAGE DRIVER
M: Matthew Dharm <[email protected]>
diff --git a/Makefile b/Makefile
index fbe1b921798f..2d519d2fb3a9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 5
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
NAME = Blurry Fish Butt
# *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 0655495470ad..8a188bc1786a 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -12,8 +12,6 @@ config ARC
select BUILDTIME_EXTABLE_SORT
select COMMON_CLK
select CLONE_BACKWARDS
- # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
- select DEVTMPFS if !INITRAMFS_SOURCE=""
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select GENERIC_FIND_FIRST_BIT
@@ -275,14 +273,6 @@ config ARC_DCCM_BASE
default "0xA0000000"
depends on ARC_HAS_DCCM
-config ARC_HAS_HW_MPY
- bool "Use Hardware Multiplier (Normal or Faster XMAC)"
- default y
- help
- Influences how gcc generates code for MPY operations.
- If enabled, MPYxx insns are generated, provided by Standard/XMAC
- Multipler. Otherwise software multipy lib is used
-
choice
prompt "MMU Version"
default ARC_MMU_V3 if ARC_CPU_770
@@ -542,14 +532,6 @@ config ARC_DBG_TLB_MISS_COUNT
Counts number of I and D TLB Misses and exports them via Debugfs
The counters can be cleared via Debugfs as well
-if SMP
-
-config ARC_IPI_DBG
- bool "Debug Inter Core interrupts"
- default n
-
-endif
-
endif
config ARC_UBOOT_SUPPORT
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index aeb19021099e..c8230f3395f2 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -74,10 +74,6 @@ ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
# --build-id w/o "-marclinux". Default arc-elf32-ld is OK
ldflags-$(upto_gcc44) += -marclinux
-ifndef CONFIG_ARC_HAS_HW_MPY
- cflags-y += -mno-mpy
-endif
-
LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
# Modules with short calls might break for calls into builtin-kernel
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index f1ac9818b751..5d4e2a07ad3e 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -39,6 +39,7 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -73,7 +74,6 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_LOGO=y
@@ -91,12 +91,10 @@ CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_DW=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
-CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 323486d6ee83..87ee46b237ef 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -39,14 +39,10 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_AXS=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
@@ -78,14 +74,12 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
# CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
@@ -97,12 +91,10 @@ CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_DW=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
-CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 66191cd0447e..d80daf4f7e73 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -40,14 +40,10 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_AXS=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
@@ -79,14 +75,12 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
# CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
@@ -98,12 +92,10 @@ CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_DW=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
-CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 138f9d887957..f41095340b6a 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -4,6 +4,7 @@ CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
@@ -26,7 +27,6 @@ CONFIG_ARC_PLAT_SIM=y
CONFIG_ARC_BUILTIN_DTB_NAME="nsim_700"
CONFIG_PREEMPT=y
# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -34,6 +34,7 @@ CONFIG_UNIX_DIAG=y
CONFIG_NET_KEY=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -51,7 +52,6 @@ CONFIG_SERIAL_ARC=y
CONFIG_SERIAL_ARC_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_IOMMU_SUPPORT is not set
@@ -63,4 +63,3 @@ CONFIG_NFS_FS=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
# CONFIG_DEBUG_PREEMPT is not set
-CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index f68838e8068a..cfaa33cb5921 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -35,6 +35,7 @@ CONFIG_UNIX_DIAG=y
CONFIG_NET_KEY=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -49,7 +50,6 @@ CONFIG_SERIAL_ARC=y
CONFIG_SERIAL_ARC_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_IOMMU_SUPPORT is not set
@@ -61,4 +61,3 @@ CONFIG_NFS_FS=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
# CONFIG_DEBUG_PREEMPT is not set
-CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 96bd1c20fb0b..bb2a8dc778b5 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -2,6 +2,7 @@ CONFIG_CROSS_COMPILE="arc-linux-"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
@@ -21,13 +22,11 @@ CONFIG_MODULES=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARC_PLAT_SIM=y
-CONFIG_ARC_BOARD_ML509=y
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs_idu"
CONFIG_PREEMPT=y
# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -35,6 +34,7 @@ CONFIG_UNIX_DIAG=y
CONFIG_NET_KEY=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -49,7 +49,6 @@ CONFIG_SERIAL_ARC=y
CONFIG_SERIAL_ARC_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_IOMMU_SUPPORT is not set
@@ -60,4 +59,3 @@ CONFIG_TMPFS=y
CONFIG_NFS_FS=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 31e1d95764ff..646182e93753 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -33,6 +33,7 @@ CONFIG_UNIX_DIAG=y
CONFIG_NET_KEY=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -58,7 +59,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_HID is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index fcae66683ca0..ceca2541950d 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -34,12 +34,12 @@ CONFIG_UNIX_DIAG=y
CONFIG_NET_KEY=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_BLK_DEV is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_OSCI_LAN=y
CONFIG_INPUT_EVDEV=y
# CONFIG_MOUSE_PS2_ALPS is not set
# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
@@ -58,7 +58,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_HID is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index b01b659168ea..4b6da90f6f26 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -2,6 +2,7 @@ CONFIG_CROSS_COMPILE="arc-linux-"
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
@@ -18,15 +19,11 @@ CONFIG_MODULES=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARC_PLAT_SIM=y
-CONFIG_ARC_BOARD_ML509=y
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
-CONFIG_ARC_HAS_LL64=y
-# CONFIG_ARC_HAS_RTSC is not set
CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs_idu"
CONFIG_PREEMPT=y
# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=y
@@ -40,6 +37,7 @@ CONFIG_INET=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -56,14 +54,11 @@ CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-CONFIG_NET_OSCI_LAN=y
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
CONFIG_MOUSE_PS2_TOUCHKIT=y
# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_LIBPS2=y
CONFIG_SERIO_ARC_PS2=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
@@ -75,9 +70,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_FB=y
-CONFIG_ARCPGU_RGB888=y
-CONFIG_ARCPGU_DISPTYPE=0
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_HID is not set
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index 3b4dc9cebcf1..9b342eaf95ae 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -3,6 +3,7 @@ CONFIG_CROSS_COMPILE="arc-linux-"
CONFIG_DEFAULT_HOSTNAME="tb10x"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -26,12 +27,10 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_BLOCK is not set
CONFIG_ARC_PLAT_TB10X=y
CONFIG_ARC_CACHE_LINE_SHIFT=5
-CONFIG_ARC_STACK_NONEXEC=y
CONFIG_HZ=250
CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk"
CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -44,8 +43,8 @@ CONFIG_IP_MULTICAST=y
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_NETDEVICES=y
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -55,9 +54,6 @@ CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SEEQ is not set
CONFIG_STMMAC_ETH=y
-CONFIG_STMMAC_DEBUG_FS=y
-CONFIG_STMMAC_DA=y
-CONFIG_STMMAC_CHAINED=y
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
@@ -91,7 +87,6 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_LEDS_TRIGGER_TRANSIENT=y
CONFIG_DMADEVICES=y
CONFIG_DW_DMAC=y
-CONFIG_NET_DMA=y
CONFIG_ASYNC_TX_DMA=y
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
@@ -100,17 +95,16 @@ CONFIG_TMPFS=y
CONFIG_CONFIGFS_FS=y
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_DEBUG_INFO=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
-CONFIG_MAGIC_SYSRQ=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
CONFIG_HEADERS_CHECK=y
CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index fdc5be5b1029..f9f4c6f59fdb 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -10,7 +10,8 @@
#define _ASM_ARC_ARCREGS_H
/* Build Configuration Registers */
-#define ARC_REG_DCCMBASE_BCR 0x61 /* DCCM Base Addr */
+#define ARC_REG_AUX_DCCM 0x18 /* DCCM Base Addr ARCv2 */
+#define ARC_REG_DCCM_BASE_BUILD 0x61 /* DCCM Base Addr ARCompact */
#define ARC_REG_CRC_BCR 0x62
#define ARC_REG_VECBASE_BCR 0x68
#define ARC_REG_PERIBASE_BCR 0x69
@@ -18,10 +19,10 @@
#define ARC_REG_DPFP_BCR 0x6C /* ARCompact: Dbl Precision FPU */
#define ARC_REG_FP_V2_BCR 0xc8 /* ARCv2 FPU */
#define ARC_REG_SLC_BCR 0xce
-#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */
+#define ARC_REG_DCCM_BUILD 0x74 /* DCCM size (common) */
#define ARC_REG_TIMERS_BCR 0x75
#define ARC_REG_AP_BCR 0x76
-#define ARC_REG_ICCM_BCR 0x78
+#define ARC_REG_ICCM_BUILD 0x78 /* ICCM size (common) */
#define ARC_REG_XY_MEM_BCR 0x79
#define ARC_REG_MAC_BCR 0x7a
#define ARC_REG_MUL_BCR 0x7b
@@ -36,6 +37,7 @@
#define ARC_REG_IRQ_BCR 0xF3
#define ARC_REG_SMART_BCR 0xFF
#define ARC_REG_CLUSTER_BCR 0xcf
+#define ARC_REG_AUX_ICCM 0x208 /* ICCM Base Addr (ARCv2) */
/* status32 Bits Positions */
#define STATUS_AE_BIT 5 /* Exception active */
@@ -246,7 +248,7 @@ struct bcr_perip {
#endif
};
-struct bcr_iccm {
+struct bcr_iccm_arcompact {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int base:16, pad:5, sz:3, ver:8;
#else
@@ -254,17 +256,15 @@ struct bcr_iccm {
#endif
};
-/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */
-struct bcr_dccm_base {
+struct bcr_iccm_arcv2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int addr:24, ver:8;
+ unsigned int pad:8, sz11:4, sz01:4, sz10:4, sz00:4, ver:8;
#else
- unsigned int ver:8, addr:24;
+ unsigned int ver:8, sz00:4, sz10:4, sz01:4, sz11:4, pad:8;
#endif
};
-/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */
-struct bcr_dccm {
+struct bcr_dccm_arcompact {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int res:21, sz:3, ver:8;
#else
@@ -272,6 +272,14 @@ struct bcr_dccm {
#endif
};
+struct bcr_dccm_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad2:12, cyc:3, pad1:1, sz1:4, sz0:4, ver:8;
+#else
+ unsigned int ver:8, sz0:4, sz1:4, pad1:1, cyc:3, pad2:12;
+#endif
+};
+
/* ARCompact: Both SP and DP FPU BCRs have same format */
struct bcr_fp_arcompact {
#ifdef CONFIG_CPU_BIG_ENDIAN
@@ -315,9 +323,9 @@ struct bcr_bpu_arcv2 {
struct bcr_generic {
#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad:24, ver:8;
+ unsigned int info:24, ver:8;
#else
- unsigned int ver:8, pad:24;
+ unsigned int ver:8, info:24;
#endif
};
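
The reworked BCR layouts above are consumed by overlaying a packed bitfield struct on the raw 32-bit aux register value, which is what the kernel's READ_BCR helper boils down to. A minimal user-space sketch of that decoding idea follows; the sample register value, the _demo struct name and the main() harness are made up for illustration and are not kernel code.

/*
 * Sketch of the BCR-decoding pattern: a 32-bit register value is
 * reinterpreted through a bitfield struct (little-endian layout
 * assumed, matching the !CONFIG_CPU_BIG_ENDIAN branch above).
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct bcr_iccm_arcv2_demo {
	uint32_t ver:8, sz00:4, sz10:4, sz01:4, sz11:4, pad:8;
};

int main(void)
{
	uint32_t raw = 0x00000204;	/* hypothetical: ver=4, sz00=2 */
	struct bcr_iccm_arcv2_demo bcr;

	memcpy(&bcr, &raw, sizeof(bcr));	/* what READ_BCR does */
	if (bcr.ver)				/* ver==0 means absent */
		printf("ICCM present, sz00=%u -> %u bytes\n",
		       bcr.sz00, 256u << bcr.sz00);	/* 1024 */
	return 0;
}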
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index 4fd7d62a6e30..49014f0ef36d 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -16,11 +16,9 @@
#ifdef CONFIG_ISA_ARCOMPACT
#define TIMER0_IRQ 3
#define TIMER1_IRQ 4
-#define IPI_IRQ (NR_CPU_IRQS-1) /* dummy to enable SMP build for up hardware */
#else
#define TIMER0_IRQ 16
#define TIMER1_IRQ 17
-#define IPI_IRQ 19
#endif
#include <linux/interrupt.h>
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 1fc18ee06cf2..37c2f751eebf 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -22,6 +22,7 @@
#define AUX_IRQ_CTRL 0x00E
#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
#define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */
+#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
#define AUX_IRQ_PRIORITY 0x206
#define ICAUSE 0x40a
#define AUX_IRQ_SELECT 0x40b
@@ -115,6 +116,16 @@ static inline int arch_irqs_disabled(void)
return arch_irqs_disabled_flags(arch_local_save_flags());
}
+static inline void arc_softirq_trigger(int irq)
+{
+ write_aux_reg(AUX_IRQ_HINT, irq);
+}
+
+static inline void arc_softirq_clear(int irq)
+{
+ write_aux_reg(AUX_IRQ_HINT, 0);
+}
+
#else
.macro IRQ_DISABLE scratch
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index b17830294706..c1264607bbff 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -45,11 +45,12 @@ VECTOR reserved ; Reserved slots
VECTOR handle_interrupt ; (16) Timer0
VECTOR handle_interrupt ; unused (Timer1)
VECTOR handle_interrupt ; unused (WDT)
-VECTOR handle_interrupt ; (19) ICI (inter core interrupt)
-VECTOR handle_interrupt
-VECTOR handle_interrupt
-VECTOR handle_interrupt
-VECTOR handle_interrupt ; (23) End of fixed IRQs
+VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI)
+VECTOR handle_interrupt ; (20) perf Interrupt
+VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI)
+VECTOR handle_interrupt ; unused
+VECTOR handle_interrupt ; (23) unused
+# End of fixed IRQs
.rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
VECTOR handle_interrupt
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index 06bcedf19b62..224d1c3aa9c4 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -81,9 +81,6 @@ static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
{
switch (irq) {
case TIMER0_IRQ:
-#ifdef CONFIG_SMP
- case IPI_IRQ:
-#endif
irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
break;
default:
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index bc771f58fefb..c41c364b926c 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -11,9 +11,13 @@
#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
+#include <asm/irqflags-arcv2.h>
#include <asm/mcip.h>
#include <asm/setup.h>
+#define IPI_IRQ 19
+#define SOFTIRQ_IRQ 21
+
static char smp_cpuinfo_buf[128];
static int idu_detected;
@@ -22,6 +26,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
static void mcip_setup_per_cpu(int cpu)
{
smp_ipi_irq_setup(cpu, IPI_IRQ);
+ smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
}
static void mcip_ipi_send(int cpu)
@@ -29,46 +34,44 @@ static void mcip_ipi_send(int cpu)
unsigned long flags;
int ipi_was_pending;
+ /* ARConnect can only send IPI to others */
+ if (unlikely(cpu == raw_smp_processor_id())) {
+ arc_softirq_trigger(SOFTIRQ_IRQ);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
/*
- * NOTE: We must spin here if the other cpu hasn't yet
- * serviced a previous message. This can burn lots
- * of time, but we MUST follows this protocol or
- * ipi messages can be lost!!!
- * Also, we must release the lock in this loop because
- * the other side may get to this same loop and not
- * be able to ack -- thus causing deadlock.
+	 * If the receiver already has a pending interrupt, elide sending
+	 * this one. Linux cross-core calling copes well with concurrent
+	 * IPIs coalesced into one;
+	 * see arch/arc/kernel/smp.c: ipi_send_msg_one()
*/
+ __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
+ ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
+ if (!ipi_was_pending)
+ __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
- do {
- raw_spin_lock_irqsave(&mcip_lock, flags);
- __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
- ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
- if (ipi_was_pending == 0)
- break; /* break out but keep lock */
- raw_spin_unlock_irqrestore(&mcip_lock, flags);
- } while (1);
-
- __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
-
-#ifdef CONFIG_ARC_IPI_DBG
- if (ipi_was_pending)
- pr_info("IPI ACK delayed from cpu %d\n", cpu);
-#endif
}
static void mcip_ipi_clear(int irq)
{
unsigned int cpu, c;
unsigned long flags;
- unsigned int __maybe_unused copy;
+
+ if (unlikely(irq == SOFTIRQ_IRQ)) {
+ arc_softirq_clear(irq);
+ return;
+ }
raw_spin_lock_irqsave(&mcip_lock, flags);
/* Who sent the IPI */
__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
- copy = cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */
+ cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */
/*
* In rare case, multiple concurrent IPIs sent to same target can
@@ -82,12 +85,6 @@ static void mcip_ipi_clear(int irq)
} while (cpu);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
-
-#ifdef CONFIG_ARC_IPI_DBG
- if (c != __ffs(copy))
- pr_info("IPIs from %x coalesced to %x\n",
- copy, raw_smp_processor_id());
-#endif
}
static void mcip_probe_n_setup(void)
@@ -111,10 +108,11 @@ static void mcip_probe_n_setup(void)
READ_BCR(ARC_REG_MCIP_BCR, mp);
sprintf(smp_cpuinfo_buf,
- "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
+ "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
mp.ver, mp.num_cores,
IS_AVAIL1(mp.ipi, "IPI "),
IS_AVAIL1(mp.idu, "IDU "),
+ IS_AVAIL1(mp.llm, "LLM "),
IS_AVAIL1(mp.dbg, "DEBUG "),
IS_AVAIL1(mp.gfrc, "GFRC"));
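
The rewritten send path above has three cases: a self-IPI is emulated with the software-triggered interrupt, a cross-core IPI is generated only when none is pending, and an already-pending IPI is silently coalesced. The stand-alone sketch below models just that decision logic; the stub state array and printouts stand in for the real ARConnect commands (CMD_INTRPT_READ_STATUS / CMD_INTRPT_GENERATE_IRQ) and are illustrative only.

#include <stdbool.h>
#include <stdio.h>

static bool hw_ipi_pending[4];		/* stand-in for ARConnect state */

static void send_ipi(int me, int cpu)
{
	if (cpu == me) {		/* ARConnect can't target self */
		printf("cpu%d: self-IPI via AUX_IRQ_HINT\n", me);
		return;
	}
	if (hw_ipi_pending[cpu]) {	/* CMD_INTRPT_READ_STATUS */
		printf("cpu%d->cpu%d: already pending, coalesced\n", me, cpu);
		return;
	}
	hw_ipi_pending[cpu] = true;	/* CMD_INTRPT_GENERATE_IRQ */
	printf("cpu%d->cpu%d: IPI generated\n", me, cpu);
}

int main(void)
{
	send_ipi(0, 0);	/* self: software triggered interrupt */
	send_ipi(0, 1);	/* first cross-core IPI: generated */
	send_ipi(0, 1);	/* second while pending: coalesced */
	return 0;
}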
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index a7edceba5f84..cdc821df1809 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -42,6 +42,53 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
+static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
+{
+ if (is_isa_arcompact()) {
+ struct bcr_iccm_arcompact iccm;
+ struct bcr_dccm_arcompact dccm;
+
+ READ_BCR(ARC_REG_ICCM_BUILD, iccm);
+ if (iccm.ver) {
+ cpu->iccm.sz = 4096 << iccm.sz; /* 8K to 512K */
+ cpu->iccm.base_addr = iccm.base << 16;
+ }
+
+ READ_BCR(ARC_REG_DCCM_BUILD, dccm);
+ if (dccm.ver) {
+ unsigned long base;
+ cpu->dccm.sz = 2048 << dccm.sz; /* 2K to 256K */
+
+ base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
+ cpu->dccm.base_addr = base & ~0xF;
+ }
+ } else {
+ struct bcr_iccm_arcv2 iccm;
+ struct bcr_dccm_arcv2 dccm;
+ unsigned long region;
+
+ READ_BCR(ARC_REG_ICCM_BUILD, iccm);
+ if (iccm.ver) {
+ cpu->iccm.sz = 256 << iccm.sz00; /* 512B to 16M */
+ if (iccm.sz00 == 0xF && iccm.sz01 > 0)
+ cpu->iccm.sz <<= iccm.sz01;
+
+ region = read_aux_reg(ARC_REG_AUX_ICCM);
+ cpu->iccm.base_addr = region & 0xF0000000;
+ }
+
+ READ_BCR(ARC_REG_DCCM_BUILD, dccm);
+ if (dccm.ver) {
+ cpu->dccm.sz = 256 << dccm.sz0;
+ if (dccm.sz0 == 0xF && dccm.sz1 > 0)
+ cpu->dccm.sz <<= dccm.sz1;
+
+ region = read_aux_reg(ARC_REG_AUX_DCCM);
+ cpu->dccm.base_addr = region & 0xF0000000;
+ }
+ }
+}
+
static void read_arc_build_cfg_regs(void)
{
struct bcr_perip uncached_space;
@@ -76,36 +123,11 @@ static void read_arc_build_cfg_regs(void)
cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
-
- /* Note that we read the CCM BCRs independent of kernel config
- * This is to catch the cases where user doesn't know that
- * CCMs are present in hardware build
- */
- {
- struct bcr_iccm iccm;
- struct bcr_dccm dccm;
- struct bcr_dccm_base dccm_base;
- unsigned int bcr_32bit_val;
-
- bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR);
- if (bcr_32bit_val) {
- iccm = *((struct bcr_iccm *)&bcr_32bit_val);
- cpu->iccm.base_addr = iccm.base << 16;
- cpu->iccm.sz = 0x2000 << (iccm.sz - 1);
- }
-
- bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR);
- if (bcr_32bit_val) {
- dccm = *((struct bcr_dccm *)&bcr_32bit_val);
- cpu->dccm.sz = 0x800 << (dccm.sz);
-
- READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base);
- cpu->dccm.base_addr = dccm_base.addr << 8;
- }
- }
-
READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
+ /* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
+ read_decode_ccm_bcr(cpu);
+
read_decode_mmu_bcr();
read_decode_cache_bcr();
@@ -237,8 +259,6 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
}
- n += scnprintf(buf + n, len - n, "%s",
- IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY));
}
n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
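
The ARCv2 branch of read_decode_ccm_bcr() computes the CCM size as 256 << sz00, escalating by the next size field when sz00 saturates at 0xF. A quick stand-alone check of that formula, with hypothetical field values:

#include <stdio.h>

static unsigned long iccm_sz(unsigned sz00, unsigned sz01)
{
	unsigned long sz = 256UL << sz00;	/* 512B ... 8M directly */

	if (sz00 == 0xF && sz01 > 0)		/* escalate past 8M */
		sz <<= sz01;
	return sz;
}

int main(void)
{
	printf("%lu\n", iccm_sz(2, 0));		/* 1024 (1K) */
	printf("%lu\n", iccm_sz(0xF, 1));	/* 16777216 (16M) */
	return 0;
}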
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index ef6e9e15b82a..424e937da5c8 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -336,11 +336,8 @@ irqreturn_t do_IPI(int irq, void *dev_id)
int rc;
rc = __do_IPI(msg);
-#ifdef CONFIG_ARC_IPI_DBG
- /* IPI received but no valid @msg */
if (rc)
pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
-#endif
pending &= ~(1U << msg);
} while (pending);
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 7a6a58ef8aaf..43788b1a1ac5 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -195,5 +195,7 @@ CFLAGS_font.o := -Dstatic=
$(obj)/font.c: $(FONTC)
$(call cmd,shipped)
+AFLAGS_hyp-stub.o := -Wa,-march=armv7-a
+
$(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S
$(call cmd,shipped)
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index f3db13d2d90e..0cc150b87b86 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -285,8 +285,10 @@
};
};
+
+/include/ "tps65217.dtsi"
+
&tps {
- compatible = "ti,tps65217";
/*
* Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only
* mode") at poweroff. Most BeagleBone versions do not support RTC-only
@@ -307,17 +309,12 @@
ti,pmic-shutdown-controller;
regulators {
- #address-cells = <1>;
- #size-cells = <0>;
-
dcdc1_reg: regulator@0 {
- reg = <0>;
regulator-name = "vdds_dpr";
regulator-always-on;
};
dcdc2_reg: regulator@1 {
- reg = <1>;
/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <925000>;
@@ -327,7 +324,6 @@
};
dcdc3_reg: regulator@2 {
- reg = <2>;
/* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
regulator-name = "vdd_core";
regulator-min-microvolt = <925000>;
@@ -337,25 +333,21 @@
};
ldo1_reg: regulator@3 {
- reg = <3>;
regulator-name = "vio,vrtc,vdds";
regulator-always-on;
};
ldo2_reg: regulator@4 {
- reg = <4>;
regulator-name = "vdd_3v3aux";
regulator-always-on;
};
ldo3_reg: regulator@5 {
- reg = <5>;
regulator-name = "vdd_1v8";
regulator-always-on;
};
ldo4_reg: regulator@6 {
- reg = <6>;
regulator-name = "vdd_3v3a";
regulator-always-on;
};
diff --git a/arch/arm/boot/dts/am335x-chilisom.dtsi b/arch/arm/boot/dts/am335x-chilisom.dtsi
index fda457b07e15..857d9894103a 100644
--- a/arch/arm/boot/dts/am335x-chilisom.dtsi
+++ b/arch/arm/boot/dts/am335x-chilisom.dtsi
@@ -128,21 +128,16 @@
};
-&tps {
- compatible = "ti,tps65217";
+/include/ "tps65217.dtsi"
+&tps {
regulators {
- #address-cells = <1>;
- #size-cells = <0>;
-
dcdc1_reg: regulator@0 {
- reg = <0>;
regulator-name = "vdds_dpr";
regulator-always-on;
};
dcdc2_reg: regulator@1 {
- reg = <1>;
/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <925000>;
@@ -152,7 +147,6 @@
};
dcdc3_reg: regulator@2 {
- reg = <2>;
/* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
regulator-name = "vdd_core";
regulator-min-microvolt = <925000>;
@@ -162,28 +156,24 @@
};
ldo1_reg: regulator@3 {
- reg = <3>;
regulator-name = "vio,vrtc,vdds";
regulator-boot-on;
regulator-always-on;
};
ldo2_reg: regulator@4 {
- reg = <4>;
regulator-name = "vdd_3v3aux";
regulator-boot-on;
regulator-always-on;
};
ldo3_reg: regulator@5 {
- reg = <5>;
regulator-name = "vdd_1v8";
regulator-boot-on;
regulator-always-on;
};
ldo4_reg: regulator@6 {
- reg = <6>;
regulator-name = "vdd_3v3d";
regulator-boot-on;
regulator-always-on;
diff --git a/arch/arm/boot/dts/am335x-nano.dts b/arch/arm/boot/dts/am335x-nano.dts
index 77559a1ded60..f313999c503e 100644
--- a/arch/arm/boot/dts/am335x-nano.dts
+++ b/arch/arm/boot/dts/am335x-nano.dts
@@ -375,15 +375,11 @@
wp-gpios = <&gpio3 18 0>;
};
-&tps {
- compatible = "ti,tps65217";
+#include "tps65217.dtsi"
+&tps {
regulators {
- #address-cells = <1>;
- #size-cells = <0>;
-
dcdc1_reg: regulator@0 {
- reg = <0>;
/* +1.5V voltage with ±4% tolerance */
regulator-min-microvolt = <1450000>;
regulator-max-microvolt = <1550000>;
@@ -392,7 +388,6 @@
};
dcdc2_reg: regulator@1 {
- reg = <1>;
/* VDD_MPU voltage limits 0.95V - 1.1V with ±4% tolerance */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <915000>;
@@ -402,7 +397,6 @@
};
dcdc3_reg: regulator@2 {
- reg = <2>;
/* VDD_CORE voltage limits 0.95V - 1.1V with ±4% tolerance */
regulator-name = "vdd_core";
regulator-min-microvolt = <915000>;
@@ -412,7 +406,6 @@
};
ldo1_reg: regulator@3 {
- reg = <3>;
/* +1.8V voltage with ±4% tolerance */
regulator-min-microvolt = <1750000>;
regulator-max-microvolt = <1870000>;
@@ -421,7 +414,6 @@
};
ldo2_reg: regulator@4 {
- reg = <4>;
/* +3.3V voltage with ±4% tolerance */
regulator-min-microvolt = <3175000>;
regulator-max-microvolt = <3430000>;
@@ -430,7 +422,6 @@
};
ldo3_reg: regulator@5 {
- reg = <5>;
/* +1.8V voltage with ±4% tolerance */
regulator-min-microvolt = <1750000>;
regulator-max-microvolt = <1870000>;
@@ -439,7 +430,6 @@
};
ldo4_reg: regulator@6 {
- reg = <6>;
/* +3.3V voltage with ±4% tolerance */
regulator-min-microvolt = <3175000>;
regulator-max-microvolt = <3430000>;
diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts
index 471a3a70ea1f..8867aaaec54d 100644
--- a/arch/arm/boot/dts/am335x-pepper.dts
+++ b/arch/arm/boot/dts/am335x-pepper.dts
@@ -420,9 +420,9 @@
vin-supply = <&vbat>;
};
-&tps {
- compatible = "ti,tps65217";
+/include/ "tps65217.dtsi"
+&tps {
backlight {
isel = <1>; /* ISET1 */
fdim = <200>; /* TPS65217_BL_FDIM_200HZ */
@@ -430,17 +430,12 @@
};
regulators {
- #address-cells = <1>;
- #size-cells = <0>;
-
dcdc1_reg: regulator@0 {
- reg = <0>;
/* VDD_1V8 system supply */
regulator-always-on;
};
dcdc2_reg: regulator@1 {
- reg = <1>;
/* VDD_CORE voltage limits 0.95V - 1.26V with +/-4% tolerance */
regulator-name = "vdd_core";
regulator-min-microvolt = <925000>;
@@ -450,7 +445,6 @@
};
dcdc3_reg: regulator@2 {
- reg = <2>;
/* VDD_MPU voltage limits 0.95V - 1.1V with +/-4% tolerance */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <925000>;
@@ -460,21 +454,18 @@
};
ldo1_reg: regulator@3 {
- reg = <3>;
/* VRTC 1.8V always-on supply */
regulator-name = "vrtc,vdds";
regulator-always-on;
};
ldo2_reg: regulator@4 {
- reg = <4>;
/* 3.3V rail */
regulator-name = "vdd_3v3aux";
regulator-always-on;
};
ldo3_reg: regulator@5 {
- reg = <5>;
/* VDD_3V3A 3.3V rail */
regulator-name = "vdd_3v3a";
regulator-min-microvolt = <3300000>;
@@ -482,7 +473,6 @@
};
ldo4_reg: regulator@6 {
- reg = <6>;
/* VDD_3V3B 3.3V rail */
regulator-name = "vdd_3v3b";
regulator-always-on;
diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts
index 1b5b044fcd91..865de8500f1c 100644
--- a/arch/arm/boot/dts/am335x-shc.dts
+++ b/arch/arm/boot/dts/am335x-shc.dts
@@ -46,7 +46,7 @@
gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
linux,code = <KEY_BACK>;
debounce-interval = <1000>;
- gpio-key,wakeup;
+ wakeup-source;
};
front_button {
@@ -54,7 +54,7 @@
gpios = <&gpio1 25 GPIO_ACTIVE_HIGH>;
linux,code = <KEY_FRONT>;
debounce-interval = <1000>;
- gpio-key,wakeup;
+ wakeup-source;
};
};
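
The two hunks above replace the driver-specific "gpio-key,wakeup" property with the generic "wakeup-source" name. On the consumer side such a flag is typically read through the unified property API; a hedged kernel-side sketch (the probe helper and its name are illustrative, not the actual gpio-keys driver):

#include <linux/property.h>
#include <linux/pm_wakeup.h>

static int demo_probe_wakeup(struct device *dev)
{
	/* true if the firmware node carries "wakeup-source" */
	bool wakeup = device_property_read_bool(dev, "wakeup-source");

	device_init_wakeup(dev, wakeup);	/* mark wakeup-capable */
	return 0;
}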
diff --git a/arch/arm/boot/dts/am335x-sl50.dts b/arch/arm/boot/dts/am335x-sl50.dts
index d38edfa53bb9..3303c281697b 100644
--- a/arch/arm/boot/dts/am335x-sl50.dts
+++ b/arch/arm/boot/dts/am335x-sl50.dts
@@ -375,19 +375,16 @@
pinctrl-0 = <&uart4_pins>;
};
+#include "tps65217.dtsi"
+
&tps {
- compatible = "ti,tps65217";
ti,pmic-shutdown-controller;
interrupt-parent = <&intc>;
interrupts = <7>; /* NNMI */
regulators {
- #address-cells = <1>;
- #size-cells = <0>;
-
dcdc1_reg: regulator@0 {
- reg = <0>;
/* VDDS_DDR */
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1500000>;
@@ -395,7 +392,6 @@
};
dcdc2_reg: regulator@1 {
- reg = <1>;
/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <925000>;
@@ -405,7 +401,6 @@
};
dcdc3_reg: regulator@2 {
- reg = <2>;
/* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
regulator-name = "vdd_core";
regulator-min-microvolt = <925000>;
@@ -415,7 +410,6 @@
};
ldo1_reg: regulator@3 {
- reg = <3>;
/* VRTC / VIO / VDDS*/
regulator-always-on;
regulator-min-microvolt = <1800000>;
@@ -423,7 +417,6 @@
};
ldo2_reg: regulator@4 {
- reg = <4>;
/* VDD_3V3AUX */
regulator-always-on;
regulator-min-microvolt = <3300000>;
@@ -431,7 +424,6 @@
};
ldo3_reg: regulator@5 {
- reg = <5>;
/* VDD_1V8 */
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -439,7 +431,6 @@
};
ldo4_reg: regulator@6 {
- reg = <6>;
/* VDD_3V3A */
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 36c0fa6c362a..a0986c65be0c 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -173,6 +173,8 @@
sound0_master: simple-audio-card,codec {
sound-dai = <&tlv320aic3104>;
+ assigned-clocks = <&clkoutmux2_clk_mux>;
+ assigned-clock-parents = <&sys_clk2_dclk_div>;
clocks = <&clkout2_clk>;
};
};
@@ -796,6 +798,8 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&mcasp3_pins_default>;
pinctrl-1 = <&mcasp3_pins_sleep>;
+ assigned-clocks = <&mcasp3_ahclkx_mux>;
+ assigned-clock-parents = <&sys_clkin2>;
status = "okay";
op-mode = <0>; /* MCASP_IIS_MODE */
diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
index 8d93882dc8d5..1c06cb76da07 100644
--- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
+++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
@@ -545,7 +545,7 @@
ti,debounce-tol = /bits/ 16 <10>;
ti,debounce-rep = /bits/ 16 <1>;
- linux,wakeup;
+ wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 4f6ae921656f..f74d3db4846d 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -896,7 +896,6 @@
#size-cells = <1>;
reg = <0x2100000 0x10000>;
ranges = <0 0x2100000 0x10000>;
- interrupt-parent = <&intc>;
clocks = <&clks IMX6QDL_CLK_CAAM_MEM>,
<&clks IMX6QDL_CLK_CAAM_ACLK>,
<&clks IMX6QDL_CLK_CAAM_IPG>,
diff --git a/arch/arm/boot/dts/kirkwood-ds112.dts b/arch/arm/boot/dts/kirkwood-ds112.dts
index bf4143c6cb8f..b84af3da8c84 100644
--- a/arch/arm/boot/dts/kirkwood-ds112.dts
+++ b/arch/arm/boot/dts/kirkwood-ds112.dts
@@ -14,7 +14,7 @@
#include "kirkwood-synology.dtsi"
/ {
- model = "Synology DS111";
+ model = "Synology DS112";
compatible = "synology,ds111", "marvell,kirkwood";
memory {
diff --git a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
index 420788229e6f..aae8a7aceab7 100644
--- a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
+++ b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
@@ -228,6 +228,37 @@
};
};
+&devbus_bootcs {
+ status = "okay";
+ devbus,keep-config;
+
+ flash@0 {
+ compatible = "jedec-flash";
+ reg = <0 0x40000>;
+ bank-width = <1>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ header@0 {
+ reg = <0 0x30000>;
+ read-only;
+ };
+
+ uboot@30000 {
+ reg = <0x30000 0xF000>;
+ read-only;
+ };
+
+ uboot_env@3F000 {
+ reg = <0x3F000 0x1000>;
+ };
+ };
+ };
+};
+
&mdio {
status = "okay";
diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
index 6713b1ea732b..01d239c3eaaa 100644
--- a/arch/arm/boot/dts/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/r8a7791-porter.dts
@@ -283,7 +283,6 @@
pinctrl-names = "default";
status = "okay";
- renesas,enable-gpio = <&gpio5 31 GPIO_ACTIVE_HIGH>;
};
&usbphy {
diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
index 1afe24629d1f..b0c912feaa2f 100644
--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
+++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
@@ -90,7 +90,7 @@
#define PIN_PA14__I2SC1_MCK PINMUX_PIN(PIN_PA14, 4, 2)
#define PIN_PA14__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA14, 5, 1)
#define PIN_PA14__D9 PINMUX_PIN(PIN_PA14, 6, 2)
-#define PIN_PA15 14
+#define PIN_PA15 15
#define PIN_PA15__GPIO PINMUX_PIN(PIN_PA15, 0, 0)
#define PIN_PA15__SPI0_MOSI PINMUX_PIN(PIN_PA15, 1, 1)
#define PIN_PA15__TF1 PINMUX_PIN(PIN_PA15, 2, 1)
diff --git a/arch/arm/boot/dts/tps65217.dtsi b/arch/arm/boot/dts/tps65217.dtsi
new file mode 100644
index 000000000000..a63272422d76
--- /dev/null
+++ b/arch/arm/boot/dts/tps65217.dtsi
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Integrated Power Management Chip
+ * http://www.ti.com/lit/ds/symlink/tps65217.pdf
+ */
+
+&tps {
+ compatible = "ti,tps65217";
+
+ regulators {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dcdc1_reg: regulator@0 {
+ reg = <0>;
+ regulator-compatible = "dcdc1";
+ };
+
+ dcdc2_reg: regulator@1 {
+ reg = <1>;
+ regulator-compatible = "dcdc2";
+ };
+
+ dcdc3_reg: regulator@2 {
+ reg = <2>;
+ regulator-compatible = "dcdc3";
+ };
+
+ ldo1_reg: regulator@3 {
+ reg = <3>;
+ regulator-compatible = "ldo1";
+ };
+
+ ldo2_reg: regulator@4 {
+ reg = <4>;
+ regulator-compatible = "ldo2";
+ };
+
+ ldo3_reg: regulator@5 {
+ reg = <5>;
+ regulator-compatible = "ldo3";
+ };
+
+ ldo4_reg: regulator@6 {
+ reg = <6>;
+ regulator-compatible = "ldo4";
+ };
+ };
+};
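
The new tps65217.dtsi centralises the compatible string, the regulator unit addresses and the regulator-compatible strings, so the board files above now only add names, voltage limits and always-on flags. A hedged sketch of how a kernel driver typically walks such a "regulators" container node; the helper below is illustrative and not the actual tps65217 regulator driver:

#include <linux/of.h>
#include <linux/printk.h>

static void demo_list_regulators(struct device_node *pmic)
{
	struct device_node *parent, *child;
	const char *name;

	parent = of_get_child_by_name(pmic, "regulators");
	if (!parent)
		return;

	for_each_child_of_node(parent, child) {
		/* board files supply regulator-name; dtsi supplies reg */
		if (!of_property_read_string(child, "regulator-name", &name))
			pr_info("regulator: %s\n", name);
	}
	of_node_put(parent);
}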
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 7da5503c0591..e08d15184056 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -117,6 +117,7 @@ static inline u32 gic_read_iar(void)
u32 irqstat;
asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
+ dsb(sy);
return irqstat;
}
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 0375c8caa061..9408a994cc91 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
- bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
+ unsigned long page_pfn = page_to_xen_pfn(page);
+ unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+ unsigned long compound_pages =
+ (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+ bool local = (page_pfn <= dev_pfn) &&
+ (dev_pfn - page_pfn < compound_pages);
+
/*
- * Dom0 is mapped 1:1, while the Linux page can be spanned accross
- * multiple Xen page, it's not possible to have a mix of local and
- * foreign Xen page. So if the first xen_pfn == mfn the page is local
- * otherwise it's a foreign page grant-mapped in dom0. If the page is
- * local we can safely call the native dma_ops function, otherwise we
- * call the xen specific function.
+	 * Dom0 is mapped 1:1. While a Linux page can span across
+	 * multiple Xen pages, it's not possible for it to contain a
+	 * mix of local and foreign Xen pages. So if the first xen_pfn
+	 * == mfn the page is local, otherwise it's a foreign page
+	 * grant-mapped in dom0. If the page is local we can safely
+	 * call the native dma_ops function, otherwise we call the Xen
+	 * specific function.
*/
if (local)
__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 2c5f160be65e..ad325a8c7e1e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_DEBUG_LL) += debug.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
+AFLAGS_hyp-stub.o :=-Wa,-march=armv7-a
ifeq ($(CONFIG_ARM_PSCI),y)
obj-$(CONFIG_SMP) += psci_smp.o
endif
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 5fa69d7bae58..99361f11354a 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -161,7 +161,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
u64 val;
val = kvm_arm_timer_get_reg(vcpu, reg->id);
- return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+ return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
static unsigned long num_core_regs(void)
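
This fix (repeated below for arm64 and MIPS KVM) addresses a recurring bug class: copy_to_user() returns the number of bytes left uncopied, not an errno, so returning its result directly leaks a positive count to the ioctl caller. The idiom the patches converge on, as a minimal kernel-style sketch:

#include <linux/types.h>
#include <linux/uaccess.h>

static int demo_put_user_val(void __user *uaddr, u64 val, size_t size)
{
	/* nonzero means a partial or failed copy, never an errno */
	if (copy_to_user(uaddr, &val, size))
		return -EFAULT;
	return 0;
}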
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 7f33b2056ae6..0f6600f05137 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -206,7 +206,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
run->mmio.is_write = is_write;
run->mmio.phys_addr = fault_ipa;
run->mmio.len = len;
- memcpy(run->mmio.data, data_buf, len);
+ if (is_write)
+ memcpy(run->mmio.data, data_buf, len);
if (!ret) {
/* We handled the access successfully in the kernel. */
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 809827265fb3..bab814d2f37d 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -18,6 +18,7 @@
#include <asm/setup.h>
#include <asm/mach/arch.h>
+#include <asm/system_info.h>
#include "common.h"
@@ -77,12 +78,31 @@ static const char *const n900_boards_compat[] __initconst = {
NULL,
};
+/* Set system_rev from atags */
+static void __init rx51_set_system_rev(const struct tag *tags)
+{
+ const struct tag *tag;
+
+ if (tags->hdr.tag != ATAG_CORE)
+ return;
+
+ for_each_tag(tag, tags) {
+ if (tag->hdr.tag == ATAG_REVISION) {
+ system_rev = tag->u.revision.rev;
+ break;
+ }
+ }
+}
+
/* Legacy userspace on Nokia N900 needs ATAGS exported in /proc/atags,
* save them while the data is still not overwritten
*/
static void __init rx51_reserve(void)
{
- save_atags((const struct tag *)(PAGE_OFFSET + 0x100));
+ const struct tag *tags = (const struct tag *)(PAGE_OFFSET + 0x100);
+
+ save_atags(tags);
+ rx51_set_system_rev(tags);
omap_reserve();
}
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 7b76ce01c21d..8633c703546a 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -101,10 +101,8 @@ static void omap2_onenand_set_async_mode(void __iomem *onenand_base)
static void set_onenand_cfg(void __iomem *onenand_base)
{
- u32 reg;
+ u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
- reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
- reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9));
reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
ONENAND_SYS_CFG1_BL_16;
if (onenand_flags & ONENAND_FLAG_SYNCREAD)
@@ -123,6 +121,7 @@ static void set_onenand_cfg(void __iomem *onenand_base)
reg |= ONENAND_SYS_CFG1_VHF;
else
reg &= ~ONENAND_SYS_CFG1_VHF;
+
writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
}
@@ -289,6 +288,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
}
}
+ onenand_async.sync_write = true;
omap2_onenand_calc_async_timings(&t);
ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 0437537751bc..f7ff3b9dad87 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -191,12 +191,22 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_device *od;
+ int err;
switch (event) {
case BUS_NOTIFY_DEL_DEVICE:
if (pdev->archdata.od)
omap_device_delete(pdev->archdata.od);
break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ od = to_omap_device(pdev);
+ if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED)) {
+ dev_info(dev, "enabled after unload, idling\n");
+ err = omap_device_idle(pdev);
+ if (err)
+ dev_err(dev, "failed to idle\n");
+ }
+ break;
case BUS_NOTIFY_ADD_DEVICE:
if (pdev->dev.of_node)
omap_device_build_from_dt(pdev);
@@ -602,8 +612,10 @@ static int _od_runtime_resume(struct device *dev)
int ret;
ret = omap_device_enable(pdev);
- if (ret)
+ if (ret) {
+ dev_err(dev, "use pm_runtime_put_sync_suspend() in driver?\n");
return ret;
+ }
return pm_generic_runtime_resume(dev);
}
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index 9cb11215ceba..b3a4ed5289ec 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -4,7 +4,6 @@
extern void shmobile_init_delay(void);
extern void shmobile_boot_vector(void);
extern unsigned long shmobile_boot_fn;
-extern unsigned long shmobile_boot_arg;
extern unsigned long shmobile_boot_size;
extern void shmobile_smp_boot(void);
extern void shmobile_smp_sleep(void);
diff --git a/arch/arm/mach-shmobile/headsmp-scu.S b/arch/arm/mach-shmobile/headsmp-scu.S
index fa5248c52399..5e503d91ad70 100644
--- a/arch/arm/mach-shmobile/headsmp-scu.S
+++ b/arch/arm/mach-shmobile/headsmp-scu.S
@@ -38,9 +38,3 @@ ENTRY(shmobile_boot_scu)
b secondary_startup
ENDPROC(shmobile_boot_scu)
-
- .text
- .align 2
- .globl shmobile_scu_base
-shmobile_scu_base:
- .space 4
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index 330c1fc63197..32e0bf6e3ccb 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -24,7 +24,6 @@
.arm
.align 12
ENTRY(shmobile_boot_vector)
- ldr r0, 2f
ldr r1, 1f
bx r1
@@ -34,9 +33,6 @@ ENDPROC(shmobile_boot_vector)
.globl shmobile_boot_fn
shmobile_boot_fn:
1: .space 4
- .globl shmobile_boot_arg
-shmobile_boot_arg:
-2: .space 4
.globl shmobile_boot_size
shmobile_boot_size:
.long . - shmobile_boot_vector
@@ -46,13 +42,15 @@ shmobile_boot_size:
*/
ENTRY(shmobile_smp_boot)
- @ r0 = MPIDR_HWID_BITMASK
mrc p15, 0, r1, c0, c0, 5 @ r1 = MPIDR
- and r0, r1, r0 @ r0 = cpu_logical_map() value
+ and r0, r1, #0xffffff @ MPIDR_HWID_BITMASK
+ @ r0 = cpu_logical_map() value
mov r1, #0 @ r1 = CPU index
- adr r5, 1f @ array of per-cpu mpidr values
- adr r6, 2f @ array of per-cpu functions
- adr r7, 3f @ array of per-cpu arguments
+ adr r2, 1f
+ ldmia r2, {r5, r6, r7}
+ add r5, r5, r2 @ array of per-cpu mpidr values
+ add r6, r6, r2 @ array of per-cpu functions
+ add r7, r7, r2 @ array of per-cpu arguments
shmobile_smp_boot_find_mpidr:
ldr r8, [r5, r1, lsl #2]
@@ -80,12 +78,18 @@ ENTRY(shmobile_smp_sleep)
b shmobile_smp_boot
ENDPROC(shmobile_smp_sleep)
+ .align 2
+1: .long shmobile_smp_mpidr - .
+ .long shmobile_smp_fn - 1b
+ .long shmobile_smp_arg - 1b
+
+ .bss
.globl shmobile_smp_mpidr
shmobile_smp_mpidr:
-1: .space NR_CPUS * 4
+ .space NR_CPUS * 4
.globl shmobile_smp_fn
shmobile_smp_fn:
-2: .space NR_CPUS * 4
+ .space NR_CPUS * 4
.globl shmobile_smp_arg
shmobile_smp_arg:
-3: .space NR_CPUS * 4
+ .space NR_CPUS * 4
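
The headsmp.S rework stores link-time offsets (.long shmobile_smp_mpidr - .) instead of absolute addresses, then rebuilds the run-time address by adding the literal pool's actual location (the ldmia/add sequence), so the boot stub keeps working from wherever it is copied and the arrays can move to .bss. A user-space C analogue of that address arithmetic, with stand-in symbols:

#include <stdio.h>
#include <stdint.h>

static int target_table[4];	/* stands in for shmobile_smp_mpidr */
static char anchor;		/* stands in for the literal pool at 1: */

int main(void)
{
	/* "link-time" offset: target minus anchor */
	intptr_t offset = (intptr_t)target_table - (intptr_t)&anchor;
	/* run-time reconstruction, like add r5, r5, r2 above */
	int *rebuilt = (int *)((intptr_t)&anchor + offset);

	printf("match: %d\n", rebuilt == target_table);	/* prints 1 */
	return 0;
}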
diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
index 911884f7e28b..aba75c89f9c1 100644
--- a/arch/arm/mach-shmobile/platsmp-apmu.c
+++ b/arch/arm/mach-shmobile/platsmp-apmu.c
@@ -123,7 +123,6 @@ void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus,
{
/* install boot code shared by all CPUs */
shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
- shmobile_boot_arg = MPIDR_HWID_BITMASK;
/* perform per-cpu setup */
apmu_parse_cfg(apmu_init_cpu, apmu_config, num);
diff --git a/arch/arm/mach-shmobile/platsmp-scu.c b/arch/arm/mach-shmobile/platsmp-scu.c
index 64663110ab6c..081a097c9219 100644
--- a/arch/arm/mach-shmobile/platsmp-scu.c
+++ b/arch/arm/mach-shmobile/platsmp-scu.c
@@ -17,6 +17,9 @@
#include <asm/smp_scu.h>
#include "common.h"
+
+void __iomem *shmobile_scu_base;
+
static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
@@ -41,7 +44,6 @@ void __init shmobile_smp_scu_prepare_cpus(unsigned int max_cpus)
{
/* install boot code shared by all CPUs */
shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
- shmobile_boot_arg = MPIDR_HWID_BITMASK;
/* enable SCU and cache coherency on booting CPU */
scu_enable(shmobile_scu_base);
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index b854fe2095ad..0b024a9dbd43 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -92,8 +92,6 @@ static void __init r8a7779_smp_prepare_cpus(unsigned int max_cpus)
{
/* Map the reset vector (in headsmp-scu.S, headsmp.S) */
__raw_writel(__pa(shmobile_boot_vector), AVECR);
- shmobile_boot_fn = virt_to_phys(shmobile_boot_scu);
- shmobile_boot_arg = (unsigned long)shmobile_scu_base;
/* setup r8a7779 specific SCU bits */
shmobile_scu_base = IOMEM(R8A7779_SCU_BASE);
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index 49d1110cff53..52db8bf7e153 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -17,23 +17,25 @@
*
*/
+#include <linux/property.h>
#include <linux/gpio/machine.h>
#include <linux/platform_device.h>
-#include <linux/rfkill-gpio.h>
#include "board.h"
-static struct rfkill_gpio_platform_data wifi_rfkill_platform_data = {
- .name = "wifi_rfkill",
- .type = RFKILL_TYPE_WLAN,
+static struct property_entry __initdata wifi_rfkill_prop[] = {
+ PROPERTY_ENTRY_STRING("name", "wifi_rfkill"),
+ PROPERTY_ENTRY_STRING("type", "wlan"),
+ { },
+};
+
+static struct property_set __initdata wifi_rfkill_pset = {
+ .properties = wifi_rfkill_prop,
};
static struct platform_device wifi_rfkill_device = {
.name = "rfkill_gpio",
.id = -1,
- .dev = {
- .platform_data = &wifi_rfkill_platform_data,
- },
};
static struct gpiod_lookup_table wifi_gpio_lookup = {
@@ -47,6 +49,7 @@ static struct gpiod_lookup_table wifi_gpio_lookup = {
void __init tegra_paz00_wifikill_init(void)
{
+ platform_device_add_properties(&wifi_rfkill_device, &wifi_rfkill_pset);
gpiod_add_lookup_table(&wifi_gpio_lookup);
platform_device_register(&wifi_rfkill_device);
}
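
With the platform data gone, rfkill-gpio finds its name and type through the generic device properties populated above. A hedged sketch of that read-back side; the helper below is illustrative, not the actual rfkill-gpio probe:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/property.h>

static int demo_read_rfkill_props(struct device *dev)
{
	const char *name, *type;

	/* "name" and "type" match the property_entry strings above */
	if (device_property_read_string(dev, "name", &name) ||
	    device_property_read_string(dev, "type", &type))
		return -EINVAL;

	dev_info(dev, "rfkill %s (%s)\n", name, type);
	return 0;
}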
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 4b4058db0781..66353caa35b9 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -173,7 +173,7 @@ unsigned long arch_mmap_rnd(void)
{
unsigned long rnd;
- rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1);
+ rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
return rnd << PAGE_SHIFT;
}
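
Besides using the wider get_random_long(), the 1UL in the mask keeps the shift well-defined once mmap_rnd_bits approaches the width of int. The offset arithmetic itself, checked stand-alone with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned long mmap_rnd_bits = 8, page_shift = 12;
	/* keep only the low mmap_rnd_bits of the random value */
	unsigned long rnd = 0xABUL & ((1UL << mmap_rnd_bits) - 1);

	/* pages -> byte offset, as rnd << PAGE_SHIFT does above */
	printf("offset = %#lx\n", rnd << page_shift);	/* 0xab000 */
	return 0;
}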
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index cf30daff8932..d19b1ad29b07 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -49,6 +49,9 @@ static int change_memory_common(unsigned long addr, int numpages,
WARN_ON_ONCE(1);
}
+ if (!numpages)
+ return 0;
+
if (start < MODULES_VADDR || start >= MODULES_END)
return -EINVAL;
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index bf464de33f52..f50608674580 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -34,13 +34,13 @@
/*
* VMALLOC and SPARSEMEM_VMEMMAP ranges.
*
- * VMEMAP_SIZE: allows the whole VA space to be covered by a struct page array
+ * VMEMMAP_SIZE: allows the whole linear region to be covered by a struct page array
* (rounded up to PUD_SIZE).
* VMALLOC_START: beginning of the kernel VA space
* VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
* fixed mappings and modules
*/
-#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
+#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
#ifndef CONFIG_KASAN
#define VMALLOC_START (VA_START)
@@ -51,7 +51,8 @@
#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
-#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
+#define VMEMMAP_START (VMALLOC_END + SZ_64K)
+#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
#define FIRST_USER_ADDRESS 0UL
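
The vmemmap base is now biased backwards by memstart_addr >> PAGE_SHIFT, so indexing it with a raw PFN lands on the same struct page as indexing the real array with pfn minus the first RAM PFN. A toy stand-alone model of that projection, with made-up numbers:

#include <stdio.h>
#include <stdint.h>

struct page_demo { char pad[64]; };

int main(void)
{
	unsigned long memstart_pfn = 0x80000000UL >> 12;  /* made up */
	static struct page_demo real[16];	/* covers 16 RAM pages */
	/* biased base, via integers to sidestep C pointer rules */
	struct page_demo *vmemmap = (struct page_demo *)
		((uintptr_t)real - memstart_pfn * sizeof(struct page_demo));
	unsigned long pfn = memstart_pfn + 5;	/* 6th page of RAM */

	/* raw-PFN index hits the same slot as the unbiased index */
	printf("match: %d\n", &vmemmap[pfn] == &real[5]);	/* 1 */
	return 0;
}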
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index fcb778899a38..9e54ad7c240a 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -194,7 +194,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
u64 val;
val = kvm_arm_timer_get_reg(vcpu, reg->id);
- return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+ return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
/**
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 9142e082f5f3..5dd2a26444ec 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -149,16 +149,6 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
switch (nr_pri_bits) {
case 7:
- write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
- write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
- case 6:
- write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
- default:
- write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
- }
-
- switch (nr_pri_bits) {
- case 7:
write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
case 6:
@@ -167,6 +157,16 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
}
+ switch (nr_pri_bits) {
+ case 7:
+ write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
+ write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
+ case 6:
+ write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
+ default:
+ write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
+ }
+
switch (max_lr_idx) {
case 15:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f3b061e67bfe..7802f216a67a 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -319,8 +319,8 @@ void __init mem_init(void)
#endif
MLG(VMALLOC_START, VMALLOC_END),
#ifdef CONFIG_SPARSEMEM_VMEMMAP
- MLG((unsigned long)vmemmap,
- (unsigned long)vmemmap + VMEMMAP_SIZE),
+ MLG(VMEMMAP_START,
+ VMEMMAP_START + VMEMMAP_SIZE),
MLM((unsigned long)virt_to_page(PAGE_OFFSET),
(unsigned long)virt_to_page(high_memory)),
#endif
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 4c893b5189dd..232f787a088a 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -53,10 +53,10 @@ unsigned long arch_mmap_rnd(void)
#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_32BIT))
- rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_compat_bits) - 1);
+ rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
else
#endif
- rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1);
+ rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
return rnd << PAGE_SHIFT;
}
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index 8c6d76c9b2d6..d9907e57e9b9 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -270,7 +270,7 @@ uint32_t jz_gpio_port_get_value(int port, uint32_t mask)
}
EXPORT_SYMBOL(jz_gpio_port_get_value);
-#define IRQ_TO_BIT(irq) BIT(irq_to_gpio(irq) & 0x1f)
+#define IRQ_TO_BIT(irq) BIT((irq - JZ4740_IRQ_GPIO(0)) & 0x1f)
static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq)
{
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index 5ce3b746cedc..b4ac6374a38f 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -125,7 +125,7 @@ LEAF(_restore_fp_context)
END(_restore_fp_context)
.set reorder
- .type fault@function
+ .type fault, @function
.ent fault
fault: li v0, -EFAULT
jr ra
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index f09546ee2cdc..17732f876eff 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -358,7 +358,7 @@ LEAF(_restore_msa_all_upper)
.set reorder
- .type fault@function
+ .type fault, @function
.ent fault
fault: li v0, -EFAULT # failure
jr ra
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index ae790c575d4f..bf14da9f3e33 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -690,15 +690,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
asmlinkage void do_ov(struct pt_regs *regs)
{
enum ctx_state prev_state;
- siginfo_t info;
+ siginfo_t info = {
+ .si_signo = SIGFPE,
+ .si_code = FPE_INTOVF,
+ .si_addr = (void __user *)regs->cp0_epc,
+ };
prev_state = exception_enter();
die_if_kernel("Integer overflow", regs);
- info.si_code = FPE_INTOVF;
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
exception_exit(prev_state);
}
@@ -874,7 +874,7 @@ out:
void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
const char *str)
{
- siginfo_t info;
+ siginfo_t info = { 0 };
char b[40];
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
@@ -903,7 +903,6 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
else
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
- info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
break;
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 8bc3977576e6..3110447ab1e9 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -702,7 +702,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
void __user *uaddr = (void __user *)(long)reg->addr;
- return copy_to_user(uaddr, vs, 16);
+ return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
} else {
return -EINVAL;
}
@@ -732,7 +732,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
void __user *uaddr = (void __user *)(long)reg->addr;
- return copy_from_user(vs, uaddr, 16);
+ return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
} else {
return -EINVAL;
}
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 5c81fdd032c3..353037699512 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -146,7 +146,7 @@ unsigned long arch_mmap_rnd(void)
{
unsigned long rnd;
- rnd = (unsigned long)get_random_int();
+ rnd = get_random_long();
rnd <<= PAGE_SHIFT;
if (TASK_IS_32BIT_ADDR)
rnd &= 0xfffffful;
@@ -174,7 +174,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
static inline unsigned long brk_rnd(void)
{
- unsigned long rnd = get_random_int();
+ unsigned long rnd = get_random_long();
rnd = rnd << PAGE_SHIFT;
/* 8MB for 32bit, 256MB for 64bit */
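Several hunks in this series replace get_random_int() with get_random_long() in the mmap/brk randomizers so that, for 64-bit tasks, the random offset is not silently limited to 32 bits of entropy before the masking and PAGE_SHIFT shift. A hedged sketch of why the source width matters; it assumes an LP64 host, and the 36-bit mask and constants are illustrative rather than the exact kernel values:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* Placeholder entropy; the kernel uses get_random_int()/_long(). */
	unsigned int  rnd32 = 0xdeadbeefu;
	unsigned long rnd64 = 0xfeedfacedeadbeefUL;

	/* Illustrative 36-bit randomization mask for a 64-bit task. */
	unsigned long mask = 0xffffffffful;

	unsigned long off32 = ((unsigned long)rnd32 & mask) << PAGE_SHIFT;
	unsigned long off64 = (rnd64 & mask) << PAGE_SHIFT;

	/* Bits 32..35 of the 32-bit-sourced offset are always zero. */
	printf("offset from 32-bit source: %#lx\n", off32);
	printf("offset from 64-bit source: %#lx\n", off64);
	return 0;
}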
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 249647578e58..91dec32c77b7 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -164,11 +164,13 @@ static int __init mips_sc_probe_cm3(void)
sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
- c->scache.sets = 64 << sets;
+ if (sets)
+ c->scache.sets = 64 << sets;
line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
- c->scache.linesz = 2 << line_sz;
+ if (line_sz)
+ c->scache.linesz = 2 << line_sz;
assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
@@ -176,9 +178,12 @@ static int __init mips_sc_probe_cm3(void)
c->scache.waysize = c->scache.sets * c->scache.linesz;
c->scache.waybit = __ffs(c->scache.waysize);
- c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+ if (c->scache.linesz) {
+ c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+ return 1;
+ }
- return 1;
+ return 0;
}
static inline int __init mips_sc_probe(void)
diff --git a/arch/parisc/include/asm/floppy.h b/arch/parisc/include/asm/floppy.h
index f84ff12574b7..6d8276cd25ca 100644
--- a/arch/parisc/include/asm/floppy.h
+++ b/arch/parisc/include/asm/floppy.h
@@ -33,7 +33,7 @@
* floppy accesses go through the track buffer.
*/
#define _CROSS_64KB(a,s,vdma) \
-(!vdma && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
+(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
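The floppy hunk is a classic macro-hygiene fix: CROSS_64KB() passes "use_virtual_dma & 1" as the vdma argument, and without parentheses the expansion "!use_virtual_dma & 1" binds as "(!use_virtual_dma) & 1", which differs from the intended "!(use_virtual_dma & 1)". A small demonstration, assuming the flag word can hold bits other than bit 0 (GUARD_OLD/GUARD_NEW mirror only the vdma handling, with the 64KB test replaced by a constant):

#include <stdio.h>

#define GUARD_OLD(vdma)	(!vdma && 1)	/* buggy: "!" grabs only the lhs */
#define GUARD_NEW(vdma)	(!(vdma) && 1)	/* fixed: argument parenthesized */

int main(void)
{
	int use_virtual_dma = 2;	/* bit 0 clear, another bit set */

	/* Callers pass "use_virtual_dma & 1", as CROSS_64KB() does. */
	printf("old: %d\n", GUARD_OLD(use_virtual_dma & 1));	/* 0 (wrong) */
	printf("new: %d\n", GUARD_NEW(use_virtual_dma & 1));	/* 1 (right) */
	return 0;
}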
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 35bdccbb2036..b75039f92116 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -361,8 +361,9 @@
#define __NR_membarrier (__NR_Linux + 343)
#define __NR_userfaultfd (__NR_Linux + 344)
#define __NR_mlock2 (__NR_Linux + 345)
+#define __NR_copy_file_range (__NR_Linux + 346)
-#define __NR_Linux_syscalls (__NR_mlock2 + 1)
+#define __NR_Linux_syscalls (__NR_copy_file_range + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 9585c81f755f..ce0b2b4075c7 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -269,14 +269,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
long do_syscall_trace_enter(struct pt_regs *regs)
{
- long ret = 0;
-
/* Do the secure computing check first. */
secure_computing_strict(regs->gr[20]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs))
- ret = -1L;
+ tracehook_report_syscall_entry(regs)) {
+ /*
+ * Tracing decided this syscall should not happen or the
+ * debugger stored an invalid system call number. Skip
+ * the system call and the system call restart handling.
+ */
+ regs->gr[20] = -1UL;
+ goto out;
+ }
#ifdef CONFIG_64BIT
if (!is_compat_task())
@@ -290,7 +295,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
regs->gr[24] & 0xffffffff,
regs->gr[23] & 0xffffffff);
- return ret ? : regs->gr[20];
+out:
+ return regs->gr[20];
}
void do_syscall_trace_exit(struct pt_regs *regs)
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 3fbd7252a4b2..fbafa0d0e2bf 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -343,7 +343,7 @@ tracesys_next:
#endif
comiclr,>>= __NR_Linux_syscalls, %r20, %r0
- b,n .Lsyscall_nosys
+ b,n .Ltracesys_nosys
LDREGX %r20(%r19), %r19
@@ -359,6 +359,9 @@ tracesys_next:
be 0(%sr7,%r19)
ldo R%tracesys_exit(%r2),%r2
+.Ltracesys_nosys:
+ ldo -ENOSYS(%r0),%r28 /* set errno */
+
/* Do *not* call this function on the gateway page, because it
makes a direct call to syscall_trace. */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index d4ffcfbc9885..585d50fc75c0 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -441,6 +441,7 @@
ENTRY_SAME(membarrier)
ENTRY_SAME(userfaultfd)
ENTRY_SAME(mlock2) /* 345 */
+ ENTRY_SAME(copy_file_range)
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 301be3126ae3..650cfb31ea3d 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -418,8 +418,7 @@ static void *eeh_rmv_device(void *data, void *userdata)
eeh_pcid_put(dev);
if (driver->err_handler &&
driver->err_handler->error_detected &&
- driver->err_handler->slot_reset &&
- driver->err_handler->resume)
+ driver->err_handler->slot_reset)
return NULL;
}
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 05e804cdecaa..aec9a1b1d25b 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -109,8 +109,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
* If the breakpoint is unregistered between a hw_breakpoint_handler()
* and the single_step_dabr_instruction(), then cleanup the breakpoint
* restoration variables to prevent dangling pointers.
+ * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
*/
- if (bp->ctx && bp->ctx->task)
+ if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L))
bp->ctx->task->thread.last_hit_ubp = NULL;
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index dccc87e8fee5..3c5736e52a14 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1768,9 +1768,9 @@ static inline unsigned long brk_rnd(void)
/* 8MB for 32bit, 1GB for 64bit */
if (is_32bit_task())
- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+ rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
else
- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+ rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
return rnd << PAGE_SHIFT;
}
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index 0762c1e08c88..edb09912f0c9 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -111,7 +111,13 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
*/
if (!(old_pte & _PAGE_COMBO)) {
flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags);
- old_pte &= ~_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND;
+ /*
+ * clear the old slot details from the old and new pte.
+	 * On hash insert failure we use the old pte value, and we don't
+	 * want stale slot information there if the insert fails.
+ */
+ old_pte &= ~(_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND);
+ new_pte &= ~(_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND);
goto htab_insert_hpte;
}
/*
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 49b152b0f926..eb2accdd76fd 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -78,9 +78,19 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
* base page size. This is because demote_segment won't flush
* hash page table entries.
*/
- if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
+ if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO)) {
flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
ssize, flags);
+ /*
+ * With THP, we also clear the slot information with
+		 * respect to all the 64K hash ptes mapping the 16MB
+		 * page. They are all invalid now. This makes sure we
+		 * don't find the slot valid when we fault with a 4k
+		 * base page size.
+		 */
+ memset(hpte_slot_array, 0, PTE_FRAG_SIZE);
+ }
}
valid = hpte_valid(hpte_slot_array, index);
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 7e6d0880813f..83a8be791e06 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -8,6 +8,8 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
+#include <asm/mmu.h>
+
#ifdef CONFIG_PPC_FSL_BOOK3E
#ifdef CONFIG_PPC64
static inline int tlb1_next(void)
@@ -60,6 +62,14 @@ static inline void book3e_tlb_lock(void)
unsigned long tmp;
int token = smp_processor_id() + 1;
+ /*
+ * Besides being unnecessary in the absence of SMT, this
+ * check prevents trying to do lbarx/stbcx. on e5500 which
+ * doesn't implement either feature.
+ */
+ if (!cpu_has_feature(CPU_FTR_SMT))
+ return;
+
asm volatile("1: lbarx %0, 0, %1;"
"cmpwi %0, 0;"
"bne 2f;"
@@ -80,6 +90,9 @@ static inline void book3e_tlb_unlock(void)
{
struct paca_struct *paca = get_paca();
+ if (!cpu_has_feature(CPU_FTR_SMT))
+ return;
+
isync();
paca->tcd_ptr->lock = 0;
}
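The book3e hunk skips the lbarx/stbcx.-based TLB lock entirely when the CPU has no SMT threads: with one hardware thread per core there is nothing to exclude, and e5500 lacks the byte-reservation instructions anyway. A hedged C sketch of the same "early-out when the lock is unnecessary" shape; the cpu_has_smt flag stands in for cpu_has_feature(CPU_FTR_SMT), and the spinlock is a plain C11 atomic flag:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag tlb_lock = ATOMIC_FLAG_INIT;
static bool cpu_has_smt;	/* stand-in for cpu_has_feature(CPU_FTR_SMT) */

static void tlb_lock_acquire(void)
{
	if (!cpu_has_smt)
		return;		/* no sibling threads: lock is pure overhead */
	while (atomic_flag_test_and_set_explicit(&tlb_lock,
						 memory_order_acquire))
		;		/* spin */
}

static void tlb_lock_release(void)
{
	if (!cpu_has_smt)
		return;
	atomic_flag_clear_explicit(&tlb_lock, memory_order_release);
}

int main(void)
{
	cpu_has_smt = false;
	tlb_lock_acquire();	/* returns immediately */
	tlb_lock_release();
	puts("non-SMT path took no lock");
	return 0;
}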
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 0f0502e12f6c..4087705ba90f 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -59,9 +59,9 @@ unsigned long arch_mmap_rnd(void)
/* 8MB for 32bit, 1GB for 64bit */
if (is_32bit_task())
- rnd = (unsigned long)get_random_int() % (1<<(23-PAGE_SHIFT));
+ rnd = get_random_long() % (1<<(23-PAGE_SHIFT));
else
- rnd = (unsigned long)get_random_int() % (1<<(30-PAGE_SHIFT));
+ rnd = get_random_long() % (1UL<<(30-PAGE_SHIFT));
return rnd << PAGE_SHIFT;
}
diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h
index ea91ddfe54eb..629c90865a07 100644
--- a/arch/s390/include/asm/fpu/internal.h
+++ b/arch/s390/include/asm/fpu/internal.h
@@ -40,6 +40,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
{
fpregs->pad = 0;
+ fpregs->fpc = fpu->fpc;
if (MACHINE_HAS_VX)
convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
else
@@ -49,6 +50,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
{
+ fpu->fpc = fpregs->fpc;
if (MACHINE_HAS_VX)
convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
else
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 66c94417c0ba..4af60374eba0 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -271,7 +271,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
/* Restore high gprs from signal stack */
if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
- sizeof(&sregs_ext->gprs_high)))
+ sizeof(sregs_ext->gprs_high)))
return -EFAULT;
for (i = 0; i < NUM_GPRS; i++)
*(__u32 *)&regs->gprs[i] = gprs_high[i];
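The compat_signal fix is the well-known sizeof-of-a-pointer bug: sizeof(&sregs_ext->gprs_high) is the size of a pointer (8 bytes on s390), so only the first two high-GPR words were copied instead of the whole array. A minimal reproduction with a stand-in struct (NUM_GPRS mirrors the s390 value of 16):

#include <stdio.h>

#define NUM_GPRS 16

struct sigregs_ext {
	unsigned int gprs_high[NUM_GPRS];
};

int main(void)
{
	struct sigregs_ext ext;

	printf("sizeof(&ext.gprs_high) = %zu (pointer!)\n",
	       sizeof(&ext.gprs_high));		/* 8 on LP64 */
	printf("sizeof(ext.gprs_high)  = %zu (whole array)\n",
	       sizeof(ext.gprs_high));		/* 64        */
	return 0;
}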
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index eaee14637d93..8496a074bd0e 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -24,7 +24,13 @@ LDFLAGS := -m elf32_sparc
export BITS := 32
UTS_MACHINE := sparc
+# We are adding -Wa,-Av8 to KBUILD_CFLAGS to deal with a specs bug in some
+# versions of gcc. Some gcc versions won't pass -Av8 to binutils when you
+# give -mcpu=v8. This silently worked with older binutils versions but
+# no longer does.
KBUILD_CFLAGS += -m32 -mcpu=v8 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
+KBUILD_CFLAGS += -Wa,-Av8
+
KBUILD_AFLAGS += -m32 -Wa,-Av8
else
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index 1c26d440d288..b6de8b10a55b 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -422,8 +422,9 @@
#define __NR_listen 354
#define __NR_setsockopt 355
#define __NR_mlock2 356
+#define __NR_copy_file_range 357
-#define NR_syscalls 357
+#define NR_syscalls 358
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 33c02b15f478..a83707c83be8 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -948,7 +948,24 @@ linux_syscall_trace:
cmp %o0, 0
bne 3f
mov -ENOSYS, %o0
+
+ /* Syscall tracing can modify the registers. */
+ ld [%sp + STACKFRAME_SZ + PT_G1], %g1
+ sethi %hi(sys_call_table), %l7
+ ld [%sp + STACKFRAME_SZ + PT_I0], %i0
+ or %l7, %lo(sys_call_table), %l7
+ ld [%sp + STACKFRAME_SZ + PT_I1], %i1
+ ld [%sp + STACKFRAME_SZ + PT_I2], %i2
+ ld [%sp + STACKFRAME_SZ + PT_I3], %i3
+ ld [%sp + STACKFRAME_SZ + PT_I4], %i4
+ ld [%sp + STACKFRAME_SZ + PT_I5], %i5
+ cmp %g1, NR_syscalls
+ bgeu 3f
+ mov -ENOSYS, %o0
+
+ sll %g1, 2, %l4
mov %i0, %o0
+ ld [%l7 + %l4], %l7
mov %i1, %o1
mov %i2, %o2
mov %i3, %o3
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
index afbaba52d2f1..d127130bf424 100644
--- a/arch/sparc/kernel/hvcalls.S
+++ b/arch/sparc/kernel/hvcalls.S
@@ -338,8 +338,9 @@ ENTRY(sun4v_mach_set_watchdog)
mov %o1, %o4
mov HV_FAST_MACH_SET_WATCHDOG, %o5
ta HV_FAST_TRAP
+ brnz,a,pn %o4, 0f
stx %o1, [%o4]
- retl
+0: retl
nop
ENDPROC(sun4v_mach_set_watchdog)
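The hvcalls hunk uses a branch with the annul bit (brnz,a) so the status store only executes when the caller actually supplied a result buffer; previously a NULL pointer would have been dereferenced unconditionally. The equivalent guard in C, hedged as a sketch (hv_set_watchdog is a stub standing in for the sun4v fast trap):

#include <stdio.h>

/* Stub standing in for the sun4v hypervisor fast trap. */
static long hv_set_watchdog(unsigned long timeout, unsigned long *actual)
{
	unsigned long granted = timeout;	/* pretend HV granted it as-is */

	if (actual)				/* the brnz,a guard, in C form */
		*actual = granted;
	return 0;
}

int main(void)
{
	unsigned long got;

	hv_set_watchdog(5000, &got);		/* caller wants the value back */
	printf("granted: %lu\n", got);

	hv_set_watchdog(0, NULL);		/* caller doesn't care: no store */
	puts("NULL out-pointer handled");
	return 0;
}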
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index d88beff47bab..39aaec173f66 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -52,7 +52,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
unsigned char fenab;
int err;
- flush_user_windows();
+ synchronize_user_stack();
if (get_thread_wsaved() ||
(((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
(!__access_ok(ucp, sizeof(*ucp))))
diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c
index a92d5d2c46a3..9e034f29dcc5 100644
--- a/arch/sparc/kernel/sparc_ksyms_64.c
+++ b/arch/sparc/kernel/sparc_ksyms_64.c
@@ -37,6 +37,7 @@ EXPORT_SYMBOL(sun4v_niagara_getperf);
EXPORT_SYMBOL(sun4v_niagara_setperf);
EXPORT_SYMBOL(sun4v_niagara2_getperf);
EXPORT_SYMBOL(sun4v_niagara2_setperf);
+EXPORT_SYMBOL(sun4v_mach_set_watchdog);
/* from hweight.S */
EXPORT_SYMBOL(__arch_hweight8);
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index c690c8e16a96..b489e9759518 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -264,7 +264,7 @@ static unsigned long mmap_rnd(void)
unsigned long rnd = 0UL;
if (current->flags & PF_RANDOMIZE) {
- unsigned long val = get_random_int();
+ unsigned long val = get_random_long();
if (test_thread_flag(TIF_32BIT))
rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
else
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index bb0008927598..c4a1b5c40e4e 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -158,7 +158,25 @@ linux_syscall_trace32:
add %sp, PTREGS_OFF, %o0
brnz,pn %o0, 3f
mov -ENOSYS, %o0
+
+ /* Syscall tracing can modify the registers. */
+ ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
+ sethi %hi(sys_call_table32), %l7
+ ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
+ or %l7, %lo(sys_call_table32), %l7
+ ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
+ ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
+ ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
+ ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
+ ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
+
+ cmp %g1, NR_syscalls
+ bgeu,pn %xcc, 3f
+ mov -ENOSYS, %o0
+
+ sll %g1, 2, %l4
srl %i0, 0, %o0
+ lduw [%l7 + %l4], %l7
srl %i4, 0, %o4
srl %i1, 0, %o1
srl %i2, 0, %o2
@@ -170,7 +188,25 @@ linux_syscall_trace:
add %sp, PTREGS_OFF, %o0
brnz,pn %o0, 3f
mov -ENOSYS, %o0
+
+ /* Syscall tracing can modify the registers. */
+ ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
+ sethi %hi(sys_call_table64), %l7
+ ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
+ or %l7, %lo(sys_call_table64), %l7
+ ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
+ ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
+ ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
+ ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
+ ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
+
+ cmp %g1, NR_syscalls
+ bgeu,pn %xcc, 3f
+ mov -ENOSYS, %o0
+
+ sll %g1, 2, %l4
mov %i0, %o0
+ lduw [%l7 + %l4], %l7
mov %i1, %o1
mov %i2, %o2
mov %i3, %o3
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index e663b6c78de2..6c3dd6c52f8b 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -88,4 +88,4 @@ sys_call_table:
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-/*355*/ .long sys_setsockopt, sys_mlock2
+/*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 1557121f4cdc..12b524cfcfa0 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -89,7 +89,7 @@ sys_call_table32:
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
- .word compat_sys_setsockopt, sys_mlock2
+ .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range
#endif /* CONFIG_COMPAT */
@@ -170,4 +170,4 @@ sys_call_table:
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
- .word sys_setsockopt, sys_mlock2
+ .word sys_setsockopt, sys_mlock2, sys_copy_file_range
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index 9bdf67a092a5..b60a9f8cda75 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -12,6 +12,7 @@
#include <skas.h>
void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
static void kill_off_processes(void)
{
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index fc8be0e3a4ff..57acbd67d85d 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
struct ksignal ksig;
int handled_sig = 0;
- if (get_signal(&ksig)) {
+ while (get_signal(&ksig)) {
handled_sig = 1;
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 77d8c5112900..bb3e376d0f33 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -294,6 +294,7 @@ sysenter_past_esp:
pushl $__USER_DS /* pt_regs->ss */
pushl %ebp /* pt_regs->sp (stashed in bp) */
pushfl /* pt_regs->flags (except IF = 0) */
+ ASM_CLAC /* Clear AC after saving FLAGS */
orl $X86_EFLAGS_IF, (%esp) /* Fix IF */
pushl $__USER_CS /* pt_regs->cs */
pushl $0 /* pt_regs->ip = 0 (placeholder) */
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index ff1c6d61f332..3c990eeee40b 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -261,6 +261,7 @@ ENTRY(entry_INT80_compat)
* Interrupts are off on entry.
*/
PARAVIRT_ADJUST_EXCEPTION_FRAME
+ ASM_CLAC /* Do this early to minimize exposure */
SWAPGS
/*
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 46873fbd44e1..d08eacd298c2 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
+extern bool mp_should_keep_irq(struct device *dev);
+
struct pci_raw_ops {
int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val);
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index f5dcb5204dcd..3fe0eac59462 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -48,20 +48,28 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
switch (n) {
case 1:
+ __uaccess_begin();
__put_user_size(*(u8 *)from, (u8 __user *)to,
1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin();
__put_user_size(*(u16 *)from, (u16 __user *)to,
2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin();
__put_user_size(*(u32 *)from, (u32 __user *)to,
4, ret, 4);
+ __uaccess_end();
return ret;
case 8:
+ __uaccess_begin();
__put_user_size(*(u64 *)from, (u64 __user *)to,
8, ret, 8);
+ __uaccess_end();
return ret;
}
}
@@ -103,13 +111,19 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
switch (n) {
case 1:
+ __uaccess_begin();
__get_user_size(*(u8 *)to, from, 1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin();
__get_user_size(*(u16 *)to, from, 2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin();
__get_user_size(*(u32 *)to, from, 4, ret, 4);
+ __uaccess_end();
return ret;
}
}
@@ -148,13 +162,19 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
switch (n) {
case 1:
+ __uaccess_begin();
__get_user_size(*(u8 *)to, from, 1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin();
__get_user_size(*(u16 *)to, from, 2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin();
__get_user_size(*(u32 *)to, from, 4, ret, 4);
+ __uaccess_end();
return ret;
}
}
@@ -170,13 +190,19 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
switch (n) {
case 1:
+ __uaccess_begin();
__get_user_size(*(u8 *)to, from, 1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin();
__get_user_size(*(u16 *)to, from, 2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin();
__get_user_size(*(u32 *)to, from, 4, ret, 4);
+ __uaccess_end();
return ret;
}
}
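These uaccess_32.h hunks make the constant-size fast paths open SMAP windows explicitly: each __get_user_size()/__put_user_size() is now bracketed by __uaccess_begin()/__uaccess_end() (STAC/CLAC on SMAP-capable CPUs) so user memory is accessible only for the duration of the access itself. A hedged sketch of the bracketing discipline, with printing no-op stubs in place of the real primitives:

#include <stdio.h>
#include <string.h>

/* Stubs standing in for stac()/clac() on SMAP hardware. */
static void uaccess_begin(void) { puts("STAC: user access enabled"); }
static void uaccess_end(void)   { puts("CLAC: user access disabled"); }

static int get_user_u32(unsigned int *dst, const unsigned int *usrc)
{
	int ret = 0;

	uaccess_begin();	/* open the window...          */
	memcpy(dst, usrc, 4);	/* ...only around the access   */
	uaccess_end();		/* ...and close it immediately */
	return ret;
}

int main(void)
{
	unsigned int user_word = 42, kernel_word;

	if (get_user_u32(&kernel_word, &user_word) == 0)
		printf("copied %u\n", kernel_word);
	return 0;
}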
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
index 968d57dd54c9..f320ee32d5a1 100644
--- a/arch/x86/include/asm/xen/pci.h
+++ b/arch/x86/include/asm/xen/pci.h
@@ -57,7 +57,7 @@ static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
{
if (xen_pci_frontend && xen_pci_frontend->enable_msi)
return xen_pci_frontend->enable_msi(dev, vectors);
- return -ENODEV;
+ return -ENOSYS;
}
static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
{
@@ -69,7 +69,7 @@ static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
{
if (xen_pci_frontend && xen_pci_frontend->enable_msix)
return xen_pci_frontend->enable_msix(dev, vectors, nvec);
- return -ENODEV;
+ return -ENOSYS;
}
static inline void xen_pci_frontend_disable_msix(struct pci_dev *dev)
{
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index d1daead5fcdd..adb3eaf8fe2a 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -16,6 +16,7 @@
#include <asm/cacheflush.h>
#include <asm/realmode.h>
+#include <linux/ftrace.h>
#include "../../realmode/rm/wakeup.h"
#include "sleep.h"
@@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void)
saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */
+ /*
+ * Pause/unpause graph tracing around do_suspend_lowlevel as it has
+ * inconsistent call/return info after it jumps to the wakeup vector.
+ */
+ pause_graph_tracing();
do_suspend_lowlevel();
+ unpause_graph_tracing();
return 0;
}
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 1505587d06e9..b9b09fec173b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -650,10 +650,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
u16 sel;
la = seg_base(ctxt, addr.seg) + addr.ea;
- *linear = la;
*max_size = 0;
switch (mode) {
case X86EMUL_MODE_PROT64:
+ *linear = la;
if (is_noncanonical_address(la))
goto bad;
@@ -662,6 +662,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
goto bad;
break;
default:
+ *linear = la = (u32)la;
usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
addr.seg);
if (!usable)
@@ -689,7 +690,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
if (size > *max_size)
goto bad;
}
- la &= (u32)-1;
break;
}
if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6c9fed957cce..2ce4f05e81d3 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -249,7 +249,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
return ret;
kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
- walker->ptes[level] = pte;
+ walker->ptes[level - 1] = pte;
}
return 0;
}
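The paging_tmpl fix is a one-based/zero-based mismatch: the walker stores entries in ptes[0..N-1] indexed by "level - 1" everywhere else, so writing the refreshed entry at ptes[level] both missed the slot being updated and could write one element past the array at the top level. A compact illustration of the convention (the array size and level numbering are illustrative):

#include <stdio.h>

#define PT_MAX_LEVEL 4

struct walker {
	unsigned long ptes[PT_MAX_LEVEL];	/* ptes[0] holds level 1 */
};

static void store_pte(struct walker *w, int level, unsigned long pte)
{
	/* level runs 1..PT_MAX_LEVEL, storage is 0-based: */
	w->ptes[level - 1] = pte;	/* ptes[level] would overflow
					   when level == PT_MAX_LEVEL */
}

int main(void)
{
	struct walker w = { { 0 } };

	store_pte(&w, PT_MAX_LEVEL, 0x1000);
	printf("top-level pte: %#lx\n", w.ptes[PT_MAX_LEVEL - 1]);
	return 0;
}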
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e2951b6edbbc..0ff453749a90 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -596,6 +596,8 @@ struct vcpu_vmx {
/* Support for PML */
#define PML_ENTITY_NUM 512
struct page *pml_pg;
+
+ u64 current_tsc_ratio;
};
enum segment_cache_field {
@@ -2127,14 +2129,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
- /* Setup TSC multiplier */
- if (cpu_has_vmx_tsc_scaling())
- vmcs_write64(TSC_MULTIPLIER,
- vcpu->arch.tsc_scaling_ratio);
-
vmx->loaded_vmcs->cpu = cpu;
}
+ /* Setup TSC multiplier */
+ if (kvm_has_tsc_control &&
+ vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
+ vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
+ vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
+ }
+
vmx_vcpu_pi_load(vcpu, cpu);
}
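The vmx hunk moves the TSC-multiplier setup out of the "VMCS migrated to a new CPU" branch and instead caches the last value written, so vmx_vcpu_load() pays for the vmcs_write64() only when the scaling ratio actually changed. A hedged sketch of caching a costly register write (the struct and function names are illustrative, and the "hardware" write is a printf):

#include <stdint.h>
#include <stdio.h>

struct vcpu_state {
	uint64_t current_tsc_ratio;	/* last value written to "hardware" */
};

static void expensive_reg_write(uint64_t val)
{
	printf("vmcs_write64(TSC_MULTIPLIER, %#llx)\n",
	       (unsigned long long)val);
}

static void vcpu_load(struct vcpu_state *v, uint64_t wanted_ratio)
{
	if (v->current_tsc_ratio != wanted_ratio) {
		v->current_tsc_ratio = wanted_ratio;
		expensive_reg_write(wanted_ratio);	/* only on change */
	}
}

int main(void)
{
	struct vcpu_state v = { 0 };

	vcpu_load(&v, 0x100000000ULL);	/* writes  */
	vcpu_load(&v, 0x100000000ULL);	/* skipped */
	vcpu_load(&v, 0x180000000ULL);	/* writes  */
	return 0;
}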
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4244c2baf57d..eaf6ee8c28b8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6618,12 +6618,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* KVM_DEBUGREG_WONT_EXIT again.
*/
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
- int i;
-
WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
kvm_x86_ops->sync_dirty_debug_regs(vcpu);
- for (i = 0; i < KVM_NR_DB_REGS; i++)
- vcpu->arch.eff_db[i] = vcpu->arch.db[i];
+ kvm_update_dr0123(vcpu);
+ kvm_update_dr6(vcpu);
+ kvm_update_dr7(vcpu);
+ vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
}
/*
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 96bd1e2bffaf..72bb52f93c3d 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -71,12 +71,12 @@ unsigned long arch_mmap_rnd(void)
if (mmap_is_ia32())
#ifdef CONFIG_COMPAT
- rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_compat_bits) - 1);
+ rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
#else
- rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1);
+ rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
#endif
else
- rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1);
+ rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
return rnd << PAGE_SHIFT;
}
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index b2fd67da1701..ef05755a1900 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
break;
}
- if (regno > nr_registers) {
+ if (regno >= nr_registers) {
WARN_ONCE(1, "decoded an instruction with an invalid register");
return -EINVAL;
}
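The MPX fix tightens an off-by-one bounds check: for a table of nr_registers entries the valid indices are 0..nr_registers-1, so "regno > nr_registers" let the exact value nr_registers through and read one element past the table. A minimal demonstration (the register table is illustrative):

#include <stdio.h>

static const char *reg_names[] = { "ax", "cx", "dx", "bx" };
#define NR_REGISTERS (sizeof(reg_names) / sizeof(reg_names[0]))

static const char *lookup(unsigned int regno)
{
	if (regno >= NR_REGISTERS)	/* ">" would admit regno == 4 */
		return NULL;
	return reg_names[regno];
}

int main(void)
{
	const char *name = lookup(4);

	printf("regno 3: %s\n", lookup(3));		/* "bx"   */
	printf("regno 4: %s\n", name ? name : "EINVAL");/* caught */
	return 0;
}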
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 2440814b0069..9cf96d82147a 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -419,24 +419,30 @@ pmd_t *lookup_pmd_address(unsigned long address)
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
unsigned long virt_addr = (unsigned long)__virt_addr;
- unsigned long phys_addr, offset;
+ phys_addr_t phys_addr;
+ unsigned long offset;
enum pg_level level;
pte_t *pte;
pte = lookup_address(virt_addr, &level);
BUG_ON(!pte);
+ /*
+ * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
+ * before being left-shifted PAGE_SHIFT bits -- this trick is to
+	 * before being left-shifted PAGE_SHIFT bits -- this cast is needed
+	 * to make 32-bit PAE kernels work correctly.
switch (level) {
case PG_LEVEL_1G:
- phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
+ phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
offset = virt_addr & ~PUD_PAGE_MASK;
break;
case PG_LEVEL_2M:
- phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
+ phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
offset = virt_addr & ~PMD_PAGE_MASK;
break;
default:
- phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+ phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
offset = virt_addr & ~PAGE_MASK;
}
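The pageattr fix addresses integer-width truncation on 32-bit PAE: pXX_pfn() returns unsigned long (32 bits there), so "pfn << PAGE_SHIFT" is evaluated at 32-bit width and drops the high bits of any physical address above 4 GiB; casting to the wider phys_addr_t first makes the shift happen at full width. A self-contained illustration using explicit fixed-width types:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;	/* 64-bit, as with CONFIG_PHYS_ADDR_T_64BIT */
#define PAGE_SHIFT 12

int main(void)
{
	uint32_t pfn = 0x500000;	/* frame at 20 GiB; fits a 32-bit pfn */

	phys_addr_t truncated = pfn << PAGE_SHIFT;		/* 32-bit shift */
	phys_addr_t correct   = (phys_addr_t)pfn << PAGE_SHIFT;	/* 64-bit shift */

	printf("without cast: %#llx\n", (unsigned long long)truncated);
	printf("with cast:    %#llx\n", (unsigned long long)correct);
	return 0;
}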
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 2879efc73a96..d34b5118b4e8 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -711,28 +711,22 @@ int pcibios_add_device(struct pci_dev *dev)
return 0;
}
-int pcibios_alloc_irq(struct pci_dev *dev)
+int pcibios_enable_device(struct pci_dev *dev, int mask)
{
- /*
- * If the PCI device was already claimed by core code and has
- * MSI enabled, probing of the pcibios IRQ will overwrite
- * dev->irq. So bail out if MSI is already enabled.
- */
- if (pci_dev_msi_enabled(dev))
- return -EBUSY;
+ int err;
- return pcibios_enable_irq(dev);
-}
+ if ((err = pci_enable_resources(dev, mask)) < 0)
+ return err;
-void pcibios_free_irq(struct pci_dev *dev)
-{
- if (pcibios_disable_irq)
- pcibios_disable_irq(dev);
+ if (!pci_dev_msi_enabled(dev))
+ return pcibios_enable_irq(dev);
+ return 0;
}
-int pcibios_enable_device(struct pci_dev *dev, int mask)
+void pcibios_disable_device (struct pci_dev *dev)
{
- return pci_enable_resources(dev, mask);
+ if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
+ pcibios_disable_irq(dev);
}
int pci_ext_cfg_avail(void)
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 0d24e7c10145..8b93e634af84 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -215,7 +215,7 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
int polarity;
int ret;
- if (pci_has_managed_irq(dev))
+ if (dev->irq_managed && dev->irq > 0)
return 0;
switch (intel_mid_identify_cpu()) {
@@ -256,13 +256,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
static void intel_mid_pci_irq_disable(struct pci_dev *dev)
{
- if (pci_has_managed_irq(dev)) {
+ if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
+ dev->irq > 0) {
mp_unmap_irq(dev->irq);
dev->irq_managed = 0;
- /*
- * Don't reset dev->irq here, otherwise
- * intel_mid_pci_irq_enable() will fail on next call.
- */
}
}
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 32e70343e6fd..9bd115484745 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1202,7 +1202,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
struct pci_dev *temp_dev;
int irq;
- if (pci_has_managed_irq(dev))
+ if (dev->irq_managed && dev->irq > 0)
return 0;
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
@@ -1230,7 +1230,8 @@ static int pirq_enable_irq(struct pci_dev *dev)
}
dev = temp_dev;
if (irq >= 0) {
- pci_set_managed_irq(dev, irq);
+ dev->irq_managed = 1;
+ dev->irq = irq;
dev_info(&dev->dev, "PCI->APIC IRQ transform: "
"INT %c -> IRQ %d\n", 'A' + pin - 1, irq);
return 0;
@@ -1256,10 +1257,24 @@ static int pirq_enable_irq(struct pci_dev *dev)
return 0;
}
+bool mp_should_keep_irq(struct device *dev)
+{
+ if (dev->power.is_prepared)
+ return true;
+#ifdef CONFIG_PM
+ if (dev->power.runtime_status == RPM_SUSPENDING)
+ return true;
+#endif
+
+ return false;
+}
+
static void pirq_disable_irq(struct pci_dev *dev)
{
- if (io_apic_assign_pci_irqs && pci_has_managed_irq(dev)) {
+ if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
+ dev->irq_managed && dev->irq) {
mp_unmap_irq(dev->irq);
- pci_reset_managed_irq(dev);
+ dev->irq = 0;
+ dev->irq_managed = 0;
}
}
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index ff31ab464213..beac4dfdade6 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -196,7 +196,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
return 0;
error:
- dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+ if (ret == -ENOSYS)
+ dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+ else if (ret)
+ dev_err(&dev->dev, "Xen PCI frontend error: %d!\n", ret);
free:
kfree(v);
return ret;
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
index c61b6c332e97..bfadcd0f4944 100644
--- a/arch/x86/platform/intel-quark/imr.c
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -592,14 +592,14 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
end = (unsigned long)__end_rodata - 1;
/*
- * Setup a locked IMR around the physical extent of the kernel
+ * Setup an unlocked IMR around the physical extent of the kernel
* from the beginning of the .text section to the end of the
* .rodata section as one physically contiguous block.
*
* We don't round up @size since it is already PAGE_SIZE aligned.
* See vmlinux.lds.S for details.
*/
- ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
+ ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
if (ret < 0) {
pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
size / 1024, start, end);
diff --git a/arch/x86/um/os-Linux/task_size.c b/arch/x86/um/os-Linux/task_size.c
index 8502ad30e61b..5adb6a2fd117 100644
--- a/arch/x86/um/os-Linux/task_size.c
+++ b/arch/x86/um/os-Linux/task_size.c
@@ -109,7 +109,7 @@ unsigned long os_get_top_address(void)
exit(1);
}
- printf("0x%x\n", bottom << UM_KERN_PAGE_SHIFT);
+ printf("0x%lx\n", bottom << UM_KERN_PAGE_SHIFT);
printf("Locating the top of the address space ... ");
fflush(stdout);
@@ -134,7 +134,7 @@ out:
exit(1);
}
top <<= UM_KERN_PAGE_SHIFT;
- printf("0x%x\n", top);
+ printf("0x%lx\n", top);
return top;
}
diff --git a/block/Kconfig b/block/Kconfig
index 161491d0a879..0363cd731320 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -88,6 +88,19 @@ config BLK_DEV_INTEGRITY
T10/SCSI Data Integrity Field or the T13/ATA External Path
Protection. If in doubt, say N.
+config BLK_DEV_DAX
+ bool "Block device DAX support"
+ depends on FS_DAX
+ depends on BROKEN
+ help
+ When DAX support is available (CONFIG_FS_DAX) raw block
+ devices can also support direct userspace access to the
+ storage capacity via MMAP(2) similar to a file on a
+ DAX-enabled filesystem. However, the DAX I/O-path disables
+ some standard I/O-statistics, and the MMAP(2) path has some
+ operational differences due to bypassing the page
+ cache. If in doubt, say N.
+
config BLK_DEV_THROTTLING
bool "Block layer bio throttling support"
depends on BLK_CGROUP=y
diff --git a/block/blk-map.c b/block/blk-map.c
index f565e11f465a..a54f0543b956 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -57,6 +57,49 @@ static int __blk_rq_unmap_user(struct bio *bio)
return ret;
}
+static int __blk_rq_map_user_iov(struct request *rq,
+ struct rq_map_data *map_data, struct iov_iter *iter,
+ gfp_t gfp_mask, bool copy)
+{
+ struct request_queue *q = rq->q;
+ struct bio *bio, *orig_bio;
+ int ret;
+
+ if (copy)
+ bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
+ else
+ bio = bio_map_user_iov(q, iter, gfp_mask);
+
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ if (map_data && map_data->null_mapped)
+ bio_set_flag(bio, BIO_NULL_MAPPED);
+
+ iov_iter_advance(iter, bio->bi_iter.bi_size);
+ if (map_data)
+ map_data->offset += bio->bi_iter.bi_size;
+
+ orig_bio = bio;
+ blk_queue_bounce(q, &bio);
+
+ /*
+ * We link the bounce buffer in and could have to traverse it
+ * later so we have to get a ref to prevent it from being freed
+ */
+ bio_get(bio);
+
+ ret = blk_rq_append_bio(q, rq, bio);
+ if (ret) {
+ bio_endio(bio);
+ __blk_rq_unmap_user(orig_bio);
+ bio_put(bio);
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted
@@ -82,10 +125,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data,
const struct iov_iter *iter, gfp_t gfp_mask)
{
- struct bio *bio;
- int unaligned = 0;
- struct iov_iter i;
struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
+ bool copy = (q->dma_pad_mask & iter->count) || map_data;
+ struct bio *bio = NULL;
+ struct iov_iter i;
+ int ret;
if (!iter || !iter->count)
return -EINVAL;
@@ -101,42 +145,29 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
*/
if ((uaddr & queue_dma_alignment(q)) ||
iovec_gap_to_prv(q, &prv, &iov))
- unaligned = 1;
+ copy = true;
prv.iov_base = iov.iov_base;
prv.iov_len = iov.iov_len;
}
- if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
- bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
- else
- bio = bio_map_user_iov(q, iter, gfp_mask);
-
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- if (map_data && map_data->null_mapped)
- bio_set_flag(bio, BIO_NULL_MAPPED);
-
- if (bio->bi_iter.bi_size != iter->count) {
- /*
- * Grab an extra reference to this bio, as bio_unmap_user()
- * expects to be able to drop it twice as it happens on the
- * normal IO completion path
- */
- bio_get(bio);
- bio_endio(bio);
- __blk_rq_unmap_user(bio);
- return -EINVAL;
- }
+ i = *iter;
+ do {
+		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
+ if (ret)
+ goto unmap_rq;
+ if (!bio)
+ bio = rq->bio;
+ } while (iov_iter_count(&i));
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
-
- blk_queue_bounce(q, &bio);
- bio_get(bio);
- blk_rq_bio_prep(q, rq, bio);
return 0;
+
+unmap_rq:
+ __blk_rq_unmap_user(bio);
+ rq->bio = NULL;
+ return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 888a7fec81f7..261353166dcf 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -304,7 +304,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
struct bio_vec end_bv = { NULL }, nxt_bv;
- struct bvec_iter iter;
if (!blk_queue_cluster(q))
return 0;
@@ -316,11 +315,8 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
if (!bio_has_data(bio))
return 1;
- bio_for_each_segment(end_bv, bio, iter)
- if (end_bv.bv_len == iter.bi_size)
- break;
-
- nxt_bv = bio_iovec(nxt);
+ bio_get_last_bvec(bio, &end_bv);
+ bio_get_first_bvec(nxt, &nxt_bv);
if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
return 0;
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index ad6d8c6b777e..35947ac87644 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -469,37 +469,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
nfit_mem->bdw = NULL;
}
-static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
+static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
struct nfit_memdev *nfit_memdev;
struct nfit_flush *nfit_flush;
- struct nfit_dcr *nfit_dcr;
struct nfit_bdw *nfit_bdw;
struct nfit_idt *nfit_idt;
u16 idt_idx, range_index;
- list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
- if (nfit_dcr->dcr->region_index != dcr)
- continue;
- nfit_mem->dcr = nfit_dcr->dcr;
- break;
- }
-
- if (!nfit_mem->dcr) {
- dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
- spa->range_index, __to_nfit_memdev(nfit_mem)
- ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
- return -ENODEV;
- }
-
- /*
- * We've found enough to create an nvdimm, optionally
- * find an associated BDW
- */
- list_add(&nfit_mem->list, &acpi_desc->dimms);
-
list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
if (nfit_bdw->bdw->region_index != dcr)
continue;
@@ -508,12 +487,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
}
if (!nfit_mem->bdw)
- return 0;
+ return;
nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
if (!nfit_mem->spa_bdw)
- return 0;
+ return;
range_index = nfit_mem->spa_bdw->range_index;
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -538,8 +517,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
}
break;
}
-
- return 0;
}
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
@@ -548,7 +525,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, *found;
struct nfit_memdev *nfit_memdev;
int type = nfit_spa_type(spa);
- u16 dcr;
switch (type) {
case NFIT_SPA_DCR:
@@ -559,14 +535,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
}
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
- int rc;
+ struct nfit_dcr *nfit_dcr;
+ u32 device_handle;
+ u16 dcr;
if (nfit_memdev->memdev->range_index != spa->range_index)
continue;
found = NULL;
dcr = nfit_memdev->memdev->region_index;
+ device_handle = nfit_memdev->memdev->device_handle;
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
- if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
+ if (__to_nfit_memdev(nfit_mem)->device_handle
+ == device_handle) {
found = nfit_mem;
break;
}
@@ -579,6 +559,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
if (!nfit_mem)
return -ENOMEM;
INIT_LIST_HEAD(&nfit_mem->list);
+ list_add(&nfit_mem->list, &acpi_desc->dimms);
+ }
+
+ list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
+ if (nfit_dcr->dcr->region_index != dcr)
+ continue;
+ /*
+ * Record the control region for the dimm. For
+ * the ACPI 6.1 case, where there are separate
+ * control regions for the pmem vs blk
+ * interfaces, be sure to record the extended
+ * blk details.
+ */
+ if (!nfit_mem->dcr)
+ nfit_mem->dcr = nfit_dcr->dcr;
+ else if (nfit_mem->dcr->windows == 0
+ && nfit_dcr->dcr->windows)
+ nfit_mem->dcr = nfit_dcr->dcr;
+ break;
+ }
+
+ if (dcr && !nfit_mem->dcr) {
+ dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
+ spa->range_index, dcr);
+ return -ENODEV;
}
if (type == NFIT_SPA_DCR) {
@@ -595,6 +600,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
nfit_mem->idt_dcr = nfit_idt->idt;
break;
}
+ nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
} else {
/*
* A single dimm may belong to multiple SPA-PM
@@ -603,13 +609,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
*/
nfit_mem->memdev_pmem = nfit_memdev->memdev;
}
-
- if (found)
- continue;
-
- rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
- if (rc)
- return rc;
}
return 0;
@@ -1504,9 +1503,7 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
case 1:
/* ARS unsupported, but we should never get here */
return 0;
- case 2:
- return -EINVAL;
- case 3:
+ case 6:
/* ARS is in progress */
msleep(1000);
break;
@@ -1517,13 +1514,13 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
}
static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
- struct nd_cmd_ars_status *cmd)
+ struct nd_cmd_ars_status *cmd, u32 size)
{
int rc;
while (1) {
rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd,
- sizeof(*cmd));
+ size);
if (rc || cmd->status & 0xffff)
return -ENXIO;
@@ -1538,6 +1535,8 @@ static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
case 2:
/* No ARS performed for the current boot */
return 0;
+ case 3:
+ /* TODO: error list overflow support */
default:
return -ENXIO;
}
@@ -1581,6 +1580,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
struct nd_cmd_ars_start *ars_start = NULL;
struct nd_cmd_ars_cap *ars_cap = NULL;
u64 start, len, cur, remaining;
+ u32 ars_status_size;
int rc;
ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL);
@@ -1590,14 +1590,21 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
start = ndr_desc->res->start;
len = ndr_desc->res->end - ndr_desc->res->start + 1;
+ /*
+ * If ARS is unimplemented, unsupported, or if the 'Persistent Memory
+ * Scrub' flag in extended status is not set, skip this but continue
+ * initialization
+ */
rc = ars_get_cap(nd_desc, ars_cap, start, len);
+ if (rc == -ENOTTY) {
+ dev_dbg(acpi_desc->dev,
+ "Address Range Scrub is not implemented, won't create an error list\n");
+ rc = 0;
+ goto out;
+ }
if (rc)
goto out;
- /*
- * If ARS is unsupported, or if the 'Persistent Memory Scrub' flag in
- * extended status is not set, skip this but continue initialization
- */
if ((ars_cap->status & 0xffff) ||
!(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) {
dev_warn(acpi_desc->dev,
@@ -1610,14 +1617,14 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
* Check if a full-range ARS has been run. If so, use those results
* without having to start a new ARS.
*/
- ars_status = kzalloc(ars_cap->max_ars_out + sizeof(*ars_status),
- GFP_KERNEL);
+ ars_status_size = ars_cap->max_ars_out;
+ ars_status = kzalloc(ars_status_size, GFP_KERNEL);
if (!ars_status) {
rc = -ENOMEM;
goto out;
}
- rc = ars_get_status(nd_desc, ars_status);
+ rc = ars_get_status(nd_desc, ars_status, ars_status_size);
if (rc)
goto out;
@@ -1647,7 +1654,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
if (rc)
goto out;
- rc = ars_get_status(nd_desc, ars_status);
+ rc = ars_get_status(nd_desc, ars_status, ars_status_size);
if (rc)
goto out;
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index d30184c7f3bc..c8e169e46673 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -406,7 +406,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return 0;
}
- if (pci_has_managed_irq(dev))
+ if (dev->irq_managed && dev->irq > 0)
return 0;
entry = acpi_pci_irq_lookup(dev, pin);
@@ -451,7 +451,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
kfree(entry);
return rc;
}
- pci_set_managed_irq(dev, rc);
+ dev->irq = rc;
+ dev->irq_managed = 1;
if (link)
snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -474,9 +475,17 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
u8 pin;
pin = dev->pin;
- if (!pin || !pci_has_managed_irq(dev))
+ if (!pin || !dev->irq_managed || dev->irq <= 0)
return;
+ /* Keep IOAPIC pin configuration when suspending */
+ if (dev->dev.power.is_prepared)
+ return;
+#ifdef CONFIG_PM
+ if (dev->dev.power.runtime_status == RPM_SUSPENDING)
+ return;
+#endif
+
entry = acpi_pci_irq_lookup(dev, pin);
if (!entry)
return;
@@ -496,6 +505,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
if (gsi >= 0) {
acpi_unregister_gsi(gsi);
- pci_reset_managed_irq(dev);
+ dev->irq_managed = 0;
}
}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index fa2863567eed..ededa909df2f 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -4,7 +4,6 @@
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
* Copyright (C) 2002 Dominik Brodowski <[email protected]>
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -438,6 +437,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
* enabled system.
*/
+#define ACPI_MAX_IRQS 256
#define ACPI_MAX_ISA_IRQ 16
#define PIRQ_PENALTY_PCI_AVAILABLE (0)
@@ -447,7 +447,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
#define PIRQ_PENALTY_ISA_USED (16*16*16*16*16)
#define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16)
-static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = {
+static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */
@@ -464,68 +464,9 @@ static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = {
PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */
PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */
PIRQ_PENALTY_ISA_USED, /* IRQ15 ide1 */
+ /* >IRQ15 */
};
-struct irq_penalty_info {
- int irq;
- int penalty;
- struct list_head node;
-};
-
-static LIST_HEAD(acpi_irq_penalty_list);
-
-static int acpi_irq_get_penalty(int irq)
-{
- struct irq_penalty_info *irq_info;
-
- if (irq < ACPI_MAX_ISA_IRQ)
- return acpi_irq_isa_penalty[irq];
-
- list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
- if (irq_info->irq == irq)
- return irq_info->penalty;
- }
-
- return 0;
-}
-
-static int acpi_irq_set_penalty(int irq, int new_penalty)
-{
- struct irq_penalty_info *irq_info;
-
- /* see if this is a ISA IRQ */
- if (irq < ACPI_MAX_ISA_IRQ) {
- acpi_irq_isa_penalty[irq] = new_penalty;
- return 0;
- }
-
- /* next, try to locate from the dynamic list */
- list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
- if (irq_info->irq == irq) {
- irq_info->penalty = new_penalty;
- return 0;
- }
- }
-
- /* nope, let's allocate a slot for this IRQ */
- irq_info = kzalloc(sizeof(*irq_info), GFP_KERNEL);
- if (!irq_info)
- return -ENOMEM;
-
- irq_info->irq = irq;
- irq_info->penalty = new_penalty;
- list_add_tail(&irq_info->node, &acpi_irq_penalty_list);
-
- return 0;
-}
-
-static void acpi_irq_add_penalty(int irq, int penalty)
-{
- int curpen = acpi_irq_get_penalty(irq);
-
- acpi_irq_set_penalty(irq, curpen + penalty);
-}
-
int __init acpi_irq_penalty_init(void)
{
struct acpi_pci_link *link;
@@ -546,16 +487,15 @@ int __init acpi_irq_penalty_init(void)
link->irq.possible_count;
for (i = 0; i < link->irq.possible_count; i++) {
- if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ) {
- int irqpos = link->irq.possible[i];
-
- acpi_irq_add_penalty(irqpos, penalty);
- }
+ if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ)
+				acpi_irq_penalty[link->irq.possible[i]] +=
+								penalty;
}
} else if (link->irq.active) {
- acpi_irq_add_penalty(link->irq.active,
- PIRQ_PENALTY_PCI_POSSIBLE);
+ acpi_irq_penalty[link->irq.active] +=
+ PIRQ_PENALTY_PCI_POSSIBLE;
}
}
@@ -607,12 +547,12 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
* the use of IRQs 9, 10, 11, and >15.
*/
for (i = (link->irq.possible_count - 1); i >= 0; i--) {
- if (acpi_irq_get_penalty(irq) >
- acpi_irq_get_penalty(link->irq.possible[i]))
+ if (acpi_irq_penalty[irq] >
+ acpi_irq_penalty[link->irq.possible[i]])
irq = link->irq.possible[i];
}
}
- if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) {
+ if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
"Try pci=noacpi or acpi=off\n",
acpi_device_name(link->device),
@@ -628,8 +568,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
acpi_device_bid(link->device));
return -ENODEV;
} else {
- acpi_irq_add_penalty(link->irq.active, PIRQ_PENALTY_PCI_USING);
-
+ acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING;
printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active);
@@ -839,7 +778,7 @@ static void acpi_pci_link_remove(struct acpi_device *device)
}
/*
- * modify penalty from cmdline
+ * modify acpi_irq_penalty[] from cmdline
*/
static int __init acpi_irq_penalty_update(char *str, int used)
{
@@ -857,10 +796,13 @@ static int __init acpi_irq_penalty_update(char *str, int used)
if (irq < 0)
continue;
+ if (irq >= ARRAY_SIZE(acpi_irq_penalty))
+ continue;
+
if (used)
- acpi_irq_add_penalty(irq, PIRQ_PENALTY_ISA_USED);
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
else
- acpi_irq_set_penalty(irq, PIRQ_PENALTY_PCI_AVAILABLE);
+ acpi_irq_penalty[irq] = PIRQ_PENALTY_PCI_AVAILABLE;
if (retval != 2) /* no next number */
break;
@@ -877,15 +819,18 @@ static int __init acpi_irq_penalty_update(char *str, int used)
*/
void acpi_penalize_isa_irq(int irq, int active)
{
- if (irq >= 0)
- acpi_irq_add_penalty(irq, active ?
- PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
+ if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+ if (active)
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
+ else
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+ }
}
bool acpi_isa_irq_available(int irq)
{
- return irq >= 0 &&
- (acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
+ return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
+ acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
}
/*
@@ -895,18 +840,13 @@ bool acpi_isa_irq_available(int irq)
*/
void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
{
- int penalty;
-
- if (irq < 0)
- return;
-
- if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
- polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
- penalty = PIRQ_PENALTY_ISA_ALWAYS;
- else
- penalty = PIRQ_PENALTY_PCI_USING;
-
- acpi_irq_add_penalty(irq, penalty);
+ if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+ if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
+ polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
+ else
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+ }
}
/*
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a39e85f9efa9..7d00b7a015ea 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(cookie);
list_for_each_entry(w, &proc->delivered_death, entry) {
struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 546a3692774f..146dc0b8ec61 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -367,15 +367,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1325,6 +1331,44 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
{}
#endif
+#ifdef CONFIG_ARM64
+/*
+ * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
+ * The workaround is to make sure all pending IRQs are serviced before
+ * leaving the handler.
+ */
+static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct ahci_host_priv *hpriv;
+ unsigned int rc = 0;
+ void __iomem *mmio;
+ u32 irq_stat, irq_masked;
+ unsigned int handled = 1;
+
+ VPRINTK("ENTER\n");
+ hpriv = host->private_data;
+ mmio = hpriv->mmio;
+ irq_stat = readl(mmio + HOST_IRQ_STAT);
+ if (!irq_stat)
+ return IRQ_NONE;
+
+ do {
+ irq_masked = irq_stat & hpriv->port_map;
+ spin_lock(&host->lock);
+ rc = ahci_handle_port_intr(host, irq_masked);
+ if (!rc)
+ handled = 0;
+ writel(irq_stat, mmio + HOST_IRQ_STAT);
+ irq_stat = readl(mmio + HOST_IRQ_STAT);
+ spin_unlock(&host->lock);
+ } while (irq_stat);
+ VPRINTK("EXIT\n");
+
+ return IRQ_RETVAL(handled);
+}
+#endif
+
/*
* ahci_init_msix() - optionally enable per-port MSI-X otherwise defer
* to single msi.
@@ -1560,6 +1604,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ahci_broken_devslp(pdev))
hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
+#ifdef CONFIG_ARM64
+ if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
+ hpriv->irq_handler = ahci_thunderx_irq_handler;
+#endif
+
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index a44c75d4c284..167ba7e3b92e 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -240,8 +240,7 @@ enum {
error-handling stage) */
AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */
AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
- AHCI_HFLAG_EDGE_IRQ = (1 << 19), /* HOST_IRQ_STAT behaves as
- Edge Triggered */
+
#ifdef CONFIG_PCI_MSI
AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */
AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */
@@ -361,6 +360,7 @@ struct ahci_host_priv {
* be overridden anytime before the host is activated.
*/
void (*start_engine)(struct ata_port *ap);
+ irqreturn_t (*irq_handler)(int irq, void *dev_instance);
};
#ifdef CONFIG_PCI_MSI
@@ -424,6 +424,7 @@ int ahci_reset_em(struct ata_host *host);
void ahci_print_info(struct ata_host *host, const char *scc_s);
int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht);
void ahci_error_handler(struct ata_port *ap);
+u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked);
static inline void __iomem *__ahci_port_base(struct ata_host *host,
unsigned int port_no)
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index e2c6d9e0c5ac..8e3f7faf00d3 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -548,6 +548,88 @@ softreset_retry:
return rc;
}
+/**
+ * xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
+ * @host: Host that received the irq
+ * @irq_masked: HOST_IRQ_STAT value
+ *
+ * For hardware with a broken edge-triggered latch, the HOST_IRQ_STAT
+ * register misses an edge interrupt when the clearing of HOST_IRQ_STAT
+ * and the hardware's update of PORT_IRQ_STAT land on the same clock
+ * cycle. The algorithm below outlines the workaround.
+ *
+ * 1. Read the HOST_IRQ_STAT register and save its state.
+ * 2. Clear the HOST_IRQ_STAT register.
+ * 3. Read back the HOST_IRQ_STAT register.
+ * 4. If the HOST_IRQ_STAT register reads zero, check the PORT_IRQ_STAT
+ *    register of every port not already flagged in step 1 to see
+ *    whether an interrupt triggered in the meantime; otherwise go to
+ *    step 6.
+ * 5. For each such port whose PORT_IRQ_STAT register is non-zero,
+ *    update the HOST_IRQ_STAT state saved in step 1.
+ * 6. Handle port interrupts.
+ * 7. Exit.
+ */
+static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
+ u32 irq_masked)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *port_mmio;
+ int i;
+
+ if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
+ for (i = 0; i < host->n_ports; i++) {
+ if (irq_masked & (1 << i))
+ continue;
+
+ port_mmio = ahci_port_base(host->ports[i]);
+ if (readl(port_mmio + PORT_IRQ_STAT))
+ irq_masked |= (1 << i);
+ }
+ }
+
+ return ahci_handle_port_intr(host, irq_masked);
+}
+
+static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct ahci_host_priv *hpriv;
+ unsigned int rc = 0;
+ void __iomem *mmio;
+ u32 irq_stat, irq_masked;
+
+ VPRINTK("ENTER\n");
+
+ hpriv = host->private_data;
+ mmio = hpriv->mmio;
+
+ /* sigh. 0xffffffff is a valid return from h/w */
+ irq_stat = readl(mmio + HOST_IRQ_STAT);
+ if (!irq_stat)
+ return IRQ_NONE;
+
+ irq_masked = irq_stat & hpriv->port_map;
+
+ spin_lock(&host->lock);
+
+ /*
+ * HOST_IRQ_STAT behaves as edge triggered latch meaning that
+ * it should be cleared before all the port events are cleared.
+ */
+ writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+ rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
+
+ spin_unlock(&host->lock);
+
+ VPRINTK("EXIT\n");
+
+ return IRQ_RETVAL(rc);
+}
+
static struct ata_port_operations xgene_ahci_v1_ops = {
.inherits = &ahci_ops,
.host_stop = xgene_ahci_host_stop,
@@ -779,7 +861,8 @@ skip_clk_phy:
hpriv->flags = AHCI_HFLAG_NO_NCQ;
break;
case XGENE_AHCI_V2:
- hpriv->flags |= AHCI_HFLAG_YES_FBS | AHCI_HFLAG_EDGE_IRQ;
+ hpriv->flags |= AHCI_HFLAG_YES_FBS;
+ hpriv->irq_handler = xgene_ahci_irq_intr;
break;
default:
break;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 402967902cbe..85ea5142a095 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -113,6 +113,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
const char *buf, size_t size);
static ssize_t ahci_show_em_supported(struct device *dev,
struct device_attribute *attr, char *buf);
+static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);
static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
@@ -512,6 +513,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
if (!hpriv->start_engine)
hpriv->start_engine = ahci_start_engine;
+
+ if (!hpriv->irq_handler)
+ hpriv->irq_handler = ahci_single_level_irq_intr;
}
EXPORT_SYMBOL_GPL(ahci_save_initial_config);
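
The new irq_handler hook replaces the AHCI_HFLAG_EDGE_IRQ special case with a generic override point: a platform fills it in before host activation, and ahci_save_initial_config() falls back to the level-triggered default otherwise. A hedged sketch of the override pattern, using the hook and the newly exported helper from this patch; the my_soc_* names are hypothetical:

/* Hedged sketch, not from this patch: my_soc_* names are hypothetical. */
static irqreturn_t my_soc_ahci_irq_intr(int irq, void *dev_instance)
{
	/* SoC-specific HOST_IRQ_STAT handling; the per-port work can
	 * reuse the now-exported ahci_handle_port_intr(). */
	return IRQ_HANDLED;
}

static int my_soc_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv = ahci_platform_get_resources(pdev);

	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	if (my_soc_has_irq_quirk(pdev))		/* hypothetical quirk test */
		hpriv->irq_handler = my_soc_ahci_irq_intr;

	/* ahci_save_initial_config() keeps ahci_single_level_irq_intr
	 * for any host that did not install an override. */
	return ahci_platform_init_host(pdev, hpriv, &my_soc_port_info,
				       &my_soc_sht);
}
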
@@ -1164,8 +1168,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
/* mark esata ports */
tmp = readl(port_mmio + PORT_CMD);
- if ((tmp & PORT_CMD_HPCP) ||
- ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)))
+ if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
ap->pflags |= ATA_PFLAG_EXTERNAL;
}
@@ -1846,7 +1849,7 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
return IRQ_HANDLED;
}
-static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
+u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
{
unsigned int i, handled = 0;
@@ -1872,43 +1875,7 @@ static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
return handled;
}
-
-static irqreturn_t ahci_single_edge_irq_intr(int irq, void *dev_instance)
-{
- struct ata_host *host = dev_instance;
- struct ahci_host_priv *hpriv;
- unsigned int rc = 0;
- void __iomem *mmio;
- u32 irq_stat, irq_masked;
-
- VPRINTK("ENTER\n");
-
- hpriv = host->private_data;
- mmio = hpriv->mmio;
-
- /* sigh. 0xffffffff is a valid return from h/w */
- irq_stat = readl(mmio + HOST_IRQ_STAT);
- if (!irq_stat)
- return IRQ_NONE;
-
- irq_masked = irq_stat & hpriv->port_map;
-
- spin_lock(&host->lock);
-
- /*
- * HOST_IRQ_STAT behaves as edge triggered latch meaning that
- * it should be cleared before all the port events are cleared.
- */
- writel(irq_stat, mmio + HOST_IRQ_STAT);
-
- rc = ahci_handle_port_intr(host, irq_masked);
-
- spin_unlock(&host->lock);
-
- VPRINTK("EXIT\n");
-
- return IRQ_RETVAL(rc);
-}
+EXPORT_SYMBOL_GPL(ahci_handle_port_intr);
static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
{
@@ -2535,14 +2502,18 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
int irq = hpriv->irq;
int rc;
- if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX))
+ if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
+ if (hpriv->irq_handler)
+ dev_warn(host->dev,
+ "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
+
rc = ahci_host_activate_multi_irqs(host, sht);
- else if (hpriv->flags & AHCI_HFLAG_EDGE_IRQ)
- rc = ata_host_activate(host, irq, ahci_single_edge_irq_intr,
- IRQF_SHARED, sht);
- else
- rc = ata_host_activate(host, irq, ahci_single_level_irq_intr,
+ } else {
+ rc = ata_host_activate(host, irq, hpriv->irq_handler,
IRQF_SHARED, sht);
+ }
+
return rc;
}
EXPORT_SYMBOL_GPL(ahci_host_activate);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 7e959f90c020..e417e1a1d02c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap)
int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
int cmd, void __user *arg)
{
- int val = -EINVAL, rc = -EINVAL;
+ unsigned long val;
+ int rc = -EINVAL;
unsigned long flags;
switch (cmd) {
- case ATA_IOC_GET_IO32:
+ case HDIO_GET_32BIT:
spin_lock_irqsave(ap->lock, flags);
val = ata_ioc32(ap);
spin_unlock_irqrestore(ap->lock, flags);
- if (copy_to_user(arg, &val, 1))
- return -EFAULT;
- return 0;
+ return put_user(val, (unsigned long __user *)arg);
- case ATA_IOC_SET_IO32:
+ case HDIO_SET_32BIT:
val = (unsigned long) arg;
rc = 0;
spin_lock_irqsave(ap->lock, flags);
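
For reference, the HDIO_GET_32BIT/HDIO_SET_32BIT pair is what userspace tools such as hdparm actually issue, which is why the bogus ATA_IOC_* numbers and the single-byte copy_to_user() had to go. A self-contained userspace sketch of both calls (the device path is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	long val = 0;
	int fd = open("/dev/sda", O_RDONLY);	/* hypothetical device */

	if (fd < 0)
		return 1;

	/* HDIO_GET_32BIT stores a long via put_user() in the new code */
	if (ioctl(fd, HDIO_GET_32BIT, &val) == 0)
		printf("32-bit I/O setting: %ld\n", val);

	/* HDIO_SET_32BIT passes the new value as the argument itself */
	ioctl(fd, HDIO_SET_32BIT, 1L);
	return 0;
}
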
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 12fe0f3bb7e9..c8b6a780a290 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -32,6 +32,8 @@
#include <linux/libata.h>
#include <scsi/scsi_host.h>
+#include <asm/mach-rc32434/rb.h>
+
#define DRV_NAME "pata-rb532-cf"
#define DRV_VERSION "0.1.0"
#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
@@ -107,6 +109,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
int gpio;
struct resource *res;
struct ata_host *ah;
+ struct cf_device *pdata;
struct rb532_cf_info *info;
int ret;
@@ -122,7 +125,13 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
return -ENOENT;
}
- gpio = irq_to_gpio(irq);
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data specified\n");
+ return -EINVAL;
+ }
+
+ gpio = pdata->gpio_pin;
if (gpio < 0) {
dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
return -ENOENT;
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index ec6af1595062..cf50fd2e96df 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -169,6 +169,17 @@ config BT_HCIUART_QCA
Say Y here to compile support for QCA protocol.
+config BT_HCIUART_AG6XX
+ bool "Intel AG6XX protocol support"
+ depends on BT_HCIUART
+ select BT_HCIUART_H4
+ select BT_INTEL
+ help
+ The Intel/AG6XX protocol support enables Bluetooth HCI over the
+ serial port interface for Intel iBT 2.1 Bluetooth controllers.
+
+ Say Y here to compile support for Intel AG6XX protocol.
+
config BT_HCIBCM203X
tristate "HCI BCM203x USB driver"
depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 07c9cf381e5a..9c18939fc5c9 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -36,6 +36,7 @@ hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o
hci_uart-$(CONFIG_BT_HCIUART_INTEL) += hci_intel.o
hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o
hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o
+hci_uart-$(CONFIG_BT_HCIUART_AG6XX) += hci_ag6xx.o
hci_uart-objs := $(hci_uart-y)
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index fa893c3ec408..93747389dd28 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0489, 0xe05f) },
{ USB_DEVICE(0x0489, 0xe076) },
{ USB_DEVICE(0x0489, 0xe078) },
+ { USB_DEVICE(0x0489, 0xe095) },
{ USB_DEVICE(0x04c5, 0x1330) },
{ USB_DEVICE(0x04CA, 0x3004) },
{ USB_DEVICE(0x04CA, 0x3005) },
@@ -92,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x300d) },
{ USB_DEVICE(0x04CA, 0x300f) },
{ USB_DEVICE(0x04CA, 0x3010) },
+ { USB_DEVICE(0x04CA, 0x3014) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x021c) },
{ USB_DEVICE(0x0930, 0x0220) },
@@ -113,6 +115,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x13d3, 0x3393) },
+ { USB_DEVICE(0x13d3, 0x3395) },
{ USB_DEVICE(0x13d3, 0x3402) },
{ USB_DEVICE(0x13d3, 0x3408) },
{ USB_DEVICE(0x13d3, 0x3423) },
@@ -144,6 +147,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -154,6 +158,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -175,6 +180,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
@@ -497,6 +503,7 @@ static int ath3k_probe(struct usb_interface *intf,
/* match device ID in ath3k blacklist table */
if (!id->driver_info) {
const struct usb_device_id *match;
+
match = usb_match_id(intf, ath3k_blist_tbl);
if (match)
id = match;
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 0b697946e9bc..fdb44829ab6f 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -467,7 +467,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
err = request_firmware(&fw, fw_name, &hdev->dev);
if (err < 0) {
BT_INFO("%s: BCM: Patch %s not found", hdev->name, fw_name);
- return 0;
+ goto done;
}
btbcm_patchram(hdev, fw);
@@ -501,6 +501,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
kfree_skb(skb);
+done:
btbcm_check_bdaddr(hdev);
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a191e318fab8..97f3bba93a8e 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -196,6 +196,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -206,6 +207,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -227,6 +229,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c
new file mode 100644
index 000000000000..6923d17a022f
--- /dev/null
+++ b/drivers/bluetooth/hci_ag6xx.c
@@ -0,0 +1,337 @@
+/*
+ *
+ * Bluetooth HCI UART driver for Intel/AG6xx devices
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+#include "btintel.h"
+
+struct ag6xx_data {
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq;
+};
+
+struct pbn_entry {
+ __le32 addr;
+ __le32 plen;
+ __u8 data[0];
+} __packed;
+
+static int ag6xx_open(struct hci_uart *hu)
+{
+ struct ag6xx_data *ag6xx;
+
+ BT_DBG("hu %p", hu);
+
+ ag6xx = kzalloc(sizeof(*ag6xx), GFP_KERNEL);
+ if (!ag6xx)
+ return -ENOMEM;
+
+ skb_queue_head_init(&ag6xx->txq);
+
+ hu->priv = ag6xx;
+ return 0;
+}
+
+static int ag6xx_close(struct hci_uart *hu)
+{
+ struct ag6xx_data *ag6xx = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&ag6xx->txq);
+ kfree_skb(ag6xx->rx_skb);
+ kfree(ag6xx);
+
+ hu->priv = NULL;
+ return 0;
+}
+
+static int ag6xx_flush(struct hci_uart *hu)
+{
+ struct ag6xx_data *ag6xx = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&ag6xx->txq);
+ return 0;
+}
+
+static struct sk_buff *ag6xx_dequeue(struct hci_uart *hu)
+{
+ struct ag6xx_data *ag6xx = hu->priv;
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&ag6xx->txq);
+ if (!skb)
+ return skb;
+
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+ return skb;
+}
+
+static int ag6xx_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ struct ag6xx_data *ag6xx = hu->priv;
+
+ skb_queue_tail(&ag6xx->txq, skb);
+ return 0;
+}
+
+static const struct h4_recv_pkt ag6xx_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = hci_recv_frame },
+};
+
+static int ag6xx_recv(struct hci_uart *hu, const void *data, int count)
+{
+ struct ag6xx_data *ag6xx = hu->priv;
+
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
+ ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count,
+ ag6xx_recv_pkts,
+ ARRAY_SIZE(ag6xx_recv_pkts));
+ if (IS_ERR(ag6xx->rx_skb)) {
+ int err = PTR_ERR(ag6xx->rx_skb);
+ bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
+ ag6xx->rx_skb = NULL;
+ return err;
+ }
+
+ return count;
+}
+
+static int intel_mem_write(struct hci_dev *hdev, u32 addr, u32 plen,
+ const void *data)
+{
+ /* A maximum of 247 data bytes can be written per HCI command:
+ * HCI command header (3), Intel mem write header (6), data (247).
+ */
+ while (plen > 0) {
+ struct sk_buff *skb;
+ u8 cmd_param[253], fragment_len = (plen > 247) ? 247 : plen;
+ __le32 leaddr = cpu_to_le32(addr);
+
+ memcpy(cmd_param, &leaddr, 4);
+ cmd_param[4] = 0;
+ cmd_param[5] = fragment_len;
+ memcpy(cmd_param + 6, data, fragment_len);
+
+ skb = __hci_cmd_sync(hdev, 0xfc8e, fragment_len + 6, cmd_param,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+ kfree_skb(skb);
+
+ plen -= fragment_len;
+ data += fragment_len;
+ addr += fragment_len;
+ }
+
+ return 0;
+}
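
To make the fragmentation arithmetic concrete, a hedged, self-contained sketch of how a 500-byte region splits under the 247-byte per-command ceiling (247 + 247 + 6), mirroring the loop above:

#include <stdio.h>

int main(void)
{
	unsigned int plen = 500, addr = 0;	/* example sizes only */

	while (plen > 0) {
		unsigned int fragment_len = plen > 247 ? 247 : plen;

		printf("write %u bytes at offset 0x%x\n", fragment_len, addr);
		plen -= fragment_len;
		addr += fragment_len;
	}
	return 0;
}
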
+
+static int ag6xx_setup(struct hci_uart *hu)
+{
+ struct hci_dev *hdev = hu->hdev;
+ struct sk_buff *skb;
+ struct intel_version ver;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ char fwname[64];
+ bool patched = false;
+ int err;
+
+ hu->hdev->set_diag = btintel_set_diag;
+ hu->hdev->set_bdaddr = btintel_set_bdaddr;
+
+ err = btintel_enter_mfg(hdev);
+ if (err)
+ return err;
+
+ err = btintel_read_version(hdev, &ver);
+ if (err)
+ return err;
+
+ btintel_version_info(hdev, &ver);
+
+ /* The hardware platform number has a fixed value of 0x37 and
+ * for now only accept this single value.
+ */
+ if (ver.hw_platform != 0x37) {
+ bt_dev_err(hdev, "Unsupported Intel hardware platform: 0x%X",
+ ver.hw_platform);
+ return -EINVAL;
+ }
+
+ /* Only the hardware variant iBT 2.1 (AG6XX) is supported by this
+ * firmware setup method.
+ */
+ if (ver.hw_variant != 0x0a) {
+ bt_dev_err(hdev, "Unsupported Intel hardware variant: 0x%x",
+ ver.hw_variant);
+ return -EINVAL;
+ }
+
+ snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bddata",
+ ver.hw_platform, ver.hw_variant);
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to open Intel bddata file: %s (%d)",
+ fwname, err);
+ goto patch;
+ }
+ fw_ptr = fw->data;
+
+ bt_dev_info(hdev, "Applying bddata (%s)", fwname);
+
+ skb = __hci_cmd_sync_ev(hdev, 0xfc2f, fw->size, fw->data,
+ HCI_EV_CMD_STATUS, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Applying bddata failed (%ld)", PTR_ERR(skb));
+ release_firmware(fw);
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ release_firmware(fw);
+
+patch:
+ /* If there is no applied patch, fw_patch_num is always 0x00. In other
+ * cases, current firmware is already patched. No need to patch it.
+ */
+ if (ver.fw_patch_num) {
+ bt_dev_info(hdev, "Device is already patched. patch num: %02x",
+ ver.fw_patch_num);
+ patched = true;
+ goto complete;
+ }
+
+ snprintf(fwname, sizeof(fwname),
+ "intel/ibt-hw-%x.%x.%x-fw-%x.%x.%x.%x.%x.pbn",
+ ver.hw_platform, ver.hw_variant, ver.hw_revision,
+ ver.fw_variant, ver.fw_revision, ver.fw_build_num,
+ ver.fw_build_ww, ver.fw_build_yy);
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to open Intel patch file: %s(%d)",
+ fwname, err);
+ goto complete;
+ }
+ fw_ptr = fw->data;
+
+ bt_dev_info(hdev, "Patching firmware file (%s)", fwname);
+
+ /* PBN patch file contains a list of binary patches to be applied on top
+ * of the embedded firmware. Each patch entry header contains the target
+ * address and patch size.
+ *
+ * Patch entry:
+ * | addr(le) | patch_len(le) | patch_data |
+ * | 4 Bytes | 4 Bytes | n Bytes |
+ *
+ * PBN file is terminated by a patch entry whose address is 0xffffffff.
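+ *
+ * A hedged example with hypothetical values: one 8-byte patch at
+ * address 0x00100000, followed by the terminator (all fields little
+ * endian on the wire):
+ *
+ *   00 00 10 00 | 08 00 00 00 | d0 d1 d2 d3 d4 d5 d6 d7
+ *   ff ff ff ff   (terminator; plen and data are not read)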
+ */
+ while (fw->size > fw_ptr - fw->data) {
+ struct pbn_entry *pbn = (void *)fw_ptr;
+ u32 addr, plen;
+
+ if (pbn->addr == 0xffffffff) {
+ bt_dev_info(hdev, "Patching complete");
+ patched = true;
+ break;
+ }
+
+ addr = le32_to_cpu(pbn->addr);
+ plen = le32_to_cpu(pbn->plen);
+
+ if (fw->data + fw->size <= pbn->data + plen) {
+ bt_dev_info(hdev, "Invalid patch len (%d)", plen);
+ break;
+ }
+
+ bt_dev_info(hdev, "Patching %td/%zu", (fw_ptr - fw->data),
+ fw->size);
+
+ err = intel_mem_write(hdev, addr, plen, pbn->data);
+ if (err) {
+ bt_dev_err(hdev, "Patching failed");
+ break;
+ }
+
+ fw_ptr = pbn->data + plen;
+ }
+
+ release_firmware(fw);
+
+complete:
+ /* Exit manufacturing mode and reset */
+ err = btintel_exit_mfg(hdev, true, patched);
+ if (err)
+ return err;
+
+ /* Set the event mask for Intel specific vendor events. This enables
+ * a few extra events that are useful during general operation.
+ */
+ btintel_set_event_mask_mfg(hdev, false);
+
+ btintel_check_bdaddr(hdev);
+ return 0;
+}
+
+static const struct hci_uart_proto ag6xx_proto = {
+ .id = HCI_UART_AG6XX,
+ .name = "AG6XX",
+ .manufacturer = 2,
+ .open = ag6xx_open,
+ .close = ag6xx_close,
+ .flush = ag6xx_flush,
+ .setup = ag6xx_setup,
+ .recv = ag6xx_recv,
+ .enqueue = ag6xx_enqueue,
+ .dequeue = ag6xx_dequeue,
+};
+
+int __init ag6xx_init(void)
+{
+ return hci_uart_register_proto(&ag6xx_proto);
+}
+
+int __exit ag6xx_deinit(void)
+{
+ return hci_uart_unregister_proto(&ag6xx_proto);
+}
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 5f3de181e744..bb4c5a00aea0 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -820,10 +820,12 @@ static const struct acpi_device_id bcm_acpi_match[] = {
{ "BCM2E3D", 0 },
{ "BCM2E3F", 0 },
{ "BCM2E40", 0 },
+ { "BCM2E54", 0 },
{ "BCM2E64", 0 },
{ "BCM2E65", 0 },
{ "BCM2E67", 0 },
{ "BCM2E7B", 0 },
+ { "BCM2E7C", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 3d63ea37bd4c..91d605147b10 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -488,7 +488,7 @@ static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
clear_bit(STATE_BOOTING, &intel->flags);
/* In case of timeout, try to continue anyway */
- if (err && err != ETIMEDOUT)
+ if (err && err != -ETIMEDOUT)
return err;
bt_dev_info(hdev, "Change controller speed to %d", speed);
@@ -581,7 +581,7 @@ static int intel_setup(struct hci_uart *hu)
clear_bit(STATE_BOOTING, &intel->flags);
/* In case of timeout, try to continue anyway */
- if (err && err != ETIMEDOUT)
+ if (err && err != -ETIMEDOUT)
return err;
set_bit(STATE_BOOTLOADER, &intel->flags);
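
The sign fix matters because kernel helpers return negative errno values, so the old comparison against positive ETIMEDOUT could never match and a timeout was treated as fatal. A self-contained illustration of the convention (wait_for_boot() is a hypothetical stand-in for the driver's wait helper):

#include <errno.h>
#include <stdio.h>

/* Hypothetical helper following the kernel convention: 0 or -Exxx */
static int wait_for_boot(void)
{
	return -ETIMEDOUT;
}

int main(void)
{
	int err = wait_for_boot();

	/* The old test (err != ETIMEDOUT) compared against +110 and
	 * therefore returned early even on a plain timeout. */
	if (err && err != -ETIMEDOUT)
		printf("fatal: %d\n", err);
	else
		printf("timed out, continuing anyway\n");
	return 0;
}
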
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 73202624133b..c00168a5bb80 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -804,6 +804,9 @@ static int __init hci_uart_init(void)
#ifdef CONFIG_BT_HCIUART_QCA
qca_init();
#endif
+#ifdef CONFIG_BT_HCIUART_AG6XX
+ ag6xx_init();
+#endif
return 0;
}
@@ -836,6 +839,9 @@ static void __exit hci_uart_exit(void)
#ifdef CONFIG_BT_HCIUART_QCA
qca_deinit();
#endif
+#ifdef CONFIG_BT_HCIUART_AG6XX
+ ag6xx_deinit();
+#endif
/* Release tty registration of line discipline */
err = tty_unregister_ldisc(N_HCI);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 82c92f1b65b4..4814ff08f427 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -35,7 +35,7 @@
#define HCIUARTGETFLAGS _IOR('U', 204, int)
/* UART protocols */
-#define HCI_UART_MAX_PROTO 9
+#define HCI_UART_MAX_PROTO 10
#define HCI_UART_H4 0
#define HCI_UART_BCSP 1
@@ -46,6 +46,7 @@
#define HCI_UART_INTEL 6
#define HCI_UART_BCM 7
#define HCI_UART_QCA 8
+#define HCI_UART_AG6XX 9
#define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1
@@ -182,3 +183,8 @@ int bcm_deinit(void);
int qca_init(void);
int qca_deinit(void);
#endif
+
+#ifdef CONFIG_BT_HCIUART_AG6XX
+int ag6xx_init(void);
+int ag6xx_deinit(void);
+#endif
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d0da5d852d41..b583e5336630 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1819,6 +1819,28 @@ unsigned int get_random_int(void)
EXPORT_SYMBOL(get_random_int);
/*
+ * Same as get_random_int(), but returns unsigned long.
+ */
+unsigned long get_random_long(void)
+{
+ __u32 *hash;
+ unsigned long ret;
+
+ if (arch_get_random_long(&ret))
+ return ret;
+
+ hash = get_cpu_var(get_random_int_hash);
+
+ hash[0] += current->pid + jiffies + random_get_entropy();
+ md5_transform(hash, random_int_secret);
+ ret = *(unsigned long *)hash;
+ put_cpu_var(get_random_int_hash);
+
+ return ret;
+}
+EXPORT_SYMBOL(get_random_long);
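
get_random_long() exists so that callers producing long-sized values, typically address-space randomization offsets, stop truncating to 32 bits on 64-bit kernels. A minimal kernel-side sketch of such a caller; the 28-bit window is an illustrative assumption, not taken from this patch:

static unsigned long example_mmap_rnd(void)
{
	/* Full-width entropy, clamped to an assumed 28-bit window,
	 * then page-aligned. */
	unsigned long rnd = get_random_long() & ((1UL << 28) - 1);

	return rnd << PAGE_SHIFT;
}
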
+
+/*
* randomize_range() returns a start address such that
*
* [...... <range> .....]
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 1c300388782b..cc739291a3ce 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -460,7 +460,8 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
parent = clk_hw_get_parent(hw);
- if (clk_hw_get_rate(hw) == clk_get_rate(dd->clk_bypass)) {
+ if (clk_hw_get_rate(hw) ==
+ clk_hw_get_rate(__clk_get_hw(dd->clk_bypass))) {
WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
r = _omap3_noncore_dpll_bypass(clk);
} else {
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 659879a56dba..f93511031177 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -296,6 +296,7 @@ endif
config QORIQ_CPUFREQ
tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
depends on OF && COMMON_CLK && (PPC_E500MC || ARM)
+ depends on !CPU_THERMAL || THERMAL
select CLK_QORIQ
help
This adds the CPUFreq driver support for Freescale QorIQ SoCs
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0031069b64c9..14b1f9393b05 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,10 +84,10 @@ config ARM_KIRKWOOD_CPUFREQ
SoCs.
config ARM_MT8173_CPUFREQ
- bool "Mediatek MT8173 CPUFreq support"
+ tristate "Mediatek MT8173 CPUFreq support"
depends on ARCH_MEDIATEK && REGULATOR
depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
- depends on !CPU_THERMAL || THERMAL=y
+ depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
This adds the CPUFreq driver support for Mediatek MT8173 SoC.
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
index 1efba340456d..2058e6d292ce 100644
--- a/drivers/cpufreq/mt8173-cpufreq.c
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -17,6 +17,7 @@
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index 848b93ee930f..fe9dce0245bf 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -500,6 +500,8 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
clk_set_min_rate(tegra->emc_clock, rate);
clk_set_rate(tegra->emc_clock, 0);
+ *freq = rate;
+
return 0;
}
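
The added write-back matters because devfreq takes the value left in *freq as the frequency actually programmed; returning without updating it leaves the framework's bookkeeping stale. A hedged sketch of the target() contract (example_pick_rate() is hypothetical):

static int example_devfreq_target(struct device *dev, unsigned long *freq,
				  u32 flags)
{
	unsigned long rate = example_pick_rate(*freq);	/* hypothetical */

	/* ... program the clock to 'rate' ... */

	/* devfreq treats the written-back value as the current frequency */
	*freq = rate;
	return 0;
}
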
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index f2a0310ae771..debca824bed6 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -583,6 +583,8 @@ static void set_updater_desc(struct pxad_desc_sw *sw_desc,
(PXA_DCMD_LENGTH & sizeof(u32));
if (flags & DMA_PREP_INTERRUPT)
updater->dcmd |= PXA_DCMD_ENDIRQEN;
+ if (sw_desc->cyclic)
+ sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
}
static bool is_desc_completed(struct virt_dma_desc *vd)
@@ -673,6 +675,10 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
dev_dbg(&chan->vc.chan.dev->device,
"%s(): checking txd %p[%x]: completed=%d\n",
__func__, vd, vd->tx.cookie, is_desc_completed(vd));
+ if (to_pxad_sw_desc(vd)->cyclic) {
+ vchan_cyclic_callback(vd);
+ break;
+ }
if (is_desc_completed(vd)) {
list_del(&vd->node);
vchan_cookie_complete(vd);
@@ -1080,7 +1086,7 @@ pxad_prep_dma_cyclic(struct dma_chan *dchan,
return NULL;
pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
- dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len);
+ dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
dev_dbg(&chan->vc.chan.dev->device,
"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
__func__, (unsigned long)buf_addr, len, period_len, dir, flags);
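
The one-character change from | to & matters because PXA_DCMD_LENGTH is a field mask: AND extracts period_len into the descriptor's length field, whereas OR forced every length bit high regardless of period_len. A self-contained illustration, assuming the driver's 0x1fff mask value:

#include <stdio.h>

#define PXA_DCMD_LENGTH 0x1fff	/* assumed 13-bit length field mask */

int main(void)
{
	unsigned int period_len = 0x100;

	printf("broken (|): 0x%x\n", PXA_DCMD_LENGTH | period_len); /* 0x1fff */
	printf("fixed  (&): 0x%x\n", PXA_DCMD_LENGTH & period_len); /* 0x100 */
	return 0;
}
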
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index cf41440aff91..d9ab0cd1d205 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -196,6 +196,44 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
+static void gpio_rcar_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = gpiochip_get_data(gc);
+
+ pm_runtime_get_sync(&p->pdev->dev);
+}
+
+static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = gpiochip_get_data(gc);
+
+ pm_runtime_put(&p->pdev->dev);
+}
+
+static int gpio_rcar_irq_request_resources(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = gpiochip_get_data(gc);
+ int error;
+
+ error = pm_runtime_get_sync(&p->pdev->dev);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+
+static void gpio_rcar_irq_release_resources(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = gpiochip_get_data(gc);
+
+ pm_runtime_put(&p->pdev->dev);
+}
+
static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
{
struct gpio_rcar_priv *p = dev_id;
@@ -450,6 +488,10 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq_chip->irq_unmask = gpio_rcar_irq_enable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
+ irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
+ irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
+ irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
+ irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
ret = gpiochip_add_data(gpio_chip, p);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 89c3dd62ba21..119cdc2c43e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
/* Don't try to start link training before we
* have the dpcd */
- if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+ if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
return;
/* set it to OFF so that drm_helper_connector_dpms()
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index acd066d0a805..8297bc319369 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct drm_crtc *crtc = &amdgpuCrtc->base;
unsigned long flags;
- unsigned i;
- int vpos, hpos, stat, min_udelay;
+ unsigned i, repcnt = 4;
+ int vpos, hpos, stat, min_udelay = 0;
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
amdgpu_flip_wait_fence(adev, &work->excl);
@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
* In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small.
*/
- for (;;) {
+ while (amdgpuCrtc->enabled && repcnt--) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in
* vpos.
@@ -114,10 +114,22 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
/* Sleep at least until estimated real start of hw vblank */
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+ if (min_udelay > vblank->framedur_ns / 2000) {
+ /* Don't wait ridiculously long - something is wrong */
+ repcnt = 0;
+ break;
+ }
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
};
+ if (!repcnt)
+ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
+ "framedur %d, linedur %d, stat %d, vpos %d, "
+ "hpos %d\n", work->crtc_id, min_udelay,
+ vblank->framedur_ns / 1000,
+ vblank->linedur_ns / 1000, stat, vpos, hpos);
+
/* do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
/* set the flip status */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7380f782cd14..d20c2a8929cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -596,7 +596,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
break;
}
ttm_eu_backoff_reservation(&ticket, &list);
- if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
+ if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
+ !amdgpu_vm_debug)
amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
drm_gem_object_unreference_unlocked(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7d8d84eaea4a..95a4a25d8df9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -113,6 +113,10 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return snprintf(buf, PAGE_SIZE, "off\n");
+
if (adev->pp_enabled) {
enum amd_dpm_forced_level level;
@@ -140,6 +144,11 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
enum amdgpu_dpm_forced_level level;
int ret = 0;
+ /* Can't force performance level when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMDGPU_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -157,6 +166,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
count = -EINVAL;
+ mutex_unlock(&adev->pm.mutex);
goto fail;
}
ret = amdgpu_dpm_force_performance_level(adev, level);
@@ -167,8 +177,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
mutex_unlock(&adev->pm.mutex);
}
fail:
- mutex_unlock(&adev->pm.mutex);
-
return count;
}
@@ -182,8 +190,14 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
+ struct drm_device *ddev = adev->ddev;
int temp;
+ /* Can't get temperature when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
temp = 0;
else
@@ -634,11 +648,6 @@ force:
/* update display watermarks based on new power state */
amdgpu_display_bandwidth_update(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
-
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
/* wait for the rings to drain */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -655,6 +664,12 @@ force:
amdgpu_dpm_post_set_power_state(adev);
+ /* update displays */
+ amdgpu_dpm_display_configuration_changed(adev);
+
+ adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+ adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+
if (adev->pm.funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
@@ -847,12 +862,16 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
+ struct drm_device *ddev = adev->ddev;
if (!adev->pm.dpm_enabled) {
seq_printf(m, "dpm not enabled\n");
return 0;
}
- if (adev->pp_enabled) {
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+ seq_printf(m, "PX asic powered off\n");
+ } else if (adev->pp_enabled) {
amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
} else {
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index b9d0d55f6b47..3cb6d6c413c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -143,8 +143,10 @@ static int amdgpu_pp_late_init(void *handle)
adev->powerplay.pp_handle);
#ifdef CONFIG_DRM_AMD_POWERPLAY
- if (adev->pp_enabled)
+ if (adev->pp_enabled) {
amdgpu_pm_sysfs_init(adev);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
+ }
#endif
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 9056355309d1..e7ef2261ff4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -2202,8 +2202,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
AMD_PG_STATE_GATE);
cz_enable_vce_dpm(adev, false);
- /* TODO: to figure out why vce can't be poweroff. */
- /* cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); */
+ cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF);
pi->vce_power_gated = true;
} else {
cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON);
@@ -2226,10 +2225,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
} else { /*pi->caps_vce_pg*/
cz_update_vce_dpm(adev);
- cz_enable_vce_dpm(adev, true);
+ cz_enable_vce_dpm(adev, !gate);
}
-
- return;
}
const struct amd_ip_funcs cz_dpm_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 7732059ae30f..06602df707f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3628,6 +3628,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring, 0xffffffff);
+ amdgpu_ring_write(ring, 4); /* poll interval */
+
if (usepfp) {
/* sync CE with ME to prevent CE fetching CEIB before the context switch is done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 8f8ec37ecd88..7086ac17abee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4809,7 +4809,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
- WAIT_REG_MEM_FUNCTION(3))); /* equal */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq);
@@ -4995,7 +4996,7 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
case AMDGPU_IRQ_STATE_ENABLE:
cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE, 0);
+ PRIV_REG_INT_ENABLE, 1);
WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
break;
default:
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index aa67244a77ae..589599f66fcc 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -402,8 +402,11 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
data.requested_ui_label = power_state_convert(ps);
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+ break;
}
- break;
+ case AMD_PP_EVENT_COMPLETE_INIT:
+ ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 83be3cf210e0..6b52c78cb404 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -165,6 +165,7 @@ const struct action_chain resume_action_chain = {
};
static const pem_event_action *complete_init_event[] = {
+ unblock_adjust_power_state_tasks,
adjust_power_state_tasks,
enable_gfx_clock_gating_tasks,
enable_gfx_voltage_island_power_gating_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
index 52a3efc97f05..46410e3c7349 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
@@ -31,7 +31,7 @@
static int pem_init(struct pp_eventmgr *eventmgr)
{
int result = 0;
- struct pem_event_data event_data;
+ struct pem_event_data event_data = { {0} };
/* Initialize PowerPlay feature info */
pem_init_feature_info(eventmgr);
@@ -52,7 +52,7 @@ static int pem_init(struct pp_eventmgr *eventmgr)
static void pem_fini(struct pp_eventmgr *eventmgr)
{
- struct pem_event_data event_data;
+ struct pem_event_data event_data = { {0} };
pem_uninit_featureInfo(eventmgr);
pem_unregister_interrupts(eventmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index ad7700822a1c..ff08ce41bde9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -226,7 +226,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
}
} else {
cz_dpm_update_vce_dpm(hwmgr);
- cz_enable_disable_vce_dpm(hwmgr, true);
+ cz_enable_disable_vce_dpm(hwmgr, !bgate);
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 9759009d1da3..b1480acbb3c3 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
} while (ast_read32(ast, 0x10000) != 0x01);
data = ast_read32(ast, 0x10004);
- if (data & 0x400)
+ if (data & 0x40)
ast->dram_bus_width = 16;
else
ast->dram_bus_width = 32;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0fc38bb7276c..cf39ed3133d6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -825,8 +825,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
for_each_pipe(dev_priv, pipe) {
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(pipe))) {
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain)) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
@@ -840,6 +843,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Pipe %c IER:\t%08x\n",
pipe_name(pipe),
I915_READ(GEN8_DE_PIPE_IER(pipe)));
+
+ intel_display_power_put(dev_priv, power_domain);
}
seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -3985,6 +3990,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
pipe));
+ enum intel_display_power_domain power_domain;
u32 val = 0; /* shut up gcc */
int ret;
@@ -3995,7 +4001,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
if (pipe_crc->source && source)
return -EINVAL;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
return -EIO;
}
@@ -4012,7 +4019,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
if (ret != 0)
- return ret;
+ goto out;
/* none -> real source transition */
if (source) {
@@ -4024,8 +4031,10 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
sizeof(pipe_crc->entries[0]),
GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
+ if (!entries) {
+ ret = -ENOMEM;
+ goto out;
+ }
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
@@ -4081,7 +4090,12 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
hsw_enable_ips(crtc);
}
- return 0;
+ ret = 0;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e7cd311e9fbb..b0847b915545 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -751,6 +751,7 @@ struct intel_csr {
uint32_t mmio_count;
i915_reg_t mmioaddr[8];
uint32_t mmiodata[8];
+ uint32_t dc_state;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9c89df1af036..a7b4a524fadd 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -71,22 +71,29 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
struct intel_crt *crt = intel_encoder_to_crt(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(crt->adpa_reg);
if (!(tmp & ADPA_DAC_ENABLE))
- return false;
+ goto out;
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
- return true;
+ ret = true;
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
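
The same conversion repeats through the remaining i915 hunks: instead of peeking at the power state with intel_display_power_is_enabled(), which can change underneath the caller, the code now takes a power reference with intel_display_power_get_if_enabled() and drops it with intel_display_power_put() on every exit path. Roughly, each converted function instantiates this skeleton:

static bool example_get_hw_state(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain power_domain)
{
	bool ret = false;

	/* Fails, taking no reference, if the domain is powered down */
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	/* ... hardware in the domain may be read safely here ... */
	ret = true;

	/* Every successful get must be balanced by a put */
	intel_display_power_put(dev_priv, power_domain);
	return ret;
}
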
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 9bb63a85997a..647d85e77c2f 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -240,6 +240,8 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
I915_WRITE(dev_priv->csr.mmioaddr[i],
dev_priv->csr.mmiodata[i]);
}
+
+ dev_priv->csr.dc_state = 0;
}
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 54a165b9c92d..0f3df2c39f7c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1969,13 +1969,16 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
enum transcoder cpu_transcoder;
enum intel_display_power_domain power_domain;
uint32_t tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(intel_encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
- return false;
+ if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) {
+ ret = false;
+ goto out;
+ }
if (port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
@@ -1987,23 +1990,33 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
case TRANS_DDI_MODE_SELECT_DVI:
- return (type == DRM_MODE_CONNECTOR_HDMIA);
+ ret = type == DRM_MODE_CONNECTOR_HDMIA;
+ break;
case TRANS_DDI_MODE_SELECT_DP_SST:
- if (type == DRM_MODE_CONNECTOR_eDP)
- return true;
- return (type == DRM_MODE_CONNECTOR_DisplayPort);
+ ret = type == DRM_MODE_CONNECTOR_eDP ||
+ type == DRM_MODE_CONNECTOR_DisplayPort;
+ break;
+
case TRANS_DDI_MODE_SELECT_DP_MST:
/* if the transcoder is in MST state then
* connector isn't connected */
- return false;
+ ret = false;
+ break;
case TRANS_DDI_MODE_SELECT_FDI:
- return (type == DRM_MODE_CONNECTOR_VGA);
+ ret = type == DRM_MODE_CONNECTOR_VGA;
+ break;
default:
- return false;
+ ret = false;
+ break;
}
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -2015,15 +2028,18 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum intel_display_power_domain power_domain;
u32 tmp;
int i;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
- return false;
+ goto out;
if (port == PORT_A) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -2041,25 +2057,32 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
break;
}
- return true;
- } else {
- for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+ ret = true;
- if ((tmp & TRANS_DDI_PORT_MASK)
- == TRANS_DDI_SELECT_PORT(port)) {
- if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
- return false;
+ goto out;
+ }
- *pipe = i;
- return true;
- }
+ for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+ if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
+ if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
+ TRANS_DDI_MODE_SELECT_DP_MST)
+ goto out;
+
+ *pipe = i;
+ ret = true;
+
+ goto out;
}
}
DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
- return false;
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
@@ -2508,12 +2531,14 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(WRPLL_CTL(pll->id));
hw_state->wrpll = val;
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
return val & WRPLL_PLL_ENABLE;
}
@@ -2523,12 +2548,14 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(SPLL_CTL);
hw_state->spll = val;
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
return val & SPLL_PLL_ENABLE;
}
@@ -2645,16 +2672,19 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
uint32_t val;
unsigned int dpll;
const struct skl_dpll_regs *regs = skl_dpll_regs;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
+ ret = false;
+
/* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
dpll = pll->id + 1;
val = I915_READ(regs[pll->id].ctl);
if (!(val & LCPLL_PLL_ENABLE))
- return false;
+ goto out;
val = I915_READ(DPLL_CTRL1);
hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
@@ -2664,8 +2694,12 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
}
+ ret = true;
- return true;
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return ret;
}
static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -2932,13 +2966,16 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
{
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
uint32_t val;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
+ ret = false;
+
val = I915_READ(BXT_PORT_PLL_ENABLE(port));
if (!(val & PORT_PLL_ENABLE))
- return false;
+ goto out;
hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
@@ -2985,7 +3022,12 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return ret;
}
static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -3120,11 +3162,15 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
{
u32 temp;
- if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+ if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+
if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
return true;
}
+
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5feb65725c04..46947fffd599 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1351,18 +1351,21 @@ void assert_pipe(struct drm_i915_private *dev_priv,
bool cur_state;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
+ enum intel_display_power_domain power_domain;
/* if we need the pipe quirk it must be always on */
if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
state = true;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
- cur_state = false;
- } else {
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
u32 val = I915_READ(PIPECONF(cpu_transcoder));
cur_state = !!(val & PIPECONF_ENABLE);
+
+ intel_display_power_put(dev_priv, power_domain);
+ } else {
+ cur_state = false;
}
I915_STATE_WARN(cur_state != state,
@@ -8171,18 +8174,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
uint32_t tmp;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ ret = false;
+
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
- return false;
+ goto out;
if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
switch (tmp & PIPECONF_BPC_MASK) {
@@ -8262,7 +8269,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->base.adjusted_mode.crtc_clock =
pipe_config->port_clock / pipe_config->pixel_multiplier;
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void ironlake_init_pch_refclk(struct drm_device *dev)
@@ -9366,18 +9378,21 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
uint32_t tmp;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ ret = false;
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
- return false;
+ goto out;
switch (tmp & PIPECONF_BPC_MASK) {
case PIPECONF_6BPC:
@@ -9440,7 +9455,12 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
ironlake_get_pfit_config(crtc, pipe_config);
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
@@ -9950,12 +9970,17 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum intel_display_power_domain pfit_domain;
+ enum intel_display_power_domain power_domain;
+ unsigned long power_domain_mask;
uint32_t tmp;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ power_domain_mask = BIT(power_domain);
+
+ ret = false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -9982,13 +10007,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
- return false;
+ power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ goto out;
+ power_domain_mask |= BIT(power_domain);
tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
if (!(tmp & PIPECONF_ENABLE))
- return false;
+ goto out;
haswell_get_ddi_port_state(crtc, pipe_config);
@@ -9998,14 +10024,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
skl_init_scalers(dev, crtc, pipe_config);
}
- pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
-
if (INTEL_INFO(dev)->gen >= 9) {
pipe_config->scaler_state.scaler_id = -1;
pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
- if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
+ power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
+ if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ power_domain_mask |= BIT(power_domain);
if (INTEL_INFO(dev)->gen >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
@@ -10023,7 +10049,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
- return true;
+ ret = true;
+
+out:
+ for_each_power_domain(power_domain, power_domain_mask)
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
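haswell_get_pipe_config() differs from the simpler readers in that it may take up to three domain references, so it records each successful get in power_domain_mask and releases exactly that set at out:. A sketch of the mask-based cleanup, using made-up domain_get_if_enabled()/domain_put() helpers:

#include <stdbool.h>
#include <stdio.h>

#define MAX_DOMAINS 8

static int use_count[MAX_DOMAINS];

static bool domain_get_if_enabled(int d, bool enabled)
{
	if (!enabled)
		return false;
	use_count[d]++;
	return true;
}

static void domain_put(int d)
{
	use_count[d]--;
}

/* Grab several domains as we go, recording each one in a mask so a
 * single exit path can release exactly what was acquired. */
static bool read_config(bool pipe_on, bool transcoder_on)
{
	unsigned long mask = 0;
	bool ret = false;

	if (!domain_get_if_enabled(0, pipe_on))
		return false;
	mask |= 1UL << 0;

	if (!domain_get_if_enabled(1, transcoder_on))
		goto out;	/* domain 0 is still released below */
	mask |= 1UL << 1;

	ret = true;
out:
	for (int d = 0; d < MAX_DOMAINS; d++)
		if (mask & (1UL << d))
			domain_put(d);
	return ret;
}

int main(void)
{
	printf("%d\n", read_config(true, false));	/* 0, but domain 0 put */
	printf("counts: %d %d\n", use_count[0], use_count[1]);	/* 0 0 */
	return 0;
}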
static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
@@ -13630,7 +13662,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(PCH_DPLL(pll->id));
@@ -13638,6 +13670,8 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
return val & DPLL_VCO_ENABLE;
}
@@ -15568,10 +15602,12 @@ void i915_redisable_vga(struct drm_device *dev)
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
i915_redisable_vga_power_on(dev);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
static bool primary_get_hw_state(struct intel_plane *plane)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1bbd67b046da..1d8de43bed56 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2362,15 +2362,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(intel_dp->output_reg);
if (!(tmp & DP_PORT_EN))
- return false;
+ goto out;
if (IS_GEN7(dev) && port == PORT_A) {
*pipe = PORT_TO_PIPE_CPT(tmp);
@@ -2381,7 +2384,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
*pipe = p;
- return true;
+ ret = true;
+
+ goto out;
}
}
@@ -2393,7 +2398,12 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
*pipe = PORT_TO_PIPE(tmp);
}
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void intel_dp_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ea5415851c6e..df7f3cb66056 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1428,6 +1428,8 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
+bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
@@ -1514,6 +1516,7 @@ enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
enable_rpm_wakeref_asserts(dev_priv)
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 44742fa2f616..0193c62a53ef 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -664,13 +664,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
enum port port;
+ bool ret;
DRM_DEBUG_KMS("\n");
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
@@ -691,12 +694,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
*pipe = port == PORT_A ? PIPE_A : PIPE_B;
- return true;
+ ret = true;
+
+ goto out;
}
}
}
+out:
+ intel_display_power_put(dev_priv, power_domain);
- return false;
+ return ret;
}
static void intel_dsi_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4a77639a489d..cb5d1b15755c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -880,15 +880,18 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(intel_hdmi->hdmi_reg);
if (!(tmp & SDVO_ENABLE))
- return false;
+ goto out;
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
@@ -897,7 +900,12 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
else
*pipe = PORT_TO_PIPE(tmp);
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void intel_hdmi_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0da0240caf81..bc04d8d29acb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -75,22 +75,30 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(lvds_encoder->reg);
if (!(tmp & LVDS_PORT_EN))
- return false;
+ goto out;
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void intel_lvds_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a234687792f0..b28c29f20e75 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2829,7 +2829,10 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
memset(ddb, 0, sizeof(*ddb));
for_each_pipe(dev_priv, pipe) {
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe)))
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
for_each_plane(dev_priv, pipe, plane) {
@@ -2841,6 +2844,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
val = I915_READ(CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
val);
+
+ intel_display_power_put(dev_priv, power_domain);
}
}
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index ddbdbffe829a..4f43d9b32e66 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -470,6 +470,43 @@ static void gen9_set_dc_state_debugmask_memory_up(
}
}
+static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
+ u32 state)
+{
+ int rewrites = 0;
+ int rereads = 0;
+ u32 v;
+
+ I915_WRITE(DC_STATE_EN, state);
+
+ /* It has been observed that disabling the dc6 state sometimes
+ * doesn't stick and the DMC keeps returning the old value. Rewrite
+ * the register on every mismatch and keep re-reading until the
+ * value has been stable for several consecutive reads, so we are
+ * confident the state is exactly what we want.
+ */
+ do {
+ v = I915_READ(DC_STATE_EN);
+
+ if (v != state) {
+ I915_WRITE(DC_STATE_EN, state);
+ rewrites++;
+ rereads = 0;
+ } else if (rereads++ > 5) {
+ break;
+ }
+
+ } while (rewrites < 100);
+
+ if (v != state)
+ DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
+ state, v);
+
+ /* Most of the time a single retry suffices; avoid log spam */
+ if (rewrites > 1)
+ DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
+ state, rewrites);
+}
+
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
uint32_t val;
@@ -494,10 +531,18 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
val = I915_READ(DC_STATE_EN);
DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
val & mask, state);
+
+ /* Check if DMC is ignoring our DC state requests */
+ if ((val & mask) != dev_priv->csr.dc_state)
+ DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
+ dev_priv->csr.dc_state, val & mask);
+
val &= ~mask;
val |= state;
- I915_WRITE(DC_STATE_EN, val);
- POSTING_READ(DC_STATE_EN);
+
+ gen9_write_dc_state(dev_priv, val);
+
+ dev_priv->csr.dc_state = val & mask;
}
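The gen9_write_dc_state() helper added above is a write-verify-retry loop: rewrite on mismatch, and only accept the value once several consecutive reads agree. A self-contained sketch of the same loop against a deliberately flaky fake register (all names here are stand-ins, not driver code):

#include <stdio.h>
#include <stdint.h>

static uint32_t reg;		/* stands in for DC_STATE_EN */
static int flaky = 3;		/* first writes are silently dropped */

static void reg_write(uint32_t v)
{
	if (flaky-- > 0)
		return;		/* simulate the DMC overriding the write */
	reg = v;
}

/* Rewrite until the value reads back stably; cap both the number of
 * rewrites and the consecutive stable reads we require. */
static void write_verified(uint32_t state)
{
	int rewrites = 0, rereads = 0;
	uint32_t v;

	reg_write(state);
	do {
		v = reg;
		if (v != state) {
			reg_write(state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}
	} while (rewrites < 100);

	if (v != state)
		fprintf(stderr, "write of 0x%x failed, now 0x%x\n", state, v);
	else if (rewrites > 1)
		printf("rewrote 0x%x %d times\n", state, rewrites);
}

int main(void)
{
	write_verified(0x2);
	printf("reg=0x%x\n", reg);
	return 0;
}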
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
@@ -1442,6 +1487,22 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
chv_set_pipe_power_well(dev_priv, power_well, false);
}
+static void
+__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ for_each_power_well(i, power_well, BIT(domain), power_domains) {
+ if (!power_well->count++)
+ intel_power_well_enable(dev_priv, power_well);
+ }
+
+ power_domains->domain_use_count[domain]++;
+}
+
/**
* intel_display_power_get - grab a power domain reference
* @dev_priv: i915 device instance
@@ -1457,24 +1518,53 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- int i;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
intel_runtime_pm_get(dev_priv);
- power_domains = &dev_priv->power_domains;
+ mutex_lock(&power_domains->lock);
+
+ __intel_display_power_get_domain(dev_priv, domain);
+
+ mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain if, and only if,
+ * the domain is already enabled, and returns false without touching the
+ * hardware otherwise. Callers must therefore check the return value before
+ * relying on the domain being powered.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ bool is_enabled;
+
+ if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ return false;
mutex_lock(&power_domains->lock);
- for_each_power_well(i, power_well, BIT(domain), power_domains) {
- if (!power_well->count++)
- intel_power_well_enable(dev_priv, power_well);
+ if (__intel_display_power_is_enabled(dev_priv, domain)) {
+ __intel_display_power_get_domain(dev_priv, domain);
+ is_enabled = true;
+ } else {
+ is_enabled = false;
}
- power_domains->domain_use_count[domain]++;
-
mutex_unlock(&power_domains->lock);
+
+ if (!is_enabled)
+ intel_runtime_pm_put(dev_priv);
+
+ return is_enabled;
}
/**
@@ -2213,15 +2303,15 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
*/
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- skl_display_core_uninit(dev_priv);
-
/*
* Even if power well support was disabled we still want to disable
* power wells while we are system suspended.
*/
if (!i915.disable_power_well)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ skl_display_core_uninit(dev_priv);
}
/**
@@ -2246,6 +2336,41 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
}
/**
+ * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference if the device is
+ * already in use and ensures that it is powered up.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (IS_ENABLED(CONFIG_PM)) {
+ int ret = pm_runtime_get_if_in_use(device);
+
+ /*
+ * In cases where runtime PM is disabled by the RPM core and we
+ * get an -EINVAL return value, we are not supposed to call this
+ * function, since the power state is undefined. At the moment
+ * this applies to the late/early system suspend/resume handlers.
+ */
+ WARN_ON_ONCE(ret < 0);
+ if (ret <= 0)
+ return false;
+ }
+
+ atomic_inc(&dev_priv->pm.wakeref_count);
+ assert_rpm_wakelock_held(dev_priv);
+
+ return true;
+}
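intel_runtime_pm_get_if_in_use() leans on pm_runtime_get_if_in_use(), whose contract is to take a reference only when the usage count is already non-zero, so the caller can never be the one to power the device up. A sketch of that contract with a plain C11 atomic counter (not the kernel's implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int usage = 0;

/* Take a reference only if someone else already holds one; a sketch of
 * the pm_runtime_get_if_in_use() idea, not the RPM core itself. */
static bool get_if_in_use(void)
{
	int old = atomic_load(&usage);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&usage, &old, old + 1))
			return true;
		/* old was reloaded by the failed CAS; retry */
	}
	return false;
}

static void put(void)
{
	atomic_fetch_sub(&usage, 1);
}

int main(void)
{
	printf("%d\n", get_if_in_use());	/* 0: device idle, no ref taken */
	atomic_fetch_add(&usage, 1);		/* a regular get elsewhere */
	printf("%d\n", get_if_in_use());	/* 1: piggy-back on active use */
	put();
	printf("usage=%d\n", atomic_load(&usage)); /* 1: original ref remains */
	return 0;
}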
+
+/**
* intel_runtime_pm_get_noresume - grab a runtime pm reference
* @dev_priv: i915 device instance
*
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 8a70cec59bcd..2dfe58af12e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -24,7 +24,7 @@
static int nouveau_platform_probe(struct platform_device *pdev)
{
const struct nvkm_device_tegra_func *func;
- struct nvkm_device *device;
+ struct nvkm_device *device = NULL;
struct drm_device *drm;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 7f8a42721eb2..e7e581d6a8ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
return -ENOMEM;
- *pdevice = &tdev->device;
+
tdev->func = func;
tdev->pdev = pdev;
tdev->irq = -1;
tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(tdev->vdd))
- return PTR_ERR(tdev->vdd);
+ if (IS_ERR(tdev->vdd)) {
+ ret = PTR_ERR(tdev->vdd);
+ goto free;
+ }
tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
- if (IS_ERR(tdev->rst))
- return PTR_ERR(tdev->rst);
+ if (IS_ERR(tdev->rst)) {
+ ret = PTR_ERR(tdev->rst);
+ goto free;
+ }
tdev->clk = devm_clk_get(&pdev->dev, "gpu");
- if (IS_ERR(tdev->clk))
- return PTR_ERR(tdev->clk);
+ if (IS_ERR(tdev->clk)) {
+ ret = PTR_ERR(tdev->clk);
+ goto free;
+ }
tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
- if (IS_ERR(tdev->clk_pwr))
- return PTR_ERR(tdev->clk_pwr);
+ if (IS_ERR(tdev->clk_pwr)) {
+ ret = PTR_ERR(tdev->clk_pwr);
+ goto free;
+ }
nvkm_device_tegra_probe_iommu(tdev);
ret = nvkm_device_tegra_power_up(tdev);
if (ret)
- return ret;
+ goto remove;
tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
@@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
cfg, dbg, detect, mmio, subdev_mask,
&tdev->device);
if (ret)
- return ret;
+ goto powerdown;
+
+ *pdevice = &tdev->device;
return 0;
+
+powerdown:
+ nvkm_device_tegra_power_down(tdev);
+remove:
+ nvkm_device_tegra_remove_iommu(tdev);
+free:
+ kfree(tdev);
+ return ret;
}
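The nvkm_device_tegra_new() fix above is textbook goto-based unwinding: acquire resources in order, and on failure fall through labels that undo only the completed steps in reverse, publishing *pdevice only after everything succeeded. A compressed sketch with hypothetical get_a()/get_b() resources:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resources standing in for regulators, clocks, IOMMU... */
static int get_a(void) { return 0; }		/* 0 on success */
static int get_b(void) { return -1; }		/* fails here */
static void put_a(void) { puts("put_a"); }

struct dev { int dummy; };

/* Acquire in order; on failure, fall through labels that undo exactly
 * the steps already completed, then free the allocation itself. */
static int dev_new(struct dev **out)
{
	struct dev *d = calloc(1, sizeof(*d));
	int ret;

	if (!d)
		return -1;

	ret = get_a();
	if (ret)
		goto err_free;

	ret = get_b();
	if (ret)
		goto err_put;

	*out = d;		/* publish only after full success */
	return 0;

err_put:
	put_a();
err_free:
	free(d);
	return ret;
}

int main(void)
{
	struct dev *d = NULL;
	printf("ret=%d dev=%p\n", dev_new(&d), (void *)d);
	return 0;
}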
#else
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 74e2f7c6c07e..9688970eca47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w)
.outp = outp,
}, *dp = &_dp;
u32 datarate = 0;
+ u8 pwr;
int ret;
if (!outp->base.info.location && disp->func->sor.magic)
@@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w)
/* disable link interrupt handling during link training */
nvkm_notify_put(&outp->irq);
+ /* ensure sink is not in a low-power state */
+ if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
+ if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
+ pwr &= ~DPCD_SC00_SET_POWER;
+ pwr |= DPCD_SC00_SET_POWER_D0;
+ nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
+ }
+ }
+
/* enable down-spreading and execute pre-train script from vbios */
dp_link_train_init(dp, outp->dpcd[3] & 0x01);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
index 9596290329c7..6e10c5e0ef11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
@@ -71,5 +71,11 @@
#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
+/* DPCD Sink Control */
+#define DPCD_SC00 0x00600
+#define DPCD_SC00_SET_POWER 0x03
+#define DPCD_SC00_SET_POWER_D0 0x01
+#define DPCD_SC00_SET_POWER_D3 0x03
+
void nvkm_dp_train(struct work_struct *);
#endif
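The new training step is a read-modify-write of the 2-bit SET_POWER field in DPCD register 0x600: clear the field, OR in the D0 value, and write it back only if the sink was not already awake. A sketch with the AUX read/write reduced to a plain variable (the shortened macro names are stand-ins for the defines above):

#include <stdint.h>
#include <stdio.h>

#define SC00_SET_POWER     0x03	/* field mask */
#define SC00_SET_POWER_D0  0x01

static uint8_t dpcd_sc00 = 0x02;	/* pretend the sink is in a low-power state */

/* Read-modify-write of the power field, leaving the register's other
 * bits untouched. */
static void sink_wake(void)
{
	uint8_t pwr = dpcd_sc00;	/* stands in for an AUX read */

	if ((pwr & SC00_SET_POWER) != SC00_SET_POWER_D0) {
		pwr &= ~SC00_SET_POWER;
		pwr |= SC00_SET_POWER_D0;
		dpcd_sc00 = pwr;	/* stands in for an AUX write */
	}
}

int main(void)
{
	sink_wake();
	printf("SC00=0x%02x\n", dpcd_sc00);	/* 0x01: now in D0 */
	return 0;
}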
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 902b59cebac5..4197ca1bb1e4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1744,7 +1744,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}
drm_kms_helper_poll_enable(dev);
- drm_helper_hpd_irq_event(dev);
/* set the power state here in case we are a PX system or headless */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 298ea1c453c3..2b9ba03a7c1a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
struct drm_crtc *crtc = &radeon_crtc->base;
unsigned long flags;
int r;
- int vpos, hpos, stat, min_udelay;
+ int vpos, hpos, stat, min_udelay = 0;
+ unsigned repcnt = 4;
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
down_read(&rdev->exclusive_lock);
@@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
* In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small.
*/
- for (;;) {
+ while (radeon_crtc->enabled && repcnt--) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in
* vpos.
@@ -472,10 +473,22 @@ static void radeon_flip_work_func(struct work_struct *__work)
/* Sleep at least until estimated real start of hw vblank */
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+ if (min_udelay > vblank->framedur_ns / 2000) {
+ /* Don't wait ridiculously long - something is wrong */
+ repcnt = 0;
+ break;
+ }
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
};
+ if (!repcnt)
+ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
+ "framedur %d, linedur %d, stat %d, vpos %d, "
+ "hpos %d\n", work->crtc_id, min_udelay,
+ vblank->framedur_ns / 1000,
+ vblank->linedur_ns / 1000, stat, vpos, hpos);
+
/* do the flip (mmio) */
radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
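The flip-work fix bounds the previously unbounded wait two ways: a repetition budget and a sanity cap on each individual delay relative to the frame duration. A sketch of that double bound (the numbers and the distance model are invented for illustration):

#include <stdio.h>

/* Bound a busy-wait two ways: a repetition budget and a cap on each
 * individual delay, so a wedged counter can't stall the worker. */
static int wait_for_vblank(int distance, int line_us, int frame_us)
{
	int tries = 4;

	while (tries-- > 0) {
		if (distance >= 0)
			return 0;		/* safe to program the flip */

		int delay_us = -distance * line_us;
		if (delay_us > frame_us / 2)
			break;			/* something is wrong; give up */
		/* usleep(delay_us) would go here */
		distance += delay_us / line_us;	/* pretend scanout advanced */
	}
	fprintf(stderr, "delay problem: distance %d\n", distance);
	return -1;
}

int main(void)
{
	printf("%d\n", wait_for_vblank(-3, 10, 16666));		/* converges: 0 */
	printf("%d\n", wait_for_vblank(-999999, 10, 16666));	/* capped: -1 */
	return 0;
}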
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 248c5a9fb0b6..0f14d897baf9 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1079,12 +1079,6 @@ force:
/* update display watermarks based on new power state */
radeon_bandwidth_update(rdev);
- /* update displays */
- radeon_dpm_display_configuration_changed(rdev);
-
- rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
- rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
- rdev->pm.dpm.single_display = single_display;
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1101,6 +1095,13 @@ force:
radeon_dpm_post_set_power_state(rdev);
+ /* update displays */
+ radeon_dpm_display_configuration_changed(rdev);
+
+ rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+ rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+ rdev->pm.dpm.single_display = single_display;
+
if (rdev->asic->dpm.force_performance_level) {
if (rdev->pm.dpm.thermal_active) {
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index 58704f0a4607..531d22025fec 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -25,6 +25,8 @@
*
**************************************************************************/
+#include <linux/kernel.h>
+
#ifdef __KERNEL__
#include <drm/vmwgfx_drm.h>
@@ -36,7 +38,6 @@
#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
#endif /* ARRAY_SIZE */
-#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
#define surf_size_struct SVGA3dSize
#define u32 uint32
@@ -987,12 +988,12 @@ svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
const surf_size_struct *pixel_size,
surf_size_struct *block_size)
{
- block_size->width = DIV_ROUND_UP(pixel_size->width,
- desc->block_size.width);
- block_size->height = DIV_ROUND_UP(pixel_size->height,
- desc->block_size.height);
- block_size->depth = DIV_ROUND_UP(pixel_size->depth,
- desc->block_size.depth);
+ block_size->width = __KERNEL_DIV_ROUND_UP(pixel_size->width,
+ desc->block_size.width);
+ block_size->height = __KERNEL_DIV_ROUND_UP(pixel_size->height,
+ desc->block_size.height);
+ block_size->depth = __KERNEL_DIV_ROUND_UP(pixel_size->depth,
+ desc->block_size.depth);
}
static inline bool
@@ -1100,8 +1101,9 @@ svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
const u32 bw = desc->block_size.width, bh = desc->block_size.height;
const u32 bd = desc->block_size.depth;
- const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
- const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+ const u32 rowstride = __KERNEL_DIV_ROUND_UP(width, bw) *
+ desc->bytes_per_block;
+ const u32 imgstride = __KERNEL_DIV_ROUND_UP(height, bh) * rowstride;
const u32 offset = (z / bd * imgstride +
y / bh * rowstride +
x / bw * desc->bytes_per_block);
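__KERNEL_DIV_ROUND_UP is the usual round-up division, which matters here because a partially covered compression block still occupies a full block of storage. The classic definition and a surface-sizing example:

#include <stdio.h>

/* Round-up integer division for positive operands: adding (d - 1)
 * pushes any nonzero remainder over the next multiple of d. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* A 100x50 surface with 4x4 compression blocks needs 25x13 blocks:
	 * the last row of blocks is only partially covered but still stored. */
	printf("%d x %d\n", DIV_ROUND_UP(100, 4), DIV_ROUND_UP(50, 4));
	return 0;
}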
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index da462afcb225..dd2dbb9746ce 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -18,6 +18,7 @@
#include <linux/host1x.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/of_device.h>
#include "bus.h"
#include "dev.h"
@@ -394,6 +395,7 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
device->dev.dma_mask = &device->dev.coherent_dma_mask;
dev_set_name(&device->dev, "%s", driver->driver.name);
+ of_dma_configure(&device->dev, host1x->dev->of_node);
device->dev.release = host1x_device_release;
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 314bf3718cc7..ff348690df94 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -23,6 +23,7 @@
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/dma-mapping.h>
#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
@@ -68,6 +69,7 @@ static const struct host1x_info host1x01_info = {
.nb_bases = 8,
.init = host1x01_init,
.sync_offset = 0x3000,
+ .dma_mask = DMA_BIT_MASK(32),
};
static const struct host1x_info host1x02_info = {
@@ -77,6 +79,7 @@ static const struct host1x_info host1x02_info = {
.nb_bases = 12,
.init = host1x02_init,
.sync_offset = 0x3000,
+ .dma_mask = DMA_BIT_MASK(32),
};
static const struct host1x_info host1x04_info = {
@@ -86,6 +89,7 @@ static const struct host1x_info host1x04_info = {
.nb_bases = 64,
.init = host1x04_init,
.sync_offset = 0x2100,
+ .dma_mask = DMA_BIT_MASK(34),
};
static const struct host1x_info host1x05_info = {
@@ -95,6 +99,7 @@ static const struct host1x_info host1x05_info = {
.nb_bases = 64,
.init = host1x05_init,
.sync_offset = 0x2100,
+ .dma_mask = DMA_BIT_MASK(34),
};
static struct of_device_id host1x_of_match[] = {
@@ -148,6 +153,8 @@ static int host1x_probe(struct platform_device *pdev)
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
+ dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
+
if (host->info->init) {
err = host->info->init(host);
if (err)
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 0b6e8e9629c5..dace124994bb 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -96,6 +96,7 @@ struct host1x_info {
int nb_mlocks; /* host1x: number of mlocks */
int (*init)(struct host1x *); /* initialize per SoC ops */
int sync_offset;
+ u64 dma_mask; /* mask of addressable memory */
};
struct host1x {
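The new dma_mask field feeds dma_set_mask_and_coherent(); DMA_BIT_MASK(n) is simply the all-ones value of the low n bits, with n == 64 special-cased to avoid an undefined 64-bit shift. A sketch reproducing that definition:

#include <stdint.h>
#include <stdio.h>

/* All-ones mask of the low n bits, matching the kernel's DMA_BIT_MASK(). */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* host1x01/02 address 32 bits of memory, host1x04/05 address 34 */
	printf("32-bit: 0x%llx\n", DMA_BIT_MASK(32));	/* 0xffffffff */
	printf("34-bit: 0x%llx\n", DMA_BIT_MASK(34));	/* 0x3ffffffff */
	return 0;
}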
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 3711df1d4526..4a45408dd820 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -586,8 +586,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(struct bsc_regs *),
- GFP_KERNEL);
+ dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(*dev->bsc_regmap), GFP_KERNEL);
if (!dev->bsc_regmap)
return -ENOMEM;
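The brcmstb fix replaces sizeof(struct bsc_regs *), which is the size of a pointer, with sizeof(*dev->bsc_regmap), which always tracks the pointee type. A small demonstration of the difference (the struct layout is invented):

#include <stdio.h>
#include <stdlib.h>

struct bsc_regs { int regs[16]; };	/* hypothetical register block */

int main(void)
{
	struct bsc_regs *map;

	/* The bug fixed above: sizeof(struct bsc_regs *) is the size of a
	 * pointer (8 on a 64-bit build), far smaller than the struct. */
	printf("pointer: %zu, struct: %zu\n",
	       sizeof(struct bsc_regs *), sizeof(struct bsc_regs));

	/* sizeof(*map) always tracks the pointee type, even if the
	 * declaration of 'map' changes later. */
	map = malloc(sizeof(*map));
	free(map);
	return 0;
}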
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 00da80e02154..94b80a51ab68 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -358,6 +358,7 @@ int ib_register_device(struct ib_device *device,
ret = device->query_device(device, &device->attrs, &uhw);
if (ret) {
printk(KERN_WARNING "Couldn't query the device attributes\n");
+ ib_cache_cleanup_one(device);
goto out;
}
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index f334090bb612..1e37f3515d98 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1071,7 +1071,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
}
}
- if (rec->hop_limit > 1 || use_roce) {
+ if (rec->hop_limit > 0 || use_roce) {
ah_attr->ah_flags = IB_AH_GRH;
ah_attr->grh.dgid = rec->dgid;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6ffc9c4e93af..6c6fbff19752 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1970,7 +1970,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
resp_size);
INIT_UDATA(&uhw, buf + sizeof(cmd),
(unsigned long)cmd.response + resp_size,
- in_len - sizeof(cmd), out_len - resp_size);
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - resp_size);
memset(&cmd_ex, 0, sizeof(cmd_ex));
cmd_ex.user_handle = cmd.user_handle;
@@ -3413,7 +3414,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
INIT_UDATA(&udata, buf + sizeof cmd,
(unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof resp);
ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
if (ret)
@@ -3439,7 +3441,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
INIT_UDATA(&udata, buf + sizeof cmd,
(unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof resp);
ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
if (ret)
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index fc01deac1d3c..db4aa13ebae0 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,6 +1,7 @@
config MLX4_INFINIBAND
tristate "Mellanox ConnectX HCA support"
depends on NETDEVICES && ETHERNET && PCI && INET
+ depends on MAY_USE_DEVLINK
select NET_VENDOR_MELLANOX
select MLX4_CORE
---help---
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1c7ab6cabbb8..a15a7b37d386 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -41,6 +41,7 @@
#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
+#include <net/devlink.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
@@ -2519,6 +2520,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
}
ibdev->ib_active = true;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
+ &ibdev->ib_dev);
if (mlx4_is_mfunc(ibdev->dev))
init_pkeys(ibdev);
@@ -2643,7 +2647,10 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
struct mlx4_ib_dev *ibdev = ibdev_ptr;
int p;
+ int i;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
ibdev->ib_active = false;
flush_workqueue(wq);
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 4659256cd95e..3b2ddd64a371 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -75,7 +75,8 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in,
- struct ib_udata *udata, int buf_size, int *inlen)
+ struct ib_udata *udata, int buf_size, int *inlen,
+ int is_xrc)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_create_srq ucmd = {};
@@ -87,13 +88,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
int ncont;
u32 offset;
u32 uidx = MLX5_IB_DEFAULT_UIDX;
- int drv_data = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
- if (drv_data < 0)
- return -EINVAL;
-
- ucmdlen = (drv_data < sizeof(ucmd)) ?
- drv_data : sizeof(ucmd);
+ ucmdlen = min(udata->inlen, sizeof(ucmd));
if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
mlx5_ib_dbg(dev, "failed copy udata\n");
@@ -103,15 +99,17 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
if (ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;
- if (drv_data > sizeof(ucmd) &&
+ if (udata->inlen > sizeof(ucmd) &&
!ib_is_udata_cleared(udata, sizeof(ucmd),
- drv_data - sizeof(ucmd)))
+ udata->inlen - sizeof(ucmd)))
return -EINVAL;
- err = get_srq_user_index(to_mucontext(pd->uobject->context),
- &ucmd, udata->inlen, &uidx);
- if (err)
- return err;
+ if (is_xrc) {
+ err = get_srq_user_index(to_mucontext(pd->uobject->context),
+ &ucmd, udata->inlen, &uidx);
+ if (err)
+ return err;
+ }
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
@@ -151,7 +149,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
- if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
+ if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
+ is_xrc) {
xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
@@ -170,7 +169,7 @@ err_umem:
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in, int buf_size,
- int *inlen)
+ int *inlen, int is_xrc)
{
int err;
int i;
@@ -224,7 +223,8 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
+ if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
+ is_xrc) {
xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
/* 0xffffff means we ask to work with cqe version 0 */
@@ -302,10 +302,14 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
srq->msrq.max_avail_gather);
+ is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
+
if (pd->uobject)
- err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
+ err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen,
+ is_xrc);
else
- err = create_srq_kernel(dev, srq, &in, buf_size, &inlen);
+ err = create_srq_kernel(dev, srq, &in, buf_size, &inlen,
+ is_xrc);
if (err) {
mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
@@ -313,7 +317,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
goto err_srq;
}
- is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
in->ctx.state_log_sz = ilog2(srq->msrq.max);
flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
xrcdn = 0;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index e5e223938eec..374c129219ef 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -114,6 +114,7 @@ struct kmem_cache *amd_iommu_irq_cache;
static void update_domain(struct protection_domain *domain);
static int protection_domain_init(struct protection_domain *domain);
+static void detach_device(struct device *dev);
/*
* For dynamic growth the aperture size is split into ranges of 128MB of
@@ -384,6 +385,9 @@ static void iommu_uninit_device(struct device *dev)
if (!dev_data)
return;
+ if (dev_data->domain)
+ detach_device(dev);
+
iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
dev);
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 013bdfff2d4d..bf4959f4225b 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -228,6 +228,10 @@ static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);
+static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
+ u8 bank, u8 cntr, u8 fxn,
+ u64 *value, bool is_write);
+
static inline void update_last_devid(u16 devid)
{
if (devid > amd_iommu_last_bdf)
@@ -1016,6 +1020,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
}
/*
+ * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
+ * Workaround:
+ * BIOS should enable ATS write permission check by setting
+ * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
+ */
+static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
+{
+ u32 value;
+
+ if ((boot_cpu_data.x86 != 0x15) ||
+ (boot_cpu_data.x86_model < 0x30) ||
+ (boot_cpu_data.x86_model > 0x3f))
+ return;
+
+ /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
+ value = iommu_read_l2(iommu, 0x47);
+
+ if (value & BIT(0))
+ return;
+
+ /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
+ iommu_write_l2(iommu, 0x47, value | BIT(0));
+
+ pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
+ dev_name(&iommu->dev->dev));
+}
+
+/*
 * This function glues the initialization function for one IOMMU
* together and also allocates the command buffer and programs the
* hardware. It does NOT enable the IOMMU. This is done afterwards.
@@ -1142,8 +1174,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
amd_iommu_pc_present = true;
/* Check if the performance counters can be written to */
- if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
- (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
+ if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
+ (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
(val != val2)) {
pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
amd_iommu_pc_present = false;
@@ -1284,6 +1316,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
}
amd_iommu_erratum_746_workaround(iommu);
+ amd_iommu_ats_write_check_workaround(iommu);
iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
amd_iommu_groups, "ivhd%d",
@@ -2283,22 +2316,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid)
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
+ u8 bank, u8 cntr, u8 fxn,
u64 *value, bool is_write)
{
- struct amd_iommu *iommu;
u32 offset;
u32 max_offset_lim;
- /* Make sure the IOMMU PC resource is available */
- if (!amd_iommu_pc_present)
- return -ENODEV;
-
- /* Locate the iommu associated with the device ID */
- iommu = amd_iommu_rlookup_table[devid];
-
/* Check for valid iommu and pc register indexing */
- if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
+ if (WARN_ON((fxn > 0x28) || (fxn & 7)))
return -ENODEV;
offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
@@ -2322,3 +2348,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
+
+int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+ u64 *value, bool is_write)
+{
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+ /* Make sure the IOMMU PC resource is available */
+ if (!amd_iommu_pc_present || iommu == NULL)
+ return -ENODEV;
+
+ return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
+ value, is_write);
+}
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index fb092f3f11cb..8ffd7568fc91 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -329,7 +329,8 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
/* Only care about add/remove events for physical functions */
if (pdev->is_virtfn)
return NOTIFY_DONE;
- if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
+ if (action != BUS_NOTIFY_ADD_DEVICE &&
+ action != BUS_NOTIFY_REMOVED_DEVICE)
return NOTIFY_DONE;
info = dmar_alloc_pci_notify_info(pdev, action);
@@ -339,7 +340,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
down_write(&dmar_global_lock);
if (action == BUS_NOTIFY_ADD_DEVICE)
dmar_pci_bus_add_dev(info);
- else if (action == BUS_NOTIFY_DEL_DEVICE)
+ else if (action == BUS_NOTIFY_REMOVED_DEVICE)
dmar_pci_bus_del_dev(info);
up_write(&dmar_global_lock);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 986a53e3eb96..a2e1b7f14df2 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4367,7 +4367,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
rmrru->devices_cnt);
if(ret < 0)
return ret;
- } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+ } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
dmar_remove_dev_scope(info, rmrr->segment,
rmrru->devices, rmrru->devices_cnt);
}
@@ -4387,7 +4387,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
break;
else if(ret < 0)
return ret;
- } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+ } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
if (dmar_remove_dev_scope(info, atsr->segment,
atsru->devices, atsru->devices_cnt))
break;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 0a73632b28d5..43dfd15c1dd2 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -78,6 +78,9 @@ struct its_node {
#define ITS_ITT_ALIGN SZ_256
+/* Convert page order to size in bytes */
+#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
+
struct event_lpi_map {
unsigned long *lpi_map;
u16 *col_map;
@@ -600,11 +603,6 @@ static void its_unmask_irq(struct irq_data *d)
lpi_set_config(d, true);
}
-static void its_eoi_irq(struct irq_data *d)
-{
- gic_write_eoir(d->hwirq);
-}
-
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
@@ -641,7 +639,7 @@ static struct irq_chip its_irq_chip = {
.name = "ITS",
.irq_mask = its_mask_irq,
.irq_unmask = its_unmask_irq,
- .irq_eoi = its_eoi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = its_set_affinity,
.irq_compose_msi_msg = its_irq_compose_msi_msg,
};
@@ -846,7 +844,6 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
u64 type = GITS_BASER_TYPE(val);
u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
int order = get_order(psz);
- int alloc_size;
int alloc_pages;
u64 tmp;
void *base;
@@ -878,9 +875,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
}
}
- alloc_size = (1 << order) * PAGE_SIZE;
retry_alloc_baser:
- alloc_pages = (alloc_size / psz);
+ alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
if (alloc_pages > GITS_BASER_PAGES_MAX) {
alloc_pages = GITS_BASER_PAGES_MAX;
order = get_order(GITS_BASER_PAGES_MAX * psz);
@@ -933,7 +929,7 @@ retry_baser:
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
if (!shr) {
cache = GITS_BASER_nC;
- __flush_dcache_area(base, alloc_size);
+ __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
}
goto retry_baser;
}
@@ -966,7 +962,7 @@ retry_baser:
}
pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
- (int)(alloc_size / entry_size),
+ (int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
its_base_type_string[type],
(unsigned long)virt_to_phys(base),
psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
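PAGE_ORDER_TO_SIZE() makes the order-to-bytes conversion explicit instead of recomputing (1 << order) * PAGE_SIZE at each use; an order-o allocation is 2^o contiguous pages. A tiny sketch:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* An order-o allocation is 2^o contiguous pages, so the size in bytes
 * is just PAGE_SIZE shifted left by the order. */
#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))

int main(void)
{
	for (int o = 0; o <= 3; o++)
		printf("order %d -> %lu bytes (%lu pages)\n",
		       o, PAGE_ORDER_TO_SIZE(o), PAGE_ORDER_TO_SIZE(o) / PAGE_SIZE);
	return 0;
}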
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5df40480228b..dd834927bc66 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1191,6 +1191,8 @@ static void dm_unprep_request(struct request *rq)
if (clone)
free_rq_clone(clone);
+ else if (!tio->md->queue->mq_ops)
+ free_rq_tio(tio);
}
/*
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index 7e9cbf757e95..fb7ed730d932 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -497,7 +497,7 @@ static int adp1653_probe(struct i2c_client *client,
if (!client->dev.platform_data) {
dev_err(&client->dev,
"Neither DT not platform data provided\n");
- return EINVAL;
+ return -EINVAL;
}
flash->platform_data = client->dev.platform_data;
}
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index f8dd7505b529..e1719ffdfb3d 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1960,10 +1960,9 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
}
/* tx 5v detect */
- tx_5v = io_read(sd, 0x70) & info->cable_det_mask;
+ tx_5v = irq_reg_0x70 & info->cable_det_mask;
if (tx_5v) {
v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
- io_write(sd, 0x71, tx_5v);
adv76xx_s_detect_tx_5v_ctrl(sd);
if (handled)
*handled = true;
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 8c54fd21022e..a13625722848 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -1843,8 +1843,7 @@ static void au0828_analog_create_entities(struct au0828_dev *dev)
ent->function = MEDIA_ENT_F_CONN_RF;
break;
default: /* AU0828_VMUX_DEBUG */
- ent->function = MEDIA_ENT_F_CONN_TEST;
- break;
+ continue;
}
ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 4c1903f781fc..0c6c17a1c59e 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -415,7 +415,7 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
delta = mftb() - psl_tb;
if (delta < 0)
delta = -delta;
- } while (cputime_to_usecs(delta) > 16);
+ } while (tb_to_ns(delta) > 16000);
return 0;
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index b6639ea0bf18..f6e4d9718035 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2232,6 +2232,7 @@ err_irq:
dma_release_channel(host->tx_chan);
if (host->rx_chan)
dma_release_channel(host->rx_chan);
+ pm_runtime_dont_use_autosuspend(host->dev);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
if (host->dbclk)
@@ -2253,6 +2254,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
dma_release_channel(host->tx_chan);
dma_release_channel(host->rx_chan);
+ pm_runtime_dont_use_autosuspend(host->dev);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
device_init_wakeup(&pdev->dev, false);
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 2a1b6e037e1a..0134ba32a057 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
vol->changing_leb = 1;
vol->ch_lnum = req->lnum;
- vol->upd_buf = vmalloc(req->bytes);
+ vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
if (!vol->upd_buf)
return -ENOMEM;
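The update buffer is now rounded up to the volume's minimum I/O unit; ALIGN(x, a) rounds x up to the next multiple of a by adding a-1 and clearing the low bits, which is equivalent to the kernel's ALIGN() when a is a power of two (as min_io_size is assumed to be here). A sketch:

#include <stdio.h>

/* Round x up to the next multiple of a power-of-two a. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* e.g. a 100-byte LEB change request on flash with 512-byte pages */
	printf("%d\n", ALIGN(100, 512));	/* 512 */
	printf("%d\n", ALIGN(512, 512));	/* 512: already aligned */
	printf("%d\n", ALIGN(513, 512));	/* 1024 */
	return 0;
}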
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index bc46be39549d..ad3d2e0cb191 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -904,6 +904,8 @@ static const struct of_device_id rcar_can_of_table[] __maybe_unused = {
{ .compatible = "renesas,can-r8a7779" },
{ .compatible = "renesas,can-r8a7790" },
{ .compatible = "renesas,can-r8a7791" },
+ { .compatible = "renesas,rcar-gen1-can" },
+ { .compatible = "renesas,rcar-gen2-can" },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_can_of_table);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 575790e8a75a..74a7dfecee27 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -843,7 +843,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (clear_intf)
mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00);
- if (eflag)
+ if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR))
mcp251x_write_bits(spi, EFLG, eflag, 0x00);
/* Update can state */
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index eb7192fab593..3400fd1cada7 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -281,11 +281,9 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
switch (urb->status) {
case 0:
dev->free_slots = dev->intr_in_buffer[1];
- if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){
- if (netif_queue_stopped(netdev)){
- netif_wake_queue(netdev);
- }
- }
+ if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH &&
+ netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
break;
case -ECONNRESET: /* unlink */
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 5eee62badf45..cbc99d5649af 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -826,9 +826,8 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
static void gs_destroy_candev(struct gs_can *dev)
{
unregister_candev(dev->netdev);
- free_candev(dev->netdev);
usb_kill_anchored_urbs(&dev->tx_submitted);
- kfree(dev);
+ free_candev(dev->netdev);
}
static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
@@ -913,12 +912,15 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
for (i = 0; i < icount; i++) {
dev->canch[i] = gs_make_candev(i, intf);
if (IS_ERR_OR_NULL(dev->canch[i])) {
+ /* save error code to return later */
+ rc = PTR_ERR(dev->canch[i]);
+
/* on failure destroy previously created candevs */
icount = i;
- for (i = 0; i < icount; i++) {
+ for (i = 0; i < icount; i++)
gs_destroy_candev(dev->canch[i]);
- dev->canch[i] = NULL;
- }
+
+ usb_kill_anchored_urbs(&dev->rx_submitted);
kfree(dev);
return rc;
}
@@ -939,16 +941,12 @@ static void gs_usb_disconnect(struct usb_interface *intf)
return;
}
- for (i = 0; i < GS_MAX_INTF; i++) {
- struct gs_can *can = dev->canch[i];
-
- if (!can)
- continue;
-
- gs_destroy_candev(can);
- }
+ for (i = 0; i < GS_MAX_INTF; i++)
+ if (dev->canch[i])
+ gs_destroy_candev(dev->canch[i]);
usb_kill_anchored_urbs(&dev->rx_submitted);
+ kfree(dev);
}
static const struct usb_device_id gs_usb_table[] = {
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index dd1ebaf48077..d72ccbdf53ec 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -106,6 +106,7 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
.port_join_bridge = mv88e6xxx_port_bridge_join,
.port_leave_bridge = mv88e6xxx_port_bridge_leave,
.port_stp_update = mv88e6xxx_port_stp_update,
+ .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
.port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
.port_vlan_add = mv88e6xxx_port_vlan_add,
.port_vlan_del = mv88e6xxx_port_vlan_del,
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index bbca36ac4f77..a41fa5043d77 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -327,6 +327,7 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
.port_join_bridge = mv88e6xxx_port_bridge_join,
.port_leave_bridge = mv88e6xxx_port_bridge_leave,
.port_stp_update = mv88e6xxx_port_stp_update,
+ .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
.port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
.port_vlan_add = mv88e6xxx_port_vlan_add,
.port_vlan_del = mv88e6xxx_port_vlan_del,
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index d98dc635b00b..d11c9d58cf10 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1087,12 +1087,32 @@ abort:
return ret;
}
-static int _mv88e6xxx_port_vlan_map_set(struct dsa_switch *ds, int port,
- u16 output_ports)
+static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct net_device *bridge = ps->ports[port].bridge_dev;
const u16 mask = (1 << ps->num_ports) - 1;
+ u16 output_ports = 0;
int reg;
+ int i;
+
+ /* allow CPU port or DSA link(s) to send frames to every port */
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+ output_ports = mask;
+ } else {
+ for (i = 0; i < ps->num_ports; ++i) {
+ /* allow sending frames to every group member */
+ if (bridge && ps->ports[i].bridge_dev == bridge)
+ output_ports |= BIT(i);
+
+ /* allow sending frames to CPU port and DSA link(s) */
+ if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
+ output_ports |= BIT(i);
+ }
+ }
+
+ /* prevent frames from going back out of the port they came in on */
+ output_ports &= ~BIT(port);
reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
if (reg < 0)
@@ -1458,16 +1478,122 @@ loadpurge:
return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}
-static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
- struct mv88e6xxx_vtu_stu_entry *entry)
+static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
+ u16 *old)
+{
+ u16 fid;
+ int ret;
+
+ /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
+ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
+ if (ret < 0)
+ return ret;
+
+ fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
+
+ if (new) {
+ ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
+ ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
+
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN,
+ ret);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
+ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_1);
+ if (ret < 0)
+ return ret;
+
+ fid |= (ret & PORT_CONTROL_1_FID_11_4_MASK) << 4;
+
+ if (new) {
+ ret &= ~PORT_CONTROL_1_FID_11_4_MASK;
+ ret |= (*new >> 4) & PORT_CONTROL_1_FID_11_4_MASK;
+
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1,
+ ret);
+ if (ret < 0)
+ return ret;
+
+ netdev_dbg(ds->ports[port], "FID %d (was %d)\n", *new, fid);
+ }
+
+ if (old)
+ *old = fid;
+
+ return 0;
+}
+
+static int _mv88e6xxx_port_fid_get(struct dsa_switch *ds, int port, u16 *fid)
+{
+ return _mv88e6xxx_port_fid(ds, port, NULL, fid);
+}
+
+static int _mv88e6xxx_port_fid_set(struct dsa_switch *ds, int port, u16 fid)
+{
+ return _mv88e6xxx_port_fid(ds, port, &fid, NULL);
+}
+
+static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
+ struct mv88e6xxx_vtu_stu_entry vlan;
+ int i, err;
+
+ bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
+
+ /* Set every FID bit used by the (un)bridged ports */
+ for (i = 0; i < ps->num_ports; ++i) {
+ err = _mv88e6xxx_port_fid_get(ds, i, fid);
+ if (err)
+ return err;
+
+ set_bit(*fid, fid_bitmap);
+ }
+
+ /* Set every FID bit used by the VLAN entries */
+ err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
+ if (err)
+ return err;
+
+ do {
+ err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+ if (err)
+ return err;
+
+ if (!vlan.valid)
+ break;
+
+ set_bit(vlan.fid, fid_bitmap);
+ } while (vlan.vid < GLOBAL_VTU_VID_MASK);
+
+ /* The reset value 0x000 is used to indicate that multiple address
+ * databases are not needed. Return the next available positive FID.
+ */
+ *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
+ if (unlikely(*fid == MV88E6XXX_N_FID))
+ return -ENOSPC;
+
+ /* Clear the database */
+ return _mv88e6xxx_atu_flush(ds, *fid, true);
+}
+
+static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
+ struct mv88e6xxx_vtu_stu_entry *entry)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct mv88e6xxx_vtu_stu_entry vlan = {
.valid = true,
.vid = vid,
- .fid = vid, /* We use one FID per VLAN */
};
- int i;
+ int i, err;
+
+ err = _mv88e6xxx_fid_new(ds, &vlan.fid);
+ if (err)
+ return err;
/* exclude all ports except the CPU and DSA ports */
for (i = 0; i < ps->num_ports; ++i)
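
The new _mv88e6xxx_port_fid() above splits the 12-bit FID across two registers: bits 3:0 sit in the top nibble of PORT_BASE_VLAN (0x06) and bits 11:4 in the low byte of PORT_CONTROL_1 (0x05). A standalone sketch of the split and recombination, reusing the masks this series adds to mv88e6xxx.h (the FID value is arbitrary):

    #include <stdio.h>

    #define PORT_BASE_VLAN_FID_3_0_MASK   (0xf << 12)  /* reg 0x06 */
    #define PORT_CONTROL_1_FID_11_4_MASK  (0xff << 0)  /* reg 0x05 */

    int main(void)
    {
        unsigned int fid = 0xabc;  /* any 12-bit FID */
        unsigned int reg06 = (fid << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
        unsigned int reg05 = (fid >> 4) & PORT_CONTROL_1_FID_11_4_MASK;
        unsigned int readback;

        /* recombine the two register fields, as the read path does */
        readback = (reg06 & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
        readback |= (reg05 & PORT_CONTROL_1_FID_11_4_MASK) << 4;

        printf("fid=0x%03x reg06=0x%04x reg05=0x%02x readback=0x%03x\n",
               fid, reg06, reg05, readback);
        return 0;
    }
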
@@ -1478,7 +1604,6 @@ static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
struct mv88e6xxx_vtu_stu_entry vstp;
- int err;
/* Adding a VTU entry requires a valid STU entry. As VSTP is not
* implemented, only one STU entry is needed to cover all VTU
@@ -1498,17 +1623,41 @@ static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
if (err)
return err;
}
-
- /* Clear all MAC addresses from the new database */
- err = _mv88e6xxx_atu_flush(ds, vlan.fid, true);
- if (err)
- return err;
}
*entry = vlan;
return 0;
}
+static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid,
+ struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
+{
+ int err;
+
+ if (!vid)
+ return -EINVAL;
+
+ err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
+ if (err)
+ return err;
+
+ err = _mv88e6xxx_vtu_getnext(ds, entry);
+ if (err)
+ return err;
+
+ if (entry->vid != vid || !entry->valid) {
+ if (!creat)
+ return -EOPNOTSUPP;
+ /* -ENOENT would've been more appropriate, but switchdev expects
+ * -EOPNOTSUPP to inform the bridge about a possible software VLAN.
+ */
+
+ err = _mv88e6xxx_vtu_new(ds, vid, entry);
+ }
+
+ return err;
+}
+
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid_begin, u16 vid_end)
{
@@ -1563,16 +1712,51 @@ unlock:
return err;
}
+static const char * const mv88e6xxx_port_8021q_mode_names[] = {
+ [PORT_CONTROL_2_8021Q_DISABLED] = "Disabled",
+ [PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback",
+ [PORT_CONTROL_2_8021Q_CHECK] = "Check",
+ [PORT_CONTROL_2_8021Q_SECURE] = "Secure",
+};
+
+int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
+ PORT_CONTROL_2_8021Q_DISABLED;
+ int ret;
+
+ mutex_lock(&ps->smi_mutex);
+
+ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_2);
+ if (ret < 0)
+ goto unlock;
+
+ old = ret & PORT_CONTROL_2_8021Q_MASK;
+
+ ret &= ~PORT_CONTROL_2_8021Q_MASK;
+ ret |= new & PORT_CONTROL_2_8021Q_MASK;
+
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_2, ret);
+ if (ret < 0)
+ goto unlock;
+
+ netdev_dbg(ds->ports[port], "802.1Q Mode: %s (was %s)\n",
+ mv88e6xxx_port_8021q_mode_names[new],
+ mv88e6xxx_port_8021q_mode_names[old]);
+unlock:
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans)
{
int err;
- /* We reserve a few VLANs to isolate unbridged ports */
- if (vlan->vid_end >= 4000)
- return -EOPNOTSUPP;
-
/* If the requested port doesn't belong to the same bridge as the VLAN
* members, do not support it (yet) and fallback to software VLAN.
*/
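
mv88e6xxx_port_vlan_filtering() above maps the bridge's vlan_filtering toggle onto the port's 802.1Q mode: Secure when filtering is on (frames with unknown VIDs are discarded), Disabled when it is off (tags are ignored). Fallback and Check exist in hardware but are left unused. A toy sketch of the mapping (not from the patch; the enum values are illustrative stand-ins for PORT_CONTROL_2_8021Q_*):

    #include <stdbool.h>
    #include <stdio.h>

    enum { Q_DISABLED, Q_FALLBACK, Q_CHECK, Q_SECURE };

    static const char * const mode_names[] = {
        "Disabled", "Fallback", "Check", "Secure",
    };

    int main(void)
    {
        bool vlan_filtering = true;
        int mode = vlan_filtering ? Q_SECURE : Q_DISABLED;

        printf("vlan_filtering=%d -> %s\n", vlan_filtering, mode_names[mode]);
        return 0;
    }
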
@@ -1593,20 +1777,10 @@ static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
struct mv88e6xxx_vtu_stu_entry vlan;
int err;
- err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
- if (err)
- return err;
-
- err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+ err = _mv88e6xxx_vtu_get(ds, vid, &vlan, true);
if (err)
return err;
- if (vlan.vid != vid || !vlan.valid) {
- err = _mv88e6xxx_vlan_init(ds, vid, &vlan);
- if (err)
- return err;
- }
-
vlan.data[port] = untagged ?
GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
@@ -1647,16 +1821,12 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
struct mv88e6xxx_vtu_stu_entry vlan;
int i, err;
- err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
+ err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
if (err)
return err;
- err = _mv88e6xxx_vtu_getnext(ds, &vlan);
- if (err)
- return err;
-
- if (vlan.vid != vid || !vlan.valid ||
- vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ /* Tell switchdev if this VLAN is handled in software */
+ if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
return -EOPNOTSUPP;
vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
@@ -1684,7 +1854,6 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- const u16 defpvid = 4000 + ds->index * DSA_MAX_PORTS + port;
u16 pvid, vid;
int err = 0;
@@ -1700,8 +1869,7 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
goto unlock;
if (vid == pvid) {
- /* restore reserved VLAN ID */
- err = _mv88e6xxx_port_pvid_set(ds, port, defpvid);
+ err = _mv88e6xxx_port_pvid_set(ds, port, 0);
if (err)
goto unlock;
}
@@ -1774,8 +1942,18 @@ static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
u8 state)
{
struct mv88e6xxx_atu_entry entry = { 0 };
+ struct mv88e6xxx_vtu_stu_entry vlan;
+ int err;
- entry.fid = vid; /* We use one FID per VLAN */
+ /* Null VLAN ID corresponds to the port private database */
+ if (vid == 0)
+ err = _mv88e6xxx_port_fid_get(ds, port, &vlan.fid);
+ else
+ err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
+ if (err)
+ return err;
+
+ entry.fid = vlan.fid;
entry.state = state;
ether_addr_copy(entry.mac, addr);
if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
@@ -1790,10 +1968,6 @@ int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans)
{
- /* We don't use per-port FDB */
- if (fdb->vid == 0)
- return -EOPNOTSUPP;
-
/* We don't need any dynamic resource from the kernel (yet),
* so skip the prepare phase.
*/
@@ -1880,6 +2054,47 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
return 0;
}
+static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid,
+ int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct mv88e6xxx_atu_entry addr = {
+ .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ };
+ int err;
+
+ err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
+ if (err)
+ return err;
+
+ do {
+ err = _mv88e6xxx_atu_getnext(ds, fid, &addr);
+ if (err)
+ break;
+
+ if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
+ break;
+
+ if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
+ bool is_static = addr.state ==
+ (is_multicast_ether_addr(addr.mac) ?
+ GLOBAL_ATU_DATA_STATE_MC_STATIC :
+ GLOBAL_ATU_DATA_STATE_UC_STATIC);
+
+ fdb->vid = vid;
+ ether_addr_copy(fdb->addr, addr.mac);
+ fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
+
+ err = cb(&fdb->obj);
+ if (err)
+ break;
+ }
+ } while (!is_broadcast_ether_addr(addr.mac));
+
+ return err;
+}
+
int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
int (*cb)(struct switchdev_obj *obj))
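
In _mv88e6xxx_port_fdb_dump_one() above, an entry counts as static when its ATU state equals the static state for its address class (multicast vs unicast), and that decides the ndm_state reported to switchdev. A minimal sketch of the translation (not from the patch; the NUD_* values mirror include/net/neighbour.h):

    #include <stdbool.h>
    #include <stdio.h>

    #define NUD_REACHABLE 0x02  /* dynamic entry, subject to ageing */
    #define NUD_NOARP     0x40  /* static entry, never aged out */

    int main(void)
    {
        bool is_static = true;  /* e.g. a unicast entry in UC_STATIC state */
        unsigned int ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;

        printf("ndm_state=0x%02x (%s)\n", ndm_state,
               is_static ? "static" : "dynamic");
        return 0;
    }
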
@@ -1888,55 +2103,37 @@ int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
struct mv88e6xxx_vtu_stu_entry vlan = {
.vid = GLOBAL_VTU_VID_MASK, /* all ones */
};
+ u16 fid;
int err;
mutex_lock(&ps->smi_mutex);
+ /* Dump port's default Filtering Information Database (VLAN ID 0) */
+ err = _mv88e6xxx_port_fid_get(ds, port, &fid);
+ if (err)
+ goto unlock;
+
+ err = _mv88e6xxx_port_fdb_dump_one(ds, fid, 0, port, fdb, cb);
+ if (err)
+ goto unlock;
+
+ /* Dump VLANs' Filtering Information Databases */
err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid);
if (err)
goto unlock;
do {
- struct mv88e6xxx_atu_entry addr = {
- .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- };
-
err = _mv88e6xxx_vtu_getnext(ds, &vlan);
if (err)
- goto unlock;
+ break;
if (!vlan.valid)
break;
- err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
+ err = _mv88e6xxx_port_fdb_dump_one(ds, vlan.fid, vlan.vid, port,
+ fdb, cb);
if (err)
- goto unlock;
-
- do {
- err = _mv88e6xxx_atu_getnext(ds, vlan.fid, &addr);
- if (err)
- goto unlock;
-
- if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
- break;
-
- if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
- bool is_static = addr.state ==
- (is_multicast_ether_addr(addr.mac) ?
- GLOBAL_ATU_DATA_STATE_MC_STATIC :
- GLOBAL_ATU_DATA_STATE_UC_STATIC);
-
- fdb->vid = vlan.vid;
- ether_addr_copy(fdb->addr, addr.mac);
- fdb->ndm_state = is_static ? NUD_NOARP :
- NUD_REACHABLE;
-
- err = cb(&fdb->obj);
- if (err)
- goto unlock;
- }
- } while (!is_broadcast_ether_addr(addr.mac));
-
+ break;
} while (vlan.vid < GLOBAL_VTU_VID_MASK);
unlock:
@@ -1949,32 +2146,76 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ u16 fid;
+ int i, err;
+
+ mutex_lock(&ps->smi_mutex);
+
+ /* Get or create the bridge FID and assign it to the port */
+ for (i = 0; i < ps->num_ports; ++i)
+ if (ps->ports[i].bridge_dev == bridge)
+ break;
+
+ if (i < ps->num_ports)
+ err = _mv88e6xxx_port_fid_get(ds, i, &fid);
+ else
+ err = _mv88e6xxx_fid_new(ds, &fid);
+ if (err)
+ goto unlock;
+
+ err = _mv88e6xxx_port_fid_set(ds, port, fid);
+ if (err)
+ goto unlock;
+ /* Assign the bridge and remap each port's VLANTable */
ps->ports[port].bridge_dev = bridge;
- return 0;
+ for (i = 0; i < ps->num_ports; ++i) {
+ if (ps->ports[i].bridge_dev == bridge) {
+ err = _mv88e6xxx_port_based_vlan_map(ds, i);
+ if (err)
+ break;
+ }
+ }
+
+unlock:
+ mutex_unlock(&ps->smi_mutex);
+
+ return err;
}
int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct net_device *bridge = ps->ports[port].bridge_dev;
+ u16 fid;
+ int i, err;
- ps->ports[port].bridge_dev = NULL;
+ mutex_lock(&ps->smi_mutex);
- return 0;
-}
+ /* Give the port a fresh Filtering Information Database */
+ err = _mv88e6xxx_fid_new(ds, &fid);
+ if (err)
+ goto unlock;
-static int mv88e6xxx_setup_port_default_vlan(struct dsa_switch *ds, int port)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
- int err;
+ err = _mv88e6xxx_port_fid_set(ds, port, fid);
+ if (err)
+ goto unlock;
- mutex_lock(&ps->smi_mutex);
- err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true);
- if (!err)
- err = _mv88e6xxx_port_pvid_set(ds, port, pvid);
+ /* Unassign the bridge and remap each port's VLANTable */
+ ps->ports[port].bridge_dev = NULL;
+
+ for (i = 0; i < ps->num_ports; ++i) {
+ if (i == port || ps->ports[i].bridge_dev == bridge) {
+ err = _mv88e6xxx_port_based_vlan_map(ds, i);
+ if (err)
+ break;
+ }
+ }
+
+unlock:
mutex_unlock(&ps->smi_mutex);
+
return err;
}
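
Bridge join and leave above both reduce to FID management: joining reuses the first group member's FID (or allocates one for a new bridge), leaving hands the port a fresh FID, and _mv88e6xxx_fid_new() scans a 4096-bit map in which FID 0 is reserved to mean "no separate database". A toy allocator with the same shape (not from the patch):

    #include <stdio.h>

    #define N_FID 4096

    static unsigned char used[N_FID];  /* stand-in for the FID bitmap */

    static int fid_new(void)
    {
        int fid;

        for (fid = 1; fid < N_FID; fid++) {  /* skip reserved FID 0 */
            if (!used[fid]) {
                used[fid] = 1;
                return fid;
            }
        }
        return -1;  /* the driver returns -ENOSPC here */
    }

    int main(void)
    {
        used[1] = used[2] = 1;  /* FIDs already held by other ports */
        printf("new bridge FID: %d\n", fid_new());  /* prints 3 */
        return 0;
    }
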
@@ -2098,7 +2339,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
}
/* Port Control 2: don't force a good FCS, set the maximum frame size to
- * 10240 bytes, enable secure 802.1q tags, don't discard tagged or
+ * 10240 bytes, disable 802.1q tag checking, don't discard tagged or
* untagged frames on this port, do a destination address lookup on all
* received packets as usual, disable ARP mirroring and don't send a
* copy of all transmitted/received frames on this port to the CPU.
@@ -2123,7 +2364,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
}
- reg |= PORT_CONTROL_2_8021Q_SECURE;
+ reg |= PORT_CONTROL_2_8021Q_DISABLED;
if (reg) {
ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
@@ -2220,12 +2461,15 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (ret)
goto abort;
- /* Port based VLAN map: do not give each port its own address
- * database, and allow every port to egress frames on all other ports.
+ /* Port based VLAN map: give each port its own address
+ * database, and allow bidirectional communication between the
+ * CPU and DSA port(s), and the other ports.
*/
- reg = BIT(ps->num_ports) - 1; /* all ports */
- reg &= ~BIT(port); /* except itself */
- ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg);
+ ret = _mv88e6xxx_port_fid_set(ds, port, port + 1);
+ if (ret)
+ goto abort;
+
+ ret = _mv88e6xxx_port_based_vlan_map(ds, port);
if (ret)
goto abort;
@@ -2249,13 +2493,6 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds)
ret = mv88e6xxx_setup_port(ds, i);
if (ret < 0)
return ret;
-
- if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
- continue;
-
- ret = mv88e6xxx_setup_port_default_vlan(ds, i);
- if (ret < 0)
- return ret;
}
return 0;
}
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 6a30bda63a2f..d7b088dd8e16 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -133,7 +133,9 @@
#define PORT_CONTROL_STATE_LEARNING 0x02
#define PORT_CONTROL_STATE_FORWARDING 0x03
#define PORT_CONTROL_1 0x05
+#define PORT_CONTROL_1_FID_11_4_MASK (0xff << 0)
#define PORT_BASE_VLAN 0x06
+#define PORT_BASE_VLAN_FID_3_0_MASK (0xf << 12)
#define PORT_DEFAULT_VLAN 0x07
#define PORT_DEFAULT_VLAN_MASK 0xfff
#define PORT_CONTROL_2 0x08
@@ -355,6 +357,8 @@
#define GLOBAL2_QOS_WEIGHT 0x1c
#define GLOBAL2_MISC 0x1d
+#define MV88E6XXX_N_FID 4096
+
struct mv88e6xxx_switch_id {
u16 id;
char *name;
@@ -486,6 +490,8 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *bridge);
int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port);
int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state);
+int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering);
int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 7b881edc0b0a..d81fceddbe0e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2455,7 +2455,7 @@ boomerang_interrupt(int irq, void *dev_id)
int i;
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
- le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+ le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
PCI_DMA_TODEVICE);
for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 17472851674f..f749e4d389eb 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -193,7 +193,6 @@ static void altera_tse_mdio_destroy(struct net_device *dev)
priv->mdio->id);
mdiobus_unregister(priv->mdio);
- kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
priv->mdio = NULL;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 33606840ae15..ebf9224b2d31 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1632,7 +1632,7 @@ static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
u8 tc;
- if (handle != TC_H_ROOT || tc_to_netdev->type != TC_SETUP_MQPRIO)
+ if (tc_to_netdev->type != TC_SETUP_MQPRIO)
return -EINVAL;
tc = tc_to_netdev->tc;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 8b5988e210d5..d0084d4d1a9b 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -65,10 +65,6 @@ static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter);
static int atl1c_configure(struct atl1c_adapter *adapter);
static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter);
-static const u16 atl1c_pay_load_size[] = {
- 128, 256, 512, 1024, 2048, 4096,
-};
-
static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index f71ab2647a3b..08a23e6b60e9 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1460,7 +1460,19 @@ static int nb8800_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (of_phy_is_fixed_link(pdev->dev.of_node)) {
+ ret = of_phy_register_fixed_link(pdev->dev.of_node);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "bad fixed-link spec\n");
+ goto err_free_bus;
+ }
+ priv->phy_node = of_node_get(pdev->dev.of_node);
+ }
+
+ if (!priv->phy_node)
+ priv->phy_node = of_parse_phandle(pdev->dev.of_node,
+ "phy-handle", 0);
+
if (!priv->phy_node) {
dev_err(&pdev->dev, "no PHY specified\n");
ret = -ENODEV;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 45843d150868..a949783c8fc3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4275,7 +4275,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
struct tc_to_netdev *tc)
{
- if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
+ if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
return bnx2x_setup_tc(dev, tc->tc);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 04523d41b873..f8b810313094 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -4901,9 +4901,9 @@ struct c2s_pri_trans_table_entry {
* cfc delete event data
*/
struct cfc_del_event_data {
- u32 cid;
- u32 reserved0;
- u32 reserved1;
+ __le32 cid;
+ __le32 reserved0;
+ __le32 reserved1;
};
@@ -5119,15 +5119,9 @@ struct vf_pf_channel_zone_trigger {
* zone that triggers the in-bound interrupt
*/
struct trigger_vf_zone {
-#if defined(__BIG_ENDIAN)
- u16 reserved1;
- u8 reserved0;
- struct vf_pf_channel_zone_trigger vf_pf_channel;
-#elif defined(__LITTLE_ENDIAN)
struct vf_pf_channel_zone_trigger vf_pf_channel;
u8 reserved0;
u16 reserved1;
-#endif
u32 reserved2;
};
@@ -5212,9 +5206,9 @@ struct e2_integ_data {
* set mac event data
*/
struct eth_event_data {
- u32 echo;
- u32 reserved0;
- u32 reserved1;
+ __le32 echo;
+ __le32 reserved0;
+ __le32 reserved1;
};
@@ -5224,9 +5218,9 @@ struct eth_event_data {
struct vf_pf_event_data {
u8 vf_id;
u8 reserved0;
- u16 reserved1;
- u32 msg_addr_lo;
- u32 msg_addr_hi;
+ __le16 reserved1;
+ __le32 msg_addr_lo;
+ __le32 msg_addr_hi;
};
/*
@@ -5235,9 +5229,9 @@ struct vf_pf_event_data {
struct vf_flr_event_data {
u8 vf_id;
u8 reserved0;
- u16 reserved1;
- u32 reserved2;
- u32 reserved3;
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 reserved3;
};
/*
@@ -5246,9 +5240,9 @@ struct vf_flr_event_data {
struct malicious_vf_event_data {
u8 vf_id;
u8 err_id;
- u16 reserved1;
- u32 reserved2;
- u32 reserved3;
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 reserved3;
};
/*
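
The bnx2x_hsi.h re-typing above marks firmware-written event fields as __le16/__le32 so sparse flags any access that skips the le*_to_cpu() conversion, which only matters on big-endian hosts. A portable sketch of decoding one little-endian field from a raw firmware buffer (not from the patch; the echo value is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* decode a 32-bit little-endian value regardless of host endianness */
    static uint32_t get_le32(const uint8_t *p)
    {
        return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
        /* bytes as a little-endian firmware would write echo = 0x00345678 */
        const uint8_t buf[4] = { 0x78, 0x56, 0x34, 0x00 };

        printf("echo = 0x%08x\n", get_le32(buf));  /* same on any host */
        return 0;
    }
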
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5c95d0c3b076..b597c32275aa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -5282,14 +5282,14 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
{
unsigned long ramrod_flags = 0;
int rc = 0;
- u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
+ u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
+ u32 cid = echo & BNX2X_SWCID_MASK;
struct bnx2x_vlan_mac_obj *vlan_mac_obj;
/* Always push next commands out, don't wait here */
__set_bit(RAMROD_CONT, &ramrod_flags);
- switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
- >> BNX2X_SWCID_SHIFT) {
+ switch (echo >> BNX2X_SWCID_SHIFT) {
case BNX2X_FILTER_MAC_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
@@ -5310,8 +5310,7 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
bnx2x_handle_mcast_eqe(bp);
return;
default:
- BNX2X_ERR("Unsupported classification command: %d\n",
- elem->message.data.eth_event.echo);
+ BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
return;
}
@@ -5480,9 +5479,6 @@ static void bnx2x_eq_int(struct bnx2x *bp)
goto next_spqe;
}
- /* elem CID originates from FW; actually LE */
- cid = SW_CID((__force __le32)
- elem->message.data.cfc_del_event.cid);
opcode = elem->message.opcode;
/* handle eq element */
@@ -5505,6 +5501,10 @@ static void bnx2x_eq_int(struct bnx2x *bp)
* we may want to verify here that the bp state is
* HALTING
*/
+
+ /* elem CID originates from FW; actually LE */
+ cid = SW_CID(elem->message.data.cfc_del_event.cid);
+
DP(BNX2X_MSG_SP,
"got delete ramrod for MULTI[%d]\n", cid);
@@ -5598,10 +5598,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
BNX2X_STATE_OPENING_WAIT4_PORT):
case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
BNX2X_STATE_CLOSING_WAIT4_HALT):
- cid = elem->message.data.eth_event.echo &
- BNX2X_SWCID_MASK;
DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
- cid);
+ SW_CID(elem->message.data.eth_event.echo));
rss_raw->clear_pending(rss_raw);
break;
@@ -5686,7 +5684,7 @@ static void bnx2x_sp_task(struct work_struct *work)
if (status & BNX2X_DEF_SB_IDX) {
struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
- if (FCOE_INIT(bp) &&
+ if (FCOE_INIT(bp) &&
(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
/* Prevent local bottom-halves from running as
* we are going to change the local NAPI list.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 9d027348cd09..632daff117d3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1672,11 +1672,12 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
{
unsigned long ramrod_flags = 0;
int rc = 0;
+ u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
/* Always push next commands out, don't wait here */
set_bit(RAMROD_CONT, &ramrod_flags);
- switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+ switch (echo >> BNX2X_SWCID_SHIFT) {
case BNX2X_FILTER_MAC_PENDING:
rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
&ramrod_flags);
@@ -1686,8 +1687,7 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
&ramrod_flags);
break;
default:
- BNX2X_ERR("Unsupported classification command: %d\n",
- elem->message.data.eth_event.echo);
+ BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
return;
}
if (rc < 0)
@@ -1747,16 +1747,14 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
switch (opcode) {
case EVENT_RING_OPCODE_CFC_DEL:
- cid = SW_CID((__force __le32)
- elem->message.data.cfc_del_event.cid);
+ cid = SW_CID(elem->message.data.cfc_del_event.cid);
DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
break;
case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
case EVENT_RING_OPCODE_MULTICAST_RULES:
case EVENT_RING_OPCODE_FILTERS_RULES:
case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
- cid = (elem->message.data.eth_event.echo &
- BNX2X_SWCID_MASK);
+ cid = SW_CID(elem->message.data.eth_event.echo);
DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
break;
case EVENT_RING_OPCODE_VF_FLR:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 1374e5394a79..bfae300cf25f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -2187,8 +2187,10 @@ void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
/* Update VFDB with current message and schedule its handling */
mutex_lock(&BP_VFDB(bp)->event_mutex);
- BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
- BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
+ BP_VF_MBX(bp, vf_idx)->vf_addr_hi =
+ le32_to_cpu(vfpf_event->msg_addr_hi);
+ BP_VF_MBX(bp, vf_idx)->vf_addr_lo =
+ le32_to_cpu(vfpf_event->msg_addr_lo);
BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
mutex_unlock(&BP_VFDB(bp)->event_mutex);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ff1507f3e226..4dfc25f042e8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -248,7 +248,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
- end = PTR_ALIGN(pdata + length + 1, 8) - 1;
+ end = pdata + length;
+ end = PTR_ALIGN(end, 8) - 1;
*end = 0;
skb_copy_from_linear_data(skb, pdata, len);
@@ -1239,13 +1240,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
switch (event_id) {
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+ set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
break;
default:
netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
event_id);
- break;
+ goto async_event_process_exit;
}
+ schedule_work(&bp->sp_task);
+async_event_process_exit:
return 0;
}
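
bnxt_async_event_process() above now funnels every recognized event through a single schedule_work() call: known IDs set their flag bit and break, unknown IDs jump past the scheduling entirely. The control-flow shape, sketched standalone (not from the patch; the event IDs are illustrative):

    #include <stdio.h>

    enum { EV_LINK_CHANGE = 0x0, EV_PF_UNLOAD = 0x20, EV_BOGUS = 0xff };

    static void process_async_event(int event_id)
    {
        switch (event_id) {
        case EV_LINK_CHANGE:
        case EV_PF_UNLOAD:
            break;  /* would set a flag bit, then fall through to schedule */
        default:
            printf("unhandled ASYNC event (id 0x%x)\n", event_id);
            return;  /* skip scheduling altogether */
        }
        printf("schedule_work() for event 0x%x\n", event_id);
    }

    int main(void)
    {
        process_async_event(EV_PF_UNLOAD);
        process_async_event(EV_BOGUS);
        return 0;
    }
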
@@ -2596,28 +2601,27 @@ alloc_mem_err:
void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
u16 cmpl_ring, u16 target_id)
{
- struct hwrm_cmd_req_hdr *req = request;
+ struct input *req = request;
- req->cmpl_ring_req_type =
- cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
- req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
+ req->req_type = cpu_to_le16(req_type);
+ req->cmpl_ring = cpu_to_le16(cmpl_ring);
+ req->target_id = cpu_to_le16(target_id);
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}
-int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
+ int timeout, bool silent)
{
int i, intr_process, rc;
- struct hwrm_cmd_req_hdr *req = msg;
+ struct input *req = msg;
u32 *data = msg;
__le32 *resp_len, *valid;
u16 cp_ring_id, len = 0;
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
- req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
+ req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
memset(resp, 0, PAGE_SIZE);
- cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
- HWRM_CMPL_RING_MASK) >>
- HWRM_CMPL_RING_SFT;
+ cp_ring_id = le16_to_cpu(req->cmpl_ring);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
/* Write request msg to hwrm channel */
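
The hunk above swaps the driver-local hwrm_cmd_req_hdr for the firmware's common struct input, whose fields are individual little-endian 16-bit values instead of two packed u32s, so the shift-and-mask plumbing disappears. A sketch of the header being filled in (not from the patch; the field layout follows the struct input used here, and the HWRM_VER_GET value is an assumption):

    #include <stdint.h>
    #include <stdio.h>

    struct input_sketch {
        uint16_t req_type;   /* __le16 in the driver */
        uint16_t cmpl_ring;  /* __le16, 0xffff means no completion ring */
        uint16_t seq_id;     /* __le16, per-command sequence number */
        uint16_t target_id;  /* __le16, 0xffff targets the function itself */
        uint64_t resp_addr;  /* __le64 DMA address of the response buffer */
    };

    int main(void)
    {
        struct input_sketch req = {
            .req_type = 0x0000,   /* e.g. HWRM_VER_GET */
            .cmpl_ring = 0xffff,  /* -1: poll for completion */
            .target_id = 0xffff,  /* -1: no forwarding */
        };

        printf("hdr: type=0x%04x cmpl=0x%04x tgt=0x%04x\n",
               req.req_type, req.cmpl_ring, req.target_id);
        return 0;
    }
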
@@ -2628,12 +2632,14 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
/* currently supports only one outstanding message */
if (intr_process)
- bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
- HWRM_SEQ_ID_MASK;
+ bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
/* Ring channel doorbell */
writel(1, bp->bar0 + 0x100);
+ if (!timeout)
+ timeout = DFLT_HWRM_CMD_TIMEOUT;
+
i = 0;
if (intr_process) {
/* Wait until hwrm response cmpl interrupt is processed */
@@ -2644,7 +2650,7 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
- req->cmpl_ring_req_type);
+ le16_to_cpu(req->req_type));
return -1;
}
} else {
@@ -2660,8 +2666,8 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
if (i >= timeout) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
- timeout, req->cmpl_ring_req_type,
- req->target_id_seq_id, *resp_len);
+ timeout, le16_to_cpu(req->req_type),
+ le16_to_cpu(req->seq_id), *resp_len);
return -1;
}
@@ -2675,20 +2681,23 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
if (i >= timeout) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
- timeout, req->cmpl_ring_req_type,
- req->target_id_seq_id, len, *valid);
+ timeout, le16_to_cpu(req->req_type),
+ le16_to_cpu(req->seq_id), len, *valid);
return -1;
}
}
rc = le16_to_cpu(resp->error_code);
- if (rc) {
+ if (rc && !silent)
netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
le16_to_cpu(resp->req_type),
le16_to_cpu(resp->seq_id), rc);
- return rc;
- }
- return 0;
+ return rc;
+}
+
+int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+ return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
}
int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
@@ -2701,6 +2710,17 @@ int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
return rc;
}
+int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
+ int timeout)
+{
+ int rc;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
struct hwrm_func_drv_rgtr_input req = {0};
@@ -3517,47 +3537,82 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
+ u32 buf_tmrs, u16 flags,
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+{
+ req->flags = cpu_to_le16(flags);
+ req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
+ req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
+ req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
+ req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
+ /* Minimum time between 2 interrupts set to buf_tmr x 2 */
+ req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
+ req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
+ req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
+}
+
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
int i, rc = 0;
- struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
+ req_tx = {0}, *req;
u16 max_buf, max_buf_irq;
u16 buf_tmr, buf_tmr_irq;
u32 flags;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
- -1, -1);
+ bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
+ bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
- /* Each rx completion (2 records) should be DMAed immediately */
- max_buf = min_t(u16, bp->coal_bufs / 4, 2);
+ /* Each rx completion (2 records) should be DMAed immediately.
+ * DMA 1/4 of the completion buffers at a time.
+ */
+ max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
/* max_buf must not be zero */
max_buf = clamp_t(u16, max_buf, 1, 63);
- max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
- buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
- buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
+ max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
+ buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
+ /* buf timer set to 1/4 of interrupt timer */
+ buf_tmr = max_t(u16, buf_tmr / 4, 1);
+ buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
+ buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
/* RING_IDLE generates more IRQs for lower latency. Enable it only
* if coal_ticks is less than 25 us.
*/
- if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
+ if (bp->rx_coal_ticks < 25)
flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
- req.flags = cpu_to_le16(flags);
- req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
- req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
- req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
- req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
- req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
- req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
- req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
+ bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
+ buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
+
+ /* max_buf must not be zero */
+ max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
+ max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
+ buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
+ /* buf timer set to 1/4 of interrupt timer */
+ buf_tmr = max_t(u16, buf_tmr / 4, 1);
+ buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
+ buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
+
+ flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
+ buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < bp->cp_nr_rings; i++) {
- req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+ struct bnxt_napi *bnapi = bp->bnapi[i];
- rc = _hwrm_send_message(bp, &req, sizeof(req),
+ req = &req_rx;
+ if (!bnapi->rx_ring)
+ req = &req_tx;
+ req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+
+ rc = _hwrm_send_message(bp, req, sizeof(*req),
HWRM_CMD_TIMEOUT);
if (rc)
break;
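
bnxt_hwrm_set_coal_params() above takes each pair of u16 knobs packed into one u32 with the in-IRQ value in the high half. With the new defaults, rx yields max_buf = 30 / 4 = 7 and buf_tmr = BNXT_USEC_TO_COAL_TIMER(12) / 4 = 150 / 4 = 37, since one coalescing tick is 80 ns (25/2 ticks per microsecond). A sketch of the packing and the (u16) / >> 16 unpacking (not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t max_buf = 7, max_buf_irq = 2;    /* buffers: normal, in-IRQ */
        uint16_t buf_tmr = 37, buf_tmr_irq = 12;  /* ticks: normal, in-IRQ */

        uint32_t max_bufs = (uint32_t)max_buf_irq << 16 | max_buf;
        uint32_t buf_tmrs = (uint32_t)buf_tmr_irq << 16 | buf_tmr;

        /* the callee recovers the halves exactly as shown above */
        printf("max_bufs=0x%08x -> %u/%u\n", max_bufs,
               (unsigned)(uint16_t)max_bufs, (unsigned)(max_bufs >> 16));
        printf("buf_tmrs=0x%08x -> %u/%u\n", buf_tmrs,
               (unsigned)(uint16_t)buf_tmrs, (unsigned)(buf_tmrs >> 16));
        return 0;
    }
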
@@ -3766,10 +3821,14 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_intf_upd);
netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
}
- snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
+ snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
+ bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
+ if (!bp->hwrm_cmd_timeout)
+ bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -5291,10 +5350,16 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
- bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
- bp->coal_bufs = 20;
- bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
- bp->coal_bufs_irq = 2;
+ /* tick values in micro seconds */
+ bp->rx_coal_ticks = 12;
+ bp->rx_coal_bufs = 30;
+ bp->rx_coal_ticks_irq = 1;
+ bp->rx_coal_bufs_irq = 2;
+
+ bp->tx_coal_ticks = 25;
+ bp->tx_coal_bufs = 30;
+ bp->tx_coal_ticks_irq = 2;
+ bp->tx_coal_bufs_irq = 2;
init_timer(&bp->timer);
bp->timer.data = (unsigned long)bp;
@@ -5383,7 +5448,7 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
struct bnxt *bp = netdev_priv(dev);
u8 tc;
- if (handle != TC_H_ROOT || ntc->type != TC_SETUP_MQPRIO)
+ if (ntc->type != TC_SETUP_MQPRIO)
return -EINVAL;
tc = ntc->tc;
@@ -5559,6 +5624,8 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
}
}
}
+ if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
+ netdev_info(bp->dev, "Receive PF driver unload event!");
}
#else
@@ -5674,7 +5741,6 @@ static int bnxt_probe_phy(struct bnxt *bp)
{
int rc = 0;
struct bnxt_link_info *link_info = &bp->link_info;
- char phy_ver[PHY_VER_STR_LEN];
rc = bnxt_update_link(bp, false);
if (rc) {
@@ -5694,11 +5760,6 @@ static int bnxt_probe_phy(struct bnxt *bp)
link_info->req_duplex = link_info->duplex_setting;
link_info->req_flow_ctrl = link_info->force_pause_setting;
}
- snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
- link_info->phy_ver[0],
- link_info->phy_ver[1],
- link_info->phy_ver[2]);
- strcat(bp->fw_ver_str, phy_ver);
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2be51b332652..9aa38f57601b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -477,12 +477,15 @@ struct rx_tpa_end_cmp_ext {
#define RING_CMP(idx) ((idx) & bp->cp_ring_mask)
#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
-#define HWRM_CMD_TIMEOUT 500
+#define DFLT_HWRM_CMD_TIMEOUT 500
+#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
#define HWRM_RESP_ERR_CODE_MASK 0xffff
+#define HWRM_RESP_LEN_OFFSET 4
#define HWRM_RESP_LEN_MASK 0xffff0000
#define HWRM_RESP_LEN_SFT 16
#define HWRM_RESP_VALID_MASK 0xff000000
+#define HWRM_SEQ_ID_INVALID -1
#define BNXT_HWRM_REQ_MAX_SIZE 128
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
BNXT_HWRM_REQ_MAX_SIZE)
@@ -644,19 +647,6 @@ struct bnxt_irq {
#define INVALID_STATS_CTX_ID -1
-struct hwrm_cmd_req_hdr {
-#define HWRM_CMPL_RING_MASK 0xffff0000
-#define HWRM_CMPL_RING_SFT 16
- __le32 cmpl_ring_req_type;
-#define HWRM_SEQ_ID_MASK 0xffff
-#define HWRM_SEQ_ID_INVALID -1
-#define HWRM_RESP_LEN_OFFSET 4
-#define HWRM_TARGET_FID_MASK 0xffff0000
-#define HWRM_TARGET_FID_SFT 16
- __le32 target_id_seq_id;
- __le64 resp_addr;
-};
-
struct bnxt_ring_grp_info {
u16 fw_stats_ctx;
u16 fw_grp_id;
@@ -957,6 +947,7 @@ struct bnxt {
void *hwrm_dbg_resp_addr;
dma_addr_t hwrm_dbg_resp_dma_addr;
#define HWRM_DBG_REG_BUF_SIZE 128
+ int hwrm_cmd_timeout;
struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
struct hwrm_ver_get_output ver_resp;
#define FW_VER_STR_LEN 32
@@ -968,13 +959,17 @@ struct bnxt {
__le16 vxlan_fw_dst_port_id;
u8 nge_port_cnt;
__le16 nge_fw_dst_port_id;
- u16 coal_ticks;
- u16 coal_ticks_irq;
- u16 coal_bufs;
- u16 coal_bufs_irq;
+
+ u16 rx_coal_ticks;
+ u16 rx_coal_ticks_irq;
+ u16 rx_coal_bufs;
+ u16 rx_coal_bufs_irq;
+ u16 tx_coal_ticks;
+ u16 tx_coal_ticks_irq;
+ u16 tx_coal_bufs;
+ u16 tx_coal_bufs_irq;
#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
-#define BNXT_COAL_TIMER_TO_USEC(x) ((x) * 2 / 25)
struct work_struct sp_task;
unsigned long sp_event;
@@ -986,6 +981,7 @@ struct bnxt {
#define BNXT_VXLAN_DEL_PORT_SP_EVENT 5
#define BNXT_RESET_TASK_SP_EVENT 6
#define BNXT_RST_RING_SP_EVENT 7
+#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
@@ -1099,6 +1095,7 @@ void bnxt_set_ring_params(struct bnxt *);
void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message(struct bnxt *, void *, u32, int);
+int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_set_coal(struct bnxt *);
int bnxt_hwrm_func_qcaps(struct bnxt *);
int bnxt_hwrm_set_pause(struct bnxt *);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 3238817dfd5f..84ea26d6f3ff 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -7,6 +7,7 @@
* the Free Software Foundation.
*/
+#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -20,6 +21,8 @@
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
+static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen);
+
static u32 bnxt_get_msglevel(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -41,12 +44,16 @@ static int bnxt_get_coalesce(struct net_device *dev,
memset(coal, 0, sizeof(*coal));
- coal->rx_coalesce_usecs =
- max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks), 1);
- coal->rx_max_coalesced_frames = bp->coal_bufs / 2;
- coal->rx_coalesce_usecs_irq =
- max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks_irq), 1);
- coal->rx_max_coalesced_frames_irq = bp->coal_bufs_irq / 2;
+ coal->rx_coalesce_usecs = bp->rx_coal_ticks;
+ /* 2 completion records per rx packet */
+ coal->rx_max_coalesced_frames = bp->rx_coal_bufs / 2;
+ coal->rx_coalesce_usecs_irq = bp->rx_coal_ticks_irq;
+ coal->rx_max_coalesced_frames_irq = bp->rx_coal_bufs_irq / 2;
+
+ coal->tx_coalesce_usecs = bp->tx_coal_ticks;
+ coal->tx_max_coalesced_frames = bp->tx_coal_bufs;
+ coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq;
+ coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;
return 0;
}
@@ -57,11 +64,16 @@ static int bnxt_set_coalesce(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs);
- bp->coal_bufs = coal->rx_max_coalesced_frames * 2;
- bp->coal_ticks_irq =
- BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs_irq);
- bp->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
+ bp->rx_coal_ticks = coal->rx_coalesce_usecs;
+ /* 2 completion records per rx packet */
+ bp->rx_coal_bufs = coal->rx_max_coalesced_frames * 2;
+ bp->rx_coal_ticks_irq = coal->rx_coalesce_usecs_irq;
+ bp->rx_coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
+
+ bp->tx_coal_ticks = coal->tx_coalesce_usecs;
+ bp->tx_coal_bufs = coal->tx_max_coalesced_frames;
+ bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq;
+ bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;
if (netif_running(dev))
rc = bnxt_hwrm_set_coal(bp);
@@ -460,10 +472,20 @@ static void bnxt_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bnxt *bp = netdev_priv(dev);
+ char *pkglog;
+ char *pkgver = NULL;
+ pkglog = kmalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
+ if (pkglog)
+ pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+ if (pkgver && *pkgver != 0 && isdigit(*pkgver))
+ snprintf(info->fw_version, sizeof(info->fw_version) - 1,
+ "%s pkg %s", bp->fw_ver_str, pkgver);
+ else
+ strlcpy(info->fw_version, bp->fw_ver_str,
+ sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
info->testinfo_len = BNXT_NUM_TESTS(bp);
@@ -471,6 +493,7 @@ static void bnxt_get_drvinfo(struct net_device *dev,
info->eedump_len = 0;
/* TODO CHIMP FW: reg dump details */
info->regdump_len = 0;
+ kfree(pkglog);
}
static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
@@ -1102,6 +1125,85 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
return rc;
}
+static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ struct hwrm_nvm_find_dir_entry_input req = {0};
+ struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
+ req.enables = 0;
+ req.dir_idx = 0;
+ req.dir_type = cpu_to_le16(type);
+ req.dir_ordinal = cpu_to_le16(ordinal);
+ req.dir_ext = cpu_to_le16(ext);
+ req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == 0) {
+ if (index)
+ *index = le16_to_cpu(output->dir_idx);
+ if (item_length)
+ *item_length = le32_to_cpu(output->dir_item_length);
+ if (data_length)
+ *data_length = le32_to_cpu(output->dir_data_length);
+ }
+ return rc;
+}
+
+static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
+{
+ char *retval = NULL;
+ char *p;
+ char *value;
+ int field = 0;
+
+ if (datalen < 1)
+ return NULL;
+ /* null-terminate the log data (removing last '\n'): */
+ data[datalen - 1] = 0;
+ for (p = data; *p != 0; p++) {
+ field = 0;
+ retval = NULL;
+ while (*p != 0 && *p != '\n') {
+ value = p;
+ while (*p != 0 && *p != '\t' && *p != '\n')
+ p++;
+ if (field == desired_field)
+ retval = value;
+ if (*p != '\t')
+ break;
+ *p = 0;
+ field++;
+ p++;
+ }
+ if (*p == 0)
+ break;
+ *p = 0;
+ }
+ return retval;
+}
+
+static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen)
+{
+ u16 index = 0;
+ u32 datalen;
+
+ if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, NULL, &datalen) != 0)
+ return NULL;
+
+ memset(buf, 0, buflen);
+ if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0)
+ return NULL;
+
+ return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf,
+ datalen);
+}
+
static int bnxt_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom,
u8 *data)
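
bnxt_parse_pkglog() above treats the NVRAM package log as newline-separated records of tab-separated fields and returns the requested field of the last record; index 2 (BNX_PKG_LOG_FIELD_IDX_PKG_VERSION) is the package version. A toy parse of a single made-up record (not from the patch; the record contents are an assumption):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char data[] = "2016-02-17\tBroadcom NXE\t1.0.1\textra\n";
        const int desired_field = 2;  /* PKG_VERSION */
        char *value = strtok(data, "\t\n");
        int field = 0;

        while (value && field < desired_field) {
            value = strtok(NULL, "\t\n");
            field++;
        }

        printf("pkg version: %s\n", value);  /* prints 1.0.1 */
        return 0;
    }

A version string like this also passes the isdigit() gate in bnxt_get_drvinfo() above, so it gets appended to the reported fw_version.
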
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
index 3cf3e1b70b64..43ef392c8588 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -50,10 +50,24 @@ enum bnxt_nvm_directory_type {
#define BNX_DIR_ORDINAL_FIRST 0
+#define BNX_DIR_EXT_NONE 0
#define BNX_DIR_EXT_INACTIVE (1 << 0)
#define BNX_DIR_EXT_UPDATE (1 << 1)
+#define BNX_DIR_ATTR_NONE 0
#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
+#define BNX_PKG_LOG_MAX_LENGTH 4096
+
+enum bnxnvm_pkglog_field_index {
+ BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0,
+ BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,
+ BNX_PKG_LOG_FIELD_IDX_PKG_VERSION = 2,
+ BNX_PKG_LOG_FIELD_IDX_PKG_TIMESTAMP = 3,
+ BNX_PKG_LOG_FIELD_IDX_PKG_CHECKSUM = 4,
+ BNX_PKG_LOG_FIELD_IDX_INSTALLED_ITEMS = 5,
+ BNX_PKG_LOG_FIELD_IDX_INSTALLED_MASK = 6
+};
+
#endif /* Don't add anything after this line */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index c1cc83d7e38c..0c5f510492f1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -522,6 +522,46 @@ err_out1:
return rc;
}
+static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
+ struct bnxt_vf_info *vf,
+ u16 event_id)
+{
+ int rc = 0;
+ struct hwrm_fwd_async_event_cmpl_input req = {0};
+ struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_async_event_cmpl *async_cmpl;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+ if (vf)
+ req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+ else
+ /* broadcast this async event to all VFs */
+ req.encap_async_event_target_id = cpu_to_le16(0xffff);
+ async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+ async_cmpl->type =
+ cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+ async_cmpl->event_id = cpu_to_le16(event_id);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
+ rc);
+ goto fwd_async_event_cmpl_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+fwd_async_event_cmpl_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
void bnxt_sriov_disable(struct bnxt *bp)
{
u16 num_vfs = pci_num_vf(bp->pdev);
@@ -530,6 +570,9 @@ void bnxt_sriov_disable(struct bnxt *bp)
return;
if (pci_vfs_assigned(bp->pdev)) {
+ bnxt_hwrm_fwd_async_event_cmpl(
+ bp, NULL,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
num_vfs);
} else {
@@ -758,8 +801,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
int rc = 0;
- struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr;
- u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff;
+ struct input *encap_req = vf->hwrm_cmd_req_addr;
+ u32 req_type = le16_to_cpu(encap_req->req_type);
switch (req_type) {
case HWRM_CFA_L2_FILTER_ALLOC:
@@ -809,13 +852,19 @@ void bnxt_update_vf_mac(struct bnxt *bp)
if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
goto update_vf_mac_exit;
- if (!is_valid_ether_addr(resp->perm_mac_address))
- goto update_vf_mac_exit;
-
+ /* Store MAC address from the firmware. There are 2 cases:
+ * 1. MAC address is valid. It is assigned from the PF and we
+ * need to override the current VF MAC address with it.
+ * 2. MAC address is zero. The VF will use a random MAC address by
+ * default but the stored zero MAC will allow the VF user to change
+ * the random MAC address using ndo_set_mac_address() if desired.
+ */
if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
- /* overwrite netdev dev_adr with admin VF MAC */
- memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+
+ /* overwrite netdev dev_addr with admin VF MAC */
+ if (is_valid_ether_addr(bp->vf.mac_addr))
+ memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
}
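
bnxt_update_vf_mac() above now always stores the PF-reported MAC, zero included, but only overrides the netdev address when the stored MAC is a valid unicast one, so a VF keeps its random MAC until the PF assigns a real one. A sketch of that policy (not from the patch; the validity check mirrors is_valid_ether_addr()):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool valid_ether_addr(const unsigned char *a)
    {
        static const unsigned char zero[6];

        /* valid = non-zero and unicast (low bit of first octet clear) */
        return memcmp(a, zero, 6) != 0 && !(a[0] & 1);
    }

    int main(void)
    {
        unsigned char perm_mac[6] = { 0 };  /* PF assigned nothing */
        unsigned char vf_mac[6], dev_addr[6] = { 0x02, 0, 0, 0, 0, 0x01 };

        memcpy(vf_mac, perm_mac, 6);   /* always store what the PF said */
        if (valid_ether_addr(vf_mac))  /* override only when valid */
            memcpy(dev_addr, vf_mac, 6);

        printf("dev_addr still %02x:..:%02x (random MAC kept)\n",
               dev_addr[0], dev_addr[5]);
        return 0;
    }
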
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 04b0d16b210e..95bc470ae441 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -987,7 +987,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
if (!list_empty(&rxf->ucast_pending_add_q)) {
mac = list_first_entry(&rxf->ucast_pending_add_q,
struct bna_mac, qe);
- list_add_tail(&mac->qe, &rxf->ucast_active_q);
+ list_move_tail(&mac->qe, &rxf->ucast_active_q);
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
return 1;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 00cc9156abbb..092f097a5943 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -116,6 +116,15 @@
#define NIC_PF_INTR_ID_MBOX0 8
#define NIC_PF_INTR_ID_MBOX1 9
+/* Minimum FIFO level before all packets for the CQ are dropped
+ *
+ * This value ensures that once a packet has been "accepted"
+ * for reception it will not get dropped due to non-availability
+ * of a CQ descriptor. A HW erratum mandates this value to be
+ * at least 0x100.
+ */
+#define NICPF_CQM_MIN_DROP_LEVEL 0x100
+
/* Global timer for CQ timer thresh interrupts
* Calculated for SCLK of 700Mhz
* value written should be a 1/16th of what is expected
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 4dded90076c8..95f17f8cadac 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -304,6 +304,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
static void nic_init_hw(struct nicpf *nic)
{
int i;
+ u64 cqm_cfg;
/* Enable NIC HW block */
nic_reg_write(nic, NIC_PF_CFG, 0x3);
@@ -340,6 +341,11 @@ static void nic_init_hw(struct nicpf *nic)
/* Enable VLAN ethertype matching and stripping */
nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
(2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
+
+ /* Keep the HW value if it is already higher (could be on future chips) */
+ cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
+ if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
+ nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
}
/* Channel parse index configuration */
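
nic_init_hw() above applies the erratum floor with a raise-only write: NIC_PF_CQM_CFG is rewritten only when its current value is below 0x100, so a higher default on future silicon survives. Sketch of the pattern (not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define CQM_MIN_DROP_LEVEL 0x100

    int main(void)
    {
        uint64_t reg = 0x80;  /* stand-in for a NIC_PF_CQM_CFG read */

        if (reg < CQM_MIN_DROP_LEVEL)
            reg = CQM_MIN_DROP_LEVEL;  /* raise, never lower */

        printf("CQM_CFG = 0x%llx\n", (unsigned long long)reg);
        return 0;
    }
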
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index dd536be20193..afb10e326b4f 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -21,7 +21,7 @@
#define NIC_PF_TCP_TIMER (0x0060)
#define NIC_PF_BP_CFG (0x0080)
#define NIC_PF_RRM_CFG (0x0088)
-#define NIC_PF_CQM_CF (0x00A0)
+#define NIC_PF_CQM_CFG (0x00A0)
#define NIC_PF_CNM_CF (0x00A8)
#define NIC_PF_CNM_STATUS (0x00B0)
#define NIC_PF_CQ_AVG_CFG (0x00C0)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b4eb4680a27c..deca4a2956cc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2226,7 +2226,7 @@ static int process_responses(struct sge_rspq *q, int budget)
budget_left--;
}
- if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
+ if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
__refill_fl(q->adap, &rxq->fl);
return budget - budget_left;
}
@@ -2611,8 +2611,18 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
FW_IQ_CMD_FL0CONGCIF_F |
FW_IQ_CMD_FL0CONGEN_F);
+ /* In T6, for egress queue type FL there is internal overhead
+ * of 16B for header going into FLM module. Hence the maximum
+ * allowed burst size is 448 bytes. For T4/T5, the hardware
+ * doesn't coalesce fetch requests if more than 64 bytes of
+ * Free List pointers are provided, so we use a 128-byte Fetch
+ * Burst Minimum there (T6 implements coalescing so we can use
+ * the smaller 64-byte value there).
+ */
c.fl0dcaen_to_fl0cidxfthresh =
- htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) |
+ htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
+ FETCHBURSTMIN_128B_X :
+ FETCHBURSTMIN_64B_X) |
FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
FETCHBURSTMAX_512B_X :
FETCHBURSTMAX_256B_X));
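
Both alloc_rxq paths pick the Fetch Burst Minimum per chip generation using the t4_values.h encodings added below: T4/T5 get the 128-byte value, T6 the 64-byte one. A sketch of the selection (not from the patch; the encoding-to-bytes relation, 16 << encoding, is inferred from the 2 -> 64B and 3 -> 128B pairs):

    #include <stdio.h>

    #define FETCHBURSTMIN_64B_X  2
    #define FETCHBURSTMIN_128B_X 3
    #define CHELSIO_T5 5
    #define CHELSIO_T6 6  /* chip numbers are assumptions */

    int main(void)
    {
        int chip = CHELSIO_T6;
        int fbmin = chip <= CHELSIO_T5 ? FETCHBURSTMIN_128B_X
                                       : FETCHBURSTMIN_64B_X;

        printf("chip T%d -> FBMIN encoding %d (%dB)\n", chip, fbmin,
               16 << fbmin);
        return 0;
    }
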
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
index a5231fa771db..36cf3073ca37 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -65,6 +65,7 @@
#define TIMERREG_COUNTER0_X 0
#define FETCHBURSTMIN_64B_X 2
+#define FETCHBURSTMIN_128B_X 3
#define FETCHBURSTMAX_256B_X 2
#define FETCHBURSTMAX_512B_X 3
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 8337514ababb..91857b81009e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -862,52 +862,6 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
return ns;
}
-/*
- * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
- * at a specified offset within the list, into an array of addrss pointers and
- * return the number collected.
- */
-static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
- const u8 **addr,
- unsigned int offset,
- unsigned int maxaddrs)
-{
- unsigned int index = 0;
- unsigned int naddr = 0;
- const struct netdev_hw_addr *ha;
-
- for_each_dev_addr(dev, ha)
- if (index++ >= offset) {
- addr[naddr++] = ha->addr;
- if (naddr >= maxaddrs)
- break;
- }
- return naddr;
-}
-
-/*
- * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
- * at a specified offset within the list, into an array of addrss pointers and
- * return the number collected.
- */
-static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
- const u8 **addr,
- unsigned int offset,
- unsigned int maxaddrs)
-{
- unsigned int index = 0;
- unsigned int naddr = 0;
- const struct netdev_hw_addr *ha;
-
- netdev_for_each_mc_addr(ha, dev)
- if (index++ >= offset) {
- addr[naddr++] = ha->addr;
- if (naddr >= maxaddrs)
- break;
- }
- return naddr;
-}
-
static inline int cxgb4vf_set_addr_hash(struct port_info *pi)
{
struct adapter *adapter = pi->adapter;
@@ -2237,16 +2191,6 @@ static int adap_init0(struct adapter *adapter)
u32 param, val = 0;
/*
- * Wait for the device to become ready before proceeding ...
- */
- err = t4vf_wait_dev_ready(adapter);
- if (err) {
- dev_err(adapter->pdev_dev, "device didn't become ready:"
- " err=%d\n", err);
- return err;
- }
-
- /*
* Some environments do not properly handle PCIE FLRs -- e.g. in Linux
* 2.6.31 and later we can't call pci_reset_function() in order to
* issue an FLR because of a self- deadlock on the device semaphore.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 6528231d8a59..1ccd282949a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1864,7 +1864,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
* for new buffer pointers, refill the Free List.
*/
if (rspq->offset >= 0 &&
- rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
+ fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
__refill_fl(rspq->adapter, &rxq->fl);
return budget - budget_left;
}
@@ -2300,9 +2300,20 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
FW_IQ_CMD_FL0PACKEN_F |
FW_IQ_CMD_FL0PADEN_F);
+
+ /* In T6, for egress queue type FL there is internal overhead
+ * of 16B for header going into FLM module. Hence the maximum
+ * allowed burst size is 448 bytes. For T4/T5, the hardware
+ * doesn't coalesce fetch requests if more than 64 bytes of
+ * Free List pointers are provided, so we use a 128-byte Fetch
+ * Burst Minimum there (T6 implements coalescing so we can use
+ * the smaller 64-byte value there).
+ */
cmd.fl0dcaen_to_fl0cidxfthresh =
cpu_to_be16(
- FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
+ FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
+ FETCHBURSTMIN_128B_X :
+ FETCHBURSTMIN_64B_X) |
FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
FETCHBURSTMAX_512B_X :
FETCHBURSTMAX_256B_X));
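
[editor's note] The chip-conditional Fetch Burst selection above is the same on the PF and VF paths. A minimal standalone sketch of the encoding, assuming the FW_IQ_CMD_FL0FBMIN_V/FL0FBMAX_V macros and the FETCHBURST*_X values from t4_values.h; fl0_fetchburst() is a hypothetical helper name, not part of the patch:

/* Hedged sketch: pick the Fetch Burst Minimum/Maximum encoding by chip
 * generation, as the hunks above do. */
static inline __be16 fl0_fetchburst(unsigned int chip_ver)
{
	unsigned int fbmin = (chip_ver <= CHELSIO_T5) ? FETCHBURSTMIN_128B_X
						      : FETCHBURSTMIN_64B_X;
	unsigned int fbmax = (chip_ver <= CHELSIO_T5) ? FETCHBURSTMAX_512B_X
						      : FETCHBURSTMAX_256B_X;

	return htons(FW_IQ_CMD_FL0FBMIN_V(fbmin) |
		     FW_IQ_CMD_FL0FBMAX_V(fbmax));
}
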
@@ -2607,7 +2618,6 @@ int t4vf_sge_init(struct adapter *adapter)
u32 fl0 = sge_params->sge_fl_buffer_size[0];
u32 fl1 = sge_params->sge_fl_buffer_size[1];
struct sge *s = &adapter->sge;
- unsigned int ingpadboundary, ingpackboundary, ingpad_shift;
/*
* Start by vetting the basic SGE parameters which have been set up by
@@ -2619,7 +2629,8 @@ int t4vf_sge_init(struct adapter *adapter)
fl0, fl1);
return -EINVAL;
}
- if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
+ if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
+ RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
return -EINVAL;
}
@@ -2632,41 +2643,7 @@ int t4vf_sge_init(struct adapter *adapter)
s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
? 128 : 64);
s->pktshift = PKTSHIFT_G(sge_params->sge_control);
-
- /* T4 uses a single control field to specify both the PCIe Padding and
- * Packing Boundary. T5 introduced the ability to specify these
- * separately. The actual Ingress Packet Data alignment boundary
- * within Packed Buffer Mode is the maximum of these two
- * specifications. (Note that it makes no real practical sense to
- * have the Pading Boudary be larger than the Packing Boundary but you
- * could set the chip up that way and, in fact, legacy T4 code would
- * end doing this because it would initialize the Padding Boundary and
- * leave the Packing Boundary initialized to 0 (16 bytes).)
- * Padding Boundary values in T6 starts from 8B,
- * where as it is 32B for T4 and T5.
- */
- if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
- ingpad_shift = INGPADBOUNDARY_SHIFT_X;
- else
- ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
-
- ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
- ingpad_shift);
- if (is_t4(adapter->params.chip)) {
- s->fl_align = ingpadboundary;
- } else {
- /* T5 has a different interpretation of one of the PCIe Packing
- * Boundary values.
- */
- ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
- if (ingpackboundary == INGPACKBOUNDARY_16B_X)
- ingpackboundary = 16;
- else
- ingpackboundary = 1 << (ingpackboundary +
- INGPACKBOUNDARY_SHIFT_X);
-
- s->fl_align = max(ingpadboundary, ingpackboundary);
- }
+ s->fl_align = t4vf_fl_pkt_align(adapter);
/* A FL with <= fl_starve_thres buffers is starving and a periodic
* timer will attempt to refill it. This needs to be larger than the
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 6ce302fe1a61..9b40a85cc1e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -309,6 +309,7 @@ int t4vf_port_init(struct adapter *, int);
int t4vf_fw_reset(struct adapter *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
+int t4vf_fl_pkt_align(struct adapter *adapter);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
int t4vf_bar2_sge_qregs(struct adapter *adapter,
unsigned int qid,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 54220117dcba..fed83d88fc4e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -418,6 +418,61 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
}
/**
+ * t4vf_fl_pkt_align - return the fl packet alignment
+ * @adapter: the adapter
+ *
+ * T4 has a single field to specify the packing and padding boundary.
+ * T5 onwards has separate fields for these, so the alignment for the
+ * next packet offset is the maximum of the two. And T6 changes the
+ * Ingress Padding Boundary Shift, so it's all a mess and it's best
+ * if we put this in low-level Common Code ...
+ *
+ */
+int t4vf_fl_pkt_align(struct adapter *adapter)
+{
+ u32 sge_control, sge_control2;
+ unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
+
+ sge_control = adapter->params.sge.sge_control;
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications. (Note that it makes no real practical sense to
+ * have the Padding Boundary be larger than the Packing Boundary but you
+ * could set the chip up that way and, in fact, legacy T4 code would
+ * end up doing this because it would initialize the Padding Boundary and
+ * leave the Packing Boundary initialized to 0 (16 bytes).)
+ * Padding Boundary values in T6 start from 8B,
+ * whereas they are 32B for T4 and T5.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ ingpad_shift = INGPADBOUNDARY_SHIFT_X;
+ else
+ ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
+
+ ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
+
+ fl_align = ingpadboundary;
+ if (!is_t4(adapter->params.chip)) {
+ /* T5 has a different interpretation of one of the PCIe Packing
+ * Boundary values.
+ */
+ sge_control2 = adapter->params.sge.sge_control2;
+ ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
+ if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ INGPACKBOUNDARY_SHIFT_X);
+
+ fl_align = max(ingpadboundary, ingpackboundary);
+ }
+ return fl_align;
+}
+
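
[editor's note] A worked example of the routine above, with illustrative values rather than hardware reads: an INGPADBOUNDARY field of 0 on T5 (shift 5, so a 32-byte pad boundary) and a decoded Packing Boundary of 64 bytes give fl_align = max(32, 64) = 64. A hedged sketch with the register decoding elided; both arguments are plain byte counts:

/* Hedged sketch: compute the Free List packet alignment from already-
 * decoded pad/pack boundaries, mirroring t4vf_fl_pkt_align() above. */
static unsigned int fl_align_from_boundaries(unsigned int ingpad,
					     unsigned int ingpack,
					     bool is_t4)
{
	if (is_t4)
		return ingpad;	/* T4: single control field, pad only */
	return ingpad > ingpack ? ingpad : ingpack;
}
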
+/**
* t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
* @adapter: the adapter
* @qid: the Queue ID
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index ee584c59ff62..fe3763df3f13 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -545,6 +545,7 @@ struct be_adapter {
struct delayed_work be_err_detection_work;
u8 recovery_retries;
u8 err_flags;
+ bool pcicfg_mapped; /* pcicfg obtained via pci_iomap() */
u32 flags;
u32 cmd_privileges;
/* Ethtool knobs and info */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 0b9f741f31af..d8540ae95e5a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -666,10 +666,13 @@ enum be_if_flags {
BE_IF_FLAGS_VLAN_PROMISCUOUS |\
BE_IF_FLAGS_MCAST_PROMISCUOUS)
-#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
- BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
+#define BE_IF_FILT_FLAGS_BASIC (BE_IF_FLAGS_BROADCAST | \
+ BE_IF_FLAGS_PASS_L3L4_ERRORS | \
+ BE_IF_FLAGS_UNTAGGED)
-#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
+#define BE_IF_ALL_FILT_FLAGS (BE_IF_FILT_FLAGS_BASIC | \
+ BE_IF_FLAGS_MULTICAST | \
+ BE_IF_FLAGS_ALL_PROMISCUOUS)
/* An RX interface is an object with one or more MAC addresses and
* filtering capabilities. */
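
[editor's note] A small hedged illustration of the regrouped filter sets above: multicast is no longer part of the basic set and has to be ORed in explicitly where wanted. want_mc is a hypothetical flag, not driver state:

/* Hedged sketch: build an enable mask from the regrouped filter sets. */
static u32 build_if_en_flags(bool want_mc)
{
	u32 en_flags = BE_IF_FILT_FLAGS_BASIC;

	if (want_mc)
		en_flags |= BE_IF_FLAGS_MULTICAST;
	return en_flags;
}
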
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 17422b20a8ec..536686476369 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -125,6 +125,11 @@ static const char * const ue_status_hi_desc[] = {
"Unknown"
};
+#define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \
+ BE_IF_FLAGS_BROADCAST | \
+ BE_IF_FLAGS_MULTICAST | \
+ BE_IF_FLAGS_PASS_L3L4_ERRORS)
+
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
@@ -3558,7 +3563,7 @@ static int be_enable_if_filters(struct be_adapter *adapter)
{
int status;
- status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
+ status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
if (status)
return status;
@@ -3875,8 +3880,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
int status;
/* If a FW profile exists, then cap_flags are updated */
- cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ cap_flags = BE_VF_IF_EN_FLAGS;
for_all_vfs(adapter, vf_cfg, vf) {
if (!BE3_chip(adapter)) {
@@ -3892,10 +3896,8 @@ static int be_vfs_if_create(struct be_adapter *adapter)
}
}
- en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST |
- BE_IF_FLAGS_PASS_L3L4_ERRORS);
+ /* PF should enable IF flags during proxy if_create call */
+ en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
status = be_cmd_if_create(adapter, cap_flags, en_flags,
&vf_cfg->if_handle, vf + 1);
if (status)
@@ -5040,6 +5042,8 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
pci_iounmap(adapter->pdev, adapter->csr);
if (adapter->db)
pci_iounmap(adapter->pdev, adapter->db);
+ if (adapter->pcicfg && adapter->pcicfg_mapped)
+ pci_iounmap(adapter->pdev, adapter->pcicfg);
}
static int db_bar(struct be_adapter *adapter)
@@ -5091,8 +5095,10 @@ static int be_map_pci_bars(struct be_adapter *adapter)
if (!addr)
goto pci_map_err;
adapter->pcicfg = addr;
+ adapter->pcicfg_mapped = true;
} else {
adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
+ adapter->pcicfg_mapped = false;
}
}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 62fa136554ac..41b010645100 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1265,7 +1265,6 @@ static int ethoc_remove(struct platform_device *pdev)
if (priv->mdio) {
mdiobus_unregister(priv->mdio);
- kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
}
if (priv->clk)
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index b1026689b78f..1f23845a0694 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -43,20 +43,21 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev,
bool dst_is_aligned = IS_ALIGNED((unsigned long)dst, sizeof(u32));
/* In case dst is not aligned we need an intermediate buffer */
- if (dst_is_aligned)
- for (i = 0; i < len; i++, reg++)
- *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
+ if (dst_is_aligned) {
+ ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, reg, len);
+ reg += len;
+ }
else { /* !dst_is_aligned */
for (i = 0; i < len; i++, reg++) {
u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
- put_unaligned(buf, reg);
+ put_unaligned_be32(buf, reg);
}
}
-
/* copy last bytes (if any) */
if (last) {
- u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
- memcpy((u8*)reg, &buf, last);
+ u32 buf;
+ ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, &buf, 1);
+ memcpy((u8 *)reg, &buf, last);
}
}
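
[editor's note] The aligned/unaligned split above hinges on IS_ALIGNED(); shown here only for illustration, this is the stock kernel macro (include/linux/kernel.h), with a hedged helper wrapping the exact test the function uses:

/* The alignment test used above; a power-of-two 'a' is assumed. */
#define IS_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1)) == 0)

/* Hedged example: a 4-byte-aligned destination takes the fast
 * ioread32_rep() path, anything else goes through put_unaligned_be32(). */
static bool rx_dst_takes_fast_path(const void *dst)
{
	return IS_ALIGNED((unsigned long)dst, sizeof(u32));
}
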
@@ -66,26 +67,28 @@ static u32 nps_enet_rx_handler(struct net_device *ndev)
u32 work_done = 0;
struct nps_enet_priv *priv = netdev_priv(ndev);
struct sk_buff *skb;
- struct nps_enet_rx_ctl rx_ctrl;
+ u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
+ u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
+ u32 rx_ctrl_er = (rx_ctrl_value & RX_CTL_ER_MASK) >> RX_CTL_ER_SHIFT;
+ u32 rx_ctrl_crc = (rx_ctrl_value & RX_CTL_CRC_MASK) >> RX_CTL_CRC_SHIFT;
- rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
- frame_len = rx_ctrl.nr;
+ frame_len = (rx_ctrl_value & RX_CTL_NR_MASK) >> RX_CTL_NR_SHIFT;
/* Check if we got RX */
- if (!rx_ctrl.cr)
+ if (!rx_ctrl_cr)
return work_done;
/* If we got here there is a work for us */
work_done++;
/* Check Rx error */
- if (rx_ctrl.er) {
+ if (rx_ctrl_er) {
ndev->stats.rx_errors++;
err = 1;
}
/* Check Rx CRC error */
- if (rx_ctrl.crc) {
+ if (rx_ctrl_crc) {
ndev->stats.rx_crc_errors++;
ndev->stats.rx_dropped++;
err = 1;
@@ -136,23 +139,24 @@ rx_irq_frame_done:
static void nps_enet_tx_handler(struct net_device *ndev)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_tx_ctl tx_ctrl;
-
- tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+ u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+ u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
+ u32 tx_ctrl_et = (tx_ctrl_value & TX_CTL_ET_MASK) >> TX_CTL_ET_SHIFT;
+ u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT;
/* Check if we got TX */
- if (!priv->tx_packet_sent || tx_ctrl.ct)
+ if (!priv->tx_packet_sent || tx_ctrl_ct)
return;
/* Ack Tx ctrl register */
nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0);
/* Check Tx transmit error */
- if (unlikely(tx_ctrl.et)) {
+ if (unlikely(tx_ctrl_et)) {
ndev->stats.tx_errors++;
} else {
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += tx_ctrl.nt;
+ ndev->stats.tx_bytes += tx_ctrl_nt;
}
dev_kfree_skb(priv->tx_skb);
@@ -178,13 +182,16 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
nps_enet_tx_handler(ndev);
work_done = nps_enet_rx_handler(ndev);
if (work_done < budget) {
- struct nps_enet_buf_int_enable buf_int_enable;
+ u32 buf_int_enable_value = 0;
napi_complete(napi);
- buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
- buf_int_enable.tx_done = NPS_ENET_ENABLE;
+
+ /* set tx_done and rx_rdy bits */
+ buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
+ buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
+
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
- buf_int_enable.value);
+ buf_int_enable_value);
}
return work_done;
@@ -205,13 +212,12 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
{
struct net_device *ndev = dev_instance;
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_rx_ctl rx_ctrl;
- struct nps_enet_tx_ctl tx_ctrl;
-
- rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
- tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+ u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
+ u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+ u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
+ u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
- if ((!tx_ctrl.ct && priv->tx_packet_sent) || rx_ctrl.cr)
+ if ((!tx_ctrl_ct && priv->tx_packet_sent) || rx_ctrl_cr)
if (likely(napi_schedule_prep(&priv->napi))) {
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
__napi_schedule(&priv->napi);
@@ -223,22 +229,24 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
static void nps_enet_set_hw_mac_address(struct net_device *ndev)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_ge_mac_cfg_1 ge_mac_cfg_1;
- struct nps_enet_ge_mac_cfg_2 *ge_mac_cfg_2 = &priv->ge_mac_cfg_2;
+ u32 ge_mac_cfg_1_value = 0;
+ u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value;
/* set MAC address in HW */
- ge_mac_cfg_1.octet_0 = ndev->dev_addr[0];
- ge_mac_cfg_1.octet_1 = ndev->dev_addr[1];
- ge_mac_cfg_1.octet_2 = ndev->dev_addr[2];
- ge_mac_cfg_1.octet_3 = ndev->dev_addr[3];
- ge_mac_cfg_2->octet_4 = ndev->dev_addr[4];
- ge_mac_cfg_2->octet_5 = ndev->dev_addr[5];
+ ge_mac_cfg_1_value |= ndev->dev_addr[0] << CFG_1_OCTET_0_SHIFT;
+ ge_mac_cfg_1_value |= ndev->dev_addr[1] << CFG_1_OCTET_1_SHIFT;
+ ge_mac_cfg_1_value |= ndev->dev_addr[2] << CFG_1_OCTET_2_SHIFT;
+ ge_mac_cfg_1_value |= ndev->dev_addr[3] << CFG_1_OCTET_3_SHIFT;
+ *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_4_MASK)
+ | ndev->dev_addr[4] << CFG_2_OCTET_4_SHIFT;
+ *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_5_MASK)
+ | ndev->dev_addr[5] << CFG_2_OCTET_5_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_1,
- ge_mac_cfg_1.value);
+ ge_mac_cfg_1_value);
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
- ge_mac_cfg_2->value);
+ *ge_mac_cfg_2_value);
}
/**
@@ -254,93 +262,97 @@ static void nps_enet_set_hw_mac_address(struct net_device *ndev)
static void nps_enet_hw_reset(struct net_device *ndev)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_ge_rst ge_rst;
- struct nps_enet_phase_fifo_ctl phase_fifo_ctl;
+ u32 ge_rst_value = 0, phase_fifo_ctl_value = 0;
- ge_rst.value = 0;
- phase_fifo_ctl.value = 0;
/* Pcs reset sequence*/
- ge_rst.gmac_0 = NPS_ENET_ENABLE;
- nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst.value);
+ ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
usleep_range(10, 20);
- ge_rst.value = 0;
- nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst.value);
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
/* Tx fifo reset sequence */
- phase_fifo_ctl.rst = NPS_ENET_ENABLE;
- phase_fifo_ctl.init = NPS_ENET_ENABLE;
+ phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_RST_SHIFT;
+ phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_INIT_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
- phase_fifo_ctl.value);
+ phase_fifo_ctl_value);
usleep_range(10, 20);
- phase_fifo_ctl.value = 0;
+ phase_fifo_ctl_value = 0;
nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
- phase_fifo_ctl.value);
+ phase_fifo_ctl_value);
}
static void nps_enet_hw_enable_control(struct net_device *ndev)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_ge_mac_cfg_0 ge_mac_cfg_0;
- struct nps_enet_buf_int_enable buf_int_enable;
- struct nps_enet_ge_mac_cfg_2 *ge_mac_cfg_2 = &priv->ge_mac_cfg_2;
- struct nps_enet_ge_mac_cfg_3 *ge_mac_cfg_3 = &priv->ge_mac_cfg_3;
+ u32 ge_mac_cfg_0_value = 0, buf_int_enable_value = 0;
+ u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value;
+ u32 *ge_mac_cfg_3_value = &priv->ge_mac_cfg_3_value;
s32 max_frame_length;
- ge_mac_cfg_0.value = 0;
- buf_int_enable.value = 0;
/* Enable Rx and Tx statistics */
- ge_mac_cfg_2->stat_en = NPS_ENET_GE_MAC_CFG_2_STAT_EN;
+ *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_STAT_EN_MASK)
+ | NPS_ENET_GE_MAC_CFG_2_STAT_EN << CFG_2_STAT_EN_SHIFT;
/* Discard packets with different MAC address */
- ge_mac_cfg_2->disc_da = NPS_ENET_ENABLE;
+ *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
+ | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT;
/* Discard multicast packets */
- ge_mac_cfg_2->disc_mc = NPS_ENET_ENABLE;
+ *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
+ | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
- ge_mac_cfg_2->value);
+ *ge_mac_cfg_2_value);
/* Discard Packets bigger than max frame length */
max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN;
- if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH)
- ge_mac_cfg_3->max_len = max_frame_length;
+ if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) {
+ *ge_mac_cfg_3_value =
+ (*ge_mac_cfg_3_value & ~CFG_3_MAX_LEN_MASK)
+ | max_frame_length << CFG_3_MAX_LEN_SHIFT;
+ }
/* Enable interrupts */
- buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
- buf_int_enable.tx_done = NPS_ENET_ENABLE;
+ buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
+ buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
- buf_int_enable.value);
+ buf_int_enable_value);
/* Write device MAC address to HW */
nps_enet_set_hw_mac_address(ndev);
/* Rx and Tx HW features */
- ge_mac_cfg_0.tx_pad_en = NPS_ENET_ENABLE;
- ge_mac_cfg_0.tx_crc_en = NPS_ENET_ENABLE;
- ge_mac_cfg_0.rx_crc_strip = NPS_ENET_ENABLE;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_PAD_EN_SHIFT;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_CRC_EN_SHIFT;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_CRC_STRIP_SHIFT;
/* IFG configuration */
- ge_mac_cfg_0.rx_ifg = NPS_ENET_GE_MAC_CFG_0_RX_IFG;
- ge_mac_cfg_0.tx_ifg = NPS_ENET_GE_MAC_CFG_0_TX_IFG;
+ ge_mac_cfg_0_value |=
+ NPS_ENET_GE_MAC_CFG_0_RX_IFG << CFG_0_RX_IFG_SHIFT;
+ ge_mac_cfg_0_value |=
+ NPS_ENET_GE_MAC_CFG_0_TX_IFG << CFG_0_TX_IFG_SHIFT;
/* preamble configuration */
- ge_mac_cfg_0.rx_pr_check_en = NPS_ENET_ENABLE;
- ge_mac_cfg_0.tx_pr_len = NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_PR_CHECK_EN_SHIFT;
+ ge_mac_cfg_0_value |=
+ NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN << CFG_0_TX_PR_LEN_SHIFT;
/* enable flow control frames */
- ge_mac_cfg_0.tx_fc_en = NPS_ENET_ENABLE;
- ge_mac_cfg_0.rx_fc_en = NPS_ENET_ENABLE;
- ge_mac_cfg_0.tx_fc_retr = NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR;
- ge_mac_cfg_3->cf_drop = NPS_ENET_ENABLE;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_FC_EN_SHIFT;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_FC_EN_SHIFT;
+ ge_mac_cfg_0_value |=
+ NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR << CFG_0_TX_FC_RETR_SHIFT;
+ *ge_mac_cfg_3_value = (*ge_mac_cfg_3_value & ~CFG_3_CF_DROP_MASK)
+ | NPS_ENET_ENABLE << CFG_3_CF_DROP_SHIFT;
/* Enable Rx and Tx */
- ge_mac_cfg_0.rx_en = NPS_ENET_ENABLE;
- ge_mac_cfg_0.tx_en = NPS_ENET_ENABLE;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_EN_SHIFT;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_EN_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
- ge_mac_cfg_3->value);
+ *ge_mac_cfg_3_value);
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0,
- ge_mac_cfg_0.value);
+ ge_mac_cfg_0_value);
}
static void nps_enet_hw_disable_control(struct net_device *ndev)
@@ -358,31 +370,28 @@ static void nps_enet_send_frame(struct net_device *ndev,
struct sk_buff *skb)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_tx_ctl tx_ctrl;
+ u32 tx_ctrl_value = 0;
short length = skb->len;
u32 i, len = DIV_ROUND_UP(length, sizeof(u32));
u32 *src = (void *)skb->data;
bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32));
- tx_ctrl.value = 0;
/* In case src is not aligned we need an intermediate buffer */
if (src_is_aligned)
- for (i = 0; i < len; i++, src++)
- nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src);
+ iowrite32_rep(priv->regs_base + NPS_ENET_REG_TX_BUF, src, len);
else /* !src_is_aligned */
for (i = 0; i < len; i++, src++)
nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF,
- get_unaligned(src));
+ get_unaligned_be32(src));
/* Write the length of the Frame */
- tx_ctrl.nt = length;
+ tx_ctrl_value |= length << TX_CTL_NT_SHIFT;
/* Indicate SW is done */
priv->tx_packet_sent = true;
- tx_ctrl.ct = NPS_ENET_ENABLE;
-
+ tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT;
/* Send Frame */
- nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl.value);
+ nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value);
}
/**
@@ -422,19 +431,23 @@ static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p)
static void nps_enet_set_rx_mode(struct net_device *ndev)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_ge_mac_cfg_2 ge_mac_cfg_2;
-
- ge_mac_cfg_2.value = priv->ge_mac_cfg_2.value;
+ u32 ge_mac_cfg_2_value = priv->ge_mac_cfg_2_value;
if (ndev->flags & IFF_PROMISC) {
- ge_mac_cfg_2.disc_da = NPS_ENET_DISABLE;
- ge_mac_cfg_2.disc_mc = NPS_ENET_DISABLE;
+ ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
+ | NPS_ENET_DISABLE << CFG_2_DISK_DA_SHIFT;
+ ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
+ | NPS_ENET_DISABLE << CFG_2_DISK_MC_SHIFT;
+
} else {
- ge_mac_cfg_2.disc_da = NPS_ENET_ENABLE;
- ge_mac_cfg_2.disc_mc = NPS_ENET_ENABLE;
+ ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
+ | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT;
+ ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
+ | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT;
+
}
- nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2.value);
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2_value);
}
/**
@@ -453,12 +466,15 @@ static s32 nps_enet_open(struct net_device *ndev)
/* Reset private variables */
priv->tx_packet_sent = false;
- priv->ge_mac_cfg_2.value = 0;
- priv->ge_mac_cfg_3.value = 0;
+ priv->ge_mac_cfg_2_value = 0;
+ priv->ge_mac_cfg_3_value = 0;
/* ge_mac_cfg_3 default values */
- priv->ge_mac_cfg_3.rx_ifg_th = NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH;
- priv->ge_mac_cfg_3.max_len = NPS_ENET_GE_MAC_CFG_3_MAX_LEN;
+ priv->ge_mac_cfg_3_value |=
+ NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH << CFG_3_RX_IFG_TH_SHIFT;
+
+ priv->ge_mac_cfg_3_value |=
+ NPS_ENET_GE_MAC_CFG_3_MAX_LEN << CFG_3_MAX_LEN_SHIFT;
/* Disable HW device */
nps_enet_hw_disable_control(ndev);
diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h
index 6703674d679c..d0cab600bce8 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.h
+++ b/drivers/net/ethernet/ezchip/nps_enet.h
@@ -43,233 +43,123 @@
#define NPS_ENET_REG_GE_RST 0x1400
#define NPS_ENET_REG_PHASE_FIFO_CTL 0x1404
-/* Tx control register */
-struct nps_enet_tx_ctl {
- union {
- /* ct: SW sets to indicate frame ready in Tx buffer for
- * transmission. HW resets to when transmission done
- * et: Transmit error
- * nt: Length in bytes of Tx frame loaded to Tx buffer
- */
- struct {
- u32
- __reserved_1:16,
- ct:1,
- et:1,
- __reserved_2:3,
- nt:11;
- };
-
- u32 value;
- };
-};
-
-/* Rx control register */
-struct nps_enet_rx_ctl {
- union {
- /* cr: HW sets to indicate frame ready in Rx buffer.
- * SW resets to indicate host read received frame
- * and new frames can be written to Rx buffer
- * er: Rx error indication
- * crc: Rx CRC error indication
- * nr: Length in bytes of Rx frame loaded by MAC to Rx buffer
- */
- struct {
- u32
- __reserved_1:16,
- cr:1,
- er:1,
- crc:1,
- __reserved_2:2,
- nr:11;
- };
-
- u32 value;
- };
-};
-
-/* Interrupt enable for data buffer events register */
-struct nps_enet_buf_int_enable {
- union {
- /* tx_done: Interrupt generation in the case when new frame
- * is ready in Rx buffer
- * rx_rdy: Interrupt generation in the case when current frame
- * was read from TX buffer
- */
- struct {
- u32
- __reserved:30,
- tx_done:1,
- rx_rdy:1;
- };
-
- u32 value;
- };
-};
-
-/* Gbps Eth MAC Configuration 0 register */
-struct nps_enet_ge_mac_cfg_0 {
- union {
- /* tx_pr_len: Transmit preamble length in bytes
- * tx_ifg_nib: Tx idle pattern
- * nib_mode: Nibble (4-bit) Mode
- * rx_pr_check_en: Receive preamble Check Enable
- * tx_ifg: Transmit inter-Frame Gap
- * rx_ifg: Receive inter-Frame Gap
- * tx_fc_retr: Transmit Flow Control Retransmit Mode
- * rx_length_check_en: Receive Length Check Enable
- * rx_crc_ignore: Results of the CRC check are ignored
- * rx_crc_strip: MAC strips the CRC from received frames
- * rx_fc_en: Receive Flow Control Enable
- * tx_crc_en: Transmit CRC Enabled
- * tx_pad_en: Transmit Padding Enable
- * tx_cf_en: Transmit Flow Control Enable
- * tx_en: Transmit Enable
- * rx_en: Receive Enable
- */
- struct {
- u32
- tx_pr_len:4,
- tx_ifg_nib:4,
- nib_mode:1,
- rx_pr_check_en:1,
- tx_ifg:6,
- rx_ifg:4,
- tx_fc_retr:3,
- rx_length_check_en:1,
- rx_crc_ignore:1,
- rx_crc_strip:1,
- rx_fc_en:1,
- tx_crc_en:1,
- tx_pad_en:1,
- tx_fc_en:1,
- tx_en:1,
- rx_en:1;
- };
-
- u32 value;
- };
-};
-
-/* Gbps Eth MAC Configuration 1 register */
-struct nps_enet_ge_mac_cfg_1 {
- union {
- /* octet_3: MAC address octet 3
- * octet_2: MAC address octet 2
- * octet_1: MAC address octet 1
- * octet_0: MAC address octet 0
- */
- struct {
- u32
- octet_3:8,
- octet_2:8,
- octet_1:8,
- octet_0:8;
- };
-
- u32 value;
- };
-};
-
-/* Gbps Eth MAC Configuration 2 register */
-struct nps_enet_ge_mac_cfg_2 {
- union {
- /* transmit_flush_en: MAC flush enable
- * stat_en: RMON statistics interface enable
- * disc_da: Discard frames with DA different
- * from MAC address
- * disc_bc: Discard broadcast frames
- * disc_mc: Discard multicast frames
- * octet_5: MAC address octet 5
- * octet_4: MAC address octet 4
- */
- struct {
- u32
- transmit_flush_en:1,
- __reserved_1:5,
- stat_en:2,
- __reserved_2:1,
- disc_da:1,
- disc_bc:1,
- disc_mc:1,
- __reserved_3:4,
- octet_5:8,
- octet_4:8;
- };
-
- u32 value;
- };
-};
-
-/* Gbps Eth MAC Configuration 3 register */
-struct nps_enet_ge_mac_cfg_3 {
- union {
- /* ext_oob_cbfc_sel: Selects one of the 4 profiles for
- * extended OOB in-flow-control indication
- * max_len: Maximum receive frame length in bytes
- * tx_cbfc_en: Enable transmission of class-based
- * flow control packets
- * rx_ifg_th: Threshold for IFG status reporting via OOB
- * cf_timeout: Configurable time to decrement FC counters
- * cf_drop: Drop control frames
- * redirect_cbfc_sel: Selects one of CBFC redirect profiles
- * rx_cbfc_redir_en: Enable Rx class-based flow
- * control redirect
- * rx_cbfc_en: Enable Rx class-based flow control
- * tm_hd_mode: TM header mode
- */
- struct {
- u32
- ext_oob_cbfc_sel:2,
- max_len:14,
- tx_cbfc_en:1,
- rx_ifg_th:5,
- cf_timeout:4,
- cf_drop:1,
- redirect_cbfc_sel:2,
- rx_cbfc_redir_en:1,
- rx_cbfc_en:1,
- tm_hd_mode:1;
- };
-
- u32 value;
- };
-};
-
-/* GE MAC, PCS reset control register */
-struct nps_enet_ge_rst {
- union {
- /* gmac_0: GE MAC reset
- * spcs_0: SGMII PCS reset
- */
- struct {
- u32
- __reserved_1:23,
- gmac_0:1,
- __reserved_2:7,
- spcs_0:1;
- };
-
- u32 value;
- };
-};
-
-/* Tx phase sync FIFO control register */
-struct nps_enet_phase_fifo_ctl {
- union {
- /* init: initialize serdes TX phase sync FIFO pointers
- * rst: reset serdes TX phase sync FIFO
- */
- struct {
- u32
- __reserved:30,
- init:1,
- rst:1;
- };
-
- u32 value;
- };
-};
+/* Tx control register masks and shifts */
+#define TX_CTL_NT_MASK 0x7FF
+#define TX_CTL_NT_SHIFT 0
+#define TX_CTL_ET_MASK 0x4000
+#define TX_CTL_ET_SHIFT 14
+#define TX_CTL_CT_MASK 0x8000
+#define TX_CTL_CT_SHIFT 15
+
+/* Rx control register masks and shifts */
+#define RX_CTL_NR_MASK 0x7FF
+#define RX_CTL_NR_SHIFT 0
+#define RX_CTL_CRC_MASK 0x2000
+#define RX_CTL_CRC_SHIFT 13
+#define RX_CTL_ER_MASK 0x4000
+#define RX_CTL_ER_SHIFT 14
+#define RX_CTL_CR_MASK 0x8000
+#define RX_CTL_CR_SHIFT 15
+
+/* Interrupt enable for data buffer events register masks and shifts */
+#define RX_RDY_MASK 0x1
+#define RX_RDY_SHIFT 0
+#define TX_DONE_MASK 0x2
+#define TX_DONE_SHIFT 1
+
+/* Gbps Eth MAC Configuration 0 register masks and shifts */
+#define CFG_0_RX_EN_MASK 0x1
+#define CFG_0_RX_EN_SHIFT 0
+#define CFG_0_TX_EN_MASK 0x2
+#define CFG_0_TX_EN_SHIFT 1
+#define CFG_0_TX_FC_EN_MASK 0x4
+#define CFG_0_TX_FC_EN_SHIFT 2
+#define CFG_0_TX_PAD_EN_MASK 0x8
+#define CFG_0_TX_PAD_EN_SHIFT 3
+#define CFG_0_TX_CRC_EN_MASK 0x10
+#define CFG_0_TX_CRC_EN_SHIFT 4
+#define CFG_0_RX_FC_EN_MASK 0x20
+#define CFG_0_RX_FC_EN_SHIFT 5
+#define CFG_0_RX_CRC_STRIP_MASK 0x40
+#define CFG_0_RX_CRC_STRIP_SHIFT 6
+#define CFG_0_RX_CRC_IGNORE_MASK 0x80
+#define CFG_0_RX_CRC_IGNORE_SHIFT 7
+#define CFG_0_RX_LENGTH_CHECK_EN_MASK 0x100
+#define CFG_0_RX_LENGTH_CHECK_EN_SHIFT 8
+#define CFG_0_TX_FC_RETR_MASK 0xE00
+#define CFG_0_TX_FC_RETR_SHIFT 9
+#define CFG_0_RX_IFG_MASK 0xF000
+#define CFG_0_RX_IFG_SHIFT 12
+#define CFG_0_TX_IFG_MASK 0x3F0000
+#define CFG_0_TX_IFG_SHIFT 16
+#define CFG_0_RX_PR_CHECK_EN_MASK 0x400000
+#define CFG_0_RX_PR_CHECK_EN_SHIFT 22
+#define CFG_0_NIB_MODE_MASK 0x800000
+#define CFG_0_NIB_MODE_SHIFT 23
+#define CFG_0_TX_IFG_NIB_MASK 0xF000000
+#define CFG_0_TX_IFG_NIB_SHIFT 24
+#define CFG_0_TX_PR_LEN_MASK 0xF0000000
+#define CFG_0_TX_PR_LEN_SHIFT 28
+
+/* Gbps Eth MAC Configuration 1 register masks and shifts */
+#define CFG_1_OCTET_0_MASK 0x000000FF
+#define CFG_1_OCTET_0_SHIFT 0
+#define CFG_1_OCTET_1_MASK 0x0000FF00
+#define CFG_1_OCTET_1_SHIFT 8
+#define CFG_1_OCTET_2_MASK 0x00FF0000
+#define CFG_1_OCTET_2_SHIFT 16
+#define CFG_1_OCTET_3_MASK 0xFF000000
+#define CFG_1_OCTET_3_SHIFT 24
+
+/* Gbps Eth MAC Configuration 2 register masks and shifts */
+#define CFG_2_OCTET_4_MASK 0x000000FF
+#define CFG_2_OCTET_4_SHIFT 0
+#define CFG_2_OCTET_5_MASK 0x0000FF00
+#define CFG_2_OCTET_5_SHIFT 8
+#define CFG_2_DISK_MC_MASK 0x00100000
+#define CFG_2_DISK_MC_SHIFT 20
+#define CFG_2_DISK_BC_MASK 0x00200000
+#define CFG_2_DISK_BC_SHIFT 21
+#define CFG_2_DISK_DA_MASK 0x00400000
+#define CFG_2_DISK_DA_SHIFT 22
+#define CFG_2_STAT_EN_MASK 0x3000000
+#define CFG_2_STAT_EN_SHIFT 24
+#define CFG_2_TRANSMIT_FLUSH_EN_MASK 0x80000000
+#define CFG_2_TRANSMIT_FLUSH_EN_SHIFT 31
+
+/* Gbps Eth MAC Configuration 3 register masks and shifts */
+#define CFG_3_TM_HD_MODE_MASK 0x1
+#define CFG_3_TM_HD_MODE_SHIFT 0
+#define CFG_3_RX_CBFC_EN_MASK 0x2
+#define CFG_3_RX_CBFC_EN_SHIFT 1
+#define CFG_3_RX_CBFC_REDIR_EN_MASK 0x4
+#define CFG_3_RX_CBFC_REDIR_EN_SHIFT 2
+#define CFG_3_REDIRECT_CBFC_SEL_MASK 0x18
+#define CFG_3_REDIRECT_CBFC_SEL_SHIFT 3
+#define CFG_3_CF_DROP_MASK 0x20
+#define CFG_3_CF_DROP_SHIFT 5
+#define CFG_3_CF_TIMEOUT_MASK 0x3C0
+#define CFG_3_CF_TIMEOUT_SHIFT 6
+#define CFG_3_RX_IFG_TH_MASK 0x7C00
+#define CFG_3_RX_IFG_TH_SHIFT 10
+#define CFG_3_TX_CBFC_EN_MASK 0x8000
+#define CFG_3_TX_CBFC_EN_SHIFT 15
+#define CFG_3_MAX_LEN_MASK 0x3FFF0000
+#define CFG_3_MAX_LEN_SHIFT 16
+#define CFG_3_EXT_OOB_CBFC_SEL_MASK 0xC0000000
+#define CFG_3_EXT_OOB_CBFC_SEL_SHIFT 30
+
+/* GE MAC, PCS reset control register masks and shifts */
+#define RST_SPCS_MASK 0x1
+#define RST_SPCS_SHIFT 0
+#define RST_GMAC_0_MASK 0x100
+#define RST_GMAC_0_SHIFT 8
+
+/* Tx phase sync FIFO control register masks and shifts */
+#define PHASE_FIFO_CTL_RST_MASK 0x1
+#define PHASE_FIFO_CTL_RST_SHIFT 0
+#define PHASE_FIFO_CTL_INIT_MASK 0x2
+#define PHASE_FIFO_CTL_INIT_SHIFT 1
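
[editor's note] Every field access in the reworked driver follows the same read-modify-write shape against these MASK/SHIFT pairs. A hedged pair of generic helpers (not part of the patch) that captures the pattern, e.g. cfg2 = field_put(cfg2, CFG_2_DISK_MC_MASK, CFG_2_DISK_MC_SHIFT, 1):

/* Hedged sketch: get/set one field of a shadowed register value using a
 * MASK/SHIFT pair from the table above. */
static inline u32 field_put(u32 reg, u32 mask, u32 shift, u32 val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

static inline u32 field_get(u32 reg, u32 mask, u32 shift)
{
	return (reg & mask) >> shift;
}
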
/**
* struct nps_enet_priv - Storage of ENET's private information.
@@ -285,8 +175,8 @@ struct nps_enet_priv {
bool tx_packet_sent;
struct sk_buff *tx_skb;
struct napi_struct napi;
- struct nps_enet_ge_mac_cfg_2 ge_mac_cfg_2;
- struct nps_enet_ge_mac_cfg_3 ge_mac_cfg_3;
+ u32 ge_mac_cfg_2_value;
+ u32 ge_mac_cfg_3_value;
};
/**
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index bad0ba29a94a..37c081583084 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3191,7 +3191,7 @@ static int fec_enet_init(struct net_device *ndev)
static void fec_reset_phy(struct platform_device *pdev)
{
int err, phy_reset;
- bool active_low = false;
+ bool active_high = false;
int msec = 1;
struct device_node *np = pdev->dev.of_node;
@@ -3207,17 +3207,17 @@ static void fec_reset_phy(struct platform_device *pdev)
if (!gpio_is_valid(phy_reset))
return;
- active_low = of_property_read_bool(np, "phy-reset-active-low");
+ active_high = of_property_read_bool(np, "phy-reset-active-high");
err = devm_gpio_request_one(&pdev->dev, phy_reset,
- active_low ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+ active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
"phy-reset");
if (err) {
dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
return;
}
msleep(msec);
- gpio_set_value_cansleep(phy_reset, !active_low);
+ gpio_set_value_cansleep(phy_reset, !active_high);
}
#else /* CONFIG_OF */
static void fec_reset_phy(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 623aa1c8ebc6..79a210aaf0bb 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2791,6 +2791,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
goto fman_free;
}
+ fman->dev = &of_dev->dev;
+
return fman;
fman_node_put:
@@ -2845,8 +2847,6 @@ static int fman_probe(struct platform_device *of_dev)
dev_set_drvdata(dev, fman);
- fman->dev = dev;
-
dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id);
return 0;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a01e5a32b631..08c041548300 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1111,8 +1111,10 @@ static void __gfar_detect_errata_85xx(struct gfar_private *priv)
if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
priv->errata |= GFAR_ERRATA_12;
+ /* P2020/P2010 Rev 1; MPC8548 Rev 2 */
if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
- ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
+ ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
+ ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 74beb1867230..4ccc032633c4 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -25,6 +25,7 @@ config HIX5HD2_GMAC
config HIP04_ETH
tristate "HISILICON P04 Ethernet support"
+ depends on HAS_IOMEM # For MFD_SYSCON
select MARVELL_PHY
select MFD_SYSCON
select HNS_MDIO
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index a0070d0e740d..d4f92ed322d6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -675,8 +675,12 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
{
int ret;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
switch (loop) {
+ case MAC_INTERNALLOOP_PHY:
+ ret = 0;
+ break;
case MAC_INTERNALLOOP_SERDES:
ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en);
break;
@@ -686,6 +690,10 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
default:
ret = -EINVAL;
}
+
+ if (!ret)
+ hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en);
+
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 9439f04962e1..38fc5be3870c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -230,6 +230,30 @@ static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev)
}
}
+static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev)
+{
+ u16 max_q_per_vf, max_vfn;
+ u32 q_id, q_num_per_port;
+ u32 mac_id;
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ return;
+
+ hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
+ HNS_DSAF_COMM_SERVICE_NW_IDX,
+ &max_vfn, &max_q_per_vf);
+ q_num_per_port = max_vfn * max_q_per_vf;
+
+ for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
+ DSAFV2_SERDES_LBK_QID_M,
+ DSAFV2_SERDES_LBK_QID_S,
+ q_id);
+ q_id += q_num_per_port;
+ }
+}
+
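
[editor's note] A hedged numeric reading of the loop above: each service port's loopback QID base advances by q_num_per_port = max_vfn * max_q_per_vf, so with, say, max_vfn = 8 and max_q_per_vf = 2 (illustrative figures only) the ports get bases 0, 16, 32, 48, 64, 80:

/* Hedged sketch of the QID striding in hns_dsaf_inner_qid_cfg() above. */
static u32 serdes_lbk_qid_base(u32 mac_id, u16 max_vfn, u16 max_q_per_vf)
{
	return mac_id * (u32)max_vfn * max_q_per_vf;
}
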
/**
* hns_dsaf_sw_port_type_cfg - cfg sw type
* @dsaf_id: dsa fabric id
@@ -691,6 +715,16 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en);
}
+void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+ dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG)
+ return;
+
+ dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
+ DSAFV2_SERDES_LBK_EN_B, !!en);
+}
+
/**
* hns_dsaf_tbl_stat_en - tbl
* @dsaf_id: dsa fabric id
@@ -1022,6 +1056,9 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
/* set promisc def queue id */
hns_dsaf_mix_def_qid_cfg(dsaf_dev);
+ /* set inner loopback queue id */
+ hns_dsaf_inner_qid_cfg(dsaf_dev);
+
/* in non switch mode, set all port to access mode */
hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 40205b910f80..5fea226efaf3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -417,5 +417,6 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port);
void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
int hns_dsaf_get_regs_count(void);
void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
+void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en);
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index f0c4f9b09d5b..60d695daa471 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -134,6 +134,7 @@
#define DSAF_XGE_INT_STS_0_REG 0x1C0
#define DSAF_PPE_INT_STS_0_REG 0x1E0
#define DSAF_ROCEE_INT_STS_0_REG 0x200
+#define DSAFV2_SERDES_LBK_0_REG 0x220
#define DSAF_PPE_QID_CFG_0_REG 0x300
#define DSAF_SW_PORT_TYPE_0_REG 0x320
#define DSAF_STP_PORT_TYPE_0_REG 0x340
@@ -857,6 +858,10 @@
#define PPEV2_CFG_RSS_TBL_4N3_S 24
#define PPEV2_CFG_RSS_TBL_4N3_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N3_S)
+#define DSAFV2_SERDES_LBK_EN_B 8
+#define DSAFV2_SERDES_LBK_QID_S 0
+#define DSAFV2_SERDES_LBK_QID_M (((1UL << 8) - 1) << DSAFV2_SERDES_LBK_QID_S)
+
#define PPE_CNT_CLR_CE_B 0
#define PPE_CNT_CLR_SNAP_EN_B 1
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3df22840fcd1..3c4a3bc31a89 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -295,8 +295,10 @@ static int __lb_setup(struct net_device *ndev,
switch (loop) {
case MAC_INTERNALLOOP_PHY:
- if ((phy_dev) && (!phy_dev->is_c45))
+ if ((phy_dev) && (!phy_dev->is_c45)) {
ret = hns_nic_config_phy_loopback(phy_dev, 0x1);
+ ret |= h->dev->ops->set_loopback(h, loop, 0x1);
+ }
break;
case MAC_INTERNALLOOP_MAC:
if ((h->dev->ops->set_loopback) &&
@@ -376,6 +378,7 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
struct sk_buff *skb)
{
struct net_device *ndev;
+ struct hns_nic_priv *priv;
struct hnae_ring *ring;
struct netdev_queue *dev_queue;
struct sk_buff *new_skb;
@@ -385,8 +388,17 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
char buff[33]; /* 32B data and the last character '\0' */
if (!ring_data) { /* Just for doing create frame*/
+ ndev = skb->dev;
+ priv = netdev_priv(ndev);
+
frame_size = skb->len;
memset(skb->data, 0xFF, frame_size);
+ if ((!AE_IS_VER1(priv->enet_ver)) &&
+ (priv->ae_handle->port_type == HNAE_PORT_SERVICE)) {
+ memcpy(skb->data, ndev->dev_addr, 6);
+ skb->data[5] += 0x1f;
+ }
+
frame_size &= ~1ul;
memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
memset(&skb->data[frame_size / 2 + 10], 0xBE,
@@ -486,6 +498,7 @@ static int __lb_run_test(struct net_device *ndev,
/* place data into test skb */
(void)skb_put(skb, size);
+ skb->dev = ndev;
__lb_other_process(NULL, skb);
skb->queue_mapping = NIC_LB_TEST_RING_ID;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 335417b4756b..ebe60719e489 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1166,7 +1166,10 @@ map_failed:
if (!firmware_has_feature(FW_FEATURE_CMO))
netdev_err(netdev, "tx: unable to map xmit buffer\n");
adapter->tx_map_failed++;
- skb_linearize(skb);
+ if (skb_linearize(skb)) {
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
force_bounce = 1;
goto retry_bounce;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 7d6570843723..6e9e16eee5d0 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1348,44 +1348,44 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
crq.request_capability.cmd = REQUEST_CAPABILITY;
crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
- crq.request_capability.number = cpu_to_be32(adapter->req_tx_queues);
+ crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
- crq.request_capability.number = cpu_to_be32(adapter->req_rx_queues);
+ crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
- crq.request_capability.number = cpu_to_be32(adapter->req_rx_add_queues);
+ crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
- cpu_to_be32(adapter->req_tx_entries_per_subcrq);
+ cpu_to_be64(adapter->req_tx_entries_per_subcrq);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
- cpu_to_be32(adapter->req_rx_add_entries_per_subcrq);
+ cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_MTU);
- crq.request_capability.number = cpu_to_be32(adapter->req_mtu);
+ crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
ibmvnic_send_crq(adapter, &crq);
if (adapter->netdev->flags & IFF_PROMISC) {
if (adapter->promisc_supported) {
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
- crq.request_capability.number = cpu_to_be32(1);
+ crq.request_capability.number = cpu_to_be64(1);
ibmvnic_send_crq(adapter, &crq);
}
} else {
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
- crq.request_capability.number = cpu_to_be32(0);
+ crq.request_capability.number = cpu_to_be64(0);
ibmvnic_send_crq(adapter, &crq);
}
@@ -2312,93 +2312,93 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
switch (be16_to_cpu(crq->query_capability.capability)) {
case MIN_TX_QUEUES:
adapter->min_tx_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "min_tx_queues = %lld\n",
adapter->min_tx_queues);
break;
case MIN_RX_QUEUES:
adapter->min_rx_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "min_rx_queues = %lld\n",
adapter->min_rx_queues);
break;
case MIN_RX_ADD_QUEUES:
adapter->min_rx_add_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
adapter->min_rx_add_queues);
break;
case MAX_TX_QUEUES:
adapter->max_tx_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_tx_queues = %lld\n",
adapter->max_tx_queues);
break;
case MAX_RX_QUEUES:
adapter->max_rx_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_rx_queues = %lld\n",
adapter->max_rx_queues);
break;
case MAX_RX_ADD_QUEUES:
adapter->max_rx_add_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
adapter->max_rx_add_queues);
break;
case MIN_TX_ENTRIES_PER_SUBCRQ:
adapter->min_tx_entries_per_subcrq =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
adapter->min_tx_entries_per_subcrq);
break;
case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
adapter->min_rx_add_entries_per_subcrq =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
adapter->min_rx_add_entries_per_subcrq);
break;
case MAX_TX_ENTRIES_PER_SUBCRQ:
adapter->max_tx_entries_per_subcrq =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
adapter->max_tx_entries_per_subcrq);
break;
case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
adapter->max_rx_add_entries_per_subcrq =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
adapter->max_rx_add_entries_per_subcrq);
break;
case TCP_IP_OFFLOAD:
adapter->tcp_ip_offload =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
adapter->tcp_ip_offload);
break;
case PROMISC_SUPPORTED:
adapter->promisc_supported =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "promisc_supported = %lld\n",
adapter->promisc_supported);
break;
case MIN_MTU:
- adapter->min_mtu = be32_to_cpu(crq->query_capability.number);
+ adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
break;
case MAX_MTU:
- adapter->max_mtu = be32_to_cpu(crq->query_capability.number);
+ adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
break;
case MAX_MULTICAST_FILTERS:
adapter->max_multicast_filters =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_multicast_filters = %lld\n",
adapter->max_multicast_filters);
break;
case VLAN_HEADER_INSERTION:
adapter->vlan_header_insertion =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
if (adapter->vlan_header_insertion)
netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
@@ -2406,43 +2406,43 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
break;
case MAX_TX_SG_ENTRIES:
adapter->max_tx_sg_entries =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
adapter->max_tx_sg_entries);
break;
case RX_SG_SUPPORTED:
adapter->rx_sg_supported =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "rx_sg_supported = %lld\n",
adapter->rx_sg_supported);
break;
case OPT_TX_COMP_SUB_QUEUES:
adapter->opt_tx_comp_sub_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
adapter->opt_tx_comp_sub_queues);
break;
case OPT_RX_COMP_QUEUES:
adapter->opt_rx_comp_queues =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
adapter->opt_rx_comp_queues);
break;
case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
adapter->opt_rx_bufadd_q_per_rx_comp_q =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
adapter->opt_rx_bufadd_q_per_rx_comp_q);
break;
case OPT_TX_ENTRIES_PER_SUBCRQ:
adapter->opt_tx_entries_per_subcrq =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
adapter->opt_tx_entries_per_subcrq);
break;
case OPT_RXBA_ENTRIES_PER_SUBCRQ:
adapter->opt_rxba_entries_per_subcrq =
- be32_to_cpu(crq->query_capability.number);
+ be64_to_cpu(crq->query_capability.number);
netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
adapter->opt_rxba_entries_per_subcrq);
break;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 1242925ad34c..1a9993cc79b5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -319,10 +319,8 @@ struct ibmvnic_capability {
u8 first;
u8 cmd;
__be16 capability; /* one of ibmvnic_capabilities */
+ __be64 number;
struct ibmvnic_rc rc;
- __be32 number; /*FIX: should be __be64, but I'm getting the least
- * significant word first
- */
} __packed __aligned(8);
struct ibmvnic_login {
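
[editor's note] The removed FIX comment hints at the endianness confusion here: the capability value is 64 bits wide on the wire, and a 32-bit overlay at the old offset cannot recover it, so the fix widens the field to __be64 and converts with be64_to_cpu()/cpu_to_be64() throughout. A hedged standalone sketch (plain C, not driver code) of extracting a big-endian 64-bit value:

#include <stdint.h>
#include <stdio.h>

/* Hedged sketch: byte-by-byte big-endian 64-bit extraction; reading only
 * the first 32 bits, as the old __be32 overlay did, loses the value. */
static uint64_t be64_get(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	const uint8_t wire[8] = { 0, 0, 0, 0, 0, 0, 1, 0 };	/* 256 */

	printf("%llu\n", (unsigned long long)be64_get(wire));
	return 0;
}
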
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index dc1a82148ff0..d09a8dd71fc2 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1207,7 +1207,7 @@ err_queueing_scheme:
static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
struct tc_to_netdev *tc)
{
- if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
+ if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
return fm10k_setup_tc(dev, tc->tc);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cf4b729c92d7..4d6223da4a19 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8400,9 +8400,6 @@ int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
tc->type == TC_SETUP_CLSU32) {
- if (!(dev->features & NETIF_F_HW_TC))
- return -EINVAL;
-
switch (tc->cls_u32->command) {
case TC_CLSU32_NEW_KNODE:
case TC_CLSU32_REPLACE_KNODE:
@@ -8422,7 +8419,7 @@ int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
}
}
- if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
+ if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
return ixgbe_setup_tc(dev, tc->tc);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b1de7afd4116..3ddf657bc10b 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme)
}
static inline void
-jme_clear_pm(struct jme_adapter *jme)
+jme_clear_pm_enable_wol(struct jme_adapter *jme)
{
jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
}
+static inline void
+jme_clear_pm_disable_wol(struct jme_adapter *jme)
+{
+ jwrite32(jme, JME_PMCS, PMCS_STMASK);
+}
+
static int
jme_reload_eeprom(struct jme_adapter *jme)
{
@@ -1853,7 +1859,7 @@ jme_open(struct net_device *netdev)
struct jme_adapter *jme = netdev_priv(netdev);
int rc;
- jme_clear_pm(jme);
+ jme_clear_pm_disable_wol(jme);
JME_NAPI_ENABLE(jme);
tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
@@ -1925,11 +1931,11 @@ jme_wait_link(struct jme_adapter *jme)
static void
jme_powersave_phy(struct jme_adapter *jme)
{
- if (jme->reg_pmcs) {
+ if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
jme_set_100m_half(jme);
if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
jme_wait_link(jme);
- jme_clear_pm(jme);
+ jme_clear_pm_enable_wol(jme);
} else {
jme_phy_off(jme);
}
@@ -2646,9 +2652,6 @@ jme_set_wol(struct net_device *netdev,
if (wol->wolopts & WAKE_MAGIC)
jme->reg_pmcs |= PMCS_MFEN;
- jwrite32(jme, JME_PMCS, jme->reg_pmcs);
- device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
-
return 0;
}
@@ -3172,8 +3175,8 @@ jme_init_one(struct pci_dev *pdev,
jme->mii_if.mdio_read = jme_mdio_read;
jme->mii_if.mdio_write = jme_mdio_write;
- jme_clear_pm(jme);
- device_set_wakeup_enable(&pdev->dev, true);
+ jme_clear_pm_disable_wol(jme);
+ device_init_wakeup(&pdev->dev, true);
jme_set_phyfifo_5level(jme);
jme->pcirev = pdev->revision;
@@ -3304,7 +3307,7 @@ jme_resume(struct device *dev)
if (!netif_running(netdev))
return 0;
- jme_clear_pm(jme);
+ jme_clear_pm_disable_wol(jme);
jme_phy_on(jme);
if (test_bit(JME_FLAG_SSET, &jme->flags))
jme_set_settings(netdev, &jme->old_ecmd);
@@ -3312,13 +3315,14 @@ jme_resume(struct device *dev)
jme_reset_phy_processor(jme);
jme_phy_calibration(jme);
jme_phy_setEA(jme);
- jme_start_irq(jme);
netif_device_attach(netdev);
atomic_inc(&jme->link_changing);
jme_reset_link(jme);
+ jme_start_irq(jme);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 1486ce902a56..9ca3734ebb6b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -4,6 +4,7 @@
config MLX4_EN
tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
+ depends on MAY_USE_DEVLINK
depends on PCI
select MLX4_CORE
select PTP_1588_CLOCK
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 96d95cb36c52..b4b258c8ca47 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -40,6 +40,7 @@
#include <net/ip.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
+#include <net/devlink.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@@ -72,7 +73,7 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
struct tc_to_netdev *tc)
{
- if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
+ if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
return mlx4_en_setup_tc(dev, tc->tc);
@@ -2033,8 +2034,11 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
/* Unregister device - this will close the port if it was up */
- if (priv->registered)
+ if (priv->registered) {
+ devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
+ priv->port));
unregister_netdev(dev);
+ }
if (priv->allocated)
mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
@@ -2254,7 +2258,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
struct mlx4_en_dev *mdev = en_priv->mdev;
u64 mac_u64 = mlx4_mac_to_u64(mac);
- if (!is_valid_ether_addr(mac))
+ if (is_multicast_ether_addr(mac))
return -EINVAL;
return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
@@ -3051,6 +3055,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
priv->registered = 1;
+ devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
+ dev);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0472941af820..dec77d6f0ac9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/errno.h>
+#include <net/devlink.h>
#include "mlx4.h"
@@ -249,3 +250,11 @@ void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int
return result;
}
EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
+
+struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+
+ return &info->devlink_port;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_devlink_port);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2cc3c626c3fe..503ec23e84cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -42,6 +42,7 @@
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>
+#include <net/devlink.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -1081,36 +1082,20 @@ static ssize_t show_port_type(struct device *dev,
return strlen(buf);
}
-static ssize_t set_port_type(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int __set_port_type(struct mlx4_port_info *info,
+ enum mlx4_port_type port_type)
{
- struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
- port_attr);
struct mlx4_dev *mdev = info->dev;
struct mlx4_priv *priv = mlx4_priv(mdev);
enum mlx4_port_type types[MLX4_MAX_PORTS];
enum mlx4_port_type new_types[MLX4_MAX_PORTS];
- static DEFINE_MUTEX(set_port_type_mutex);
int i;
int err = 0;
- mutex_lock(&set_port_type_mutex);
-
- if (!strcmp(buf, "ib\n"))
- info->tmp_type = MLX4_PORT_TYPE_IB;
- else if (!strcmp(buf, "eth\n"))
- info->tmp_type = MLX4_PORT_TYPE_ETH;
- else if (!strcmp(buf, "auto\n"))
- info->tmp_type = MLX4_PORT_TYPE_AUTO;
- else {
- mlx4_err(mdev, "%s is not supported port type\n", buf);
- err = -EINVAL;
- goto err_out;
- }
-
mlx4_stop_sense(mdev);
mutex_lock(&priv->port_mutex);
+ info->tmp_type = port_type;
+
/* Possible type is always the one that was delivered */
mdev->caps.possible_type[info->port] = info->tmp_type;
@@ -1152,6 +1137,37 @@ static ssize_t set_port_type(struct device *dev,
out:
mlx4_start_sense(mdev);
mutex_unlock(&priv->port_mutex);
+
+ return err;
+}
+
+static ssize_t set_port_type(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+ port_attr);
+ struct mlx4_dev *mdev = info->dev;
+ enum mlx4_port_type port_type;
+ static DEFINE_MUTEX(set_port_type_mutex);
+ int err;
+
+ mutex_lock(&set_port_type_mutex);
+
+ if (!strcmp(buf, "ib\n")) {
+ port_type = MLX4_PORT_TYPE_IB;
+ } else if (!strcmp(buf, "eth\n")) {
+ port_type = MLX4_PORT_TYPE_ETH;
+ } else if (!strcmp(buf, "auto\n")) {
+ port_type = MLX4_PORT_TYPE_AUTO;
+ } else {
+ mlx4_err(mdev, "%s is not supported port type\n", buf);
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ err = __set_port_type(info, port_type);
+
err_out:
mutex_unlock(&set_port_type_mutex);
@@ -1256,6 +1272,7 @@ err_set_port:
static int mlx4_mf_bond(struct mlx4_dev *dev)
{
int err = 0;
+ int nvfs;
struct mlx4_slaves_pport slaves_port1;
struct mlx4_slaves_pport slaves_port2;
DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);
@@ -1272,11 +1289,18 @@ static int mlx4_mf_bond(struct mlx4_dev *dev)
return -EINVAL;
}
+ /* number of virtual functions is number of total functions minus one
+ * physical function for each port.
+ */
+ nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
+ bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;
+
/* limit on maximum allowed VFs */
- if ((bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
- bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1)) >
- MAX_MF_BOND_ALLOWED_SLAVES)
+ if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
+ mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
+ nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
return -EINVAL;
+ }
if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
@@ -2881,8 +2905,13 @@ no_msi:
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
+ struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- int err = 0;
+ int err;
+
+ err = devlink_port_register(devlink, &info->devlink_port, port);
+ if (err)
+ return err;
info->dev = dev;
info->port = port;
@@ -2907,6 +2936,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
if (err) {
mlx4_err(dev, "Failed to create file for port %d\n", port);
+ devlink_port_unregister(&info->devlink_port);
info->port = -1;
}
@@ -3678,23 +3708,54 @@ err_disable_pdev:
return err;
}
+static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
+ enum devlink_port_type port_type)
+{
+ struct mlx4_port_info *info = container_of(devlink_port,
+ struct mlx4_port_info,
+ devlink_port);
+ enum mlx4_port_type mlx4_port_type;
+
+ switch (port_type) {
+ case DEVLINK_PORT_TYPE_AUTO:
+ mlx4_port_type = MLX4_PORT_TYPE_AUTO;
+ break;
+ case DEVLINK_PORT_TYPE_ETH:
+ mlx4_port_type = MLX4_PORT_TYPE_ETH;
+ break;
+ case DEVLINK_PORT_TYPE_IB:
+ mlx4_port_type = MLX4_PORT_TYPE_IB;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return __set_port_type(info, mlx4_port_type);
+}
+
+static const struct devlink_ops mlx4_devlink_ops = {
+ .port_type_set = mlx4_devlink_port_type_set,
+};
+
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct devlink *devlink;
struct mlx4_priv *priv;
struct mlx4_dev *dev;
int ret;
printk_once(KERN_INFO "%s", mlx4_version);
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv));
+ if (!devlink)
return -ENOMEM;
+ priv = devlink_priv(devlink);
dev = &priv->dev;
dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
if (!dev->persist) {
- kfree(priv);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_devlink_free;
}
dev->persist->pdev = pdev;
dev->persist->dev = dev;
@@ -3703,14 +3764,23 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&dev->persist->device_state_mutex);
mutex_init(&dev->persist->interface_state_mutex);
+ ret = devlink_register(devlink, &pdev->dev);
+ if (ret)
+ goto err_persist_free;
+
ret = __mlx4_init_one(pdev, id->driver_data, priv);
- if (ret) {
- kfree(dev->persist);
- kfree(priv);
- } else {
- pci_save_state(pdev);
- }
+ if (ret)
+ goto err_devlink_unregister;
+ pci_save_state(pdev);
+ return 0;
+
+err_devlink_unregister:
+ devlink_unregister(devlink);
+err_persist_free:
+ kfree(dev->persist);
+err_devlink_free:
+ devlink_free(devlink);
return ret;
}
@@ -3811,6 +3881,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
+ struct devlink *devlink = priv_to_devlink(priv);
int active_vfs = 0;
mutex_lock(&persist->interface_state_mutex);
@@ -3841,8 +3912,9 @@ static void mlx4_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
+ devlink_unregister(devlink);
kfree(dev->persist);
- kfree(priv);
+ devlink_free(devlink);
pci_set_drvdata(pdev, NULL);
}
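
In the main.c hunks above, struct mlx4_priv is no longer kzalloc'ed directly; it is embedded in a devlink instance, and devlink_register()/devlink_unregister() bracket the device lifetime. A minimal sketch of that embedding, assuming a hypothetical my_priv and my_devlink_ops:

/* Minimal sketch of the devlink embedding used above; my_priv and
 * my_devlink_ops are hypothetical placeholders.
 */
#include <net/devlink.h>

struct my_priv {
	int state;
};

static const struct devlink_ops my_devlink_ops = {
	/* .port_type_set etc. would be filled in by a real driver */
};

static int my_probe(struct device *dev)
{
	struct devlink *devlink;
	struct my_priv *priv;
	int err;

	/* devlink_alloc() reserves priv_size bytes behind the devlink
	 * object; devlink_priv() and priv_to_devlink() convert between
	 * the two, which is what lets error paths free both at once.
	 */
	devlink = devlink_alloc(&my_devlink_ops, sizeof(*priv));
	if (!devlink)
		return -ENOMEM;
	priv = devlink_priv(devlink);
	priv->state = 0;

	err = devlink_register(devlink, dev);
	if (err) {
		devlink_free(devlink);
		return err;
	}
	return 0;
}

static void my_remove(struct my_priv *priv)
{
	struct devlink *devlink = priv_to_devlink(priv);

	devlink_unregister(devlink);
	devlink_free(devlink);		/* frees priv with it */
}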
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 7baef52db6b7..ef9683101ead 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -45,6 +45,7 @@
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <net/devlink.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
@@ -828,6 +829,7 @@ struct mlx4_port_info {
struct mlx4_roce_gid_table gid_table;
int base_qpn;
struct cpu_rmap *rmap;
+ struct devlink_port devlink_port;
};
struct mlx4_sense {
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 787b7bb54d52..211c65087997 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -193,10 +193,10 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
if (need_mf_bond) {
if (port == 1) {
mutex_lock(&table->mutex);
- mutex_lock(&dup_table->mutex);
+ mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&dup_table->mutex);
- mutex_lock(&table->mutex);
+ mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
}
} else {
mutex_lock(&table->mutex);
@@ -389,10 +389,10 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
if (dup) {
if (port == 1) {
mutex_lock(&table->mutex);
- mutex_lock(&dup_table->mutex);
+ mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&dup_table->mutex);
- mutex_lock(&table->mutex);
+ mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
}
} else {
mutex_lock(&table->mutex);
@@ -479,10 +479,10 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
if (dup) {
if (port == 1) {
mutex_lock(&table->mutex);
- mutex_lock(&dup_table->mutex);
+ mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&dup_table->mutex);
- mutex_lock(&table->mutex);
+ mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
}
} else {
mutex_lock(&table->mutex);
@@ -588,10 +588,10 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
if (need_mf_bond) {
if (port == 1) {
mutex_lock(&table->mutex);
- mutex_lock(&dup_table->mutex);
+ mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&dup_table->mutex);
- mutex_lock(&table->mutex);
+ mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
}
} else {
mutex_lock(&table->mutex);
@@ -764,10 +764,10 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
if (dup) {
if (port == 1) {
mutex_lock(&table->mutex);
- mutex_lock(&dup_table->mutex);
+ mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&dup_table->mutex);
- mutex_lock(&table->mutex);
+ mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
}
} else {
mutex_lock(&table->mutex);
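
The port.c hunks above always take a port's own table lock and its HA-bond duplicate's lock in a fixed, port-ordered sequence, but both mutexes belong to the same lock class, so lockdep would flag the second acquisition as a possible recursive deadlock. mutex_lock_nested() with SINGLE_DEPTH_NESTING annotates the nesting as intentional. A minimal standalone sketch:

/* Minimal sketch of the nested-locking annotation above; the two
 * tables stand in for a port's table and its bonded duplicate.
 */
#include <linux/mutex.h>
#include <linux/lockdep.h>

struct example_table {
	struct mutex mutex;
};

static void lock_pair(struct example_table *first,
		      struct example_table *second)
{
	/* The order of first/second is fixed by the caller (lower port
	 * number first), so no deadlock is possible; the annotation
	 * only tells lockdep this same-class nesting is deliberate.
	 */
	mutex_lock(&first->mutex);
	mutex_lock_nested(&second->mutex, SINGLE_DEPTH_NESTING);
}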
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 1dca3dcf90f5..dbc2fb89e067 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -237,6 +237,7 @@ struct mlx5e_pport_stats {
static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
"packets",
+ "bytes",
"csum_none",
"csum_sw",
"lro_packets",
@@ -246,41 +247,46 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
struct mlx5e_rq_stats {
u64 packets;
+ u64 bytes;
u64 csum_none;
u64 csum_sw;
u64 lro_packets;
u64 lro_bytes;
u64 wqe_err;
-#define NUM_RQ_STATS 6
+#define NUM_RQ_STATS 7
};
static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
"packets",
+ "bytes",
"tso_packets",
"tso_bytes",
"tso_inner_packets",
"tso_inner_bytes",
- "csum_offload_none",
"csum_offload_inner",
+ "nop",
+ "csum_offload_none",
"stopped",
"wake",
"dropped",
- "nop"
};
struct mlx5e_sq_stats {
+ /* commonly accessed in data path */
u64 packets;
+ u64 bytes;
u64 tso_packets;
u64 tso_bytes;
u64 tso_inner_packets;
u64 tso_inner_bytes;
- u64 csum_offload_none;
u64 csum_offload_inner;
+ u64 nop;
+ /* less likely accessed in data path */
+ u64 csum_offload_none;
u64 stopped;
u64 wake;
u64 dropped;
- u64 nop;
-#define NUM_SQ_STATS 11
+#define NUM_SQ_STATS 12
};
struct mlx5e_stats {
@@ -326,14 +332,9 @@ enum {
MLX5E_RQ_STATE_POST_WQES_ENABLE,
};
-enum cq_flags {
- MLX5E_CQ_HAS_CQES = 1,
-};
-
struct mlx5e_cq {
/* data path - accessed per cqe */
struct mlx5_cqwq wq;
- unsigned long flags;
/* data path - accessed per napi poll */
struct napi_struct *napi;
@@ -386,6 +387,7 @@ struct mlx5e_sq_dma {
enum {
MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+ MLX5E_SQ_STATE_BF_ENABLE,
};
struct mlx5e_sq {
@@ -414,7 +416,6 @@ struct mlx5e_sq {
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
void __iomem *uar_map;
- void __iomem *uar_bf_map;
struct netdev_queue *txq;
u32 sqn;
u16 bf_buf_size;
@@ -474,6 +475,8 @@ enum mlx5e_traffic_types {
MLX5E_NUM_TT,
};
+#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY)
+
enum mlx5e_rqt_ix {
MLX5E_INDIRECTION_RQT,
MLX5E_SINGLE_RQ_RQT,
@@ -555,7 +558,6 @@ struct mlx5e_priv {
struct mlx5e_vxlan_db vxlan;
struct mlx5e_params params;
- spinlock_t async_events_spinlock; /* sync hw events */
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
struct delayed_work update_stats_work;
@@ -645,9 +647,12 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
+void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
+void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+ int num_channels);
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
struct mlx5e_tx_wqe *wqe, int bf_sz)
@@ -663,16 +668,12 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
* doorbell
*/
wmb();
-
- if (bf_sz) {
- __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
-
- /* flush the write-combining mapped buffer */
- wmb();
-
- } else {
+ if (bf_sz)
+ __iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz);
+ else
mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
- }
+ /* flush the write-combining mapped buffer */
+ wmb();
sq->bf_offset ^= sq->bf_buf_size;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index be6543570b2b..2018eebe1531 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -62,10 +62,11 @@ static void mlx5e_timestamp_overflow(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
overflow_work);
+ unsigned long flags;
- write_lock(&tstamp->lock);
+ write_lock_irqsave(&tstamp->lock, flags);
timecounter_read(&tstamp->clock);
- write_unlock(&tstamp->lock);
+ write_unlock_irqrestore(&tstamp->lock, flags);
schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
}
@@ -136,10 +137,11 @@ static int mlx5e_ptp_settime(struct ptp_clock_info *ptp,
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
u64 ns = timespec64_to_ns(ts);
+ unsigned long flags;
- write_lock(&tstamp->lock);
+ write_lock_irqsave(&tstamp->lock, flags);
timecounter_init(&tstamp->clock, &tstamp->cycles, ns);
- write_unlock(&tstamp->lock);
+ write_unlock_irqrestore(&tstamp->lock, flags);
return 0;
}
@@ -150,10 +152,11 @@ static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp,
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
u64 ns;
+ unsigned long flags;
- write_lock(&tstamp->lock);
+ write_lock_irqsave(&tstamp->lock, flags);
ns = timecounter_read(&tstamp->clock);
- write_unlock(&tstamp->lock);
+ write_unlock_irqrestore(&tstamp->lock, flags);
*ts = ns_to_timespec64(ns);
@@ -164,10 +167,11 @@ static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
+ unsigned long flags;
- write_lock(&tstamp->lock);
+ write_lock_irqsave(&tstamp->lock, flags);
timecounter_adjtime(&tstamp->clock, delta);
- write_unlock(&tstamp->lock);
+ write_unlock_irqrestore(&tstamp->lock, flags);
return 0;
}
@@ -176,6 +180,7 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
u64 adj;
u32 diff;
+ unsigned long flags;
int neg_adj = 0;
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
@@ -189,11 +194,11 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
adj *= delta;
diff = div_u64(adj, 1000000000ULL);
- write_lock(&tstamp->lock);
+ write_lock_irqsave(&tstamp->lock, flags);
timecounter_read(&tstamp->clock);
tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff :
tstamp->nominal_c_mult + diff;
- write_unlock(&tstamp->lock);
+ write_unlock_irqrestore(&tstamp->lock, flags);
return 0;
}
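
Every writer of tstamp->lock above gains the _irqsave variant: the same rwlock is also taken for reading from the RX completion path when packets are timestamped, so a writer preempted by that interrupt on its own CPU would deadlock. A minimal sketch of the rule, with the reader standing in for the interrupt-context user:

/* Minimal sketch of why the writers above disable interrupts. */
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RWLOCK(example_lock);
static u64 example_cycles;

static void writer_in_process_context(u64 val)
{
	unsigned long flags;

	/* Without _irqsave, an interrupt arriving here on this CPU and
	 * taking example_lock for reading would spin forever.
	 */
	write_lock_irqsave(&example_lock, flags);
	example_cycles = val;
	write_unlock_irqrestore(&example_lock, flags);
}

static u64 reader_in_irq_context(void)
{
	u64 val;

	read_lock(&example_lock);
	val = example_cycles;
	read_unlock(&example_lock);
	return val;
}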
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index e9760f895744..68834b715f6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -211,13 +211,14 @@ static void mlx5e_get_strings(struct net_device *dev,
sprintf(data + (idx++) * ETH_GSTRING_LEN,
"rx%d_%s", i, rq_stats_strings[j]);
- for (i = 0; i < priv->params.num_channels; i++)
- for (tc = 0; tc < priv->params.num_tc; tc++)
+ for (tc = 0; tc < priv->params.num_tc; tc++)
+ for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
sprintf(data +
- (idx++) * ETH_GSTRING_LEN,
- "tx%d_%d_%s", i, tc,
- sq_stats_strings[j]);
+ (idx++) * ETH_GSTRING_LEN,
+ "tx%d_%s",
+ priv->channeltc_to_txq_map[i][tc],
+ sq_stats_strings[j]);
break;
}
}
@@ -249,8 +250,8 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
&priv->state) ? 0 :
((u64 *)&priv->channel[i]->rq.stats)[j];
- for (i = 0; i < priv->params.num_channels; i++)
- for (tc = 0; tc < priv->params.num_tc; tc++)
+ for (tc = 0; tc < priv->params.num_tc; tc++)
+ for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
data[idx++] = !test_bit(MLX5E_STATE_OPENED,
&priv->state) ? 0 :
@@ -385,6 +386,8 @@ static int mlx5e_set_channels(struct net_device *dev,
mlx5e_close_locked(dev);
priv->params.num_channels = count;
+ mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, count);
if (was_opened)
err = mlx5e_open_locked(dev);
@@ -399,6 +402,9 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+ return -ENOTSUPP;
+
coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
@@ -416,11 +422,18 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
int tc;
int i;
+ if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ return -ENOTSUPP;
+
+ mutex_lock(&priv->state_lock);
priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
for (i = 0; i < priv->params.num_channels; ++i) {
c = priv->channel[i];
@@ -436,6 +449,8 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
coal->rx_max_coalesced_frames);
}
+out:
+ mutex_unlock(&priv->state_lock);
return 0;
}
@@ -703,18 +718,36 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return 0;
}
+static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
+ int i;
+
+ MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
+ mlx5e_build_tir_ctx_hash(tirc, priv);
+
+ for (i = 0; i < MLX5E_NUM_TT; i++)
+ if (IS_HASHING_TT(i))
+ mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen);
+}
+
static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- bool close_open;
- int err = 0;
+ int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ void *in;
if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
(hfunc != ETH_RSS_HASH_XOR) &&
(hfunc != ETH_RSS_HASH_TOP))
return -EINVAL;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
mutex_lock(&priv->state_lock);
if (indir) {
@@ -723,11 +756,6 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
}
- close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) &&
- test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (close_open)
- mlx5e_close_locked(dev);
-
if (key)
memcpy(priv->params.toeplitz_hash_key, key,
sizeof(priv->params.toeplitz_hash_key));
@@ -735,12 +763,13 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
if (hfunc != ETH_RSS_HASH_NO_CHANGE)
priv->params.rss_hfunc = hfunc;
- if (close_open)
- err = mlx5e_open_locked(priv->netdev);
+ mlx5e_modify_tirs_hash(priv, in, inlen);
mutex_unlock(&priv->state_lock);
- return err;
+ kvfree(in);
+
+ return 0;
}
static int mlx5e_get_rxnfc(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 0d45f35aee72..19e5daeaa61d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -143,6 +143,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
return;
/* Collect first the SW counters and then HW for consistency */
+ s->rx_packets = 0;
+ s->rx_bytes = 0;
+ s->tx_packets = 0;
+ s->tx_bytes = 0;
s->tso_packets = 0;
s->tso_bytes = 0;
s->tso_inner_packets = 0;
@@ -160,6 +164,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
for (i = 0; i < priv->params.num_channels; i++) {
rq_stats = &priv->channel[i]->rq.stats;
+ s->rx_packets += rq_stats->packets;
+ s->rx_bytes += rq_stats->bytes;
s->lro_packets += rq_stats->lro_packets;
s->lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
@@ -169,6 +175,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
+ s->tx_packets += sq_stats->packets;
+ s->tx_bytes += sq_stats->bytes;
s->tso_packets += sq_stats->tso_packets;
s->tso_bytes += sq_stats->tso_bytes;
s->tso_inner_packets += sq_stats->tso_inner_packets;
@@ -233,23 +241,6 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->tx_broadcast_bytes =
MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
- s->rx_packets =
- s->rx_unicast_packets +
- s->rx_multicast_packets +
- s->rx_broadcast_packets;
- s->rx_bytes =
- s->rx_unicast_bytes +
- s->rx_multicast_bytes +
- s->rx_broadcast_bytes;
- s->tx_packets =
- s->tx_unicast_packets +
- s->tx_multicast_packets +
- s->tx_broadcast_packets;
- s->tx_bytes =
- s->tx_unicast_bytes +
- s->tx_multicast_bytes +
- s->tx_broadcast_bytes;
-
/* Update calculated offload counters */
s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
s->rx_csum_good = s->rx_packets - s->rx_csum_none -
@@ -275,9 +266,14 @@ static void mlx5e_update_stats_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
-static void __mlx5e_async_event(struct mlx5e_priv *priv,
- enum mlx5_dev_event event)
+static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+ enum mlx5_dev_event event, unsigned long param)
{
+ struct mlx5e_priv *priv = vpriv;
+
+ if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+ return;
+
switch (event) {
case MLX5_DEV_EVENT_PORT_UP:
case MLX5_DEV_EVENT_PORT_DOWN:
@@ -289,17 +285,6 @@ static void __mlx5e_async_event(struct mlx5e_priv *priv,
}
}
-static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
- enum mlx5_dev_event event, unsigned long param)
-{
- struct mlx5e_priv *priv = vpriv;
-
- spin_lock(&priv->async_events_spinlock);
- if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
- __mlx5e_async_event(priv, event);
- spin_unlock(&priv->async_events_spinlock);
-}
-
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
@@ -307,9 +292,8 @@ static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
- spin_lock_irq(&priv->async_events_spinlock);
clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
- spin_unlock_irq(&priv->async_events_spinlock);
+ synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
@@ -555,7 +539,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
int txq_ix;
int err;
- err = mlx5_alloc_map_uar(mdev, &sq->uar);
+ err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
if (err)
return err;
@@ -567,8 +551,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
goto err_unmap_free_uar;
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- sq->uar_map = sq->uar.map;
- sq->uar_bf_map = sq->uar.bf_map;
+ if (sq->uar.bf_map) {
+ set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
+ sq->uar_map = sq->uar.bf_map;
+ } else {
+ sq->uar_map = sq->uar.map;
+ }
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
sq->max_inline = param->max_inline;
@@ -877,12 +865,10 @@ static int mlx5e_open_cq(struct mlx5e_channel *c,
if (err)
goto err_destroy_cq;
- err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
- moderation_usecs,
- moderation_frames);
- if (err)
- goto err_destroy_cq;
-
+ if (MLX5_CAP_GEN(mdev, cq_moderation))
+ mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+ moderation_usecs,
+ moderation_frames);
return 0;
err_destroy_cq:
@@ -1071,6 +1057,15 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
param->wq.linear = 1;
}
+static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
+{
+ void *rqc = param->rqc;
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+}
+
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
struct mlx5e_sq_param *param)
{
@@ -1207,7 +1202,6 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
ix = priv->params.indirection_rqt[ix];
- ix = ix % priv->params.num_channels;
MLX5_SET(rqtc, rqtc, rq_num[i],
test_bit(MLX5E_STATE_OPENED, &priv->state) ?
priv->channel[ix]->rq.rqn :
@@ -1325,7 +1319,22 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
lro_timer_supported_periods[2]));
}
-static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
+void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+{
+ MLX5_SET(tirc, tirc, rx_hash_fn,
+ mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+ if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+ void *rss_key = MLX5_ADDR_OF(tirc, tirc,
+ rx_hash_toeplitz_key);
+ size_t len = MLX5_FLD_SZ_BYTES(tirc,
+ rx_hash_toeplitz_key);
+
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ memcpy(rss_key, priv->params.toeplitz_hash_key, len);
+ }
+}
+
+static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1333,6 +1342,7 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
void *tirc;
int inlen;
int err;
+ int tt;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
@@ -1344,7 +1354,11 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
mlx5e_build_tir_ctx_lro(tirc, priv);
- err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+ for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
+ err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+ if (err)
+ break;
+ }
kvfree(in);
@@ -1458,8 +1472,8 @@ int mlx5e_open_locked(struct net_device *netdev)
goto err_close_channels;
}
- mlx5e_update_carrier(priv);
mlx5e_redirect_rqts(priv);
+ mlx5e_update_carrier(priv);
mlx5e_timestamp_init(priv);
schedule_delayed_work(&priv->update_stats_work, 0);
@@ -1498,8 +1512,8 @@ int mlx5e_close_locked(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
mlx5e_timestamp_cleanup(priv);
- mlx5e_redirect_rqts(priv);
netif_carrier_off(priv->netdev);
+ mlx5e_redirect_rqts(priv);
mlx5e_close_channels(priv);
return 0;
@@ -1581,8 +1595,7 @@ static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
memset(&cq_param, 0, sizeof(cq_param));
memset(&rq_param, 0, sizeof(rq_param));
- mlx5e_build_rx_cq_param(priv, &cq_param);
- mlx5e_build_rq_param(priv, &rq_param);
+ mlx5e_build_drop_rq_param(&rq_param);
err = mlx5e_create_drop_cq(priv, cq, &cq_param);
if (err)
@@ -1700,17 +1713,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
default:
MLX5_SET(tirc, tirc, indirect_table,
priv->rqtn[MLX5E_INDIRECTION_RQT]);
- MLX5_SET(tirc, tirc, rx_hash_fn,
- mlx5e_rx_hash_fn(priv->params.rss_hfunc));
- if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
- void *rss_key = MLX5_ADDR_OF(tirc, tirc,
- rx_hash_toeplitz_key);
- size_t len = MLX5_FLD_SZ_BYTES(tirc,
- rx_hash_toeplitz_key);
-
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
- memcpy(rss_key, priv->params.toeplitz_hash_key, len);
- }
+ mlx5e_build_tir_ctx_hash(tirc, priv);
break;
}
@@ -1947,8 +1950,10 @@ static int mlx5e_set_features(struct net_device *netdev,
mlx5e_close_locked(priv->netdev);
priv->params.lro_en = !!(features & NETIF_F_LRO);
- mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
- mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
+ err = mlx5e_modify_tirs_lro(priv);
+ if (err)
+ mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
+ err);
if (was_opened)
err = mlx5e_open_locked(priv->netdev);
@@ -2217,6 +2222,8 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
}
if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
+ if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ mlx5_core_warn(mdev, "CQ modiration is not supported\n");
return 0;
}
@@ -2248,12 +2255,20 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
}
#endif
+void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+ int num_channels)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ indirection_rqt[i] = i % num_channels;
+}
+
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
int num_channels)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- int i;
priv->params.log_sq_size =
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -2276,8 +2291,8 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
netdev_rss_key_fill(priv->params.toeplitz_hash_key,
sizeof(priv->params.toeplitz_hash_key));
- for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
- priv->params.indirection_rqt[i] = i % num_channels;
+ mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, num_channels);
priv->params.lro_wqe_sz =
MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@@ -2290,7 +2305,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
mlx5e_ets_init(priv);
#endif
- spin_lock_init(&priv->async_events_spinlock);
mutex_init(&priv->state_lock);
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
@@ -2418,7 +2432,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
priv = netdev_priv(netdev);
- err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
+ err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
if (err) {
mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
goto err_free_netdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 519a07f253f9..884ed19cded2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -231,10 +231,6 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
int work_done;
- /* avoid accessing cq (dma coherent memory) if not needed */
- if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
- return 0;
-
for (work_done = 0; work_done < budget; work_done++) {
struct mlx5e_rx_wqe *wqe;
struct mlx5_cqe64 *cqe;
@@ -268,6 +264,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
mlx5e_build_rx_skb(cqe, rq, skb);
rq->stats.packets++;
+ rq->stats.bytes += be32_to_cpu(cqe->byte_cnt);
napi_gro_receive(cq->napi, skb);
wq_ll_pop:
@@ -280,8 +277,5 @@ wq_ll_pop:
/* ensure cq space is freed before enabling more cqes */
wmb();
- if (work_done == budget)
- set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
-
return work_done;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index a05c070cbc2f..94a14f85f70d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -177,6 +177,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
unsigned int skb_len = skb->len;
u8 opcode = MLX5_OPCODE_SEND;
dma_addr_t dma_addr = 0;
+ unsigned int num_bytes;
bool bf = false;
u16 headlen;
u16 ds_cnt;
@@ -216,16 +217,17 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.tso_bytes += skb->len - ihs;
}
- wi->num_bytes = skb->len +
- (skb_shinfo(skb)->gso_segs - 1) * ihs;
+ num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
} else {
bf = sq->bf_budget &&
!skb->xmit_more &&
!skb_shinfo(skb)->nr_frags;
ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
- wi->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
}
+ wi->num_bytes = num_bytes;
+
if (skb_vlan_tag_present(skb)) {
mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
&skb_len);
@@ -303,7 +305,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
int bf_sz = 0;
- if (bf && sq->uar_bf_map)
+ if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
bf_sz = wi->num_wqebbs << 3;
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
@@ -317,6 +319,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
sq->stats.packets++;
+ sq->stats.bytes += num_bytes;
return NETDEV_TX_OK;
dma_unmap_wqe_err:
@@ -345,10 +348,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
u16 sqcc;
int i;
- /* avoid accessing cq (dma coherent memory) if not needed */
- if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
- return false;
-
sq = container_of(cq, struct mlx5e_sq, cq);
npkts = 0;
@@ -432,10 +431,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
netif_tx_wake_queue(sq->txq);
sq->stats.wake++;
}
- if (i == MLX5E_TX_CQ_POLL_BUDGET) {
- set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
- return true;
- }
- return false;
+ return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 4ac8d716dbdd..66d51a77609e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -88,7 +88,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
- set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
barrier();
napi_schedule(cq->napi);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 647a3ca2c2a9..18fccec72c5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -442,6 +442,11 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);
+u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx)
+{
+ return dev->priv.msix_arr[MLX5_EQ_VEC_ASYNC].vector;
+}
+
int mlx5_eq_init(struct mlx5_core_dev *dev)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 1545a944c309..8b7133de498e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -767,22 +767,6 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
return -ENOTSUPP;
}
-static int map_bf_area(struct mlx5_core_dev *dev)
-{
- resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
- resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
-
- dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
-
- return dev->priv.bf_mapping ? 0 : -ENOMEM;
-}
-
-static void unmap_bf_area(struct mlx5_core_dev *dev)
-{
- if (dev->priv.bf_mapping)
- io_mapping_free(dev->priv.bf_mapping);
-}
-
static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
struct mlx5_device_context *dev_ctx;
@@ -1103,14 +1087,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto err_stop_eqs;
}
- if (map_bf_area(dev))
- dev_err(&pdev->dev, "Failed to map blue flame area\n");
-
err = mlx5_irq_set_affinity_hints(dev);
- if (err) {
+ if (err)
dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
- goto err_unmap_bf_area;
- }
MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
@@ -1169,10 +1148,6 @@ err_fs:
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
mlx5_irq_clear_affinity_hints(dev);
-
-err_unmap_bf_area:
- unmap_bf_area(dev);
-
free_comp_eqs(dev);
err_stop_eqs:
@@ -1242,7 +1217,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
mlx5_irq_clear_affinity_hints(dev);
- unmap_bf_area(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 0336847ec9a1..0b0b226c789e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -99,6 +99,7 @@ int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev);
+u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
void mlx5e_init(void);
void mlx5e_cleanup(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index eb05c845ece9..8ba080e441a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -226,7 +226,8 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
return 0;
}
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
+ bool map_wc)
{
phys_addr_t pfn;
phys_addr_t uar_bar_start;
@@ -240,20 +241,26 @@ int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
uar_bar_start = pci_resource_start(mdev->pdev, 0);
pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index;
- uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
- if (!uar->map) {
- mlx5_core_warn(mdev, "ioremap() failed, %d\n", err);
- err = -ENOMEM;
- goto err_free_uar;
- }
- if (mdev->priv.bf_mapping)
- uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
- uar->index << PAGE_SHIFT);
+ if (map_wc) {
+ uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!uar->bf_map) {
+ mlx5_core_warn(mdev, "ioremap_wc() failed\n");
+ uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!uar->map)
+ goto err_free_uar;
+ }
+ } else {
+ uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!uar->map)
+ goto err_free_uar;
+ }
return 0;
err_free_uar:
+ mlx5_core_warn(mdev, "ioremap() failed\n");
+ err = -ENOMEM;
mlx5_cmd_free_uar(mdev, uar->index);
return err;
@@ -262,8 +269,8 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
{
- io_mapping_unmap(uar->bf_map);
iounmap(uar->map);
+ iounmap(uar->bf_map);
mlx5_cmd_free_uar(mdev, uar->index);
}
EXPORT_SYMBOL(mlx5_unmap_free_uar);
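
The uar.c rewrite above drops the single io_mapping over the whole BAR and instead maps each UAR's blue-flame page with ioremap_wc(), falling back to a plain uncached ioremap() when write-combining is unavailable (callers check uar->bf_map to see which one they got). A minimal sketch of the fallback, with hypothetical names:

/* Minimal sketch of the WC-with-fallback mapping above; the function
 * name and single-page size are illustrative.
 */
#include <linux/io.h>

static void __iomem *map_doorbell_page(phys_addr_t base, bool *is_wc)
{
	void __iomem *p;

	/* Prefer write-combining: consecutive doorbell stores coalesce
	 * into fewer bus transactions. Not every arch/BAR supports it.
	 */
	p = ioremap_wc(base, PAGE_SIZE);
	if (p) {
		*is_wc = true;
		return p;
	}

	*is_wc = false;
	return ioremap(base, PAGE_SIZE);	/* uncached fallback */
}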
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index ce26adcb4988..2ad7f67854d5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -4,6 +4,7 @@
config MLXSW_CORE
tristate "Mellanox Technologies Switch ASICs support"
+ depends on MAY_USE_DEVLINK
---help---
This driver supports Mellanox Technologies Switch ASICs family.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 22379eb8e924..f69f6280519f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -56,6 +56,7 @@
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
+#include <net/devlink.h>
#include "core.h"
#include "item.h"
@@ -784,6 +785,38 @@ static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
debugfs_remove_recursive(mlxsw_core->dbg_dir);
}
+static int mlxsw_devlink_port_split(struct devlink *devlink,
+ unsigned int port_index,
+ unsigned int count)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (port_index >= MLXSW_PORT_MAX_PORTS)
+ return -EINVAL;
+ if (!mlxsw_core->driver->port_split)
+ return -EOPNOTSUPP;
+ return mlxsw_core->driver->port_split(mlxsw_core->driver_priv,
+ port_index, count);
+}
+
+static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
+ unsigned int port_index)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (port_index >= MLXSW_PORT_MAX_PORTS)
+ return -EINVAL;
+ if (!mlxsw_core->driver->port_unsplit)
+ return -EOPNOTSUPP;
+ return mlxsw_core->driver->port_unsplit(mlxsw_core->driver_priv,
+ port_index);
+}
+
+static const struct devlink_ops mlxsw_devlink_ops = {
+ .port_split = mlxsw_devlink_port_split,
+ .port_unsplit = mlxsw_devlink_port_unsplit,
+};
+
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv)
@@ -791,6 +824,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
struct mlxsw_driver *mlxsw_driver;
+ struct devlink *devlink;
size_t alloc_size;
int err;
@@ -798,12 +832,13 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (!mlxsw_driver)
return -EINVAL;
alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
- mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
- if (!mlxsw_core) {
+ devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
+ if (!devlink) {
err = -ENOMEM;
- goto err_core_alloc;
+ goto err_devlink_alloc;
}
+ mlxsw_core = devlink_priv(devlink);
INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
mlxsw_core->driver = mlxsw_driver;
@@ -841,6 +876,10 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_hwmon_init;
+ err = devlink_register(devlink, mlxsw_bus_info->dev);
+ if (err)
+ goto err_devlink_register;
+
err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
mlxsw_bus_info);
if (err)
@@ -855,6 +894,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
err_debugfs_init:
mlxsw_core->driver->fini(mlxsw_core->driver_priv);
err_driver_init:
+ devlink_unregister(devlink);
+err_devlink_register:
err_hwmon_init:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
@@ -864,8 +905,8 @@ err_bus_init:
err_alloc_lag_mapping:
free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
- kfree(mlxsw_core);
-err_core_alloc:
+ devlink_free(devlink);
+err_devlink_alloc:
mlxsw_core_driver_put(device_kind);
return err;
}
@@ -874,14 +915,16 @@ EXPORT_SYMBOL(mlxsw_core_bus_device_register);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
const char *device_kind = mlxsw_core->bus_info->device_kind;
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
mlxsw_core_debugfs_fini(mlxsw_core);
mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+ devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
kfree(mlxsw_core->lag.mapping);
free_percpu(mlxsw_core->pcpu_stats);
- kfree(mlxsw_core);
+ devlink_free(devlink);
mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index a01723600f0a..c73d1c0792a6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -186,6 +186,8 @@ struct mlxsw_driver {
int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info);
void (*fini)(void *driver_priv);
+ int (*port_split)(void *driver_priv, u8 local_port, unsigned int count);
+ int (*port_unsplit)(void *driver_priv, u8 local_port);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
u8 txhdr_len;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index c071077aafbd..7992c553c1f5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -215,7 +215,7 @@ mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
int index = q->producer_counter & (q->count - 1);
- if ((q->producer_counter - q->consumer_counter) == q->count)
+ if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
return NULL;
return mlxsw_pci_queue_elem_info_get(q, index);
}
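
The one-line pci.c fix above adds a (u16) cast because producer_counter and consumer_counter are free-running 16-bit counters: the C subtraction promotes them to int, and once the producer wraps past 65535 the untruncated difference goes negative and can never equal q->count, so the full-queue check silently stops firing. A worked example as a sketch:

/* Minimal sketch of the wraparound fix above. */
#include <linux/types.h>

static bool queue_full(u16 producer, u16 consumer, u16 count)
{
	/* With producer = 3 and consumer = 65534, the int difference is
	 * -65531 and never matches count, while (u16)(3 - 65534) = 5,
	 * the true number of outstanding elements.
	 */
	return (u16)(producer - consumer) == count;
}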
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index ae65b9940aed..f33b997f2b61 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -59,6 +59,8 @@
#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS)
+#define MLXSW_PORT_MODULE_MAX_WIDTH 4
+
enum mlxsw_port_admin_status {
MLXSW_PORT_ADMIN_STATUS_UP = 1,
MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 09ce451c283b..4afbc3e9e381 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -49,6 +49,7 @@
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
+#include <net/devlink.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
@@ -304,21 +305,47 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
-static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
- bool *p_usable)
+static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 *p_module,
+ u8 *p_width)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int err;
- mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
if (err)
return err;
- *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
+ *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+ *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
return 0;
}
+static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 module, u8 width, u8 lane)
+{
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int i;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ mlxsw_reg_pmlp_width_set(pmlp_pl, width);
+ for (i = 0; i < width; i++) {
+ mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
+ mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+}
+
+static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+}
+
static int mlxsw_sp_port_open(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
@@ -1273,6 +1300,18 @@ static u32 mlxsw_sp_to_ptys_speed(u32 speed)
return ptys_proto;
}
+static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
+ ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
static int mlxsw_sp_port_set_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
@@ -1349,11 +1388,27 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.set_settings = mlxsw_sp_port_set_settings,
};
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+static int
+mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_admin;
+
+ eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
+ eth_proto_admin);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+}
+
+static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ bool split, u8 module, u8 width)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct devlink_port *devlink_port;
struct net_device *dev;
- bool usable;
size_t bytes;
int err;
@@ -1364,6 +1419,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port->dev = dev;
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
+ mlxsw_sp_port->split = split;
bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->active_vlans) {
@@ -1404,17 +1460,14 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
*/
dev->hard_header_len += MLXSW_TXHDR_LEN;
- err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
+ devlink_port = &mlxsw_sp_port->devlink_port;
+ if (mlxsw_sp_port->split)
+ devlink_port_split_set(devlink_port, module);
+ err = devlink_port_register(devlink, devlink_port, local_port);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n",
mlxsw_sp_port->local_port);
- goto err_port_module_check;
- }
-
- if (!usable) {
- dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
- mlxsw_sp_port->local_port);
- goto port_not_usable;
+ goto err_devlink_port_register;
}
err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
@@ -1431,6 +1484,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
goto err_port_swid_set;
}
+ err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_speed_by_width_set;
+ }
+
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
@@ -1457,6 +1517,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
goto err_register_netdev;
}
+ devlink_port_type_eth_set(devlink_port, dev);
+
err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
if (err)
goto err_port_vlan_init;
@@ -1470,10 +1532,11 @@ err_register_netdev:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
+err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
-port_not_usable:
-err_port_module_check:
+ devlink_port_unregister(&mlxsw_sp_port->devlink_port);
+err_devlink_port_register:
err_dev_addr_init:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
@@ -1485,6 +1548,28 @@ err_port_active_vlans_alloc:
return err;
}
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ bool split, u8 module, u8 width, u8 lane)
+{
+ int err;
+
+ err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
+ lane);
+ if (err)
+ return err;
+
+ err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
+ width);
+ if (err)
+ goto err_port_create;
+
+ return 0;
+
+err_port_create:
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
+ return err;
+}
+
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct net_device *dev = mlxsw_sp_port->dev;
@@ -1505,12 +1590,19 @@ static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ struct devlink_port *devlink_port;
if (!mlxsw_sp_port)
return;
+ mlxsw_sp->ports[local_port] = NULL;
+ devlink_port = &mlxsw_sp_port->devlink_port;
+ devlink_port_type_clear(devlink_port);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ devlink_port_unregister(devlink_port);
mlxsw_sp_port_vports_fini(mlxsw_sp_port);
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+ mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
+ mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
kfree(mlxsw_sp_port->untagged_vlans);
kfree(mlxsw_sp_port->active_vlans);
@@ -1529,6 +1621,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
size_t alloc_size;
+ u8 module, width;
int i;
int err;
@@ -1538,19 +1631,158 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
- err = mlxsw_sp_port_create(mlxsw_sp, i);
+ err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
+ &width);
+ if (err)
+ goto err_port_module_info_get;
+ if (!width)
+ continue;
+ mlxsw_sp->port_to_module[i] = module;
+ err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
if (err)
goto err_port_create;
}
return 0;
err_port_create:
+err_port_module_info_get:
for (i--; i >= 1; i--)
mlxsw_sp_port_remove(mlxsw_sp, i);
kfree(mlxsw_sp->ports);
return err;
}
+static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
+{
+ u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
+
+ return local_port - offset;
+}
+
+static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+ u8 module, cur_width, base_port;
+ int i;
+ int err;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
+ local_port);
+ return -EINVAL;
+ }
+
+ if (count != 2 && count != 4) {
+ netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
+ &cur_width);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
+ return err;
+ }
+
+ if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
+ netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
+ return -EINVAL;
+ }
+
+ /* Make sure we have enough slave (even) ports for the split. */
+ if (count == 2) {
+ base_port = local_port;
+ if (mlxsw_sp->ports[base_port + 1]) {
+ netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
+ return -EINVAL;
+ }
+ } else {
+ base_port = mlxsw_sp_cluster_base_port_get(local_port);
+ if (mlxsw_sp->ports[base_port + 1] ||
+ mlxsw_sp->ports[base_port + 3]) {
+ netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < count; i++)
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
+ module, width, i * width);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
+ goto err_port_create;
+ }
+ }
+
+ return 0;
+
+err_port_create:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ for (i = 0; i < count / 2; i++) {
+ module = mlxsw_sp->port_to_module[base_port + i * 2];
+ mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
+ module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
+ }
+ return err;
+}
+
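The lane assignment implied by the split path can be illustrated with a short standalone sketch, assuming MLXSW_PORT_MODULE_MAX_WIDTH == 4 (the full-width value checked above; the actual constant lives in port.h, not in this hunk): each of the count new ports gets width = 4 / count lanes starting at lane i * width.

#include <stdio.h>

#define MLXSW_PORT_MODULE_MAX_WIDTH 4	/* assumed module width */

int main(void)
{
	unsigned int counts[] = { 2, 4 };

	for (unsigned int c = 0; c < 2; c++) {
		unsigned int count = counts[c];
		unsigned int width = MLXSW_PORT_MODULE_MAX_WIDTH / count;

		for (unsigned int i = 0; i < count; i++)
			printf("split into %u: sub-port %u -> lanes %u..%u\n",
			       count, i, i * width, i * width + width - 1);
	}
	return 0;
}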
+static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ u8 module, cur_width, base_port;
+ unsigned int count;
+ int i;
+ int err;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
+ local_port);
+ return -EINVAL;
+ }
+
+ if (!mlxsw_sp_port->split) {
+ netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
+ &cur_width);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
+ return err;
+ }
+ count = cur_width == 1 ? 4 : 2;
+
+ base_port = mlxsw_sp_cluster_base_port_get(local_port);
+
+ /* Determine which ports to remove. */
+ if (count == 2 && local_port >= base_port + 2)
+ base_port = base_port + 2;
+
+ for (i = 0; i < count; i++)
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+
+ for (i = 0; i < count / 2; i++) {
+ module = mlxsw_sp->port_to_module[base_port + i * 2];
+ err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
+ module, MLXSW_PORT_MODULE_MAX_WIDTH,
+ 0);
+ if (err)
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
+ }
+
+ return 0;
+}
+
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
char *pude_pl, void *priv)
{
@@ -1974,6 +2206,8 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.priv_size = sizeof(struct mlxsw_sp),
.init = mlxsw_sp_init,
.fini = mlxsw_sp_fini,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp_config_profile,
@@ -2358,9 +2592,7 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
if (mlxsw_sp_port->bridged) {
mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
-
- if (lag->ref_count == 1)
- mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
+ mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
}
if (lag->ref_count == 1) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3b89ed2f3c76..1b691d7e4a2a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -43,6 +43,7 @@
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <net/switchdev.h>
+#include <net/devlink.h>
#include "port.h"
#include "core.h"
@@ -57,6 +58,10 @@
#define MLXSW_SP_MID_MAX 7000
+#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
+
+#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
+
struct mlxsw_sp_port;
struct mlxsw_sp_upper {
@@ -122,6 +127,7 @@ struct mlxsw_sp {
u32 ageing_time;
struct mlxsw_sp_upper master_bridge;
struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
+ u8 port_to_module[MLXSW_PORT_MAX_PORTS];
};
static inline struct mlxsw_sp_upper *
@@ -149,7 +155,8 @@ struct mlxsw_sp_port {
learning_sync:1,
uc_flood:1,
bridged:1,
- lagged:1;
+ lagged:1,
+ split:1;
u16 pvid;
u16 lag_id;
struct {
@@ -162,6 +169,7 @@ struct mlxsw_sp_port {
unsigned long *untagged_vlans;
/* VLAN interfaces */
struct list_head vports_list;
+ struct devlink_port devlink_port;
};
static inline struct mlxsw_sp_port *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index d85960cfb694..7a60a26759b6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -43,6 +43,7 @@
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
+#include <net/devlink.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
@@ -78,6 +79,7 @@ struct mlxsw_sx_port {
struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sx *mlxsw_sx;
u8 local_port;
+ struct devlink_port devlink_port;
};
/* tx_hdr_version
@@ -953,7 +955,9 @@ mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sx->core);
struct mlxsw_sx_port *mlxsw_sx_port;
+ struct devlink_port *devlink_port;
struct net_device *dev;
bool usable;
int err;
@@ -1007,6 +1011,14 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
goto port_not_usable;
}
+ devlink_port = &mlxsw_sx_port->devlink_port;
+ err = devlink_port_register(devlink, devlink_port, local_port);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register devlink port\n",
+ mlxsw_sx_port->local_port);
+ goto err_devlink_port_register;
+ }
+
err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
if (err) {
dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
@@ -1064,6 +1076,8 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
goto err_register_netdev;
}
+ devlink_port_type_eth_set(devlink_port, dev);
+
mlxsw_sx->ports[local_port] = mlxsw_sx_port;
return 0;
@@ -1075,6 +1089,8 @@ err_port_mtu_set:
err_port_speed_set:
err_port_swid_set:
err_port_system_port_mapping_set:
+ devlink_port_unregister(&mlxsw_sx_port->devlink_port);
+err_devlink_port_register:
port_not_usable:
err_port_module_check:
err_dev_addr_get:
@@ -1087,11 +1103,15 @@ err_alloc_stats:
static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+ struct devlink_port *devlink_port;
if (!mlxsw_sx_port)
return;
+ devlink_port = &mlxsw_sx_port->devlink_port;
+ devlink_port_type_clear(devlink_port);
unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
+ devlink_port_unregister(devlink_port);
free_percpu(mlxsw_sx_port->pcpu_stats);
free_netdev(mlxsw_sx_port->dev);
}
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 00cfd95ca59d..3e67f451f2ab 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -474,9 +474,9 @@ static int moxart_mac_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ndev->base_addr = res->start;
priv->base = devm_ioremap_resource(p_dev, res);
- ret = IS_ERR(priv->base);
- if (ret) {
+ if (IS_ERR(priv->base)) {
dev_err(p_dev, "devm_ioremap_resource failed\n");
+ ret = PTR_ERR(priv->base);
goto init_fail;
}
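The moxart fix above replaces "ret = IS_ERR(priv->base)", which stores a boolean 0/1 rather than an errno, with the standard IS_ERR()/PTR_ERR() idiom. A userspace sketch of why the distinction matters; MAX_ERRNO and the macros below mirror include/linux/err.h:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO	4095
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(ptr))
#define ERR_PTR(err)	((void *)(long)(err))

int main(void)
{
	/* What devm_ioremap_resource() may return on failure */
	void *base = ERR_PTR(-ENOMEM);

	if (IS_ERR(base)) {
		/* Prints 1 -- not a meaningful -errno return code */
		printf("IS_ERR() as return code: %d\n", (int)(IS_ERR(base) != 0));
		/* Prints -12, the real -ENOMEM the caller should propagate */
		printf("PTR_ERR() as return code: %ld\n", PTR_ERR(base));
	}
	return 0;
}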
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 66b021e3c1be..e5604eec81bf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -146,9 +146,6 @@ struct qed_hw_info {
u16 ovlan;
u32 part_num[4];
- u32 vendor_id;
- u32 device_id;
-
unsigned char hw_mac_addr[ETH_ALEN];
struct qed_igu_info *p_igu_info;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index acfe7be49a58..b7d100f6bd6f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -32,6 +32,33 @@
#include "qed_sp.h"
/* API common to all protocols */
+enum BAR_ID {
+ BAR_ID_0, /* Used for GRC */
+ BAR_ID_1 /* Used for doorbells */
+};
+
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
+ enum BAR_ID bar_id)
+{
+ u32 bar_reg = (bar_id == BAR_ID_0 ?
+ PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+ u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+
+ if (val)
+ return 1 << (val + 15);
+
+ /* Old MFW initialized the above register only conditionally */
+ if (p_hwfn->cdev->num_hwfns > 1) {
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
+ return (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
+ } else {
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
+ return 512 * 1024;
+ }
+}
+
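The decode in qed_hw_bar_size() treats the register value as a power-of-two exponent: a value of val encodes 2^(val + 15) bytes. A standalone sketch of the mapping:

#include <stdio.h>

int main(void)
{
	/* val == 0 -> 32 kB, val == 3 -> 256 kB, val == 4 -> 512 kB */
	for (unsigned int val = 0; val <= 4; val++)
		printf("val=%u -> %u kB\n", val, (1u << (val + 15)) / 1024);
	return 0;
}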
void qed_init_dp(struct qed_dev *cdev,
u32 dp_module, u8 dp_level)
{
@@ -393,7 +420,7 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
int hw_mode = 0;
- hw_mode = (1 << MODE_BB_A0);
+ hw_mode = (1 << MODE_BB_B0);
switch (p_hwfn->cdev->num_ports_in_engines) {
case 1:
@@ -650,10 +677,8 @@ int qed_hw_init(struct qed_dev *cdev,
bool allow_npar_tx_switch,
const u8 *bin_fw_data)
{
- struct qed_storm_stats *p_stat;
- u32 load_code, param, *p_address;
+ u32 load_code, param;
int rc, mfw_rc, i;
- u8 fw_vport = 0;
rc = qed_init_fw_data(cdev, bin_fw_data);
if (rc != 0)
@@ -662,10 +687,6 @@ int qed_hw_init(struct qed_dev *cdev,
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- rc = qed_fw_vport(p_hwfn, 0, &fw_vport);
- if (rc != 0)
- return rc;
-
/* Enable DMAE in PXP */
rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
@@ -729,35 +750,60 @@ int qed_hw_init(struct qed_dev *cdev,
}
p_hwfn->hw_init_done = true;
+ }
- /* init PF stats */
- p_stat = &p_hwfn->storm_stats;
- p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM +
- MSTORM_QUEUE_STAT_OFFSET(fw_vport);
- p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+ return 0;
+}
- p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM +
- USTORM_QUEUE_STAT_OFFSET(fw_vport);
- p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+#define QED_HW_STOP_RETRY_LIMIT (10)
+static inline void qed_hw_timers_stop(struct qed_dev *cdev,
+ struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ int i;
+
+ /* close timers */
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
- p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM +
- PSTORM_QUEUE_STAT_OFFSET(fw_vport);
- p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+ for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+ if ((!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+ (!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK)))
+ break;
- p_address = &p_stat->tstats.address;
- *p_address = BAR0_MAP_REG_TSDM_RAM +
- TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
- p_stat->tstats.len = sizeof(struct tstorm_per_port_stat);
+ /* Depending on the number of connections/tasks, a 1ms sleep
+ * may be required between polls
+ */
+ usleep_range(1000, 2000);
}
- return 0;
+ if (i < QED_HW_STOP_RETRY_LIMIT)
+ return;
+
+ DP_NOTICE(p_hwfn,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+ (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
+}
+
+void qed_hw_timers_stop_all(struct qed_dev *cdev)
+{
+ int j;
+
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
+ }
}
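The new helper centralizes a bounded poll: up to QED_HW_STOP_RETRY_LIMIT iterations with a 1-2ms sleep each, so the total wait is capped at roughly 10-20ms before the DP_NOTICE() fires. A standalone sketch of the pattern, where read_scan_active() is a hypothetical stand-in for the two TM_REG_PF_SCAN_ACTIVE_* reads:

#include <stdbool.h>
#include <stdio.h>

#define QED_HW_STOP_RETRY_LIMIT 10

static int fake_scan_countdown = 3;	/* pretend scans drain after 3 polls */

static bool read_scan_active(void)
{
	return fake_scan_countdown-- > 0;
}

static bool wait_scans_idle(void)
{
	int i;

	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
		if (!read_scan_active())
			return true;	/* both linear scans finished */
		/* the driver sleeps here via usleep_range(1000, 2000) */
	}
	return false;	/* caller logs the "scans are not over" notice */
}

int main(void)
{
	printf("scans idle: %s\n", wait_scans_idle() ? "yes" : "no");
	return 0;
}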
-#define QED_HW_STOP_RETRY_LIMIT (10)
int qed_hw_stop(struct qed_dev *cdev)
{
int rc = 0, t_rc;
- int i, j;
+ int j;
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
@@ -770,7 +816,8 @@ int qed_hw_stop(struct qed_dev *cdev)
rc = qed_sp_pf_stop(p_hwfn);
if (rc)
- return rc;
+ DP_NOTICE(p_hwfn,
+ "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
qed_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -781,24 +828,7 @@ int qed_hw_stop(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
- qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
- qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
- for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
- if ((!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN)) &&
- (!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK)))
- break;
-
- usleep_range(1000, 2000);
- }
- if (i == QED_HW_STOP_RETRY_LIMIT)
- DP_NOTICE(p_hwfn,
- "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
- (u8)qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN),
- (u8)qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK));
+ qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
/* Disable Attention Generation */
qed_int_igu_disable_int(p_hwfn, p_ptt);
@@ -827,7 +857,7 @@ int qed_hw_stop(struct qed_dev *cdev)
void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
- int i, j;
+ int j;
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
@@ -846,25 +876,6 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
- qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
- qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
- for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
- if ((!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN)) &&
- (!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK)))
- break;
-
- usleep_range(1000, 2000);
- }
- if (i == QED_HW_STOP_RETRY_LIMIT)
- DP_NOTICE(p_hwfn,
- "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
- (u8)qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN),
- (u8)qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK));
-
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
/* Need to wait 1ms to guarantee SBs are cleared */
@@ -949,18 +960,8 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
}
/* Setup bar access */
-static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
+static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
- int rc;
-
- /* Allocate PTT pool */
- rc = qed_ptt_pool_alloc(p_hwfn);
- if (rc)
- return rc;
-
- /* Allocate the main PTT */
- p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
-
/* clear indirect access */
qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
@@ -975,8 +976,6 @@ static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
/* enable internal target-read */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
-
- return 0;
}
static void get_function_id(struct qed_hwfn *p_hwfn)
@@ -1011,13 +1010,17 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
u32 *resc_start = p_hwfn->hw_info.resc_start;
u32 *resc_num = p_hwfn->hw_info.resc_num;
+ struct qed_sb_cnt_info sb_cnt_info;
int num_funcs, i;
num_funcs = MAX_NUM_PFS_BB;
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+
resc_num[QED_SB] = min_t(u32,
(MAX_SB_PER_PATH_BB / num_funcs),
- qed_int_get_num_sbs(p_hwfn, NULL));
+ sb_cnt_info.sb_cnt);
resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
@@ -1080,13 +1083,6 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
/* Read nvm_cfg1 (Notice this is just the offset, and not the offsize (TBD)) */
nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
- /* Read Vendor Id / Device Id */
- addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- offsetof(struct nvm_cfg1, glob) +
- offsetof(struct nvm_cfg1_glob, pci_id);
- p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
- NVM_CFG1_GLOB_VENDOR_ID_MASK;
-
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
offsetof(struct nvm_cfg1, glob) +
offsetof(struct nvm_cfg1_glob, core_cfg);
@@ -1280,7 +1276,7 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
return rc;
}
-static void qed_get_dev_info(struct qed_dev *cdev)
+static int qed_get_dev_info(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
u32 tmp;
@@ -1319,6 +1315,14 @@ static void qed_get_dev_info(struct qed_dev *cdev)
"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
cdev->chip_num, cdev->chip_rev,
cdev->chip_bond_id, cdev->chip_metal);
+
+ if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
+ DP_NOTICE(cdev->hwfns,
+ "The chip type/rev (BB A0) is not supported!\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
@@ -1341,15 +1345,24 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
get_function_id(p_hwfn);
- rc = qed_hw_hwfn_prepare(p_hwfn);
+ /* Allocate PTT pool */
+ rc = qed_ptt_pool_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
goto err0;
}
+ /* Allocate the main PTT */
+ p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
/* First hwfn learns basic information, e.g., number of hwfns */
- if (!p_hwfn->my_id)
- qed_get_dev_info(p_hwfn->cdev);
+ if (!p_hwfn->my_id) {
+ rc = qed_get_dev_info(p_hwfn->cdev);
+ if (rc != 0)
+ goto err1;
+ }
+
+ qed_hw_hwfn_prepare(p_hwfn);
/* Initialize MCP structure */
rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
@@ -1381,17 +1394,6 @@ err0:
return rc;
}
-static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
- u8 bar_id)
-{
- u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE
- : PGLUE_B_REG_PF_BAR1_SIZE);
- u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
-
- /* Get the BAR size(in KB) from hardware given val */
- return 1 << (val + 15);
-}
-
int qed_hw_prepare(struct qed_dev *cdev,
int personality)
{
@@ -1416,11 +1418,11 @@ int qed_hw_prepare(struct qed_dev *cdev,
u8 __iomem *addr;
/* adjust bar offset for second engine */
- addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2;
+ addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
p_regview = addr;
/* adjust doorbell bar offset for second engine */
- addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2;
+ addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
p_doorbell = addr;
/* prepare second hw function */
@@ -1532,223 +1534,6 @@ void qed_chain_free(struct qed_dev *cdev,
p_chain->p_phys_addr);
}
-static void __qed_get_vport_stats(struct qed_dev *cdev,
- struct qed_eth_stats *stats)
-{
- int i, j;
-
- memset(stats, 0, sizeof(*stats));
-
- for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- struct eth_mstorm_per_queue_stat mstats;
- struct eth_ustorm_per_queue_stat ustats;
- struct eth_pstorm_per_queue_stat pstats;
- struct tstorm_per_port_stat tstats;
- struct port_stats port_stats;
- struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
-
- if (!p_ptt) {
- DP_ERR(p_hwfn, "Failed to acquire ptt\n");
- continue;
- }
-
- memset(&mstats, 0, sizeof(mstats));
- qed_memcpy_from(p_hwfn, p_ptt, &mstats,
- p_hwfn->storm_stats.mstats.address,
- p_hwfn->storm_stats.mstats.len);
-
- memset(&ustats, 0, sizeof(ustats));
- qed_memcpy_from(p_hwfn, p_ptt, &ustats,
- p_hwfn->storm_stats.ustats.address,
- p_hwfn->storm_stats.ustats.len);
-
- memset(&pstats, 0, sizeof(pstats));
- qed_memcpy_from(p_hwfn, p_ptt, &pstats,
- p_hwfn->storm_stats.pstats.address,
- p_hwfn->storm_stats.pstats.len);
-
- memset(&tstats, 0, sizeof(tstats));
- qed_memcpy_from(p_hwfn, p_ptt, &tstats,
- p_hwfn->storm_stats.tstats.address,
- p_hwfn->storm_stats.tstats.len);
-
- memset(&port_stats, 0, sizeof(port_stats));
-
- if (p_hwfn->mcp_info)
- qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
- p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port, stats),
- sizeof(port_stats));
- qed_ptt_release(p_hwfn, p_ptt);
-
- stats->no_buff_discards +=
- HILO_64_REGPAIR(mstats.no_buff_discard);
- stats->packet_too_big_discard +=
- HILO_64_REGPAIR(mstats.packet_too_big_discard);
- stats->ttl0_discard +=
- HILO_64_REGPAIR(mstats.ttl0_discard);
- stats->tpa_coalesced_pkts +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
- stats->tpa_coalesced_events +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_events);
- stats->tpa_aborts_num +=
- HILO_64_REGPAIR(mstats.tpa_aborts_num);
- stats->tpa_coalesced_bytes +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
-
- stats->rx_ucast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- stats->rx_mcast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- stats->rx_bcast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- stats->rx_ucast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- stats->rx_mcast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- stats->rx_bcast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
-
- stats->mftag_filter_discards +=
- HILO_64_REGPAIR(tstats.mftag_filter_discard);
- stats->mac_filter_discards +=
- HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
-
- stats->tx_ucast_bytes +=
- HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- stats->tx_mcast_bytes +=
- HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- stats->tx_bcast_bytes +=
- HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- stats->tx_ucast_pkts +=
- HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- stats->tx_mcast_pkts +=
- HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- stats->tx_bcast_pkts +=
- HILO_64_REGPAIR(pstats.sent_bcast_pkts);
- stats->tx_err_drop_pkts +=
- HILO_64_REGPAIR(pstats.error_drop_pkts);
- stats->rx_64_byte_packets += port_stats.pmm.r64;
- stats->rx_127_byte_packets += port_stats.pmm.r127;
- stats->rx_255_byte_packets += port_stats.pmm.r255;
- stats->rx_511_byte_packets += port_stats.pmm.r511;
- stats->rx_1023_byte_packets += port_stats.pmm.r1023;
- stats->rx_1518_byte_packets += port_stats.pmm.r1518;
- stats->rx_1522_byte_packets += port_stats.pmm.r1522;
- stats->rx_2047_byte_packets += port_stats.pmm.r2047;
- stats->rx_4095_byte_packets += port_stats.pmm.r4095;
- stats->rx_9216_byte_packets += port_stats.pmm.r9216;
- stats->rx_16383_byte_packets += port_stats.pmm.r16383;
- stats->rx_crc_errors += port_stats.pmm.rfcs;
- stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
- stats->rx_pause_frames += port_stats.pmm.rxpf;
- stats->rx_pfc_frames += port_stats.pmm.rxpp;
- stats->rx_align_errors += port_stats.pmm.raln;
- stats->rx_carrier_errors += port_stats.pmm.rfcr;
- stats->rx_oversize_packets += port_stats.pmm.rovr;
- stats->rx_jabbers += port_stats.pmm.rjbr;
- stats->rx_undersize_packets += port_stats.pmm.rund;
- stats->rx_fragments += port_stats.pmm.rfrg;
- stats->tx_64_byte_packets += port_stats.pmm.t64;
- stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
- stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
- stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
- stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
- stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
- stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
- stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
- stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
- stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
- stats->tx_pause_frames += port_stats.pmm.txpf;
- stats->tx_pfc_frames += port_stats.pmm.txpp;
- stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
- stats->tx_total_collisions += port_stats.pmm.tncl;
- stats->rx_mac_bytes += port_stats.pmm.rbyte;
- stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
- stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
- stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
- stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
- stats->tx_mac_bytes += port_stats.pmm.tbyte;
- stats->tx_mac_uc_packets += port_stats.pmm.txuca;
- stats->tx_mac_mc_packets += port_stats.pmm.txmca;
- stats->tx_mac_bc_packets += port_stats.pmm.txbca;
- stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
-
- for (j = 0; j < 8; j++) {
- stats->brb_truncates += port_stats.brb.brb_truncate[j];
- stats->brb_discards += port_stats.brb.brb_discard[j];
- }
- }
-}
-
-void qed_get_vport_stats(struct qed_dev *cdev,
- struct qed_eth_stats *stats)
-{
- u32 i;
-
- if (!cdev) {
- memset(stats, 0, sizeof(*stats));
- return;
- }
-
- __qed_get_vport_stats(cdev, stats);
-
- if (!cdev->reset_stats)
- return;
-
- /* Reduce the statistics baseline */
- for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
- ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
-}
-
-/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
-void qed_reset_vport_stats(struct qed_dev *cdev)
-{
- int i;
-
- for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- struct eth_mstorm_per_queue_stat mstats;
- struct eth_ustorm_per_queue_stat ustats;
- struct eth_pstorm_per_queue_stat pstats;
- struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
-
- if (!p_ptt) {
- DP_ERR(p_hwfn, "Failed to acquire ptt\n");
- continue;
- }
-
- memset(&mstats, 0, sizeof(mstats));
- qed_memcpy_to(p_hwfn, p_ptt,
- p_hwfn->storm_stats.mstats.address,
- &mstats,
- p_hwfn->storm_stats.mstats.len);
-
- memset(&ustats, 0, sizeof(ustats));
- qed_memcpy_to(p_hwfn, p_ptt,
- p_hwfn->storm_stats.ustats.address,
- &ustats,
- p_hwfn->storm_stats.ustats.len);
-
- memset(&pstats, 0, sizeof(pstats));
- qed_memcpy_to(p_hwfn, p_ptt,
- p_hwfn->storm_stats.pstats.address,
- &pstats,
- p_hwfn->storm_stats.pstats.len);
-
- qed_ptt_release(p_hwfn, p_ptt);
- }
-
- /* PORT statistics are not necessarily reset, so we need to
- * read and create a baseline for future statistics.
- */
- if (!cdev->reset_stats)
- DP_INFO(cdev, "Reset stats not allocated\n");
- else
- __qed_get_vport_stats(cdev, cdev->reset_stats);
-}
-
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
u16 src_id, u16 *dst_id)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index e29a3ba6c8b0..d6c7ddf4f4d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -78,6 +78,15 @@ int qed_hw_init(struct qed_dev *cdev,
const u8 *bin_fw_data);
/**
+ * @brief qed_hw_timers_stop_all - stop the timers HW block
+ *
+ * @param cdev
+ *
+ * @return void
+ */
+void qed_hw_timers_stop_all(struct qed_dev *cdev);
+
+/**
* @brief qed_hw_stop -
*
* @param cdev
@@ -156,8 +165,6 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
*/
void qed_ptt_release(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
-void qed_get_vport_stats(struct qed_dev *cdev,
- struct qed_eth_stats *stats);
void qed_reset_vport_stats(struct qed_dev *cdev);
enum qed_dmae_address_type_t {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 49bbf696a16d..236db8a99ec3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -968,7 +968,7 @@ struct igu_msix_vector {
enum init_modes {
MODE_BB_A0,
- MODE_RESERVED,
+ MODE_BB_B0,
MODE_RESERVED2,
MODE_ASIC,
MODE_RESERVED3,
@@ -2919,7 +2919,19 @@ struct eth_vport_rx_mode {
};
struct eth_vport_tpa_param {
- u64 reserved[2];
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+ u8 tpa_max_aggs_num;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+ u8 max_buff_num;
+ u8 reserved;
};
struct eth_vport_tx_mode {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index fa73daa94655..ffd0accc2ec9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -39,24 +39,1737 @@ struct qed_sb_sp_info {
struct qed_pi_info pi_info_arr[PIS_PER_SB];
};
+enum qed_attention_type {
+ QED_ATTN_TYPE_ATTN,
+ QED_ATTN_TYPE_PARITY,
+};
+
#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
-#define ATTN_STATE_BITS (0xfff)
+struct aeu_invert_reg_bit {
+ char bit_name[30];
+
+#define ATTENTION_PARITY (1 << 0)
+
+#define ATTENTION_LENGTH_MASK (0x00000ff0)
+#define ATTENTION_LENGTH_SHIFT (4)
+#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
+ ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
+#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
+ ATTENTION_PARITY)
+
+/* Multiple bits start with this offset */
+#define ATTENTION_OFFSET_MASK (0x000ff000)
+#define ATTENTION_OFFSET_SHIFT (12)
+ unsigned int flags;
+
+ /* Callback to invoke when the attention is triggered */
+ int (*cb)(struct qed_hwfn *p_hwfn);
+
+ enum block_id block_index;
+};
+
+struct aeu_invert_reg {
+ struct aeu_invert_reg_bit bits[32];
+};
+
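The flags field above packs an encoding that the sketch below decodes, with the macros copied from this hunk: bit 0 marks a parity source, and bits 4..11 hold how many consecutive AEU bits the entry spans.

#include <stdio.h>

#define ATTENTION_PARITY	(1 << 0)
#define ATTENTION_LENGTH_MASK	(0x00000ff0)
#define ATTENTION_LENGTH_SHIFT	(4)
#define ATTENTION_LENGTH(flags)	(((flags) & ATTENTION_LENGTH_MASK) >> \
				 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE	(1 << ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR		(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT	((2 << ATTENTION_LENGTH_SHIFT) | \
				 ATTENTION_PARITY)

int main(void)
{
	/* SINGLE: length 1, no parity; PAR: length 1 with parity;
	 * PAR_INT: length 2 with parity.
	 */
	printf("SINGLE:  length=%u parity=%u\n",
	       ATTENTION_LENGTH(ATTENTION_SINGLE),
	       !!(ATTENTION_SINGLE & ATTENTION_PARITY));
	printf("PAR:     length=%u parity=%u\n",
	       ATTENTION_LENGTH(ATTENTION_PAR),
	       !!(ATTENTION_PAR & ATTENTION_PARITY));
	printf("PAR_INT: length=%u parity=%u\n",
	       ATTENTION_LENGTH(ATTENTION_PAR_INT),
	       !!(ATTENTION_PAR_INT & ATTENTION_PARITY));
	return 0;
}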
+#define MAX_ATTN_GRPS (8)
+#define NUM_ATTN_REGS (9)
+
+/* HW Attention register */
+struct attn_hw_reg {
+ u16 reg_idx; /* Index of this register in its block */
+ u16 num_of_bits; /* number of valid attention bits */
+ u32 sts_addr; /* Address of the STS register */
+ u32 sts_clr_addr; /* Address of the STS_CLR register */
+ u32 sts_wr_addr; /* Address of the STS_WR register */
+ u32 mask_addr; /* Address of the MASK register */
+};
+
+/* HW block attention registers */
+struct attn_hw_regs {
+ u16 num_of_int_regs; /* Number of interrupt regs */
+ u16 num_of_prty_regs; /* Number of parity regs */
+ struct attn_hw_reg **int_regs; /* interrupt regs */
+ struct attn_hw_reg **prty_regs; /* parity regs */
+};
+
+/* HW block attention registers */
+struct attn_hw_block {
+ const char *name; /* Block name */
+ struct attn_hw_regs chip_regs[1];
+};
+
+static struct attn_hw_reg grc_int0_bb_b0 = {
+ 0, 4, 0x50180, 0x5018c, 0x50188, 0x50184};
+
+static struct attn_hw_reg *grc_int_bb_b0_regs[1] = {
+ &grc_int0_bb_b0};
+
+static struct attn_hw_reg grc_prty1_bb_b0 = {
+ 0, 2, 0x50200, 0x5020c, 0x50208, 0x50204};
+
+static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = {
+ &grc_prty1_bb_b0};
+
+static struct attn_hw_reg miscs_int0_bb_b0 = {
+ 0, 3, 0x9180, 0x918c, 0x9188, 0x9184};
+
+static struct attn_hw_reg miscs_int1_bb_b0 = {
+ 1, 11, 0x9190, 0x919c, 0x9198, 0x9194};
+
+static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = {
+ &miscs_int0_bb_b0, &miscs_int1_bb_b0};
+
+static struct attn_hw_reg miscs_prty0_bb_b0 = {
+ 0, 1, 0x91a0, 0x91ac, 0x91a8, 0x91a4};
+
+static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = {
+ &miscs_prty0_bb_b0};
+
+static struct attn_hw_reg misc_int0_bb_b0 = {
+ 0, 1, 0x8180, 0x818c, 0x8188, 0x8184};
+
+static struct attn_hw_reg *misc_int_bb_b0_regs[1] = {
+ &misc_int0_bb_b0};
+
+static struct attn_hw_reg pglue_b_int0_bb_b0 = {
+ 0, 23, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184};
+
+static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = {
+ &pglue_b_int0_bb_b0};
+
+static struct attn_hw_reg pglue_b_prty0_bb_b0 = {
+ 0, 1, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194};
+
+static struct attn_hw_reg pglue_b_prty1_bb_b0 = {
+ 1, 22, 0x2a8200, 0x2a820c, 0x2a8208, 0x2a8204};
+
+static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = {
+ &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0};
+
+static struct attn_hw_reg cnig_int0_bb_b0 = {
+ 0, 6, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec};
+
+static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = {
+ &cnig_int0_bb_b0};
+
+static struct attn_hw_reg cnig_prty0_bb_b0 = {
+ 0, 2, 0x218348, 0x218354, 0x218350, 0x21834c};
+
+static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = {
+ &cnig_prty0_bb_b0};
+
+static struct attn_hw_reg cpmu_int0_bb_b0 = {
+ 0, 1, 0x303e0, 0x303ec, 0x303e8, 0x303e4};
+
+static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = {
+ &cpmu_int0_bb_b0};
+
+static struct attn_hw_reg ncsi_int0_bb_b0 = {
+ 0, 1, 0x404cc, 0x404d8, 0x404d4, 0x404d0};
+
+static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = {
+ &ncsi_int0_bb_b0};
+
+static struct attn_hw_reg ncsi_prty1_bb_b0 = {
+ 0, 1, 0x40000, 0x4000c, 0x40008, 0x40004};
+
+static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = {
+ &ncsi_prty1_bb_b0};
+
+static struct attn_hw_reg opte_prty1_bb_b0 = {
+ 0, 11, 0x53000, 0x5300c, 0x53008, 0x53004};
+
+static struct attn_hw_reg opte_prty0_bb_b0 = {
+ 1, 1, 0x53208, 0x53214, 0x53210, 0x5320c};
+
+static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = {
+ &opte_prty1_bb_b0, &opte_prty0_bb_b0};
+
+static struct attn_hw_reg bmb_int0_bb_b0 = {
+ 0, 16, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4};
+
+static struct attn_hw_reg bmb_int1_bb_b0 = {
+ 1, 28, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc};
+
+static struct attn_hw_reg bmb_int2_bb_b0 = {
+ 2, 26, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4};
+
+static struct attn_hw_reg bmb_int3_bb_b0 = {
+ 3, 31, 0x540108, 0x540114, 0x540110, 0x54010c};
+
+static struct attn_hw_reg bmb_int4_bb_b0 = {
+ 4, 27, 0x540120, 0x54012c, 0x540128, 0x540124};
+
+static struct attn_hw_reg bmb_int5_bb_b0 = {
+ 5, 29, 0x540138, 0x540144, 0x540140, 0x54013c};
+
+static struct attn_hw_reg bmb_int6_bb_b0 = {
+ 6, 30, 0x540150, 0x54015c, 0x540158, 0x540154};
+
+static struct attn_hw_reg bmb_int7_bb_b0 = {
+ 7, 32, 0x540168, 0x540174, 0x540170, 0x54016c};
+
+static struct attn_hw_reg bmb_int8_bb_b0 = {
+ 8, 32, 0x540184, 0x540190, 0x54018c, 0x540188};
+
+static struct attn_hw_reg bmb_int9_bb_b0 = {
+ 9, 32, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0};
+
+static struct attn_hw_reg bmb_int10_bb_b0 = {
+ 10, 3, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8};
+
+static struct attn_hw_reg bmb_int11_bb_b0 = {
+ 11, 4, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0};
+
+static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = {
+ &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0,
+ &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0,
+ &bmb_int8_bb_b0, &bmb_int9_bb_b0, &bmb_int10_bb_b0, &bmb_int11_bb_b0};
+
+static struct attn_hw_reg bmb_prty0_bb_b0 = {
+ 0, 5, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0};
+
+static struct attn_hw_reg bmb_prty1_bb_b0 = {
+ 1, 31, 0x540400, 0x54040c, 0x540408, 0x540404};
+
+static struct attn_hw_reg bmb_prty2_bb_b0 = {
+ 2, 15, 0x540410, 0x54041c, 0x540418, 0x540414};
+
+static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = {
+ &bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0};
+
+static struct attn_hw_reg pcie_prty1_bb_b0 = {
+ 0, 17, 0x54000, 0x5400c, 0x54008, 0x54004};
+
+static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = {
+ &pcie_prty1_bb_b0};
+
+static struct attn_hw_reg mcp2_prty0_bb_b0 = {
+ 0, 1, 0x52040, 0x5204c, 0x52048, 0x52044};
+
+static struct attn_hw_reg mcp2_prty1_bb_b0 = {
+ 1, 12, 0x52204, 0x52210, 0x5220c, 0x52208};
+
+static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = {
+ &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0};
+
+static struct attn_hw_reg pswhst_int0_bb_b0 = {
+ 0, 18, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184};
+
+static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = {
+ &pswhst_int0_bb_b0};
+
+static struct attn_hw_reg pswhst_prty0_bb_b0 = {
+ 0, 1, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194};
+
+static struct attn_hw_reg pswhst_prty1_bb_b0 = {
+ 1, 17, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204};
+
+static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = {
+ &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0};
+
+static struct attn_hw_reg pswhst2_int0_bb_b0 = {
+ 0, 5, 0x29e180, 0x29e18c, 0x29e188, 0x29e184};
+
+static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = {
+ &pswhst2_int0_bb_b0};
+
+static struct attn_hw_reg pswhst2_prty0_bb_b0 = {
+ 0, 1, 0x29e190, 0x29e19c, 0x29e198, 0x29e194};
+
+static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = {
+ &pswhst2_prty0_bb_b0};
+
+static struct attn_hw_reg pswrd_int0_bb_b0 = {
+ 0, 3, 0x29c180, 0x29c18c, 0x29c188, 0x29c184};
+
+static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = {
+ &pswrd_int0_bb_b0};
+
+static struct attn_hw_reg pswrd_prty0_bb_b0 = {
+ 0, 1, 0x29c190, 0x29c19c, 0x29c198, 0x29c194};
+
+static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = {
+ &pswrd_prty0_bb_b0};
+
+static struct attn_hw_reg pswrd2_int0_bb_b0 = {
+ 0, 5, 0x29d180, 0x29d18c, 0x29d188, 0x29d184};
+
+static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = {
+ &pswrd2_int0_bb_b0};
+
+static struct attn_hw_reg pswrd2_prty0_bb_b0 = {
+ 0, 1, 0x29d190, 0x29d19c, 0x29d198, 0x29d194};
+
+static struct attn_hw_reg pswrd2_prty1_bb_b0 = {
+ 1, 31, 0x29d200, 0x29d20c, 0x29d208, 0x29d204};
+
+static struct attn_hw_reg pswrd2_prty2_bb_b0 = {
+ 2, 3, 0x29d210, 0x29d21c, 0x29d218, 0x29d214};
+
+static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = {
+ &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0};
+
+static struct attn_hw_reg pswwr_int0_bb_b0 = {
+ 0, 16, 0x29a180, 0x29a18c, 0x29a188, 0x29a184};
+
+static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = {
+ &pswwr_int0_bb_b0};
+
+static struct attn_hw_reg pswwr_prty0_bb_b0 = {
+ 0, 1, 0x29a190, 0x29a19c, 0x29a198, 0x29a194};
+
+static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = {
+ &pswwr_prty0_bb_b0};
+
+static struct attn_hw_reg pswwr2_int0_bb_b0 = {
+ 0, 19, 0x29b180, 0x29b18c, 0x29b188, 0x29b184};
+
+static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = {
+ &pswwr2_int0_bb_b0};
+
+static struct attn_hw_reg pswwr2_prty0_bb_b0 = {
+ 0, 1, 0x29b190, 0x29b19c, 0x29b198, 0x29b194};
+
+static struct attn_hw_reg pswwr2_prty1_bb_b0 = {
+ 1, 31, 0x29b200, 0x29b20c, 0x29b208, 0x29b204};
+
+static struct attn_hw_reg pswwr2_prty2_bb_b0 = {
+ 2, 31, 0x29b210, 0x29b21c, 0x29b218, 0x29b214};
+
+static struct attn_hw_reg pswwr2_prty3_bb_b0 = {
+ 3, 31, 0x29b220, 0x29b22c, 0x29b228, 0x29b224};
+
+static struct attn_hw_reg pswwr2_prty4_bb_b0 = {
+ 4, 20, 0x29b230, 0x29b23c, 0x29b238, 0x29b234};
+
+static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = {
+ &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0,
+ &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0};
+
+static struct attn_hw_reg pswrq_int0_bb_b0 = {
+ 0, 21, 0x280180, 0x28018c, 0x280188, 0x280184};
+
+static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = {
+ &pswrq_int0_bb_b0};
+
+static struct attn_hw_reg pswrq_prty0_bb_b0 = {
+ 0, 1, 0x280190, 0x28019c, 0x280198, 0x280194};
+
+static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = {
+ &pswrq_prty0_bb_b0};
+
+static struct attn_hw_reg pswrq2_int0_bb_b0 = {
+ 0, 15, 0x240180, 0x24018c, 0x240188, 0x240184};
+
+static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = {
+ &pswrq2_int0_bb_b0};
+
+static struct attn_hw_reg pswrq2_prty1_bb_b0 = {
+ 0, 9, 0x240200, 0x24020c, 0x240208, 0x240204};
+
+static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = {
+ &pswrq2_prty1_bb_b0};
+
+static struct attn_hw_reg pglcs_int0_bb_b0 = {
+ 0, 1, 0x1d00, 0x1d0c, 0x1d08, 0x1d04};
+
+static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = {
+ &pglcs_int0_bb_b0};
+
+static struct attn_hw_reg dmae_int0_bb_b0 = {
+ 0, 2, 0xc180, 0xc18c, 0xc188, 0xc184};
+
+static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = {
+ &dmae_int0_bb_b0};
+
+static struct attn_hw_reg dmae_prty1_bb_b0 = {
+ 0, 3, 0xc200, 0xc20c, 0xc208, 0xc204};
+
+static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = {
+ &dmae_prty1_bb_b0};
+
+static struct attn_hw_reg ptu_int0_bb_b0 = {
+ 0, 8, 0x560180, 0x56018c, 0x560188, 0x560184};
+
+static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = {
+ &ptu_int0_bb_b0};
+
+static struct attn_hw_reg ptu_prty1_bb_b0 = {
+ 0, 18, 0x560200, 0x56020c, 0x560208, 0x560204};
+
+static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = {
+ &ptu_prty1_bb_b0};
+
+static struct attn_hw_reg tcm_int0_bb_b0 = {
+ 0, 8, 0x1180180, 0x118018c, 0x1180188, 0x1180184};
+
+static struct attn_hw_reg tcm_int1_bb_b0 = {
+ 1, 32, 0x1180190, 0x118019c, 0x1180198, 0x1180194};
+
+static struct attn_hw_reg tcm_int2_bb_b0 = {
+ 2, 1, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4};
+
+static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = {
+ &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0};
+
+static struct attn_hw_reg tcm_prty1_bb_b0 = {
+ 0, 31, 0x1180200, 0x118020c, 0x1180208, 0x1180204};
+
+static struct attn_hw_reg tcm_prty2_bb_b0 = {
+ 1, 2, 0x1180210, 0x118021c, 0x1180218, 0x1180214};
+
+static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = {
+ &tcm_prty1_bb_b0, &tcm_prty2_bb_b0};
+
+static struct attn_hw_reg mcm_int0_bb_b0 = {
+ 0, 14, 0x1200180, 0x120018c, 0x1200188, 0x1200184};
+
+static struct attn_hw_reg mcm_int1_bb_b0 = {
+ 1, 26, 0x1200190, 0x120019c, 0x1200198, 0x1200194};
+
+static struct attn_hw_reg mcm_int2_bb_b0 = {
+ 2, 1, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4};
+
+static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = {
+ &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0};
+
+static struct attn_hw_reg mcm_prty1_bb_b0 = {
+ 0, 31, 0x1200200, 0x120020c, 0x1200208, 0x1200204};
+
+static struct attn_hw_reg mcm_prty2_bb_b0 = {
+ 1, 4, 0x1200210, 0x120021c, 0x1200218, 0x1200214};
+
+static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = {
+ &mcm_prty1_bb_b0, &mcm_prty2_bb_b0};
+
+static struct attn_hw_reg ucm_int0_bb_b0 = {
+ 0, 17, 0x1280180, 0x128018c, 0x1280188, 0x1280184};
+
+static struct attn_hw_reg ucm_int1_bb_b0 = {
+ 1, 29, 0x1280190, 0x128019c, 0x1280198, 0x1280194};
+
+static struct attn_hw_reg ucm_int2_bb_b0 = {
+ 2, 1, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4};
+
+static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = {
+ &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0};
+
+static struct attn_hw_reg ucm_prty1_bb_b0 = {
+ 0, 31, 0x1280200, 0x128020c, 0x1280208, 0x1280204};
+
+static struct attn_hw_reg ucm_prty2_bb_b0 = {
+ 1, 7, 0x1280210, 0x128021c, 0x1280218, 0x1280214};
+
+static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = {
+ &ucm_prty1_bb_b0, &ucm_prty2_bb_b0};
+
+static struct attn_hw_reg xcm_int0_bb_b0 = {
+ 0, 16, 0x1000180, 0x100018c, 0x1000188, 0x1000184};
+
+static struct attn_hw_reg xcm_int1_bb_b0 = {
+ 1, 25, 0x1000190, 0x100019c, 0x1000198, 0x1000194};
+
+static struct attn_hw_reg xcm_int2_bb_b0 = {
+ 2, 8, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4};
+
+static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = {
+ &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0};
+
+static struct attn_hw_reg xcm_prty1_bb_b0 = {
+ 0, 31, 0x1000200, 0x100020c, 0x1000208, 0x1000204};
+
+static struct attn_hw_reg xcm_prty2_bb_b0 = {
+ 1, 11, 0x1000210, 0x100021c, 0x1000218, 0x1000214};
+
+static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = {
+ &xcm_prty1_bb_b0, &xcm_prty2_bb_b0};
+
+static struct attn_hw_reg ycm_int0_bb_b0 = {
+ 0, 13, 0x1080180, 0x108018c, 0x1080188, 0x1080184};
+
+static struct attn_hw_reg ycm_int1_bb_b0 = {
+ 1, 23, 0x1080190, 0x108019c, 0x1080198, 0x1080194};
+
+static struct attn_hw_reg ycm_int2_bb_b0 = {
+ 2, 1, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4};
+
+static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = {
+ &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0};
+
+static struct attn_hw_reg ycm_prty1_bb_b0 = {
+ 0, 31, 0x1080200, 0x108020c, 0x1080208, 0x1080204};
+
+static struct attn_hw_reg ycm_prty2_bb_b0 = {
+ 1, 3, 0x1080210, 0x108021c, 0x1080218, 0x1080214};
+
+static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = {
+ &ycm_prty1_bb_b0, &ycm_prty2_bb_b0};
+
+static struct attn_hw_reg pcm_int0_bb_b0 = {
+ 0, 5, 0x1100180, 0x110018c, 0x1100188, 0x1100184};
+
+static struct attn_hw_reg pcm_int1_bb_b0 = {
+ 1, 14, 0x1100190, 0x110019c, 0x1100198, 0x1100194};
+
+static struct attn_hw_reg pcm_int2_bb_b0 = {
+ 2, 1, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4};
+
+static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = {
+ &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0};
+
+static struct attn_hw_reg pcm_prty1_bb_b0 = {
+ 0, 11, 0x1100200, 0x110020c, 0x1100208, 0x1100204};
+
+static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = {
+ &pcm_prty1_bb_b0};
+
+static struct attn_hw_reg qm_int0_bb_b0 = {
+ 0, 22, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184};
+
+static struct attn_hw_reg *qm_int_bb_b0_regs[1] = {
+ &qm_int0_bb_b0};
+
+static struct attn_hw_reg qm_prty0_bb_b0 = {
+ 0, 11, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194};
+
+static struct attn_hw_reg qm_prty1_bb_b0 = {
+ 1, 31, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204};
+
+static struct attn_hw_reg qm_prty2_bb_b0 = {
+ 2, 31, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214};
+
+static struct attn_hw_reg qm_prty3_bb_b0 = {
+ 3, 11, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224};
+
+static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = {
+ &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0};
+
+static struct attn_hw_reg tm_int0_bb_b0 = {
+ 0, 32, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184};
+
+static struct attn_hw_reg tm_int1_bb_b0 = {
+ 1, 11, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194};
+
+static struct attn_hw_reg *tm_int_bb_b0_regs[2] = {
+ &tm_int0_bb_b0, &tm_int1_bb_b0};
+
+static struct attn_hw_reg tm_prty1_bb_b0 = {
+ 0, 17, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204};
+
+static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = {
+ &tm_prty1_bb_b0};
+
+static struct attn_hw_reg dorq_int0_bb_b0 = {
+ 0, 9, 0x100180, 0x10018c, 0x100188, 0x100184};
+
+static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = {
+ &dorq_int0_bb_b0};
+
+static struct attn_hw_reg dorq_prty0_bb_b0 = {
+ 0, 1, 0x100190, 0x10019c, 0x100198, 0x100194};
+
+static struct attn_hw_reg dorq_prty1_bb_b0 = {
+ 1, 6, 0x100200, 0x10020c, 0x100208, 0x100204};
+
+static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = {
+ &dorq_prty0_bb_b0, &dorq_prty1_bb_b0};
+
+static struct attn_hw_reg brb_int0_bb_b0 = {
+ 0, 32, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4};
+
+static struct attn_hw_reg brb_int1_bb_b0 = {
+ 1, 30, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc};
+
+static struct attn_hw_reg brb_int2_bb_b0 = {
+ 2, 28, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4};
+
+static struct attn_hw_reg brb_int3_bb_b0 = {
+ 3, 31, 0x340108, 0x340114, 0x340110, 0x34010c};
+
+static struct attn_hw_reg brb_int4_bb_b0 = {
+ 4, 27, 0x340120, 0x34012c, 0x340128, 0x340124};
+
+static struct attn_hw_reg brb_int5_bb_b0 = {
+ 5, 1, 0x340138, 0x340144, 0x340140, 0x34013c};
+
+static struct attn_hw_reg brb_int6_bb_b0 = {
+ 6, 8, 0x340150, 0x34015c, 0x340158, 0x340154};
+
+static struct attn_hw_reg brb_int7_bb_b0 = {
+ 7, 32, 0x340168, 0x340174, 0x340170, 0x34016c};
+
+static struct attn_hw_reg brb_int8_bb_b0 = {
+ 8, 17, 0x340184, 0x340190, 0x34018c, 0x340188};
+
+static struct attn_hw_reg brb_int9_bb_b0 = {
+ 9, 1, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0};
+
+static struct attn_hw_reg brb_int10_bb_b0 = {
+ 10, 14, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8};
+
+static struct attn_hw_reg brb_int11_bb_b0 = {
+ 11, 8, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0};
+
+static struct attn_hw_reg *brb_int_bb_b0_regs[12] = {
+ &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0,
+ &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0,
+ &brb_int8_bb_b0, &brb_int9_bb_b0, &brb_int10_bb_b0, &brb_int11_bb_b0};
+
+static struct attn_hw_reg brb_prty0_bb_b0 = {
+ 0, 5, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0};
+
+static struct attn_hw_reg brb_prty1_bb_b0 = {
+ 1, 31, 0x340400, 0x34040c, 0x340408, 0x340404};
+
+static struct attn_hw_reg brb_prty2_bb_b0 = {
+ 2, 14, 0x340410, 0x34041c, 0x340418, 0x340414};
+
+static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = {
+ &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0};
+
+static struct attn_hw_reg src_int0_bb_b0 = {
+ 0, 1, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4};
+
+static struct attn_hw_reg *src_int_bb_b0_regs[1] = {
+ &src_int0_bb_b0};
+
+static struct attn_hw_reg prs_int0_bb_b0 = {
+ 0, 2, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044};
+
+static struct attn_hw_reg *prs_int_bb_b0_regs[1] = {
+ &prs_int0_bb_b0};
+
+static struct attn_hw_reg prs_prty0_bb_b0 = {
+ 0, 2, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054};
+
+static struct attn_hw_reg prs_prty1_bb_b0 = {
+ 1, 31, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208};
+
+static struct attn_hw_reg prs_prty2_bb_b0 = {
+ 2, 5, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218};
+
+static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = {
+ &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0};
+
+static struct attn_hw_reg tsdm_int0_bb_b0 = {
+ 0, 26, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044};
+
+static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = {
+ &tsdm_int0_bb_b0};
+
+static struct attn_hw_reg tsdm_prty1_bb_b0 = {
+ 0, 10, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204};
+
+static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = {
+ &tsdm_prty1_bb_b0};
+
+static struct attn_hw_reg msdm_int0_bb_b0 = {
+ 0, 26, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044};
+
+static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = {
+ &msdm_int0_bb_b0};
+
+static struct attn_hw_reg msdm_prty1_bb_b0 = {
+ 0, 11, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204};
+
+static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = {
+ &msdm_prty1_bb_b0};
+
+static struct attn_hw_reg usdm_int0_bb_b0 = {
+ 0, 26, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044};
+
+static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = {
+ &usdm_int0_bb_b0};
+
+static struct attn_hw_reg usdm_prty1_bb_b0 = {
+ 0, 10, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204};
+
+static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = {
+ &usdm_prty1_bb_b0};
+
+static struct attn_hw_reg xsdm_int0_bb_b0 = {
+ 0, 26, 0xf80040, 0xf8004c, 0xf80048, 0xf80044};
+
+static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = {
+ &xsdm_int0_bb_b0};
+
+static struct attn_hw_reg xsdm_prty1_bb_b0 = {
+ 0, 10, 0xf80200, 0xf8020c, 0xf80208, 0xf80204};
+
+static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = {
+ &xsdm_prty1_bb_b0};
+
+static struct attn_hw_reg ysdm_int0_bb_b0 = {
+ 0, 26, 0xf90040, 0xf9004c, 0xf90048, 0xf90044};
+
+static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = {
+ &ysdm_int0_bb_b0};
+
+static struct attn_hw_reg ysdm_prty1_bb_b0 = {
+ 0, 9, 0xf90200, 0xf9020c, 0xf90208, 0xf90204};
+
+static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = {
+ &ysdm_prty1_bb_b0};
+
+static struct attn_hw_reg psdm_int0_bb_b0 = {
+ 0, 26, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044};
+
+static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = {
+ &psdm_int0_bb_b0};
+
+static struct attn_hw_reg psdm_prty1_bb_b0 = {
+ 0, 9, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204};
+
+static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = {
+ &psdm_prty1_bb_b0};
+
+static struct attn_hw_reg tsem_int0_bb_b0 = {
+ 0, 32, 0x1700040, 0x170004c, 0x1700048, 0x1700044};
+
+static struct attn_hw_reg tsem_int1_bb_b0 = {
+ 1, 13, 0x1700050, 0x170005c, 0x1700058, 0x1700054};
+
+static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1740040, 0x174004c, 0x1740048, 0x1740044};
+
+static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = {
+ &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg tsem_prty0_bb_b0 = {
+ 0, 3, 0x17000c8, 0x17000d4, 0x17000d0, 0x17000cc};
+
+static struct attn_hw_reg tsem_prty1_bb_b0 = {
+ 1, 6, 0x1700200, 0x170020c, 0x1700208, 0x1700204};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = {
+ 2, 6, 0x174a200, 0x174a20c, 0x174a208, 0x174a204};
+
+static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = {
+ &tsem_prty0_bb_b0, &tsem_prty1_bb_b0,
+ &tsem_fast_memory_vfc_config_prty1_bb_b0};
+
+static struct attn_hw_reg msem_int0_bb_b0 = {
+ 0, 32, 0x1800040, 0x180004c, 0x1800048, 0x1800044};
+
+static struct attn_hw_reg msem_int1_bb_b0 = {
+ 1, 13, 0x1800050, 0x180005c, 0x1800058, 0x1800054};
+
+static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1840040, 0x184004c, 0x1840048, 0x1840044};
+
+static struct attn_hw_reg *msem_int_bb_b0_regs[3] = {
+ &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg msem_prty0_bb_b0 = {
+ 0, 3, 0x18000c8, 0x18000d4, 0x18000d0, 0x18000cc};
+
+static struct attn_hw_reg msem_prty1_bb_b0 = {
+ 1, 6, 0x1800200, 0x180020c, 0x1800208, 0x1800204};
+
+static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = {
+ &msem_prty0_bb_b0, &msem_prty1_bb_b0};
+
+static struct attn_hw_reg usem_int0_bb_b0 = {
+ 0, 32, 0x1900040, 0x190004c, 0x1900048, 0x1900044};
+
+static struct attn_hw_reg usem_int1_bb_b0 = {
+ 1, 13, 0x1900050, 0x190005c, 0x1900058, 0x1900054};
+
+static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1940040, 0x194004c, 0x1940048, 0x1940044};
+
+static struct attn_hw_reg *usem_int_bb_b0_regs[3] = {
+ &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg usem_prty0_bb_b0 = {
+ 0, 3, 0x19000c8, 0x19000d4, 0x19000d0, 0x19000cc};
+
+static struct attn_hw_reg usem_prty1_bb_b0 = {
+ 1, 6, 0x1900200, 0x190020c, 0x1900208, 0x1900204};
+
+static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = {
+ &usem_prty0_bb_b0, &usem_prty1_bb_b0};
+
+static struct attn_hw_reg xsem_int0_bb_b0 = {
+ 0, 32, 0x1400040, 0x140004c, 0x1400048, 0x1400044};
+
+static struct attn_hw_reg xsem_int1_bb_b0 = {
+ 1, 13, 0x1400050, 0x140005c, 0x1400058, 0x1400054};
+
+static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1440040, 0x144004c, 0x1440048, 0x1440044};
+
+static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = {
+ &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg xsem_prty0_bb_b0 = {
+ 0, 3, 0x14000c8, 0x14000d4, 0x14000d0, 0x14000cc};
+
+static struct attn_hw_reg xsem_prty1_bb_b0 = {
+ 1, 7, 0x1400200, 0x140020c, 0x1400208, 0x1400204};
+
+static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = {
+ &xsem_prty0_bb_b0, &xsem_prty1_bb_b0};
+
+static struct attn_hw_reg ysem_int0_bb_b0 = {
+ 0, 32, 0x1500040, 0x150004c, 0x1500048, 0x1500044};
+
+static struct attn_hw_reg ysem_int1_bb_b0 = {
+ 1, 13, 0x1500050, 0x150005c, 0x1500058, 0x1500054};
+
+static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1540040, 0x154004c, 0x1540048, 0x1540044};
+
+static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = {
+ &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg ysem_prty0_bb_b0 = {
+ 0, 3, 0x15000c8, 0x15000d4, 0x15000d0, 0x15000cc};
+
+static struct attn_hw_reg ysem_prty1_bb_b0 = {
+ 1, 7, 0x1500200, 0x150020c, 0x1500208, 0x1500204};
+
+static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = {
+ &ysem_prty0_bb_b0, &ysem_prty1_bb_b0};
+
+static struct attn_hw_reg psem_int0_bb_b0 = {
+ 0, 32, 0x1600040, 0x160004c, 0x1600048, 0x1600044};
+
+static struct attn_hw_reg psem_int1_bb_b0 = {
+ 1, 13, 0x1600050, 0x160005c, 0x1600058, 0x1600054};
+
+static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1640040, 0x164004c, 0x1640048, 0x1640044};
+
+static struct attn_hw_reg *psem_int_bb_b0_regs[3] = {
+ &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg psem_prty0_bb_b0 = {
+ 0, 3, 0x16000c8, 0x16000d4, 0x16000d0, 0x16000cc};
+
+static struct attn_hw_reg psem_prty1_bb_b0 = {
+ 1, 6, 0x1600200, 0x160020c, 0x1600208, 0x1600204};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = {
+ 2, 6, 0x164a200, 0x164a20c, 0x164a208, 0x164a204};
+
+static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = {
+ &psem_prty0_bb_b0, &psem_prty1_bb_b0,
+ &psem_fast_memory_vfc_config_prty1_bb_b0};
+
+static struct attn_hw_reg rss_int0_bb_b0 = {
+ 0, 12, 0x238980, 0x23898c, 0x238988, 0x238984};
+
+static struct attn_hw_reg *rss_int_bb_b0_regs[1] = {
+ &rss_int0_bb_b0};
+
+static struct attn_hw_reg rss_prty1_bb_b0 = {
+ 0, 4, 0x238a00, 0x238a0c, 0x238a08, 0x238a04};
+
+static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = {
+ &rss_prty1_bb_b0};
+
+static struct attn_hw_reg tmld_int0_bb_b0 = {
+ 0, 6, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184};
+
+static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = {
+ &tmld_int0_bb_b0};
+
+static struct attn_hw_reg tmld_prty1_bb_b0 = {
+ 0, 8, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204};
+
+static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = {
+ &tmld_prty1_bb_b0};
+
+static struct attn_hw_reg muld_int0_bb_b0 = {
+ 0, 6, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184};
+
+static struct attn_hw_reg *muld_int_bb_b0_regs[1] = {
+ &muld_int0_bb_b0};
+
+static struct attn_hw_reg muld_prty1_bb_b0 = {
+ 0, 10, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204};
+
+static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = {
+ &muld_prty1_bb_b0};
+
+static struct attn_hw_reg yuld_int0_bb_b0 = {
+ 0, 6, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184};
+
+static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = {
+ &yuld_int0_bb_b0};
+
+static struct attn_hw_reg yuld_prty1_bb_b0 = {
+ 0, 6, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204};
+
+static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = {
+ &yuld_prty1_bb_b0};
+
+static struct attn_hw_reg xyld_int0_bb_b0 = {
+ 0, 6, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184};
+
+static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = {
+ &xyld_int0_bb_b0};
+
+static struct attn_hw_reg xyld_prty1_bb_b0 = {
+ 0, 9, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204};
+
+static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = {
+ &xyld_prty1_bb_b0};
+
+static struct attn_hw_reg prm_int0_bb_b0 = {
+ 0, 11, 0x230040, 0x23004c, 0x230048, 0x230044};
+
+static struct attn_hw_reg *prm_int_bb_b0_regs[1] = {
+ &prm_int0_bb_b0};
+
+static struct attn_hw_reg prm_prty0_bb_b0 = {
+ 0, 1, 0x230050, 0x23005c, 0x230058, 0x230054};
+
+static struct attn_hw_reg prm_prty1_bb_b0 = {
+ 1, 24, 0x230200, 0x23020c, 0x230208, 0x230204};
+
+static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = {
+ &prm_prty0_bb_b0, &prm_prty1_bb_b0};
+
+static struct attn_hw_reg pbf_pb1_int0_bb_b0 = {
+ 0, 9, 0xda0040, 0xda004c, 0xda0048, 0xda0044};
+
+static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = {
+ &pbf_pb1_int0_bb_b0};
+
+static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = {
+ 0, 1, 0xda0050, 0xda005c, 0xda0058, 0xda0054};
+
+static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = {
+ &pbf_pb1_prty0_bb_b0};
+
+static struct attn_hw_reg pbf_pb2_int0_bb_b0 = {
+ 0, 9, 0xda4040, 0xda404c, 0xda4048, 0xda4044};
+
+static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = {
+ &pbf_pb2_int0_bb_b0};
+
+static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = {
+ 0, 1, 0xda4050, 0xda405c, 0xda4058, 0xda4054};
+
+static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = {
+ &pbf_pb2_prty0_bb_b0};
+
+static struct attn_hw_reg rpb_int0_bb_b0 = {
+ 0, 9, 0x23c040, 0x23c04c, 0x23c048, 0x23c044};
+
+static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = {
+ &rpb_int0_bb_b0};
+
+static struct attn_hw_reg rpb_prty0_bb_b0 = {
+ 0, 1, 0x23c050, 0x23c05c, 0x23c058, 0x23c054};
+
+static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = {
+ &rpb_prty0_bb_b0};
+
+static struct attn_hw_reg btb_int0_bb_b0 = {
+ 0, 16, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4};
+
+static struct attn_hw_reg btb_int1_bb_b0 = {
+ 1, 16, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc};
+
+static struct attn_hw_reg btb_int2_bb_b0 = {
+ 2, 4, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4};
+
+static struct attn_hw_reg btb_int3_bb_b0 = {
+ 3, 32, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c};
+
+static struct attn_hw_reg btb_int4_bb_b0 = {
+ 4, 23, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124};
+
+static struct attn_hw_reg btb_int5_bb_b0 = {
+ 5, 32, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c};
+
+static struct attn_hw_reg btb_int6_bb_b0 = {
+ 6, 1, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154};
+
+static struct attn_hw_reg btb_int8_bb_b0 = {
+ 7, 1, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188};
+
+static struct attn_hw_reg btb_int9_bb_b0 = {
+ 8, 1, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0};
+
+static struct attn_hw_reg btb_int10_bb_b0 = {
+ 9, 1, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8};
+
+static struct attn_hw_reg btb_int11_bb_b0 = {
+ 10, 2, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0};
+
+static struct attn_hw_reg *btb_int_bb_b0_regs[11] = {
+ &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0,
+ &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0,
+ &btb_int9_bb_b0, &btb_int10_bb_b0, &btb_int11_bb_b0};
+
+static struct attn_hw_reg btb_prty0_bb_b0 = {
+ 0, 5, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0};
+
+static struct attn_hw_reg btb_prty1_bb_b0 = {
+ 1, 23, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404};
+
+static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = {
+ &btb_prty0_bb_b0, &btb_prty1_bb_b0};
+
+static struct attn_hw_reg pbf_int0_bb_b0 = {
+ 0, 1, 0xd80180, 0xd8018c, 0xd80188, 0xd80184};
+
+static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = {
+ &pbf_int0_bb_b0};
+
+static struct attn_hw_reg pbf_prty0_bb_b0 = {
+ 0, 1, 0xd80190, 0xd8019c, 0xd80198, 0xd80194};
+
+static struct attn_hw_reg pbf_prty1_bb_b0 = {
+ 1, 31, 0xd80200, 0xd8020c, 0xd80208, 0xd80204};
+
+static struct attn_hw_reg pbf_prty2_bb_b0 = {
+ 2, 27, 0xd80210, 0xd8021c, 0xd80218, 0xd80214};
+
+static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = {
+ &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0};
+
+static struct attn_hw_reg rdif_int0_bb_b0 = {
+ 0, 8, 0x300180, 0x30018c, 0x300188, 0x300184};
+
+static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = {
+ &rdif_int0_bb_b0};
+
+static struct attn_hw_reg rdif_prty0_bb_b0 = {
+ 0, 1, 0x300190, 0x30019c, 0x300198, 0x300194};
+
+static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = {
+ &rdif_prty0_bb_b0};
+
+static struct attn_hw_reg tdif_int0_bb_b0 = {
+ 0, 8, 0x310180, 0x31018c, 0x310188, 0x310184};
+
+static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = {
+ &tdif_int0_bb_b0};
+
+static struct attn_hw_reg tdif_prty0_bb_b0 = {
+ 0, 1, 0x310190, 0x31019c, 0x310198, 0x310194};
+
+static struct attn_hw_reg tdif_prty1_bb_b0 = {
+ 1, 11, 0x310200, 0x31020c, 0x310208, 0x310204};
+
+static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = {
+ &tdif_prty0_bb_b0, &tdif_prty1_bb_b0};
+
+static struct attn_hw_reg cdu_int0_bb_b0 = {
+ 0, 8, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc};
+
+static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = {
+ &cdu_int0_bb_b0};
+
+static struct attn_hw_reg cdu_prty1_bb_b0 = {
+ 0, 5, 0x580200, 0x58020c, 0x580208, 0x580204};
+
+static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = {
+ &cdu_prty1_bb_b0};
+
+static struct attn_hw_reg ccfc_int0_bb_b0 = {
+ 0, 2, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184};
+
+static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = {
+ &ccfc_int0_bb_b0};
+
+static struct attn_hw_reg ccfc_prty1_bb_b0 = {
+ 0, 2, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204};
+
+static struct attn_hw_reg ccfc_prty0_bb_b0 = {
+ 1, 6, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8};
+
+static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = {
+ &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0};
+
+static struct attn_hw_reg tcfc_int0_bb_b0 = {
+ 0, 2, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184};
+
+static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = {
+ &tcfc_int0_bb_b0};
+
+static struct attn_hw_reg tcfc_prty1_bb_b0 = {
+ 0, 2, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204};
+
+static struct attn_hw_reg tcfc_prty0_bb_b0 = {
+ 1, 6, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8};
+
+static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = {
+ &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0};
+
+static struct attn_hw_reg igu_int0_bb_b0 = {
+ 0, 11, 0x180180, 0x18018c, 0x180188, 0x180184};
+
+static struct attn_hw_reg *igu_int_bb_b0_regs[1] = {
+ &igu_int0_bb_b0};
+
+static struct attn_hw_reg igu_prty0_bb_b0 = {
+ 0, 1, 0x180190, 0x18019c, 0x180198, 0x180194};
+
+static struct attn_hw_reg igu_prty1_bb_b0 = {
+ 1, 31, 0x180200, 0x18020c, 0x180208, 0x180204};
+
+static struct attn_hw_reg igu_prty2_bb_b0 = {
+ 2, 1, 0x180210, 0x18021c, 0x180218, 0x180214};
+
+static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = {
+ &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0};
+
+static struct attn_hw_reg cau_int0_bb_b0 = {
+ 0, 11, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0};
+
+static struct attn_hw_reg *cau_int_bb_b0_regs[1] = {
+ &cau_int0_bb_b0};
+
+static struct attn_hw_reg cau_prty1_bb_b0 = {
+ 0, 13, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204};
+
+static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = {
+ &cau_prty1_bb_b0};
+
+static struct attn_hw_reg dbg_int0_bb_b0 = {
+ 0, 1, 0x10180, 0x1018c, 0x10188, 0x10184};
+
+static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = {
+ &dbg_int0_bb_b0};
+
+static struct attn_hw_reg dbg_prty1_bb_b0 = {
+ 0, 1, 0x10200, 0x1020c, 0x10208, 0x10204};
+
+static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = {
+ &dbg_prty1_bb_b0};
+
+static struct attn_hw_reg nig_int0_bb_b0 = {
+ 0, 12, 0x500040, 0x50004c, 0x500048, 0x500044};
+
+static struct attn_hw_reg nig_int1_bb_b0 = {
+ 1, 32, 0x500050, 0x50005c, 0x500058, 0x500054};
+
+static struct attn_hw_reg nig_int2_bb_b0 = {
+ 2, 20, 0x500060, 0x50006c, 0x500068, 0x500064};
+
+static struct attn_hw_reg nig_int3_bb_b0 = {
+ 3, 18, 0x500070, 0x50007c, 0x500078, 0x500074};
+
+static struct attn_hw_reg nig_int4_bb_b0 = {
+ 4, 20, 0x500080, 0x50008c, 0x500088, 0x500084};
+
+static struct attn_hw_reg nig_int5_bb_b0 = {
+ 5, 18, 0x500090, 0x50009c, 0x500098, 0x500094};
+
+static struct attn_hw_reg *nig_int_bb_b0_regs[6] = {
+ &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0,
+ &nig_int4_bb_b0, &nig_int5_bb_b0};
+
+static struct attn_hw_reg nig_prty0_bb_b0 = {
+ 0, 1, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4};
+
+static struct attn_hw_reg nig_prty1_bb_b0 = {
+ 1, 31, 0x500200, 0x50020c, 0x500208, 0x500204};
+
+static struct attn_hw_reg nig_prty2_bb_b0 = {
+ 2, 31, 0x500210, 0x50021c, 0x500218, 0x500214};
+
+static struct attn_hw_reg nig_prty3_bb_b0 = {
+ 3, 31, 0x500220, 0x50022c, 0x500228, 0x500224};
+
+static struct attn_hw_reg nig_prty4_bb_b0 = {
+ 4, 17, 0x500230, 0x50023c, 0x500238, 0x500234};
+
+static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = {
+ &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0,
+ &nig_prty3_bb_b0, &nig_prty4_bb_b0};
+
+static struct attn_hw_reg ipc_int0_bb_b0 = {
+ 0, 13, 0x2050c, 0x20518, 0x20514, 0x20510};
+
+static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = {
+ &ipc_int0_bb_b0};
+
+static struct attn_hw_reg ipc_prty0_bb_b0 = {
+ 0, 1, 0x2051c, 0x20528, 0x20524, 0x20520};
+
+static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = {
+ &ipc_prty0_bb_b0};
+
+static struct attn_hw_block attn_blocks[] = {
+ {"grc", {{1, 1, grc_int_bb_b0_regs, grc_prty_bb_b0_regs} } },
+ {"miscs", {{2, 1, miscs_int_bb_b0_regs, miscs_prty_bb_b0_regs} } },
+ {"misc", {{1, 0, misc_int_bb_b0_regs, NULL} } },
+ {"dbu", {{0, 0, NULL, NULL} } },
+ {"pglue_b", {{1, 2, pglue_b_int_bb_b0_regs,
+ pglue_b_prty_bb_b0_regs} } },
+ {"cnig", {{1, 1, cnig_int_bb_b0_regs, cnig_prty_bb_b0_regs} } },
+ {"cpmu", {{1, 0, cpmu_int_bb_b0_regs, NULL} } },
+ {"ncsi", {{1, 1, ncsi_int_bb_b0_regs, ncsi_prty_bb_b0_regs} } },
+ {"opte", {{0, 2, NULL, opte_prty_bb_b0_regs} } },
+ {"bmb", {{12, 3, bmb_int_bb_b0_regs, bmb_prty_bb_b0_regs} } },
+ {"pcie", {{0, 1, NULL, pcie_prty_bb_b0_regs} } },
+ {"mcp", {{0, 0, NULL, NULL} } },
+ {"mcp2", {{0, 2, NULL, mcp2_prty_bb_b0_regs} } },
+ {"pswhst", {{1, 2, pswhst_int_bb_b0_regs, pswhst_prty_bb_b0_regs} } },
+ {"pswhst2", {{1, 1, pswhst2_int_bb_b0_regs,
+ pswhst2_prty_bb_b0_regs} } },
+ {"pswrd", {{1, 1, pswrd_int_bb_b0_regs, pswrd_prty_bb_b0_regs} } },
+ {"pswrd2", {{1, 3, pswrd2_int_bb_b0_regs, pswrd2_prty_bb_b0_regs} } },
+ {"pswwr", {{1, 1, pswwr_int_bb_b0_regs, pswwr_prty_bb_b0_regs} } },
+ {"pswwr2", {{1, 5, pswwr2_int_bb_b0_regs, pswwr2_prty_bb_b0_regs} } },
+ {"pswrq", {{1, 1, pswrq_int_bb_b0_regs, pswrq_prty_bb_b0_regs} } },
+ {"pswrq2", {{1, 1, pswrq2_int_bb_b0_regs, pswrq2_prty_bb_b0_regs} } },
+ {"pglcs", {{1, 0, pglcs_int_bb_b0_regs, NULL} } },
+ {"dmae", {{1, 1, dmae_int_bb_b0_regs, dmae_prty_bb_b0_regs} } },
+ {"ptu", {{1, 1, ptu_int_bb_b0_regs, ptu_prty_bb_b0_regs} } },
+ {"tcm", {{3, 2, tcm_int_bb_b0_regs, tcm_prty_bb_b0_regs} } },
+ {"mcm", {{3, 2, mcm_int_bb_b0_regs, mcm_prty_bb_b0_regs} } },
+ {"ucm", {{3, 2, ucm_int_bb_b0_regs, ucm_prty_bb_b0_regs} } },
+ {"xcm", {{3, 2, xcm_int_bb_b0_regs, xcm_prty_bb_b0_regs} } },
+ {"ycm", {{3, 2, ycm_int_bb_b0_regs, ycm_prty_bb_b0_regs} } },
+ {"pcm", {{3, 1, pcm_int_bb_b0_regs, pcm_prty_bb_b0_regs} } },
+ {"qm", {{1, 4, qm_int_bb_b0_regs, qm_prty_bb_b0_regs} } },
+ {"tm", {{2, 1, tm_int_bb_b0_regs, tm_prty_bb_b0_regs} } },
+ {"dorq", {{1, 2, dorq_int_bb_b0_regs, dorq_prty_bb_b0_regs} } },
+ {"brb", {{12, 3, brb_int_bb_b0_regs, brb_prty_bb_b0_regs} } },
+ {"src", {{1, 0, src_int_bb_b0_regs, NULL} } },
+ {"prs", {{1, 3, prs_int_bb_b0_regs, prs_prty_bb_b0_regs} } },
+ {"tsdm", {{1, 1, tsdm_int_bb_b0_regs, tsdm_prty_bb_b0_regs} } },
+ {"msdm", {{1, 1, msdm_int_bb_b0_regs, msdm_prty_bb_b0_regs} } },
+ {"usdm", {{1, 1, usdm_int_bb_b0_regs, usdm_prty_bb_b0_regs} } },
+ {"xsdm", {{1, 1, xsdm_int_bb_b0_regs, xsdm_prty_bb_b0_regs} } },
+ {"ysdm", {{1, 1, ysdm_int_bb_b0_regs, ysdm_prty_bb_b0_regs} } },
+ {"psdm", {{1, 1, psdm_int_bb_b0_regs, psdm_prty_bb_b0_regs} } },
+ {"tsem", {{3, 3, tsem_int_bb_b0_regs, tsem_prty_bb_b0_regs} } },
+ {"msem", {{3, 2, msem_int_bb_b0_regs, msem_prty_bb_b0_regs} } },
+ {"usem", {{3, 2, usem_int_bb_b0_regs, usem_prty_bb_b0_regs} } },
+ {"xsem", {{3, 2, xsem_int_bb_b0_regs, xsem_prty_bb_b0_regs} } },
+ {"ysem", {{3, 2, ysem_int_bb_b0_regs, ysem_prty_bb_b0_regs} } },
+ {"psem", {{3, 3, psem_int_bb_b0_regs, psem_prty_bb_b0_regs} } },
+ {"rss", {{1, 1, rss_int_bb_b0_regs, rss_prty_bb_b0_regs} } },
+ {"tmld", {{1, 1, tmld_int_bb_b0_regs, tmld_prty_bb_b0_regs} } },
+ {"muld", {{1, 1, muld_int_bb_b0_regs, muld_prty_bb_b0_regs} } },
+ {"yuld", {{1, 1, yuld_int_bb_b0_regs, yuld_prty_bb_b0_regs} } },
+ {"xyld", {{1, 1, xyld_int_bb_b0_regs, xyld_prty_bb_b0_regs} } },
+ {"prm", {{1, 2, prm_int_bb_b0_regs, prm_prty_bb_b0_regs} } },
+ {"pbf_pb1", {{1, 1, pbf_pb1_int_bb_b0_regs,
+ pbf_pb1_prty_bb_b0_regs} } },
+ {"pbf_pb2", {{1, 1, pbf_pb2_int_bb_b0_regs,
+ pbf_pb2_prty_bb_b0_regs} } },
+ {"rpb", { {1, 1, rpb_int_bb_b0_regs, rpb_prty_bb_b0_regs} } },
+ {"btb", { {11, 2, btb_int_bb_b0_regs, btb_prty_bb_b0_regs} } },
+ {"pbf", { {1, 3, pbf_int_bb_b0_regs, pbf_prty_bb_b0_regs} } },
+ {"rdif", { {1, 1, rdif_int_bb_b0_regs, rdif_prty_bb_b0_regs} } },
+ {"tdif", { {1, 2, tdif_int_bb_b0_regs, tdif_prty_bb_b0_regs} } },
+ {"cdu", { {1, 1, cdu_int_bb_b0_regs, cdu_prty_bb_b0_regs} } },
+ {"ccfc", { {1, 2, ccfc_int_bb_b0_regs, ccfc_prty_bb_b0_regs} } },
+ {"tcfc", { {1, 2, tcfc_int_bb_b0_regs, tcfc_prty_bb_b0_regs} } },
+ {"igu", { {1, 3, igu_int_bb_b0_regs, igu_prty_bb_b0_regs} } },
+ {"cau", { {1, 1, cau_int_bb_b0_regs, cau_prty_bb_b0_regs} } },
+ {"umac", { {0, 0, NULL, NULL} } },
+ {"xmac", { {0, 0, NULL, NULL} } },
+ {"dbg", { {1, 1, dbg_int_bb_b0_regs, dbg_prty_bb_b0_regs} } },
+ {"nig", { {6, 5, nig_int_bb_b0_regs, nig_prty_bb_b0_regs} } },
+ {"wol", { {0, 0, NULL, NULL} } },
+ {"bmbn", { {0, 0, NULL, NULL} } },
+ {"ipc", { {1, 1, ipc_int_bb_b0_regs, ipc_prty_bb_b0_regs} } },
+ {"nwm", { {0, 0, NULL, NULL} } },
+ {"nws", { {0, 0, NULL, NULL} } },
+ {"ms", { {0, 0, NULL, NULL} } },
+ {"phy_pcie", { {0, 0, NULL, NULL} } },
+ {"misc_aeu", { {0, 0, NULL, NULL} } },
+ {"bar0_map", { {0, 0, NULL, NULL} } },};
+
+/* Specific HW attention callbacks */
+static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
+
+ /* This might occur on certain instances; log it once, then mask it */
+ DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
+ tmp);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
+ 0xffffffff);
+
+ return 0;
+}
+
+#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
+#define ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
+#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
+#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0xf)
+#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
+#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x1)
+#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
+#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0xff)
+#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
+#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0xf)
+#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
+#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0xff)
+#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
+static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_VALID);
+
+ if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
+ u32 addr, data, length;
+
+ addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
+ data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_DATA);
+ length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_LENGTH);
+
+ DP_INFO(p_hwfn->cdev,
+ "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
+ addr, length,
+ (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
+ (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
+ (u8) GET_FIELD(data,
+ ATTENTION_INCORRECT_ACCESS_VF_VALID),
+ (u8) GET_FIELD(data,
+ ATTENTION_INCORRECT_ACCESS_CLIENT),
+ (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
+ (u8) GET_FIELD(data,
+ ATTENTION_INCORRECT_ACCESS_BYTE_EN),
+ data);
+ }
+
+ return 0;
+}
+
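+/* The MASK/SHIFT pairs above are decoded via the driver's GET_FIELD
+ * helper; a minimal sketch, assuming the token-pasting definition
+ * from qed.h:
+ *
+ *	#define GET_FIELD(value, name) \
+ *		(((value) >> (name##_SHIFT)) & name##_MASK)
+ *
+ * so GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID) extracts the
+ * 4-bit PF id from bits [17:14] of the raw details word.
+ */
+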
+#define QED_GRC_ATTENTION_VALID_BIT (1 << 0)
+#define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff)
+#define QED_GRC_ATTENTION_ADDRESS_SHIFT (0)
+#define QED_GRC_ATTENTION_RDWR_BIT (1 << 23)
+#define QED_GRC_ATTENTION_MASTER_MASK (0xf)
+#define QED_GRC_ATTENTION_MASTER_SHIFT (24)
+#define QED_GRC_ATTENTION_PF_MASK (0xf)
+#define QED_GRC_ATTENTION_PF_SHIFT (0)
+#define QED_GRC_ATTENTION_VF_MASK (0xff)
+#define QED_GRC_ATTENTION_VF_SHIFT (4)
+#define QED_GRC_ATTENTION_PRIV_MASK (0x3)
+#define QED_GRC_ATTENTION_PRIV_SHIFT (14)
+#define QED_GRC_ATTENTION_PRIV_VF (0)
+static const char *attn_master_to_str(u8 master)
+{
+ switch (master) {
+ case 1: return "PXP";
+ case 2: return "MCP";
+ case 3: return "MSDM";
+ case 4: return "PSDM";
+ case 5: return "YSDM";
+ case 6: return "USDM";
+ case 7: return "TSDM";
+ case 8: return "XSDM";
+ case 9: return "DBU";
+ case 10: return "DMAE";
+ default:
+ return "Unkown";
+ }
+}
+
+static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 tmp, tmp2;
+
+ /* We've already cleared the timeout interrupt register, so we learn
+ * of interrupts via the validity register
+ */
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
+ if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
+ goto out;
+
+ /* Read the GRC timeout information */
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
+ tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
+
+ DP_INFO(p_hwfn->cdev,
+ "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
+ tmp2, tmp,
+ (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
+ GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
+ attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
+ GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
+ (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
+ QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
+ GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
+
+out:
+ /* Regardless of anything else, clear the validity bit */
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
+ return 0;
+}
+
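+/* Note on the decode above: the ADDRESS field is a dword index into
+ * the GRC space, hence the `<< 2' when printing; e.g. a raw field
+ * value of 0x4000 is reported as Address [00010000].
+ */
+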
+#define PGLUE_ATTENTION_VALID (1 << 29)
+#define PGLUE_ATTENTION_RD_VALID (1 << 26)
+#define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf)
+#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
+#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19)
+#define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff)
+#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
+#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21)
+#define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22)
+#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23)
+#define PGLUE_ATTENTION_ICPL_VALID (1 << 23)
+#define PGLUE_ATTENTION_ZLR_VALID (1 << 25)
+#define PGLUE_ATTENTION_ILT_VALID (1 << 23)
+static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 tmp;
+
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS2);
+ if (tmp & PGLUE_ATTENTION_VALID) {
+ u32 addr_lo, addr_hi, details;
+
+ addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
+ details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS);
+
+ DP_INFO(p_hwfn,
+ "Illegal write by chip to [%08x:%08x] blocked.\n"
+ "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+ "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+ GET_FIELD(details,
+ PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
+ tmp,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
+ }
+
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_DETAILS2);
+ if (tmp & PGLUE_ATTENTION_RD_VALID) {
+ u32 addr_lo, addr_hi, details;
+
+ addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
+ details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_DETAILS);
+
+ DP_INFO(p_hwfn,
+ "Illegal read by chip from [%08x:%08x] blocked.\n"
+ " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+ " Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+ GET_FIELD(details,
+ PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
+ tmp,
+ GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
+ : 0,
+ GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
+ GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
+ : 0);
+ }
+
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
+ if (tmp & PGLUE_ATTENTION_ICPL_VALID)
+ DP_INFO(p_hwfn, "ICPL eror - %08x\n", tmp);
+
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
+ if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
+ u32 addr_hi, addr_lo;
+
+ addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
+
+ DP_INFO(p_hwfn, "ZLR eror - %08x [Address %08x:%08x]\n",
+ tmp, addr_hi, addr_lo);
+ }
+
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
+ if (tmp & PGLUE_ATTENTION_ILT_VALID) {
+ u32 addr_hi, addr_lo, details;
+
+ addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
+ details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_DETAILS);
+
+ DP_INFO(p_hwfn,
+ "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
+ details, tmp, addr_hi, addr_lo);
+ }
+
+ /* Clear the indications */
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
+
+ return 0;
+}
+
+#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
+#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
+#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f)
+#define QED_DORQ_ATTENTION_SIZE_SHIFT (16)
+static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 reason;
+
+ reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
+ QED_DORQ_ATTENTION_REASON_MASK;
+ if (reason) {
+ u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ DORQ_REG_DB_DROP_DETAILS);
+
+ DP_INFO(p_hwfn->cdev,
+ "DORQ db_drop: adress 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
+ qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ DORQ_REG_DB_DROP_DETAILS_ADDRESS),
+ (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
+ GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
+ reason);
+ }
+
+ return -EINVAL;
+}
+
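+/* Unlike its siblings, qed_dorq_attn_cb() deliberately returns
+ * -EINVAL; a non-zero rc makes qed_int_deassertion_aeu_bit() leave
+ * the status register uncleared (so a later idle check still sees
+ * it) and mask the attention from being asserted again.
+ */
+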
+/* Note: aeu_invert_reg must be defined in the same bit order as the HW */
+static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
+ {
+ { /* After Invert 1 */
+ {"GPIO0 function%d",
+ (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 2 */
+ {"PGLUE config_space", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"PGLUE misc_flr", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"PGLUE B RBC", ATTENTION_PAR_INT,
+ qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B},
+ {"PGLUE misc_mctp", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
+ (1 << ATTENTION_OFFSET_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ {"PCIE glue/PXP VPD %d",
+ (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
+ }
+ },
+
+ {
+ { /* After Invert 3 */
+ {"General Attention %d",
+ (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 4 */
+ {"General Attention 32", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"General Attention %d",
+ (2 << ATTENTION_LENGTH_SHIFT) |
+ (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
+ {"General Attention 35", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT),
+ NULL, BLOCK_CNIG},
+ {"MCP CPU", ATTENTION_SINGLE,
+ qed_mcp_attn_cb, MAX_BLOCK_ID},
+ {"MCP Watchdog timer", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"AVS stop status ready", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
+ {"MSTAT per-path", ATTENTION_PAR_INT,
+ NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
+ {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
+ {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
+ {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
+ {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
+ }
+ },
+
+ {
+ { /* After Invert 5 */
+ {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
+ {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
+ {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
+ {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
+ {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
+ {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
+ {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
+ {"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM},
+ {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
+ {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
+ {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
+ {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
+ {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
+ {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
+ {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
+ {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
+ }
+ },
+
+ {
+ { /* After Invert 6 */
+ {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
+ {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
+ {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
+ {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
+ {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
+ {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
+ {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
+ {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
+ {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
+ {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
+ {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
+ {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
+ {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
+ {"DORQ", ATTENTION_PAR_INT,
+ qed_dorq_attn_cb, BLOCK_DORQ},
+ {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
+ {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
+ }
+ },
+
+ {
+ { /* After Invert 7 */
+ {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
+ {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
+ {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
+ {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
+ {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
+ {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
+ {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
+ {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
+ {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
+ {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
+ {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
+ {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
+ {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
+ {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
+ {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
+ {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
+ {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
+ }
+ },
+
+ {
+ { /* After Invert 8 */
+ {"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWRQ2},
+ {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
+ {"PSWWR (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWWR2},
+ {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
+ {"PSWRD (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWRD2},
+ {"PSWHST", ATTENTION_PAR_INT,
+ qed_pswhst_attn_cb, BLOCK_PSWHST},
+ {"PSWHST (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWHST2},
+ {"GRC", ATTENTION_PAR_INT,
+ qed_grc_attn_cb, BLOCK_GRC},
+ {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
+ {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
+ {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
+ {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
+ NULL, BLOCK_PGLCS},
+ {"PERST_B assertion", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"PERST_B deassertion", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 9 */
+ {"MCP Latched memory", ATTENTION_PAR,
+ NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad cache", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"MCP Latched ump_tx", ATTENTION_PAR,
+ NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad", ATTENTION_PAR,
+ NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ }
+ },
+};
+
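+/* A sketch of the flag encoding assumed by the table above (the
+ * authoritative ATTENTION_* macros live earlier in this file):
+ *	bit 0         ATTENTION_PARITY - entry also carries a parity bit
+ *	bits [11:4]   ATTENTION_LENGTH - number of AEU bits consumed
+ *	bits [19:12]  ATTENTION_OFFSET - starting index for "%d" names
+ * ATTENTION_SINGLE covers one bit, ATTENTION_PAR_INT two (parity
+ * first, then interrupt), and (32 << ATTENTION_LENGTH_SHIFT) spans a
+ * full register with a single description.
+ */
+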
+#define ATTN_STATE_BITS (0xfff)
#define ATTN_BITS_MASKABLE (0x3ff)
struct qed_sb_attn_info {
/* Virtual & Physical address of the SB */
struct atten_status_block *sb_attn;
- dma_addr_t sb_phys;
+ dma_addr_t sb_phys;
/* Last seen running index */
- u16 index;
+ u16 index;
+
+ /* A mask of the AEU bits resulting in a parity error */
+ u32 parity_mask[NUM_ATTN_REGS];
+
+ /* A pointer to the attention description structure */
+ struct aeu_invert_reg *p_aeu_desc;
/* Previously asserted attentions, which are still unasserted */
- u16 known_attn;
+ u16 known_attn;
/* Cleanup address for the link's general hw attention */
- u32 mfw_attn_addr;
+ u32 mfw_attn_addr;
};
static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
@@ -127,6 +1840,162 @@ static int qed_int_assertion(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_int_deassertion_print_bit(struct qed_hwfn *p_hwfn,
+ struct attn_hw_reg *p_reg_desc,
+ struct attn_hw_block *p_block,
+ enum qed_attention_type type,
+ u32 val, u32 mask)
+{
+ int j;
+
+ for (j = 0; j < p_reg_desc->num_of_bits; j++) {
+ if (!(val & (1 << j)))
+ continue;
+
+ DP_NOTICE(p_hwfn,
+ "%s (%s): reg %d [0x%08x], bit %d [%s]\n",
+ p_block->name,
+ type == QED_ATTN_TYPE_ATTN ? "Interrupt" :
+ "Parity",
+ p_reg_desc->reg_idx, p_reg_desc->sts_addr,
+ j, (mask & (1 << j)) ? " [MASKED]" : "");
+ }
+}
+
+/**
+ * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
+ * cause of the attention
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the attention
+ * @param aeu_en_reg - register offset of the AEU enable reg. which configured
+ * this bit to this group.
+ * @param bitmask - bitmask of the bits within aeu_en_reg which caused
+ * the attention
+ *
+ * @return int
+ */
+static int
+qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ u32 aeu_en_reg,
+ u32 bitmask)
+{
+ int rc = -EINVAL;
+ u32 val;
+
+ DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
+ p_aeu->bit_name, bitmask);
+
+ /* Call callback before clearing the interrupt status */
+ if (p_aeu->cb) {
+ DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
+ p_aeu->bit_name);
+ rc = p_aeu->cb(p_hwfn);
+ }
+
+ /* Handle HW block interrupt registers */
+ if (p_aeu->block_index != MAX_BLOCK_ID) {
+ struct attn_hw_block *p_block;
+ u32 mask;
+ int i;
+
+ p_block = &attn_blocks[p_aeu->block_index];
+
+ /* Handle each interrupt register */
+ for (i = 0; i < p_block->chip_regs[0].num_of_int_regs; i++) {
+ struct attn_hw_reg *p_reg_desc;
+ u32 sts_addr;
+
+ p_reg_desc = p_block->chip_regs[0].int_regs[i];
+
+ /* In case of a fatal attention, don't clear the status
+ * so it will appear in a subsequent idle check.
+ */
+ if (rc == 0)
+ sts_addr = p_reg_desc->sts_clr_addr;
+ else
+ sts_addr = p_reg_desc->sts_addr;
+
+ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr);
+ mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ p_reg_desc->mask_addr);
+ qed_int_deassertion_print_bit(p_hwfn, p_reg_desc,
+ p_block,
+ QED_ATTN_TYPE_ATTN,
+ val, mask);
+ }
+ }
+
+ /* If the attention is benign, no need to prevent it */
+ if (!rc)
+ goto out;
+
+ /* Prevent this Attention from being asserted in the future */
+ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
+ DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
+ p_aeu->bit_name);
+
+out:
+ return rc;
+}
+
+static void qed_int_parity_print(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ struct attn_hw_block *p_block,
+ u8 bit_index)
+{
+ int i;
+
+ for (i = 0; i < p_block->chip_regs[0].num_of_prty_regs; i++) {
+ struct attn_hw_reg *p_reg_desc;
+ u32 val, mask;
+
+ p_reg_desc = p_block->chip_regs[0].prty_regs[i];
+
+ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ p_reg_desc->sts_clr_addr);
+ mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ p_reg_desc->mask_addr);
+ qed_int_deassertion_print_bit(p_hwfn, p_reg_desc,
+ p_block,
+ QED_ATTN_TYPE_PARITY,
+ val, mask);
+ }
+}
+
+/**
+ * @brief qed_int_deassertion_parity - handle a single parity AEU source
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the parity
+ * @param bit_index
+ */
+static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ u8 bit_index)
+{
+ u32 block_id = p_aeu->block_index;
+
+ DP_INFO(p_hwfn->cdev, "%s[%d] parity attention is set\n",
+ p_aeu->bit_name, bit_index);
+
+ if (block_id != MAX_BLOCK_ID) {
+ qed_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id],
+ bit_index);
+
+ /* In BB, there's a single parity bit for several blocks */
+ if (block_id == BLOCK_BTB) {
+ qed_int_parity_print(p_hwfn, p_aeu,
+ &attn_blocks[BLOCK_OPTE],
+ bit_index);
+ qed_int_parity_print(p_hwfn, p_aeu,
+ &attn_blocks[BLOCK_MCP],
+ bit_index);
+ }
+ }
+}
+
/**
* @brief - handles deassertion of previously asserted attentions.
*
@@ -139,17 +2008,108 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
u16 deasserted_bits)
{
struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
- u32 aeu_mask;
+ u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
+ u8 i, j, k, bit_idx;
+ int rc = 0;
+
+ /* Read the attention registers in the AEU */
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ MISC_REG_AEU_AFTER_INVERT_1_IGU +
+ i * 0x4);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Deasserted bits [%d]: %08x\n",
+ i, aeu_inv_arr[i]);
+ }
+
+ /* Find parity attentions first */
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
+ u32 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32));
+ u32 parities;
+
+ /* Skip register in which no parity bit is currently set */
+ parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
+ if (!parities)
+ continue;
+
+ for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
+
+ if ((p_bit->flags & ATTENTION_PARITY) &&
+ !!(parities & (1 << bit_idx)))
+ qed_int_deassertion_parity(p_hwfn, p_bit,
+ bit_idx);
- if (deasserted_bits != 0x100)
- DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n");
+ bit_idx += ATTENTION_LENGTH(p_bit->flags);
+ }
+ }
+
+ /* Find non-parity cause for attention and act */
+ for (k = 0; k < MAX_ATTN_GRPS; k++) {
+ struct aeu_invert_reg_bit *p_aeu;
+
+ /* Handle only groups whose attention is currently deasserted */
+ if (!(deasserted_bits & (1 << k)))
+ continue;
+
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32) +
+ k * sizeof(u32) * NUM_ATTN_REGS;
+ u32 en, bits;
+
+ en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+ bits = aeu_inv_arr[i] & en;
+
+ /* Skip if no bit from this group is currently set */
+ if (!bits)
+ continue;
+
+ /* Find all set bits in the current register which belong
+ * to the current group; these are responsible for the
+ * previous assertion.
+ */
+ for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ u8 bit, bit_len;
+ u32 bitmask;
+
+ p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
+
+ /* No need to handle parity-only bits */
+ if (p_aeu->flags == ATTENTION_PAR)
+ continue;
+
+ bit = bit_idx;
+ bit_len = ATTENTION_LENGTH(p_aeu->flags);
+ if (p_aeu->flags & ATTENTION_PAR_INT) {
+ /* Skip Parity */
+ bit++;
+ bit_len--;
+ }
+
+ bitmask = bits & (((1 << bit_len) - 1) << bit);
+ if (bitmask) {
+ /* Handle source of the attention */
+ qed_int_deassertion_aeu_bit(p_hwfn,
+ p_aeu,
+ aeu_en,
+ bitmask);
+ }
+
+ bit_idx += ATTENTION_LENGTH(p_aeu->flags);
+ }
+ }
+ }
/* Clear IGU indication for the deasserted bits */
DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_IGU_CMD +
- ((IGU_CMD_ATTN_BIT_CLR_UPPER -
- IGU_CMD_INT_ACK_BASE) << 3),
- ~((u32)deasserted_bits));
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_CLR_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3),
+ ~((u32)deasserted_bits));
/* Unmask deasserted attentions in IGU */
aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
@@ -160,7 +2120,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
/* Clear deassertion from inner state */
sb_attn_sw->known_attn &= ~deasserted_bits;
- return 0;
+ return rc;
}
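+/* Worked example for the group walk above: an AEU entry whose bits
+ * start at index 5 with length 3 yields
+ *	bitmask = bits & (((1 << 3) - 1) << 5) = bits & 0xe0,
+ * i.e. only the three bits owned by that entry are handed to
+ * qed_int_deassertion_aeu_bit().
+ */
+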
static int qed_int_attentions(struct qed_hwfn *p_hwfn)
@@ -343,17 +2303,17 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie)
static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
- struct qed_dev *cdev = p_hwfn->cdev;
- struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
-
- if (p_sb) {
- if (p_sb->sb_attn)
- dma_free_coherent(&cdev->pdev->dev,
- SB_ATTN_ALIGNED_SIZE(p_hwfn),
- p_sb->sb_attn,
- p_sb->sb_phys);
- kfree(p_sb);
- }
+ struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
+
+ if (!p_sb)
+ return;
+
+ if (p_sb->sb_attn)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_attn,
+ p_sb->sb_phys);
+ kfree(p_sb);
}
static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
@@ -379,10 +2339,31 @@ static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
dma_addr_t sb_phy_addr)
{
struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+ int i, j, k;
sb_info->sb_attn = sb_virt_addr;
sb_info->sb_phys = sb_phy_addr;
+ /* Set the pointer to the AEU descriptors */
+ sb_info->p_aeu_desc = aeu_descs;
+
+ /* Calculate Parity Masks */
+ memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ /* j is array index, k is bit index */
+ for (j = 0, k = 0; k < 32; j++) {
+ unsigned int flags = aeu_descs[i].bits[j].flags;
+
+ if (flags & ATTENTION_PARITY)
+ sb_info->parity_mask[i] |= 1 << k;
+
+ k += ATTENTION_LENGTH(flags);
+ }
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Attn Mask [Reg %d]: 0x%08x\n",
+ i, sb_info->parity_mask[i]);
+ }
+
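+ /* Worked example, derived from the aeu_descs table: in "After
+ * Invert 2" only "PGLUE B RBC" is ATTENTION_PAR_INT; its two bits
+ * land at indices 2-3 (after two single-bit entries) with parity
+ * first, so parity_mask[1] evaluates to 0x00000004.
+ */
+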
/* Set the address of cleanup for the mcp attention */
sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
MISC_REG_AEU_GENERAL_ATTN_0;
@@ -433,6 +2414,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
u16 vf_number,
u8 vf_valid)
{
+ struct qed_dev *cdev = p_hwfn->cdev;
u32 cau_state;
memset(p_sb_entry, 0, sizeof(*p_sb_entry));
@@ -451,14 +2433,12 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
cau_state = CAU_HC_DISABLE_STATE;
- if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
cau_state = CAU_HC_ENABLE_STATE;
- if (!p_hwfn->cdev->rx_coalesce_usecs)
- p_hwfn->cdev->rx_coalesce_usecs =
- QED_CAU_DEF_RX_USECS;
- if (!p_hwfn->cdev->tx_coalesce_usecs)
- p_hwfn->cdev->tx_coalesce_usecs =
- QED_CAU_DEF_TX_USECS;
+ if (!cdev->rx_coalesce_usecs)
+ cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
+ if (!cdev->tx_coalesce_usecs)
+ cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
}
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
@@ -638,8 +2618,10 @@ int qed_int_sb_release(struct qed_hwfn *p_hwfn,
sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
- p_hwfn->sbs_info[sb_id] = NULL;
- p_hwfn->num_sbs--;
+ if (p_hwfn->sbs_info[sb_id] != NULL) {
+ p_hwfn->sbs_info[sb_id] = NULL;
+ p_hwfn->num_sbs--;
+ }
return 0;
}
@@ -648,14 +2630,15 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
{
struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
- if (p_sb) {
- if (p_sb->sb_info.sb_virt)
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- SB_ALIGNED_SIZE(p_hwfn),
- p_sb->sb_info.sb_virt,
- p_sb->sb_info.sb_phys);
- kfree(p_sb);
- }
+ if (!p_sb)
+ return;
+
+ if (p_sb->sb_info.sb_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_info.sb_virt,
+ p_sb->sb_info.sb_phys);
+ kfree(p_sb);
}
static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
@@ -692,25 +2675,6 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
return 0;
}
-static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
-{
- if (!p_hwfn)
- return;
-
- if (p_hwfn->p_sp_sb)
- qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
- else
- DP_NOTICE(p_hwfn->cdev,
- "Failed to setup Slow path status block - NULL pointer\n");
-
- if (p_hwfn->p_sb_attn)
- qed_int_sb_attn_setup(p_hwfn, p_ptt);
- else
- DP_NOTICE(p_hwfn->cdev,
- "Failed to setup attentions status block - NULL pointer\n");
-}
-
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
qed_int_comp_cb_t comp_cb,
void *cookie,
@@ -718,36 +2682,36 @@ int qed_int_register_cb(struct qed_hwfn *p_hwfn,
__le16 **p_fw_cons)
{
struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
- int qed_status = -ENOMEM;
+ int rc = -ENOMEM;
u8 pi;
/* Look for a free index */
for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
- if (!p_sp_sb->pi_info_arr[pi].comp_cb) {
- p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
- p_sp_sb->pi_info_arr[pi].cookie = cookie;
- *sb_idx = pi;
- *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
- qed_status = 0;
- break;
- }
+ if (p_sp_sb->pi_info_arr[pi].comp_cb)
+ continue;
+
+ p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
+ p_sp_sb->pi_info_arr[pi].cookie = cookie;
+ *sb_idx = pi;
+ *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+ rc = 0;
+ break;
}
- return qed_status;
+ return rc;
}
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
{
struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
- int qed_status = -ENOMEM;
- if (p_sp_sb->pi_info_arr[pi].comp_cb) {
- p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
- p_sp_sb->pi_info_arr[pi].cookie = NULL;
- qed_status = 0;
- }
+ if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
+ return -ENOMEM;
+
+ p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
+ p_sp_sb->pi_info_arr[pi].cookie = NULL;
- return qed_status;
+ return 0;
}
u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
@@ -786,16 +2750,13 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
enum qed_int_mode int_mode)
{
- int rc, i;
-
- /* Mask non-link attentions */
- for (i = 0; i < 9; i++)
- qed_wr(p_hwfn, p_ptt,
- MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
+ int rc;
- /* Configure AEU signal change to produce attentions for link */
+ /* Configure AEU signal change to produce attentions */
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
/* Flush the writes to IGU */
mmiowb();
@@ -937,6 +2898,39 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
}
}
+static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 sb_id)
+{
+ u32 val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * sb_id);
+ struct qed_igu_block *p_block;
+
+ p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+
+ /* Stop scanning when the first invalid PF entry is hit */
+ if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+ GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+ goto out;
+
+ /* Fill the block information */
+ p_block->status = QED_IGU_STATUS_VALID;
+ p_block->function_id = GET_FIELD(val,
+ IGU_MAPPING_LINE_FUNCTION_NUMBER);
+ p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+ p_block->vector_number = GET_FIELD(val,
+ IGU_MAPPING_LINE_VECTOR_NUMBER);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d is_pf = %d vector_num = 0x%x\n",
+ sb_id, val, p_block->function_id,
+ p_block->is_pf, p_block->vector_number);
+
+out:
+ return val;
+}
+
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -963,26 +2957,13 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
sb_id++) {
blk = &p_igu_info->igu_map.igu_blocks[sb_id];
- val = qed_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+ val = qed_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
/* stop scanning when hit first invalid PF entry */
if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
break;
- blk->status = QED_IGU_STATUS_VALID;
- blk->function_id = GET_FIELD(val,
- IGU_MAPPING_LINE_FUNCTION_NUMBER);
- blk->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
- blk->vector_number = GET_FIELD(val,
- IGU_MAPPING_LINE_VECTOR_NUMBER);
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU_BLOCK[sb_id]:%x:func_id = %d is_pf = %d vector_num = 0x%x\n",
- val, blk->function_id, blk->is_pf,
- blk->vector_number);
-
if (blk->is_pf) {
if (blk->function_id == p_hwfn->rel_pf_id) {
blk->status |= QED_IGU_STATUS_PF;
@@ -1117,22 +3098,22 @@ void qed_int_free(struct qed_hwfn *p_hwfn)
void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
- qed_int_sp_sb_setup(p_hwfn, p_ptt);
+ qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+ qed_int_sb_attn_setup(p_hwfn, p_ptt);
qed_int_sp_dpc_setup(p_hwfn);
}
-int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
- int *p_iov_blks)
+void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_sb_cnt_info *p_sb_cnt_info)
{
struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
- if (!info)
- return 0;
-
- if (p_iov_blks)
- *p_iov_blks = info->free_blks;
+ if (!info || !p_sb_cnt_info)
+ return;
- return info->igu_sb_cnt;
+ p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
+ p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
+ p_sb_cnt_info->sb_free_blk = info->free_blks;
}
void qed_int_disable_post_isr_release(struct qed_dev *cdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 51e0b09a7f47..c57f2e680770 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -161,12 +161,12 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie);
 * blocks configured for this function in the IGU.
*
* @param p_hwfn
- * @param p_iov_blks - configured free blks for vfs
+ * @param p_sb_cnt_info
*
 * @return void
*/
-int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
- int *p_iov_blks);
+void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_sb_cnt_info *p_sb_cnt_info);
/**
* @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index bba59c51f72c..3f35c6ca9252 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -31,6 +31,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
@@ -131,16 +132,29 @@ struct qed_sp_vport_update_params {
struct qed_filter_accept_flags accept_flags;
};
+enum qed_tpa_mode {
+ QED_TPA_MODE_NONE,
+ QED_TPA_MODE_UNUSED,
+ QED_TPA_MODE_GRO,
+ QED_TPA_MODE_MAX
+};
+
+struct qed_sp_vport_start_params {
+ enum qed_tpa_mode tpa_mode;
+ bool remove_inner_vlan;
+ bool drop_ttl0;
+ u8 max_buffers_per_cqe;
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u8 vport_id;
+ u16 mtu;
+};
+
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
- u32 concrete_fid,
- u16 opaque_fid,
- u8 vport_id,
- u16 mtu,
- u8 drop_ttl0_flg,
- u8 inner_vlan_removal_en_flg)
+ struct qed_sp_vport_start_params *p_params)
{
struct vport_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -149,13 +163,13 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
u16 rx_mode = 0;
u8 abs_vport_id = 0;
- rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != 0)
return rc;
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qed_spq_get_cid(p_hwfn);
- init_data.opaque_fid = opaque_fid;
+ init_data.opaque_fid = p_params->opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -167,9 +181,9 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.vport_start;
p_ramrod->vport_id = abs_vport_id;
- p_ramrod->mtu = cpu_to_le16(mtu);
- p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg;
- p_ramrod->drop_ttl0_en = drop_ttl0_flg;
+ p_ramrod->mtu = cpu_to_le16(p_params->mtu);
+ p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
+ p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
@@ -180,9 +194,26 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
memset(&p_ramrod->tpa_param, 0,
sizeof(struct eth_vport_tpa_param));
+ p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
+
+ switch (p_params->tpa_mode) {
+ case QED_TPA_MODE_GRO:
+ p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ p_ramrod->tpa_param.tpa_max_size = (u16)-1;
+ p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
+ p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
+ p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
+ p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
+ p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
+ p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
+ break;
+ default:
+ break;
+ }
+
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
- concrete_fid);
+ p_params->concrete_fid);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
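+/* A note on the GRO settings above: tpa_max_size of (u16)-1 leaves
+ * the aggregation size effectively uncapped, while the mtu/2
+ * thresholds restrict starting/continuing an aggregation to packets
+ * larger than half the configured MTU.
+ */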
@@ -1231,6 +1262,328 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
return rc;
}
+/* Statistics related code */
+static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
+ u32 *p_addr,
+ u32 *p_len,
+ u16 statistics_bin)
+{
+ *p_addr = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+}
+
+static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_pstorm_per_queue_stat pstats;
+ u32 pstats_addr = 0, pstats_len = 0;
+
+ __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
+ statistics_bin);
+
+ memset(&pstats, 0, sizeof(pstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats,
+ pstats_addr, pstats_len);
+
+ p_stats->tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
+}
+
+static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
+ u32 *p_addr,
+ u32 *p_len)
+{
+ *p_addr = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+ *p_len = sizeof(struct tstorm_per_port_stat);
+}
+
+static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ u32 tstats_addr = 0, tstats_len = 0;
+ struct tstorm_per_port_stat tstats;
+
+ __qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len);
+
+ memset(&tstats, 0, sizeof(tstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats,
+ tstats_addr, tstats_len);
+
+ p_stats->mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ p_stats->mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+}
+
+static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
+ u32 *p_addr,
+ u32 *p_len,
+ u16 statistics_bin)
+{
+ *p_addr = BAR0_MAP_REG_USDM_RAM +
+ USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+}
+
+static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_ustorm_per_queue_stat ustats;
+ u32 ustats_addr = 0, ustats_len = 0;
+
+ __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
+ statistics_bin);
+
+ memset(&ustats, 0, sizeof(ustats));
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats,
+ ustats_addr, ustats_len);
+
+ p_stats->rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rx_ucast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rx_mcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rx_bcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
+ u32 *p_addr,
+ u32 *p_len,
+ u16 statistics_bin)
+{
+ *p_addr = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+}
+
+static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_mstorm_per_queue_stat mstats;
+ u32 mstats_addr = 0, mstats_len = 0;
+
+ __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
+ statistics_bin);
+
+ memset(&mstats, 0, sizeof(mstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &mstats,
+ mstats_addr, mstats_len);
+
+ p_stats->no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ p_stats->ttl0_discard +=
+ HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ p_stats->tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ p_stats->tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+}
+
+static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats)
+{
+ struct port_stats port_stats;
+ int j;
+
+ memset(&port_stats, 0, sizeof(port_stats));
+
+ qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, stats),
+ sizeof(port_stats));
+
+ p_stats->rx_64_byte_packets += port_stats.pmm.r64;
+ p_stats->rx_127_byte_packets += port_stats.pmm.r127;
+ p_stats->rx_255_byte_packets += port_stats.pmm.r255;
+ p_stats->rx_511_byte_packets += port_stats.pmm.r511;
+ p_stats->rx_1023_byte_packets += port_stats.pmm.r1023;
+ p_stats->rx_1518_byte_packets += port_stats.pmm.r1518;
+ p_stats->rx_1522_byte_packets += port_stats.pmm.r1522;
+ p_stats->rx_2047_byte_packets += port_stats.pmm.r2047;
+ p_stats->rx_4095_byte_packets += port_stats.pmm.r4095;
+ p_stats->rx_9216_byte_packets += port_stats.pmm.r9216;
+ p_stats->rx_16383_byte_packets += port_stats.pmm.r16383;
+ p_stats->rx_crc_errors += port_stats.pmm.rfcs;
+ p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
+ p_stats->rx_pause_frames += port_stats.pmm.rxpf;
+ p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
+ p_stats->rx_align_errors += port_stats.pmm.raln;
+ p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
+ p_stats->rx_oversize_packets += port_stats.pmm.rovr;
+ p_stats->rx_jabbers += port_stats.pmm.rjbr;
+ p_stats->rx_undersize_packets += port_stats.pmm.rund;
+ p_stats->rx_fragments += port_stats.pmm.rfrg;
+ p_stats->tx_64_byte_packets += port_stats.pmm.t64;
+ p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
+ p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
+ p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
+ p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
+ p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
+ p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
+ p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
+ p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
+ p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
+ p_stats->tx_pause_frames += port_stats.pmm.txpf;
+ p_stats->tx_pfc_frames += port_stats.pmm.txpp;
+ p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
+ p_stats->tx_total_collisions += port_stats.pmm.tncl;
+ p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
+ p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
+ p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
+ p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
+ p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
+ p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
+ p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
+ p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
+ p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
+ p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+ for (j = 0; j < 8; j++) {
+ p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
+ p_stats->brb_discards += port_stats.brb.brb_discard[j];
+ }
+}
+
+static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *stats,
+ u16 statistics_bin)
+{
+ __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
+ __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
+
+ if (p_hwfn->mcp_info)
+ __qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
+}
+
+static void _qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats)
+{
+ u8 fw_vport = 0;
+ int i;
+
+ memset(stats, 0, sizeof(*stats));
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
+
+ /* The main vport is relative index 0; translate it to the fw vport index */
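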
+ if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
+ DP_ERR(p_hwfn, "No vport available!\n");
+ continue;
+ }
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+}
+
+void qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats)
+{
+ u32 i;
+
+ if (!cdev) {
+ memset(stats, 0, sizeof(*stats));
+ return;
+ }
+
+ _qed_get_vport_stats(cdev, stats);
+
+ if (!cdev->reset_stats)
+ return;
+
+ /* Reduce the statistics baseline */
+ for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
+ ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
+}
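The baseline subtraction above works only because struct qed_eth_stats is laid out as a flat sequence of u64 counters, letting the loop walk the struct as an array instead of naming every field. A hedged sketch of the pattern, assuming a counters-only struct:

    #include <stddef.h>
    #include <stdint.h>

    struct counters {               /* illustrative stand-in for qed_eth_stats */
            uint64_t rx_pkts;
            uint64_t tx_pkts;
    };

    static void apply_baseline(struct counters *cur, const struct counters *base)
    {
            uint64_t *c = (uint64_t *)cur;
            const uint64_t *b = (const uint64_t *)base;
            size_t i;

            /* Report deltas relative to the captured baseline. */
            for (i = 0; i < sizeof(*cur) / sizeof(uint64_t); i++)
                    c[i] -= b[i];
    }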
+
+/* Zeroes the V-PORT specific portion of the stats (port stats remain untouched) */
+void qed_reset_vport_stats(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct eth_mstorm_per_queue_stat mstats;
+ struct eth_ustorm_per_queue_stat ustats;
+ struct eth_pstorm_per_queue_stat pstats;
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u32 addr = 0, len = 0;
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ memset(&mstats, 0, sizeof(mstats));
+ __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
+ qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
+
+ memset(&ustats, 0, sizeof(ustats));
+ __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
+ qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
+
+ memset(&pstats, 0, sizeof(pstats));
+ __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
+ qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ /* PORT statistics are not necessarily reset, so we need to
+ * read and create a baseline for future statistics.
+ */
+ if (!cdev->reset_stats)
+ DP_INFO(cdev, "Reset stats not allocated\n");
+ else
+ _qed_get_vport_stats(cdev, cdev->reset_stats);
+}
+
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
struct qed_dev_eth_info *info)
{
@@ -1269,24 +1622,25 @@ static void qed_register_eth_ops(struct qed_dev *cdev,
}
static int qed_start_vport(struct qed_dev *cdev,
- u8 vport_id,
- u16 mtu,
- u8 drop_ttl0_flg,
- u8 inner_vlan_removal_en_flg)
+ struct qed_start_vport_params *params)
{
int rc, i;
for_each_hwfn(cdev, i) {
+ struct qed_sp_vport_start_params start = { 0 };
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- rc = qed_sp_vport_start(p_hwfn,
- p_hwfn->hw_info.concrete_fid,
- p_hwfn->hw_info.opaque_fid,
- vport_id,
- mtu,
- drop_ttl0_flg,
- inner_vlan_removal_en_flg);
-
+ start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
+ QED_TPA_MODE_NONE;
+ start.remove_inner_vlan = params->remove_inner_vlan;
+ start.drop_ttl0 = params->drop_ttl0;
+ start.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ start.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ start.vport_id = params->vport_id;
+ start.max_buffers_per_cqe = 16;
+ start.mtu = params->mtu;
+
+ rc = qed_sp_vport_start(p_hwfn, &start);
if (rc) {
DP_ERR(cdev, "Failed to start VPORT\n");
return rc;
@@ -1296,7 +1650,7 @@ static int qed_start_vport(struct qed_dev *cdev,
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
"Started V-PORT %d with MTU %d\n",
- vport_id, mtu);
+ start.vport_id, start.mtu);
}
qed_reset_vport_stats(cdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 25d6e91335ea..26d40db07ddd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -634,15 +634,18 @@ static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
static int qed_slowpath_setup_int(struct qed_dev *cdev,
enum qed_int_mode int_mode)
{
- int rc, i;
- u8 num_vectors = 0;
-
+ struct qed_sb_cnt_info sb_cnt_info;
+ int rc;
+ int i;
memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
cdev->int_params.in.int_mode = int_mode;
- for_each_hwfn(cdev, i)
- num_vectors += qed_int_get_num_sbs(&cdev->hwfns[i], NULL) + 1;
- cdev->int_params.in.num_vectors = num_vectors;
+ for_each_hwfn(cdev, i) {
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
+ cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
+ cdev->int_params.in.num_vectors++; /* slowpath */
+ }
/* We want a minimum of one slowpath and one fastpath vector per hwfn */
cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
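As a worked example (the numbers are illustrative): a two-hwfn device whose functions each report 16 status blocks would request 2 * (16 + 1) = 34 vectors, while min_msix_cnt stays at 2 * 2 = 4, i.e. one slowpath and one fastpath vector per hwfn, as the comment states.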
@@ -776,7 +779,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
true, data);
if (rc)
- goto err3;
+ goto err2;
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
@@ -795,12 +798,14 @@ static int qed_slowpath_start(struct qed_dev *cdev,
return rc;
}
+ qed_reset_vport_stats(cdev);
+
return 0;
-err3:
- qed_free_stream_mem(cdev);
- qed_slowpath_irq_free(cdev);
err2:
+ qed_hw_timers_stop_all(cdev);
+ qed_slowpath_irq_free(cdev);
+ qed_free_stream_mem(cdev);
qed_disable_msix(cdev);
err1:
qed_resc_free(cdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index e8df12335a97..c15b1622e636 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -127,8 +127,20 @@
0x00c000UL
#define DORQ_REG_IFEN \
0x100040UL
+#define DORQ_REG_DB_DROP_REASON \
+ 0x100a2cUL
+#define DORQ_REG_DB_DROP_DETAILS \
+ 0x100a24UL
+#define DORQ_REG_DB_DROP_DETAILS_ADDRESS \
+ 0x100a1cUL
#define GRC_REG_TIMEOUT_EN \
0x050404UL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID \
+ 0x050054UL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 \
+ 0x05004cUL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 \
+ 0x050050UL
#define IGU_REG_BLOCK_CONFIGURATION \
0x180040UL
#define MCM_REG_INIT \
@@ -155,6 +167,40 @@
0x1100000UL
#define PGLUE_B_REG_ADMIN_PER_PF_REGION \
0x2a9000UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 \
+ 0x2aa150UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 \
+ 0x2aa144UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 \
+ 0x2aa148UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS \
+ 0x2aa14cUL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 \
+ 0x2aa154UL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 \
+ 0x2aa158UL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS \
+ 0x2aa15cUL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 \
+ 0x2aa160UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL \
+ 0x2aa164UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS \
+ 0x2aa54cUL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 \
+ 0x2aa544UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 \
+ 0x2aa548UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 \
+ 0x2aae74UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 \
+ 0x2aae78UL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS \
+ 0x2aae7cUL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 \
+ 0x2aae80UL
+#define PGLUE_B_REG_LATCHED_ERRORS_CLR \
+ 0x2aa3bcUL
#define PRM_REG_DISABLE_PRM \
0x230000UL
#define PRS_REG_SOFT_RST \
@@ -171,6 +217,14 @@
0x2a0040UL
#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
0x29e050UL
+#define PSWHST_REG_INCORRECT_ACCESS_VALID \
+ 0x2a0070UL
+#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS \
+ 0x2a0074UL
+#define PSWHST_REG_INCORRECT_ACCESS_DATA \
+ 0x2a0068UL
+#define PSWHST_REG_INCORRECT_ACCESS_LENGTH \
+ 0x2a006cUL
#define PSWRD_REG_DBG_SELECT \
0x29c040UL
#define PSWRD2_REG_CONF11 \
@@ -333,6 +387,8 @@
0x180800UL
#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
0x00849cUL
+#define MISC_REG_AEU_AFTER_INVERT_1_IGU \
+ 0x0087b4UL
#define MISC_REG_AEU_MASK_ATTN_IGU \
0x008494UL
#define IGU_REG_CLEANUP_STATUS_0 \
@@ -363,6 +419,10 @@
0x7 << 0)
#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
0
+#define MCP_REG_CPU_STATE \
+ 0xe05004UL
+#define MCP_REG_CPU_EVENT_MASK \
+ 0xe05008UL
#define PGLUE_B_REG_PF_BAR0_SIZE \
0x2aae60UL
#define PGLUE_B_REG_PF_BAR1_SIZE \
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 02e17d331f22..d023251544d9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -160,6 +160,7 @@ struct qede_dev {
u16 q_num_rx_buffers; /* Must be a power of two */
u16 q_num_tx_buffers; /* Must be a power of two */
+ bool gro_disable;
struct list_head vlan_list;
u16 configured_vlans;
u16 non_configured_vlans;
@@ -188,6 +189,24 @@ struct sw_rx_data {
unsigned int page_offset;
};
+enum qede_agg_state {
+ QEDE_AGG_STATE_NONE = 0,
+ QEDE_AGG_STATE_START = 1,
+ QEDE_AGG_STATE_ERROR = 2
+};
+
+struct qede_agg_info {
+ struct sw_rx_data replace_buf;
+ dma_addr_t replace_buf_mapping;
+ struct sw_rx_data start_buf;
+ dma_addr_t start_buf_mapping;
+ struct eth_fast_path_rx_tpa_start_cqe start_cqe;
+ enum qede_agg_state agg_state;
+ struct sk_buff *skb;
+ int frag_id;
+ u16 vlan_tag;
+};
+
struct qede_rx_queue {
__le16 *hw_cons_ptr;
struct sw_rx_data *sw_rx_ring;
@@ -197,6 +216,9 @@ struct qede_rx_queue {
struct qed_chain rx_comp_ring;
void __iomem *hw_rxq_prod_addr;
+ /* GRO */
+ struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
+
int rx_buf_size;
unsigned int rx_buf_seg_size;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index ddd9e4aaa500..572862564ab6 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -866,6 +866,278 @@ static inline void qede_skb_receive(struct qede_dev *edev,
napi_gro_receive(&fp->napi, skb);
}
+static void qede_set_gro_params(struct qede_dev *edev,
+ struct sk_buff *skb,
+ struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+ u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
+
+ if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
+ PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2) /* L3 type 2 - IPv6 */
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+ skb_shinfo(skb)->gso_size = le16_to_cpu(cqe->len_on_first_bd) -
+ cqe->header_len;
+}
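Put differently, gso_size recovers the MSS of the aggregated flow: assuming the first BD carries the packet headers plus one full segment, e.g. a 66-byte Ethernet/IPv4/TCP header in front of 1448 bytes of payload, len_on_first_bd is 1514 and gso_size comes out as 1448.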
+
+static int qede_fill_frag_skb(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ u8 tpa_agg_index,
+ u16 len_on_bd)
+{
+ struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
+ NUM_RX_BDS_MAX];
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
+ struct sk_buff *skb = tpa_info->skb;
+
+ if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
+ goto out;
+
+ /* Add one frag and update the appropriate fields in the skb */
+ skb_fill_page_desc(skb, tpa_info->frag_id++,
+ current_bd->data, current_bd->page_offset,
+ len_on_bd);
+
+ if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
+ tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ goto out;
+ }
+
+ qed_chain_consume(&rxq->rx_bd_ring);
+ rxq->sw_rx_cons++;
+
+ skb->data_len += len_on_bd;
+ skb->truesize += rxq->rx_buf_seg_size;
+ skb->len += len_on_bd;
+
+ return 0;
+
+out:
+ return -ENOMEM;
+}
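Each successfully aggregated BD grows the skb by one page fragment, and the three length fields are updated together so the usual sk_buff invariants hold. A sketch of the resulting accounting (a reading aid, not driver text):

    /* After qede_fill_frag_skb() has run N times on one aggregation:
     *   skb->len      == skb_headlen(skb) + skb->data_len
     *   skb->data_len == sum of the N len_on_bd values
     *   skb->truesize grows by rx_buf_seg_size per fragment, so socket
     *   memory accounting charges the whole buffer segment, not only the
     *   bytes actually used.
     */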
+
+static void qede_tpa_start(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+ struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+ struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+ struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+ dma_addr_t mapping = tpa_info->replace_buf_mapping;
+ struct sw_rx_data *sw_rx_data_cons;
+ struct sw_rx_data *sw_rx_data_prod;
+ enum pkt_hash_types rxhash_type;
+ u32 rxhash;
+
+ sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+
+ /* Use the pre-allocated replacement buffer - we can't release the
+ * aggregation start buffer until the aggregation is over, and we don't
+ * want to risk an allocation failing here, so reallocate once the
+ * aggregation is over.
+ */
+ dma_unmap_addr_set(sw_rx_data_prod, mapping,
+ dma_unmap_addr(replace_buf, mapping));
+
+ sw_rx_data_prod->data = replace_buf->data;
+ rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+ rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+ sw_rx_data_prod->page_offset = replace_buf->page_offset;
+
+ rxq->sw_rx_prod++;
+
+ /* Move the partial skb from cons to the pool (don't unmap yet);
+ * save the mapping in case we drop the packet later on.
+ */
+ tpa_info->start_buf = *sw_rx_data_cons;
+ mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
+ le32_to_cpu(rx_bd_cons->addr.lo));
+
+ tpa_info->start_buf_mapping = mapping;
+ rxq->sw_rx_cons++;
+
+ /* Set the TPA state to start only if we are able to allocate an skb
+ * for this aggregation; otherwise mark it as an error and the
+ * aggregation will be dropped.
+ */
+ tpa_info->skb = netdev_alloc_skb(edev->ndev,
+ le16_to_cpu(cqe->len_on_first_bd));
+ if (unlikely(!tpa_info->skb)) {
+ tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ return;
+ }
+
+ skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
+ memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe));
+
+ /* Start filling in the aggregation info */
+ tpa_info->frag_id = 0;
+ tpa_info->agg_state = QEDE_AGG_STATE_START;
+
+ rxhash = qede_get_rxhash(edev, cqe->bitfields,
+ cqe->rss_hash, &rxhash_type);
+ skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
+ if ((le16_to_cpu(cqe->pars_flags.flags) >>
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
+ tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+ else
+ tpa_info->vlan_tag = 0;
+
+ /* This is needed in order to enable forwarding support */
+ qede_set_gro_params(edev, tpa_info->skb, cqe);
+
+ if (likely(cqe->ext_bd_len_list[0]))
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->ext_bd_len_list[0]));
+
+ if (unlikely(cqe->ext_bd_len_list[1])) {
+ DP_ERR(edev,
+ "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
+ tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ }
+}
+
+static void qede_gro_ip_csum(struct sk_buff *skb)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_network_header(skb, 0);
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+ th = tcp_hdr(skb);
+
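+	/* With a zero payload sum, tcp_v4_check() returns the folded and
+	 * inverted pseudo-header sum; inverting it again leaves the plain
+	 * pseudo-header checksum that tcp_gro_complete() expects in th->check.
+	 */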
+ th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+ iph->saddr, iph->daddr, 0);
+
+ tcp_gro_complete(skb);
+}
+
+static void qede_gro_ipv6_csum(struct sk_buff *skb)
+{
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_network_header(skb, 0);
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+ &iph->saddr, &iph->daddr, 0);
+ tcp_gro_complete(skb);
+}
+
+static void qede_gro_receive(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct sk_buff *skb,
+ u16 vlan_tag)
+{
+ if (skb_shinfo(skb)->gso_size) {
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ qede_gro_ip_csum(skb);
+ break;
+ case htons(ETH_P_IPV6):
+ qede_gro_ipv6_csum(skb);
+ break;
+ default:
+ DP_ERR(edev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ ntohs(skb->protocol));
+ }
+ }
+
+ skb_record_rx_queue(skb, fp->rss_id);
+ qede_skb_receive(edev, fp, skb, vlan_tag);
+}
+
+static inline void qede_tpa_cont(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+{
+ int i;
+
+ for (i = 0; cqe->len_list[i]; i++)
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->len_list[i]));
+
+ if (unlikely(i > 1))
+ DP_ERR(edev,
+ "Strange - TPA cont with more than a single len_list entry\n");
+}
+
+static void qede_tpa_end(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct eth_fast_path_rx_tpa_end_cqe *cqe)
+{
+ struct qede_rx_queue *rxq = fp->rxq;
+ struct qede_agg_info *tpa_info;
+ struct sk_buff *skb;
+ int i;
+
+ tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+ skb = tpa_info->skb;
+
+ for (i = 0; cqe->len_list[i]; i++)
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->len_list[i]));
+ if (unlikely(i > 1))
+ DP_ERR(edev,
+ "Strange - TPA end with more than a single len_list entry\n");
+
+ if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
+ goto err;
+
+ /* Sanity */
+ if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
+ DP_ERR(edev,
+ "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
+ cqe->num_of_bds, tpa_info->frag_id);
+ if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
+ DP_ERR(edev,
+ "Strange - total packet len [cqe] is %04x but SKB has len %04x\n",
+ le16_to_cpu(cqe->total_packet_len), skb->len);
+
+ memcpy(skb->data,
+ page_address(tpa_info->start_buf.data) +
+ tpa_info->start_cqe.placement_offset +
+ tpa_info->start_buf.page_offset,
+ le16_to_cpu(tpa_info->start_cqe.len_on_first_bd));
+
+ /* Recycle [mapped] start buffer for the next replacement */
+ tpa_info->replace_buf = tpa_info->start_buf;
+ tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
+
+ /* Finalize the SKB */
+ skb->protocol = eth_type_trans(skb, edev->ndev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
+
+ qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
+
+ tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+
+ return;
+err:
+ /* The BD starting the aggregation is still mapped; re-use it for
+ * future aggregations [as a replacement buffer].
+ */
+ memcpy(&tpa_info->replace_buf, &tpa_info->start_buf,
+ sizeof(struct sw_rx_data));
+ tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
+ tpa_info->start_buf.data = NULL;
+ tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+ dev_kfree_skb_any(tpa_info->skb);
+ tpa_info->skb = NULL;
+}
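Taken together, the three CQE handlers above implement a small per-aggregation state machine; roughly (a sketch, not driver text):

    /*
     * NONE  --TPA_START--> START : stash first BD, alloc skb, save start CQE
     * START --TPA_CONT---> START : append one page frag per len_list entry
     * START --TPA_END----> NONE  : copy headers, set GRO params, hand to GRO
     * alloc failure ------> ERROR: the aggregation is dropped at TPA_END
     */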
+
static u8 qede_check_csum(u16 flag)
{
u16 csum_flag = 0;
@@ -931,6 +1203,25 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
goto next_cqe;
}
+ if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
+ switch (cqe_type) {
+ case ETH_RX_CQE_TYPE_TPA_START:
+ qede_tpa_start(edev, rxq,
+ &cqe->fast_path_tpa_start);
+ goto next_cqe;
+ case ETH_RX_CQE_TYPE_TPA_CONT:
+ qede_tpa_cont(edev, rxq,
+ &cqe->fast_path_tpa_cont);
+ goto next_cqe;
+ case ETH_RX_CQE_TYPE_TPA_END:
+ qede_tpa_end(edev, fp,
+ &cqe->fast_path_tpa_end);
+ goto next_rx_only;
+ default:
+ break;
+ }
+ }
+
/* Get the data from the SW ring */
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
@@ -1057,9 +1348,9 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
qed_chain_consume(&rxq->rx_bd_ring);
-
next_rx:
rxq->sw_rx_cons++;
+next_rx_only:
rx_pkt++;
next_cqe: /* don't consume bd rx buffer */
@@ -1952,9 +2243,31 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
}
}
+static void qede_free_sge_mem(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ int i;
+
+ if (edev->gro_disable)
+ return;
+
+ for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
+ struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+
+ if (replace_buf->data) {
+ dma_unmap_page(&edev->pdev->dev,
+ dma_unmap_addr(replace_buf, mapping),
+ rxq->rx_buf_size, DMA_FROM_DEVICE);
+ __free_page(replace_buf->data);
+ }
+ }
+}
+
static void qede_free_mem_rxq(struct qede_dev *edev,
struct qede_rx_queue *rxq)
{
+ qede_free_sge_mem(edev, rxq);
+
/* Free rx buffers */
qede_free_rx_buffers(edev, rxq);
@@ -2010,6 +2323,53 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
return 0;
}
+static int qede_alloc_sge_mem(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ dma_addr_t mapping;
+ int i;
+
+ if (edev->gro_disable)
+ return 0;
+
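+	/* The per-aggregation replacement buffers are single pages, so an
+	 * MTU larger than PAGE_SIZE cannot be backed by them; fall back to
+	 * non-aggregated receive in that case.
+	 */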
+ if (edev->ndev->mtu > PAGE_SIZE) {
+ edev->gro_disable = 1;
+ return 0;
+ }
+
+ for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
+ struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+
+ replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
+ if (unlikely(!replace_buf->data)) {
+ DP_NOTICE(edev,
+ "Failed to allocate TPA skb pool [replacement buffer]\n");
+ goto err;
+ }
+
+ mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
+ rxq->rx_buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev,
+ "Failed to map TPA replacement buffer\n");
+ goto err;
+ }
+
+ dma_unmap_addr_set(replace_buf, mapping, mapping);
+ replace_buf->page_offset = 0;
+
+ tpa_info->replace_buf_mapping = mapping;
+ tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+ }
+
+ return 0;
+err:
+ qede_free_sge_mem(edev, rxq);
+ edev->gro_disable = 1;
+ return -ENOMEM;
+}
+
/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev,
struct qede_rx_queue *rxq)
@@ -2071,6 +2431,8 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
num_allocated);
}
+ qede_alloc_sge_mem(edev, rxq);
+
return 0;
err:
@@ -2233,6 +2595,8 @@ static void qede_init_fp(struct qede_dev *edev)
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
edev->ndev->name, rss_id);
}
+
+ edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
}
static int qede_set_real_num_queues(struct qede_dev *edev)
@@ -2466,11 +2830,12 @@ static int qede_stop_queues(struct qede_dev *edev)
static int qede_start_queues(struct qede_dev *edev)
{
int rc, tc, i;
- int vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1;
+ int vlan_removal_en = 1;
struct qed_dev *cdev = edev->cdev;
struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
struct qed_update_vport_params vport_update_params;
struct qed_queue_start_common_params q_params;
+ struct qed_start_vport_params start = {0};
if (!edev->num_rss) {
DP_ERR(edev,
@@ -2478,10 +2843,13 @@ static int qede_start_queues(struct qede_dev *edev)
return -EINVAL;
}
- rc = edev->ops->vport_start(cdev, vport_id,
- edev->ndev->mtu,
- drop_ttl0_flg,
- vlan_removal_en);
+ start.gro_enable = !edev->gro_disable;
+ start.mtu = edev->ndev->mtu;
+ start.vport_id = 0;
+ start.drop_ttl0 = true;
+ start.remove_inner_vlan = vlan_removal_en;
+
+ rc = edev->ops->vport_start(cdev, &start);
if (rc) {
DP_ERR(edev, "Start V-PORT failed %d\n", rc);
@@ -2490,7 +2858,7 @@ static int qede_start_queues(struct qede_dev *edev)
DP_VERBOSE(edev, NETIF_MSG_IFUP,
"Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
- vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
+ start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
for_each_rss(i) {
struct qede_fastpath *fp = &edev->fp_array[i];
@@ -2555,7 +2923,7 @@ static int qede_start_queues(struct qede_dev *edev)
/* Prepare and send the vport enable */
memset(&vport_update_params, 0, sizeof(vport_update_params));
- vport_update_params.vport_id = vport_id;
+ vport_update_params.vport_id = start.vport_id;
vport_update_params.update_vport_active_flg = 1;
vport_update_params.vport_active_flg = 1;
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 689a4a5c8dcf..1ef03939d25f 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -811,7 +811,7 @@ qcaspi_netdev_setup(struct net_device *dev)
dev->netdev_ops = &qcaspi_netdev_ops;
qcaspi_set_ethtool_ops(dev);
dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
- dev->flags = IFF_MULTICAST;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->tx_queue_len = 100;
qca = netdev_priv(dev);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 537974cfd427..dd2cf3738b73 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4933,8 +4933,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40:
- RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
- break;
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
@@ -4943,8 +4941,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_46:
case RTL_GIGA_MAC_VER_47:
case RTL_GIGA_MAC_VER_48:
- RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
- break;
case RTL_GIGA_MAC_VER_49:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
@@ -7730,10 +7726,13 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
struct rtl8169_counters *counters = tp->counters;
unsigned int start;
- if (netif_running(dev))
+ pm_runtime_get_noresume(&pdev->dev);
+
+ if (netif_running(dev) && pm_runtime_active(&pdev->dev))
rtl8169_rx_missed(dev, ioaddr);
do {
@@ -7761,7 +7760,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
* Fetch additional counter values missing in stats collected by the driver
* from tally counters.
*/
- rtl8169_update_counters(dev);
+ if (pm_runtime_active(&pdev->dev))
+ rtl8169_update_counters(dev);
/*
* Subtract values fetched during initialization.
@@ -7774,6 +7774,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
le16_to_cpu(tp->tc_offset.tx_aborted);
+ pm_runtime_put_noidle(&pdev->dev);
+
return stats;
}
@@ -7853,6 +7855,10 @@ static int rtl8169_runtime_suspend(struct device *device)
rtl8169_net_suspend(dev);
+ /* Update counters before entering runtime suspend */
+ rtl8169_rx_missed(dev, tp->mmio_addr);
+ rtl8169_update_counters(dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 270c4c9cac7f..4f132cf177cd 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_RENESAS
config SH_ETH
tristate "Renesas SuperH Ethernet support"
depends on HAS_DMA
- depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
+ depends on ARCH_RENESAS || SUPERH || COMPILE_TEST
select CRC32
select MII
select MDIO_BITBANG
@@ -32,7 +32,7 @@ config SH_ETH
config RAVB
tristate "Renesas Ethernet AVB support"
depends on HAS_DMA
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select CRC32
select MII
select MDIO_BITBANG
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 88656ceb6e29..8f2c4fb4c724 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1708,7 +1708,6 @@ static int ravb_set_gti(struct net_device *ndev)
static int ravb_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *match;
struct ravb_private *priv;
enum ravb_chip_id chip_id;
struct net_device *ndev;
@@ -1740,8 +1739,7 @@ static int ravb_probe(struct platform_device *pdev)
ndev->base_addr = res->start;
ndev->dma = -1;
- match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev);
- chip_id = (enum ravb_chip_id)match->data;
+ chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);
if (chip_id == RCAR_GEN3)
irq = platform_get_irq_byname(pdev, "ch22");
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a2767336b7c5..9c6448915b65 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3044,15 +3044,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
mdp->ether_link_active_low = pd->ether_link_active_low;
/* set cpu data */
- if (id) {
+ if (id)
mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
- } else {
- const struct of_device_id *match;
+ else
+ mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
- match = of_match_device(of_match_ptr(sh_eth_match_table),
- &pdev->dev);
- mdp->cd = (struct sh_eth_cpu_data *)match->data;
- }
mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
if (!mdp->reg_offset) {
dev_err(&pdev->dev, "Unknown register type (%d)\n",
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 2cdb5718ed66..233778911557 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -571,7 +571,7 @@ int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
unsigned tc, num_tc;
int rc;
- if (handle != TC_H_ROOT || ntc->type != TC_SETUP_MQPRIO)
+ if (ntc->type != TC_SETUP_MQPRIO)
return -EINVAL;
num_tc = ntc->tc;
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index cf28daba4346..b3e669af3005 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -31,8 +31,7 @@
static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_priv *priv = (struct stmmac_priv *)p;
- unsigned int txsize = priv->dma_tx_size;
- unsigned int entry = priv->cur_tx % txsize;
+ unsigned int entry = priv->cur_tx;
struct dma_desc *desc = priv->dma_tx + entry;
unsigned int nopaged_len = skb_headlen(skb);
unsigned int bmax;
@@ -50,11 +49,14 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
if (dma_mapping_error(priv->device, desc->des2))
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
- priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
+ priv->tx_skbuff_dma[entry].len = bmax;
+ /* do not close the descriptor and do not set the own bit */
+ priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
+ 0, false);
while (len != 0) {
priv->tx_skbuff[entry] = NULL;
- entry = (++priv->cur_tx) % txsize;
+ entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
desc = priv->dma_tx + entry;
if (len > bmax) {
@@ -64,9 +66,10 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
if (dma_mapping_error(priv->device, desc->des2))
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].len = bmax;
priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
- STMMAC_CHAIN_MODE);
- priv->hw->desc->set_tx_owner(desc);
+ STMMAC_CHAIN_MODE, 1,
+ false);
len -= bmax;
i++;
} else {
@@ -76,12 +79,17 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
if (dma_mapping_error(priv->device, desc->des2))
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].len = len;
+ /* last descriptor can be set now */
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
- STMMAC_CHAIN_MODE);
- priv->hw->desc->set_tx_owner(desc);
+ STMMAC_CHAIN_MODE, 1,
+ true);
len = 0;
}
}
+
+ priv->cur_tx = entry;
+
return entry;
}
@@ -138,23 +146,24 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
*/
p->des3 = (unsigned int)(priv->dma_rx_phy +
(((priv->dirty_rx) + 1) %
- priv->dma_rx_size) *
+ DMA_RX_SIZE) *
sizeof(struct dma_desc));
}
static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
{
struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+ unsigned int entry = priv->dirty_tx;
- if (priv->hw->desc->get_tx_ls(p) && !priv->extend_desc)
+ if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
+ priv->hwts_tx_en)
/* NOTE: Device will overwrite des3 with timestamp value if
* 1588-2002 time stamping is enabled, hence reinitialize it
* to keep explicit chaining in the descriptor.
*/
- p->des3 = (unsigned int)(priv->dma_tx_phy +
- (((priv->dirty_tx + 1) %
- priv->dma_tx_size) *
- sizeof(struct dma_desc)));
+ p->des3 = (unsigned int)(priv->dma_tx_phy +
+ ((priv->dirty_tx + 1) % DMA_TX_SIZE) *
+ sizeof(struct dma_desc));
}
const struct stmmac_mode_ops chain_mode_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 1e19c8fd8b82..f96d257308b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -27,6 +27,7 @@
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
+#include <linux/stmmac.h>
#include <linux/phy.h>
#include <linux/module.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
@@ -41,6 +42,10 @@
#define DWMAC_CORE_3_40 0x34
#define DWMAC_CORE_3_50 0x35
+#define DMA_TX_SIZE 512
+#define DMA_RX_SIZE 512
+#define STMMAC_GET_ENTRY(x, size) (((x) + 1) & ((size) - 1))
+
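STMMAC_GET_ENTRY() is a power-of-two ring advance: with size 512, (511 + 1) & 511 wraps to 0 without a modulo. This works only because DMA_TX_SIZE and DMA_RX_SIZE are powers of two; a non-power-of-two size would silently corrupt the index.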
#undef FRAME_FILTER_DEBUG
/* #define FRAME_FILTER_DEBUG */
@@ -95,7 +100,7 @@ struct stmmac_extra_stats {
unsigned long napi_poll;
unsigned long tx_normal_irq_n;
unsigned long tx_clean;
- unsigned long tx_reset_ic_bit;
+ unsigned long tx_set_ic_bit;
unsigned long irq_receive_pmt_irq_n;
/* MMC info */
unsigned long mmc_tx_irq_n;
@@ -233,10 +238,19 @@ struct stmmac_extra_stats {
/* Rx IPC status */
enum rx_frame_status {
- good_frame = 0,
- discard_frame = 1,
- csum_none = 2,
- llc_snap = 4,
+ good_frame = 0x0,
+ discard_frame = 0x1,
+ csum_none = 0x2,
+ llc_snap = 0x4,
+ dma_own = 0x8,
+};
+
+/* Tx status */
+enum tx_frame_status {
+ tx_done = 0x0,
+ tx_not_ls = 0x1,
+ tx_err = 0x2,
+ tx_dma_own = 0x4,
};
enum dma_irq_status {
@@ -332,17 +346,16 @@ struct stmmac_desc_ops {
/* Invoked by the xmit function to prepare the tx descriptor */
void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
- int csum_flag, int mode);
+ bool csum_flag, int mode, bool tx_own,
+ bool ls);
/* Set/get the owner of the descriptor */
void (*set_tx_owner) (struct dma_desc *p);
int (*get_tx_owner) (struct dma_desc *p);
- /* Invoked by the xmit function to close the tx descriptor */
- void (*close_tx_desc) (struct dma_desc *p);
/* Clean the tx descriptor as soon as the tx irq is received */
void (*release_tx_desc) (struct dma_desc *p, int mode);
/* Clear interrupt on tx frame completion. When this bit is
* set an interrupt happens as soon as the frame is transmitted */
- void (*clear_tx_ic) (struct dma_desc *p);
+ void (*set_tx_ic)(struct dma_desc *p);
/* Last tx segment reports the transmit status */
int (*get_tx_ls) (struct dma_desc *p);
/* Return the transmit status looking at the TDES1 */
@@ -351,7 +364,6 @@ struct stmmac_desc_ops {
/* Get the buffer size from the descriptor */
int (*get_tx_len) (struct dma_desc *p);
/* Handle extra events on specific interrupts hw dependent */
- int (*get_rx_owner) (struct dma_desc *p);
void (*set_rx_owner) (struct dma_desc *p);
/* Get the receive frame size */
int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type);
@@ -376,8 +388,11 @@ extern const struct stmmac_desc_ops ndesc_ops;
/* Specific DMA helpers */
struct stmmac_dma_ops {
/* DMA core initialization */
- int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb,
- int burst_len, u32 dma_tx, u32 dma_rx, int atds);
+ int (*reset)(void __iomem *ioaddr);
+ void (*init)(void __iomem *ioaddr, int pbl, int fb, int mb,
+ int aal, u32 dma_tx, u32 dma_rx, int atds);
+ /* Configure the AXI Bus Mode Register */
+ void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
/* Dump DMA registers */
void (*dump_regs) (void __iomem *ioaddr);
/* Set tx/rx threshold in the csr6 register
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 799c2929c536..2e4c171a2b41 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -1,6 +1,6 @@
/*******************************************************************************
- Header File to describe the DMA descriptors.
- Enhanced descriptors have been in case of DWMAC1000 Cores.
+ Header File to describe the DMA descriptors and related definitions.
+ This is for DWMAC100 and 1000 cores.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -24,198 +24,164 @@
#ifndef __DESCS_H__
#define __DESCS_H__
+#include <linux/bitops.h>
+
+/* Normal receive descriptor defines */
+
+/* RDES0 */
+#define RDES0_PAYLOAD_CSUM_ERR BIT(0)
+#define RDES0_CRC_ERROR BIT(1)
+#define RDES0_DRIBBLING BIT(2)
+#define RDES0_MII_ERROR BIT(3)
+#define RDES0_RECEIVE_WATCHDOG BIT(4)
+#define RDES0_FRAME_TYPE BIT(5)
+#define RDES0_COLLISION BIT(6)
+#define RDES0_IPC_CSUM_ERROR BIT(7)
+#define RDES0_LAST_DESCRIPTOR BIT(8)
+#define RDES0_FIRST_DESCRIPTOR BIT(9)
+#define RDES0_VLAN_TAG BIT(10)
+#define RDES0_OVERFLOW_ERROR BIT(11)
+#define RDES0_LENGTH_ERROR BIT(12)
+#define RDES0_SA_FILTER_FAIL BIT(13)
+#define RDES0_DESCRIPTOR_ERROR BIT(14)
+#define RDES0_ERROR_SUMMARY BIT(15)
+#define RDES0_FRAME_LEN_MASK GENMASK(29, 16)
+#define RDES0_FRAME_LEN_SHIFT 16
+#define RDES0_DA_FILTER_FAIL BIT(30)
+#define RDES0_OWN BIT(31)
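A hedged usage sketch for the RDES0 macros above (the helper name is illustrative, not part of the driver):

    /* Extract the received frame length from a normal Rx descriptor. */
    static inline int rdes0_frame_len(const struct dma_desc *p)
    {
            return (p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT;
    }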
+/* RDES1 */
+#define RDES1_BUFFER1_SIZE_MASK GENMASK(10, 0)
+#define RDES1_BUFFER2_SIZE_MASK GENMASK(21, 11)
+#define RDES1_BUFFER2_SIZE_SHIFT 11
+#define RDES1_SECOND_ADDRESS_CHAINED BIT(24)
+#define RDES1_END_RING BIT(25)
+#define RDES1_DISABLE_IC BIT(31)
+
+/* Enhanced receive descriptor defines */
+
+/* RDES0 (similar to normal RDES) */
+#define ERDES0_RX_MAC_ADDR BIT(0)
+
+/* RDES1: completely differs from the normal descriptor definitions */
+#define ERDES1_BUFFER1_SIZE_MASK GENMASK(12, 0)
+#define ERDES1_SECOND_ADDRESS_CHAINED BIT(14)
+#define ERDES1_END_RING BIT(15)
+#define ERDES1_BUFFER2_SIZE_MASK GENMASK(28, 16)
+#define ERDES1_BUFFER2_SIZE_SHIFT 16
+#define ERDES1_DISABLE_IC BIT(31)
+
+/* Normal transmit descriptor defines */
+/* TDES0 */
+#define TDES0_DEFERRED BIT(0)
+#define TDES0_UNDERFLOW_ERROR BIT(1)
+#define TDES0_EXCESSIVE_DEFERRAL BIT(2)
+#define TDES0_COLLISION_COUNT_MASK GENMASK(6, 3)
+#define TDES0_VLAN_FRAME BIT(7)
+#define TDES0_EXCESSIVE_COLLISIONS BIT(8)
+#define TDES0_LATE_COLLISION BIT(9)
+#define TDES0_NO_CARRIER BIT(10)
+#define TDES0_LOSS_CARRIER BIT(11)
+#define TDES0_PAYLOAD_ERROR BIT(12)
+#define TDES0_FRAME_FLUSHED BIT(13)
+#define TDES0_JABBER_TIMEOUT BIT(14)
+#define TDES0_ERROR_SUMMARY BIT(15)
+#define TDES0_IP_HEADER_ERROR BIT(16)
+#define TDES0_TIME_STAMP_STATUS BIT(17)
+#define TDES0_OWN BIT(31)
+/* TDES1 */
+#define TDES1_BUFFER1_SIZE_MASK GENMASK(10, 0)
+#define TDES1_BUFFER2_SIZE_MASK GENMASK(21, 11)
+#define TDES1_BUFFER2_SIZE_SHIFT 11
+#define TDES1_TIME_STAMP_ENABLE BIT(22)
+#define TDES1_DISABLE_PADDING BIT(23)
+#define TDES1_SECOND_ADDRESS_CHAINED BIT(24)
+#define TDES1_END_RING BIT(25)
+#define TDES1_CRC_DISABLE BIT(26)
+#define TDES1_CHECKSUM_INSERTION_MASK GENMASK(28, 27)
+#define TDES1_CHECKSUM_INSERTION_SHIFT 27
+#define TDES1_FIRST_SEGMENT BIT(29)
+#define TDES1_LAST_SEGMENT BIT(30)
+#define TDES1_INTERRUPT BIT(31)
+
+/* Enhanced transmit descriptor defines */
+/* TDES0 */
+#define ETDES0_DEFERRED BIT(0)
+#define ETDES0_UNDERFLOW_ERROR BIT(1)
+#define ETDES0_EXCESSIVE_DEFERRAL BIT(2)
+#define ETDES0_COLLISION_COUNT_MASK GENMASK(6, 3)
+#define ETDES0_VLAN_FRAME BIT(7)
+#define ETDES0_EXCESSIVE_COLLISIONS BIT(8)
+#define ETDES0_LATE_COLLISION BIT(9)
+#define ETDES0_NO_CARRIER BIT(10)
+#define ETDES0_LOSS_CARRIER BIT(11)
+#define ETDES0_PAYLOAD_ERROR BIT(12)
+#define ETDES0_FRAME_FLUSHED BIT(13)
+#define ETDES0_JABBER_TIMEOUT BIT(14)
+#define ETDES0_ERROR_SUMMARY BIT(15)
+#define ETDES0_IP_HEADER_ERROR BIT(16)
+#define ETDES0_TIME_STAMP_STATUS BIT(17)
+#define ETDES0_SECOND_ADDRESS_CHAINED BIT(20)
+#define ETDES0_END_RING BIT(21)
+#define ETDES0_CHECKSUM_INSERTION_MASK GENMASK(23, 22)
+#define ETDES0_CHECKSUM_INSERTION_SHIFT 22
+#define ETDES0_TIME_STAMP_ENABLE BIT(25)
+#define ETDES0_DISABLE_PADDING BIT(26)
+#define ETDES0_CRC_DISABLE BIT(27)
+#define ETDES0_FIRST_SEGMENT BIT(28)
+#define ETDES0_LAST_SEGMENT BIT(29)
+#define ETDES0_INTERRUPT BIT(30)
+#define ETDES0_OWN BIT(31)
+/* TDES1 */
+#define ETDES1_BUFFER1_SIZE_MASK GENMASK(12, 0)
+#define ETDES1_BUFFER2_SIZE_MASK GENMASK(28, 16)
+#define ETDES1_BUFFER2_SIZE_SHIFT 16
+
+/* Extended Receive descriptor definitions */
+#define ERDES4_IP_PAYLOAD_TYPE_MASK GENMASK(2, 0)
+#define ERDES4_IP_HDR_ERR BIT(3)
+#define ERDES4_IP_PAYLOAD_ERR BIT(4)
+#define ERDES4_IP_CSUM_BYPASSED BIT(5)
+#define ERDES4_IPV4_PKT_RCVD BIT(6)
+#define ERDES4_IPV6_PKT_RCVD BIT(7)
+#define ERDES4_MSG_TYPE_MASK GENMASK(11, 8)
+#define ERDES4_PTP_FRAME_TYPE BIT(12)
+#define ERDES4_PTP_VER BIT(13)
+#define ERDES4_TIMESTAMP_DROPPED BIT(14)
+#define ERDES4_AV_PKT_RCVD BIT(16)
+#define ERDES4_AV_TAGGED_PKT_RCVD BIT(17)
+#define ERDES4_VLAN_TAG_PRI_VAL_MASK GENMASK(20, 18)
+#define ERDES4_L3_FILTER_MATCH BIT(24)
+#define ERDES4_L4_FILTER_MATCH BIT(25)
+#define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26)
+
+/* Extended RDES4 message type definitions */
+#define RDES_EXT_NO_PTP 0
+#define RDES_EXT_SYNC 1
+#define RDES_EXT_FOLLOW_UP 2
+#define RDES_EXT_DELAY_REQ 3
+#define RDES_EXT_DELAY_RESP 4
+#define RDES_EXT_PDELAY_REQ 5
+#define RDES_EXT_PDELAY_RESP 6
+#define RDES_EXT_PDELAY_FOLLOW_UP 7
+
/* Basic descriptor structure for normal and alternate descriptors */
struct dma_desc {
- /* Receive descriptor */
- union {
- struct {
- /* RDES0 */
- u32 payload_csum_error:1;
- u32 crc_error:1;
- u32 dribbling:1;
- u32 mii_error:1;
- u32 receive_watchdog:1;
- u32 frame_type:1;
- u32 collision:1;
- u32 ipc_csum_error:1;
- u32 last_descriptor:1;
- u32 first_descriptor:1;
- u32 vlan_tag:1;
- u32 overflow_error:1;
- u32 length_error:1;
- u32 sa_filter_fail:1;
- u32 descriptor_error:1;
- u32 error_summary:1;
- u32 frame_length:14;
- u32 da_filter_fail:1;
- u32 own:1;
- /* RDES1 */
- u32 buffer1_size:11;
- u32 buffer2_size:11;
- u32 reserved1:2;
- u32 second_address_chained:1;
- u32 end_ring:1;
- u32 reserved2:5;
- u32 disable_ic:1;
-
- } rx;
- struct {
- /* RDES0 */
- u32 rx_mac_addr:1;
- u32 crc_error:1;
- u32 dribbling:1;
- u32 error_gmii:1;
- u32 receive_watchdog:1;
- u32 frame_type:1;
- u32 late_collision:1;
- u32 ipc_csum_error:1;
- u32 last_descriptor:1;
- u32 first_descriptor:1;
- u32 vlan_tag:1;
- u32 overflow_error:1;
- u32 length_error:1;
- u32 sa_filter_fail:1;
- u32 descriptor_error:1;
- u32 error_summary:1;
- u32 frame_length:14;
- u32 da_filter_fail:1;
- u32 own:1;
- /* RDES1 */
- u32 buffer1_size:13;
- u32 reserved1:1;
- u32 second_address_chained:1;
- u32 end_ring:1;
- u32 buffer2_size:13;
- u32 reserved2:2;
- u32 disable_ic:1;
- } erx; /* -- enhanced -- */
-
- /* Transmit descriptor */
- struct {
- /* TDES0 */
- u32 deferred:1;
- u32 underflow_error:1;
- u32 excessive_deferral:1;
- u32 collision_count:4;
- u32 vlan_frame:1;
- u32 excessive_collisions:1;
- u32 late_collision:1;
- u32 no_carrier:1;
- u32 loss_carrier:1;
- u32 payload_error:1;
- u32 frame_flushed:1;
- u32 jabber_timeout:1;
- u32 error_summary:1;
- u32 ip_header_error:1;
- u32 time_stamp_status:1;
- u32 reserved1:13;
- u32 own:1;
- /* TDES1 */
- u32 buffer1_size:11;
- u32 buffer2_size:11;
- u32 time_stamp_enable:1;
- u32 disable_padding:1;
- u32 second_address_chained:1;
- u32 end_ring:1;
- u32 crc_disable:1;
- u32 checksum_insertion:2;
- u32 first_segment:1;
- u32 last_segment:1;
- u32 interrupt:1;
- } tx;
- struct {
- /* TDES0 */
- u32 deferred:1;
- u32 underflow_error:1;
- u32 excessive_deferral:1;
- u32 collision_count:4;
- u32 vlan_frame:1;
- u32 excessive_collisions:1;
- u32 late_collision:1;
- u32 no_carrier:1;
- u32 loss_carrier:1;
- u32 payload_error:1;
- u32 frame_flushed:1;
- u32 jabber_timeout:1;
- u32 error_summary:1;
- u32 ip_header_error:1;
- u32 time_stamp_status:1;
- u32 reserved1:2;
- u32 second_address_chained:1;
- u32 end_ring:1;
- u32 checksum_insertion:2;
- u32 reserved2:1;
- u32 time_stamp_enable:1;
- u32 disable_padding:1;
- u32 crc_disable:1;
- u32 first_segment:1;
- u32 last_segment:1;
- u32 interrupt:1;
- u32 own:1;
- /* TDES1 */
- u32 buffer1_size:13;
- u32 reserved3:3;
- u32 buffer2_size:13;
- u32 reserved4:3;
- } etx; /* -- enhanced -- */
-
- u64 all_flags;
- } des01;
+ unsigned int des0;
+ unsigned int des1;
unsigned int des2;
unsigned int des3;
};
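The rewrite replaces the compiler-dependent C bitfields with explicit mask/shift macros; descriptor access becomes plain integer arithmetic whose layout is visible in the source. A sketch of the two styles (illustrative):

    /* Old bitfield style, layout left to the compiler:
     *      p->des01.rx.own = 1;
     * New explicit style, layout spelled out by the macros:
     *      p->des0 |= RDES0_OWN;
     */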
-/* Extended descriptor structure (supported by new SYNP GMAC generations) */
+/* Extended descriptor structure (supported by cores from databook 3.50a on) */
struct dma_extended_desc {
- struct dma_desc basic;
- union {
- struct {
- u32 ip_payload_type:3;
- u32 ip_hdr_err:1;
- u32 ip_payload_err:1;
- u32 ip_csum_bypassed:1;
- u32 ipv4_pkt_rcvd:1;
- u32 ipv6_pkt_rcvd:1;
- u32 msg_type:4;
- u32 ptp_frame_type:1;
- u32 ptp_ver:1;
- u32 timestamp_dropped:1;
- u32 reserved:1;
- u32 av_pkt_rcvd:1;
- u32 av_tagged_pkt_rcvd:1;
- u32 vlan_tag_priority_val:3;
- u32 reserved3:3;
- u32 l3_filter_match:1;
- u32 l4_filter_match:1;
- u32 l3_l4_filter_no_match:2;
- u32 reserved4:4;
- } erx;
- struct {
- u32 reserved;
- } etx;
- } des4;
+ struct dma_desc basic; /* Basic descriptors */
+ unsigned int des4; /* Extended Status */
unsigned int des5; /* Reserved */
unsigned int des6; /* Tx/Rx Timestamp Low */
unsigned int des7; /* Tx/Rx Timestamp High */
};
/* Transmit checksum insertion control */
-enum tdes_csum_insertion {
- cic_disabled = 0, /* Checksum Insertion Control */
- cic_only_ip = 1, /* Only IP header */
- /* IP header but pseudoheader is not calculated */
- cic_no_pseudoheader = 2,
- cic_full = 3, /* IP header and pseudoheader */
-};
-
-/* Extended RDES4 definitions */
-#define RDES_EXT_NO_PTP 0
-#define RDES_EXT_SYNC 0x1
-#define RDES_EXT_FOLLOW_UP 0x2
-#define RDES_EXT_DELAY_REQ 0x3
-#define RDES_EXT_DELAY_RESP 0x4
-#define RDES_EXT_PDELAY_REQ 0x5
-#define RDES_EXT_PDELAY_RESP 0x6
-#define RDES_EXT_PDELAY_FOLLOW_UP 0x7
+#define TX_CIC_FULL 3 /* Include IP header and pseudoheader */
#endif /* __DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 6f2cc78c5cf5..7635a464ce41 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -35,100 +35,91 @@
/* Enhanced descriptors */
static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
{
- p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
- if (end)
- p->des01.erx.end_ring = 1;
-}
+ p->des1 |= ((BUF_SIZE_8KiB - 1) << ERDES1_BUFFER2_SIZE_SHIFT)
+ & ERDES1_BUFFER2_SIZE_MASK;
-static inline void ehn_desc_tx_set_on_ring(struct dma_desc *p, int end)
-{
if (end)
- p->des01.etx.end_ring = 1;
+ p->des1 |= ERDES1_END_RING;
}
-static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
+static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end)
{
- p->des01.etx.end_ring = ter;
+ if (end)
+ p->des0 |= ETDES0_END_RING;
+ else
+ p->des0 &= ~ETDES0_END_RING;
}
static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
if (unlikely(len > BUF_SIZE_4KiB)) {
- p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
- p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
+ p->des1 |= (((len - BUF_SIZE_4KiB) << ETDES1_BUFFER2_SIZE_SHIFT)
+ & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB
+ & ETDES1_BUFFER1_SIZE_MASK);
} else
- p->des01.etx.buffer1_size = len;
+ p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
}
/* Normal descriptors */
static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
{
- p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1;
- if (end)
- p->des01.rx.end_ring = 1;
-}
+ p->des1 |= ((BUF_SIZE_2KiB - 1) << RDES1_BUFFER2_SIZE_SHIFT)
+ & RDES1_BUFFER2_SIZE_MASK;
-static inline void ndesc_tx_set_on_ring(struct dma_desc *p, int end)
-{
if (end)
- p->des01.tx.end_ring = 1;
+ p->des1 |= RDES1_END_RING;
}
-static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
+static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end)
{
- p->des01.tx.end_ring = ter;
+ if (end)
+ p->des1 |= TDES1_END_RING;
+ else
+ p->des1 &= ~TDES1_END_RING;
}
static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
if (unlikely(len > BUF_SIZE_2KiB)) {
- p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1;
- p->des01.etx.buffer2_size = len - p->des01.etx.buffer1_size;
+ unsigned int buffer1 = (BUF_SIZE_2KiB - 1)
+ & TDES1_BUFFER1_SIZE_MASK;
+ p->des1 |= ((((len - buffer1) << TDES1_BUFFER2_SIZE_SHIFT)
+ & TDES1_BUFFER2_SIZE_MASK) | buffer1);
} else
- p->des01.tx.buffer1_size = len;
+ p->des1 |= (len & TDES1_BUFFER1_SIZE_MASK);
}
/* Specific functions used for Chain mode */
/* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p, int end)
-{
- p->des01.erx.second_address_chained = 1;
-}
-
-static inline void ehn_desc_tx_set_on_chain(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p)
{
- p->des01.etx.second_address_chained = 1;
+ p->des1 |= ERDES1_SECOND_ADDRESS_CHAINED;
}
-static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
+static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p)
{
- p->des01.etx.second_address_chained = 1;
+ p->des0 |= ETDES0_SECOND_ADDRESS_CHAINED;
}
static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
{
- p->des01.etx.buffer1_size = len;
+ p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
}
/* Normal descriptors */
static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
{
- p->des01.rx.second_address_chained = 1;
-}
-
-static inline void ndesc_tx_set_on_chain(struct dma_desc *p, int ring_size)
-{
- p->des01.tx.second_address_chained = 1;
+ p->des1 |= RDES1_SECOND_ADDRESS_CHAINED;
}
-static inline void ndesc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
+static inline void ndesc_tx_set_on_chain(struct dma_desc *p)
{
- p->des01.tx.second_address_chained = 1;
+ p->des1 |= TDES1_SECOND_ADDRESS_CHAINED;
}
static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
{
- p->des01.tx.buffer1_size = len;
+ p->des1 |= len & TDES1_BUFFER1_SIZE_MASK;
}
#endif /* __DESC_COM_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
index 2ec6aeae349e..1657acfa70c2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
@@ -95,7 +95,6 @@
#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */
-#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
#define DMA_BUS_MODE_DEFAULT 0x00000000
/* DMA Control register defines */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 8831a053ac13..b0593a4268ee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -221,7 +221,6 @@ enum inter_frame_gap {
/*--- DMA BLOCK defines ---*/
/* DMA Bus Mode register defines */
-#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */
#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
@@ -241,7 +240,7 @@ enum rx_tx_priority_ratio {
#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
#define DMA_BUS_MODE_RPBL_SHIFT 17
#define DMA_BUS_MODE_USP 0x00800000
-#define DMA_BUS_MODE_PBL 0x01000000
+#define DMA_BUS_MODE_MAXPBL 0x01000000
#define DMA_BUS_MODE_AAL 0x02000000
/* DMA CRS Control and Status Register Mapping */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 0e8937c1184a..da32d6037e3e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -30,37 +30,76 @@
#include "dwmac1000.h"
#include "dwmac_dma.h"
-static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
- int burst_len, u32 dma_tx, u32 dma_rx, int atds)
+static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
- u32 value = readl(ioaddr + DMA_BUS_MODE);
- int limit;
+ u32 value = readl(ioaddr + DMA_AXI_BUS_MODE);
+ int i;
- /* DMA SW reset */
- value |= DMA_BUS_MODE_SFT_RESET;
- writel(value, ioaddr + DMA_BUS_MODE);
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ pr_info("dwmac1000: Master AXI performs %s bursts\n",
+ !(value & DMA_AXI_UNDEF) ? "fixed" : "any");
+
+ if (axi->axi_lpi_en)
+ value |= DMA_AXI_EN_LPI;
+ if (axi->axi_xit_frm)
+ value |= DMA_AXI_LPI_XIT_FRM;
+
+ value |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) <<
+ DMA_AXI_WR_OSR_LMT_SHIFT;
+
+ value |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) <<
+ DMA_AXI_RD_OSR_LMT_SHIFT;
+
+ /* Depending on the UNDEF bit, the AXI master performs any burst
+ * length according to the BLEN bits programmed (by default all
+ * BLEN bits are set).
+ */
+ for (i = 0; i < AXI_BLEN; i++) {
+ switch (axi->axi_blen[i]) {
+ case 256:
+ value |= DMA_AXI_BLEN256;
break;
- mdelay(10);
+ case 128:
+ value |= DMA_AXI_BLEN128;
+ break;
+ case 64:
+ value |= DMA_AXI_BLEN64;
+ break;
+ case 32:
+ value |= DMA_AXI_BLEN32;
+ break;
+ case 16:
+ value |= DMA_AXI_BLEN16;
+ break;
+ case 8:
+ value |= DMA_AXI_BLEN8;
+ break;
+ case 4:
+ value |= DMA_AXI_BLEN4;
+ break;
+ }
}
- if (limit < 0)
- return -EBUSY;
+
+ writel(value, ioaddr + DMA_AXI_BUS_MODE);
+}
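The loop above translates the platform-provided burst-length list into individual BLEN bits; zero or unrecognized entries simply set nothing. For instance (values illustrative): axi->axi_blen = { 256, 64, 16, 0, 0, 0, 0 } would OR in DMA_AXI_BLEN256 | DMA_AXI_BLEN64 | DMA_AXI_BLEN16, and whether other burst lengths are still permitted then depends on the UNDEF bit reported in the pr_info above.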
+
+static void dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+ int aal, u32 dma_tx, u32 dma_rx, int atds)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
/*
- * Set the DMA PBL (Programmable Burst Length) mode
- * Before stmmac core 3.50 this mode bit was 4xPBL, and
+ * Set the DMA PBL (Programmable Burst Length) mode.
+ *
+ * Note: before stmmac core 3.50 this mode bit was 4xPBL, and
* post 3.5 mode bit acts as 8*PBL.
- * For core rev < 3.5, when the core is set for 4xPBL mode, the
- * DMA transfers the data in 4, 8, 16, 32, 64 & 128 beats
- * depending on pbl value.
- * For core rev > 3.5, when the core is set for 8xPBL mode, the
- * DMA transfers the data in 8, 16, 32, 64, 128 & 256 beats
- * depending on pbl value.
+ *
+ * This configuration doesn't handle the separate (Rx/Tx) PBL, so only
+ * bits 13:8 are programmed with the PBL passed from the platform.
*/
- value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
- (pbl << DMA_BUS_MODE_RPBL_SHIFT));
+ value |= DMA_BUS_MODE_MAXPBL;
+ value &= ~DMA_BUS_MODE_PBL_MASK;
+ value |= (pbl << DMA_BUS_MODE_PBL_SHIFT);
/* Set the Fixed burst mode */
if (fb)
@@ -73,26 +112,10 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
if (atds)
value |= DMA_BUS_MODE_ATDS;
- writel(value, ioaddr + DMA_BUS_MODE);
+ if (aal)
+ value |= DMA_BUS_MODE_AAL;
- /* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE
- * for supported bursts.
- *
- * Note: This is applicable only for revision GMACv3.61a. For
- * older version this register is reserved and shall have no
- * effect.
- *
- * Note:
- * For Fixed Burst Mode: if we directly write 0xFF to this
- * register using the configurations pass from platform code,
- * this would ensure that all bursts supported by core are set
- * and those which are not supported would remain ineffective.
- *
- * For Non Fixed Burst Mode: provide the maximum value of the
- * burst length. Any burst equal or below the provided burst
- * length would be allowed to perform.
- */
- writel(burst_len, ioaddr + DMA_AXI_BUS_MODE);
+ writel(value, ioaddr + DMA_BUS_MODE);
/* Mask interrupts by writing to CSR7 */
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
@@ -102,8 +125,6 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
*/
writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
-
- return 0;
}
static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
@@ -205,7 +226,9 @@ static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
}
const struct stmmac_dma_ops dwmac1000_dma_ops = {
+ .reset = dwmac_dma_reset,
.init = dwmac1000_dma_init,
+ .axi = dwmac1000_dma_axi,
.dump_regs = dwmac1000_dump_dma_regs,
.dma_mode = dwmac1000_dma_operation_mode,
.enable_dma_transmission = dwmac_enable_dma_transmission,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 9d0971c1c2ee..61f54c99a7de 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -32,24 +32,9 @@
#include "dwmac100.h"
#include "dwmac_dma.h"
-static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
- int burst_len, u32 dma_tx, u32 dma_rx, int atds)
+static void dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+ int aal, u32 dma_tx, u32 dma_rx, int atds)
{
- u32 value = readl(ioaddr + DMA_BUS_MODE);
- int limit;
-
- /* DMA SW reset */
- value |= DMA_BUS_MODE_SFT_RESET;
- writel(value, ioaddr + DMA_BUS_MODE);
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
- break;
- mdelay(10);
- }
- if (limit < 0)
- return -EBUSY;
-
/* Enable Application Access by writing to DMA CSR0 */
writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
ioaddr + DMA_BUS_MODE);
@@ -62,8 +47,6 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
*/
writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
-
- return 0;
}
/* Store and Forward capability is not used at all.
@@ -131,6 +114,7 @@ static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
}
const struct stmmac_dma_ops dwmac100_dma_ops = {
+ .reset = dwmac_dma_reset,
.init = dwmac100_dma_init,
.dump_regs = dwmac100_dump_dma_regs,
.dma_mode = dwmac100_dma_operation_mode,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index def266da55db..726d9d9aaf83 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -35,10 +35,46 @@
#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+
+/* SW Reset */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+
/* Rx watchdog register */
#define DMA_RX_WATCHDOG 0x00001024
-/* AXI Bus Mode */
+
+/* AXI Master Bus Mode */
#define DMA_AXI_BUS_MODE 0x00001028
+
+#define DMA_AXI_EN_LPI BIT(31)
+#define DMA_AXI_LPI_XIT_FRM BIT(30)
+#define DMA_AXI_WR_OSR_LMT GENMASK(23, 20)
+#define DMA_AXI_WR_OSR_LMT_SHIFT 20
+#define DMA_AXI_WR_OSR_LMT_MASK 0xf
+#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16)
+#define DMA_AXI_RD_OSR_LMT_SHIFT 16
+#define DMA_AXI_RD_OSR_LMT_MASK 0xf
+
+#define DMA_AXI_OSR_MAX 0xf
+#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
+ (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
+#define DMA_AXI_1KBBE BIT(13)
+#define DMA_AXI_AAL BIT(12)
+#define DMA_AXI_BLEN256 BIT(7)
+#define DMA_AXI_BLEN128 BIT(6)
+#define DMA_AXI_BLEN64 BIT(5)
+#define DMA_AXI_BLEN32 BIT(4)
+#define DMA_AXI_BLEN16 BIT(3)
+#define DMA_AXI_BLEN8 BIT(2)
+#define DMA_AXI_BLEN4 BIT(1)
+#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
+ DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
+ DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
+ DMA_AXI_BLEN4)
+
+#define DMA_AXI_UNDEF BIT(0)
+
+#define DMA_AXI_BURST_LEN_MASK 0x000000FE
+
#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */
@@ -112,5 +148,6 @@ void dwmac_dma_stop_tx(void __iomem *ioaddr);
void dwmac_dma_start_rx(void __iomem *ioaddr);
void dwmac_dma_stop_rx(void __iomem *ioaddr);
int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
+int dwmac_dma_reset(void __iomem *ioaddr);
#endif /* __DWMAC_DMA_H__ */
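
The per-length BLEN bits above are normally derived from a platform-supplied list of allowed AXI burst lengths (the stmmac.txt binding update elsewhere in this series documents the knobs). A minimal sketch of the folding, reusing only the defines above; the helper name and list representation are hypothetical:

        #include <linux/types.h>

        /* Fold a list of allowed AXI burst lengths (in beats) into a mask of
         * DMA_AXI_BLEN* bits; values the core cannot do are silently skipped.
         */
        static u32 axi_blen_to_mask(const u32 *blen, int n)
        {
                u32 mask = 0;
                int i;

                for (i = 0; i < n; i++) {
                        switch (blen[i]) {
                        case 256: mask |= DMA_AXI_BLEN256; break;
                        case 128: mask |= DMA_AXI_BLEN128; break;
                        case 64:  mask |= DMA_AXI_BLEN64;  break;
                        case 32:  mask |= DMA_AXI_BLEN32;  break;
                        case 16:  mask |= DMA_AXI_BLEN16;  break;
                        case 8:   mask |= DMA_AXI_BLEN8;   break;
                        case 4:   mask |= DMA_AXI_BLEN4;   break;
                        }
                }
                return mask;
        }
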
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 484e3cf9c414..84e3e84cec7d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -26,6 +26,27 @@
#define GMAC_HI_REG_AE 0x80000000
+int dwmac_dma_reset(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ int limit;
+
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ break;
+ mdelay(10);
+ }
+
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
/* CSR1 enables the transmit DMA to check for new descriptor */
void dwmac_enable_dma_transmission(void __iomem *ioaddr)
{
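
The new dwmac_dma_reset() above open-codes a bounded poll: set SFT_RESET, then re-read DMA_BUS_MODE up to ten times, 10 ms apart, until the hardware clears the bit. A functionally equivalent sketch using the generic iopoll helper (not what the patch does, and it returns -ETIMEDOUT instead of -EBUSY):

        #include <linux/iopoll.h>

        static int dwmac_dma_reset_alt(void __iomem *ioaddr)
        {
                u32 value = readl(ioaddr + DMA_BUS_MODE);

                writel(value | DMA_BUS_MODE_SFT_RESET, ioaddr + DMA_BUS_MODE);

                /* poll every 10ms, give up after ~100ms, like the loop above */
                return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
                                          !(value & DMA_BUS_MODE_SFT_RESET),
                                          10000, 100000);
        }
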
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 7d944449f5ef..cfb018c7c5eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -1,7 +1,7 @@
/*******************************************************************************
This contains the functions to handle the enhanced descriptors.
- Copyright (C) 2007-2009 STMicroelectronics Ltd
+ Copyright (C) 2007-2014 STMicroelectronics Ltd
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -29,56 +29,64 @@
static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
- int ret = 0;
struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int tdes0 = p->des0;
+ int ret = tx_done;
- if (unlikely(p->des01.etx.error_summary)) {
- if (unlikely(p->des01.etx.jabber_timeout))
+ /* Get tx owner first */
+ if (unlikely(tdes0 & ETDES0_OWN))
+ return tx_dma_own;
+
+ /* Verify tx error by looking at the last segment. */
+ if (likely(!(tdes0 & ETDES0_LAST_SEGMENT)))
+ return tx_not_ls;
+
+ if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) {
+ if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT))
x->tx_jabber++;
- if (unlikely(p->des01.etx.frame_flushed)) {
+ if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) {
x->tx_frame_flushed++;
dwmac_dma_flush_tx_fifo(ioaddr);
}
- if (unlikely(p->des01.etx.loss_carrier)) {
+ if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) {
x->tx_losscarrier++;
stats->tx_carrier_errors++;
}
- if (unlikely(p->des01.etx.no_carrier)) {
+ if (unlikely(tdes0 & ETDES0_NO_CARRIER)) {
x->tx_carrier++;
stats->tx_carrier_errors++;
}
- if (unlikely(p->des01.etx.late_collision))
- stats->collisions += p->des01.etx.collision_count;
-
- if (unlikely(p->des01.etx.excessive_collisions))
- stats->collisions += p->des01.etx.collision_count;
+ if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
+ (tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
+ stats->collisions +=
+ (tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;
- if (unlikely(p->des01.etx.excessive_deferral))
+ if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
x->tx_deferred++;
- if (unlikely(p->des01.etx.underflow_error)) {
+ if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) {
dwmac_dma_flush_tx_fifo(ioaddr);
x->tx_underflow++;
}
- if (unlikely(p->des01.etx.ip_header_error))
+ if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR))
x->tx_ip_header_error++;
- if (unlikely(p->des01.etx.payload_error)) {
+ if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) {
x->tx_payload_error++;
dwmac_dma_flush_tx_fifo(ioaddr);
}
- ret = -1;
+ ret = tx_err;
}
- if (unlikely(p->des01.etx.deferred))
+ if (unlikely(tdes0 & ETDES0_DEFERRED))
x->tx_deferred++;
#ifdef STMMAC_VLAN_TAG_USED
- if (p->des01.etx.vlan_frame)
+ if (tdes0 & ETDES0_VLAN_FRAME)
x->tx_vlan++;
#endif
@@ -87,7 +95,7 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
static int enh_desc_get_tx_len(struct dma_desc *p)
{
- return p->des01.etx.buffer1_size;
+ return (p->des1 & ETDES1_BUFFER1_SIZE_MASK);
}
static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
@@ -126,50 +134,55 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
struct dma_extended_desc *p)
{
- if (unlikely(p->basic.des01.erx.rx_mac_addr)) {
- if (p->des4.erx.ip_hdr_err)
+ unsigned int rdes0 = p->basic.des0;
+ unsigned int rdes4 = p->des4;
+
+ if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
+ int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;
+
+ if (rdes4 & ERDES4_IP_HDR_ERR)
x->ip_hdr_err++;
- if (p->des4.erx.ip_payload_err)
+ if (rdes4 & ERDES4_IP_PAYLOAD_ERR)
x->ip_payload_err++;
- if (p->des4.erx.ip_csum_bypassed)
+ if (rdes4 & ERDES4_IP_CSUM_BYPASSED)
x->ip_csum_bypassed++;
- if (p->des4.erx.ipv4_pkt_rcvd)
+ if (rdes4 & ERDES4_IPV4_PKT_RCVD)
x->ipv4_pkt_rcvd++;
- if (p->des4.erx.ipv6_pkt_rcvd)
+ if (rdes4 & ERDES4_IPV6_PKT_RCVD)
x->ipv6_pkt_rcvd++;
- if (p->des4.erx.msg_type == RDES_EXT_SYNC)
+ if (message_type == RDES_EXT_SYNC)
x->rx_msg_type_sync++;
- else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP)
+ else if (message_type == RDES_EXT_FOLLOW_UP)
x->rx_msg_type_follow_up++;
- else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
+ else if (message_type == RDES_EXT_DELAY_REQ)
x->rx_msg_type_delay_req++;
- else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
+ else if (message_type == RDES_EXT_DELAY_RESP)
x->rx_msg_type_delay_resp++;
- else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
+ else if (message_type == RDES_EXT_PDELAY_REQ)
x->rx_msg_type_pdelay_req++;
- else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
+ else if (message_type == RDES_EXT_PDELAY_RESP)
x->rx_msg_type_pdelay_resp++;
- else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP)
+ else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
x->rx_msg_type_pdelay_follow_up++;
else
x->rx_msg_type_ext_no_ptp++;
- if (p->des4.erx.ptp_frame_type)
+ if (rdes4 & ERDES4_PTP_FRAME_TYPE)
x->ptp_frame_type++;
- if (p->des4.erx.ptp_ver)
+ if (rdes4 & ERDES4_PTP_VER)
x->ptp_ver++;
- if (p->des4.erx.timestamp_dropped)
+ if (rdes4 & ERDES4_TIMESTAMP_DROPPED)
x->timestamp_dropped++;
- if (p->des4.erx.av_pkt_rcvd)
+ if (rdes4 & ERDES4_AV_PKT_RCVD)
x->av_pkt_rcvd++;
- if (p->des4.erx.av_tagged_pkt_rcvd)
+ if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD)
x->av_tagged_pkt_rcvd++;
- if (p->des4.erx.vlan_tag_priority_val)
+ if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18)
x->vlan_tag_priority_val++;
- if (p->des4.erx.l3_filter_match)
+ if (rdes4 & ERDES4_L3_FILTER_MATCH)
x->l3_filter_match++;
- if (p->des4.erx.l4_filter_match)
+ if (rdes4 & ERDES4_L4_FILTER_MATCH)
x->l4_filter_match++;
- if (p->des4.erx.l3_l4_filter_no_match)
+ if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26)
x->l3_l4_filter_no_match++;
}
}
@@ -177,30 +190,33 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p)
{
- int ret = good_frame;
struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int rdes0 = p->des0;
+ int ret = good_frame;
+
+ if (unlikely(rdes0 & RDES0_OWN))
+ return dma_own;
- if (unlikely(p->des01.erx.error_summary)) {
- if (unlikely(p->des01.erx.descriptor_error)) {
+ if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
+ if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
x->rx_desc++;
stats->rx_length_errors++;
}
- if (unlikely(p->des01.erx.overflow_error))
+ if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
x->rx_gmac_overflow++;
- if (unlikely(p->des01.erx.ipc_csum_error))
+ if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
pr_err("\tIPC Csum Error/Giant frame\n");
- if (unlikely(p->des01.erx.late_collision)) {
+ if (unlikely(rdes0 & RDES0_COLLISION))
stats->collisions++;
- }
- if (unlikely(p->des01.erx.receive_watchdog))
+ if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG))
x->rx_watchdog++;
- if (unlikely(p->des01.erx.error_gmii))
+ if (unlikely(rdes0 & RDES0_MII_ERROR)) /* GMII */
x->rx_mii++;
- if (unlikely(p->des01.erx.crc_error)) {
+ if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
x->rx_crc++;
stats->rx_crc_errors++;
}
@@ -211,26 +227,27 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
* It doesn't match with the information reported into the databook.
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
- ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
- p->des01.erx.frame_type, p->des01.erx.rx_mac_addr);
+ ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+ !!(rdes0 & RDES0_FRAME_TYPE),
+ !!(rdes0 & ERDES0_RX_MAC_ADDR));
- if (unlikely(p->des01.erx.dribbling))
+ if (unlikely(rdes0 & RDES0_DRIBBLING))
x->dribbling_bit++;
- if (unlikely(p->des01.erx.sa_filter_fail)) {
+ if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) {
x->sa_rx_filter_fail++;
ret = discard_frame;
}
- if (unlikely(p->des01.erx.da_filter_fail)) {
+ if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) {
x->da_rx_filter_fail++;
ret = discard_frame;
}
- if (unlikely(p->des01.erx.length_error)) {
+ if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
x->rx_length++;
ret = discard_frame;
}
#ifdef STMMAC_VLAN_TAG_USED
- if (p->des01.erx.vlan_tag)
+ if (rdes0 & RDES0_VLAN_TAG)
x->rx_vlan++;
#endif
@@ -240,110 +257,125 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
int mode, int end)
{
- p->des01.all_flags = 0;
- p->des01.erx.own = 1;
- p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ p->des0 |= RDES0_OWN;
+ p->des1 |= ((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
- ehn_desc_rx_set_on_chain(p, end);
+ ehn_desc_rx_set_on_chain(p);
else
ehn_desc_rx_set_on_ring(p, end);
if (disable_rx_ic)
- p->des01.erx.disable_ic = 1;
+ p->des1 |= ERDES1_DISABLE_IC;
}
static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
- p->des01.all_flags = 0;
+ p->des0 &= ~ETDES0_OWN;
if (mode == STMMAC_CHAIN_MODE)
- ehn_desc_tx_set_on_chain(p, end);
+ enh_desc_end_tx_desc_on_chain(p);
else
- ehn_desc_tx_set_on_ring(p, end);
+ enh_desc_end_tx_desc_on_ring(p, end);
}
static int enh_desc_get_tx_owner(struct dma_desc *p)
{
- return p->des01.etx.own;
-}
-
-static int enh_desc_get_rx_owner(struct dma_desc *p)
-{
- return p->des01.erx.own;
+ return (p->des0 & ETDES0_OWN) >> 31;
}
static void enh_desc_set_tx_owner(struct dma_desc *p)
{
- p->des01.etx.own = 1;
+ p->des0 |= ETDES0_OWN;
}
static void enh_desc_set_rx_owner(struct dma_desc *p)
{
- p->des01.erx.own = 1;
+ p->des0 |= RDES0_OWN;
}
static int enh_desc_get_tx_ls(struct dma_desc *p)
{
- return p->des01.etx.last_segment;
+ return (p->des0 & ETDES0_LAST_SEGMENT) >> 29;
}
static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
- int ter = p->des01.etx.end_ring;
+ int ter = (p->des0 & ETDES0_END_RING) >> 21;
memset(p, 0, offsetof(struct dma_desc, des2));
if (mode == STMMAC_CHAIN_MODE)
- enh_desc_end_tx_desc_on_chain(p, ter);
+ enh_desc_end_tx_desc_on_chain(p);
else
enh_desc_end_tx_desc_on_ring(p, ter);
}
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag, int mode)
+ bool csum_flag, int mode, bool tx_own,
+ bool ls)
{
- p->des01.etx.first_segment = is_fs;
+ unsigned int tdes0 = p->des0;
if (mode == STMMAC_CHAIN_MODE)
enh_set_tx_desc_len_on_chain(p, len);
else
enh_set_tx_desc_len_on_ring(p, len);
+ if (is_fs)
+ tdes0 |= ETDES0_FIRST_SEGMENT;
+ else
+ tdes0 &= ~ETDES0_FIRST_SEGMENT;
+
if (likely(csum_flag))
- p->des01.etx.checksum_insertion = cic_full;
-}
+ tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
+ else
+ tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
-static void enh_desc_clear_tx_ic(struct dma_desc *p)
-{
- p->des01.etx.interrupt = 0;
+ if (ls)
+ tdes0 |= ETDES0_LAST_SEGMENT;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes0 |= ETDES0_OWN;
+
+ if (is_fs && tx_own)
+ /* When the OWN bit for the first frame has to be set, all
+ * descriptors for the same frame must have been set up
+ * before it, to avoid a race condition.
+ */
+ wmb();
+
+ p->des0 = tdes0;
}
-static void enh_desc_close_tx_desc(struct dma_desc *p)
+static void enh_desc_set_tx_ic(struct dma_desc *p)
{
- p->des01.etx.last_segment = 1;
- p->des01.etx.interrupt = 1;
+ p->des0 |= ETDES0_INTERRUPT;
}
static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
+ unsigned int csum = 0;
/* The type-1 checksum offload engines append the checksum at
* the end of frame and the two bytes of checksum are added in
* the length.
* Adjust for that in the framelen for type-1 checksum offload
- * engines. */
+ * engines.
+ */
if (rx_coe_type == STMMAC_RX_COE_TYPE1)
- return p->des01.erx.frame_length - 2;
- else
- return p->des01.erx.frame_length;
+ csum = 2;
+
+ return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) -
+ csum);
}
static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
{
- p->des01.etx.time_stamp_enable = 1;
+ p->des0 |= ETDES0_TIME_STAMP_ENABLE;
}
static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
{
- return p->des01.etx.time_stamp_status;
+ return (p->des0 & ETDES0_TIME_STAMP_STATUS) >> 17;
}
static u64 enh_desc_get_timestamp(void *desc, u32 ats)
@@ -368,7 +400,7 @@ static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
{
if (ats) {
struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
- return p->basic.des01.erx.ipc_csum_error;
+ return (p->basic.des0 & RDES0_IPC_CSUM_ERROR) >> 7;
} else {
struct dma_desc *p = (struct dma_desc *)desc;
if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
@@ -386,11 +418,9 @@ const struct stmmac_desc_ops enh_desc_ops = {
.init_rx_desc = enh_desc_init_rx_desc,
.init_tx_desc = enh_desc_init_tx_desc,
.get_tx_owner = enh_desc_get_tx_owner,
- .get_rx_owner = enh_desc_get_rx_owner,
.release_tx_desc = enh_desc_release_tx_desc,
.prepare_tx_desc = enh_desc_prepare_tx_desc,
- .clear_tx_ic = enh_desc_clear_tx_ic,
- .close_tx_desc = enh_desc_close_tx_desc,
+ .set_tx_ic = enh_desc_set_tx_ic,
.get_tx_ls = enh_desc_get_tx_ls,
.set_tx_owner = enh_desc_set_tx_owner,
.set_rx_owner = enh_desc_set_rx_owner,
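
The tx_done, tx_not_ls, tx_err and tx_dma_own codes returned by the reworked get_tx_status() are defined in common.h, in a hunk this excerpt does not include. Since the callers test them with a bitwise AND, they are presumably flag values along these lines (a sketch, not the verbatim definition):

        enum tx_frame_status {
                tx_done    = 0x0,       /* descriptor cleanly completed */
                tx_not_ls  = 0x1,       /* not the last segment: no stats yet */
                tx_err     = 0x2,       /* the error summary bit was set */
                tx_dma_own = 0x4,       /* still owned by the DMA: stop walking */
        };
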
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 48c3456445b2..e13228f115f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -29,33 +29,47 @@
static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
- int ret = 0;
struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int tdes0 = p->des0;
+ unsigned int tdes1 = p->des1;
+ int ret = tx_done;
- if (unlikely(p->des01.tx.error_summary)) {
- if (unlikely(p->des01.tx.underflow_error)) {
+ /* Get tx owner first */
+ if (unlikely(tdes0 & TDES0_OWN))
+ return tx_dma_own;
+
+ /* Verify tx error by looking at the last segment. */
+ if (likely(!(tdes1 & TDES1_LAST_SEGMENT)))
+ return tx_not_ls;
+
+ if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) {
+ if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) {
x->tx_underflow++;
stats->tx_fifo_errors++;
}
- if (unlikely(p->des01.tx.no_carrier)) {
+ if (unlikely(tdes0 & TDES0_NO_CARRIER)) {
x->tx_carrier++;
stats->tx_carrier_errors++;
}
- if (unlikely(p->des01.tx.loss_carrier)) {
+ if (unlikely(tdes0 & TDES0_LOSS_CARRIER)) {
x->tx_losscarrier++;
stats->tx_carrier_errors++;
}
- if (unlikely((p->des01.tx.excessive_deferral) ||
- (p->des01.tx.excessive_collisions) ||
- (p->des01.tx.late_collision)))
- stats->collisions += p->des01.tx.collision_count;
- ret = -1;
+ if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) ||
+ (tdes0 & TDES0_EXCESSIVE_COLLISIONS) ||
+ (tdes0 & TDES0_LATE_COLLISION))) {
+ unsigned int collisions;
+
+ collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3;
+ stats->collisions += collisions;
+ }
+ ret = tx_err;
}
- if (p->des01.etx.vlan_frame)
+ if (tdes0 & TDES0_VLAN_FRAME)
x->tx_vlan++;
- if (unlikely(p->des01.tx.deferred))
+ if (unlikely(tdes0 & TDES0_DEFERRED))
x->tx_deferred++;
return ret;
@@ -63,7 +77,7 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
static int ndesc_get_tx_len(struct dma_desc *p)
{
- return p->des01.tx.buffer1_size;
+ return (p->des1 & RDES1_BUFFER1_SIZE_MASK);
}
/* This function verifies if each incoming frame has some errors
@@ -74,47 +88,51 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p)
{
int ret = good_frame;
+ unsigned int rdes0 = p->des0;
struct net_device_stats *stats = (struct net_device_stats *)data;
- if (unlikely(p->des01.rx.last_descriptor == 0)) {
+ if (unlikely(rdes0 & RDES0_OWN))
+ return dma_own;
+
+ if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
pr_warn("%s: Oversized frame spanned multiple buffers\n",
__func__);
stats->rx_length_errors++;
return discard_frame;
}
- if (unlikely(p->des01.rx.error_summary)) {
- if (unlikely(p->des01.rx.descriptor_error))
+ if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
+ if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR))
x->rx_desc++;
- if (unlikely(p->des01.rx.sa_filter_fail))
+ if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL))
x->sa_filter_fail++;
- if (unlikely(p->des01.rx.overflow_error))
+ if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
x->overflow_error++;
- if (unlikely(p->des01.rx.ipc_csum_error))
+ if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
x->ipc_csum_error++;
- if (unlikely(p->des01.rx.collision)) {
+ if (unlikely(rdes0 & RDES0_COLLISION)) {
x->rx_collision++;
stats->collisions++;
}
- if (unlikely(p->des01.rx.crc_error)) {
+ if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
x->rx_crc++;
stats->rx_crc_errors++;
}
ret = discard_frame;
}
- if (unlikely(p->des01.rx.dribbling))
+ if (unlikely(rdes0 & RDES0_DRIBBLING))
x->dribbling_bit++;
- if (unlikely(p->des01.rx.length_error)) {
+ if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
x->rx_length++;
ret = discard_frame;
}
- if (unlikely(p->des01.rx.mii_error)) {
+ if (unlikely(rdes0 & RDES0_MII_ERROR)) {
x->rx_mii++;
ret = discard_frame;
}
#ifdef STMMAC_VLAN_TAG_USED
- if (p->des01.rx.vlan_tag)
+ if (rdes0 & RDES0_VLAN_TAG)
x->vlan_tag++;
#endif
return ret;
@@ -123,9 +141,8 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
int end)
{
- p->des01.all_flags = 0;
- p->des01.rx.own = 1;
- p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ p->des0 |= RDES0_OWN;
+ p->des1 |= (BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK;
if (mode == STMMAC_CHAIN_MODE)
ndesc_rx_set_on_chain(p, end);
@@ -133,99 +150,110 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
ndesc_rx_set_on_ring(p, end);
if (disable_rx_ic)
- p->des01.rx.disable_ic = 1;
+ p->des1 |= RDES1_DISABLE_IC;
}
static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
- p->des01.all_flags = 0;
+ p->des0 &= ~TDES0_OWN;
if (mode == STMMAC_CHAIN_MODE)
- ndesc_tx_set_on_chain(p, end);
+ ndesc_tx_set_on_chain(p);
else
- ndesc_tx_set_on_ring(p, end);
+ ndesc_end_tx_desc_on_ring(p, end);
}
static int ndesc_get_tx_owner(struct dma_desc *p)
{
- return p->des01.tx.own;
-}
-
-static int ndesc_get_rx_owner(struct dma_desc *p)
-{
- return p->des01.rx.own;
+ return (p->des0 & TDES0_OWN) >> 31;
}
static void ndesc_set_tx_owner(struct dma_desc *p)
{
- p->des01.tx.own = 1;
+ p->des0 |= TDES0_OWN;
}
static void ndesc_set_rx_owner(struct dma_desc *p)
{
- p->des01.rx.own = 1;
+ p->des0 |= RDES0_OWN;
}
static int ndesc_get_tx_ls(struct dma_desc *p)
{
- return p->des01.tx.last_segment;
+ return (p->des1 & TDES1_LAST_SEGMENT) >> 30;
}
static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
{
- int ter = p->des01.tx.end_ring;
+ int ter = (p->des1 & TDES1_END_RING) >> 25;
memset(p, 0, offsetof(struct dma_desc, des2));
if (mode == STMMAC_CHAIN_MODE)
- ndesc_end_tx_desc_on_chain(p, ter);
+ ndesc_tx_set_on_chain(p);
else
ndesc_end_tx_desc_on_ring(p, ter);
}
static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag, int mode)
+ bool csum_flag, int mode, bool tx_own,
+ bool ls)
{
- p->des01.tx.first_segment = is_fs;
+ unsigned int tdes1 = p->des1;
+
if (mode == STMMAC_CHAIN_MODE)
norm_set_tx_desc_len_on_chain(p, len);
else
norm_set_tx_desc_len_on_ring(p, len);
+ if (is_fs)
+ tdes1 |= TDES1_FIRST_SEGMENT;
+ else
+ tdes1 &= ~TDES1_FIRST_SEGMENT;
+
if (likely(csum_flag))
- p->des01.tx.checksum_insertion = cic_full;
-}
+ tdes1 |= (TX_CIC_FULL) << TDES1_CHECKSUM_INSERTION_SHIFT;
+ else
+ tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT);
-static void ndesc_clear_tx_ic(struct dma_desc *p)
-{
- p->des01.tx.interrupt = 0;
+ if (ls)
+ tdes1 |= TDES1_LAST_SEGMENT;
+
+ p->des1 = tdes1;
+
+ if (tx_own)
+ /* The OWN bit lives in des0 for normal descriptors (see
+ * ndesc_get_tx_owner above), and must be set after des1.
+ */
+ p->des0 |= TDES0_OWN;
}
-static void ndesc_close_tx_desc(struct dma_desc *p)
+static void ndesc_set_tx_ic(struct dma_desc *p)
{
- p->des01.tx.last_segment = 1;
- p->des01.tx.interrupt = 1;
+ p->des1 |= TDES1_INTERRUPT;
}
static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
+ unsigned int csum = 0;
+
/* The type-1 checksum offload engines append the checksum at
* the end of frame and the two bytes of checksum are added in
* the length.
* Adjust for that in the framelen for type-1 checksum offload
- * engines. */
+ * engines.
+ */
if (rx_coe_type == STMMAC_RX_COE_TYPE1)
- return p->des01.rx.frame_length - 2;
- else
- return p->des01.rx.frame_length;
+ csum = 2;
+
+ return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) -
+ csum);
}
static void ndesc_enable_tx_timestamp(struct dma_desc *p)
{
- p->des01.tx.time_stamp_enable = 1;
+ p->des1 |= TDES1_TIME_STAMP_ENABLE;
}
static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
{
- return p->des01.tx.time_stamp_status;
+ return (p->des0 & TDES0_TIME_STAMP_STATUS) >> 17;
}
static u64 ndesc_get_timestamp(void *desc, u32 ats)
@@ -258,11 +286,9 @@ const struct stmmac_desc_ops ndesc_ops = {
.init_rx_desc = ndesc_init_rx_desc,
.init_tx_desc = ndesc_init_tx_desc,
.get_tx_owner = ndesc_get_tx_owner,
- .get_rx_owner = ndesc_get_rx_owner,
.release_tx_desc = ndesc_release_tx_desc,
.prepare_tx_desc = ndesc_prepare_tx_desc,
- .clear_tx_ic = ndesc_clear_tx_ic,
- .close_tx_desc = ndesc_close_tx_desc,
+ .set_tx_ic = ndesc_set_tx_ic,
.get_tx_ls = ndesc_get_tx_ls,
.set_tx_owner = ndesc_set_tx_owner,
.set_rx_owner = ndesc_set_rx_owner,
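
Both descriptor flavours now read des0/des1 as plain 32-bit words and test bits with explicit masks, instead of going through the old des01 bitfield union. The reason is portability: bitfield layout is implementation-defined, so the union pinned the driver to one compiler/endianness combination, while a mask names the exact register bit. A minimal sketch of the pattern, assuming the kernel's BIT() helper (DESC_OWN is a hypothetical stand-in for TDES0_OWN):

        #include <linux/bitops.h>
        #include <linux/types.h>

        #define DESC_OWN        BIT(31)

        static inline int desc_get_own(u32 des0)
        {
                return !!(des0 & DESC_OWN);     /* bit position fixed by the mask */
        }

        static inline u32 desc_set_own(u32 des0)
        {
                return des0 | DESC_OWN;
        }
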
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 5dd50c6cda5b..7723b5d2499a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -31,8 +31,7 @@
static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_priv *priv = (struct stmmac_priv *)p;
- unsigned int txsize = priv->dma_tx_size;
- unsigned int entry = priv->cur_tx % txsize;
+ unsigned int entry = priv->cur_tx;
struct dma_desc *desc;
unsigned int nopaged_len = skb_headlen(skb);
unsigned int bmax, len;
@@ -57,12 +56,14 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].len = bmax;
+ priv->tx_skbuff_dma[entry].is_jumbo = true;
+
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
- STMMAC_RING_MODE);
- wmb();
+ STMMAC_RING_MODE, 0, false);
priv->tx_skbuff[entry] = NULL;
- entry = (++priv->cur_tx) % txsize;
+ entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
if (priv->extend_desc)
desc = (struct dma_desc *)(priv->dma_etx + entry);
@@ -74,22 +75,27 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
if (dma_mapping_error(priv->device, desc->des2))
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].len = len;
+ priv->tx_skbuff_dma[entry].is_jumbo = true;
+
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
- STMMAC_RING_MODE);
- wmb();
- priv->hw->desc->set_tx_owner(desc);
+ STMMAC_RING_MODE, 1, true);
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, desc->des2))
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].len = nopaged_len;
+ priv->tx_skbuff_dma[entry].is_jumbo = true;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
- STMMAC_RING_MODE);
+ STMMAC_RING_MODE, 0, true);
}
+ priv->cur_tx = entry;
+
return entry;
}
@@ -120,7 +126,13 @@ static void stmmac_init_desc3(struct dma_desc *p)
static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
{
- if (unlikely(p->des3))
+ struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+ unsigned int entry = priv->dirty_tx;
+
+ /* des3 is only used for jumbo frame TX or timestamping */
+ if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo ||
+ (priv->tx_skbuff_dma[entry].last_segment &&
+ !priv->extend_desc && priv->hwts_tx_en)))
p->des3 = 0;
}
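
This file, and the main driver below, advance ring indices with STMMAC_GET_ENTRY(x, size) instead of the old modulo on a runtime ring size. The macro is added in stmmac.h, in a hunk not shown here; presumably a masked increment, which is why the new fixed DMA_TX_SIZE/DMA_RX_SIZE must be powers of two:

        /* Assumed shape of the helper (the actual definition is not in this
         * excerpt): wrap x + 1 back to 0 once it reaches size.
         */
        #define STMMAC_GET_ENTRY(x, size)       (((x) + 1) & ((size) - 1))
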
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 1f3b33a6c6a8..8bbab97895fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -24,7 +24,7 @@
#define __STMMAC_H__
#define STMMAC_RESOURCE_NAME "stmmaceth"
-#define DRV_MODULE_VERSION "March_2013"
+#define DRV_MODULE_VERSION "Oct_2015"
#include <linux/clk.h>
#include <linux/stmmac.h>
@@ -45,6 +45,9 @@ struct stmmac_resources {
struct stmmac_tx_info {
dma_addr_t buf;
bool map_as_page;
+ unsigned len;
+ bool last_segment;
+ bool is_jumbo;
};
struct stmmac_priv {
@@ -54,7 +57,6 @@ struct stmmac_priv {
struct sk_buff **tx_skbuff;
unsigned int cur_tx;
unsigned int dirty_tx;
- unsigned int dma_tx_size;
u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
@@ -71,8 +73,9 @@ struct stmmac_priv {
struct sk_buff **rx_skbuff;
unsigned int cur_rx;
unsigned int dirty_rx;
- unsigned int dma_rx_size;
unsigned int dma_buf_sz;
+ unsigned int rx_copybreak;
+ unsigned int rx_zeroc_thresh;
u32 rx_riwt;
int hwts_rx_en;
dma_addr_t *rx_skbuff_dma;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 4c6486cc80fb..3c7928edfebb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -97,7 +97,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(napi_poll),
STMMAC_STAT(tx_normal_irq_n),
STMMAC_STAT(tx_clean),
- STMMAC_STAT(tx_reset_ic_bit),
+ STMMAC_STAT(tx_set_ic_bit),
STMMAC_STAT(irq_receive_pmt_irq_n),
/* MMC info */
STMMAC_STAT(mmc_tx_irq_n),
@@ -781,6 +781,43 @@ static int stmmac_get_ts_info(struct net_device *dev,
return ethtool_op_get_ts_info(dev, info);
}
+static int stmmac_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = priv->rx_copybreak;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int stmmac_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ priv->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static const struct ethtool_ops stmmac_ethtool_ops = {
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
@@ -803,6 +840,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_ts_info = stmmac_get_ts_info,
.get_coalesce = stmmac_get_coalesce,
.set_coalesce = stmmac_set_coalesce,
+ .get_tunable = stmmac_get_tunable,
+ .set_tunable = stmmac_set_tunable,
};
void stmmac_set_ethtool_ops(struct net_device *netdev)
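
The two tunable callbacks plug rx_copybreak into the generic ethtool tunable interface, so the copy/zero-copy threshold can be read and changed at runtime; with a new enough ethtool binary, something like "ethtool --set-tunable eth0 rx-copybreak 512" (device name illustrative) should take effect on the next received frames, and "ethtool --get-tunable eth0 rx-copybreak" reads it back.
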
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c21015b68097..4c5ce9848ca9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -71,15 +71,8 @@ static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");
-#define DMA_TX_SIZE 256
-static int dma_txsize = DMA_TX_SIZE;
-module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
-
-#define DMA_RX_SIZE 256
-static int dma_rxsize = DMA_RX_SIZE;
-module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
+#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
+#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
@@ -99,6 +92,8 @@ static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
+#define STMMAC_RX_COPYBREAK 256
+
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
@@ -134,10 +129,6 @@ static void stmmac_verify_args(void)
{
if (unlikely(watchdog < 0))
watchdog = TX_TIMEO;
- if (unlikely(dma_rxsize < 0))
- dma_rxsize = DMA_RX_SIZE;
- if (unlikely(dma_txsize < 0))
- dma_txsize = DMA_TX_SIZE;
if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
buf_sz = DEFAULT_BUFSIZE;
if (unlikely(flow_ctrl > 1))
@@ -197,12 +188,28 @@ static void print_pkt(unsigned char *buf, int len)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
-/* minimum number of free TX descriptors required to wake up TX process */
-#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4)
-
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
- return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
+ unsigned avail;
+
+ if (priv->dirty_tx > priv->cur_tx)
+ avail = priv->dirty_tx - priv->cur_tx - 1;
+ else
+ avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;
+
+ return avail;
+}
+
+static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
+{
+ unsigned dirty;
+
+ if (priv->dirty_rx <= priv->cur_rx)
+ dirty = priv->cur_rx - priv->dirty_rx;
+ else
+ dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;
+
+ return dirty;
}
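
cur_tx/dirty_tx (and the RX pair) are now free-running ring indices, so the helpers above must handle wraparound explicitly. A self-contained check of both branches of the TX computation, assuming a ring of 512 descriptors (the DMA_TX_SIZE value is set in a hunk not shown here):

        #include <assert.h>

        int main(void)
        {
                unsigned size = 512, dirty, cur, avail;

                dirty = 300; cur = 100;                 /* dirty ahead: no wrap */
                avail = dirty - cur - 1;
                assert(avail == 199);

                dirty = 10; cur = 500;                  /* cur wrapped past dirty */
                avail = size - cur + dirty - 1;
                assert(avail == 21);
                return 0;
        }
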
/**
@@ -862,6 +869,12 @@ static int stmmac_init_phy(struct net_device *dev)
phy_disconnect(phydev);
return -ENODEV;
}
+
+ /* If attached to a switch, there is no reason to poll the PHY handler */
+ if (priv->plat->phy_bus_name)
+ if (!strcmp(priv->plat->phy_bus_name, "fixed"))
+ phydev->irq = PHY_IGNORE_INTERRUPT;
+
pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
" Link = %d\n", dev->name, phydev->phy_id, phydev->link);
@@ -906,19 +919,16 @@ static void stmmac_display_ring(void *head, int size, int extend_desc)
static void stmmac_display_rings(struct stmmac_priv *priv)
{
- unsigned int txsize = priv->dma_tx_size;
- unsigned int rxsize = priv->dma_rx_size;
-
if (priv->extend_desc) {
pr_info("Extended RX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
+ stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1);
pr_info("Extended TX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
+ stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1);
} else {
pr_info("RX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
+ stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0);
pr_info("TX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
+ stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0);
}
}
@@ -947,28 +957,26 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
int i;
- unsigned int txsize = priv->dma_tx_size;
- unsigned int rxsize = priv->dma_rx_size;
/* Clear the Rx/Tx descriptors */
- for (i = 0; i < rxsize; i++)
+ for (i = 0; i < DMA_RX_SIZE; i++)
if (priv->extend_desc)
priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == rxsize - 1));
+ (i == DMA_RX_SIZE - 1));
else
priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == rxsize - 1));
- for (i = 0; i < txsize; i++)
+ (i == DMA_RX_SIZE - 1));
+ for (i = 0; i < DMA_TX_SIZE; i++)
if (priv->extend_desc)
priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
priv->mode,
- (i == txsize - 1));
+ (i == DMA_TX_SIZE - 1));
else
priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
priv->mode,
- (i == txsize - 1));
+ (i == DMA_TX_SIZE - 1));
}
/**
@@ -1031,8 +1039,6 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
int i;
struct stmmac_priv *priv = netdev_priv(dev);
- unsigned int txsize = priv->dma_tx_size;
- unsigned int rxsize = priv->dma_rx_size;
unsigned int bfsize = 0;
int ret = -ENOMEM;
@@ -1044,10 +1050,6 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
priv->dma_buf_sz = bfsize;
- if (netif_msg_probe(priv))
- pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__,
- txsize, rxsize, bfsize);
-
if (netif_msg_probe(priv)) {
pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
(u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1055,7 +1057,7 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
/* RX INITIALIZATION */
pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
}
- for (i = 0; i < rxsize; i++) {
+ for (i = 0; i < DMA_RX_SIZE; i++) {
struct dma_desc *p;
if (priv->extend_desc)
p = &((priv->dma_erx + i)->basic);
@@ -1072,26 +1074,26 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
(unsigned int)priv->rx_skbuff_dma[i]);
}
priv->cur_rx = 0;
- priv->dirty_rx = (unsigned int)(i - rxsize);
+ priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
buf_sz = bfsize;
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc) {
priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
- rxsize, 1);
+ DMA_RX_SIZE, 1);
priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
- txsize, 1);
+ DMA_TX_SIZE, 1);
} else {
priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
- rxsize, 0);
+ DMA_RX_SIZE, 0);
priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
- txsize, 0);
+ DMA_TX_SIZE, 0);
}
}
/* TX INITIALIZATION */
- for (i = 0; i < txsize; i++) {
+ for (i = 0; i < DMA_TX_SIZE; i++) {
struct dma_desc *p;
if (priv->extend_desc)
p = &((priv->dma_etx + i)->basic);
@@ -1100,6 +1102,8 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
p->des2 = 0;
priv->tx_skbuff_dma[i].buf = 0;
priv->tx_skbuff_dma[i].map_as_page = false;
+ priv->tx_skbuff_dma[i].len = 0;
+ priv->tx_skbuff_dma[i].last_segment = false;
priv->tx_skbuff[i] = NULL;
}
@@ -1123,7 +1127,7 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
int i;
- for (i = 0; i < priv->dma_rx_size; i++)
+ for (i = 0; i < DMA_RX_SIZE; i++)
stmmac_free_rx_buffers(priv, i);
}
@@ -1131,7 +1135,7 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
{
int i;
- for (i = 0; i < priv->dma_tx_size; i++) {
+ for (i = 0; i < DMA_TX_SIZE; i++) {
struct dma_desc *p;
if (priv->extend_desc)
@@ -1143,12 +1147,12 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
if (priv->tx_skbuff_dma[i].map_as_page)
dma_unmap_page(priv->device,
priv->tx_skbuff_dma[i].buf,
- priv->hw->desc->get_tx_len(p),
+ priv->tx_skbuff_dma[i].len,
DMA_TO_DEVICE);
else
dma_unmap_single(priv->device,
priv->tx_skbuff_dma[i].buf,
- priv->hw->desc->get_tx_len(p),
+ priv->tx_skbuff_dma[i].len,
DMA_TO_DEVICE);
}
@@ -1171,33 +1175,31 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
*/
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
- unsigned int txsize = priv->dma_tx_size;
- unsigned int rxsize = priv->dma_rx_size;
int ret = -ENOMEM;
- priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
+ priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
GFP_KERNEL);
if (!priv->rx_skbuff_dma)
return -ENOMEM;
- priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
+ priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->rx_skbuff)
goto err_rx_skbuff;
- priv->tx_skbuff_dma = kmalloc_array(txsize,
+ priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
sizeof(*priv->tx_skbuff_dma),
GFP_KERNEL);
if (!priv->tx_skbuff_dma)
goto err_tx_skbuff_dma;
- priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
+ priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->tx_skbuff)
goto err_tx_skbuff;
if (priv->extend_desc) {
- priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize *
+ priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
sizeof(struct
dma_extended_desc),
&priv->dma_rx_phy,
@@ -1205,31 +1207,31 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
if (!priv->dma_erx)
goto err_dma;
- priv->dma_etx = dma_zalloc_coherent(priv->device, txsize *
+ priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
sizeof(struct
dma_extended_desc),
&priv->dma_tx_phy,
GFP_KERNEL);
if (!priv->dma_etx) {
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, DMA_RX_SIZE *
sizeof(struct dma_extended_desc),
priv->dma_erx, priv->dma_rx_phy);
goto err_dma;
}
} else {
- priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize *
+ priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
sizeof(struct dma_desc),
&priv->dma_rx_phy,
GFP_KERNEL);
if (!priv->dma_rx)
goto err_dma;
- priv->dma_tx = dma_zalloc_coherent(priv->device, txsize *
+ priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
sizeof(struct dma_desc),
&priv->dma_tx_phy,
GFP_KERNEL);
if (!priv->dma_tx) {
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, DMA_RX_SIZE *
sizeof(struct dma_desc),
priv->dma_rx, priv->dma_rx_phy);
goto err_dma;
@@ -1258,16 +1260,16 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
/* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc) {
dma_free_coherent(priv->device,
- priv->dma_tx_size * sizeof(struct dma_desc),
+ DMA_TX_SIZE * sizeof(struct dma_desc),
priv->dma_tx, priv->dma_tx_phy);
dma_free_coherent(priv->device,
- priv->dma_rx_size * sizeof(struct dma_desc),
+ DMA_RX_SIZE * sizeof(struct dma_desc),
priv->dma_rx, priv->dma_rx_phy);
} else {
- dma_free_coherent(priv->device, priv->dma_tx_size *
+ dma_free_coherent(priv->device, DMA_TX_SIZE *
sizeof(struct dma_extended_desc),
priv->dma_etx, priv->dma_tx_phy);
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, DMA_RX_SIZE *
sizeof(struct dma_extended_desc),
priv->dma_erx, priv->dma_rx_phy);
}
@@ -1312,62 +1314,59 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
*/
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
- unsigned int txsize = priv->dma_tx_size;
unsigned int bytes_compl = 0, pkts_compl = 0;
+ unsigned int entry = priv->dirty_tx;
spin_lock(&priv->tx_lock);
priv->xstats.tx_clean++;
- while (priv->dirty_tx != priv->cur_tx) {
- int last;
- unsigned int entry = priv->dirty_tx % txsize;
+ while (entry != priv->cur_tx) {
struct sk_buff *skb = priv->tx_skbuff[entry];
struct dma_desc *p;
+ int status;
if (priv->extend_desc)
p = (struct dma_desc *)(priv->dma_etx + entry);
else
p = priv->dma_tx + entry;
- /* Check if the descriptor is owned by the DMA. */
- if (priv->hw->desc->get_tx_owner(p))
- break;
-
- /* Verify tx error by looking at the last segment. */
- last = priv->hw->desc->get_tx_ls(p);
- if (likely(last)) {
- int tx_error =
- priv->hw->desc->tx_status(&priv->dev->stats,
+ status = priv->hw->desc->tx_status(&priv->dev->stats,
&priv->xstats, p,
priv->ioaddr);
- if (likely(tx_error == 0)) {
+ /* Check if the descriptor is owned by the DMA */
+ if (unlikely(status & tx_dma_own))
+ break;
+
+ /* Just consider the last segment and ... */
+ if (likely(!(status & tx_not_ls))) {
+ /* ... verify the status error condition */
+ if (unlikely(status & tx_err)) {
+ priv->dev->stats.tx_errors++;
+ } else {
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
- } else
- priv->dev->stats.tx_errors++;
-
+ }
stmmac_get_tx_hwtstamp(priv, entry, skb);
}
- if (netif_msg_tx_done(priv))
- pr_debug("%s: curr %d, dirty %d\n", __func__,
- priv->cur_tx, priv->dirty_tx);
if (likely(priv->tx_skbuff_dma[entry].buf)) {
if (priv->tx_skbuff_dma[entry].map_as_page)
dma_unmap_page(priv->device,
priv->tx_skbuff_dma[entry].buf,
- priv->hw->desc->get_tx_len(p),
+ priv->tx_skbuff_dma[entry].len,
DMA_TO_DEVICE);
else
dma_unmap_single(priv->device,
priv->tx_skbuff_dma[entry].buf,
- priv->hw->desc->get_tx_len(p),
+ priv->tx_skbuff_dma[entry].len,
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry].buf = 0;
priv->tx_skbuff_dma[entry].map_as_page = false;
}
priv->hw->mode->clean_desc3(priv, p);
+ priv->tx_skbuff_dma[entry].last_segment = false;
+ priv->tx_skbuff_dma[entry].is_jumbo = false;
if (likely(skb != NULL)) {
pkts_compl++;
@@ -1378,16 +1377,17 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
priv->hw->desc->release_tx_desc(p, priv->mode);
- priv->dirty_tx++;
+ entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
}
+ priv->dirty_tx = entry;
netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
if (unlikely(netif_queue_stopped(priv->dev) &&
- stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
+ stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
netif_tx_lock(priv->dev);
if (netif_queue_stopped(priv->dev) &&
- stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
+ stmmac_tx_avail(priv) > STMMAC_TX_THRESH) {
if (netif_msg_tx_done(priv))
pr_debug("%s: restart transmit\n", __func__);
netif_wake_queue(priv->dev);
@@ -1421,20 +1421,19 @@ static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
static void stmmac_tx_err(struct stmmac_priv *priv)
{
int i;
- int txsize = priv->dma_tx_size;
netif_stop_queue(priv->dev);
priv->hw->dma->stop_tx(priv->ioaddr);
dma_free_tx_skbufs(priv);
- for (i = 0; i < txsize; i++)
+ for (i = 0; i < DMA_TX_SIZE; i++)
if (priv->extend_desc)
priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
priv->mode,
- (i == txsize - 1));
+ (i == DMA_TX_SIZE - 1));
else
priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
priv->mode,
- (i == txsize - 1));
+ (i == DMA_TX_SIZE - 1));
priv->dirty_tx = 0;
priv->cur_tx = 0;
netdev_reset_queue(priv->dev);
@@ -1635,23 +1634,35 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
*/
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
- int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
+ int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, aal = 0;
int mixed_burst = 0;
int atds = 0;
+ int ret = 0;
if (priv->plat->dma_cfg) {
pbl = priv->plat->dma_cfg->pbl;
fixed_burst = priv->plat->dma_cfg->fixed_burst;
mixed_burst = priv->plat->dma_cfg->mixed_burst;
- burst_len = priv->plat->dma_cfg->burst_len;
+ aal = priv->plat->dma_cfg->aal;
}
if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
atds = 1;
- return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
- burst_len, priv->dma_tx_phy,
- priv->dma_rx_phy, atds);
+ ret = priv->hw->dma->reset(priv->ioaddr);
+ if (ret) {
+ dev_err(priv->device, "Failed to reset the dma\n");
+ return ret;
+ }
+
+ priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
+ aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);
+
+ if ((priv->synopsys_id >= DWMAC_CORE_3_50) &&
+ (priv->plat->axi && priv->hw->dma->axi))
+ priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
+
+ return ret;
}
/**
@@ -1799,10 +1810,8 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- /* Create and initialize the TX/RX descriptors chains. */
- priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
- priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
+ priv->rx_copybreak = STMMAC_RX_COPYBREAK;
ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
@@ -1943,13 +1952,12 @@ static int stmmac_release(struct net_device *dev)
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- unsigned int txsize = priv->dma_tx_size;
- int entry;
+ unsigned int nopaged_len = skb_headlen(skb);
int i, csum_insertion = 0, is_jumbo = 0;
int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int entry, first_entry;
struct dma_desc *desc, *first;
- unsigned int nopaged_len = skb_headlen(skb);
- unsigned int enh_desc = priv->plat->enh_desc;
+ unsigned int enh_desc;
spin_lock(&priv->tx_lock);
@@ -1966,31 +1974,26 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
- entry = priv->cur_tx % txsize;
+ entry = priv->cur_tx;
+ first_entry = entry;
csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
- if (priv->extend_desc)
+ if (likely(priv->extend_desc))
desc = (struct dma_desc *)(priv->dma_etx + entry);
else
desc = priv->dma_tx + entry;
first = desc;
+ priv->tx_skbuff[first_entry] = skb;
+
+ enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */
if (enh_desc)
is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
- if (likely(!is_jumbo)) {
- desc->des2 = dma_map_single(priv->device, skb->data,
- nopaged_len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, desc->des2))
- goto dma_map_err;
- priv->tx_skbuff_dma[entry].buf = desc->des2;
- priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion, priv->mode);
- } else {
- desc = first;
+ if (unlikely(is_jumbo)) {
entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
if (unlikely(entry < 0))
goto dma_map_err;
@@ -1999,10 +2002,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < nfrags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int len = skb_frag_size(frag);
+ bool last_segment = (i == (nfrags - 1));
- priv->tx_skbuff[entry] = NULL;
- entry = (++priv->cur_tx) % txsize;
- if (priv->extend_desc)
+ entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+
+ if (likely(priv->extend_desc))
desc = (struct dma_desc *)(priv->dma_etx + entry);
else
desc = priv->dma_tx + entry;
@@ -2012,53 +2016,37 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(priv->device, desc->des2))
goto dma_map_err; /* should reuse desc w/o issues */
+ priv->tx_skbuff[entry] = NULL;
priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->tx_skbuff_dma[entry].map_as_page = true;
+ priv->tx_skbuff_dma[entry].len = len;
+ priv->tx_skbuff_dma[entry].last_segment = last_segment;
+
+ /* Prepare the descriptor and set the own bit too */
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
- priv->mode);
- wmb();
- priv->hw->desc->set_tx_owner(desc);
- wmb();
+ priv->mode, 1, last_segment);
}
- priv->tx_skbuff[entry] = skb;
-
- /* Finalize the latest segment. */
- priv->hw->desc->close_tx_desc(desc);
+ entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
- wmb();
- /* According to the coalesce parameter the IC bit for the latest
- * segment could be reset and the timer re-started to invoke the
- * stmmac_tx function. This approach takes care about the fragments.
- */
- priv->tx_count_frames += nfrags + 1;
- if (priv->tx_coal_frames > priv->tx_count_frames) {
- priv->hw->desc->clear_tx_ic(desc);
- priv->xstats.tx_reset_ic_bit++;
- mod_timer(&priv->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer));
- } else
- priv->tx_count_frames = 0;
-
- /* To avoid raise condition */
- priv->hw->desc->set_tx_owner(first);
- wmb();
-
- priv->cur_tx++;
+ priv->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
- pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
- __func__, (priv->cur_tx % txsize),
- (priv->dirty_tx % txsize), entry, first, nfrags);
+ pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
+ __func__, priv->cur_tx, priv->dirty_tx, first_entry,
+ entry, first, nfrags);
if (priv->extend_desc)
- stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
+ stmmac_display_ring((void *)priv->dma_etx,
+ DMA_TX_SIZE, 1);
else
- stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
+ stmmac_display_ring((void *)priv->dma_tx,
+ DMA_TX_SIZE, 0);
pr_debug(">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len);
}
+
if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
if (netif_msg_hw(priv))
pr_debug("%s: stop transmitted packets\n", __func__);
@@ -2067,16 +2055,59 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
- if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- priv->hwts_tx_en)) {
- /* declare that device is doing timestamping */
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- priv->hw->desc->enable_tx_timestamp(first);
+ /* According to the coalesce parameter the IC bit for the latest
+ * segment is reset and the timer re-started to clean the tx status.
+ * This approach takes care of the fragments: desc is the first
+ * element in case of no SG.
+ */
+ priv->tx_count_frames += nfrags + 1;
+ if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+ mod_timer(&priv->txtimer,
+ STMMAC_COAL_TIMER(priv->tx_coal_timer));
+ } else {
+ priv->tx_count_frames = 0;
+ priv->hw->desc->set_tx_ic(desc);
+ priv->xstats.tx_set_ic_bit++;
}
if (!priv->hwts_tx_en)
skb_tx_timestamp(skb);
+ /* Ready to fill the first descriptor and set the OWN bit w/o any
+ * problems because all the descriptors are actually ready to be
+ * passed to the DMA engine.
+ */
+ if (likely(!is_jumbo)) {
+ bool last_segment = (nfrags == 0);
+
+ first->des2 = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, first->des2))
+ goto dma_map_err;
+
+ priv->tx_skbuff_dma[first_entry].buf = first->des2;
+ priv->tx_skbuff_dma[first_entry].len = nopaged_len;
+ priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->hw->desc->enable_tx_timestamp(first);
+ }
+
+ /* Prepare the first descriptor setting the OWN bit too */
+ priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
+ csum_insertion, priv->mode, 1,
+ last_segment);
+
+ /* The own bit must be the latest setting done when preparing the
+ * descriptor, and then a barrier is needed to make sure that
+ * everything is coherent before granting the DMA engine.
+ */
+ smp_wmb();
+ }
+
netdev_sent_queue(dev, skb->len);
priv->hw->dma->enable_dma_transmission(priv->ioaddr);
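
The ordering that makes the deferred first descriptor safe: every fragment descriptor is fully prepared (OWN already set), the first descriptor's fields are written, and only then does a single store publish the first OWN bit, after a barrier. A condensed model of that publication step; not driver code, reusing ETDES0_OWN from the descriptor rework above:

        /* Publish a prepared frame to the DMA. All other descriptor stores,
         * including the OWN bits of the fragments, happen before this call.
         */
        static void publish_first_desc(struct dma_desc *first, u32 flags)
        {
                wmb();                            /* prior stores visible first */
                first->des0 = flags | ETDES0_OWN; /* DMA may start from here */
        }
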
@@ -2108,6 +2139,14 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
}
+static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
+{
+ if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
+ return 0;
+
+ return 1;
+}
+
/**
* stmmac_rx_refill - refill used skb preallocated buffers
* @priv: driver private structure
@@ -2116,11 +2155,11 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
*/
static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
- unsigned int rxsize = priv->dma_rx_size;
int bfsize = priv->dma_buf_sz;
+ unsigned int entry = priv->dirty_rx;
+ int dirty = stmmac_rx_dirty(priv);
- for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
- unsigned int entry = priv->dirty_rx % rxsize;
+ while (dirty-- > 0) {
struct dma_desc *p;
if (priv->extend_desc)
@@ -2132,9 +2171,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
-
- if (unlikely(skb == NULL))
+ if (unlikely(!skb)) {
+ /* disable zero-copy RX for a while */
+ priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
+ if (unlikely(net_ratelimit()))
+ dev_err(priv->device,
+ "fail to alloc skb entry %d\n",
+ entry);
break;
+ }
priv->rx_skbuff[entry] = skb;
priv->rx_skbuff_dma[entry] =
@@ -2150,13 +2195,20 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
priv->hw->mode->refill_desc3(priv, p);
+ if (priv->rx_zeroc_thresh > 0)
+ priv->rx_zeroc_thresh--;
+
if (netif_msg_rx_status(priv))
pr_debug("\trefill entry #%d\n", entry);
}
+
wmb();
priv->hw->desc->set_rx_owner(p);
wmb();
+
+ entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
}
+ priv->dirty_rx = entry;
}
/**
@@ -2168,8 +2220,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
*/
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
- unsigned int rxsize = priv->dma_rx_size;
- unsigned int entry = priv->cur_rx % rxsize;
+ unsigned int entry = priv->cur_rx;
unsigned int next_entry;
unsigned int count = 0;
int coe = priv->hw->rx_csum;
@@ -2177,9 +2228,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
if (netif_msg_rx_status(priv)) {
pr_debug("%s: descriptor ring:\n", __func__);
if (priv->extend_desc)
- stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
+ stmmac_display_ring((void *)priv->dma_erx,
+ DMA_RX_SIZE, 1);
else
- stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
+ stmmac_display_ring((void *)priv->dma_rx,
+ DMA_RX_SIZE, 0);
}
while (count < limit) {
int status;
@@ -2190,20 +2243,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
else
p = priv->dma_rx + entry;
- if (priv->hw->desc->get_rx_owner(p))
+ /* read the status of the incoming frame */
+ status = priv->hw->desc->rx_status(&priv->dev->stats,
+ &priv->xstats, p);
+ /* check if still owned by the DMA; otherwise go ahead */
+ if (unlikely(status & dma_own))
break;
count++;
- next_entry = (++priv->cur_rx) % rxsize;
+ priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
+ next_entry = priv->cur_rx;
+
if (priv->extend_desc)
prefetch(priv->dma_erx + next_entry);
else
prefetch(priv->dma_rx + next_entry);
- /* read the status of the incoming frame */
- status = priv->hw->desc->rx_status(&priv->dev->stats,
- &priv->xstats, p);
if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
priv->hw->desc->rx_extended_status(&priv->dev->stats,
&priv->xstats,
@@ -2248,23 +2304,54 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
pr_debug("\tframe size %d, COE: %d\n",
frame_len, status);
}
- skb = priv->rx_skbuff[entry];
- if (unlikely(!skb)) {
- pr_err("%s: Inconsistent Rx descriptor chain\n",
- priv->dev->name);
- priv->dev->stats.rx_dropped++;
- break;
+
+ if (unlikely((frame_len < priv->rx_copybreak) ||
+ stmmac_rx_threshold_count(priv))) {
+ skb = netdev_alloc_skb_ip_align(priv->dev,
+ frame_len);
+ if (unlikely(!skb)) {
+ if (net_ratelimit())
+ dev_warn(priv->device,
+ "packet dropped\n");
+ priv->dev->stats.rx_dropped++;
+ break;
+ }
+
+ dma_sync_single_for_cpu(priv->device,
+ priv->rx_skbuff_dma[entry],
+ frame_len, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb,
+ priv->rx_skbuff[entry]->data,
+ frame_len);
+
+ skb_put(skb, frame_len);
+ dma_sync_single_for_device(priv->device,
+ priv->rx_skbuff_dma[entry],
+ frame_len, DMA_FROM_DEVICE);
+ } else {
+ skb = priv->rx_skbuff[entry];
+ if (unlikely(!skb)) {
+ pr_err("%s: Inconsistent Rx chain\n",
+ priv->dev->name);
+ priv->dev->stats.rx_dropped++;
+ break;
+ }
+ prefetch(skb->data - NET_IP_ALIGN);
+ priv->rx_skbuff[entry] = NULL;
+ priv->rx_zeroc_thresh++;
+
+ skb_put(skb, frame_len);
+ dma_unmap_single(priv->device,
+ priv->rx_skbuff_dma[entry],
+ priv->dma_buf_sz,
+ DMA_FROM_DEVICE);
}
- prefetch(skb->data - NET_IP_ALIGN);
- priv->rx_skbuff[entry] = NULL;
stmmac_get_rx_hwtstamp(priv, entry, skb);
- skb_put(skb, frame_len);
- dma_unmap_single(priv->device,
- priv->rx_skbuff_dma[entry],
- priv->dma_buf_sz, DMA_FROM_DEVICE);
-
if (netif_msg_pktdata(priv)) {
pr_debug("frame received (%dbytes)", frame_len);
print_pkt(skb->data, frame_len);
@@ -2555,19 +2642,17 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
{
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
- unsigned int txsize = priv->dma_tx_size;
- unsigned int rxsize = priv->dma_rx_size;
if (priv->extend_desc) {
seq_printf(seq, "Extended RX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq);
+ sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
seq_printf(seq, "Extended TX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq);
+ sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
} else {
seq_printf(seq, "RX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
+ sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
seq_printf(seq, "TX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
+ sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
}
return 0;
@@ -3137,12 +3222,6 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "phyaddr:", 8)) {
if (kstrtoint(opt + 8, 0, &phyaddr))
goto err;
- } else if (!strncmp(opt, "dma_txsize:", 11)) {
- if (kstrtoint(opt + 11, 0, &dma_txsize))
- goto err;
- } else if (!strncmp(opt, "dma_rxsize:", 11)) {
- if (kstrtoint(opt + 11, 0, &dma_rxsize))
- goto err;
} else if (!strncmp(opt, "buf_sz:", 7)) {
if (kstrtoint(opt + 7, 0, &buf_sz))
goto err;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0faf16336035..efb54f356a67 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -199,21 +199,12 @@ int stmmac_mdio_register(struct net_device *ndev)
struct stmmac_priv *priv = netdev_priv(ndev);
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
int addr, found;
- struct device_node *mdio_node = NULL;
- struct device_node *child_node = NULL;
+ struct device_node *mdio_node = priv->plat->mdio_node;
if (!mdio_bus_data)
return 0;
if (IS_ENABLED(CONFIG_OF)) {
- for_each_child_of_node(priv->device->of_node, child_node) {
- if (of_device_is_compatible(child_node,
- "snps,dwmac-mdio")) {
- mdio_node = child_node;
- break;
- }
- }
-
if (mdio_node) {
netdev_dbg(ndev, "FOUND MDIO subnode\n");
} else {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index d71a721ea61c..ae4388735b7f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -81,7 +81,7 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
plat->mdio_bus_data->phy_mask = 0;
plat->dma_cfg->pbl = 32;
- plat->dma_cfg->burst_len = DMA_AXI_BLEN_256;
+ /* TODO: AXI */
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -115,8 +115,8 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
plat->mdio_bus_data->phy_mask = 0;
plat->dma_cfg->pbl = 16;
- plat->dma_cfg->burst_len = DMA_AXI_BLEN_256;
plat->dma_cfg->fixed_burst = 1;
+ /* TODO: AXI */
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = HASH_TABLE_SIZE;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 6a52fa18cbf2..dcbd2a1601e8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -96,6 +96,42 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
}
/**
+ * stmmac_axi_setup - parse DT parameters for programming the AXI register
+ * @pdev: platform device
+ * Description:
+ * if required, the AXI internal register can be tuned from the
+ * device tree by using platform parameters.
+ */
+static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct stmmac_axi *axi;
+
+ np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
+ if (!np)
+ return NULL;
+
+ axi = kzalloc(sizeof(*axi), GFP_KERNEL);
+ if (!axi)
+ return ERR_PTR(-ENOMEM);
+
+ axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
+ axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
+ axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
+ axi->axi_axi_all = of_property_read_bool(np, "snps,axi_all");
+ axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
+ axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
+ axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");
+
+ of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt);
+ of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt);
+ of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
+
+ return axi;
+}
+
+/**
* stmmac_probe_config_dt - parse device-tree driver parameters
* @pdev: platform_device structure
* @plat: driver data platform structure
@@ -110,6 +146,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
+ struct device_node *child_node = NULL;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
@@ -140,13 +177,19 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->phy_node = of_node_get(np);
}
+ for_each_child_of_node(np, child_node)
+ if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) {
+ plat->mdio_node = child_node;
+ break;
+ }
+
/* "snps,phy-addr" is not a standard property. Mark it as deprecated
* and warn of its use. Remove this when phy node support is added.
*/
if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
- if ((plat->phy_node && !of_phy_is_fixed_link(np)) || plat->phy_bus_name)
+ if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node)
plat->mdio_bus_data = NULL;
else
plat->mdio_bus_data =
@@ -216,13 +259,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
}
plat->dma_cfg = dma_cfg;
of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
+ dma_cfg->aal = of_property_read_bool(np, "snps,aal");
dma_cfg->fixed_burst =
of_property_read_bool(np, "snps,fixed-burst");
dma_cfg->mixed_burst =
of_property_read_bool(np, "snps,mixed-burst");
- of_property_read_u32(np, "snps,burst_len", &dma_cfg->burst_len);
- if (dma_cfg->burst_len < 0 || dma_cfg->burst_len > 256)
- dma_cfg->burst_len = 0;
}
plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
if (plat->force_thresh_dma_mode) {
@@ -230,6 +271,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
}
+ plat->axi = stmmac_axi_setup(pdev);
+
return plat;
}
#else
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index fc8bbff2d7e3..af11ed1e0bcc 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -426,7 +426,7 @@
#define DWC_MMC_RXOCTETCOUNT_GB 0x0784
#define DWC_MMC_RXPACKETCOUNT_GB 0x0780
-static int debug = 3;
+static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
@@ -650,6 +650,11 @@ struct net_local {
u32 mmc_tx_counters_mask;
struct dwceqos_flowcontrol flowcontrol;
+
+ /* Tracks the intermediate state where the phy has been started
+ * but hardware init has not finished yet.
+ */
+ bool phy_defer;
};
static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
@@ -901,6 +906,9 @@ static void dwceqos_adjust_link(struct net_device *ndev)
struct phy_device *phydev = lp->phy_dev;
int status_change = 0;
+ if (lp->phy_defer)
+ return;
+
if (phydev->link) {
if ((lp->speed != phydev->speed) ||
(lp->duplex != phydev->duplex)) {
@@ -1113,7 +1121,7 @@ static int dwceqos_descriptor_init(struct net_local *lp)
/* Allocate DMA descriptors */
size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
- &lp->rx_descs_addr, 0);
+ &lp->rx_descs_addr, GFP_KERNEL);
if (!lp->rx_descs)
goto err_out;
lp->rx_descs_tail_addr = lp->rx_descs_addr +
@@ -1121,7 +1129,7 @@ static int dwceqos_descriptor_init(struct net_local *lp)
size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
- &lp->tx_descs_addr, 0);
+ &lp->tx_descs_addr, GFP_KERNEL);
if (!lp->tx_descs)
goto err_out;
lp->tx_descs_tail_addr = lp->tx_descs_addr +
@@ -1635,6 +1643,12 @@ static void dwceqos_init_hw(struct net_local *lp)
regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+
+ lp->phy_defer = false;
+ mutex_lock(&lp->phy_dev->lock);
+ phy_read_status(lp->phy_dev);
+ dwceqos_adjust_link(lp->ndev);
+ mutex_unlock(&lp->phy_dev->lock);
}
static void dwceqos_tx_reclaim(unsigned long data)
@@ -1880,9 +1894,13 @@ static int dwceqos_open(struct net_device *ndev)
}
netdev_reset_queue(ndev);
+ /* The dwceqos reset state machine requires all phy clocks to be
+ * running before it can complete, hence the unusual init order
+ * with phy_start first.
+ */
+ lp->phy_defer = true;
+ phy_start(lp->phy_dev);
dwceqos_init_hw(lp);
napi_enable(&lp->napi);
- phy_start(lp->phy_dev);
netif_start_queue(ndev);
tasklet_enable(&lp->tx_bdreclaim_tasklet);
@@ -1915,18 +1933,19 @@ static int dwceqos_stop(struct net_device *ndev)
{
struct net_local *lp = netdev_priv(ndev);
- phy_stop(lp->phy_dev);
-
tasklet_disable(&lp->tx_bdreclaim_tasklet);
- netif_stop_queue(ndev);
napi_disable(&lp->napi);
- dwceqos_drain_dma(lp);
+ /* Stop all tx before we drain the tx dma. */
+ netif_tx_lock_bh(lp->ndev);
+ netif_stop_queue(ndev);
+ netif_tx_unlock_bh(lp->ndev);
- netif_tx_lock(lp->ndev);
+ dwceqos_drain_dma(lp);
dwceqos_reset_hw(lp);
+ phy_stop(lp->phy_dev);
+
dwceqos_descriptor_free(lp);
- netif_tx_unlock(lp->ndev);
return 0;
}
@@ -2178,12 +2197,10 @@ static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
((trans.initial_descriptor + trans.nr_descriptors) %
DWCEQOS_TX_DCNT));
- dwceqos_tx_finalize(skb, lp, &trans);
-
- netdev_sent_queue(ndev, skb->len);
-
spin_lock_bh(&lp->tx_lock);
lp->tx_free -= trans.nr_descriptors;
+ dwceqos_tx_finalize(skb, lp, &trans);
+ netdev_sent_queue(ndev, skb->len);
spin_unlock_bh(&lp->tx_lock);
ndev->trans_start = jiffies;
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index ed0c30f590d4..1d0942c53120 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1860,7 +1860,7 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
/* setup tc must be called under rtnl lock */
ASSERT_RTNL();
- if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
+ if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
/* Sanity-check the number of traffic classes requested */
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index bc5da357e16d..6a0cbbe03e5d 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -775,10 +775,10 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct flowi4 *fl4,
struct ip_tunnel_info *info)
{
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
struct dst_cache *dst_cache;
struct rtable *rt = NULL;
- bool use_cache = true;
__u8 tos;
memset(fl4, 0, sizeof(*fl4));
@@ -804,7 +804,6 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
dst_cache = &geneve->dst_cache;
}
- use_cache = use_cache && !skb->mark;
if (use_cache) {
rt = dst_cache_get_ip4(dst_cache, &fl4->saddr);
if (rt)
@@ -832,11 +831,11 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
struct flowi6 *fl6,
struct ip_tunnel_info *info)
{
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
struct geneve_sock *gs6 = geneve->sock6;
struct dst_entry *dst = NULL;
struct dst_cache *dst_cache;
- bool use_cache = true;
__u8 prio;
memset(fl6, 0, sizeof(*fl6));
@@ -862,7 +861,6 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
dst_cache = &geneve->dst_cache;
}
- use_cache = use_cache && !skb->mark;
if (use_cache) {
dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
if (dst)
@@ -940,7 +938,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
u8 vni[3];
tunnel_id_to_vni(key->tun_id, vni);
- if (key->tun_flags & TUNNEL_GENEVE_OPT)
+ if (info->options_len)
opts = ip_tunnel_info_opts(info);
if (key->tun_flags & TUNNEL_CSUM)
@@ -1027,7 +1025,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
u8 vni[3];
tunnel_id_to_vni(key->tun_id, vni);
- if (key->tun_flags & TUNNEL_GENEVE_OPT)
+ if (info->options_len)
opts = ip_tunnel_info_opts(info);
if (key->tun_flags & TUNNEL_CSUM)
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index fcb92c0d0eb9..b4c68783dfc3 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -658,6 +658,10 @@ struct net_device_context {
struct netvsc_stats __percpu *tx_stats;
struct netvsc_stats __percpu *rx_stats;
+
+ /* Ethtool settings */
+ u8 duplex;
+ u32 speed;
};
/* Per netvsc device */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 2b6595e24f43..08608499fa17 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -799,6 +799,58 @@ static int netvsc_set_channels(struct net_device *net,
goto do_set;
}
+static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
+{
+ struct ethtool_cmd diff1 = *cmd;
+ struct ethtool_cmd diff2 = {};
+
+ ethtool_cmd_speed_set(&diff1, 0);
+ diff1.duplex = 0;
+ /* advertising and cmd are usually set */
+ diff1.advertising = 0;
+ diff1.cmd = 0;
+ /* We set port to PORT_OTHER */
+ diff2.port = PORT_OTHER;
+
+ return !memcmp(&diff1, &diff2, sizeof(diff1));
+}
+
+static void netvsc_init_settings(struct net_device *dev)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+
+ ndc->speed = SPEED_UNKNOWN;
+ ndc->duplex = DUPLEX_UNKNOWN;
+}
+
+static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+
+ ethtool_cmd_speed_set(cmd, ndc->speed);
+ cmd->duplex = ndc->duplex;
+ cmd->port = PORT_OTHER;
+
+ return 0;
+}
+
+static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ u32 speed;
+
+ speed = ethtool_cmd_speed(cmd);
+ if (!ethtool_validate_speed(speed) ||
+ !ethtool_validate_duplex(cmd->duplex) ||
+ !netvsc_validate_ethtool_ss_cmd(cmd))
+ return -EINVAL;
+
+ ndc->speed = speed;
+ ndc->duplex = cmd->duplex;
+
+ return 0;
+}
+
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
@@ -923,6 +975,8 @@ static const struct ethtool_ops ethtool_ops = {
.get_channels = netvsc_get_channels,
.set_channels = netvsc_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_settings = netvsc_get_settings,
+ .set_settings = netvsc_set_settings,
};
static const struct net_device_ops device_ops = {
@@ -1115,6 +1169,8 @@ static int netvsc_probe(struct hv_device *dev,
netif_set_real_num_tx_queues(net, nvdev->num_chn);
netif_set_real_num_rx_queues(net, nvdev->num_chn);
+ netvsc_init_settings(net);
+
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index a37bbda37ffa..47d07c576a34 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1175,22 +1175,18 @@ int rndis_filter_device_add(struct hv_device *dev,
ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
/*
- * Wait for the host to send us the sub-channel offers.
+ * Set the number of sub-channels to be received.
*/
spin_lock_irqsave(&net_device->sc_lock, flags);
sc_delta = num_rss_qs - (net_device->num_chn - 1);
net_device->num_sc_offered -= sc_delta;
spin_unlock_irqrestore(&net_device->sc_lock, flags);
- while (net_device->num_sc_offered != 0) {
- t = wait_for_completion_timeout(&net_device->channel_init_wait, 10*HZ);
- if (t == 0)
- WARN(1, "Netvsc: Waiting for sub-channel processing");
- }
out:
if (ret) {
net_device->max_chn = 1;
net_device->num_chn = 1;
+ net_device->num_sc_offered = 0;
}
return 0; /* return 0 because primary channel can be used alone */
@@ -1204,6 +1200,17 @@ void rndis_filter_device_remove(struct hv_device *dev)
{
struct netvsc_device *net_dev = hv_get_drvdata(dev);
struct rndis_device *rndis_dev = net_dev->extension;
+ unsigned long t;
+
+ /* If not all sub-channel offers have completed yet, wait for them
+ * to finish in order to avoid a race.
+ */
+ while (net_dev->num_sc_offered > 0) {
+ t = wait_for_completion_timeout(&net_dev->channel_init_wait,
+ 10 * HZ);
+ if (t == 0)
+ WARN(1, "Netvsc: Waiting for sub-channel processing");
+ }
/* Halt and release the rndis device */
rndis_filter_halt_device(rndis_dev);
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 0fbbba7a0cae..cb9e9fe6d77a 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -343,16 +343,26 @@ static const struct regmap_config at86rf230_regmap_spi_config = {
};
static void
-at86rf230_async_error_recover(void *context)
+at86rf230_async_error_recover_complete(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- lp->is_tx = 0;
- at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL);
- ieee802154_wake_queue(lp->hw);
if (ctx->free)
kfree(ctx);
+
+ ieee802154_wake_queue(lp->hw);
+}
+
+static void
+at86rf230_async_error_recover(void *context)
+{
+ struct at86rf230_state_change *ctx = context;
+ struct at86rf230_local *lp = ctx->lp;
+
+ lp->is_tx = 0;
+ at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
+ at86rf230_async_error_recover_complete);
}
static inline void
@@ -892,14 +902,12 @@ at86rf230_xmit_start(void *context)
struct at86rf230_local *lp = ctx->lp;
/* check if we change from off state */
- if (lp->is_tx_from_off) {
- lp->is_tx_from_off = false;
+ if (lp->is_tx_from_off)
at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
at86rf230_write_frame);
- } else {
+ else
at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
at86rf230_xmit_tx_on);
- }
}
static int
@@ -923,6 +931,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF,
at86rf230_xmit_start);
} else {
+ lp->is_tx_from_off = false;
at86rf230_xmit_start(ctx);
}
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 4cdf51638972..764a2bddfaee 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -310,6 +310,7 @@ mrf24j40_short_reg_writeable(struct device *dev, unsigned int reg)
case REG_TRISGPIO:
case REG_GPIO:
case REG_RFCTL:
+ case REG_SECCR2:
case REG_SLPACK:
case REG_BBREG0:
case REG_BBREG1:
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 48219c83fb00..4516c8a4fd82 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -297,6 +297,17 @@ static int kszphy_config_init(struct phy_device *phydev)
if (priv->led_mode >= 0)
kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
+ if (phy_interrupt_is_valid(phydev)) {
+ int ctl = phy_read(phydev, MII_BMCR);
+
+ if (ctl < 0)
+ return ctl;
+
+ ret = phy_write(phydev, MII_BMCR, ctl & ~BMCR_ANENABLE);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -636,6 +647,21 @@ static void kszphy_get_stats(struct phy_device *phydev,
data[i] = kszphy_get_stat(phydev, i);
}
+static int kszphy_resume(struct phy_device *phydev)
+{
+ int value;
+
+ mutex_lock(&phydev->lock);
+
+ value = phy_read(phydev, MII_BMCR);
+ phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
+
+ kszphy_config_intr(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+}
+
static int kszphy_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@@ -845,7 +871,7 @@ static struct phy_driver ksphy_driver[] = {
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
- .resume = genphy_resume,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8061,
.name = "Micrel KSZ8061",
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 04f4eb34fa80..931836e09a6b 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -443,9 +443,14 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
* network traffic (demand mode).
*/
struct ppp *ppp = PF_TO_PPP(pf);
+
+ ppp_recv_lock(ppp);
if (ppp->n_channels == 0 &&
- (ppp->flags & SC_LOOP_TRAFFIC) == 0)
+ (ppp->flags & SC_LOOP_TRAFFIC) == 0) {
+ ppp_recv_unlock(ppp);
break;
+ }
+ ppp_recv_unlock(ppp);
}
ret = -EAGAIN;
if (file->f_flags & O_NONBLOCK)
@@ -532,9 +537,12 @@ static unsigned int ppp_poll(struct file *file, poll_table *wait)
else if (pf->kind == INTERFACE) {
/* see comment in ppp_read */
struct ppp *ppp = PF_TO_PPP(pf);
+
+ ppp_recv_lock(ppp);
if (ppp->n_channels == 0 &&
(ppp->flags & SC_LOOP_TRAFFIC) == 0)
mask |= POLLIN | POLLRDNORM;
+ ppp_recv_unlock(ppp);
}
return mask;
@@ -2810,6 +2818,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
out2:
mutex_unlock(&pn->all_ppp_mutex);
+ rtnl_unlock();
free_netdev(dev);
out1:
*retp = ret;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 88bb8cc3555b..afdf950617c3 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -187,6 +187,7 @@ struct tun_struct {
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
NETIF_F_TSO6|NETIF_F_UFO)
+ int align;
int vnet_hdr_sz;
int sndbuf;
struct tap_filter txflt;
@@ -934,6 +935,17 @@ static void tun_poll_controller(struct net_device *dev)
return;
}
#endif
+
+static void tun_set_headroom(struct net_device *dev, int new_hr)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ if (new_hr < NET_SKB_PAD)
+ new_hr = NET_SKB_PAD;
+
+ tun->align = new_hr;
+}
+
static const struct net_device_ops tun_netdev_ops = {
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
@@ -945,6 +957,7 @@ static const struct net_device_ops tun_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tun_poll_controller,
#endif
+ .ndo_set_rx_headroom = tun_set_headroom,
};
static const struct net_device_ops tap_netdev_ops = {
@@ -962,6 +975,7 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_poll_controller = tun_poll_controller,
#endif
.ndo_features_check = passthru_features_check,
+ .ndo_set_rx_headroom = tun_set_headroom,
};
static void tun_flow_init(struct tun_struct *tun)
@@ -1086,7 +1100,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
size_t total_len = iov_iter_count(from);
- size_t len = total_len, align = NET_SKB_PAD, linear;
+ size_t len = total_len, align = tun->align, linear;
struct virtio_net_hdr gso = { 0 };
int good_linear;
int copylen;
@@ -1694,6 +1708,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->txflt.count = 0;
tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
+ tun->align = NET_SKB_PAD;
tun->filter_attached = false;
tun->sndbuf = tfile->socket.sk->sk_sndbuf;
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 224e7d82de6d..cf77f2dffa69 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -134,7 +134,6 @@ static void ax88172a_remove_mdio(struct usbnet *dev)
netdev_info(dev->net, "deregistering mdio bus %s\n", priv->mdio->id);
mdiobus_unregister(priv->mdio);
- kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
}
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index dc0212c3cc28..86ba30ba35e8 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -837,7 +837,11 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
- /* reset data interface */
+ /* Reset data interface. Some devices will not reset properly
+ * unless they are configured first. Toggle the altsetting to
+ * force a reset.
+ */
+ usb_set_interface(dev->udev, iface_no, data_altsetting);
temp = usb_set_interface(dev->udev, iface_no, 0);
if (temp) {
dev_dbg(&intf->dev, "set interface failed\n");
@@ -984,8 +988,6 @@ EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
{
- int ret;
-
/* MBIM backwards compatible function? */
if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
return -ENODEV;
@@ -994,16 +996,7 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
* Additionally, generic NCM devices are assumed to accept arbitrarily
* placed NDP.
*/
- ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
-
- /*
- * We should get an event when network connection is "connected" or
- * "disconnected". Set network connection in "disconnected" state
- * (carrier is OFF) during attach, so the IP network stack does not
- * start IPv6 negotiation and more.
- */
- usbnet_link_change(dev, 0, 0);
- return ret;
+ return cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
}
static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
@@ -1586,7 +1579,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
- .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ | FLAG_LINK_INTR,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
@@ -1599,7 +1593,7 @@ static const struct driver_info cdc_ncm_info = {
static const struct driver_info wwan_info = {
.description = "Mobile Broadband Network Device",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
- | FLAG_WWAN,
+ | FLAG_LINK_INTR | FLAG_WWAN,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
@@ -1612,7 +1606,7 @@ static const struct driver_info wwan_info = {
static const struct driver_info wwan_noarp_info = {
.description = "Mobile Broadband Network Device (NO ARP)",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
- | FLAG_WWAN | FLAG_NOARP,
+ | FLAG_LINK_INTR | FLAG_WWAN | FLAG_NOARP,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 1c299b8a162d..705c180163c5 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -36,7 +36,7 @@
#define DRIVER_AUTHOR "WOOJUNG HUH <[email protected]>"
#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME "lan78xx"
-#define DRIVER_VERSION "1.0.2"
+#define DRIVER_VERSION "1.0.3"
#define TX_TIMEOUT_JIFFIES (5 * HZ)
#define THROTTLE_JIFFIES (HZ / 8)
@@ -278,8 +278,12 @@ struct lan78xx_net {
int link_on;
u8 mdix_ctrl;
- u32 devid;
+ u32 chipid;
+ u32 chiprev;
struct mii_bus *mdiobus;
+
+ int fc_autoneg;
+ u8 fc_request_control;
};
/* use ethtool to change the level for any given device */
@@ -471,7 +475,7 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
*/
ret = lan78xx_read_reg(dev, HW_CFG, &val);
saved = val;
- if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
+ if (dev->chipid == ID_REV_CHIP_ID_7800_) {
val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
ret = lan78xx_write_reg(dev, HW_CFG, val);
}
@@ -505,7 +509,7 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
retval = 0;
exit:
- if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
+ if (dev->chipid == ID_REV_CHIP_ID_7800_)
ret = lan78xx_write_reg(dev, HW_CFG, saved);
return retval;
@@ -539,7 +543,7 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
*/
ret = lan78xx_read_reg(dev, HW_CFG, &val);
saved = val;
- if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
+ if (dev->chipid == ID_REV_CHIP_ID_7800_) {
val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
ret = lan78xx_write_reg(dev, HW_CFG, val);
}
@@ -587,7 +591,7 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
retval = 0;
exit:
- if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
+ if (dev->chipid == ID_REV_CHIP_ID_7800_)
ret = lan78xx_write_reg(dev, HW_CFG, saved);
return retval;
@@ -901,11 +905,15 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
{
u32 flow = 0, fct_flow = 0;
int ret;
+ u8 cap;
- u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+ if (dev->fc_autoneg)
+ cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+ else
+ cap = dev->fc_request_control;
if (cap & FLOW_CTRL_TX)
- flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
+ flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
if (cap & FLOW_CTRL_RX)
flow |= FLOW_CR_RX_FCEN_;
@@ -1385,6 +1393,62 @@ static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
return ret;
}
+static void lan78xx_get_pause(struct net_device *net,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ struct phy_device *phydev = net->phydev;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+
+ phy_ethtool_gset(phydev, &ecmd);
+
+ pause->autoneg = dev->fc_autoneg;
+
+ if (dev->fc_request_control & FLOW_CTRL_TX)
+ pause->tx_pause = 1;
+
+ if (dev->fc_request_control & FLOW_CTRL_RX)
+ pause->rx_pause = 1;
+}
+
+static int lan78xx_set_pause(struct net_device *net,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ struct phy_device *phydev = net->phydev;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+ int ret;
+
+ phy_ethtool_gset(phydev, &ecmd);
+
+ if (pause->autoneg && !ecmd.autoneg) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ dev->fc_request_control = 0;
+ if (pause->rx_pause)
+ dev->fc_request_control |= FLOW_CTRL_RX;
+
+ if (pause->tx_pause)
+ dev->fc_request_control |= FLOW_CTRL_TX;
+
+ if (ecmd.autoneg) {
+ u32 mii_adv;
+
+ ecmd.advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
+ ecmd.advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
+ phy_ethtool_sset(phydev, &ecmd);
+ }
+
+ dev->fc_autoneg = pause->autoneg;
+
+ ret = 0;
+exit:
+ return ret;
+}
+
static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_link = lan78xx_get_link,
.nway_reset = lan78xx_nway_reset,
@@ -1403,6 +1467,8 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.set_wol = lan78xx_set_wol,
.get_eee = lan78xx_get_eee,
.set_eee = lan78xx_set_eee,
+ .get_pauseparam = lan78xx_get_pause,
+ .set_pauseparam = lan78xx_set_pause,
};
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -1555,9 +1621,9 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
dev->udev->bus->busnum, dev->udev->devnum);
- switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
- case 0x78000000:
- case 0x78500000:
+ switch (dev->chipid) {
+ case ID_REV_CHIP_ID_7800_:
+ case ID_REV_CHIP_ID_7850_:
/* set to internal PHY id */
dev->mdiobus->phy_mask = ~(1 << 1);
break;
@@ -1590,6 +1656,7 @@ static void lan78xx_link_status_change(struct net_device *net)
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
int ret;
+ u32 mii_adv;
struct phy_device *phydev = dev->net->phydev;
phydev = phy_find_first(dev->mdiobus);
@@ -1622,14 +1689,17 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
/* MAC doesn't support 1000T Half */
phydev->supported &= ~SUPPORTED_1000baseT_Half;
- phydev->supported |= (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+
+ /* support both flow controls */
+ dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
+ phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
+ phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
+
genphy_config_aneg(phydev);
+ dev->fc_autoneg = phydev->autoneg;
+
phy_start(phydev);
netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
@@ -1918,7 +1988,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
/* save DEVID for later usage */
ret = lan78xx_read_reg(dev, ID_REV, &buf);
- dev->devid = buf;
+ dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
+ dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
/* Respond to the IN token with a NAK */
ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h
index a93fb653e7c5..40927906109a 100644
--- a/drivers/net/usb/lan78xx.h
+++ b/drivers/net/usb/lan78xx.h
@@ -107,6 +107,7 @@
#define ID_REV_CHIP_ID_MASK_ (0xFFFF0000)
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
#define ID_REV_CHIP_ID_7800_ (0x7800)
+#define ID_REV_CHIP_ID_7850_ (0x7850)
#define FPGA_REV (0x04)
#define FPGA_REV_MINOR_MASK_ (0x0000FF00)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 570deef53f74..a3a4ccf7cf52 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -861,8 +861,10 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9057, 8)},
{QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
- {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */
- {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
@@ -885,6 +887,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 0b0ba7ef14e4..10798128c03f 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1769,6 +1769,13 @@ out3:
if (info->unbind)
info->unbind (dev, udev);
out1:
+ /* Subdrivers must undo all they did in bind() if bind() fails,
+ * but we may fail later and a deferred kevent may trigger an
+ * error resubmitting itself and, worse, schedule a timer. So we
+ * kill it all just in case.
+ */
+ cancel_work_sync(&dev->kevent);
+ del_timer_sync(&dev->delay);
free_netdev(net);
out:
return status;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ba21d072be31..4f30a6ae50d0 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -35,6 +35,7 @@ struct pcpu_vstats {
struct veth_priv {
struct net_device __rcu *peer;
atomic64_t dropped;
+ unsigned requested_headroom;
};
/*
@@ -271,6 +272,29 @@ static int veth_get_iflink(const struct net_device *dev)
return iflink;
}
+static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
+{
+ struct veth_priv *peer_priv, *priv = netdev_priv(dev);
+ struct net_device *peer;
+
+ if (new_hr < 0)
+ new_hr = 0;
+
+ rcu_read_lock();
+ peer = rcu_dereference(priv->peer);
+ if (unlikely(!peer))
+ goto out;
+
+ peer_priv = netdev_priv(peer);
+ priv->requested_headroom = new_hr;
+ new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
+ dev->needed_headroom = new_hr;
+ peer->needed_headroom = new_hr;
+
+out:
+ rcu_read_unlock();
+}
+
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -285,6 +309,7 @@ static const struct net_device_ops veth_netdev_ops = {
#endif
.ndo_get_iflink = veth_get_iflink,
.ndo_features_check = passthru_features_check,
+ .ndo_set_rx_headroom = veth_set_rx_headroom,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
@@ -301,6 +326,7 @@ static void veth_setup(struct net_device *dev)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_PHONY_HEADROOM;
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0cbf520cea77..fc895d0e85d9 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -814,7 +814,7 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
/*
- * parse and copy relevant protocol headers:
+ * parse relevant protocol headers:
* For a tso pkt, relevant headers are L2/3/4 including options
* For a pkt requesting csum offloading, they are L2/3 and may include L4
* if it's a TCP/UDP pkt
@@ -827,15 +827,14 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
* Other effects:
* 1. related *ctx fields are updated.
* 2. ctx->copy_size is # of bytes copied
- * 3. the portion copied is guaranteed to be in the linear part
+ * 3. the portion to be copied is guaranteed to be in the linear part
*
*/
static int
-vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
- struct vmxnet3_tx_ctx *ctx,
- struct vmxnet3_adapter *adapter)
+vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_tx_ctx *ctx,
+ struct vmxnet3_adapter *adapter)
{
- struct Vmxnet3_TxDataDesc *tdd;
u8 protocol = 0;
if (ctx->mss) { /* TSO */
@@ -892,16 +891,34 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
return 0;
}
+ return 1;
+err:
+ return -1;
+}
+
+/*
+ * copy relevant protocol headers to the transmit ring:
+ * For a tso pkt, relevant headers are L2/3/4 including options
+ * For a pkt requesting csum offloading, they are L2/3 and may include L4
+ * if it's a TCP/UDP pkt
+ *
+ *
+ * Note that this requires vmxnet3_parse_hdr to be called first to set
+ * the appropriate bits in ctx
+ */
+static void
+vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_tx_ctx *ctx,
+ struct vmxnet3_adapter *adapter)
+{
+ struct Vmxnet3_TxDataDesc *tdd;
+
tdd = tq->data_ring.base + tq->tx_ring.next2fill;
memcpy(tdd->data, skb->data, ctx->copy_size);
netdev_dbg(adapter->netdev,
"copy %u bytes to dataRing[%u]\n",
ctx->copy_size, tq->tx_ring.next2fill);
- return 1;
-
-err:
- return -1;
}
@@ -998,22 +1015,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
}
}
- spin_lock_irqsave(&tq->tx_lock, flags);
-
- if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
- tq->stats.tx_ring_full++;
- netdev_dbg(adapter->netdev,
- "tx queue stopped on %s, next2comp %u"
- " next2fill %u\n", adapter->netdev->name,
- tq->tx_ring.next2comp, tq->tx_ring.next2fill);
-
- vmxnet3_tq_stop(tq, adapter);
- spin_unlock_irqrestore(&tq->tx_lock, flags);
- return NETDEV_TX_BUSY;
- }
-
-
- ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
+ ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
if (ret >= 0) {
BUG_ON(ret <= 0 && ctx.copy_size != 0);
/* hdrs parsed, check against other limits */
@@ -1033,9 +1035,26 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
}
} else {
tq->stats.drop_hdr_inspect_err++;
- goto unlock_drop_pkt;
+ goto drop_pkt;
}
+ spin_lock_irqsave(&tq->tx_lock, flags);
+
+ if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
+ tq->stats.tx_ring_full++;
+ netdev_dbg(adapter->netdev,
+ "tx queue stopped on %s, next2comp %u"
+ " next2fill %u\n", adapter->netdev->name,
+ tq->tx_ring.next2comp, tq->tx_ring.next2fill);
+
+ vmxnet3_tq_stop(tq, adapter);
+ spin_unlock_irqrestore(&tq->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+
+ vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
+
/* fill tx descs related to addr & len */
if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
goto unlock_drop_pkt;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 9ce088bb28ab..9a9fabb900c1 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -103,20 +103,23 @@ static struct dst_ops vrf_dst_ops = {
#if IS_ENABLED(CONFIG_IPV6)
static bool check_ipv6_frame(const struct sk_buff *skb)
{
- const struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data;
- size_t hlen = sizeof(*ipv6h);
+ const struct ipv6hdr *ipv6h;
+ struct ipv6hdr _ipv6h;
bool rc = true;
- if (skb->len < hlen)
+ ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h);
+ if (!ipv6h)
goto out;
if (ipv6h->nexthdr == NEXTHDR_ICMP) {
const struct icmp6hdr *icmph;
+ struct icmp6hdr _icmph;
- if (skb->len < hlen + sizeof(*icmph))
+ icmph = skb_header_pointer(skb, sizeof(_ipv6h),
+ sizeof(_icmph), &_icmph);
+ if (!icmph)
goto out;
- icmph = (struct icmp6hdr *)(skb->data + sizeof(*ipv6h));
switch (icmph->icmp6_type) {
case NDISC_ROUTER_SOLICITATION:
case NDISC_ROUTER_ADVERTISEMENT:
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 775ddb48388d..2399099e68cf 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -948,8 +948,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI, rd);
- if (err < 0)
+ if (err < 0) {
+ cb->args[1] = err;
goto out;
+ }
skip:
++idx;
}
@@ -1176,9 +1178,10 @@ static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
md->gbp = ntohs(gbp->policy_id);
tun_dst = (struct metadata_dst *)skb_dst(skb);
- if (tun_dst)
+ if (tun_dst) {
tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
-
+ tun_dst->u.tun_info.options_len = sizeof(*md);
+ }
if (gbp->dont_learn)
md->gbp |= VXLAN_GBP_DONT_LEARN;
@@ -1460,7 +1463,7 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
reply->dev = dev;
skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
skb_push(reply, sizeof(struct ethhdr));
- skb_set_mac_header(reply, 0);
+ skb_reset_mac_header(reply);
ns = (struct nd_msg *)skb_transport_header(request);
@@ -1480,7 +1483,7 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
reply->protocol = htons(ETH_P_IPV6);
skb_pull(reply, sizeof(struct ethhdr));
- skb_set_network_header(reply, 0);
+ skb_reset_network_header(reply);
skb_put(reply, sizeof(struct ipv6hdr));
/* IPv6 header */
@@ -1495,7 +1498,7 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
pip6->saddr = *(struct in6_addr *)n->primary_key;
skb_pull(reply, sizeof(struct ipv6hdr));
- skb_set_transport_header(reply, 0);
+ skb_reset_transport_header(reply);
na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
@@ -1753,17 +1756,15 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
struct sk_buff *skb, int oif, u8 tos,
__be32 daddr, __be32 *saddr,
struct dst_cache *dst_cache,
- struct ip_tunnel_info *info)
+ const struct ip_tunnel_info *info)
{
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct rtable *rt = NULL;
- bool use_cache = false;
struct flowi4 fl4;
- /* when the ip_tunnel_info is availble, the tos used for lookup is
- * packet independent, so we can use the cache
- */
- if (!skb->mark && (!tos || info)) {
- use_cache = true;
+ if (tos && !info)
+ use_cache = false;
+ if (use_cache) {
rt = dst_cache_get_ip4(dst_cache, saddr);
if (rt)
return rt;
@@ -1788,16 +1789,20 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
- struct sk_buff *skb, int oif,
+ struct sk_buff *skb, int oif, u8 tos,
const struct in6_addr *daddr,
struct in6_addr *saddr,
- struct dst_cache *dst_cache)
+ struct dst_cache *dst_cache,
+ const struct ip_tunnel_info *info)
{
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct dst_entry *ndst;
struct flowi6 fl6;
int err;
- if (!skb->mark) {
+ if (tos && !info)
+ use_cache = false;
+ if (use_cache) {
ndst = dst_cache_get_ip6(dst_cache, saddr);
if (ndst)
return ndst;
@@ -1805,6 +1810,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = oif;
+ fl6.flowi6_tos = RT_TOS(tos);
fl6.daddr = *daddr;
fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
fl6.flowi6_mark = skb->mark;
@@ -1817,7 +1823,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
return ERR_PTR(err);
*saddr = fl6.saddr;
- if (!skb->mark)
+ if (use_cache)
dst_cache_set_ip6(dst_cache, ndst, saddr);
return ndst;
}
@@ -2013,9 +2019,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
sk = vxlan->vn6_sock->sock->sk;
ndst = vxlan6_get_route(vxlan, skb,
- rdst ? rdst->remote_ifindex : 0,
+ rdst ? rdst->remote_ifindex : 0, tos,
&dst->sin6.sin6_addr, &saddr,
- dst_cache);
+ dst_cache, info);
if (IS_ERR(ndst)) {
netdev_dbg(dev, "no route to %pI6\n",
&dst->sin6.sin6_addr);
@@ -2050,6 +2056,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
if (!info)
udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+ tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
skb_scrub_packet(skb, xnet);
err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
@@ -2059,8 +2066,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
return;
}
udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
- &saddr, &dst->sin6.sin6_addr,
- 0, ttl, src_port, dst_port, !udp_sum);
+ &saddr, &dst->sin6.sin6_addr, tos, ttl,
+ src_port, dst_port, !udp_sum);
#endif
}
@@ -2382,9 +2389,9 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
if (!vxlan->vn6_sock)
return -EINVAL;
- ndst = vxlan6_get_route(vxlan, skb, 0,
+ ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos,
&info->key.u.ipv6.dst,
- &info->key.u.ipv6.src, NULL);
+ &info->key.u.ipv6.src, NULL, info);
if (IS_ERR(ndst))
return PTR_ERR(ndst);
dst_release(ndst);
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 317bc79cc8b9..bb33b242ab48 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -826,7 +826,7 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* lmc_trace(dev, "lmc_init_one in"); */
- err = pci_enable_device(pdev);
+ err = pcim_enable_device(pdev);
if (err) {
printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
return err;
@@ -835,23 +835,20 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_request_regions(pdev, "lmc");
if (err) {
printk(KERN_ERR "lmc: pci_request_region failed\n");
- goto err_req_io;
+ return err;
}
/*
* Allocate our own device structure
*/
- sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL);
- if (!sc) {
- err = -ENOMEM;
- goto err_kzalloc;
- }
+ sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL);
+ if (!sc)
+ return -ENOMEM;
dev = alloc_hdlcdev(sc);
if (!dev) {
printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
- err = -ENOMEM;
- goto err_hdlcdev;
+ return -ENOMEM;
}
@@ -888,7 +885,7 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
free_netdev(dev);
- goto err_hdlcdev;
+ return err;
}
sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
@@ -971,14 +968,6 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
lmc_trace(dev, "lmc_init_one out");
return 0;
-
-err_hdlcdev:
- kfree(sc);
-err_kzalloc:
- pci_release_regions(pdev);
-err_req_io:
- pci_disable_device(pdev);
- return err;
}
/*
@@ -992,8 +981,6 @@ static void lmc_remove_one(struct pci_dev *pdev)
printk(KERN_DEBUG "%s: removing...\n", dev->name);
unregister_hdlc_device(dev);
free_netdev(dev);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
}
}
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 6146a293601a..368de5e5a04f 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -6366,12 +6366,13 @@ static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static int ath10k_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
arvif->vdev_id, sta->addr, tid, action);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index fe1fd1a5ae15..639294a9e34d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1657,13 +1657,14 @@ static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw,
static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta,
- u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath9k_htc_sta *ista;
int ret = 0;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c1b33fdcca08..cf58a304e9f0 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1864,14 +1864,16 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static int ath9k_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta,
- u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
bool flush = false;
int ret = 0;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
mutex_lock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 19d3d64416bf..4d1527a2e292 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1413,10 +1413,12 @@ static void carl9170_ampdu_work(struct work_struct *work)
static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta,
- u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
struct ar9170 *ar = hw->priv;
struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
struct carl9170_sta_tid *tid_info;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 7c169abdbafe..a27279c2c695 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -857,12 +857,14 @@ static int wcn36xx_resume(struct ieee80211_hw *hw)
static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_sta *sta_priv = NULL;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action %d tid %d\n",
action, tid);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 20d07ef679e8..1f231cd08138 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -422,6 +422,11 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
if (sme->privacy && !rsn_eid)
wil_info(wil, "WSC connection\n");
+ if (sme->pbss) {
+ wil_err(wil, "connect - PBSS not yet supported\n");
+ return -EOPNOTSUPP;
+ }
+
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
sme->ssid, sme->ssid_len,
IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
@@ -870,6 +875,11 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
return -EINVAL;
}
+ if (info->pbss) {
+ wil_err(wil, "AP: PBSS not yet supported\n");
+ return -EOPNOTSUPP;
+ }
+
switch (info->hidden_ssid) {
case NL80211_HIDDEN_SSID_NOT_IN_USE:
hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index bec2dc1ca2e4..61ae2768132a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -818,13 +818,15 @@ brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
static int
brcms_ops_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct brcms_info *wl = hw->priv;
struct scb *scb = &wl->wlc->pri_scb;
int status;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u8 buf_size = params->buf_size;
if (WARN_ON(scb->magic != SCB_MAGIC))
return -EIDRM;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index fd38aa0763e4..b75f4ef3cdc7 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -5982,12 +5982,14 @@ il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int
il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 * ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct il_priv *il = hw->priv;
int ret = -EINVAL;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.h b/drivers/net/wireless/intel/iwlegacy/4965.h
index 8ab8706f9422..e432715e02d8 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.h
+++ b/drivers/net/wireless/intel/iwlegacy/4965.h
@@ -182,9 +182,7 @@ void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u32 iv32,
u16 *phase1key);
int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 * ssn,
- u8 buf_size, bool amsdu);
+ struct ieee80211_ampdu_params *params);
int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index 4841be2aa499..1799469268ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -943,14 +943,16 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
if (sta) {
+ u64 pn64;
+
tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
rx_p1ks = data->tkip->rx_uni;
- ieee80211_get_key_tx_seq(key, &seq);
- tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
- tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
+ pn64 = atomic64_read(&key->tx_pn);
+ tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
+ tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
@@ -996,19 +998,13 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
break;
case WLAN_CIPHER_SUITE_CCMP:
if (sta) {
- u8 *pn = seq.ccmp.pn;
+ u64 pn64;
aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
- ieee80211_get_key_tx_seq(key, &seq);
- aes_tx_sc->pn = cpu_to_le64(
- (u64)pn[5] |
- ((u64)pn[4] << 8) |
- ((u64)pn[3] << 16) |
- ((u64)pn[2] << 24) |
- ((u64)pn[1] << 32) |
- ((u64)pn[0] << 40));
+ pn64 = atomic64_read(&key->tx_pn);
+ aes_tx_sc->pn = cpu_to_le64(pn64);
} else
aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
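
Both cipher branches above stop using ieee80211_get_key_tx_seq() and instead read the packet number straight from the atomic key->tx_pn counter. A standalone sketch of what the TKIP_PN_TO_IV16/IV32 helpers plausibly compute follows; the exact kernel macros are an assumption, but the 16/32-bit split follows from the TKIP IV field widths.

#include <assert.h>
#include <stdint.h>

/* Assumed expansions of TKIP_PN_TO_IV16/IV32: the 48-bit TKIP packet
 * number splits into a 16-bit low half (iv16) and a 32-bit high half
 * (iv32). */
static uint16_t tkip_pn_to_iv16(uint64_t pn) { return (uint16_t)(pn & 0xffff); }
static uint32_t tkip_pn_to_iv32(uint64_t pn) { return (uint32_t)(pn >> 16); }

int main(void)
{
	uint64_t pn = 0x0000123456789abcULL;

	assert(tkip_pn_to_iv16(pn) == 0x9abc);
	assert(tkip_pn_to_iv32(pn) == 0x12345678);
	return 0;
}
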
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 4db4cb7aa73a..c63ea79571ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -732,12 +732,15 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int ret = -EINVAL;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ u8 buf_size = params->buf_size;
struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 346376187ef8..5214482a0403 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -251,16 +251,19 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
return;
case WLAN_CIPHER_SUITE_TKIP:
if (sta) {
+ u64 pn64;
+
tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
rx_p1ks = data->tkip->rx_uni;
- ieee80211_get_key_tx_seq(key, &seq);
- tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
- tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
+ pn64 = atomic64_read(&key->tx_pn);
+ tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
+ tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
- ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
+ ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
+ p1k);
iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);
memcpy(data->tkip->mic_keys.tx,
@@ -1614,7 +1617,9 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
case WLAN_CIPHER_SUITE_TKIP:
iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
- ieee80211_set_key_tx_seq(key, &seq);
+ atomic64_set(&key->tx_pn,
+ (u64)seq.tkip.iv16 |
+ ((u64)seq.tkip.iv32 << 16));
break;
}
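
The restore path above does the inverse of the suspend-side split: the IV halves recovered from the firmware's sequence counters are packed back into the 64-bit tx_pn with iv16 | (iv32 << 16). A standalone round-trip check of that packing:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t pn = 0x0000123456789abcULL;	/* any 48-bit packet number */
	uint16_t iv16 = (uint16_t)(pn & 0xffff);
	uint32_t iv32 = (uint32_t)(pn >> 16);

	/* same expression as the atomic64_set() above */
	assert(((uint64_t)iv16 | ((uint64_t)iv32 << 16)) == pn);
	return 0;
}
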
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 070e2af05ca2..62ae43d2ab7b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -126,7 +126,7 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
-static void iwl_free_fw_paging(struct iwl_mvm *mvm)
+void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
int i;
@@ -146,6 +146,8 @@ static void iwl_free_fw_paging(struct iwl_mvm *mvm)
get_order(mvm->fw_paging_db[i].fw_paging_size));
}
kfree(mvm->trans->paging_download_buf);
+ mvm->trans->paging_download_buf = NULL;
+
memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}
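
iwl_free_fw_paging() loses its static qualifier so the op-mode teardown added later in this patch can call it as well, and paging_download_buf is cleared after kfree(). A standalone illustration of why clearing the pointer makes a repeated pass through the free path harmless:

#include <stdlib.h>

int main(void)
{
	void *buf = malloc(16);

	/* kfree(NULL)/free(NULL) is defined as a no-op, so clearing
	 * the pointer after the first free prevents a double free if
	 * the teardown runs twice. */
	free(buf);
	buf = NULL;
	free(buf);	/* no-op */
	return 0;
}
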
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 01476f545695..53156810185d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -837,13 +837,16 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid,
- u16 *ssn, u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
bool tx_agg_ref = false;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ u8 buf_size = params->buf_size;
IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
sta->addr, tid, action);
@@ -2582,7 +2585,7 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
break;
case WLAN_CIPHER_SUITE_CCMP:
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index ebe37bb0ce4c..cc279e0961fa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1244,6 +1244,9 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+/* Paging */
+void iwl_free_fw_paging(struct iwl_mvm *mvm);
+
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 09a94a5efb61..fd8f4a80ddfc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -699,6 +699,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
+ iwl_free_fw_paging(mvm);
+
iwl_mvm_tof_clean(mvm);
ieee80211_free_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 8a1638ec7e28..dae2c40d605c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -299,6 +299,8 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
case WLAN_CIPHER_SUITE_TKIP:
tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+ pn = atomic64_inc_return(&keyconf->tx_pn);
+ ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
break;
@@ -423,6 +425,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
return -1;
}
+ /*
+ * Increase the pending frames counter, so that later when a reply comes
+ * in and the counter is decreased - we don't start getting negative
+ * values.
+ * Note that we don't need to make sure it isn't aggregated, since
+ * we're TXing to a non-station destination.
+ */
+ atomic_inc(&mvm->pending_frames[sta_id]);
+
return 0;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index a28414c50edf..e85e0737771c 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -844,7 +844,7 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
hdr->rt_chbitmask = cpu_to_le16(flags);
skb->dev = hwsim_mon;
- skb_set_mac_header(skb, 0);
+ skb_reset_mac_header(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
@@ -887,7 +887,7 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan,
memcpy(hdr11->addr1, addr, ETH_ALEN);
skb->dev = hwsim_mon;
- skb_set_mac_header(skb, 0);
+ skb_reset_mac_header(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
@@ -1334,10 +1334,8 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
data->tx_bytes += skb->len;
ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel);
- if (ack && skb->len >= 16) {
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ if (ack && skb->len >= 16)
mac80211_hwsim_monitor_ack(channel, hdr->addr2);
- }
ieee80211_tx_info_clear_status(txi);
@@ -1846,10 +1844,12 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+
switch (action) {
case IEEE80211_AMPDU_TX_START:
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 30e3aaae32e2..088429d0a634 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -5421,11 +5421,13 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
static int
mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
-
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ u8 buf_size = params->buf_size;
int i, rc = 0;
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_ampdu_stream *stream;
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index f715eee39851..e70dd9523911 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -334,11 +334,13 @@ static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
static int
mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size,
- bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct mt7601u_dev *dev = hw->priv;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
WARN_ON(msta->wcid.idx > GROUP_WCID(0));
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index a26afcab03ed..7fa0128de7e3 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -7936,10 +7936,11 @@ u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
EXPORT_SYMBOL_GPL(rt2800_get_tsf);
int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
struct rt2x00_sta *sta_priv = (struct rt2x00_sta *)sta->drv_priv;
int ret = 0;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index 440790b92b19..83f1a44fb9b4 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -218,9 +218,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw,
const struct ieee80211_tx_queue_params *params);
u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu);
+ struct ieee80211_ampdu_params *params);
int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
index 6aed923a709a..7d820c395375 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
@@ -5375,13 +5375,13 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
static int
rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size,
- bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
u8 ampdu_factor, ampdu_density;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
switch (action) {
case IEEE80211_AMPDU_TX_START:
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 4ae421ef30d9..f2507610314b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1371,11 +1371,13 @@ static void rtl_op_sta_notify(struct ieee80211_hw *hw,
static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
switch (action) {
case IEEE80211_AMPDU_TX_START:
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index b5bcc933a2a6..4df992de7d07 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -659,29 +659,24 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
* informs the f/w regarding this.
* @hw: Pointer to the ieee80211_hw structure.
* @vif: Pointer to the ieee80211_vif structure.
- * @action: ieee80211_ampdu_mlme_action enum.
- * @sta: Pointer to the ieee80211_sta structure.
- * @tid: Traffic identifier.
- * @ssn: Pointer to ssn value.
- * @buf_size: Buffer size (for kernel version > 2.6.38).
- * @amsdu: is AMSDU in AMPDU allowed
+ * @params: Pointer to A-MPDU action parameters
*
* Return: status: 0 on success, negative error code on failure.
*/
static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta,
- unsigned short tid,
- unsigned short *ssn,
- unsigned char buf_size,
- bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
int status = -EOPNOTSUPP;
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
u16 seq_no = 0;
u8 ii = 0;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ u8 buf_size = params->buf_size;
for (ii = 0; ii < RSI_MAX_VIFS; ii++) {
if (vif == adapter->vifs[ii])
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 06321c799c90..d0ddcde6c695 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -2129,9 +2129,7 @@ void cw1200_mcast_timeout(unsigned long arg)
int cw1200_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
/* Aggregation is implemented fully in firmware,
* including block ack negotiation. Do not allow
diff --git a/drivers/net/wireless/st/cw1200/sta.h b/drivers/net/wireless/st/cw1200/sta.h
index bebb3379017f..a0bacaa39b31 100644
--- a/drivers/net/wireless/st/cw1200/sta.h
+++ b/drivers/net/wireless/st/cw1200/sta.h
@@ -109,9 +109,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
u32 changed);
int cw1200_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu);
+ struct ieee80211_ampdu_params *params);
void cw1200_suspend_resume(struct cw1200_common *priv,
struct wsm_suspend_resume *arg);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index d1109c4f0f0d..45662cf3169f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5187,14 +5187,16 @@ out:
static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
u8 hlid, *ba_bitmap;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
tid);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 7e2c43f701bc..5d28e9405f32 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -382,18 +382,18 @@ static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
[ND_CMD_ARS_CAP] = {
.in_num = 2,
.in_sizes = { 8, 8, },
- .out_num = 2,
- .out_sizes = { 4, 4, },
+ .out_num = 4,
+ .out_sizes = { 4, 4, 4, 4, },
},
[ND_CMD_ARS_START] = {
- .in_num = 4,
- .in_sizes = { 8, 8, 2, 6, },
- .out_num = 1,
- .out_sizes = { 4, },
+ .in_num = 5,
+ .in_sizes = { 8, 8, 2, 1, 5, },
+ .out_num = 2,
+ .out_sizes = { 4, 4, },
},
[ND_CMD_ARS_STATUS] = {
- .out_num = 2,
- .out_sizes = { 4, UINT_MAX, },
+ .out_num = 3,
+ .out_sizes = { 4, 4, UINT_MAX, },
},
};
@@ -442,8 +442,8 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
return in_field[1];
else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
return out_field[1];
- else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 1)
- return ND_CMD_ARS_STATUS_MAX;
+ else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2)
+ return out_field[1] - 8;
return UINT_MAX;
}
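
With idx == 2 the variable ARS_STATUS payload size is now derived from the command output itself, minus 8 bytes of fixed header. A sketch of the layout this arithmetic assumes; the field names are hypothetical stand-ins for struct nd_cmd_ars_status:

#include <stdint.h>

struct ars_status_sketch {
	uint32_t status;	/* fixed header: 4 bytes */
	uint32_t out_length;	/* total output size, including this header */
	uint8_t records[];	/* out_length - 8 bytes of error records */
};
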
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7edf31671dab..8d0b54670184 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -41,7 +41,7 @@ struct pmem_device {
phys_addr_t phys_addr;
/* when non-zero this device is hosting a 'pfn' instance */
phys_addr_t data_offset;
- unsigned long pfn_flags;
+ u64 pfn_flags;
void __pmem *virt_addr;
size_t size;
struct badblocks bb;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 3cd921e6121e..03c46412fff4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -55,8 +55,9 @@ static void nvme_free_ns(struct kref *kref)
ns->disk->private_data = NULL;
spin_unlock(&dev_list_lock);
- nvme_put_ctrl(ns->ctrl);
put_disk(ns->disk);
+ ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
+ nvme_put_ctrl(ns->ctrl);
kfree(ns);
}
@@ -183,7 +184,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out_unmap;
}
- if (meta_buffer) {
+ if (meta_buffer && meta_len) {
struct bio_integrity_payload *bip;
meta = kmalloc(meta_len, GFP_KERNEL);
@@ -373,6 +374,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
if (copy_from_user(&io, uio, sizeof(io)))
return -EFAULT;
+ if (io.flags)
+ return -EINVAL;
switch (io.opcode) {
case nvme_cmd_write:
@@ -424,6 +427,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return -EACCES;
if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
return -EFAULT;
+ if (cmd.flags)
+ return -EINVAL;
memset(&c, 0, sizeof(c));
c.common.opcode = cmd.opcode;
@@ -556,6 +561,10 @@ static int nvme_revalidate_disk(struct gendisk *disk)
u16 old_ms;
unsigned short bs;
+ if (test_bit(NVME_NS_DEAD, &ns->flags)) {
+ set_capacity(disk, 0);
+ return -ENODEV;
+ }
if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
__func__, ns->ctrl->instance, ns->ns_id);
@@ -831,6 +840,23 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
return ret;
}
+static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
+ struct request_queue *q)
+{
+ if (ctrl->max_hw_sectors) {
+ u32 max_segments =
+ (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
+
+ blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
+ blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
+ }
+ if (ctrl->stripe_size)
+ blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+ if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+ blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+ blk_queue_virt_boundary(q, ctrl->page_size - 1);
+}
+
/*
* Initialize the cached copies of the Identify data and various controller
* register in our nvme_ctrl structure. This should be called as soon as
@@ -888,6 +914,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
}
+ nvme_set_queue_limits(ctrl, ctrl->admin_q);
+
kfree(id);
return 0;
}
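
nvme_set_queue_limits() factors the block-queue limit setup out of namespace creation so the admin queue is configured the same way. A worked instance of the segment bound it computes, with example values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* A 4 KiB controller page spans 8 sectors of 512 bytes; a
	 * 128 KiB request (max_hw_sectors = 256) can therefore touch
	 * at most 256 / 8 + 1 = 33 pages, i.e. 33 segments (the +1
	 * covers a misaligned start). */
	uint32_t page_size = 4096, max_hw_sectors = 256;
	uint32_t max_segments = max_hw_sectors / (page_size >> 9) + 1;

	assert(max_segments == 33);
	return 0;
}
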
@@ -1118,9 +1146,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (!ns)
return;
+ ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
+ if (ns->instance < 0)
+ goto out_free_ns;
+
ns->queue = blk_mq_init_queue(ctrl->tagset);
if (IS_ERR(ns->queue))
- goto out_free_ns;
+ goto out_release_instance;
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
ns->queue->queuedata = ns;
ns->ctrl = ctrl;
@@ -1134,17 +1166,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
ns->disk = disk;
ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
+
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
- if (ctrl->max_hw_sectors) {
- blk_queue_max_hw_sectors(ns->queue, ctrl->max_hw_sectors);
- blk_queue_max_segments(ns->queue,
- (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
- }
- if (ctrl->stripe_size)
- blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
- if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
- blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
- blk_queue_virt_boundary(ns->queue, ctrl->page_size - 1);
+ nvme_set_queue_limits(ctrl, ns->queue);
disk->major = nvme_major;
disk->first_minor = 0;
@@ -1153,7 +1177,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
disk->queue = ns->queue;
disk->driverfs_dev = ctrl->device;
disk->flags = GENHD_FL_EXT_DEVT;
- sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, nsid);
+ sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
if (nvme_revalidate_disk(ns->disk))
goto out_free_disk;
@@ -1173,40 +1197,29 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
kfree(disk);
out_free_queue:
blk_cleanup_queue(ns->queue);
+ out_release_instance:
+ ida_simple_remove(&ctrl->ns_ida, ns->instance);
out_free_ns:
kfree(ns);
}
static void nvme_ns_remove(struct nvme_ns *ns)
{
- bool kill = nvme_io_incapable(ns->ctrl) &&
- !blk_queue_dying(ns->queue);
-
- lockdep_assert_held(&ns->ctrl->namespaces_mutex);
-
- if (kill) {
- blk_set_queue_dying(ns->queue);
+ if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
+ return;
- /*
- * The controller was shutdown first if we got here through
- * device removal. The shutdown may requeue outstanding
- * requests. These need to be aborted immediately so
- * del_gendisk doesn't block indefinitely for their completion.
- */
- blk_mq_abort_requeue_list(ns->queue);
- }
if (ns->disk->flags & GENHD_FL_UP) {
if (blk_get_integrity(ns->disk))
blk_integrity_unregister(ns->disk);
sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
&nvme_ns_attr_group);
del_gendisk(ns->disk);
- }
- if (kill || !blk_queue_dying(ns->queue)) {
blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
}
+ mutex_lock(&ns->ctrl->namespaces_mutex);
list_del_init(&ns->list);
+ mutex_unlock(&ns->ctrl->namespaces_mutex);
nvme_put_ns(ns);
}
@@ -1300,10 +1313,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns, *next;
- mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
nvme_ns_remove(ns);
- mutex_unlock(&ctrl->namespaces_mutex);
}
static DEFINE_IDA(nvme_instance_ida);
@@ -1350,6 +1361,7 @@ static void nvme_free_ctrl(struct kref *kref)
put_device(ctrl->device);
nvme_release_instance(ctrl);
+ ida_destroy(&ctrl->ns_ida);
ctrl->ops->free_ctrl(ctrl);
}
@@ -1390,6 +1402,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
}
get_device(ctrl->device);
dev_set_drvdata(ctrl->device, ctrl);
+ ida_init(&ctrl->ns_ida);
spin_lock(&dev_list_lock);
list_add_tail(&ctrl->node, &nvme_ctrl_list);
@@ -1402,6 +1415,38 @@ out:
return ret;
}
+/**
+ * nvme_kill_queues(): Ends all namespace queues
+ * @ctrl: the dead controller whose queues need to be ended
+ *
+ * Call this function when the driver determines it is unable to get the
+ * controller in a state capable of servicing IO.
+ */
+void nvme_kill_queues(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ if (!kref_get_unless_zero(&ns->kref))
+ continue;
+
+ /*
+ * Revalidating a dead namespace sets capacity to 0. This stops
+ * buffered writers from dirtying pages that can't be synced.
+ */
+ if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+ revalidate_disk(ns->disk);
+
+ blk_set_queue_dying(ns->queue);
+ blk_mq_abort_requeue_list(ns->queue);
+ blk_mq_start_stopped_hw_queues(ns->queue, true);
+
+ nvme_put_ns(ns);
+ }
+ mutex_unlock(&ctrl->namespaces_mutex);
+}
+
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9664d07d807d..fb15ba5f5d19 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -72,6 +72,7 @@ struct nvme_ctrl {
struct mutex namespaces_mutex;
struct device *device; /* char device */
struct list_head node;
+ struct ida ns_ida;
char name[12];
char serial[20];
@@ -102,6 +103,7 @@ struct nvme_ns {
struct request_queue *queue;
struct gendisk *disk;
struct kref kref;
+ int instance;
u8 eui[8];
u8 uuid[16];
@@ -112,6 +114,11 @@ struct nvme_ns {
bool ext;
u8 pi_type;
int type;
+ unsigned long flags;
+
+#define NVME_NS_REMOVING 0
+#define NVME_NS_DEAD 1
+
u64 mode_select_num_blocks;
u32 mode_select_block_len;
};
@@ -240,6 +247,7 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
+void nvme_kill_queues(struct nvme_ctrl *ctrl);
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, unsigned int flags);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a128672472ec..680f5780750c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -86,7 +86,6 @@ struct nvme_queue;
static int nvme_reset(struct nvme_dev *dev);
static void nvme_process_cq(struct nvme_queue *nvmeq);
-static void nvme_remove_dead_ctrl(struct nvme_dev *dev);
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
/*
@@ -120,6 +119,7 @@ struct nvme_dev {
unsigned long flags;
#define NVME_CTRL_RESETTING 0
+#define NVME_CTRL_REMOVING 1
struct nvme_ctrl ctrl;
struct completion ioq_wait;
@@ -286,6 +286,17 @@ static int nvme_init_request(void *data, struct request *req,
return 0;
}
+static void nvme_queue_scan(struct nvme_dev *dev)
+{
+ /*
+ * Do not queue new scan work when a controller is reset during
+ * removal.
+ */
+ if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
+ return;
+ queue_work(nvme_workq, &dev->scan_work);
+}
+
static void nvme_complete_async_event(struct nvme_dev *dev,
struct nvme_completion *cqe)
{
@@ -300,7 +311,7 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
switch (result & 0xff07) {
case NVME_AER_NOTICE_NS_CHANGED:
dev_info(dev->dev, "rescanning\n");
- queue_work(nvme_workq, &dev->scan_work);
+ nvme_queue_scan(dev);
default:
dev_warn(dev->dev, "async event result %08x\n", result);
}
@@ -679,7 +690,10 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
spin_lock_irq(&nvmeq->q_lock);
if (unlikely(nvmeq->cq_vector < 0)) {
- ret = BLK_MQ_RQ_QUEUE_BUSY;
+ if (ns && !test_bit(NVME_NS_DEAD, &ns->flags))
+ ret = BLK_MQ_RQ_QUEUE_BUSY;
+ else
+ ret = BLK_MQ_RQ_QUEUE_ERROR;
spin_unlock_irq(&nvmeq->q_lock);
goto out;
}
@@ -1250,6 +1264,12 @@ static struct blk_mq_ops nvme_mq_ops = {
static void nvme_dev_remove_admin(struct nvme_dev *dev)
{
if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
+ /*
+ * If the controller was reset during removal, it's possible
+ * user requests may be waiting on a stopped queue. Start the
+ * queue to flush these to completion.
+ */
+ blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);
blk_cleanup_queue(dev->ctrl.admin_q);
blk_mq_free_tag_set(&dev->admin_tagset);
}
@@ -1690,14 +1710,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
return 0;
dev->ctrl.tagset = &dev->tagset;
}
- queue_work(nvme_workq, &dev->scan_work);
+ nvme_queue_scan(dev);
return 0;
}
-static int nvme_dev_map(struct nvme_dev *dev)
+static int nvme_pci_enable(struct nvme_dev *dev)
{
u64 cap;
- int bars, result = -ENOMEM;
+ int result = -ENOMEM;
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (pci_enable_device_mem(pdev))
@@ -1705,24 +1725,14 @@ static int nvme_dev_map(struct nvme_dev *dev)
dev->entry[0].vector = pdev->irq;
pci_set_master(pdev);
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- if (!bars)
- goto disable_pci;
-
- if (pci_request_selected_regions(pdev, bars, "nvme"))
- goto disable_pci;
if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
goto disable;
- dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
- if (!dev->bar)
- goto disable;
-
if (readl(dev->bar + NVME_REG_CSTS) == -1) {
result = -ENODEV;
- goto unmap;
+ goto disable;
}
/*
@@ -1732,7 +1742,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
if (!pdev->irq) {
result = pci_enable_msix(pdev, dev->entry, 1);
if (result < 0)
- goto unmap;
+ goto disable;
}
cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
@@ -1759,18 +1769,20 @@ static int nvme_dev_map(struct nvme_dev *dev)
pci_save_state(pdev);
return 0;
- unmap:
- iounmap(dev->bar);
- dev->bar = NULL;
disable:
- pci_release_regions(pdev);
- disable_pci:
pci_disable_device(pdev);
return result;
}
static void nvme_dev_unmap(struct nvme_dev *dev)
{
+ if (dev->bar)
+ iounmap(dev->bar);
+ pci_release_regions(to_pci_dev(dev->dev));
+}
+
+static void nvme_pci_disable(struct nvme_dev *dev)
+{
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (pdev->msi_enabled)
@@ -1778,12 +1790,6 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
else if (pdev->msix_enabled)
pci_disable_msix(pdev);
- if (dev->bar) {
- iounmap(dev->bar);
- dev->bar = NULL;
- pci_release_regions(pdev);
- }
-
if (pci_is_enabled(pdev)) {
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
@@ -1842,7 +1848,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_dev_list_remove(dev);
mutex_lock(&dev->shutdown_lock);
- if (dev->bar) {
+ if (pci_is_enabled(to_pci_dev(dev->dev))) {
nvme_stop_queues(&dev->ctrl);
csts = readl(dev->bar + NVME_REG_CSTS);
}
@@ -1855,7 +1861,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_disable_io_queues(dev);
nvme_disable_admin_queue(dev, shutdown);
}
- nvme_dev_unmap(dev);
+ nvme_pci_disable(dev);
for (i = dev->queue_count - 1; i >= 0; i--)
nvme_clear_queue(dev->queues[i]);
@@ -1899,10 +1905,20 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
kfree(dev);
}
+static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
+{
+ dev_warn(dev->dev, "Removing after probe failure status: %d\n", status);
+
+ kref_get(&dev->ctrl.kref);
+ nvme_dev_disable(dev, false);
+ if (!schedule_work(&dev->remove_work))
+ nvme_put_ctrl(&dev->ctrl);
+}
+
static void nvme_reset_work(struct work_struct *work)
{
struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
- int result;
+ int result = -ENODEV;
if (WARN_ON(test_bit(NVME_CTRL_RESETTING, &dev->flags)))
goto out;
@@ -1911,37 +1927,37 @@ static void nvme_reset_work(struct work_struct *work)
* If we're called to reset a live controller first shut it down before
* moving on.
*/
- if (dev->bar)
+ if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
set_bit(NVME_CTRL_RESETTING, &dev->flags);
- result = nvme_dev_map(dev);
+ result = nvme_pci_enable(dev);
if (result)
goto out;
result = nvme_configure_admin_queue(dev);
if (result)
- goto unmap;
+ goto out;
nvme_init_queue(dev->queues[0], 0);
result = nvme_alloc_admin_tags(dev);
if (result)
- goto disable;
+ goto out;
result = nvme_init_identify(&dev->ctrl);
if (result)
- goto free_tags;
+ goto out;
result = nvme_setup_io_queues(dev);
if (result)
- goto free_tags;
+ goto out;
dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
result = nvme_dev_list_add(dev);
if (result)
- goto remove;
+ goto out;
/*
* Keep the controller around but remove all namespaces if we don't have
@@ -1958,19 +1974,8 @@ static void nvme_reset_work(struct work_struct *work)
clear_bit(NVME_CTRL_RESETTING, &dev->flags);
return;
- remove:
- nvme_dev_list_remove(dev);
- free_tags:
- nvme_dev_remove_admin(dev);
- blk_put_queue(dev->ctrl.admin_q);
- dev->ctrl.admin_q = NULL;
- dev->queues[0]->tags = NULL;
- disable:
- nvme_disable_admin_queue(dev, false);
- unmap:
- nvme_dev_unmap(dev);
out:
- nvme_remove_dead_ctrl(dev);
+ nvme_remove_dead_ctrl(dev, result);
}
static void nvme_remove_dead_ctrl_work(struct work_struct *work)
@@ -1978,19 +1983,12 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
struct pci_dev *pdev = to_pci_dev(dev->dev);
+ nvme_kill_queues(&dev->ctrl);
if (pci_get_drvdata(pdev))
pci_stop_and_remove_bus_device_locked(pdev);
nvme_put_ctrl(&dev->ctrl);
}
-static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
-{
- dev_warn(dev->dev, "Removing after probe failure\n");
- kref_get(&dev->ctrl.kref);
- if (!schedule_work(&dev->remove_work))
- nvme_put_ctrl(&dev->ctrl);
-}
-
static int nvme_reset(struct nvme_dev *dev)
{
if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
@@ -2042,6 +2040,27 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.free_ctrl = nvme_pci_free_ctrl,
};
+static int nvme_dev_map(struct nvme_dev *dev)
+{
+ int bars;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (!bars)
+ return -ENODEV;
+ if (pci_request_selected_regions(pdev, bars, "nvme"))
+ return -ENODEV;
+
+ dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+ if (!dev->bar)
+ goto release;
+
+ return 0;
+ release:
+ pci_release_regions(pdev);
+ return -ENODEV;
+}
+
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int node, result = -ENOMEM;
@@ -2066,6 +2085,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->dev = get_device(&pdev->dev);
pci_set_drvdata(pdev, dev);
+ result = nvme_dev_map(dev);
+ if (result)
+ goto free;
+
INIT_LIST_HEAD(&dev->node);
INIT_WORK(&dev->scan_work, nvme_dev_scan);
INIT_WORK(&dev->reset_work, nvme_reset_work);
@@ -2089,6 +2112,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nvme_release_prp_pools(dev);
put_pci:
put_device(dev->dev);
+ nvme_dev_unmap(dev);
free:
kfree(dev->queues);
kfree(dev->entry);
@@ -2112,10 +2136,16 @@ static void nvme_shutdown(struct pci_dev *pdev)
nvme_dev_disable(dev, true);
}
+/*
+ * The driver's remove callback may run on a device left in a partially
+ * initialized state, so it must not depend on any particular device
+ * state in order to proceed.
+ */
static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
+ set_bit(NVME_CTRL_REMOVING, &dev->flags);
pci_set_drvdata(pdev, NULL);
flush_work(&dev->scan_work);
nvme_remove_namespaces(&dev->ctrl);
@@ -2126,6 +2156,7 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_free_queues(dev, 0);
nvme_release_cmb(dev);
nvme_release_prp_pools(dev);
+ nvme_dev_unmap(dev);
nvme_put_ctrl(&dev->ctrl);
}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 39c4be41ef83..5e7838290998 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -211,7 +211,6 @@ static bool of_mdiobus_child_is_phy(struct device_node *child)
int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
{
struct device_node *child;
- const __be32 *paddr;
bool scanphys = false;
int addr, rc;
@@ -246,8 +245,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
/* auto scan for PHYs with empty reg property */
for_each_available_child_of_node(np, child) {
/* Skip PHYs with reg property set */
- paddr = of_get_property(child, "reg", NULL);
- if (paddr)
+ if (of_find_property(child, "reg", NULL))
continue;
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
@@ -305,6 +303,7 @@ EXPORT_SYMBOL(of_phy_find_device);
* @dev: pointer to net_device claiming the phy
* @phy_np: Pointer to device tree node for the PHY
* @hndlr: Link state callback for the network device
+ * @flags: flags to pass to the PHY
* @iface: PHY data interface type
*
* If successful, returns a pointer to the phy_device with the embedded
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 75a605426538..d1cdd9c992ac 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -14,6 +14,7 @@ config PCI_DRA7XX
config PCI_MVEBU
bool "Marvell EBU PCIe controller"
depends on ARCH_MVEBU || ARCH_DOVE
+ depends on ARM
depends on OF
config PCIE_DW
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index ed34c9520a02..6153853ca9c3 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -58,11 +58,6 @@
#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
-static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
-{
- return sys->private_data;
-}
-
static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
u32 *bit_pos)
{
@@ -108,7 +103,7 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
struct pcie_port *pp;
msi = irq_data_get_msi_desc(d);
- pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+ pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
ks_pcie = to_keystone_pcie(pp);
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
@@ -146,7 +141,7 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
u32 offset;
msi = irq_data_get_msi_desc(d);
- pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+ pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
ks_pcie = to_keystone_pcie(pp);
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
@@ -167,7 +162,7 @@ static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
u32 offset;
msi = irq_data_get_msi_desc(d);
- pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+ pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
ks_pcie = to_keystone_pcie(pp);
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 3923bed93c7e..f39961bcf7aa 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -77,6 +77,16 @@ static void ls_pcie_fix_class(struct ls_pcie *pcie)
iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
}
+/* Drop MSG TLP except for Vendor MSG */
+static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
+{
+ u32 val;
+
+ val = ioread32(pcie->dbi + PCIE_STRFMR1);
+ val &= 0xDFFFFFFF;
+ iowrite32(val, pcie->dbi + PCIE_STRFMR1);
+}
+
static int ls1021_pcie_link_up(struct pcie_port *pp)
{
u32 state;
@@ -97,7 +107,7 @@ static int ls1021_pcie_link_up(struct pcie_port *pp)
static void ls1021_pcie_host_init(struct pcie_port *pp)
{
struct ls_pcie *pcie = to_ls_pcie(pp);
- u32 val, index[2];
+ u32 index[2];
pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
"fsl,pcie-scfg");
@@ -116,13 +126,7 @@ static void ls1021_pcie_host_init(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
- /*
- * LS1021A Workaround for internal TKT228622
- * to fix the INTx hang issue
- */
- val = ioread32(pcie->dbi + PCIE_STRFMR1);
- val &= 0xffff;
- iowrite32(val, pcie->dbi + PCIE_STRFMR1);
+ ls_pcie_drop_msg_tlp(pcie);
}
static int ls_pcie_link_up(struct pcie_port *pp)
@@ -147,6 +151,7 @@ static void ls_pcie_host_init(struct pcie_port *pp)
iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
ls_pcie_fix_class(pcie);
ls_pcie_clear_multifunction(pcie);
+ ls_pcie_drop_msg_tlp(pcie);
iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
}
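
ls_pcie_drop_msg_tlp() masks PCIE_STRFMR1 with 0xDFFFFFFF, which clears exactly bit 29, whereas the old inline TKT228622 workaround (val &= 0xffff) also wiped bits 16-28. A standalone check of the mask:

#include <assert.h>

int main(void)
{
	/* 0xDFFFFFFF == ~(1 << 29): only bit 29 is cleared. */
	assert(0xDFFFFFFFu == ~(1u << 29));
	return 0;
}
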
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index c777b97207d5..5f70fee59a94 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -53,7 +53,7 @@ struct pcifront_device {
};
struct pcifront_sd {
- int domain;
+ struct pci_sysdata sd;
struct pcifront_device *pdev;
};
@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
unsigned int domain, unsigned int bus,
struct pcifront_device *pdev)
{
- sd->domain = domain;
+ /* Because we do not expose that information via XenBus. */
+ sd->sd.node = first_online_node;
+ sd->sd.domain = domain;
sd->pdev = pdev;
}
@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
domain, bus);
- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
- sd = kmalloc(sizeof(*sd), GFP_KERNEL);
+ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
+ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
if (!bus_entry || !sd) {
err = -ENOMEM;
goto err_out;
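
Making struct pci_sysdata the first member of struct pcifront_sd lets generic x86 PCI code, which casts bus->sysdata to struct pci_sysdata *, keep working unchanged. A standalone illustration of the first-member cast this relies on; the names are hypothetical:

#include <stdio.h>

struct generic_sd { int domain; int node; };

struct frontend_sd {
	struct generic_sd sd;	/* must stay the first member */
	void *pdev;
};

/* A pointer to a struct may be converted to a pointer to its first
 * member (C11 6.7.2.1p15), so generic code can read the domain. */
static int domain_of(void *sysdata)
{
	return ((struct generic_sd *)sysdata)->domain;
}

int main(void)
{
	struct frontend_sd sd = { { 3, 0 }, NULL };

	printf("%d\n", domain_of(&sd));	/* 3 */
	return 0;
}
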
diff --git a/drivers/power/bq27xxx_battery_i2c.c b/drivers/power/bq27xxx_battery_i2c.c
index 9429e66be096..8eafc6f0df88 100644
--- a/drivers/power/bq27xxx_battery_i2c.c
+++ b/drivers/power/bq27xxx_battery_i2c.c
@@ -21,6 +21,9 @@
#include <linux/power/bq27xxx_battery.h>
+static DEFINE_IDR(battery_id);
+static DEFINE_MUTEX(battery_mutex);
+
static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data)
{
struct bq27xxx_device_info *di = data;
@@ -70,19 +73,33 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
{
struct bq27xxx_device_info *di;
int ret;
+ char *name;
+ int num;
+
+ /* Get new ID for the new battery device */
+ mutex_lock(&battery_mutex);
+ num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
+ mutex_unlock(&battery_mutex);
+ if (num < 0)
+ return num;
+
+ name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%d", id->name, num);
+ if (!name)
+ goto err_mem;
di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
if (!di)
- return -ENOMEM;
+ goto err_mem;
+ di->id = num;
di->dev = &client->dev;
di->chip = id->driver_data;
- di->name = id->name;
+ di->name = name;
di->bus.read = bq27xxx_battery_i2c_read;
ret = bq27xxx_battery_setup(di);
if (ret)
- return ret;
+ goto err_failed;
/* Schedule a polling after about 1 min */
schedule_delayed_work(&di->work, 60 * HZ);
@@ -103,6 +120,16 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
}
return 0;
+
+err_mem:
+ ret = -ENOMEM;
+
+err_failed:
+ mutex_lock(&battery_mutex);
+ idr_remove(&battery_id, num);
+ mutex_unlock(&battery_mutex);
+
+ return ret;
}
static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
@@ -111,6 +138,10 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
bq27xxx_battery_teardown(di);
+ mutex_lock(&battery_mutex);
+ idr_remove(&battery_id, di->id);
+ mutex_unlock(&battery_mutex);
+
return 0;
}
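
The probe path now reserves the smallest free id from an IDR under a mutex, bakes it into the power-supply name via devm_kasprintf(), and releases the id on any failure and on remove. A standalone imitation of the resulting naming scheme:

#include <stdio.h>

int main(void)
{
	/* With ids 0 and 1 already taken, the next probe of the same
	 * chip type gets "bq27500-2" instead of a clashing "bq27500". */
	int used[4] = { 1, 1, 0, 0 };
	char name[32];
	int num;

	for (num = 0; num < 4 && used[num]; num++)
		;
	snprintf(name, sizeof(name), "%s-%d", "bq27500", num);
	printf("%s\n", name);	/* bq27500-2 */
	return 0;
}
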
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 804806e1cbb4..339f6b7f4803 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -13,6 +13,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
@@ -158,7 +159,6 @@ static struct scsi_transport_template *cxgb4i_stt;
* open/close/abort and data send/receive.
*/
-#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK 0x3FFU
#define MAX_IMM_TX_PKT_LEN 256
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 3b3e0998fa6e..d6a691e27d33 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4002,6 +4002,7 @@ static ssize_t ipr_store_update_fw(struct device *dev,
struct ipr_sglist *sglist;
char fname[100];
char *src;
+ char *endline;
int result, dnld_size;
if (!capable(CAP_SYS_ADMIN))
@@ -4009,6 +4010,10 @@ static ssize_t ipr_store_update_fw(struct device *dev,
snprintf(fname, sizeof(fname), "%s", buf);
+ endline = strchr(fname, '\n');
+ if (endline)
+ *endline = '\0';
+
if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
return -EIO;
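
Firmware file names written through sysfs typically arrive with a trailing newline (e.g. from echo), which previously became part of the requested file name. A standalone demonstration of the strchr() truncation added above:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char fname[100];
	char *endline;

	/* what `echo ipr-fw.bin > .../update_fw` delivers */
	snprintf(fname, sizeof(fname), "%s", "ipr-fw.bin\n");

	endline = strchr(fname, '\n');
	if (endline)
		*endline = '\0';

	printf("[%s]\n", fname);	/* [ipr-fw.bin] */
	return 0;
}
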
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fa6b2c4eb7a2..8c6e31874171 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1344,6 +1344,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
switch (ret) {
case BLKPREP_KILL:
+ case BLKPREP_INVALID:
req->errors = DID_NO_CONNECT << 16;
/* release the command and kill it */
if (req->special) {
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 91a003011acf..a9bac3bf20de 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
static int __init sh_pm_runtime_init(void)
{
- if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
+ if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
if (!of_find_compatible_node(NULL, NULL,
"renesas,cpg-mstp-clocks"))
return 0;
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 0c675861623f..d8e4219c2324 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -83,6 +83,7 @@ config SSB_SDIOHOST
config SSB_HOST_SOC
bool "Support for SSB bus on SoC"
depends on SSB && BCM47XX_NVRAM
+ select SSB_SPROM
help
Host interface for a SSB directly mapped into memory. This is
for some Broadcom SoCs from the BCM47xx and BCM53xx lines.
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 3ec7e65a3ffa..db49af90217e 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -147,7 +147,7 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
mutex_lock(&mdev->graph_mutex);
ret = media_entity_graph_walk_init(&graph, entity->graph_obj.mdev);
if (ret) {
- mutex_unlock(&video->lock);
+ mutex_unlock(&mdev->graph_mutex);
return -ENOMEM;
}
media_entity_graph_walk_start(&graph, entity);
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index b668db6a45fb..1a2dda09b69d 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1210,7 +1210,7 @@ static void vnt_fill_txkey(struct ieee80211_hdr *hdr, u8 *key_buffer,
struct sk_buff *skb, u16 payload_len,
struct vnt_mic_hdr *mic_hdr)
{
- struct ieee80211_key_seq seq;
+ u64 pn64;
u8 *iv = ((u8 *)hdr + ieee80211_get_hdrlen_from_skb(skb));
/* strip header and icv len from payload */
@@ -1243,9 +1243,13 @@ static void vnt_fill_txkey(struct ieee80211_hdr *hdr, u8 *key_buffer,
mic_hdr->payload_len = cpu_to_be16(payload_len);
ether_addr_copy(mic_hdr->mic_addr2, hdr->addr2);
- ieee80211_get_key_tx_seq(tx_key, &seq);
-
- memcpy(mic_hdr->ccmp_pn, seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
+ pn64 = atomic64_read(&tx_key->tx_pn);
+ mic_hdr->ccmp_pn[5] = pn64;
+ mic_hdr->ccmp_pn[4] = pn64 >> 8;
+ mic_hdr->ccmp_pn[3] = pn64 >> 16;
+ mic_hdr->ccmp_pn[2] = pn64 >> 24;
+ mic_hdr->ccmp_pn[1] = pn64 >> 32;
+ mic_hdr->ccmp_pn[0] = pn64 >> 40;
if (ieee80211_has_a4(hdr->frame_control))
mic_hdr->hlen = cpu_to_be16(28);
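
The six assignments above (and the identical vt6656 hunk below) serialize the 48-bit CCMP packet number most-significant byte first, matching the byte order of the MIC header's PN field. A standalone check of that ordering:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t pn64 = 0x0000418293a4b5c6ULL;
	uint8_t pn[6];
	int i;

	/* pn[0] gets bits 47..40, ..., pn[5] gets bits 7..0 */
	for (i = 0; i < 6; i++)
		pn[5 - i] = (uint8_t)(pn64 >> (8 * i));

	assert(pn[0] == 0x41 && pn[5] == 0xc6);
	return 0;
}
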
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index a0c69b697901..b74e32001318 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -716,7 +716,7 @@ static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context,
u16 payload_len, struct vnt_mic_hdr *mic_hdr)
{
struct ieee80211_hdr *hdr = tx_context->hdr;
- struct ieee80211_key_seq seq;
+ u64 pn64;
u8 *iv = ((u8 *)hdr + ieee80211_get_hdrlen_from_skb(skb));
/* strip header and icv len from payload */
@@ -749,9 +749,13 @@ static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context,
mic_hdr->payload_len = cpu_to_be16(payload_len);
ether_addr_copy(mic_hdr->mic_addr2, hdr->addr2);
- ieee80211_get_key_tx_seq(tx_key, &seq);
-
- memcpy(mic_hdr->ccmp_pn, seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
+ pn64 = atomic64_read(&tx_key->tx_pn);
+ mic_hdr->ccmp_pn[5] = pn64;
+ mic_hdr->ccmp_pn[4] = pn64 >> 8;
+ mic_hdr->ccmp_pn[3] = pn64 >> 16;
+ mic_hdr->ccmp_pn[2] = pn64 >> 24;
+ mic_hdr->ccmp_pn[1] = pn64 >> 32;
+ mic_hdr->ccmp_pn[0] = pn64 >> 40;
if (ieee80211_has_a4(hdr->frame_control))
mic_hdr->hlen = cpu_to_be16(28);
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index b59195edf636..b635ab67490d 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -85,8 +85,8 @@ static int ci_hdrc_pci_probe(struct pci_dev *pdev,
/* register a nop PHY */
ci->phy = usb_phy_generic_register();
- if (!ci->phy)
- return -ENOMEM;
+ if (IS_ERR(ci->phy))
+ return PTR_ERR(ci->phy);
memset(res, 0, sizeof(res));
res[0].start = pci_resource_start(pdev, 0);
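
usb_phy_generic_register() reports failure with an ERR_PTR-encoded pointer, never NULL, so the old NULL check could pass a poisoned pointer along. A standalone imitation of the kernel's ERR_PTR convention that the IS_ERR()/PTR_ERR() fix relies on:

#include <stdio.h>

#define MAX_ERRNO 4095

/* Errors are encoded as the top MAX_ERRNO values of the address
 * space, so a plain NULL test never catches them. */
static void *err_ptr(long error) { return (void *)error; }
static long ptr_err(const void *ptr) { return (long)ptr; }
static int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *phy = err_ptr(-12);	/* -ENOMEM */

	printf("NULL? %d  is_err? %d  err %ld\n",
	       phy == NULL, is_err(phy), ptr_err(phy));
	return 0;
}
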
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index a4f7db2e18dd..df47110bad2d 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -100,6 +100,9 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
if (sscanf(buf, "%u", &mode) != 1)
return -EINVAL;
+ if (mode > 255)
+ return -EBADRQC;
+
pm_runtime_get_sync(ci->dev);
spin_lock_irqsave(&ci->lock, flags);
ret = hw_port_test_set(ci, mode);
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 45f86da1d6d3..03b6743461d1 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -158,7 +158,7 @@ static void ci_otg_work(struct work_struct *work)
int ci_hdrc_otg_init(struct ci_hdrc *ci)
{
INIT_WORK(&ci->work, ci_otg_work);
- ci->wq = create_singlethread_workqueue("ci_otg");
+ ci->wq = create_freezable_workqueue("ci_otg");
if (!ci->wq) {
dev_err(ci->dev, "can't create workqueue\n");
return -ENODEV;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 350dcd9af5d8..51b436918f78 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -5401,6 +5401,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
}
bos = udev->bos;
+ udev->bos = NULL;
for (i = 0; i < SET_CONFIG_TRIES; ++i) {
@@ -5493,11 +5494,8 @@ done:
usb_set_usb2_hardware_lpm(udev, 1);
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
- /* release the new BOS descriptor allocated by hub_port_init() */
- if (udev->bos != bos) {
- usb_release_bos_descriptor(udev);
- udev->bos = bos;
- }
+ usb_release_bos_descriptor(udev);
+ udev->bos = bos;
return 0;
re_enumerate:
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index fd95ba6ec317..f0decc0d69b5 100644
--- a/drivers/usb/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
@@ -1,5 +1,6 @@
config USB_DWC2
tristate "DesignWare USB2 DRD Core Support"
+ depends on HAS_DMA
depends on USB || USB_GADGET
help
Say Y here if your system has a Dual Role Hi-Speed USB
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index e991d55914db..46c4ba75dc2a 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -619,6 +619,12 @@ void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
__func__, hsotg->dr_mode);
break;
}
+
+ /*
+ * NOTE: This is required for some rockchip soc based
+ * platforms.
+ */
+ msleep(50);
}
/*
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 36606fc33c0d..a41274aa52ad 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -1174,14 +1174,11 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
halt_status, n_bytes,
xfer_done);
- if (*xfer_done && urb->status != -EINPROGRESS)
- failed = 1;
-
- if (failed) {
+ if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
dwc2_host_complete(hsotg, qtd, urb->status);
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
- dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
- failed, *xfer_done, urb->status);
+ dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
+ failed, *xfer_done);
return failed;
}
@@ -1236,21 +1233,23 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
int i;
+ int qtd_desc_count;
qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
xfer_done = 0;
+ qtd_desc_count = qtd->n_desc;
- for (i = 0; i < qtd->n_desc; i++) {
+ for (i = 0; i < qtd_desc_count; i++) {
if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
desc_num, halt_status,
- &xfer_done)) {
- qtd = NULL;
- break;
- }
+ &xfer_done))
+ goto stop_scan;
+
desc_num++;
}
}
+stop_scan:
if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
/*
* Resetting the data toggle for bulk and interrupt endpoints
@@ -1258,7 +1257,7 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
*/
if (halt_status == DWC2_HC_XFER_STALL)
qh->data_toggle = DWC2_HC_PID_DATA0;
- else if (qtd)
+ else
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
}
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index f8253803a050..cadba8b13c48 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -525,11 +525,19 @@ void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
+ if (WARN(!chan || !chan->qh,
+ "chan->qh must be specified for non-control eps\n"))
+ return;
+
if (pid == TSIZ_SC_MC_PID_DATA0)
chan->qh->data_toggle = DWC2_HC_PID_DATA0;
else
chan->qh->data_toggle = DWC2_HC_PID_DATA1;
} else {
+ if (WARN(!qtd,
+ "qtd must be specified for control eps\n"))
+ return;
+
if (pid == TSIZ_SC_MC_PID_DATA0)
qtd->data_toggle = DWC2_HC_PID_DATA0;
else
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 29130682e547..e4f8b90d9627 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -856,7 +856,6 @@ struct dwc3 {
unsigned pullups_connected:1;
unsigned resize_fifos:1;
unsigned setup_packet_pending:1;
- unsigned start_config_issued:1;
unsigned three_stage_setup:1;
unsigned usb3_lpm_capable:1;
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 3a9354abcb68..8d6b75c2f53b 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -555,7 +555,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
int ret;
u32 reg;
- dwc->start_config_issued = false;
cfg = le16_to_cpu(ctrl->wValue);
switch (state) {
@@ -737,10 +736,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
break;
- case USB_REQ_SET_INTERFACE:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
- dwc->start_config_issued = false;
- /* Fall through */
default:
dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 7d1dd82a95ac..2363bad45af8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -385,24 +385,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
dep->trb_pool_dma = 0;
}
+static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
+
+/**
+ * dwc3_gadget_start_config - Configure EP resources
+ * @dwc: pointer to our controller context structure
+ * @dep: endpoint that is being enabled
+ *
+ * The assignment of transfer resources cannot perfectly follow the
+ * data book due to the fact that the controller driver does not have
+ * all knowledge of the configuration in advance. It is given this
+ * information piecemeal by the composite gadget framework after every
+ * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
+ * programming model in this scenario can cause errors. For two
+ * reasons:
+ *
+ * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
+ * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
+ * multiple interfaces.
+ *
+ * 2) The databook does not mention doing more DEPXFERCFG for new
+ * endpoint on alt setting (8.1.6).
+ *
+ * The following simplified method is used instead:
+ *
+ * All hardware endpoints can be assigned a transfer resource and this
+ * setting will stay persistent until either a core reset or
+ * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
+ * do DEPXFERCFG for every hardware endpoint as well. We are
+ * guaranteed that there are as many transfer resources as endpoints.
+ *
+ * This function is called for each endpoint when it is being enabled
+ * but is triggered only when called for EP0-out, which always happens
+ * first, and which should only happen in one of the above conditions.
+ */
static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
+ int i;
+ int ret;
+
+ if (dep->number)
+ return 0;
memset(&params, 0x00, sizeof(params));
+ cmd = DWC3_DEPCMD_DEPSTARTCFG;
- if (dep->number != 1) {
- cmd = DWC3_DEPCMD_DEPSTARTCFG;
- /* XferRscIdx == 0 for ep0 and 2 for the remaining */
- if (dep->number > 1) {
- if (dwc->start_config_issued)
- return 0;
- dwc->start_config_issued = true;
- cmd |= DWC3_DEPCMD_PARAM(2);
- }
+ ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+ if (ret)
+ return ret;
- return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+ struct dwc3_ep *dep = dwc->eps[i];
+
+ if (!dep)
+ continue;
+
+ ret = dwc3_gadget_set_xfer_resource(dwc, dep);
+ if (ret)
+ return ret;
}
return 0;
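
The comment block above is the key design note in this patch. As a plain-C
model of the scheme it describes (hypothetical names, not driver code):
DEPSTARTCFG is issued exactly once, when EP0 is enabled, and a transfer
resource is assigned to every hardware endpoint in that same pass, so later
SET_CONFIGURATION/SET_INTERFACE handling needs no further resource commands:

	#include <stdio.h>
	#include <stddef.h>

	#define NUM_EPS 8

	struct ep { int number; int has_xfer_resource; };

	/* Model of DEPXFERCFG: give one endpoint a transfer resource. */
	static int set_xfer_resource(struct ep *e)
	{
		e->has_xfer_resource = 1;
		return 0;
	}

	/* Model of dwc3_gadget_start_config(): a no-op except for EP0. */
	static int start_config(struct ep *eps, struct ep *dep)
	{
		size_t i;

		if (dep->number)	/* only EP0-out triggers this */
			return 0;

		printf("DEPSTARTCFG(0) issued once\n");
		for (i = 0; i < NUM_EPS; i++)
			if (set_xfer_resource(&eps[i]))
				return -1;
		return 0;
	}

	int main(void)
	{
		struct ep eps[NUM_EPS];
		size_t i;

		for (i = 0; i < NUM_EPS; i++)
			eps[i] = (struct ep){ .number = (int)i };
		/* Enabling any endpoint funnels through start_config()... */
		for (i = 0; i < NUM_EPS; i++)
			start_config(eps, &eps[i]);
		/* ...but resources were assigned once, during EP0 enable. */
		for (i = 0; i < NUM_EPS; i++)
			printf("ep%zu resource=%d\n", i,
			       eps[i].has_xfer_resource);
		return 0;
	}
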
@@ -516,10 +558,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
- ret = dwc3_gadget_set_xfer_resource(dwc, dep);
- if (ret)
- return ret;
-
dep->endpoint.desc = desc;
dep->comp_desc = comp_desc;
dep->type = usb_endpoint_type(desc);
@@ -1636,8 +1674,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
}
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
- dwc->start_config_issued = false;
-
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
@@ -2237,7 +2273,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
dwc3_disconnect_gadget(dwc);
- dwc->start_config_issued = false;
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->setup_packet_pending = false;
@@ -2288,7 +2323,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
dwc3_stop_active_transfers(dwc);
dwc3_clear_stall_all_ep(dwc);
- dwc->start_config_issued = false;
/* Reset device address to zero */
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 7e179f81d05c..87fb0fd6aaab 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -130,7 +130,8 @@ struct dev_data {
setup_can_stall : 1,
setup_out_ready : 1,
setup_out_error : 1,
- setup_abort : 1;
+ setup_abort : 1,
+ gadget_registered : 1;
unsigned setup_wLength;
/* the rest is basically write-once */
@@ -1179,7 +1180,8 @@ dev_release (struct inode *inode, struct file *fd)
/* closing ep0 === shutdown all */
- usb_gadget_unregister_driver (&gadgetfs_driver);
+ if (dev->gadget_registered)
+ usb_gadget_unregister_driver (&gadgetfs_driver);
/* at this point "good" hardware has disconnected the
* device from USB; the host won't see it any more.
@@ -1847,6 +1849,7 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
* kick in after the ep0 descriptor is closed.
*/
value = len;
+ dev->gadget_registered = true;
}
return value;
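
The new gadget_registered flag fixes an asymmetry: dev_release() always
called usb_gadget_unregister_driver(), even when dev_config() had never got
far enough to register the driver. The guard-flag idiom in isolation
(standalone sketch; names are hypothetical):

	#include <stdbool.h>
	#include <stdio.h>

	static bool registered;

	static void do_register(void)   { registered = true;  puts("registered"); }
	static void do_unregister(void) { registered = false; puts("unregistered"); }

	/* Only undo an action that actually happened. */
	static void release(void)
	{
		if (registered)
			do_unregister();
	}

	int main(void)
	{
		release();	/* early close: nothing to undo */
		do_register();
		release();	/* normal close: undo exactly once */
		return 0;
	}
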
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 53c0692f1b09..93d28cb00b76 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2340,7 +2340,7 @@ static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
{
struct qe_udc *udc;
struct device_node *np = ofdev->dev.of_node;
- unsigned int tmp_addr = 0;
+ unsigned long tmp_addr = 0;
struct usb_device_para __iomem *usbpram;
unsigned int i;
u64 size;
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index 4dff60d34f73..0d32052bf16f 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -369,9 +369,20 @@ static inline void set_max_speed(struct net2280_ep *ep, u32 max)
static const u32 ep_enhanced[9] = { 0x10, 0x60, 0x30, 0x80,
0x50, 0x20, 0x70, 0x40, 0x90 };
- if (ep->dev->enhanced_mode)
+ if (ep->dev->enhanced_mode) {
reg = ep_enhanced[ep->num];
- else{
+ switch (ep->dev->gadget.speed) {
+ case USB_SPEED_SUPER:
+ reg += 2;
+ break;
+ case USB_SPEED_FULL:
+ reg += 1;
+ break;
+ case USB_SPEED_HIGH:
+ default:
+ break;
+ }
+ } else {
reg = (ep->num + 1) * 0x10;
if (ep->dev->gadget.speed != USB_SPEED_HIGH)
reg += 1;
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index fd73a3ea07c2..b86a6f03592e 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -413,9 +413,10 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
if (!driver->udc_name || strcmp(driver->udc_name,
dev_name(&udc->dev)) == 0) {
ret = udc_bind_to_driver(udc, driver);
+ if (ret != -EPROBE_DEFER)
+ list_del(&driver->pending);
if (ret)
goto err4;
- list_del(&driver->pending);
break;
}
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 795a45b1b25b..58487a473521 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -662,7 +662,7 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
}
- channel->desired_mode = mode;
+ channel->desired_mode = *mode;
musb_writew(epio, MUSB_TXCSR, csr);
return 0;
@@ -2003,10 +2003,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
qh->offset,
urb->transfer_buffer_length);
- done = musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh,
- urb, xfer_len,
- iso_err);
- if (done)
+ if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
+ xfer_len, iso_err))
goto finish;
else
dev_err(musb->controller, "error: rx_dma failed\n");
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 970a30e155cb..72b387d592c2 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -757,14 +757,8 @@ static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
otg->host = host;
dev_dbg(otg->usb_phy->dev, "host driver registered w/ transceiver\n");
- /*
- * Kick the state machine work, if peripheral is not supported
- * or peripheral is already registered with us.
- */
- if (motg->pdata->mode == USB_DR_MODE_HOST || otg->gadget) {
- pm_runtime_get_sync(otg->usb_phy->dev);
- schedule_work(&motg->sm_work);
- }
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ schedule_work(&motg->sm_work);
return 0;
}
@@ -827,14 +821,8 @@ static int msm_otg_set_peripheral(struct usb_otg *otg,
dev_dbg(otg->usb_phy->dev,
"peripheral driver registered w/ tranceiver\n");
- /*
- * Kick the state machine work, if host is not supported
- * or host is already registered with us.
- */
- if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL || otg->host) {
- pm_runtime_get_sync(otg->usb_phy->dev);
- schedule_work(&motg->sm_work);
- }
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ schedule_work(&motg->sm_work);
return 0;
}
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index f612dda9c977..56ecb8b5115d 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -475,22 +475,6 @@ config USB_SERIAL_MOS7840
To compile this driver as a module, choose M here: the
module will be called mos7840. If unsure, choose N.
-config USB_SERIAL_MXUPORT11
- tristate "USB Moxa UPORT 11x0 Serial Driver"
- ---help---
- Say Y here if you want to use a MOXA UPort 11x0 Serial hub.
-
- This driver supports:
-
- - UPort 1110 : 1 port RS-232 USB to Serial Hub.
- - UPort 1130 : 1 port RS-422/485 USB to Serial Hub.
- - UPort 1130I : 1 port RS-422/485 USB to Serial Hub with Isolation.
- - UPort 1150 : 1 port RS-232/422/485 USB to Serial Hub.
- - UPort 1150I : 1 port RS-232/422/485 USB to Serial Hub with Isolation.
-
- To compile this driver as a module, choose M here: the
- module will be called mxu11x0.
-
config USB_SERIAL_MXUPORT
tristate "USB Moxa UPORT Serial Driver"
---help---
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index f3fa5e53702d..349d9df0895f 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_USB_SERIAL_METRO) += metro-usb.o
obj-$(CONFIG_USB_SERIAL_MOS7720) += mos7720.o
obj-$(CONFIG_USB_SERIAL_MOS7840) += mos7840.o
obj-$(CONFIG_USB_SERIAL_MXUPORT) += mxuport.o
-obj-$(CONFIG_USB_SERIAL_MXUPORT11) += mxu11x0.o
obj-$(CONFIG_USB_SERIAL_NAVMAN) += navman.o
obj-$(CONFIG_USB_SERIAL_OMNINET) += omninet.o
obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 987813b8a7f9..73a366de5102 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -163,6 +163,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
{ USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
diff --git a/drivers/usb/serial/mxu11x0.c b/drivers/usb/serial/mxu11x0.c
deleted file mode 100644
index 619607323bfd..000000000000
--- a/drivers/usb/serial/mxu11x0.c
+++ /dev/null
@@ -1,1006 +0,0 @@
-/*
- * USB Moxa UPORT 11x0 Serial Driver
- *
- * Copyright (C) 2007 MOXA Technologies Co., Ltd.
- * Copyright (C) 2015 Mathieu Othacehe <[email protected]>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- *
- * Supports the following Moxa USB to serial converters:
- * UPort 1110, 1 port RS-232 USB to Serial Hub.
- * UPort 1130, 1 port RS-422/485 USB to Serial Hub.
- * UPort 1130I, 1 port RS-422/485 USB to Serial Hub with isolation
- * protection.
- * UPort 1150, 1 port RS-232/422/485 USB to Serial Hub.
- * UPort 1150I, 1 port RS-232/422/485 USB to Serial Hub with isolation
- * protection.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/firmware.h>
-#include <linux/jiffies.h>
-#include <linux/serial.h>
-#include <linux/serial_reg.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/uaccess.h>
-#include <linux/usb.h>
-#include <linux/usb/serial.h>
-
-/* Vendor and product ids */
-#define MXU1_VENDOR_ID 0x110a
-#define MXU1_1110_PRODUCT_ID 0x1110
-#define MXU1_1130_PRODUCT_ID 0x1130
-#define MXU1_1150_PRODUCT_ID 0x1150
-#define MXU1_1151_PRODUCT_ID 0x1151
-#define MXU1_1131_PRODUCT_ID 0x1131
-
-/* Commands */
-#define MXU1_GET_VERSION 0x01
-#define MXU1_GET_PORT_STATUS 0x02
-#define MXU1_GET_PORT_DEV_INFO 0x03
-#define MXU1_GET_CONFIG 0x04
-#define MXU1_SET_CONFIG 0x05
-#define MXU1_OPEN_PORT 0x06
-#define MXU1_CLOSE_PORT 0x07
-#define MXU1_START_PORT 0x08
-#define MXU1_STOP_PORT 0x09
-#define MXU1_TEST_PORT 0x0A
-#define MXU1_PURGE_PORT 0x0B
-#define MXU1_RESET_EXT_DEVICE 0x0C
-#define MXU1_GET_OUTQUEUE 0x0D
-#define MXU1_WRITE_DATA 0x80
-#define MXU1_READ_DATA 0x81
-#define MXU1_REQ_TYPE_CLASS 0x82
-
-/* Module identifiers */
-#define MXU1_I2C_PORT 0x01
-#define MXU1_IEEE1284_PORT 0x02
-#define MXU1_UART1_PORT 0x03
-#define MXU1_UART2_PORT 0x04
-#define MXU1_RAM_PORT 0x05
-
-/* Modem status */
-#define MXU1_MSR_DELTA_CTS 0x01
-#define MXU1_MSR_DELTA_DSR 0x02
-#define MXU1_MSR_DELTA_RI 0x04
-#define MXU1_MSR_DELTA_CD 0x08
-#define MXU1_MSR_CTS 0x10
-#define MXU1_MSR_DSR 0x20
-#define MXU1_MSR_RI 0x40
-#define MXU1_MSR_CD 0x80
-#define MXU1_MSR_DELTA_MASK 0x0F
-#define MXU1_MSR_MASK 0xF0
-
-/* Line status */
-#define MXU1_LSR_OVERRUN_ERROR 0x01
-#define MXU1_LSR_PARITY_ERROR 0x02
-#define MXU1_LSR_FRAMING_ERROR 0x04
-#define MXU1_LSR_BREAK 0x08
-#define MXU1_LSR_ERROR 0x0F
-#define MXU1_LSR_RX_FULL 0x10
-#define MXU1_LSR_TX_EMPTY 0x20
-
-/* Modem control */
-#define MXU1_MCR_LOOP 0x04
-#define MXU1_MCR_DTR 0x10
-#define MXU1_MCR_RTS 0x20
-
-/* Mask settings */
-#define MXU1_UART_ENABLE_RTS_IN 0x0001
-#define MXU1_UART_DISABLE_RTS 0x0002
-#define MXU1_UART_ENABLE_PARITY_CHECKING 0x0008
-#define MXU1_UART_ENABLE_DSR_OUT 0x0010
-#define MXU1_UART_ENABLE_CTS_OUT 0x0020
-#define MXU1_UART_ENABLE_X_OUT 0x0040
-#define MXU1_UART_ENABLE_XA_OUT 0x0080
-#define MXU1_UART_ENABLE_X_IN 0x0100
-#define MXU1_UART_ENABLE_DTR_IN 0x0800
-#define MXU1_UART_DISABLE_DTR 0x1000
-#define MXU1_UART_ENABLE_MS_INTS 0x2000
-#define MXU1_UART_ENABLE_AUTO_START_DMA 0x4000
-#define MXU1_UART_SEND_BREAK_SIGNAL 0x8000
-
-/* Parity */
-#define MXU1_UART_NO_PARITY 0x00
-#define MXU1_UART_ODD_PARITY 0x01
-#define MXU1_UART_EVEN_PARITY 0x02
-#define MXU1_UART_MARK_PARITY 0x03
-#define MXU1_UART_SPACE_PARITY 0x04
-
-/* Stop bits */
-#define MXU1_UART_1_STOP_BITS 0x00
-#define MXU1_UART_1_5_STOP_BITS 0x01
-#define MXU1_UART_2_STOP_BITS 0x02
-
-/* Bits per character */
-#define MXU1_UART_5_DATA_BITS 0x00
-#define MXU1_UART_6_DATA_BITS 0x01
-#define MXU1_UART_7_DATA_BITS 0x02
-#define MXU1_UART_8_DATA_BITS 0x03
-
-/* Operation modes */
-#define MXU1_UART_232 0x00
-#define MXU1_UART_485_RECEIVER_DISABLED 0x01
-#define MXU1_UART_485_RECEIVER_ENABLED 0x02
-
-/* Pipe transfer mode and timeout */
-#define MXU1_PIPE_MODE_CONTINUOUS 0x01
-#define MXU1_PIPE_MODE_MASK 0x03
-#define MXU1_PIPE_TIMEOUT_MASK 0x7C
-#define MXU1_PIPE_TIMEOUT_ENABLE 0x80
-
-/* Config struct */
-struct mxu1_uart_config {
- __be16 wBaudRate;
- __be16 wFlags;
- u8 bDataBits;
- u8 bParity;
- u8 bStopBits;
- char cXon;
- char cXoff;
- u8 bUartMode;
-} __packed;
-
-/* Purge modes */
-#define MXU1_PURGE_OUTPUT 0x00
-#define MXU1_PURGE_INPUT 0x80
-
-/* Read/Write data */
-#define MXU1_RW_DATA_ADDR_SFR 0x10
-#define MXU1_RW_DATA_ADDR_IDATA 0x20
-#define MXU1_RW_DATA_ADDR_XDATA 0x30
-#define MXU1_RW_DATA_ADDR_CODE 0x40
-#define MXU1_RW_DATA_ADDR_GPIO 0x50
-#define MXU1_RW_DATA_ADDR_I2C 0x60
-#define MXU1_RW_DATA_ADDR_FLASH 0x70
-#define MXU1_RW_DATA_ADDR_DSP 0x80
-
-#define MXU1_RW_DATA_UNSPECIFIED 0x00
-#define MXU1_RW_DATA_BYTE 0x01
-#define MXU1_RW_DATA_WORD 0x02
-#define MXU1_RW_DATA_DOUBLE_WORD 0x04
-
-struct mxu1_write_data_bytes {
- u8 bAddrType;
- u8 bDataType;
- u8 bDataCounter;
- __be16 wBaseAddrHi;
- __be16 wBaseAddrLo;
- u8 bData[0];
-} __packed;
-
-/* Interrupt codes */
-#define MXU1_CODE_HARDWARE_ERROR 0xFF
-#define MXU1_CODE_DATA_ERROR 0x03
-#define MXU1_CODE_MODEM_STATUS 0x04
-
-static inline int mxu1_get_func_from_code(unsigned char code)
-{
- return code & 0x0f;
-}
-
-/* Download firmware max packet size */
-#define MXU1_DOWNLOAD_MAX_PACKET_SIZE 64
-
-/* Firmware image header */
-struct mxu1_firmware_header {
- __le16 wLength;
- u8 bCheckSum;
-} __packed;
-
-#define MXU1_UART_BASE_ADDR 0xFFA0
-#define MXU1_UART_OFFSET_MCR 0x0004
-
-#define MXU1_BAUD_BASE 923077
-
-#define MXU1_TRANSFER_TIMEOUT 2
-#define MXU1_DOWNLOAD_TIMEOUT 1000
-#define MXU1_DEFAULT_CLOSING_WAIT 4000 /* in .01 secs */
-
-struct mxu1_port {
- u8 msr;
- u8 mcr;
- u8 uart_mode;
- spinlock_t spinlock; /* Protects msr */
- struct mutex mutex; /* Protects mcr */
- bool send_break;
-};
-
-struct mxu1_device {
- u16 mxd_model;
-};
-
-static const struct usb_device_id mxu1_idtable[] = {
- { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1110_PRODUCT_ID) },
- { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1130_PRODUCT_ID) },
- { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
- { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
- { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
- { }
-};
-
-MODULE_DEVICE_TABLE(usb, mxu1_idtable);
-
-/* Write the given buffer out to the control pipe. */
-static int mxu1_send_ctrl_data_urb(struct usb_serial *serial,
- u8 request,
- u16 value, u16 index,
- void *data, size_t size)
-{
- int status;
-
- status = usb_control_msg(serial->dev,
- usb_sndctrlpipe(serial->dev, 0),
- request,
- (USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_DEVICE), value, index,
- data, size,
- USB_CTRL_SET_TIMEOUT);
- if (status < 0) {
- dev_err(&serial->interface->dev,
- "%s - usb_control_msg failed: %d\n",
- __func__, status);
- return status;
- }
-
- if (status != size) {
- dev_err(&serial->interface->dev,
- "%s - short write (%d / %zd)\n",
- __func__, status, size);
- return -EIO;
- }
-
- return 0;
-}
-
-/* Send a vendor request without any data */
-static int mxu1_send_ctrl_urb(struct usb_serial *serial,
- u8 request, u16 value, u16 index)
-{
- return mxu1_send_ctrl_data_urb(serial, request, value, index,
- NULL, 0);
-}
-
-static int mxu1_download_firmware(struct usb_serial *serial,
- const struct firmware *fw_p)
-{
- int status = 0;
- int buffer_size;
- int pos;
- int len;
- int done;
- u8 cs = 0;
- u8 *buffer;
- struct usb_device *dev = serial->dev;
- struct mxu1_firmware_header *header;
- unsigned int pipe;
-
- pipe = usb_sndbulkpipe(dev, serial->port[0]->bulk_out_endpointAddress);
-
- buffer_size = fw_p->size + sizeof(*header);
- buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- memcpy(buffer, fw_p->data, fw_p->size);
- memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size);
-
- for (pos = sizeof(*header); pos < buffer_size; pos++)
- cs = (u8)(cs + buffer[pos]);
-
- header = (struct mxu1_firmware_header *)buffer;
- header->wLength = cpu_to_le16(buffer_size - sizeof(*header));
- header->bCheckSum = cs;
-
- dev_dbg(&dev->dev, "%s - downloading firmware\n", __func__);
-
- for (pos = 0; pos < buffer_size; pos += done) {
- len = min(buffer_size - pos, MXU1_DOWNLOAD_MAX_PACKET_SIZE);
-
- status = usb_bulk_msg(dev, pipe, buffer + pos, len, &done,
- MXU1_DOWNLOAD_TIMEOUT);
- if (status)
- break;
- }
-
- kfree(buffer);
-
- if (status) {
- dev_err(&dev->dev, "failed to download firmware: %d\n", status);
- return status;
- }
-
- msleep_interruptible(100);
- usb_reset_device(dev);
-
- dev_dbg(&dev->dev, "%s - download successful\n", __func__);
-
- return 0;
-}
-
-static int mxu1_port_probe(struct usb_serial_port *port)
-{
- struct mxu1_port *mxport;
- struct mxu1_device *mxdev;
-
- if (!port->interrupt_in_urb) {
- dev_err(&port->dev, "no interrupt urb\n");
- return -ENODEV;
- }
-
- mxport = kzalloc(sizeof(struct mxu1_port), GFP_KERNEL);
- if (!mxport)
- return -ENOMEM;
-
- spin_lock_init(&mxport->spinlock);
- mutex_init(&mxport->mutex);
-
- mxdev = usb_get_serial_data(port->serial);
-
- switch (mxdev->mxd_model) {
- case MXU1_1110_PRODUCT_ID:
- case MXU1_1150_PRODUCT_ID:
- case MXU1_1151_PRODUCT_ID:
- mxport->uart_mode = MXU1_UART_232;
- break;
- case MXU1_1130_PRODUCT_ID:
- case MXU1_1131_PRODUCT_ID:
- mxport->uart_mode = MXU1_UART_485_RECEIVER_DISABLED;
- break;
- }
-
- usb_set_serial_port_data(port, mxport);
-
- port->port.closing_wait =
- msecs_to_jiffies(MXU1_DEFAULT_CLOSING_WAIT * 10);
- port->port.drain_delay = 1;
-
- return 0;
-}
-
-static int mxu1_port_remove(struct usb_serial_port *port)
-{
- struct mxu1_port *mxport;
-
- mxport = usb_get_serial_port_data(port);
- kfree(mxport);
-
- return 0;
-}
-
-static int mxu1_startup(struct usb_serial *serial)
-{
- struct mxu1_device *mxdev;
- struct usb_device *dev = serial->dev;
- struct usb_host_interface *cur_altsetting;
- char fw_name[32];
- const struct firmware *fw_p = NULL;
- int err;
-
- dev_dbg(&serial->interface->dev, "%s - product 0x%04X, num configurations %d, configuration value %d\n",
- __func__, le16_to_cpu(dev->descriptor.idProduct),
- dev->descriptor.bNumConfigurations,
- dev->actconfig->desc.bConfigurationValue);
-
- /* create device structure */
- mxdev = kzalloc(sizeof(struct mxu1_device), GFP_KERNEL);
- if (!mxdev)
- return -ENOMEM;
-
- usb_set_serial_data(serial, mxdev);
-
- mxdev->mxd_model = le16_to_cpu(dev->descriptor.idProduct);
-
- cur_altsetting = serial->interface->cur_altsetting;
-
- /* if we have only 1 configuration, download firmware */
- if (cur_altsetting->desc.bNumEndpoints == 1) {
-
- snprintf(fw_name,
- sizeof(fw_name),
- "moxa/moxa-%04x.fw",
- mxdev->mxd_model);
-
- err = request_firmware(&fw_p, fw_name, &serial->interface->dev);
- if (err) {
- dev_err(&serial->interface->dev, "failed to request firmware: %d\n",
- err);
- goto err_free_mxdev;
- }
-
- err = mxu1_download_firmware(serial, fw_p);
- if (err)
- goto err_release_firmware;
-
- /* device is being reset */
- err = -ENODEV;
- goto err_release_firmware;
- }
-
- return 0;
-
-err_release_firmware:
- release_firmware(fw_p);
-err_free_mxdev:
- kfree(mxdev);
-
- return err;
-}
-
-static void mxu1_release(struct usb_serial *serial)
-{
- struct mxu1_device *mxdev;
-
- mxdev = usb_get_serial_data(serial);
- kfree(mxdev);
-}
-
-static int mxu1_write_byte(struct usb_serial_port *port, u32 addr,
- u8 mask, u8 byte)
-{
- int status;
- size_t size;
- struct mxu1_write_data_bytes *data;
-
- dev_dbg(&port->dev, "%s - addr 0x%08X, mask 0x%02X, byte 0x%02X\n",
- __func__, addr, mask, byte);
-
- size = sizeof(struct mxu1_write_data_bytes) + 2;
- data = kzalloc(size, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->bAddrType = MXU1_RW_DATA_ADDR_XDATA;
- data->bDataType = MXU1_RW_DATA_BYTE;
- data->bDataCounter = 1;
- data->wBaseAddrHi = cpu_to_be16(addr >> 16);
- data->wBaseAddrLo = cpu_to_be16(addr);
- data->bData[0] = mask;
- data->bData[1] = byte;
-
- status = mxu1_send_ctrl_data_urb(port->serial, MXU1_WRITE_DATA, 0,
- MXU1_RAM_PORT, data, size);
- if (status < 0)
- dev_err(&port->dev, "%s - failed: %d\n", __func__, status);
-
- kfree(data);
-
- return status;
-}
-
-static int mxu1_set_mcr(struct usb_serial_port *port, unsigned int mcr)
-{
- int status;
-
- status = mxu1_write_byte(port,
- MXU1_UART_BASE_ADDR + MXU1_UART_OFFSET_MCR,
- MXU1_MCR_RTS | MXU1_MCR_DTR | MXU1_MCR_LOOP,
- mcr);
- return status;
-}
-
-static void mxu1_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port,
- struct ktermios *old_termios)
-{
- struct mxu1_port *mxport = usb_get_serial_port_data(port);
- struct mxu1_uart_config *config;
- tcflag_t cflag, iflag;
- speed_t baud;
- int status;
- unsigned int mcr;
-
- cflag = tty->termios.c_cflag;
- iflag = tty->termios.c_iflag;
-
- if (old_termios &&
- !tty_termios_hw_change(&tty->termios, old_termios) &&
- tty->termios.c_iflag == old_termios->c_iflag) {
- dev_dbg(&port->dev, "%s - nothing to change\n", __func__);
- return;
- }
-
- dev_dbg(&port->dev,
- "%s - cflag 0x%08x, iflag 0x%08x\n", __func__, cflag, iflag);
-
- if (old_termios) {
- dev_dbg(&port->dev, "%s - old cflag 0x%08x, old iflag 0x%08x\n",
- __func__,
- old_termios->c_cflag,
- old_termios->c_iflag);
- }
-
- config = kzalloc(sizeof(*config), GFP_KERNEL);
- if (!config)
- return;
-
- /* these flags must be set */
- config->wFlags |= MXU1_UART_ENABLE_MS_INTS;
- config->wFlags |= MXU1_UART_ENABLE_AUTO_START_DMA;
- if (mxport->send_break)
- config->wFlags |= MXU1_UART_SEND_BREAK_SIGNAL;
- config->bUartMode = mxport->uart_mode;
-
- switch (C_CSIZE(tty)) {
- case CS5:
- config->bDataBits = MXU1_UART_5_DATA_BITS;
- break;
- case CS6:
- config->bDataBits = MXU1_UART_6_DATA_BITS;
- break;
- case CS7:
- config->bDataBits = MXU1_UART_7_DATA_BITS;
- break;
- default:
- case CS8:
- config->bDataBits = MXU1_UART_8_DATA_BITS;
- break;
- }
-
- if (C_PARENB(tty)) {
- config->wFlags |= MXU1_UART_ENABLE_PARITY_CHECKING;
- if (C_CMSPAR(tty)) {
- if (C_PARODD(tty))
- config->bParity = MXU1_UART_MARK_PARITY;
- else
- config->bParity = MXU1_UART_SPACE_PARITY;
- } else {
- if (C_PARODD(tty))
- config->bParity = MXU1_UART_ODD_PARITY;
- else
- config->bParity = MXU1_UART_EVEN_PARITY;
- }
- } else {
- config->bParity = MXU1_UART_NO_PARITY;
- }
-
- if (C_CSTOPB(tty))
- config->bStopBits = MXU1_UART_2_STOP_BITS;
- else
- config->bStopBits = MXU1_UART_1_STOP_BITS;
-
- if (C_CRTSCTS(tty)) {
- /* RTS flow control must be off to drop RTS for baud rate B0 */
- if (C_BAUD(tty) != B0)
- config->wFlags |= MXU1_UART_ENABLE_RTS_IN;
- config->wFlags |= MXU1_UART_ENABLE_CTS_OUT;
- }
-
- if (I_IXOFF(tty) || I_IXON(tty)) {
- config->cXon = START_CHAR(tty);
- config->cXoff = STOP_CHAR(tty);
-
- if (I_IXOFF(tty))
- config->wFlags |= MXU1_UART_ENABLE_X_IN;
-
- if (I_IXON(tty))
- config->wFlags |= MXU1_UART_ENABLE_X_OUT;
- }
-
- baud = tty_get_baud_rate(tty);
- if (!baud)
- baud = 9600;
- config->wBaudRate = MXU1_BAUD_BASE / baud;
-
- dev_dbg(&port->dev, "%s - BaudRate=%d, wBaudRate=%d, wFlags=0x%04X, bDataBits=%d, bParity=%d, bStopBits=%d, cXon=%d, cXoff=%d, bUartMode=%d\n",
- __func__, baud, config->wBaudRate, config->wFlags,
- config->bDataBits, config->bParity, config->bStopBits,
- config->cXon, config->cXoff, config->bUartMode);
-
- cpu_to_be16s(&config->wBaudRate);
- cpu_to_be16s(&config->wFlags);
-
- status = mxu1_send_ctrl_data_urb(port->serial, MXU1_SET_CONFIG, 0,
- MXU1_UART1_PORT, config,
- sizeof(*config));
- if (status)
- dev_err(&port->dev, "cannot set config: %d\n", status);
-
- mutex_lock(&mxport->mutex);
- mcr = mxport->mcr;
-
- if (C_BAUD(tty) == B0)
- mcr &= ~(MXU1_MCR_DTR | MXU1_MCR_RTS);
- else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
- mcr |= MXU1_MCR_DTR | MXU1_MCR_RTS;
-
- status = mxu1_set_mcr(port, mcr);
- if (status)
- dev_err(&port->dev, "cannot set modem control: %d\n", status);
- else
- mxport->mcr = mcr;
-
- mutex_unlock(&mxport->mutex);
-
- kfree(config);
-}
-
-static int mxu1_get_serial_info(struct usb_serial_port *port,
- struct serial_struct __user *ret_arg)
-{
- struct serial_struct ret_serial;
- unsigned cwait;
-
- if (!ret_arg)
- return -EFAULT;
-
- cwait = port->port.closing_wait;
- if (cwait != ASYNC_CLOSING_WAIT_NONE)
- cwait = jiffies_to_msecs(cwait) / 10;
-
- memset(&ret_serial, 0, sizeof(ret_serial));
-
- ret_serial.type = PORT_16550A;
- ret_serial.line = port->minor;
- ret_serial.port = 0;
- ret_serial.xmit_fifo_size = port->bulk_out_size;
- ret_serial.baud_base = MXU1_BAUD_BASE;
- ret_serial.close_delay = 5*HZ;
- ret_serial.closing_wait = cwait;
-
- if (copy_to_user(ret_arg, &ret_serial, sizeof(*ret_arg)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int mxu1_set_serial_info(struct usb_serial_port *port,
- struct serial_struct __user *new_arg)
-{
- struct serial_struct new_serial;
- unsigned cwait;
-
- if (copy_from_user(&new_serial, new_arg, sizeof(new_serial)))
- return -EFAULT;
-
- cwait = new_serial.closing_wait;
- if (cwait != ASYNC_CLOSING_WAIT_NONE)
- cwait = msecs_to_jiffies(10 * new_serial.closing_wait);
-
- port->port.closing_wait = cwait;
-
- return 0;
-}
-
-static int mxu1_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- struct usb_serial_port *port = tty->driver_data;
-
- switch (cmd) {
- case TIOCGSERIAL:
- return mxu1_get_serial_info(port,
- (struct serial_struct __user *)arg);
- case TIOCSSERIAL:
- return mxu1_set_serial_info(port,
- (struct serial_struct __user *)arg);
- }
-
- return -ENOIOCTLCMD;
-}
-
-static int mxu1_tiocmget(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct mxu1_port *mxport = usb_get_serial_port_data(port);
- unsigned int result;
- unsigned int msr;
- unsigned int mcr;
- unsigned long flags;
-
- mutex_lock(&mxport->mutex);
- spin_lock_irqsave(&mxport->spinlock, flags);
-
- msr = mxport->msr;
- mcr = mxport->mcr;
-
- spin_unlock_irqrestore(&mxport->spinlock, flags);
- mutex_unlock(&mxport->mutex);
-
- result = ((mcr & MXU1_MCR_DTR) ? TIOCM_DTR : 0) |
- ((mcr & MXU1_MCR_RTS) ? TIOCM_RTS : 0) |
- ((mcr & MXU1_MCR_LOOP) ? TIOCM_LOOP : 0) |
- ((msr & MXU1_MSR_CTS) ? TIOCM_CTS : 0) |
- ((msr & MXU1_MSR_CD) ? TIOCM_CAR : 0) |
- ((msr & MXU1_MSR_RI) ? TIOCM_RI : 0) |
- ((msr & MXU1_MSR_DSR) ? TIOCM_DSR : 0);
-
- dev_dbg(&port->dev, "%s - 0x%04X\n", __func__, result);
-
- return result;
-}
-
-static int mxu1_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct mxu1_port *mxport = usb_get_serial_port_data(port);
- int err;
- unsigned int mcr;
-
- mutex_lock(&mxport->mutex);
- mcr = mxport->mcr;
-
- if (set & TIOCM_RTS)
- mcr |= MXU1_MCR_RTS;
- if (set & TIOCM_DTR)
- mcr |= MXU1_MCR_DTR;
- if (set & TIOCM_LOOP)
- mcr |= MXU1_MCR_LOOP;
-
- if (clear & TIOCM_RTS)
- mcr &= ~MXU1_MCR_RTS;
- if (clear & TIOCM_DTR)
- mcr &= ~MXU1_MCR_DTR;
- if (clear & TIOCM_LOOP)
- mcr &= ~MXU1_MCR_LOOP;
-
- err = mxu1_set_mcr(port, mcr);
- if (!err)
- mxport->mcr = mcr;
-
- mutex_unlock(&mxport->mutex);
-
- return err;
-}
-
-static void mxu1_break(struct tty_struct *tty, int break_state)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct mxu1_port *mxport = usb_get_serial_port_data(port);
-
- if (break_state == -1)
- mxport->send_break = true;
- else
- mxport->send_break = false;
-
- mxu1_set_termios(tty, port, NULL);
-}
-
-static int mxu1_open(struct tty_struct *tty, struct usb_serial_port *port)
-{
- struct mxu1_port *mxport = usb_get_serial_port_data(port);
- struct usb_serial *serial = port->serial;
- int status;
- u16 open_settings;
-
- open_settings = (MXU1_PIPE_MODE_CONTINUOUS |
- MXU1_PIPE_TIMEOUT_ENABLE |
- (MXU1_TRANSFER_TIMEOUT << 2));
-
- mxport->msr = 0;
-
- status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
- if (status) {
- dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
- status);
- return status;
- }
-
- if (tty)
- mxu1_set_termios(tty, port, NULL);
-
- status = mxu1_send_ctrl_urb(serial, MXU1_OPEN_PORT,
- open_settings, MXU1_UART1_PORT);
- if (status) {
- dev_err(&port->dev, "cannot send open command: %d\n", status);
- goto unlink_int_urb;
- }
-
- status = mxu1_send_ctrl_urb(serial, MXU1_START_PORT,
- 0, MXU1_UART1_PORT);
- if (status) {
- dev_err(&port->dev, "cannot send start command: %d\n", status);
- goto unlink_int_urb;
- }
-
- status = mxu1_send_ctrl_urb(serial, MXU1_PURGE_PORT,
- MXU1_PURGE_INPUT, MXU1_UART1_PORT);
- if (status) {
- dev_err(&port->dev, "cannot clear input buffers: %d\n",
- status);
-
- goto unlink_int_urb;
- }
-
- status = mxu1_send_ctrl_urb(serial, MXU1_PURGE_PORT,
- MXU1_PURGE_OUTPUT, MXU1_UART1_PORT);
- if (status) {
- dev_err(&port->dev, "cannot clear output buffers: %d\n",
- status);
-
- goto unlink_int_urb;
- }
-
- /*
- * reset the data toggle on the bulk endpoints to work around bug in
- * host controllers where things get out of sync some times
- */
- usb_clear_halt(serial->dev, port->write_urb->pipe);
- usb_clear_halt(serial->dev, port->read_urb->pipe);
-
- if (tty)
- mxu1_set_termios(tty, port, NULL);
-
- status = mxu1_send_ctrl_urb(serial, MXU1_OPEN_PORT,
- open_settings, MXU1_UART1_PORT);
- if (status) {
- dev_err(&port->dev, "cannot send open command: %d\n", status);
- goto unlink_int_urb;
- }
-
- status = mxu1_send_ctrl_urb(serial, MXU1_START_PORT,
- 0, MXU1_UART1_PORT);
- if (status) {
- dev_err(&port->dev, "cannot send start command: %d\n", status);
- goto unlink_int_urb;
- }
-
- status = usb_serial_generic_open(tty, port);
- if (status)
- goto unlink_int_urb;
-
- return 0;
-
-unlink_int_urb:
- usb_kill_urb(port->interrupt_in_urb);
-
- return status;
-}
-
-static void mxu1_close(struct usb_serial_port *port)
-{
- int status;
-
- usb_serial_generic_close(port);
- usb_kill_urb(port->interrupt_in_urb);
-
- status = mxu1_send_ctrl_urb(port->serial, MXU1_CLOSE_PORT,
- 0, MXU1_UART1_PORT);
- if (status) {
- dev_err(&port->dev, "failed to send close port command: %d\n",
- status);
- }
-}
-
-static void mxu1_handle_new_msr(struct usb_serial_port *port, u8 msr)
-{
- struct mxu1_port *mxport = usb_get_serial_port_data(port);
- struct async_icount *icount;
- unsigned long flags;
-
- dev_dbg(&port->dev, "%s - msr 0x%02X\n", __func__, msr);
-
- spin_lock_irqsave(&mxport->spinlock, flags);
- mxport->msr = msr & MXU1_MSR_MASK;
- spin_unlock_irqrestore(&mxport->spinlock, flags);
-
- if (msr & MXU1_MSR_DELTA_MASK) {
- icount = &port->icount;
- if (msr & MXU1_MSR_DELTA_CTS)
- icount->cts++;
- if (msr & MXU1_MSR_DELTA_DSR)
- icount->dsr++;
- if (msr & MXU1_MSR_DELTA_CD)
- icount->dcd++;
- if (msr & MXU1_MSR_DELTA_RI)
- icount->rng++;
-
- wake_up_interruptible(&port->port.delta_msr_wait);
- }
-}
-
-static void mxu1_interrupt_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- unsigned char *data = urb->transfer_buffer;
- int length = urb->actual_length;
- int function;
- int status;
- u8 msr;
-
- switch (urb->status) {
- case 0:
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- dev_dbg(&port->dev, "%s - urb shutting down: %d\n",
- __func__, urb->status);
- return;
- default:
- dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
- __func__, urb->status);
- goto exit;
- }
-
- if (length != 2) {
- dev_dbg(&port->dev, "%s - bad packet size: %d\n",
- __func__, length);
- goto exit;
- }
-
- if (data[0] == MXU1_CODE_HARDWARE_ERROR) {
- dev_err(&port->dev, "hardware error: %d\n", data[1]);
- goto exit;
- }
-
- function = mxu1_get_func_from_code(data[0]);
-
- dev_dbg(&port->dev, "%s - function %d, data 0x%02X\n",
- __func__, function, data[1]);
-
- switch (function) {
- case MXU1_CODE_DATA_ERROR:
- dev_dbg(&port->dev, "%s - DATA ERROR, data 0x%02X\n",
- __func__, data[1]);
- break;
-
- case MXU1_CODE_MODEM_STATUS:
- msr = data[1];
- mxu1_handle_new_msr(port, msr);
- break;
-
- default:
- dev_err(&port->dev, "unknown interrupt code: 0x%02X\n",
- data[1]);
- break;
- }
-
-exit:
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- dev_err(&port->dev, "resubmit interrupt urb failed: %d\n",
- status);
- }
-}
-
-static struct usb_serial_driver mxu11x0_device = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "mxu11x0",
- },
- .description = "MOXA UPort 11x0",
- .id_table = mxu1_idtable,
- .num_ports = 1,
- .port_probe = mxu1_port_probe,
- .port_remove = mxu1_port_remove,
- .attach = mxu1_startup,
- .release = mxu1_release,
- .open = mxu1_open,
- .close = mxu1_close,
- .ioctl = mxu1_ioctl,
- .set_termios = mxu1_set_termios,
- .tiocmget = mxu1_tiocmget,
- .tiocmset = mxu1_tiocmset,
- .tiocmiwait = usb_serial_generic_tiocmiwait,
- .get_icount = usb_serial_generic_get_icount,
- .break_ctl = mxu1_break,
- .read_int_callback = mxu1_interrupt_callback,
-};
-
-static struct usb_serial_driver *const serial_drivers[] = {
- &mxu11x0_device, NULL
-};
-
-module_usb_serial_driver(serial_drivers, mxu1_idtable);
-
-MODULE_AUTHOR("Mathieu Othacehe <[email protected]>");
-MODULE_DESCRIPTION("MOXA UPort 11x0 USB to Serial Hub Driver");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE("moxa/moxa-1110.fw");
-MODULE_FIRMWARE("moxa/moxa-1130.fw");
-MODULE_FIRMWARE("moxa/moxa-1131.fw");
-MODULE_FIRMWARE("moxa/moxa-1150.fw");
-MODULE_FIRMWARE("moxa/moxa-1151.fw");
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index db86e512e0fc..348e19834b83 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -270,6 +270,7 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_UE910_V2 0x1012
#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
+#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
@@ -315,6 +316,7 @@ static void option_instat_callback(struct urb *urb);
#define TOSHIBA_PRODUCT_G450 0x0d45
#define ALINK_VENDOR_ID 0x1e0e
+#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
#define ALINK_PRODUCT_PH300 0x9100
#define ALINK_PRODUCT_3GU 0x9200
@@ -607,6 +609,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
.reserved = BIT(3) | BIT(4),
};
+static const struct option_blacklist_info simcom_sim7100e_blacklist = {
+ .reserved = BIT(5) | BIT(6),
+};
+
static const struct option_blacklist_info telit_le910_blacklist = {
.sendsetup = BIT(0),
.reserved = BIT(1) | BIT(2),
@@ -1122,9 +1128,13 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
+ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1176,6 +1186,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1645,6 +1657,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
+ .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
},
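
In the entries above, "reserved" is a bitmap of interface numbers the serial
driver must leave alone (typically the modem's network/QMI function rather
than a TTY). A tiny standalone illustration of reading such a bitmap (it
mirrors the driver's probe check only in spirit):

	#include <stdio.h>

	#define BIT(n) (1UL << (n))

	int main(void)
	{
		/* e.g. simcom_sim7100e_blacklist: interfaces 5 and 6. */
		unsigned long reserved = BIT(5) | BIT(6);
		int ifnum;

		for (ifnum = 0; ifnum < 8; ifnum++)
			printf("interface %d: %s\n", ifnum,
			       (reserved & BIT(ifnum)) ? "skipped" : "serial");
		return 0;
	}
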
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9919d2a9faf2..1bc6089b9008 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -157,14 +157,17 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
- {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx/EM74xx */
- {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx/EM74xx */
+ {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
+ {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
+ {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
+ {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
{DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 2760a7ba3f30..8c80a48e3233 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -446,7 +446,8 @@ static long vfio_pci_ioctl(void *device_data,
info.num_regions = VFIO_PCI_NUM_REGIONS;
info.num_irqs = VFIO_PCI_NUM_IRQS;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct pci_dev *pdev = vdev->pdev;
@@ -520,7 +521,8 @@ static long vfio_pci_ioctl(void *device_data,
return -EINVAL;
}
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
struct vfio_irq_info info;
@@ -555,7 +557,8 @@ static long vfio_pci_ioctl(void *device_data,
else
info.flags |= VFIO_IRQ_INFO_NORESIZE;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index 418cdd9ba3f4..e65b142d3422 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -219,7 +219,8 @@ static long vfio_platform_ioctl(void *device_data,
info.num_regions = vdev->num_regions;
info.num_irqs = vdev->num_irqs;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct vfio_region_info info;
@@ -240,7 +241,8 @@ static long vfio_platform_ioctl(void *device_data,
info.size = vdev->regions[info.index].size;
info.flags = vdev->regions[info.index].flags;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
struct vfio_irq_info info;
@@ -259,7 +261,8 @@ static long vfio_platform_ioctl(void *device_data,
info.flags = vdev->irqs[info.index].flags;
info.count = vdev->irqs[info.index].count;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 6f1ea3dddbad..75b24e93cedb 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -999,7 +999,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_IOMMU_MAP_DMA) {
struct vfio_iommu_type1_dma_map map;
@@ -1032,7 +1033,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
if (ret)
return ret;
- return copy_to_user((void __user *)arg, &unmap, minsz);
+ return copy_to_user((void __user *)arg, &unmap, minsz) ?
+ -EFAULT : 0;
}
return -ENOTTY;
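
The conversion repeated throughout the vfio hunks above exists because
copy_to_user() returns the number of bytes it failed to copy, not an errno;
returning that count directly would hand userspace a positive "success"
value for a partially copied struct. The idiom in isolation (sketch only;
the struct and function names are hypothetical):

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct foo_info { u32 argsz; u32 flags; };	/* hypothetical */

	static long foo_copy_out(struct foo_info *info, void __user *arg,
				 unsigned long minsz)
	{
		/* Nonzero return == bytes NOT copied, so map it to -EFAULT. */
		if (copy_to_user(arg, info, minsz))
			return -EFAULT;
		return 0;
	}
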
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ad2146a9ab2d..236553e81027 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1156,6 +1156,8 @@ int vhost_init_used(struct vhost_virtqueue *vq)
{
__virtio16 last_used_idx;
int r;
+ bool is_le = vq->is_le;
+
if (!vq->private_data) {
vq->is_le = virtio_legacy_is_little_endian();
return 0;
@@ -1165,15 +1167,20 @@ int vhost_init_used(struct vhost_virtqueue *vq)
r = vhost_update_used_flags(vq);
if (r)
- return r;
+ goto err;
vq->signalled_used_valid = false;
- if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx))
- return -EFAULT;
+ if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
+ r = -EFAULT;
+ goto err;
+ }
r = __get_user(last_used_idx, &vq->used->idx);
if (r)
- return r;
+ goto err;
vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
return 0;
+err:
+ vq->is_le = is_le;
+ return r;
}
EXPORT_SYMBOL_GPL(vhost_init_used);
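
The vhost fix works by snapshotting vq->is_le before any fallible step runs
and funnelling every failure through one err: label that restores it. The
same rollback idiom in miniature (standalone; state and names hypothetical):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool is_le;

	static int fallible_step(int fail) { return fail ? -EFAULT : 0; }

	/* Mutate state first, then undo it on any error via one exit path. */
	static int init_used(int fail)
	{
		bool saved = is_le;
		int r;

		is_le = true;		/* speculative state change */
		r = fallible_step(fail);
		if (r)
			goto err;
		return 0;
	err:
		is_le = saved;		/* roll back on failure */
		return r;
	}

	int main(void)
	{
		printf("ok=%d is_le=%d\n", init_used(0), is_le);
		is_le = false;
		printf("err=%d is_le=%d\n", init_used(1), is_le);
		return 0;
	}
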
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 92f394927f24..6e92917ba77a 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -709,6 +709,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
}
if (!err) {
+ ops->cur_blink_jiffies = HZ / 5;
info->fbcon_par = ops;
if (vc)
@@ -956,6 +957,7 @@ static const char *fbcon_startup(void)
ops->currcon = -1;
ops->graphics = 1;
ops->cur_rotate = -1;
+ ops->cur_blink_jiffies = HZ / 5;
info->fbcon_par = ops;
p->con_rotate = initial_rotation;
set_blitting_type(vc, info);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index c0c11fad4611..7760fc1a2218 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -679,7 +679,7 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
pci_read_config_dword(pci_dev,
notify + offsetof(struct virtio_pci_notify_cap,
- cap.length),
+ cap.offset),
&notify_offset);
/* We don't know how many VQs we'll map ahead of time.
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0f6d8515ba4f..80825a7e8e48 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1569,6 +1569,17 @@ config WATCHDOG_RIO
machines. The watchdog timeout period is normally one minute but
can be changed with a boot-time parameter.
+config WATCHDOG_SUN4V
+ tristate "Sun4v Watchdog support"
+ select WATCHDOG_CORE
+ depends on SPARC64
+ help
+ Say Y here to support the hypervisor watchdog capability embedded
+ in the SPARC sun4v architecture.
+
+ To compile this driver as a module, choose M here. The module will
+ be called sun4v_wdt.
+
# XTENSA Architecture
# Xen Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index f566753256ab..f6a6a387c6c7 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -179,6 +179,7 @@ obj-$(CONFIG_SH_WDT) += shwdt.o
obj-$(CONFIG_WATCHDOG_RIO) += riowd.o
obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o
+obj-$(CONFIG_WATCHDOG_SUN4V) += sun4v_wdt.o
# XTENSA Architecture
diff --git a/drivers/watchdog/sun4v_wdt.c b/drivers/watchdog/sun4v_wdt.c
new file mode 100644
index 000000000000..1467fe50a76f
--- /dev/null
+++ b/drivers/watchdog/sun4v_wdt.c
@@ -0,0 +1,191 @@
+/*
+ * sun4v watchdog timer
+ * (c) Copyright 2016 Oracle Corporation
+ *
+ * Implement a simple watchdog driver using the built-in sun4v hypervisor
+ * watchdog support. If time expires, the hypervisor stops or bounces
+ * the guest domain.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/watchdog.h>
+#include <asm/hypervisor.h>
+#include <asm/mdesc.h>
+
+#define WDT_TIMEOUT 60
+#define WDT_MAX_TIMEOUT 31536000
+#define WDT_MIN_TIMEOUT 1
+#define WDT_DEFAULT_RESOLUTION_MS 1000 /* 1 second */
+
+static unsigned int timeout;
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
+ __MODULE_STRING(WDT_TIMEOUT) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, S_IRUGO);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int sun4v_wdt_stop(struct watchdog_device *wdd)
+{
+ sun4v_mach_set_watchdog(0, NULL);
+
+ return 0;
+}
+
+static int sun4v_wdt_ping(struct watchdog_device *wdd)
+{
+ int hverr;
+
+ /*
+ * HV watchdog timer will round up the timeout
+ * passed in to the nearest multiple of the
+ * watchdog resolution in milliseconds.
+ */
+ hverr = sun4v_mach_set_watchdog(wdd->timeout * 1000, NULL);
+ if (hverr == HV_EINVAL)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sun4v_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ wdd->timeout = timeout;
+
+ return 0;
+}
+
+static const struct watchdog_info sun4v_wdt_ident = {
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+ .identity = "sun4v hypervisor watchdog",
+ .firmware_version = 0,
+};
+
+static struct watchdog_ops sun4v_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = sun4v_wdt_ping,
+ .stop = sun4v_wdt_stop,
+ .ping = sun4v_wdt_ping,
+ .set_timeout = sun4v_wdt_set_timeout,
+};
+
+static struct watchdog_device wdd = {
+ .info = &sun4v_wdt_ident,
+ .ops = &sun4v_wdt_ops,
+ .min_timeout = WDT_MIN_TIMEOUT,
+ .max_timeout = WDT_MAX_TIMEOUT,
+ .timeout = WDT_TIMEOUT,
+};
+
+static int __init sun4v_wdt_init(void)
+{
+ struct mdesc_handle *handle;
+ u64 node;
+ const u64 *value;
+ int err = 0;
+ unsigned long major = 1, minor = 1;
+
+ /*
+ * There are 2 properties that can be set from the control
+ * domain for the watchdog.
+ * watchdog-resolution
+ * watchdog-max-timeout
+ *
+ * A handle should always be returned; if it is not, something
+ * is seriously wrong, so returning -ENODEV here is correct.
+ */
+
+ handle = mdesc_grab();
+ if (!handle)
+ return -ENODEV;
+
+ node = mdesc_node_by_name(handle, MDESC_NODE_NULL, "platform");
+ err = -ENODEV;
+ if (node == MDESC_NODE_NULL)
+ goto out_release;
+
+ /*
+ * This is a safe way to verify that we are running on the
+ * right platform.
+ */
+ if (sun4v_hvapi_register(HV_GRP_CORE, major, &minor))
+ goto out_hv_unreg;
+
+ /* Allow value of watchdog-resolution up to 1s (default) */
+ value = mdesc_get_property(handle, node, "watchdog-resolution", NULL);
+ err = -EINVAL;
+ if (value) {
+ if (*value == 0 ||
+ *value > WDT_DEFAULT_RESOLUTION_MS)
+ goto out_hv_unreg;
+ }
+
+ value = mdesc_get_property(handle, node, "watchdog-max-timeout", NULL);
+ if (value) {
+ /*
+ * If the property value (in ms) is smaller than
+ * min_timeout, return -EINVAL.
+ */
+ if (*value < wdd.min_timeout * 1000)
+ goto out_hv_unreg;
+
+ /*
+ * If the property value is smaller than
+ * default max_timeout then set watchdog max_timeout to
+ * the value of the property in seconds.
+ */
+ if (*value < wdd.max_timeout * 1000)
+ wdd.max_timeout = *value / 1000;
+ }
+
+ watchdog_init_timeout(&wdd, timeout, NULL);
+
+ watchdog_set_nowayout(&wdd, nowayout);
+
+ err = watchdog_register_device(&wdd);
+ if (err)
+ goto out_hv_unreg;
+
+ pr_info("initialized (timeout=%ds, nowayout=%d)\n",
+ wdd.timeout, nowayout);
+
+ mdesc_release(handle);
+
+ return 0;
+
+out_hv_unreg:
+ sun4v_hvapi_unregister(HV_GRP_CORE);
+
+out_release:
+ mdesc_release(handle);
+ return err;
+}
+
+static void __exit sun4v_wdt_exit(void)
+{
+ sun4v_hvapi_unregister(HV_GRP_CORE);
+ watchdog_unregister_device(&wdd);
+}
+
+module_init(sun4v_wdt_init);
+module_exit(sun4v_wdt_exit);
+
+MODULE_AUTHOR("Wim Coekaerts <[email protected]>");
+MODULE_DESCRIPTION("sun4v watchdog driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 73dafdc494aa..fb0221434f81 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
/*
* PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
* to access the BARs where the MSI-X entries reside.
+ * But for VF devices, it is the parent PF's register that must be checked.
*/
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
return -ENXIO;
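
pci_physfn() is what makes the one-line change above safe for both device
types: for an SR-IOV virtual function it returns the owning physical
function, and for any other device it returns the device itself. A hedged
sketch of the resulting check (not driver code; the helper name is
hypothetical):

	#include <linux/pci.h>

	/* Read PCI_COMMAND from whichever device actually owns the BARs. */
	static bool memory_decoding_enabled(struct pci_dev *dev)
	{
		u16 cmd;

		/* For a VF this reads the PF's register; otherwise dev's. */
		pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
		return cmd & PCI_COMMAND_MEMORY;
	}
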
@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
struct xen_pcibk_dev_data *dev_data = NULL;
struct xen_pci_op *op = &pdev->op;
int test_intx = 0;
+#ifdef CONFIG_PCI_MSI
+ unsigned int nr = 0;
+#endif
*op = pdev->sh_info->op;
barrier();
@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
op->err = xen_pcibk_disable_msi(pdev, dev, op);
break;
case XEN_PCI_OP_enable_msix:
+ nr = op->value;
op->err = xen_pcibk_enable_msix(pdev, dev, op);
break;
case XEN_PCI_OP_disable_msix:
@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
unsigned int i;
- for (i = 0; i < op->value; i++)
+ for (i = 0; i < nr; i++)
pdev->sh_info->op.msix_entries[i].vector =
op->msix_entries[i].vector;
}
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ad4eb1024d1f..c46ee189466f 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -849,15 +849,31 @@ static int scsiback_map(struct vscsibk_info *info)
}
/*
+ Check for a translation entry being present
+*/
+static struct v2p_entry *scsiback_chk_translation_entry(
+ struct vscsibk_info *info, struct ids_tuple *v)
+{
+ struct list_head *head = &(info->v2p_entry_lists);
+ struct v2p_entry *entry;
+
+ list_for_each_entry(entry, head, l)
+ if ((entry->v.chn == v->chn) &&
+ (entry->v.tgt == v->tgt) &&
+ (entry->v.lun == v->lun))
+ return entry;
+
+ return NULL;
+}
+
+/*
Add a new translation entry
*/
static int scsiback_add_translation_entry(struct vscsibk_info *info,
char *phy, struct ids_tuple *v)
{
int err = 0;
- struct v2p_entry *entry;
struct v2p_entry *new;
- struct list_head *head = &(info->v2p_entry_lists);
unsigned long flags;
char *lunp;
unsigned long long unpacked_lun;
@@ -917,15 +933,10 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
spin_lock_irqsave(&info->v2p_lock, flags);
/* Check double assignment to identical virtual ID */
- list_for_each_entry(entry, head, l) {
- if ((entry->v.chn == v->chn) &&
- (entry->v.tgt == v->tgt) &&
- (entry->v.lun == v->lun)) {
- pr_warn("Virtual ID is already used. Assignment was not performed.\n");
- err = -EEXIST;
- goto out;
- }
-
+ if (scsiback_chk_translation_entry(info, v)) {
+ pr_warn("Virtual ID is already used. Assignment was not performed.\n");
+ err = -EEXIST;
+ goto out;
}
/* Create a new translation entry and add to the list */
@@ -933,18 +944,18 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
new->v = *v;
new->tpg = tpg;
new->lun = unpacked_lun;
- list_add_tail(&new->l, head);
+ list_add_tail(&new->l, &info->v2p_entry_lists);
out:
spin_unlock_irqrestore(&info->v2p_lock, flags);
out_free:
- mutex_lock(&tpg->tv_tpg_mutex);
- tpg->tv_tpg_fe_count--;
- mutex_unlock(&tpg->tv_tpg_mutex);
-
- if (err)
+ if (err) {
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tpg->tv_tpg_fe_count--;
+ mutex_unlock(&tpg->tv_tpg_mutex);
kfree(new);
+ }
return err;
}
@@ -956,39 +967,40 @@ static void __scsiback_del_translation_entry(struct v2p_entry *entry)
}
/*
- Delete the translation entry specfied
+ Delete the translation entry specified
*/
static int scsiback_del_translation_entry(struct vscsibk_info *info,
struct ids_tuple *v)
{
struct v2p_entry *entry;
- struct list_head *head = &(info->v2p_entry_lists);
unsigned long flags;
+ int ret = 0;
spin_lock_irqsave(&info->v2p_lock, flags);
/* Find out the translation entry specified */
- list_for_each_entry(entry, head, l) {
- if ((entry->v.chn == v->chn) &&
- (entry->v.tgt == v->tgt) &&
- (entry->v.lun == v->lun)) {
- goto found;
- }
- }
-
- spin_unlock_irqrestore(&info->v2p_lock, flags);
- return 1;
-
-found:
- /* Delete the translation entry specfied */
- __scsiback_del_translation_entry(entry);
+ entry = scsiback_chk_translation_entry(info, v);
+ if (entry)
+ __scsiback_del_translation_entry(entry);
+ else
+ ret = -ENOENT;
spin_unlock_irqrestore(&info->v2p_lock, flags);
- return 0;
+ return ret;
}
static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
char *phy, struct ids_tuple *vir, int try)
{
+ struct v2p_entry *entry;
+ unsigned long flags;
+
+ if (try) {
+ spin_lock_irqsave(&info->v2p_lock, flags);
+ entry = scsiback_chk_translation_entry(info, vir);
+ spin_unlock_irqrestore(&info->v2p_lock, flags);
+ if (entry)
+ return;
+ }
if (!scsiback_add_translation_entry(info, phy, vir)) {
if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
"%d", XenbusStateInitialised)) {
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 9433e46518c8..912b64edb42b 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -188,6 +188,8 @@ static int queue_reply(struct list_head *queue, const void *data, size_t len)
if (len == 0)
return 0;
+ if (len > XENSTORE_PAYLOAD_MAX)
+ return -EINVAL;
rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
if (rb == NULL)
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 0548c53f41d5..22fc7c802d69 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -511,8 +511,6 @@ affs_do_readpage_ofs(struct page *page, unsigned to)
pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
page->index, to);
BUG_ON(to > PAGE_CACHE_SIZE);
- kmap(page);
- data = page_address(page);
bsize = AFFS_SB(sb)->s_data_blksize;
tmp = page->index << PAGE_CACHE_SHIFT;
bidx = tmp / bsize;
@@ -524,14 +522,15 @@ affs_do_readpage_ofs(struct page *page, unsigned to)
return PTR_ERR(bh);
tmp = min(bsize - boff, to - pos);
BUG_ON(pos + tmp > to || tmp > bsize);
+ data = kmap_atomic(page);
memcpy(data + pos, AFFS_DATA(bh) + boff, tmp);
+ kunmap_atomic(data);
affs_brelse(bh);
bidx++;
pos += tmp;
boff = 0;
}
flush_dcache_page(page);
- kunmap(page);
return 0;
}
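
The affs change swaps a kmap() held across the whole loop for a kmap_atomic() taken only around each copy, since an atomic mapping must not be held across affs_bread(), which can sleep. A minimal sketch of the per-copy mapping pattern; the helper name is hypothetical:

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Copy len bytes into a (possibly highmem) page, holding the atomic
 * mapping only for the duration of the memcpy. Nothing that can sleep
 * may sit between kmap_atomic() and kunmap_atomic().
 */
static void copy_into_page(struct page *page, size_t offset,
			   const void *src, size_t len)
{
	void *data = kmap_atomic(page);

	memcpy(data + offset, src, len);
	kunmap_atomic(data);
}
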
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 051ea4809c14..7d914c67a9d0 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -653,7 +653,7 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
if ((current->flags & PF_RANDOMIZE) &&
!(current->personality & ADDR_NO_RANDOMIZE)) {
- random_variable = (unsigned long) get_random_int();
+ random_variable = get_random_long();
random_variable &= STACK_RND_MASK;
random_variable <<= PAGE_SHIFT;
}
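
get_random_long() matters here because the random value is masked with STACK_RND_MASK before being shifted into a byte offset, and a 32-bit source silently truncates any mask wider than 32 bits. A hedged user-space model of the computation, with an illustrative mask and a stand-in RNG:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define STACK_RND_MASK 0x3fffffUL	/* illustrative: 22 bits of pages */

static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable;

	random_variable = (unsigned long)random();	/* stand-in RNG */
	random_variable &= STACK_RND_MASK;
	random_variable <<= PAGE_SHIFT;
	return stack_top - random_variable;	/* stack grows down here */
}

int main(void)
{
	printf("%#lx\n", randomize_stack_top(0x7ffffffff000UL));
	return 0;
}
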
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 39b3a174a425..826b164a4b5b 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1201,7 +1201,11 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_disk = disk;
bdev->bd_queue = disk->queue;
bdev->bd_contains = bdev;
- bdev->bd_inode->i_flags = disk->fops->direct_access ? S_DAX : 0;
+ if (IS_ENABLED(CONFIG_BLK_DEV_DAX) && disk->fops->direct_access)
+ bdev->bd_inode->i_flags = S_DAX;
+ else
+ bdev->bd_inode->i_flags = 0;
+
if (!partno) {
ret = -ENXIO;
bdev->bd_part = disk_get_part(disk, partno);
@@ -1693,13 +1697,24 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
return try_to_free_buffers(page);
}
+static int blkdev_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ if (dax_mapping(mapping)) {
+ struct block_device *bdev = I_BDEV(mapping->host);
+
+ return dax_writeback_mapping_range(mapping, bdev, wbc);
+ }
+ return generic_writepages(mapping, wbc);
+}
+
static const struct address_space_operations def_blk_aops = {
.readpage = blkdev_readpage,
.readpages = blkdev_readpages,
.writepage = blkdev_writepage,
.write_begin = blkdev_write_begin,
.write_end = blkdev_write_end,
- .writepages = generic_writepages,
+ .writepages = blkdev_writepages,
.releasepage = blkdev_releasepage,
.direct_IO = blkdev_direct_IO,
.is_dirty_writeback = buffer_check_dirty_writeback,
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 7cf8509deda7..2c849b08a91b 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -310,8 +310,16 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
err = btrfs_insert_fs_root(root->fs_info, root);
+ /*
+ * The root might have been inserted already, as before we look
+ * for orphan roots, log replay might have happened, which
+ * triggers a transaction commit and qgroup accounting, which
+ * in turn reads and inserts fs roots while doing backref
+ * walking.
+ */
+ if (err == -EEXIST)
+ err = 0;
if (err) {
- BUG_ON(err == -EEXIST);
btrfs_free_fs_root(root);
break;
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index c22213789090..19adeb0ef82a 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1756,6 +1756,10 @@ int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
u32 pool;
int ret, flags;
+ /* does not support pool namespace yet */
+ if (ci->i_pool_ns_len)
+ return -EIO;
+
if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode),
NOPOOLPERM))
return 0;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index cdbf8cf3d52c..6fe0ad26a7df 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2753,7 +2753,8 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
void *inline_data, int inline_len,
struct ceph_buffer *xattr_buf,
struct ceph_mds_session *session,
- struct ceph_cap *cap, int issued)
+ struct ceph_cap *cap, int issued,
+ u32 pool_ns_len)
__releases(ci->i_ceph_lock)
__releases(mdsc->snap_rwsem)
{
@@ -2873,6 +2874,8 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) {
/* file layout may have changed */
ci->i_layout = grant->layout;
+ ci->i_pool_ns_len = pool_ns_len;
+
/* size/truncate_seq? */
queue_trunc = ceph_fill_file_size(inode, issued,
le32_to_cpu(grant->truncate_seq),
@@ -3411,6 +3414,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
u32 inline_len = 0;
void *snaptrace;
size_t snaptrace_len;
+ u32 pool_ns_len = 0;
void *p, *end;
dout("handle_caps from mds%d\n", mds);
@@ -3463,6 +3467,21 @@ void ceph_handle_caps(struct ceph_mds_session *session,
p += inline_len;
}
+ if (le16_to_cpu(msg->hdr.version) >= 8) {
+ u64 flush_tid;
+ u32 caller_uid, caller_gid;
+ u32 osd_epoch_barrier;
+ /* version >= 5 */
+ ceph_decode_32_safe(&p, end, osd_epoch_barrier, bad);
+ /* version >= 6 */
+ ceph_decode_64_safe(&p, end, flush_tid, bad);
+ /* version >= 7 */
+ ceph_decode_32_safe(&p, end, caller_uid, bad);
+ ceph_decode_32_safe(&p, end, caller_gid, bad);
+ /* version >= 8 */
+ ceph_decode_32_safe(&p, end, pool_ns_len, bad);
+ }
+
/* lookup ino */
inode = ceph_find_inode(sb, vino);
ci = ceph_inode(inode);
@@ -3518,7 +3537,8 @@ void ceph_handle_caps(struct ceph_mds_session *session,
&cap, &issued);
handle_cap_grant(mdsc, inode, h,
inline_version, inline_data, inline_len,
- msg->middle, session, cap, issued);
+ msg->middle, session, cap, issued,
+ pool_ns_len);
if (realm)
ceph_put_snap_realm(mdsc, realm);
goto done_unlocked;
@@ -3542,7 +3562,8 @@ void ceph_handle_caps(struct ceph_mds_session *session,
issued |= __ceph_caps_dirty(ci);
handle_cap_grant(mdsc, inode, h,
inline_version, inline_data, inline_len,
- msg->middle, session, cap, issued);
+ msg->middle, session, cap, issued,
+ pool_ns_len);
goto done_unlocked;
case CEPH_CAP_OP_FLUSH_ACK:
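
Each new field above is pulled out with the _safe decode macros, which bail to the bad label when the buffer runs short. A hedged user-space model of that pattern; the 64-bit flush_tid field is elided and byte order is ignored for brevity:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DECODE_32_SAFE(p, end, v, bad)				\
	do {							\
		if ((size_t)((end) - (p)) < sizeof(uint32_t))	\
			goto bad;				\
		memcpy(&(v), (p), sizeof(uint32_t));		\
		(p) += sizeof(uint32_t);			\
	} while (0)

static int parse_caps(const uint8_t *p, const uint8_t *end, int version)
{
	uint32_t pool_ns_len = 0;

	if (version >= 8) {
		uint32_t osd_epoch_barrier, caller_uid, caller_gid;

		/* decoded only to advance p, as in the kernel hunk */
		DECODE_32_SAFE(p, end, osd_epoch_barrier, bad);
		DECODE_32_SAFE(p, end, caller_uid, bad);
		DECODE_32_SAFE(p, end, caller_gid, bad);
		DECODE_32_SAFE(p, end, pool_ns_len, bad);
	}
	printf("pool_ns_len=%u\n", pool_ns_len);
	return 0;
bad:
	return -1;	/* truncated message */
}

int main(void)
{
	uint8_t buf[16] = { 0 };

	return parse_caps(buf, buf + sizeof(buf), 8);
}
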
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index fb4ba2e4e2a5..5849b88bbed3 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -396,6 +396,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ci->i_symlink = NULL;
memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
+ ci->i_pool_ns_len = 0;
ci->i_fragtree = RB_ROOT;
mutex_init(&ci->i_fragtree_mutex);
@@ -756,6 +757,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
if (ci->i_layout.fl_pg_pool != info->layout.fl_pg_pool)
ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
ci->i_layout = info->layout;
+ ci->i_pool_ns_len = iinfo->pool_ns_len;
queue_trunc = ceph_fill_file_size(inode, issued,
le32_to_cpu(info->truncate_seq),
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index e7b130a637f9..911d64d865f1 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -100,6 +100,14 @@ static int parse_reply_info_in(void **p, void *end,
} else
info->inline_version = CEPH_INLINE_NONE;
+ if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
+ ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
+ ceph_decode_need(p, end, info->pool_ns_len, bad);
+ *p += info->pool_ns_len;
+ } else {
+ info->pool_ns_len = 0;
+ }
+
return 0;
bad:
return err;
@@ -2298,6 +2306,14 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
CEPH_CAP_PIN);
+ /* deny access to directories with pool_ns layouts */
+ if (req->r_inode && S_ISDIR(req->r_inode->i_mode) &&
+ ceph_inode(req->r_inode)->i_pool_ns_len)
+ return -EIO;
+ if (req->r_locked_dir &&
+ ceph_inode(req->r_locked_dir)->i_pool_ns_len)
+ return -EIO;
+
/* issue */
mutex_lock(&mdsc->mutex);
__register_request(mdsc, req, dir);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index ccf11ef0ca87..37712ccffcc6 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -44,6 +44,7 @@ struct ceph_mds_reply_info_in {
u64 inline_version;
u32 inline_len;
char *inline_data;
+ u32 pool_ns_len;
};
/*
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 75b7d125ce66..9c458eb52245 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -287,6 +287,7 @@ struct ceph_inode_info {
struct ceph_dir_layout i_dir_layout;
struct ceph_file_layout i_layout;
+ size_t i_pool_ns_len;
char *i_symlink;
/* for dirs */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index c48ca13673e3..2eea40353e60 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1013,7 +1013,6 @@ const struct file_operations cifs_file_strict_ops = {
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
.clone_file_range = cifs_clone_file_range,
- .clone_file_range = cifs_clone_file_range,
.setlease = cifs_setlease,
.fallocate = cifs_fallocate,
};
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 68c4547528c4..83aac8ba50b0 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -31,19 +31,15 @@
* so that it will fit. We use hash_64 to convert the value to 31 bits, and
* then add 1, to ensure that we don't end up with a 0 as the value.
*/
-#if BITS_PER_LONG == 64
static inline ino_t
cifs_uniqueid_to_ino_t(u64 fileid)
{
+ if ((sizeof(ino_t)) < (sizeof(u64)))
+ return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
+
return (ino_t)fileid;
+
}
-#else
-static inline ino_t
-cifs_uniqueid_to_ino_t(u64 fileid)
-{
- return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
-}
-#endif
extern struct file_system_type cifs_fs_type;
extern const struct address_space_operations cifs_addr_ops;
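
A hedged user-space model of the folded cifs_uniqueid_to_ino_t(): when ino_t is narrower than the 64-bit server file id, hash down to one bit fewer than ino_t holds and add 1 so the result is never zero. The hash_64() here is a multiply-and-shift variant reproduced only for illustration; the kernel's exact hash differs:

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_64 0x61C8864680B583EBull	/* illustrative constant */

static uint64_t hash_64(uint64_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_64) >> (64 - bits);
}

typedef uint32_t ino32_t;	/* pretend ino_t is 32 bits wide */

static ino32_t uniqueid_to_ino(uint64_t fileid)
{
	if (sizeof(ino32_t) < sizeof(uint64_t))
		return (ino32_t)hash_64(fileid, sizeof(ino32_t) * 8 - 1) + 1;
	return (ino32_t)fileid;
}

int main(void)
{
	printf("%u\n", uniqueid_to_ino(0x123456789abcdef0ull));
	return 0;
}

The sizeof() comparison folds at compile time, so the compiler drops the dead branch and the runtime cost matches the old #if BITS_PER_LONG version.
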
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 90b4f9f7de66..76fcb50295a3 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1396,11 +1396,10 @@ openRetry:
* current bigbuf.
*/
static int
-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+discard_remaining_data(struct TCP_Server_Info *server)
{
unsigned int rfclen = get_rfc1002_length(server->smallbuf);
int remaining = rfclen + 4 - server->total_read;
- struct cifs_readdata *rdata = mid->callback_data;
while (remaining > 0) {
int length;
@@ -1414,10 +1413,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
remaining -= length;
}
- dequeue_mid(mid, rdata->result);
return 0;
}
+static int
+cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+ int length;
+ struct cifs_readdata *rdata = mid->callback_data;
+
+ length = discard_remaining_data(server);
+ dequeue_mid(mid, rdata->result);
+ return length;
+}
+
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
@@ -1446,6 +1455,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
return length;
server->total_read += length;
+ if (server->ops->is_status_pending &&
+ server->ops->is_status_pending(buf, server, 0)) {
+ discard_remaining_data(server);
+ return -1;
+ }
+
/* Was the SMB read successful? */
rdata->result = server->ops->map_error(buf, false);
if (rdata->result != 0) {
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 10f8d5cf5681..42e1f440eb1e 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1106,21 +1106,25 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
{
char *data_offset;
struct create_context *cc;
- unsigned int next = 0;
+ unsigned int next;
+ unsigned int remaining;
char *name;
data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
+ remaining = le32_to_cpu(rsp->CreateContextsLength);
cc = (struct create_context *)data_offset;
- do {
- cc = (struct create_context *)((char *)cc + next);
+ while (remaining >= sizeof(struct create_context)) {
name = le16_to_cpu(cc->NameOffset) + (char *)cc;
- if (le16_to_cpu(cc->NameLength) != 4 ||
- strncmp(name, "RqLs", 4)) {
- next = le32_to_cpu(cc->Next);
- continue;
- }
- return server->ops->parse_lease_buf(cc, epoch);
- } while (next != 0);
+ if (le16_to_cpu(cc->NameLength) == 4 &&
+ strncmp(name, "RqLs", 4) == 0)
+ return server->ops->parse_lease_buf(cc, epoch);
+
+ next = le32_to_cpu(cc->Next);
+ if (!next)
+ break;
+ remaining -= next;
+ cc = (struct create_context *)((char *)cc + next);
+ }
return 0;
}
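
The rewritten loop walks the create contexts under a byte budget instead of trusting the chain to terminate. A hedged user-space model of that shape; the record layout is illustrative, and the extra next-vs-remaining check is an addition of this sketch rather than a copy of the kernel loop:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct record {
	uint32_t next;		/* offset to next record, 0 = last */
	uint32_t tag;
};

static const struct record *find_tag(const void *buf, size_t buflen,
				     uint32_t want)
{
	size_t remaining = buflen;
	const uint8_t *p = buf;

	while (remaining >= sizeof(struct record)) {
		const struct record *r = (const struct record *)p;

		if (r->tag == want)
			return r;
		if (!r->next || r->next > remaining)
			break;	/* chain ended or would run off the buffer */
		remaining -= r->next;
		p += r->next;
	}
	return NULL;
}

int main(void)
{
	struct record recs[2] = {
		{ .next = sizeof(struct record), .tag = 1 },
		{ .next = 0, .tag = 2 },
	};
	const struct record *r = find_tag(recs, sizeof(recs), 2);

	printf("found tag %u\n", r ? r->tag : 0);
	return 0;
}
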
diff --git a/fs/dax.c b/fs/dax.c
index fc2e3141138b..711172450da6 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -79,15 +79,14 @@ struct page *read_dax_sector(struct block_device *bdev, sector_t n)
}
/*
- * dax_clear_blocks() is called from within transaction context from XFS,
+ * dax_clear_sectors() is called from within transaction context from XFS,
* and hence this means the stack from this point must follow GFP_NOFS
* semantics for all operations.
*/
-int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
+int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size)
{
- struct block_device *bdev = inode->i_sb->s_bdev;
struct blk_dax_ctl dax = {
- .sector = block << (inode->i_blkbits - 9),
+ .sector = _sector,
.size = _size,
};
@@ -109,7 +108,7 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
wmb_pmem();
return 0;
}
-EXPORT_SYMBOL_GPL(dax_clear_blocks);
+EXPORT_SYMBOL_GPL(dax_clear_sectors);
/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
@@ -485,11 +484,10 @@ static int dax_writeback_one(struct block_device *bdev,
* end]. This is required by data integrity operations to ensure file data is
* on persistent storage prior to completion of the operation.
*/
-int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
- loff_t end)
+int dax_writeback_mapping_range(struct address_space *mapping,
+ struct block_device *bdev, struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
- struct block_device *bdev = inode->i_sb->s_bdev;
pgoff_t start_index, end_index, pmd_index;
pgoff_t indices[PAGEVEC_SIZE];
struct pagevec pvec;
@@ -500,8 +498,11 @@ int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
return -EIO;
- start_index = start >> PAGE_CACHE_SHIFT;
- end_index = end >> PAGE_CACHE_SHIFT;
+ if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
+ return 0;
+
+ start_index = wbc->range_start >> PAGE_CACHE_SHIFT;
+ end_index = wbc->range_end >> PAGE_CACHE_SHIFT;
pmd_index = DAX_PMD_INDEX(start_index);
rcu_read_lock();
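
After this change a filesystem's ->writepages dispatches DAX mappings itself and passes the block device explicitly, since the data device is not always sb->s_bdev (XFS, for instance, resolves it via xfs_find_bdev_for_inode()). A sketch of the resulting calling convention, mirroring the ext2 hunk later in this series:

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/writeback.h>

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	if (dax_mapping(mapping))
		return dax_writeback_mapping_range(mapping,
				mapping->host->i_sb->s_bdev, wbc);
	return generic_writepages(mapping, wbc);
}
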
diff --git a/fs/dcache.c b/fs/dcache.c
index 92d5140de851..2398f9f94337 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -269,9 +269,6 @@ static inline int dname_external(const struct dentry *dentry)
return dentry->d_name.name != dentry->d_iname;
}
-/*
- * Make sure other CPUs see the inode attached before the type is set.
- */
static inline void __d_set_inode_and_type(struct dentry *dentry,
struct inode *inode,
unsigned type_flags)
@@ -279,28 +276,18 @@ static inline void __d_set_inode_and_type(struct dentry *dentry,
unsigned flags;
dentry->d_inode = inode;
- smp_wmb();
flags = READ_ONCE(dentry->d_flags);
flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
flags |= type_flags;
WRITE_ONCE(dentry->d_flags, flags);
}
-/*
- * Ideally, we want to make sure that other CPUs see the flags cleared before
- * the inode is detached, but this is really a violation of RCU principles
- * since the ordering suggests we should always set inode before flags.
- *
- * We should instead replace or discard the entire dentry - but that sucks
- * performancewise on mass deletion/rename.
- */
static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
unsigned flags = READ_ONCE(dentry->d_flags);
flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
WRITE_ONCE(dentry->d_flags, flags);
- smp_wmb();
dentry->d_inode = NULL;
}
@@ -370,9 +357,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
__releases(dentry->d_inode->i_lock)
{
struct inode *inode = dentry->d_inode;
+
+ raw_write_seqcount_begin(&dentry->d_seq);
__d_clear_type_and_inode(dentry);
hlist_del_init(&dentry->d_u.d_alias);
- dentry_rcuwalk_invalidate(dentry);
+ raw_write_seqcount_end(&dentry->d_seq);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
if (!inode->i_nlink)
@@ -1758,8 +1747,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
spin_lock(&dentry->d_lock);
if (inode)
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
+ raw_write_seqcount_begin(&dentry->d_seq);
__d_set_inode_and_type(dentry, inode, add_flags);
- dentry_rcuwalk_invalidate(dentry);
+ raw_write_seqcount_end(&dentry->d_seq);
spin_unlock(&dentry->d_lock);
fsnotify_d_instantiate(dentry, inode);
}
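
Both dcache hunks replace the old smp_wmb() ordering with a d_seq write section, so RCU-walk readers simply retry if they race a store. A hedged sketch of the generic seqcount pattern involved; the writer is assumed to be serialized by an outer lock, as d_lock is here:

#include <linux/seqlock.h>

struct obj {
	seqcount_t seq;
	void *ptr;
	unsigned int flags;
};

/* writer: assumed serialized by an outer lock */
static void obj_publish(struct obj *o, void *ptr, unsigned int flags)
{
	raw_write_seqcount_begin(&o->seq);
	o->ptr = ptr;
	o->flags = flags;
	raw_write_seqcount_end(&o->seq);
}

/* lockless reader: retries if it overlaps a write section */
static void obj_read(struct obj *o, void **ptr, unsigned int *flags)
{
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&o->seq);
		*ptr = o->ptr;
		*flags = o->flags;
	} while (read_seqcount_retry(&o->seq, seq));
}
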
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 2c88d683cd91..c1400b109805 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -80,23 +80,6 @@ static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
return ret;
}
-static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct inode *inode = file_inode(vma->vm_file);
- struct ext2_inode_info *ei = EXT2_I(inode);
- int ret;
-
- sb_start_pagefault(inode->i_sb);
- file_update_time(vma->vm_file);
- down_read(&ei->dax_sem);
-
- ret = __dax_mkwrite(vma, vmf, ext2_get_block, NULL);
-
- up_read(&ei->dax_sem);
- sb_end_pagefault(inode->i_sb);
- return ret;
-}
-
static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
struct vm_fault *vmf)
{
@@ -124,7 +107,7 @@ static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
static const struct vm_operations_struct ext2_dax_vm_ops = {
.fault = ext2_dax_fault,
.pmd_fault = ext2_dax_pmd_fault,
- .page_mkwrite = ext2_dax_mkwrite,
+ .page_mkwrite = ext2_dax_fault,
.pfn_mkwrite = ext2_dax_pfn_mkwrite,
};
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 338eefda70c6..6bd58e6ff038 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -737,8 +737,10 @@ static int ext2_get_blocks(struct inode *inode,
* so that it's not found by another thread before it's
* initialised
*/
- err = dax_clear_blocks(inode, le32_to_cpu(chain[depth-1].key),
- 1 << inode->i_blkbits);
+ err = dax_clear_sectors(inode->i_sb->s_bdev,
+ le32_to_cpu(chain[depth-1].key) <<
+ (inode->i_blkbits - 9),
+ 1 << inode->i_blkbits);
if (err) {
mutex_unlock(&ei->truncate_mutex);
goto cleanup;
@@ -874,6 +876,14 @@ ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
+#ifdef CONFIG_FS_DAX
+ if (dax_mapping(mapping)) {
+ return dax_writeback_mapping_range(mapping,
+ mapping->host->i_sb->s_bdev,
+ wbc);
+ }
+#endif
+
return mpage_writepages(mapping, wbc, ext2_get_block);
}
@@ -1296,7 +1306,7 @@ void ext2_set_inode_flags(struct inode *inode)
inode->i_flags |= S_NOATIME;
if (flags & EXT2_DIRSYNC_FL)
inode->i_flags |= S_DIRSYNC;
- if (test_opt(inode->i_sb, DAX))
+ if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
inode->i_flags |= S_DAX;
}
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 474f1a4d2ca8..4cd318f31cbe 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -262,23 +262,8 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
return result;
}
-static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- int err;
- struct inode *inode = file_inode(vma->vm_file);
-
- sb_start_pagefault(inode->i_sb);
- file_update_time(vma->vm_file);
- down_read(&EXT4_I(inode)->i_mmap_sem);
- err = __dax_mkwrite(vma, vmf, ext4_dax_mmap_get_block, NULL);
- up_read(&EXT4_I(inode)->i_mmap_sem);
- sb_end_pagefault(inode->i_sb);
-
- return err;
-}
-
/*
- * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_mkwrite()
+ * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_fault()
 * handler we check for races against truncate. Note that since we cycle through
* i_mmap_sem, we are sure that also any hole punching that began before we
* were called is finished by now and so if it included part of the file we
@@ -311,7 +296,7 @@ static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
static const struct vm_operations_struct ext4_dax_vm_ops = {
.fault = ext4_dax_fault,
.pmd_fault = ext4_dax_pmd_fault,
- .page_mkwrite = ext4_dax_mkwrite,
+ .page_mkwrite = ext4_dax_fault,
.pfn_mkwrite = ext4_dax_pfn_mkwrite,
};
#else
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9cc57c3b4661..aee960b1af34 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2478,6 +2478,10 @@ static int ext4_writepages(struct address_space *mapping,
trace_ext4_writepages(inode, wbc);
+ if (dax_mapping(mapping))
+ return dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev,
+ wbc);
+
/*
* No pages to write? This is mainly a kludge to avoid starting
* a transaction for special inodes like journal inode on last iput()
@@ -4155,7 +4159,7 @@ void ext4_set_inode_flags(struct inode *inode)
new_fl |= S_NOATIME;
if (flags & EXT4_DIRSYNC_FL)
new_fl |= S_DIRSYNC;
- if (test_opt(inode->i_sb, DAX))
+ if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
new_fl |= S_DAX;
inode_set_flags(inode, new_fl,
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index a99b010e2194..eae5917c534e 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -583,6 +583,11 @@ group_extend_out:
"Online defrag not supported with bigalloc");
err = -EOPNOTSUPP;
goto mext_out;
+ } else if (IS_DAX(inode)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online defrag not supported with DAX");
+ err = -EOPNOTSUPP;
+ goto mext_out;
}
err = mnt_want_write_file(filp);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 1f76d8950a57..5c46ed9f3e14 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -223,6 +223,9 @@ static void wb_wait_for_completion(struct backing_dev_info *bdi,
#define WB_FRN_HIST_MAX_SLOTS (WB_FRN_HIST_THR_SLOTS / 2 + 1)
/* one round can affect up to 5 slots */
+static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
+static struct workqueue_struct *isw_wq;
+
void __inode_attach_wb(struct inode *inode, struct page *page)
{
struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -317,7 +320,6 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
struct inode_switch_wbs_context *isw =
container_of(work, struct inode_switch_wbs_context, work);
struct inode *inode = isw->inode;
- struct super_block *sb = inode->i_sb;
struct address_space *mapping = inode->i_mapping;
struct bdi_writeback *old_wb = inode->i_wb;
struct bdi_writeback *new_wb = isw->new_wb;
@@ -424,8 +426,9 @@ skip_switch:
wb_put(new_wb);
iput(inode);
- deactivate_super(sb);
kfree(isw);
+
+ atomic_dec(&isw_nr_in_flight);
}
static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
@@ -435,7 +438,7 @@ static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
/* needs to grab bh-unsafe locks, bounce to work item */
INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
- schedule_work(&isw->work);
+ queue_work(isw_wq, &isw->work);
}
/**
@@ -471,20 +474,20 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
/* while holding I_WB_SWITCH, no one else can update the association */
spin_lock(&inode->i_lock);
-
- if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
- inode_to_wb(inode) == isw->new_wb)
- goto out_unlock;
-
- if (!atomic_inc_not_zero(&inode->i_sb->s_active))
- goto out_unlock;
-
+ if (!(inode->i_sb->s_flags & MS_ACTIVE) ||
+ inode->i_state & (I_WB_SWITCH | I_FREEING) ||
+ inode_to_wb(inode) == isw->new_wb) {
+ spin_unlock(&inode->i_lock);
+ goto out_free;
+ }
inode->i_state |= I_WB_SWITCH;
spin_unlock(&inode->i_lock);
ihold(inode);
isw->inode = inode;
+ atomic_inc(&isw_nr_in_flight);
+
/*
* In addition to synchronizing among switchers, I_WB_SWITCH tells
* the RCU protected stat update paths to grab the mapping's
@@ -494,8 +497,6 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
return;
-out_unlock:
- spin_unlock(&inode->i_lock);
out_free:
if (isw->new_wb)
wb_put(isw->new_wb);
@@ -847,6 +848,33 @@ restart:
wb_put(last_wb);
}
+/**
+ * cgroup_writeback_umount - flush inode wb switches for umount
+ *
+ * This function is called when a super_block is about to be destroyed and
+ * flushes in-flight inode wb switches. An inode wb switch goes through
+ * RCU and then workqueue, so the two need to be flushed in order to ensure
+ * that all previously scheduled switches are finished. As wb switches are
+ * rare occurrences and synchronize_rcu() can take a while, perform
+ * flushing iff wb switches are in flight.
+ */
+void cgroup_writeback_umount(void)
+{
+ if (atomic_read(&isw_nr_in_flight)) {
+ synchronize_rcu();
+ flush_workqueue(isw_wq);
+ }
+}
+
+static int __init cgroup_writeback_init(void)
+{
+ isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
+ if (!isw_wq)
+ return -ENOMEM;
+ return 0;
+}
+fs_initcall(cgroup_writeback_init);
+
#else /* CONFIG_CGROUP_WRITEBACK */
static struct bdi_writeback *
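
The writeback change counts in-flight switches so that umount pays for synchronize_rcu() only when something is actually pending. A hedged sketch of that two-stage (RCU, then workqueue) flush pattern with illustrative names:

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct item {
	struct rcu_head rcu;
	struct work_struct work;
};

static atomic_t items_in_flight = ATOMIC_INIT(0);
static struct workqueue_struct *item_wq;	/* from alloc_workqueue() */

static void item_work_fn(struct work_struct *work)
{
	/* ... the deferred operation runs here ... */
	atomic_dec(&items_in_flight);
}

static void item_rcu_fn(struct rcu_head *rcu)
{
	struct item *it = container_of(rcu, struct item, rcu);

	/* bh-unsafe locks needed: bounce to process context */
	queue_work(item_wq, &it->work);
}

static void start_item(struct item *it)
{
	atomic_inc(&items_in_flight);
	INIT_WORK(&it->work, item_work_fn);
	call_rcu(&it->rcu, item_rcu_fn);
}

/* teardown: wait for both legs, but only if anything is pending */
static void flush_items(void)
{
	if (atomic_read(&items_in_flight)) {
		synchronize_rcu();
		flush_workqueue(item_wq);
	}
}
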
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 506765afa1a3..bb8d67e2740a 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -376,12 +376,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
struct inode *inode = d_inode(dentry);
dnode_secno dno;
int r;
- int rep = 0;
int err;
hpfs_lock(dir->i_sb);
hpfs_adjust_length(name, &len);
-again:
+
err = -ENOENT;
de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
if (!de)
@@ -401,33 +400,9 @@ again:
hpfs_error(dir->i_sb, "there was error when removing dirent");
err = -EFSERROR;
break;
- case 2: /* no space for deleting, try to truncate file */
-
+ case 2: /* no space for deleting */
err = -ENOSPC;
- if (rep++)
- break;
-
- dentry_unhash(dentry);
- if (!d_unhashed(dentry)) {
- hpfs_unlock(dir->i_sb);
- return -ENOSPC;
- }
- if (generic_permission(inode, MAY_WRITE) ||
- !S_ISREG(inode->i_mode) ||
- get_write_access(inode)) {
- d_rehash(dentry);
- } else {
- struct iattr newattrs;
- /*pr_info("truncating file before delete.\n");*/
- newattrs.ia_size = 0;
- newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
- err = notify_change(dentry, &newattrs, NULL);
- put_write_access(inode);
- if (!err)
- goto again;
- }
- hpfs_unlock(dir->i_sb);
- return -ENOSPC;
+ break;
default:
drop_nlink(inode);
err = 0;
diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking
index 3ea36554107f..8918ac905a3b 100644
--- a/fs/jffs2/README.Locking
+++ b/fs/jffs2/README.Locking
@@ -2,10 +2,6 @@
JFFS2 LOCKING DOCUMENTATION
---------------------------
-At least theoretically, JFFS2 does not require the Big Kernel Lock
-(BKL), which was always helpfully obtained for it by Linux 2.4 VFS
-code. It has its own locking, as described below.
-
This document attempts to describe the existing locking rules for
JFFS2. It is not expected to remain perfectly up to date, but ought to
be fairly close.
@@ -69,6 +65,7 @@ Ordering constraints:
any f->sem held.
2. Never attempt to lock two file mutexes in one thread.
No ordering rules have been made for doing so.
+ 3. Never lock a page cache page with f->sem held.
erase_completion_lock spinlock
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index 0ae91ad6df2d..b288c8ae1236 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -50,7 +50,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
- struct jffs2_inode_cache *ic)
+ struct jffs2_inode_cache *ic,
+ int *dir_hardlinks)
{
struct jffs2_full_dirent *fd;
@@ -69,19 +70,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
fd->name, fd->ino, ic->ino);
jffs2_mark_node_obsolete(c, fd->raw);
+ /* Clear the ic/raw union so it doesn't cause problems later. */
+ fd->ic = NULL;
continue;
}
+ /* From this point, fd->raw is no longer used so we can set fd->ic */
+ fd->ic = child_ic;
+ child_ic->pino_nlink++;
+ /* If we appear (at this stage) to have hard-linked directories,
+ * set a flag to trigger a scan later */
if (fd->type == DT_DIR) {
- if (child_ic->pino_nlink) {
- JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
- fd->name, fd->ino, ic->ino);
- /* TODO: What do we do about it? */
- } else {
- child_ic->pino_nlink = ic->ino;
- }
- } else
- child_ic->pino_nlink++;
+ child_ic->flags |= INO_FLAGS_IS_DIR;
+ if (child_ic->pino_nlink > 1)
+ *dir_hardlinks = 1;
+ }
dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
/* Can't free scan_dents so far. We might need them in pass 2 */
@@ -95,8 +98,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
*/
static int jffs2_build_filesystem(struct jffs2_sb_info *c)
{
- int ret;
- int i;
+ int ret, i, dir_hardlinks = 0;
struct jffs2_inode_cache *ic;
struct jffs2_full_dirent *fd;
struct jffs2_full_dirent *dead_fds = NULL;
@@ -120,7 +122,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
/* Now scan the directory tree, increasing nlink according to every dirent found. */
for_each_inode(i, c, ic) {
if (ic->scan_dents) {
- jffs2_build_inode_pass1(c, ic);
+ jffs2_build_inode_pass1(c, ic, &dir_hardlinks);
cond_resched();
}
}
@@ -156,6 +158,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
}
dbg_fsbuild("pass 2a complete\n");
+
+ if (dir_hardlinks) {
+ /* If we detected directory hardlinks earlier, *hopefully*
+ * they are gone now because some of the links were from
+ * dead directories which still had some old dirents lying
+ * around and not yet garbage-collected, but which have
+ * been discarded above. So clear the pino_nlink field
+ * in each directory, so that the final scan below can
+ * print appropriate warnings. */
+ for_each_inode(i, c, ic) {
+ if (ic->flags & INO_FLAGS_IS_DIR)
+ ic->pino_nlink = 0;
+ }
+ }
dbg_fsbuild("freeing temporary data structures\n");
/* Finally, we can scan again and free the dirent structs */
@@ -163,6 +179,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
while(ic->scan_dents) {
fd = ic->scan_dents;
ic->scan_dents = fd->next;
+ /* We do use the pino_nlink field to count nlink of
+ * directories during the fs build, so set it to the
+ * parent ino# now, when there is hopefully only
+ * one. */
+ if (fd->type == DT_DIR) {
+ if (!fd->ic) {
+ /* We'll have complained about it and marked the corresponding
+ raw node obsolete already. Just skip it. */
+ continue;
+ }
+
+ /* We *have* to have set this in jffs2_build_inode_pass1() */
+ BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR));
+
+ /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks
+ * is set. Otherwise, we know this should never trigger anyway, so
+ * we don't do the check. And ic->pino_nlink still contains the nlink
+ * value (which is 1). */
+ if (dir_hardlinks && fd->ic->pino_nlink) {
+ JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n",
+ fd->name, fd->ino, ic->ino, fd->ic->pino_nlink);
+ /* Should we unlink it from its previous parent? */
+ }
+
+ /* For directories, ic->pino_nlink holds that parent inode # */
+ fd->ic->pino_nlink = ic->ino;
+ }
jffs2_free_full_dirent(fd);
}
ic->scan_dents = NULL;
@@ -241,11 +284,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
/* Reduce nlink of the child. If it's now zero, stick it on the
dead_fds list to be cleaned up later. Else just free the fd */
-
- if (fd->type == DT_DIR)
- child_ic->pino_nlink = 0;
- else
- child_ic->pino_nlink--;
+ child_ic->pino_nlink--;
if (!child_ic->pino_nlink) {
dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index c5ac5944bc1b..cad86bac3453 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -137,39 +137,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
struct page *pg;
struct inode *inode = mapping->host;
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
- struct jffs2_raw_inode ri;
- uint32_t alloc_len = 0;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
uint32_t pageofs = index << PAGE_CACHE_SHIFT;
int ret = 0;
- jffs2_dbg(1, "%s()\n", __func__);
-
- if (pageofs > inode->i_size) {
- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
- if (ret)
- return ret;
- }
-
- mutex_lock(&f->sem);
pg = grab_cache_page_write_begin(mapping, index, flags);
- if (!pg) {
- if (alloc_len)
- jffs2_complete_reservation(c);
- mutex_unlock(&f->sem);
+ if (!pg)
return -ENOMEM;
- }
*pagep = pg;
- if (alloc_len) {
+ jffs2_dbg(1, "%s()\n", __func__);
+
+ if (pageofs > inode->i_size) {
/* Make new hole frag from old EOF to new page */
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+ struct jffs2_raw_inode ri;
struct jffs2_full_dnode *fn;
+ uint32_t alloc_len;
jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
(unsigned int)inode->i_size, pageofs);
+ ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+ if (ret)
+ goto out_page;
+
+ mutex_lock(&f->sem);
memset(&ri, 0, sizeof(ri));
ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -196,6 +190,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
if (IS_ERR(fn)) {
ret = PTR_ERR(fn);
jffs2_complete_reservation(c);
+ mutex_unlock(&f->sem);
goto out_page;
}
ret = jffs2_add_full_dnode_to_inode(c, f, fn);
@@ -210,10 +205,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
jffs2_complete_reservation(c);
+ mutex_unlock(&f->sem);
goto out_page;
}
jffs2_complete_reservation(c);
inode->i_size = pageofs;
+ mutex_unlock(&f->sem);
}
/*
@@ -222,18 +219,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
* case of a short-copy.
*/
if (!PageUptodate(pg)) {
+ mutex_lock(&f->sem);
ret = jffs2_do_readpage_nolock(inode, pg);
+ mutex_unlock(&f->sem);
if (ret)
goto out_page;
}
- mutex_unlock(&f->sem);
jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
return ret;
out_page:
unlock_page(pg);
page_cache_release(pg);
- mutex_unlock(&f->sem);
return ret;
}
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 5a2dec2b064c..95d5880a63ee 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
BUG_ON(start > orig_start);
}
- /* First, use readpage() to read the appropriate page into the page cache */
- /* Q: What happens if we actually try to GC the _same_ page for which commit_write()
- * triggered garbage collection in the first place?
- * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
- * page OK. We'll actually write it out again in commit_write, which is a little
- * suboptimal, but at least we're correct.
- */
+ /* The rules state that we must obtain the page lock *before* f->sem, so
+ * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
+ * actually going to *change* so we're safe; we only allow reading.
+ *
+ * It is important to note that jffs2_write_begin() will ensure that its
+ * page is marked Uptodate before allocating space. That means that if we
+ * end up here trying to GC the *same* page that jffs2_write_begin() is
+ * trying to write out, read_cache_page() will not deadlock. */
+ mutex_unlock(&f->sem);
pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
+ mutex_lock(&f->sem);
if (IS_ERR(pg_ptr)) {
pr_warn("read_cache_page() returned error: %ld\n",
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index fa35ff79ab35..0637271f3770 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -194,6 +194,7 @@ struct jffs2_inode_cache {
#define INO_STATE_CLEARING 6 /* In clear_inode() */
#define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */
+#define INO_FLAGS_IS_DIR 0x02 /* is a directory */
#define RAWNODE_CLASS_INODE_CACHE 0
#define RAWNODE_CLASS_XATTR_DATUM 1
@@ -249,7 +250,10 @@ struct jffs2_readinode_info
struct jffs2_full_dirent
{
- struct jffs2_raw_node_ref *raw;
+ union {
+ struct jffs2_raw_node_ref *raw;
+ struct jffs2_inode_cache *ic; /* Just during part of build */
+ };
struct jffs2_full_dirent *next;
uint32_t version;
uint32_t ino; /* == zero for unlink */
diff --git a/fs/namei.c b/fs/namei.c
index f624d132e01e..9c590e0f66e9 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1712,6 +1712,11 @@ static inline int should_follow_link(struct nameidata *nd, struct path *link,
return 0;
if (!follow)
return 0;
+ /* make sure that d_is_symlink above matches inode */
+ if (nd->flags & LOOKUP_RCU) {
+ if (read_seqcount_retry(&link->dentry->d_seq, seq))
+ return -ECHILD;
+ }
return pick_link(nd, link, inode, seq);
}
@@ -1743,11 +1748,11 @@ static int walk_component(struct nameidata *nd, int flags)
if (err < 0)
return err;
- inode = d_backing_inode(path.dentry);
seq = 0; /* we are already out of RCU mode */
err = -ENOENT;
if (d_is_negative(path.dentry))
goto out_path_put;
+ inode = d_backing_inode(path.dentry);
}
if (flags & WALK_PUT)
@@ -3192,12 +3197,12 @@ retry_lookup:
return error;
BUG_ON(nd->flags & LOOKUP_RCU);
- inode = d_backing_inode(path.dentry);
seq = 0; /* out of RCU mode, so the value doesn't matter */
if (unlikely(d_is_negative(path.dentry))) {
path_to_nameidata(&path, nd);
return -ENOENT;
}
+ inode = d_backing_inode(path.dentry);
finish_lookup:
if (nd->depth)
put_link(nd);
@@ -3206,11 +3211,6 @@ finish_lookup:
if (unlikely(error))
return error;
- if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
- path_to_nameidata(&path, nd);
- return -ELOOP;
- }
-
if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
path_to_nameidata(&path, nd);
} else {
@@ -3229,6 +3229,10 @@ finish_open:
return error;
}
audit_inode(nd->name, nd->path.dentry, 0);
+ if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
+ error = -ELOOP;
+ goto out;
+ }
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
@@ -3273,6 +3277,10 @@ opened:
goto exit_fput;
}
out:
+ if (unlikely(error > 0)) {
+ WARN_ON(1);
+ error = -EINVAL;
+ }
if (got_write)
mnt_drop_write(nd->path.mnt);
path_put(&save_parent);
diff --git a/fs/nfs/blocklayout/extent_tree.c b/fs/nfs/blocklayout/extent_tree.c
index c59a59c37f3d..35ab51c04814 100644
--- a/fs/nfs/blocklayout/extent_tree.c
+++ b/fs/nfs/blocklayout/extent_tree.c
@@ -476,6 +476,7 @@ static void ext_tree_free_commitdata(struct nfs4_layoutcommit_args *arg,
for (i = 0; i < nr_pages; i++)
put_page(arg->layoutupdate_pages[i]);
+ vfree(arg->start_p);
kfree(arg->layoutupdate_pages);
} else {
put_page(arg->layoutupdate_page);
@@ -559,10 +560,15 @@ retry:
if (unlikely(arg->layoutupdate_pages != &arg->layoutupdate_page)) {
void *p = start_p, *end = p + arg->layoutupdate_len;
+ struct page *page = NULL;
int i = 0;
- for ( ; p < end; p += PAGE_SIZE)
- arg->layoutupdate_pages[i++] = vmalloc_to_page(p);
+ arg->start_p = start_p;
+ for ( ; p < end; p += PAGE_SIZE) {
+ page = vmalloc_to_page(p);
+ arg->layoutupdate_pages[i++] = page;
+ get_page(page);
+ }
}
dprintk("%s found %zu ranges\n", __func__, count);
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index bd25dc7077f7..dff83460e5a6 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -16,29 +16,8 @@
#define NFSDBG_FACILITY NFSDBG_PROC
-static int nfs42_set_rw_stateid(nfs4_stateid *dst, struct file *file,
- fmode_t fmode)
-{
- struct nfs_open_context *open;
- struct nfs_lock_context *lock;
- int ret;
-
- open = get_nfs_open_context(nfs_file_open_context(file));
- lock = nfs_get_lock_context(open);
- if (IS_ERR(lock)) {
- put_nfs_open_context(open);
- return PTR_ERR(lock);
- }
-
- ret = nfs4_set_rw_stateid(dst, open, lock, fmode);
-
- nfs_put_lock_context(lock);
- put_nfs_open_context(open);
- return ret;
-}
-
static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
- loff_t offset, loff_t len)
+ struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(filep);
struct nfs_server *server = NFS_SERVER(inode);
@@ -56,7 +35,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
msg->rpc_argp = &args;
msg->rpc_resp = &res;
- status = nfs42_set_rw_stateid(&args.falloc_stateid, filep, FMODE_WRITE);
+ status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
+ lock, FMODE_WRITE);
if (status)
return status;
@@ -78,15 +58,26 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
{
struct nfs_server *server = NFS_SERVER(file_inode(filep));
struct nfs4_exception exception = { };
+ struct nfs_lock_context *lock;
int err;
+ lock = nfs_get_lock_context(nfs_file_open_context(filep));
+ if (IS_ERR(lock))
+ return PTR_ERR(lock);
+
+ exception.inode = file_inode(filep);
+ exception.state = lock->open_context->state;
+
do {
- err = _nfs42_proc_fallocate(msg, filep, offset, len);
- if (err == -ENOTSUPP)
- return -EOPNOTSUPP;
+ err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
+ if (err == -ENOTSUPP) {
+ err = -EOPNOTSUPP;
+ break;
+ }
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
+ nfs_put_lock_context(lock);
return err;
}
@@ -135,7 +126,8 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
return err;
}
-static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
+static loff_t _nfs42_proc_llseek(struct file *filep,
+ struct nfs_lock_context *lock, loff_t offset, int whence)
{
struct inode *inode = file_inode(filep);
struct nfs42_seek_args args = {
@@ -156,7 +148,8 @@ static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
if (!nfs_server_capable(inode, NFS_CAP_SEEK))
return -ENOTSUPP;
- status = nfs42_set_rw_stateid(&args.sa_stateid, filep, FMODE_READ);
+ status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
+ lock, FMODE_READ);
if (status)
return status;
@@ -175,17 +168,28 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
struct nfs_server *server = NFS_SERVER(file_inode(filep));
struct nfs4_exception exception = { };
+ struct nfs_lock_context *lock;
loff_t err;
+ lock = nfs_get_lock_context(nfs_file_open_context(filep));
+ if (IS_ERR(lock))
+ return PTR_ERR(lock);
+
+ exception.inode = file_inode(filep);
+ exception.state = lock->open_context->state;
+
do {
- err = _nfs42_proc_llseek(filep, offset, whence);
+ err = _nfs42_proc_llseek(filep, lock, offset, whence);
if (err >= 0)
break;
- if (err == -ENOTSUPP)
- return -EOPNOTSUPP;
+ if (err == -ENOTSUPP) {
+ err = -EOPNOTSUPP;
+ break;
+ }
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
+ nfs_put_lock_context(lock);
return err;
}
@@ -298,8 +302,9 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server,
}
static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
- struct file *dst_f, loff_t src_offset,
- loff_t dst_offset, loff_t count)
+ struct file *dst_f, struct nfs_lock_context *src_lock,
+ struct nfs_lock_context *dst_lock, loff_t src_offset,
+ loff_t dst_offset, loff_t count)
{
struct inode *src_inode = file_inode(src_f);
struct inode *dst_inode = file_inode(dst_f);
@@ -320,11 +325,13 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
msg->rpc_argp = &args;
msg->rpc_resp = &res;
- status = nfs42_set_rw_stateid(&args.src_stateid, src_f, FMODE_READ);
+ status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
+ src_lock, FMODE_READ);
if (status)
return status;
- status = nfs42_set_rw_stateid(&args.dst_stateid, dst_f, FMODE_WRITE);
+ status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
+ dst_lock, FMODE_WRITE);
if (status)
return status;
@@ -349,22 +356,48 @@ int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
};
struct inode *inode = file_inode(src_f);
struct nfs_server *server = NFS_SERVER(file_inode(src_f));
- struct nfs4_exception exception = { };
- int err;
+ struct nfs_lock_context *src_lock;
+ struct nfs_lock_context *dst_lock;
+ struct nfs4_exception src_exception = { };
+ struct nfs4_exception dst_exception = { };
+ int err, err2;
if (!nfs_server_capable(inode, NFS_CAP_CLONE))
return -EOPNOTSUPP;
+ src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
+ if (IS_ERR(src_lock))
+ return PTR_ERR(src_lock);
+
+ src_exception.inode = file_inode(src_f);
+ src_exception.state = src_lock->open_context->state;
+
+ dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
+ if (IS_ERR(dst_lock)) {
+ err = PTR_ERR(dst_lock);
+ goto out_put_src_lock;
+ }
+
+ dst_exception.inode = file_inode(dst_f);
+ dst_exception.state = dst_lock->open_context->state;
+
do {
- err = _nfs42_proc_clone(&msg, src_f, dst_f, src_offset,
- dst_offset, count);
+ err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
+ src_offset, dst_offset, count);
if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ break;
}
- err = nfs4_handle_exception(server, err, &exception);
- } while (exception.retry);
- return err;
+ err2 = nfs4_handle_exception(server, err, &src_exception);
+ err = nfs4_handle_exception(server, err, &dst_exception);
+ if (!err)
+ err = err2;
+ } while (src_exception.retry || dst_exception.retry);
+ nfs_put_lock_context(dst_lock);
+out_put_src_lock:
+ nfs_put_lock_context(src_lock);
+ return err;
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4bfc33ad0563..14881594dd07 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2466,9 +2466,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
dentry = d_add_unique(dentry, igrab(state->inode));
if (dentry == NULL) {
dentry = opendata->dentry;
- } else if (dentry != ctx->dentry) {
+ } else {
dput(ctx->dentry);
- ctx->dentry = dget(dentry);
+ ctx->dentry = dentry;
}
nfs_set_verifier(dentry,
nfs_save_change_attribute(d_inode(opendata->dir)));
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 482b6e94bb37..2fa483e6dbe2 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -252,6 +252,27 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
}
}
+/*
+ * Mark a pnfs_layout_hdr and all associated layout segments as invalid
+ *
+ * In order to continue using the pnfs_layout_hdr, a full recovery
+ * is required.
+ * Note that caller must hold inode->i_lock.
+ */
+static int
+pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
+ struct list_head *lseg_list)
+{
+ struct pnfs_layout_range range = {
+ .iomode = IOMODE_ANY,
+ .offset = 0,
+ .length = NFS4_MAX_UINT64,
+ };
+
+ set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
+ return pnfs_mark_matching_lsegs_invalid(lo, lseg_list, &range);
+}
+
static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
@@ -554,9 +575,8 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
spin_lock(&nfsi->vfs_inode.i_lock);
lo = nfsi->layout;
if (lo) {
- lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
- pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
pnfs_get_layout_hdr(lo);
+ pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
spin_unlock(&nfsi->vfs_inode.i_lock);
@@ -617,11 +637,6 @@ pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
{
struct pnfs_layout_hdr *lo;
struct inode *inode;
- struct pnfs_layout_range range = {
- .iomode = IOMODE_ANY,
- .offset = 0,
- .length = NFS4_MAX_UINT64,
- };
LIST_HEAD(lseg_list);
int ret = 0;
@@ -636,11 +651,11 @@ pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
spin_lock(&inode->i_lock);
list_del_init(&lo->plh_bulk_destroy);
- lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
- if (is_bulk_recall)
- set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
- if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
+ if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
+ if (is_bulk_recall)
+ set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
ret = -EAGAIN;
+ }
spin_unlock(&inode->i_lock);
pnfs_free_lseg_list(&lseg_list);
/* Free all lsegs that are attached to commit buckets */
@@ -1738,8 +1753,19 @@ pnfs_set_plh_return_iomode(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode)
if (lo->plh_return_iomode != 0)
iomode = IOMODE_ANY;
lo->plh_return_iomode = iomode;
+ set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
}
+/**
+ * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
+ * @lo: pointer to layout header
+ * @tmp_list: list header to be used with pnfs_free_lseg_list()
+ * @return_range: describe layout segment ranges to be returned
+ *
+ * This function is mainly intended for use by layoutrecall. It attempts
+ * to free the layout segment immediately, or else to mark it for return
+ * as soon as its reference count drops to zero.
+ */
int
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
struct list_head *tmp_list,
@@ -1762,12 +1788,11 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
lseg, lseg->pls_range.iomode,
lseg->pls_range.offset,
lseg->pls_range.length);
+ if (mark_lseg_invalid(lseg, tmp_list))
+ continue;
+ remaining++;
set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
pnfs_set_plh_return_iomode(lo, return_range->iomode);
- if (!mark_lseg_invalid(lseg, tmp_list))
- remaining++;
- set_bit(NFS_LAYOUT_RETURN_REQUESTED,
- &lo->plh_flags);
}
return remaining;
}
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 794fd1587f34..cda0361e95a4 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -956,6 +956,7 @@ clean_orphan:
tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
update_isize, end);
if (tmp_ret < 0) {
+ ocfs2_inode_unlock(inode, 1);
ret = tmp_ret;
mlog_errno(ret);
brelse(di_bh);
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index ed95272d57a6..52f6de5d40a9 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -618,7 +618,8 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
* sole user of this dentry. Too tricky... Just unhash for
* now.
*/
- d_drop(dentry);
+ if (!err)
+ d_drop(dentry);
inode_unlock(dir);
return err;
@@ -903,6 +904,13 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
if (!overwrite && new_is_dir && !old_opaque && new_opaque)
ovl_remove_opaque(newdentry);
+ /*
+ * The old dentry now lives in a different location. Dentries in
+ * the lowerstack are stale. We cannot drop them here because
+ * access to them is lockless. This can only be a pure upper
+ * or an opaque directory (numlower is zero), or an upper non-dir
+ * entry, whose pureness is tracked by the opaque flag.
+ */
if (old_opaque != new_opaque) {
ovl_dentry_set_opaque(old, new_opaque);
if (!overwrite)
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 49e204560655..a4ff5d0d7db9 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -65,6 +65,8 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
inode_lock(upperdentry->d_inode);
err = notify_change(upperdentry, attr, NULL);
+ if (!err)
+ ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
inode_unlock(upperdentry->d_inode);
}
ovl_drop_write(dentry);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 8d826bd56b26..619ad4b016d2 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -76,12 +76,14 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry)
if (oe->__upperdentry) {
type = __OVL_PATH_UPPER;
- if (oe->numlower) {
- if (S_ISDIR(dentry->d_inode->i_mode))
- type |= __OVL_PATH_MERGE;
- } else if (!oe->opaque) {
+ /*
+ * A non-dir dentry can hold a lower dentry from its previous
+ * location. Its purity then depends only on the opaque flag.
+ */
+ if (oe->numlower && S_ISDIR(dentry->d_inode->i_mode))
+ type |= __OVL_PATH_MERGE;
+ else if (!oe->opaque)
type |= __OVL_PATH_PURE;
- }
} else {
if (oe->numlower > 1)
type |= __OVL_PATH_MERGE;
@@ -341,6 +343,7 @@ static const struct dentry_operations ovl_dentry_operations = {
static const struct dentry_operations ovl_reval_dentry_operations = {
.d_release = ovl_dentry_release,
+ .d_select_inode = ovl_d_select_inode,
.d_revalidate = ovl_dentry_revalidate,
.d_weak_revalidate = ovl_dentry_weak_revalidate,
};
diff --git a/fs/pnode.c b/fs/pnode.c
index 6367e1e435c6..c524fdddc7fb 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -202,6 +202,11 @@ static struct mount *last_dest, *last_source, *dest_master;
static struct mountpoint *mp;
static struct hlist_head *list;
+static inline bool peers(struct mount *m1, struct mount *m2)
+{
+ return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
+}
+
static int propagate_one(struct mount *m)
{
struct mount *child;
@@ -212,7 +217,7 @@ static int propagate_one(struct mount *m)
/* skip if mountpoint isn't covered by it */
if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
return 0;
- if (m->mnt_group_id == last_dest->mnt_group_id) {
+ if (peers(m, last_dest)) {
type = CL_MAKE_SHARED;
} else {
struct mount *n, *p;
@@ -223,7 +228,7 @@ static int propagate_one(struct mount *m)
last_source = last_source->mnt_master;
last_dest = last_source->mnt_parent;
}
- if (n->mnt_group_id != last_dest->mnt_group_id) {
+ if (!peers(n, last_dest)) {
last_source = last_source->mnt_master;
last_dest = last_source->mnt_parent;
}
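
The fix hinges on mnt_group_id 0 meaning "not in any peer group": two such mounts share the id without being peers, which the old bare equality test got wrong. A hedged user-space model of the predicate:

#include <stdbool.h>
#include <stdio.h>

struct mount { int mnt_group_id; };

static bool peers(const struct mount *m1, const struct mount *m2)
{
	/* equal *and* nonzero: id 0 is "no peer group at all" */
	return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
}

int main(void)
{
	struct mount a = { 0 }, b = { 0 }, c = { 7 }, d = { 7 };

	printf("%d %d\n", peers(&a, &b), peers(&c, &d));	/* 0 1 */
	return 0;
}
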
diff --git a/fs/read_write.c b/fs/read_write.c
index 324ec271cc4e..dadf24e5c95b 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -17,6 +17,7 @@
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/mount.h>
+#include <linux/fs.h>
#include "internal.h"
#include <asm/uaccess.h>
@@ -183,7 +184,7 @@ loff_t no_seek_end_llseek(struct file *file, loff_t offset, int whence)
switch (whence) {
case SEEK_SET: case SEEK_CUR:
return generic_file_llseek_size(file, offset, whence,
- ~0ULL, 0);
+ OFFSET_MAX, 0);
default:
return -EINVAL;
}
@@ -1532,10 +1533,12 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
if (!(file_in->f_mode & FMODE_READ) ||
!(file_out->f_mode & FMODE_WRITE) ||
- (file_out->f_flags & O_APPEND) ||
- !file_in->f_op->clone_file_range)
+ (file_out->f_flags & O_APPEND))
return -EBADF;
+ if (!file_in->f_op->clone_file_range)
+ return -EOPNOTSUPP;
+
ret = clone_verify_area(file_in, pos_in, len, false);
if (ret)
return ret;
diff --git a/fs/super.c b/fs/super.c
index 1182af8fd5ff..74914b1bae70 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -415,6 +415,7 @@ void generic_shutdown_super(struct super_block *sb)
sb->s_flags &= ~MS_ACTIVE;
fsnotify_unmount_inodes(sb);
+ cgroup_writeback_umount();
evict_inodes(sb);
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 50311703135b..66cdb44616d5 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -287,6 +287,12 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
goto out;
/*
+ * We don't do userfault handling for the final child pid update.
+ */
+ if (current->flags & PF_EXITING)
+ goto out;
+
+ /*
* Check that we can return VM_FAULT_RETRY.
*
* NOTE: it should become possible to return VM_FAULT_RETRY
diff --git a/fs/xattr.c b/fs/xattr.c
index 07d0e47f6a7f..4861322e28e8 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -940,7 +940,7 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
bool trusted = capable(CAP_SYS_ADMIN);
struct simple_xattr *xattr;
ssize_t remaining_size = size;
- int err;
+ int err = 0;
#ifdef CONFIG_FS_POSIX_ACL
if (inode->i_acl) {
@@ -965,11 +965,11 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
err = xattr_list_one(&buffer, &remaining_size, xattr->name);
if (err)
- return err;
+ break;
}
spin_unlock(&xattrs->lock);
- return size - remaining_size;
+ return err ? err : size - remaining_size;
}
/*
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 379c089fb051..a9ebabfe7587 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -55,7 +55,7 @@ xfs_count_page_state(
} while ((bh = bh->b_this_page) != head);
}
-STATIC struct block_device *
+struct block_device *
xfs_find_bdev_for_inode(
struct inode *inode)
{
@@ -1208,6 +1208,10 @@ xfs_vm_writepages(
struct writeback_control *wbc)
{
xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
+ if (dax_mapping(mapping))
+ return dax_writeback_mapping_range(mapping,
+ xfs_find_bdev_for_inode(mapping->host), wbc);
+
return generic_writepages(mapping, wbc);
}
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index f6ffc9ae5ceb..a4343c63fb38 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -62,5 +62,6 @@ int xfs_get_blocks_dax_fault(struct inode *inode, sector_t offset,
struct buffer_head *map_bh, int create);
extern void xfs_count_page_state(struct page *, int *, int *);
+extern struct block_device *xfs_find_bdev_for_inode(struct inode *);
#endif /* __XFS_AOPS_H__ */
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 45ec9e40150c..6c876012b2e5 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -75,7 +75,8 @@ xfs_zero_extent(
ssize_t size = XFS_FSB_TO_B(mp, count_fsb);
if (IS_DAX(VFS_I(ip)))
- return dax_clear_blocks(VFS_I(ip), block, size);
+ return dax_clear_sectors(xfs_find_bdev_for_inode(VFS_I(ip)),
+ sector, size);
/*
* let the block layer decide on the fastest method of
diff --git a/include/linux/ata.h b/include/linux/ata.h
index d2992bfa1706..c1a2f345cbe6 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -487,8 +487,8 @@ enum ata_tf_protocols {
};
enum ata_ioctls {
- ATA_IOC_GET_IO32 = 0x309,
- ATA_IOC_SET_IO32 = 0x324,
+ ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */
+ ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */
};
/* core structures */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5349e6816cbb..cb6888824108 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -310,6 +310,43 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
bio->bi_flags &= ~(1U << bit);
}
+static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
+{
+ *bv = bio_iovec(bio);
+}
+
+static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
+{
+ struct bvec_iter iter = bio->bi_iter;
+ int idx;
+
+ if (!bio_flagged(bio, BIO_CLONED)) {
+ *bv = bio->bi_io_vec[bio->bi_vcnt - 1];
+ return;
+ }
+
+ if (unlikely(!bio_multiple_segments(bio))) {
+ *bv = bio_iovec(bio);
+ return;
+ }
+
+ bio_advance_iter(bio, &iter, iter.bi_size);
+
+ if (!iter.bi_bvec_done)
+ idx = iter.bi_idx - 1;
+ else /* in the middle of bvec */
+ idx = iter.bi_idx;
+
+ *bv = bio->bi_io_vec[idx];
+
+ /*
+ * iter.bi_bvec_done records the actual length of the last bvec
+ * if this bio ends in the middle of an io vector
+ */
+ if (iter.bi_bvec_done)
+ bv->bv_len = iter.bi_bvec_done;
+}
+
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4571ef1a12a9..413c84fbc4ed 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -895,7 +895,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
struct request_queue *q = rq->q;
- if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
+ if (unlikely(rq->cmd_type != REQ_TYPE_FS))
return q->limits.max_hw_sectors;
if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
@@ -1372,6 +1372,13 @@ static inline void put_dev_sector(Sector p)
page_cache_release(p.v);
}
+static inline bool __bvec_gap_to_prev(struct request_queue *q,
+ struct bio_vec *bprv, unsigned int offset)
+{
+ return offset ||
+ ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
/*
* Check if adding a bio_vec after bprv with offset would create a gap in
* the SG list. Most drivers don't care about this, but some do.
@@ -1381,18 +1388,22 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
{
if (!queue_virt_boundary(q))
return false;
- return offset ||
- ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+ return __bvec_gap_to_prev(q, bprv, offset);
}
static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
struct bio *next)
{
- if (!bio_has_data(prev))
- return false;
+ if (bio_has_data(prev) && queue_virt_boundary(q)) {
+ struct bio_vec pb, nb;
+
+ bio_get_last_bvec(prev, &pb);
+ bio_get_first_bvec(next, &nb);
- return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
- next->bi_io_vec[0].bv_offset);
+ return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
+ }
+
+ return false;
}
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
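
The rework above matters because a cloned bio's bi_io_vec can no longer be indexed directly: a clone may start and end in the middle of the parent's vectors, so bio_get_first_bvec()/bio_get_last_bvec() derive the effective first and last segments from the iterator instead. The boundary rule itself is unchanged; a standalone model of __bvec_gap_to_prev() with plain integers in place of bio_vecs:

	#include <stdbool.h>
	#include <stdio.h>

	/* a vector may only follow another if the previous one ends on the
	 * virt boundary and the next one starts at offset 0 within its page
	 */
	static bool bvec_gap(unsigned prev_offset, unsigned prev_len,
			     unsigned next_offset, unsigned virt_boundary_mask)
	{
		if (!virt_boundary_mask)
			return false;
		return next_offset ||
		       ((prev_offset + prev_len) & virt_boundary_mask);
	}

	int main(void)
	{
		unsigned mask = 4096 - 1; /* 4k boundary, as used by e.g. NVMe */

		printf("%d\n", bvec_gap(0, 4096, 0, mask));   /* 0: no gap */
		printf("%d\n", bvec_gap(0, 2048, 0, mask));   /* 1: prev ends mid-page */
		printf("%d\n", bvec_gap(0, 4096, 512, mask)); /* 1: next starts mid-page */
		return 0;
	}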
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index c1ef6f14e7be..15151f3c4120 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -75,6 +75,7 @@
#define CEPH_FEATURE_CRUSH_TUNABLES5 (1ULL<<58) /* chooseleaf stable mode */
// duplicated since it was introduced at the same time as CEPH_FEATURE_CRUSH_TUNABLES5
#define CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING (1ULL<<58) /* New, v7 encoding */
+#define CEPH_FEATURE_FS_FILE_LAYOUT_V2 (1ULL<<58) /* file_layout_t */
/*
* The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 818e45078929..636dd59ab505 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -7,7 +7,7 @@
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
get_block_t, dio_iodone_t, int flags);
-int dax_clear_blocks(struct inode *, sector_t block, long size);
+int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
@@ -52,6 +52,8 @@ static inline bool dax_mapping(struct address_space *mapping)
{
return mapping->host && IS_DAX(mapping->host);
}
-int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
- loff_t end);
+
+struct writeback_control;
+int dax_writeback_mapping_range(struct address_space *mapping,
+ struct block_device *bdev, struct writeback_control *wbc);
#endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 7781ce110503..c4b5f4b3f8f8 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -409,9 +409,7 @@ static inline bool d_mountpoint(const struct dentry *dentry)
*/
static inline unsigned __d_entry_type(const struct dentry *dentry)
{
- unsigned type = READ_ONCE(dentry->d_flags);
- smp_rmb();
- return type & DCACHE_ENTRY_TYPE;
+ return dentry->d_flags & DCACHE_ENTRY_TYPE;
}
static inline bool d_is_miss(const struct dentry *dentry)
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 452c0b0d2f32..3b1f6cef9513 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -163,6 +163,14 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
/* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */
#define IEEE80211_MAX_FRAME_LEN 2352
+/* Maximal size of an A-MSDU */
+#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839
+#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935
+
+#define IEEE80211_MAX_MPDU_LEN_VHT_3895 3895
+#define IEEE80211_MAX_MPDU_LEN_VHT_7991 7991
+#define IEEE80211_MAX_MPDU_LEN_VHT_11454 11454
+
#define IEEE80211_MAX_SSID_LEN 32
#define IEEE80211_MAX_MESH_ID_LEN 32
@@ -843,6 +851,8 @@ enum ieee80211_vht_opmode_bits {
};
#define WLAN_SA_QUERY_TR_ID_LEN 2
+#define WLAN_MEMBERSHIP_LEN 8
+#define WLAN_USER_POSITION_LEN 16
/**
* struct ieee80211_tpc_report_ie
@@ -991,6 +1001,11 @@ struct ieee80211_mgmt {
} __packed vht_opmode_notif;
struct {
u8 action_code;
+ u8 membership[WLAN_MEMBERSHIP_LEN];
+ u8 position[WLAN_USER_POSITION_LEN];
+ } __packed vht_group_notif;
+ struct {
+ u8 action_code;
u8 dialog_token;
u8 tpc_elem_id;
u8 tpc_elem_length;
@@ -1498,6 +1513,7 @@ struct ieee80211_vht_operation {
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002
+#define IEEE80211_VHT_CAP_MAX_MPDU_MASK 0x00000003
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
@@ -2079,6 +2095,16 @@ enum ieee80211_tdls_actioncode {
#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5)
#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
+/* Defines the maximal number of MSDUs in an A-MSDU. */
+#define WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB BIT(7)
+#define WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB BIT(0)
+
+/*
+ * Fine Timing Measurement Initiator - bit 71 of @WLAN_EID_EXT_CAPABILITY
+ * information element
+ */
+#define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7)
+
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f31638c6e873..ac1923957236 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -64,7 +64,7 @@
#define round_down(x, y) ((x) & ~__round_mask(x, y))
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define DIV_ROUND_UP_ULL(ll,d) \
({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
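
The macro's arithmetic is unchanged; it is now just an alias for the UAPI __KERNEL_DIV_ROUND_UP so the kernel and exported headers share a single definition. For reference, the computation:

	#include <stdio.h>

	/* same arithmetic as __KERNEL_DIV_ROUND_UP */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		printf("%d\n", DIV_ROUND_UP(10, 4)); /* 3 */
		printf("%d\n", DIV_ROUND_UP(8, 4));  /* 2 */
		return 0;
	}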
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bec2abbd7ab2..2c4ebef79d0c 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -720,7 +720,7 @@ struct ata_device {
union {
u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
- };
+ } ____cacheline_aligned;
/* DEVSLP Timing Variables from Identify Device Data Log */
u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index bed40dff0e86..141ffdd59960 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -26,9 +26,8 @@ enum {
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
- ND_CMD_MAX_ELEM = 4,
+ ND_CMD_MAX_ELEM = 5,
ND_CMD_MAX_ENVELOPE = 16,
- ND_CMD_ARS_STATUS_MAX = SZ_4K,
ND_MAX_MAPPINGS = 32,
/* region flag indicating to direct-map persistent memory by default */
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 2e8af001c5da..bd0e7075ea6d 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -33,6 +33,7 @@
#ifndef MLX4_DRIVER_H
#define MLX4_DRIVER_H
+#include <net/devlink.h>
#include <linux/mlx4/device.h>
struct mlx4_dev;
@@ -89,6 +90,8 @@ int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
+struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port);
+
static inline u64 mlx4_mac_to_u64(u8 *addr)
{
u64 mac = 0;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index a815da92d4eb..bb1a880a5bc5 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -54,7 +54,7 @@ enum {
/* one minute for the sake of bringup. Generally, commands must always
* complete and we may need to increase this timeout value
*/
- MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000,
+ MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
MLX5_CMD_WQ_MAX_NAME = 32,
};
@@ -460,8 +460,6 @@ struct mlx5_priv {
struct mlx5_uuar_info uuari;
MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
- struct io_mapping *bf_mapping;
-
/* pages stuff */
struct workqueue_struct *pg_wq;
struct rb_root page_root;
@@ -719,7 +717,8 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
+ bool map_wc);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 03ffe9530365..9d91ce39eb0f 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -4259,7 +4259,9 @@ struct mlx5_ifc_modify_tir_bitmask_bits {
u8 reserved_at_20[0x1b];
u8 self_lb_en[0x1];
- u8 reserved_at_3c[0x3];
+ u8 reserved_at_3c[0x1];
+ u8 hash[0x1];
+ u8 reserved_at_3e[0x1];
u8 lro[0x1];
};
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e52077ffe5ed..efe7cec111fa 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1093,6 +1093,12 @@ struct tc_to_netdev {
* This function is used to get egress tunnel information for given skb.
* This is useful for retrieving outer tunnel header parameters while
* sampling packet.
+ * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
+ * This function is used to specify the headroom that the device should
+ * reserve when allocating an skb during packet reception. Setting an
+ * appropriate rx headroom value allows avoiding an skb head copy on
+ * forward. Setting a negative value resets the rx headroom to the
+ * default value.
*
*/
struct net_device_ops {
@@ -1278,6 +1284,8 @@ struct net_device_ops {
bool proto_down);
int (*ndo_fill_metadata_dst)(struct net_device *dev,
struct sk_buff *skb);
+ void (*ndo_set_rx_headroom)(struct net_device *dev,
+ int needed_headroom);
};
/**
@@ -1315,6 +1323,8 @@ struct net_device_ops {
* @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
* @IFF_TEAM: device is a team device
* @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
+ * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
+ * entity (i.e. the master device for bridged veth)
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1343,6 +1353,7 @@ enum netdev_priv_flags {
IFF_L3MDEV_SLAVE = 1<<23,
IFF_TEAM = 1<<24,
IFF_RXFH_CONFIGURED = 1<<25,
+ IFF_PHONY_HEADROOM = 1<<26,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1937,6 +1948,26 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
void *accel_priv);
+/* returns the headroom that the master device needs to take into account
+ * when forwarding to this dev
+ */
+static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
+}
+
+static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
+{
+ if (dev->netdev_ops->ndo_set_rx_headroom)
+ dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
+}
+
+/* set the device rx headroom to the dev's default */
+static inline void netdev_reset_rx_headroom(struct net_device *dev)
+{
+ netdev_set_rx_headroom(dev, -1);
+}
+
/*
* Net namespace inlines
*/
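
A device that sets IFF_PHONY_HEADROOM advertises that its needed_headroom is dictated externally, so netdev_get_fwd_headroom() reports 0 for it and the master pushes the value down with netdev_set_rx_headroom(). A hedged sketch of the driver side (the myveth_* names are illustrative, not from this patch):

	struct myveth_priv {
		unsigned int rx_headroom; /* extra headroom for rx allocations */
	};

	static void myveth_set_rx_headroom(struct net_device *dev, int new_hr)
	{
		struct myveth_priv *priv = netdev_priv(dev);

		if (new_hr < 0)
			new_hr = 0; /* negative means: reset to the default */
		priv->rx_headroom = new_hr;
	}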
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 48e0320cd643..67300f8e5f2f 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -550,9 +550,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
static inline loff_t nfs_size_to_loff_t(__u64 size)
{
- if (size > (__u64) OFFSET_MAX - 1)
- return OFFSET_MAX - 1;
- return (loff_t) size;
+ return min_t(u64, size, OFFSET_MAX);
}
static inline ino_t
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 791098a08a87..d320906cf13e 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -275,6 +275,7 @@ struct nfs4_layoutcommit_args {
size_t layoutupdate_len;
struct page *layoutupdate_page;
struct page **layoutupdate_pages;
+ __be32 *start_p;
};
struct nfs4_layoutcommit_res {
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 27df4a6585da..27716254dcc5 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -988,23 +988,6 @@ static inline int pci_is_managed(struct pci_dev *pdev)
return pdev->is_managed;
}
-static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq)
-{
- pdev->irq = irq;
- pdev->irq_managed = 1;
-}
-
-static inline void pci_reset_managed_irq(struct pci_dev *pdev)
-{
- pdev->irq = 0;
- pdev->irq_managed = 0;
-}
-
-static inline bool pci_has_managed_irq(struct pci_dev *pdev)
-{
- return pdev->irq_managed && pdev->irq > 0;
-}
-
void pci_disable_device(struct pci_dev *dev);
extern unsigned int pcibios_max_latency;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 7da3c25999df..0967a2467457 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -397,6 +397,7 @@ struct pmu {
* enum perf_event_active_state - the states of a event
*/
enum perf_event_active_state {
+ PERF_EVENT_STATE_DEAD = -4,
PERF_EVENT_STATE_EXIT = -3,
PERF_EVENT_STATE_ERROR = -2,
PERF_EVENT_STATE_OFF = -1,
@@ -905,7 +906,7 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
}
}
-extern struct static_key_deferred perf_sched_events;
+extern struct static_key_false perf_sched_events;
static __always_inline bool
perf_sw_migrate_enabled(void)
@@ -924,7 +925,7 @@ static inline void perf_event_task_migrate(struct task_struct *task)
static inline void perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
- if (static_key_false(&perf_sched_events.key))
+ if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_in(prev, task);
if (perf_sw_migrate_enabled() && task->sched_migrated) {
@@ -941,7 +942,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
{
perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
- if (static_key_false(&perf_sched_events.key))
+ if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_out(prev, next);
}
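
perf_sched_events moves from a deferred static key tested with static_key_false() to the newer static_branch API. The general conversion pattern, sketched with a hypothetical key (my_feature_key and my_slow_feature_work are illustrative, not from this patch):

	#include <linux/jump_label.h>

	extern void my_slow_feature_work(void); /* hypothetical helper */

	DEFINE_STATIC_KEY_FALSE(my_feature_key); /* disabled by default */

	void my_hot_path(void)
	{
		/* compiles to a jump-label NOP until the key is enabled */
		if (static_branch_unlikely(&my_feature_key))
			my_slow_feature_work();
	}

	/* enable/disable from slow paths */
	void my_feature_set(bool on)
	{
		if (on)
			static_branch_inc(&my_feature_key);
		else
			static_branch_dec(&my_feature_key);
	}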
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 998d8f1c3c91..b50c0492629d 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -49,6 +49,7 @@ struct bq27xxx_reg_cache {
struct bq27xxx_device_info {
struct device *dev;
+ int id;
enum bq27xxx_chip chip;
const char *name;
struct bq27xxx_access_methods bus;
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index e53b0ca49e41..e1d69834a11f 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -39,6 +39,14 @@ struct qed_update_vport_params {
struct qed_update_vport_rss_params rss_params;
};
+struct qed_start_vport_params {
+ bool remove_inner_vlan;
+ bool gro_enable;
+ bool drop_ttl0;
+ u8 vport_id;
+ u16 mtu;
+};
+
struct qed_stop_rxq_params {
u8 rss_id;
u8 rx_queue_id;
@@ -118,9 +126,7 @@ struct qed_eth_ops {
void *cookie);
int (*vport_start)(struct qed_dev *cdev,
- u8 vport_id, u16 mtu,
- u8 drop_ttl0_flg,
- u8 inner_vlan_removal_en_flg);
+ struct qed_start_vport_params *params);
int (*vport_stop)(struct qed_dev *cdev,
u8 vport_id);
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 3d43c1d4ecef..1f7599c77cd4 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -446,6 +446,12 @@ struct qed_eth_stats {
#define RX_PI 0
#define TX_PI(tc) (RX_PI + 1 + tc)
+struct qed_sb_cnt_info {
+ int sb_cnt;
+ int sb_iov_cnt;
+ int sb_free_blk;
+};
+
static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
{
u32 prod = 0;
diff --git a/include/linux/random.h b/include/linux/random.h
index a75840c1aa71..9c29122037f9 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -34,6 +34,7 @@ extern const struct file_operations random_fops, urandom_fops;
#endif
unsigned int get_random_int(void);
+unsigned long get_random_long(void);
unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
u32 prandom_u32(void);
diff --git a/include/linux/rfkill-gpio.h b/include/linux/rfkill-gpio.h
deleted file mode 100644
index 20bcb55498cd..000000000000
--- a/include/linux/rfkill-gpio.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2011, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-
-#ifndef __RFKILL_GPIO_H
-#define __RFKILL_GPIO_H
-
-#include <linux/types.h>
-#include <linux/rfkill.h>
-
-/**
- * struct rfkill_gpio_platform_data - platform data for rfkill gpio device.
- * for unused gpio's, the expected value is -1.
- * @name: name for the gpio rf kill instance
- */
-
-struct rfkill_gpio_platform_data {
- char *name;
- enum rfkill_type type;
-};
-
-#endif /* __RFKILL_GPIO_H */
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index d9010789b4e8..e6a0031d1b1f 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -104,7 +104,8 @@ int __must_check rfkill_register(struct rfkill *rfkill);
*
* Pause polling -- say transmitter is off for other reasons.
* NOTE: not necessary for suspend/resume -- in that case the
- * core stops polling anyway
+ * core stops polling anyway (but will also correctly handle
+ * the case of polling having been paused before suspend).
*/
void rfkill_pause_polling(struct rfkill *rfkill);
@@ -212,6 +213,15 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw);
* @rfkill: rfkill struct to query
*/
bool rfkill_blocked(struct rfkill *rfkill);
+
+/**
+ * rfkill_find_type - Helper for finding an rfkill type by name
+ * @name: the name of the type
+ *
+ * Returns the enum rfkill_type that corresponds to the name.
+ */
+enum rfkill_type rfkill_find_type(const char *name);
+
#else /* !RFKILL */
static inline struct rfkill * __must_check
rfkill_alloc(const char *name,
@@ -268,6 +278,12 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
{
return false;
}
+
+static inline enum rfkill_type rfkill_find_type(const char *name)
+{
+ return RFKILL_TYPE_ALL;
+}
+
#endif /* RFKILL || RFKILL_MODULE */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index eab4f8fbed58..15d0df943466 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1161,10 +1161,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
to->l4_hash = from->l4_hash;
};
-static inline void skb_sender_cpu_clear(struct sk_buff *skb)
-{
-}
-
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
@@ -1985,6 +1981,30 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
skb->tail += len;
}
+/**
+ * skb_tailroom_reserve - adjust reserved_tailroom
+ * @skb: buffer to alter
+ * @mtu: maximum amount of headlen permitted
+ * @needed_tailroom: minimum amount of reserved_tailroom
+ *
+ * Set reserved_tailroom so that headlen can be as large as possible but
+ * not larger than mtu and tailroom cannot be smaller than
+ * needed_tailroom.
+ * The required headroom should already have been reserved before using
+ * this function.
+ */
+static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
+ unsigned int needed_tailroom)
+{
+ SKB_LINEAR_ASSERT(skb);
+ if (mtu < skb_tailroom(skb) - needed_tailroom)
+ /* use at most mtu */
+ skb->reserved_tailroom = skb_tailroom(skb) - mtu;
+ else
+ /* use up to all available space */
+ skb->reserved_tailroom = needed_tailroom;
+}
+
#define ENCAP_TYPE_ETHER 0
#define ENCAP_TYPE_IPPROTO 1
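
skb_tailroom_reserve() picks reserved_tailroom so that the linear headlen never exceeds mtu while the tailroom never drops below needed_tailroom. The same arithmetic in a standalone form:

	#include <stdio.h>

	/* model of the skb_tailroom_reserve() decision */
	static unsigned reserve(unsigned tailroom, unsigned mtu, unsigned needed)
	{
		if (mtu < tailroom - needed)
			return tailroom - mtu; /* cap headlen at mtu */
		return needed;                 /* give headlen all the rest */
	}

	int main(void)
	{
		printf("%u\n", reserve(2000, 1500, 100)); /* 500: mtu-limited */
		printf("%u\n", reserve(1200, 1500, 100)); /* 100: tailroom-limited */
		return 0;
	}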
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index eead8ab93c0a..4bcf5a61aada 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -90,7 +90,21 @@ struct stmmac_dma_cfg {
int pbl;
int fixed_burst;
int mixed_burst;
- int burst_len;
+ bool aal;
+};
+
+#define AXI_BLEN 7
+struct stmmac_axi {
+ bool axi_lpi_en;
+ bool axi_xit_frm;
+ u32 axi_wr_osr_lmt;
+ u32 axi_rd_osr_lmt;
+ bool axi_kbbe;
+ bool axi_axi_all;
+ u32 axi_blen[AXI_BLEN];
+ bool axi_fb;
+ bool axi_mb;
+ bool axi_rb;
};
struct plat_stmmacenet_data {
@@ -100,6 +114,7 @@ struct plat_stmmacenet_data {
int interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
struct device_node *phy_node;
+ struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
int clk_csr;
int has_gmac;
@@ -122,5 +137,6 @@ struct plat_stmmacenet_data {
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
void *bsp_priv;
+ struct stmmac_axi *axi;
};
#endif
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 429fdfc3baf5..925730bc9fc1 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -568,6 +568,8 @@ enum {
FILTER_DYN_STRING,
FILTER_PTR_STRING,
FILTER_TRACE_FN,
+ FILTER_COMM,
+ FILTER_CPU,
};
extern int trace_event_raw_init(struct trace_event_call *call);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index b333c945e571..d0b5ca5d4e08 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -198,6 +198,7 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
void wbc_detach_inode(struct writeback_control *wbc);
void wbc_account_io(struct writeback_control *wbc, struct page *page,
size_t bytes);
+void cgroup_writeback_umount(void);
/**
* inode_attach_wb - associate an inode with its wb
@@ -301,6 +302,10 @@ static inline void wbc_account_io(struct writeback_control *wbc,
{
}
+static inline void cgroup_writeback_umount(void)
+{
+}
+
#endif /* CONFIG_CGROUP_WRITEBACK */
/*
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index 2f6a3f2233ed..da3a77d25fcb 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -75,6 +75,8 @@
#define LOWPAN_IPHC_MAX_HC_BUF_LEN (sizeof(struct ipv6hdr) + \
LOWPAN_IPHC_MAX_HEADER_LEN + \
LOWPAN_NHC_MAX_HDR_LEN)
+/* SCI/DCI is 4 bits wide, so we have at most 16 entries */
+#define LOWPAN_IPHC_CTX_TABLE_SIZE (1 << 4)
#define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */
#define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... */
@@ -98,9 +100,39 @@ enum lowpan_lltypes {
LOWPAN_LLTYPE_IEEE802154,
};
+enum lowpan_iphc_ctx_flags {
+ LOWPAN_IPHC_CTX_FLAG_ACTIVE,
+ LOWPAN_IPHC_CTX_FLAG_COMPRESSION,
+};
+
+struct lowpan_iphc_ctx {
+ u8 id;
+ struct in6_addr pfx;
+ u8 plen;
+ unsigned long flags;
+};
+
+struct lowpan_iphc_ctx_table {
+ spinlock_t lock;
+ const struct lowpan_iphc_ctx_ops *ops;
+ struct lowpan_iphc_ctx table[LOWPAN_IPHC_CTX_TABLE_SIZE];
+};
+
+static inline bool lowpan_iphc_ctx_is_active(const struct lowpan_iphc_ctx *ctx)
+{
+ return test_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, &ctx->flags);
+}
+
+static inline bool
+lowpan_iphc_ctx_is_compression(const struct lowpan_iphc_ctx *ctx)
+{
+ return test_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);
+}
+
struct lowpan_priv {
enum lowpan_lltypes lltype;
struct dentry *iface_debugfs;
+ struct lowpan_iphc_ctx_table ctx;
/* must be last */
u8 priv[0] __aligned(sizeof(void *));
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index d4f82edb5cff..dc71473462ac 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -25,6 +25,7 @@
#ifndef __HCI_CORE_H
#define __HCI_CORE_H
+#include <linux/leds.h>
#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_sock.h>
@@ -396,6 +397,8 @@ struct hci_dev {
struct delayed_work rpa_expired;
bdaddr_t rpa;
+ struct led_trigger *power_led;
+
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
int (*flush)(struct hci_dev *hdev);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9bcaaf7cd15a..9e1b24c29f0c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -712,6 +712,8 @@ struct cfg80211_acl_data {
* @p2p_opp_ps: P2P opportunistic PS
* @acl: ACL configuration used by the drivers which has support for
* MAC address based access control
+ * @pbss: If set, start as a PCP instead of AP. Relevant for DMG
+ * networks.
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -730,6 +732,7 @@ struct cfg80211_ap_settings {
u8 p2p_ctwindow;
bool p2p_opp_ps;
const struct cfg80211_acl_data *acl;
+ bool pbss;
};
/**
@@ -1888,6 +1891,8 @@ struct cfg80211_ibss_params {
* @ht_capa_mask: The bits of ht_capa which are to be used.
* @vht_capa: VHT Capability overrides
* @vht_capa_mask: The bits of vht_capa which are to be used.
+ * @pbss: if set, connect to a PCP instead of AP. Valid for DMG
+ * networks.
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
@@ -1910,6 +1915,7 @@ struct cfg80211_connect_params {
struct ieee80211_ht_cap ht_capa_mask;
struct ieee80211_vht_cap vht_capa;
struct ieee80211_vht_cap vht_capa_mask;
+ bool pbss;
};
/**
@@ -3489,6 +3495,7 @@ struct cfg80211_cached_keys;
* registered for unexpected class 3 frames (AP mode)
* @conn: (private) cfg80211 software SME connection state machine data
* @connect_keys: (private) keys to set after connection is established
+ * @conn_bss_type: connecting/connected BSS type
* @ibss_fixed: (private) IBSS is using fixed BSSID
* @ibss_dfs_possible: (private) IBSS may change to a DFS channel
* @event_list: (private) list for internal event processing
@@ -3519,6 +3526,7 @@ struct wireless_dev {
u8 ssid_len, mesh_id_len, mesh_id_up_len;
struct cfg80211_conn *conn;
struct cfg80211_cached_keys *connect_keys;
+ enum ieee80211_bss_type conn_bss_type;
struct list_head event_list;
spinlock_t event_lock;
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 10a16b5bd1c7..abffc64e7300 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -120,6 +120,11 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)
+static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+{
+ *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+}
+
static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
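
csum_replace_by_diff() folds a precomputed one's-complement difference into an existing checksum: the same RFC 1624-style update csum_replace4() performs, but taking the diff directly. A self-contained model with a worked example (the packet {0x1234, 0x5678} has checksum 0x9753; replacing 0x5678 with 0x9999 must yield ~(0x1234 + 0x9999) = 0x5432):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t csum_add(uint32_t a, uint32_t b)
	{
		a += b;
		return a + (a < b); /* fold the carry back in */
	}

	static uint16_t csum_fold(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	static void csum_replace_by_diff(uint16_t *sum, uint32_t diff)
	{
		*sum = csum_fold(csum_add(diff, ~(uint32_t)*sum));
	}

	int main(void)
	{
		uint16_t sum = 0x9753;
		uint32_t from = 0x5678, to = 0x9999;
		uint32_t diff = csum_add(to, (uint16_t)~from); /* "to - from" */

		csum_replace_by_diff(&sum, diff);
		printf("0x%04x\n", sum); /* 0x5432 */
		return 0;
	}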
diff --git a/include/net/codel.h b/include/net/codel.h
index 267e70210061..d168aca115cc 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -162,12 +162,14 @@ struct codel_vars {
* struct codel_stats - contains codel shared variables and stats
* @maxpacket: largest packet we've seen so far
* @drop_count: temp count of dropped packets in dequeue()
+ * @drop_len: bytes of dropped packets in dequeue()
* ecn_mark: number of packets we ECN marked instead of dropping
* ce_mark: number of packets CE marked because sojourn time was above ce_threshold
*/
struct codel_stats {
u32 maxpacket;
u32 drop_count;
+ u32 drop_len;
u32 ecn_mark;
u32 ce_mark;
};
@@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
vars->rec_inv_sqrt);
goto end;
}
+ stats->drop_len += qdisc_pkt_len(skb);
qdisc_drop(skb, sch);
stats->drop_count++;
skb = dequeue_func(vars, sch);
@@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
if (params->ecn && INET_ECN_set_ce(skb)) {
stats->ecn_mark++;
} else {
+ stats->drop_len += qdisc_pkt_len(skb);
qdisc_drop(skb, sch);
stats->drop_count++;
diff --git a/include/net/devlink.h b/include/net/devlink.h
new file mode 100644
index 000000000000..c37d257891d6
--- /dev/null
+++ b/include/net/devlink.h
@@ -0,0 +1,140 @@
+/*
+ * include/net/devlink.h - Network physical device Netlink interface
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _NET_DEVLINK_H_
+#define _NET_DEVLINK_H_
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
+#include <uapi/linux/devlink.h>
+
+struct devlink_ops;
+
+struct devlink {
+ struct list_head list;
+ struct list_head port_list;
+ const struct devlink_ops *ops;
+ struct device *dev;
+ possible_net_t _net;
+ char priv[0] __aligned(NETDEV_ALIGN);
+};
+
+struct devlink_port {
+ struct list_head list;
+ struct devlink *devlink;
+ unsigned index;
+ bool registered;
+ enum devlink_port_type type;
+ enum devlink_port_type desired_type;
+ void *type_dev;
+ bool split;
+ u32 split_group;
+};
+
+struct devlink_ops {
+ size_t priv_size;
+ int (*port_type_set)(struct devlink_port *devlink_port,
+ enum devlink_port_type port_type);
+ int (*port_split)(struct devlink *devlink, unsigned int port_index,
+ unsigned int count);
+ int (*port_unsplit)(struct devlink *devlink, unsigned int port_index);
+};
+
+static inline void *devlink_priv(struct devlink *devlink)
+{
+ BUG_ON(!devlink);
+ return &devlink->priv;
+}
+
+static inline struct devlink *priv_to_devlink(void *priv)
+{
+ BUG_ON(!priv);
+ return container_of(priv, struct devlink, priv);
+}
+
+struct ib_device;
+
+#if IS_ENABLED(CONFIG_NET_DEVLINK)
+
+struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size);
+int devlink_register(struct devlink *devlink, struct device *dev);
+void devlink_unregister(struct devlink *devlink);
+void devlink_free(struct devlink *devlink);
+int devlink_port_register(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index);
+void devlink_port_unregister(struct devlink_port *devlink_port);
+void devlink_port_type_eth_set(struct devlink_port *devlink_port,
+ struct net_device *netdev);
+void devlink_port_type_ib_set(struct devlink_port *devlink_port,
+ struct ib_device *ibdev);
+void devlink_port_type_clear(struct devlink_port *devlink_port);
+void devlink_port_split_set(struct devlink_port *devlink_port,
+ u32 split_group);
+
+#else
+
+static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
+ size_t priv_size)
+{
+ return kzalloc(sizeof(struct devlink) + priv_size, GFP_KERNEL);
+}
+
+static inline int devlink_register(struct devlink *devlink, struct device *dev)
+{
+ return 0;
+}
+
+static inline void devlink_unregister(struct devlink *devlink)
+{
+}
+
+static inline void devlink_free(struct devlink *devlink)
+{
+ kfree(devlink);
+}
+
+static inline int devlink_port_register(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index)
+{
+ return 0;
+}
+
+static inline void devlink_port_unregister(struct devlink_port *devlink_port)
+{
+}
+
+static inline void devlink_port_type_eth_set(struct devlink_port *devlink_port,
+ struct net_device *netdev)
+{
+}
+
+static inline void devlink_port_type_ib_set(struct devlink_port *devlink_port,
+ struct ib_device *ibdev)
+{
+}
+
+static inline void devlink_port_type_clear(struct devlink_port *devlink_port)
+{
+}
+
+static inline void devlink_port_split_set(struct devlink_port *devlink_port,
+ u32 split_group)
+{
+}
+
+#endif
+
+#endif /* _NET_DEVLINK_H_ */
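
Typical consumption of the new devlink API, sketched for a hypothetical PCI driver (the my_* names and the trimmed error handling are illustrative; netdev setup is omitted):

	struct my_priv {
		struct devlink_port dl_port;
		struct net_device *netdev;
	};

	static const struct devlink_ops my_devlink_ops; /* callbacks optional */

	static int my_probe(struct pci_dev *pdev)
	{
		struct devlink *devlink;
		struct my_priv *priv;
		int err;

		devlink = devlink_alloc(&my_devlink_ops, sizeof(*priv));
		if (!devlink)
			return -ENOMEM;
		priv = devlink_priv(devlink);

		err = devlink_register(devlink, &pdev->dev);
		if (err) {
			devlink_free(devlink);
			return err;
		}

		/* one devlink_port per physical port, then tie it to a netdev */
		err = devlink_port_register(devlink, &priv->dl_port, 0);
		if (!err)
			devlink_port_type_eth_set(&priv->dl_port, priv->netdev);
		return err;
	}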
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 3dd54867174a..26c0a3fa009a 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -305,6 +305,8 @@ struct dsa_switch_driver {
/*
* VLAN support
*/
+ int (*port_vlan_filtering)(struct dsa_switch *ds, int port,
+ bool vlan_filtering);
int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans);
diff --git a/include/net/ip.h b/include/net/ip.h
index cbb134b2f0e4..fad74d323bd6 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -240,6 +240,8 @@ static inline int inet_is_local_reserved_port(struct net *net, int port)
}
#endif
+__be32 inet_current_timestamp(void);
+
/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 5f28b606633e..e1395d70fb48 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -140,6 +140,7 @@ struct ip_tunnel {
#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800)
#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
+#define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
#define TUNNEL_OPTIONS_PRESENT (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT)
@@ -206,6 +207,20 @@ static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}
+static inline bool
+ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
+ const struct ip_tunnel_info *info)
+{
+ if (skb->mark)
+ return false;
+ if (!info)
+ return true;
+ if (info->key.tun_flags & TUNNEL_NOCACHE)
+ return false;
+
+ return true;
+}
+
static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
*tun_info)
{
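
ip_tunnel_dst_cache_usable() gives tunnel transmit paths one place to decide whether a cached route may be reused: an skb->mark can select a different per-mark route, and TUNNEL_NOCACHE lets metadata-based tunnels opt out entirely. A reduced standalone model of the decision (flag endianness ignored):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TUNNEL_NOCACHE 0x2000

	struct tun_info { uint16_t tun_flags; }; /* reduced model */

	static bool dst_cache_usable(uint32_t skb_mark, const struct tun_info *info)
	{
		if (skb_mark)
			return false; /* per-mark routing: cache would be wrong */
		if (info && (info->tun_flags & TUNNEL_NOCACHE))
			return false; /* metadata tunnel opted out */
		return true;
	}

	int main(void)
	{
		struct tun_info nocache = { TUNNEL_NOCACHE };

		printf("%d %d %d\n",
		       dst_cache_usable(0, NULL),      /* 1 */
		       dst_cache_usable(7, NULL),      /* 0 */
		       dst_cache_usable(0, &nocache)); /* 0 */
		return 0;
	}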
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index 8f81bbbc38fc..e0f4109e64c6 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -439,6 +439,12 @@ int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
/* Send a single event to user space */
void wireless_send_event(struct net_device *dev, unsigned int cmd,
union iwreq_data *wrqu, const char *extra);
+#ifdef CONFIG_WEXT_CORE
+/* flush all previous wext events - if work is done from netdev notifiers */
+void wireless_nlevent_flush(void);
+#else
+static inline void wireless_nlevent_flush(void) {}
+#endif
/* We may need a function to send a stream of events to user space.
* More on that later... */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 7c30faff245f..0c09da34b67a 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007-2010 Johannes Berg <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -298,6 +298,7 @@ struct ieee80211_vif_chanctx_switch {
* note that this is only called when it changes after the channel
* context had been assigned.
* @BSS_CHANGED_OCB: OCB join status changed
+ * @BSS_CHANGED_MU_GROUPS: VHT MU-MIMO group id or user position changed
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -323,6 +324,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_BEACON_INFO = 1<<20,
BSS_CHANGED_BANDWIDTH = 1<<21,
BSS_CHANGED_OCB = 1<<22,
+ BSS_CHANGED_MU_GROUPS = 1<<23,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -436,6 +438,19 @@ struct ieee80211_event {
};
/**
+ * struct ieee80211_mu_group_data - STA's VHT MU-MIMO group data
+ *
+ * This structure describes the group id data of VHT MU-MIMO
+ *
+ * @membership: 64 bits array - a bit is set if station is member of the group
+ * @position: 2 bits per group id indicating the position in the group
+ */
+struct ieee80211_mu_group_data {
+ u8 membership[WLAN_MEMBERSHIP_LEN];
+ u8 position[WLAN_USER_POSITION_LEN];
+};
+
+/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
* This structure keeps information about a BSS (and an association
@@ -477,6 +492,7 @@ struct ieee80211_event {
* @enable_beacon: whether beaconing should be enabled or not
* @chandef: Channel definition for this BSS -- the hardware might be
* configured a higher bandwidth than this BSS uses, for example.
+ * @mu_group: VHT MU-MIMO group membership data
* @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
* This field is only valid when the channel is a wide HT/VHT channel.
* Note that with TDLS this can be the case (channel is HT, protection must
@@ -535,6 +551,7 @@ struct ieee80211_bss_conf {
s32 cqm_rssi_thold;
u32 cqm_rssi_hyst;
struct cfg80211_chan_def chandef;
+ struct ieee80211_mu_group_data mu_group;
__be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
int arp_addr_cnt;
bool qos;
@@ -691,12 +708,14 @@ enum mac80211_tx_info_flags {
* protocol frame (e.g. EAP)
* @IEEE80211_TX_CTRL_PS_RESPONSE: This frame is a response to a poll
* frame (PS-Poll or uAPSD).
+ * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
*
* These flags are used in tx_info->control.flags.
*/
enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0),
IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
+ IEEE80211_TX_CTRL_RATE_INJECT = BIT(2),
};
/*
@@ -993,6 +1012,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_MACTIME_END: The timestamp passed in the RX status (@mactime
* field) is valid and contains the time the last symbol of the MPDU
* (including FCS) was received.
+ * @RX_FLAG_MACTIME_PLCP_START: The timestamp passed in the RX status (@mactime
+ * field) is valid and contains the time the SYNC preamble was received.
* @RX_FLAG_SHORTPRE: Short preamble was used for this frame
* @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
* @RX_FLAG_VHT: VHT MCS was used and rate_index is MCS index
@@ -1014,6 +1035,14 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
* is stored in the @ampdu_delimiter_crc field)
* @RX_FLAG_LDPC: LDPC was used
+ * @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
+ * processing it in any regular way.
+ * This is useful if drivers offload some frames but still want to report
+ * them for sniffing purposes.
+ * @RX_FLAG_SKIP_MONITOR: Process and report frame to all interfaces except
+ * monitor interfaces.
+ * This is useful if drivers offload some frames but still want to report
+ * them for sniffing purposes.
* @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
* @RX_FLAG_10MHZ: 10 MHz (half channel) was used
* @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
@@ -1033,6 +1062,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
RX_FLAG_DECRYPTED = BIT(1),
+ RX_FLAG_MACTIME_PLCP_START = BIT(2),
RX_FLAG_MMIC_STRIPPED = BIT(3),
RX_FLAG_IV_STRIPPED = BIT(4),
RX_FLAG_FAILED_FCS_CRC = BIT(5),
@@ -1046,7 +1076,7 @@ enum mac80211_rx_flags {
RX_FLAG_HT_GF = BIT(13),
RX_FLAG_AMPDU_DETAILS = BIT(14),
RX_FLAG_PN_VALIDATED = BIT(15),
- /* bit 16 free */
+ RX_FLAG_DUP_VALIDATED = BIT(16),
RX_FLAG_AMPDU_LAST_KNOWN = BIT(17),
RX_FLAG_AMPDU_IS_LAST = BIT(18),
RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19),
@@ -1054,6 +1084,8 @@ enum mac80211_rx_flags {
RX_FLAG_MACTIME_END = BIT(21),
RX_FLAG_VHT = BIT(22),
RX_FLAG_LDPC = BIT(23),
+ RX_FLAG_ONLY_MONITOR = BIT(24),
+ RX_FLAG_SKIP_MONITOR = BIT(25),
RX_FLAG_STBC_MASK = BIT(26) | BIT(27),
RX_FLAG_10MHZ = BIT(28),
RX_FLAG_5MHZ = BIT(29),
@@ -1072,6 +1104,7 @@ enum mac80211_rx_flags {
* @RX_VHT_FLAG_160MHZ: 160 MHz was used
* @RX_VHT_FLAG_BF: packet was beamformed
*/
+
enum mac80211_rx_vht_flags {
RX_VHT_FLAG_80MHZ = BIT(0),
RX_VHT_FLAG_160MHZ = BIT(1),
@@ -1091,6 +1124,8 @@ enum mac80211_rx_vht_flags {
* it but can store it and pass it back to the driver for synchronisation
* @band: the active band when this frame was received
* @freq: frequency the radio was tuned to when receiving this frame, in MHz
+ * This field must be set for management frames, but isn't strictly needed
+ * for data (other) frames - for those it only affects radiotap reporting.
* @signal: signal strength when receiving this frame, either in dBm, in dB or
* unspecified depending on the hardware capabilities flags
* @IEEE80211_HW_SIGNAL_*
@@ -1347,6 +1382,7 @@ enum ieee80211_vif_flags {
* @csa_active: marks whether a channel switch is going on. Internally it is
* write-protected by sdata_lock and local->mtx so holding either is fine
* for read access.
+ * @mu_mimo_owner: indicates interface owns MU-MIMO capability
* @driver_flags: flags/capabilities the driver has for this interface,
* these need to be set (or cleared) when the interface is added
* or, if supported by the driver, the interface type is changed
@@ -1373,6 +1409,7 @@ struct ieee80211_vif {
u8 addr[ETH_ALEN];
bool p2p;
bool csa_active;
+ bool mu_mimo_owner;
u8 cab_queue;
u8 hw_queue[IEEE80211_NUM_ACS];
@@ -1486,9 +1523,8 @@ enum ieee80211_key_flags {
* wants to be given when a frame is transmitted and needs to be
* encrypted in hardware.
* @cipher: The key's cipher suite selector.
- * @tx_pn: PN used for TX on non-TKIP keys, may be used by the driver
- * as well if it needs to do software PN assignment by itself
- * (e.g. due to TSO)
+ * @tx_pn: PN used for TX keys, may be used by the driver as well if it
+ * needs to do software PN assignment by itself (e.g. due to TSO)
* @flags: key flags, see &enum ieee80211_key_flags.
* @keyidx: the key index (0-3)
* @keylen: key material length
@@ -1514,6 +1550,9 @@ struct ieee80211_key_conf {
#define IEEE80211_MAX_PN_LEN 16
+#define TKIP_PN_TO_IV16(pn) ((u16)(pn & 0xffff))
+#define TKIP_PN_TO_IV32(pn) ((u32)((pn >> 16) & 0xffffffff))
+
/**
* struct ieee80211_key_seq - key sequence counter
*
@@ -1684,6 +1723,18 @@ struct ieee80211_sta_rates {
* @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
* valid if the STA is a TDLS peer in the first place.
* @mfp: indicates whether the STA uses management frame protection or not.
+ * @max_amsdu_subframes: indicates the maximal number of MSDUs in a single
+ * A-MSDU. Taken from the Extended Capabilities element. 0 means
+ * unlimited.
+ * @max_amsdu_len: indicates the maximal length of an A-MSDU in bytes. This
+ * field is always valid for packets with a VHT preamble. For packets
+ * with a HT preamble, additional limits apply:
+ * + If the skb is transmitted as part of a BA agreement, the
+ * A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
+ * + If the skb is not part of a BA agreement, the A-MSDU maximal
+ * size is min(max_amsdu_len, 7935) bytes.
+ * Both additional HT limits must be enforced by the low level driver.
+ * This is defined by the spec (IEEE 802.11-2012 section 8.3.2.2 NOTE 2).
* @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
*/
struct ieee80211_sta {
@@ -1702,6 +1753,8 @@ struct ieee80211_sta {
bool tdls;
bool tdls_initiator;
bool mfp;
+ u8 max_amsdu_subframes;
+ u16 max_amsdu_len;
struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
@@ -1910,6 +1963,11 @@ struct ieee80211_txq {
* by just its MAC address; this prevents, for example, the same station
* from connecting to two virtual AP interfaces at the same time.
*
+ * @IEEE80211_HW_SUPPORTS_REORDERING_BUFFER: Hardware (or driver) manages the
+ * reordering buffer internally, guaranteeing mac80211 receives frames in
+ * order and does not need to manage its own reorder buffer or BA session
+ * timeout.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -1946,6 +2004,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU,
IEEE80211_HW_BEACON_TX_STATUS,
IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR,
+ IEEE80211_HW_SUPPORTS_REORDERING_BUFFER,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -2167,7 +2226,7 @@ static inline void SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev
* @hw: the &struct ieee80211_hw to set the MAC address for
* @addr: the address to set
*/
-static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr)
+static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, const u8 *addr)
{
memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN);
}
@@ -2684,6 +2743,33 @@ enum ieee80211_ampdu_mlme_action {
};
/**
+ * struct ieee80211_ampdu_params - AMPDU action parameters
+ *
+ * @action: the ampdu action, value from %ieee80211_ampdu_mlme_action.
+ * @sta: peer of this AMPDU session
+ * @tid: tid of the BA session
+ * @ssn: start sequence number of the session. TX/RX_STOP can pass 0. When
+ * action is set to %IEEE80211_AMPDU_RX_START the driver passes back the
+ * actual ssn value used to start the session and writes the value here.
+ * @buf_size: reorder buffer size (number of subframes). Valid only when the
+ * action is set to %IEEE80211_AMPDU_RX_START or
+ * %IEEE80211_AMPDU_TX_OPERATIONAL
+ * @amsdu: indicates the peer's ability to receive A-MSDU within A-MPDU.
+ * Valid only when the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL
+ * @timeout: BA session timeout. Valid only when the action is set to
+ * %IEEE80211_AMPDU_RX_START
+ */
+struct ieee80211_ampdu_params {
+ enum ieee80211_ampdu_mlme_action action;
+ struct ieee80211_sta *sta;
+ u16 tid;
+ u16 ssn;
+ u8 buf_size;
+ bool amsdu;
+ u16 timeout;
+};
+
+/**
* enum ieee80211_frame_release_type - frame release reason
* @IEEE80211_FRAME_RELEASE_PSPOLL: frame released for PS-Poll
* @IEEE80211_FRAME_RELEASE_UAPSD: frame(s) released due to
@@ -3027,13 +3113,9 @@ enum ieee80211_reconfig_type {
* @ampdu_action: Perform a certain A-MPDU action
* The RA/TID combination determines the destination and TID we want
* the ampdu action to be performed for. The action is defined through
- * ieee80211_ampdu_mlme_action. Starting sequence number (@ssn)
- * is the first frame we expect to perform the action on. Notice
- * that TX/RX_STOP can pass NULL for this parameter.
- * The @buf_size parameter is only valid when the action is set to
- * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder
- * buffer size (number of subframes) for this session -- the driver
- * may neither send aggregates containing more subframes than this
+ * ieee80211_ampdu_mlme_action.
+ * When the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL the driver
+ * may neither send aggregates containing more subframes than @buf_size
* nor send aggregates in a way that lost frames would exceed the
* buffer size. If just limiting the aggregate size, this would be
* possible with a buf_size of 8:
@@ -3044,9 +3126,6 @@ enum ieee80211_reconfig_type {
* buffer size of 8. Correct ways to retransmit #1 would be:
* - TX: 1 or 18 or 81
* Even "189" would be wrong since 1 could be lost again.
- * The @amsdu parameter is valid when the action is set to
- * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's ability
- * to receive A-MSDU within A-MPDU.
*
* Returns a negative error code on failure.
* The callback can sleep.
@@ -3388,9 +3467,7 @@ struct ieee80211_ops {
int (*tx_last_beacon)(struct ieee80211_hw *hw);
int (*ampdu_action)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu);
+ struct ieee80211_ampdu_params *params);
int (*get_survey)(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void (*rfkill_poll)(struct ieee80211_hw *hw);
@@ -4374,21 +4451,19 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
struct sk_buff *skb, u8 *p2k);
/**
- * ieee80211_get_key_tx_seq - get key TX sequence counter
+ * ieee80211_tkip_add_iv - write TKIP IV and Ext. IV to pos
*
+ * @pos: start of crypto header
* @keyconf: the parameter passed with the set key
- * @seq: buffer to receive the sequence data
+ * @pn: PN to add
*
- * This function allows a driver to retrieve the current TX IV/PN
- * for the given key. It must not be called if IV generation is
- * offloaded to the device.
+ * Returns: pointer to the octet following IVs (i.e. beginning of
+ * the packet payload)
*
- * Note that this function may only be called when no TX processing
- * can be done concurrently, for example when queues are stopped
- * and the stop has been synchronized.
+ * This function writes the tkip IV value to pos (which should
+ * point to the crypto header)
*/
-void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
- struct ieee80211_key_seq *seq);
+u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key_conf *keyconf, u64 pn);
/**
* ieee80211_get_key_rx_seq - get key RX sequence counter
@@ -4410,23 +4485,6 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
int tid, struct ieee80211_key_seq *seq);
/**
- * ieee80211_set_key_tx_seq - set key TX sequence counter
- *
- * @keyconf: the parameter passed with the set key
- * @seq: new sequence data
- *
- * This function allows a driver to set the current TX IV/PNs for the
- * given key. This is useful when resuming from WoWLAN sleep and the
- * device may have transmitted frames using the PTK, e.g. replies to
- * ARP requests.
- *
- * Note that this function may only be called when no TX processing
- * can be done concurrently.
- */
-void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
- struct ieee80211_key_seq *seq);
-
-/**
* ieee80211_set_key_rx_seq - set key RX sequence counter
*
* @keyconf: the parameter passed with the set key
@@ -5121,6 +5179,24 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
const u8 *addr);
/**
+ * ieee80211_mark_rx_ba_filtered_frames - move RX BA window and mark filtered
+ * @pubsta: station struct
+ * @tid: the session's TID
+ * @ssn: starting sequence number of the bitmap, all frames before this are
+ * assumed to be out of the window after the call
+ * @filtered: bitmap of filtered frames, BIT(0) is the @ssn entry etc.
+ * @received_mpdus: number of received mpdus in firmware
+ *
+ * This function moves the BA window and releases all frames before @ssn, and
+ * marks the frames set in the bitmap as having been filtered. Afterwards, it
+ * checks if any frames in the window starting from @ssn can now be released
+ * (in case they were only waiting for frames that were filtered.)
+ */
+void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+ u16 ssn, u64 filtered,
+ u16 received_mpdus);
+
+/**
* ieee80211_send_bar - send a BlockAckReq frame
*
* can be used to flush pending frames from the peer's aggregation reorder
@@ -5371,6 +5447,21 @@ ieee80211_vif_type_p2p(struct ieee80211_vif *vif)
return ieee80211_iftype_p2p(vif->type, vif->p2p);
}
+/**
+ * ieee80211_update_mu_groups - set the VHT MU-MIMO group data
+ *
+ * @vif: the specified virtual interface
+ * @membership: 64 bits array - a bit is set if station is member of the group
+ * @position: 2 bits per group id indicating the position in the group
+ *
+ * Note: This function assumes that the given vif is valid and that the
+ * position and membership data are of the correct size and in the same byte
+ * order as the matching GroupId management frame.
+ * Calls to this function need to be serialized with RX path.
+ */
+void ieee80211_update_mu_groups(struct ieee80211_vif *vif,
+ const u8 *membership, const u8 *position);
+
void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif,
int rssi_min_thold,
int rssi_max_thold);
@@ -5523,4 +5614,19 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
*/
struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
+
+/**
+ * ieee80211_txq_get_depth - get pending frame/byte count of given txq
+ *
+ * The values are not guaranteed to be coherent with regard to each other, i.e.
+ * txq state can change halfway through this function and the caller may end up
+ * with a "new" frame_cnt and an "old" byte_cnt or vice versa.
+ *
+ * @txq: pointer obtained from station or virtual interface
+ * @frame_cnt: pointer to store frame count
+ * @byte_cnt: pointer to store byte count
+ */
+void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
+ unsigned long *frame_cnt,
+ unsigned long *byte_cnt);
#endif /* MAC80211_H */
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index da574bbdc333..2e3cdd2048d2 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -247,8 +247,9 @@ struct ieee802154_ops {
*/
static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb)
{
- /* return some invalid fc on failure */
- if (unlikely(skb->len < 2)) {
+	/* check if we can read the fc at the skb_mac_header of the sk buffer */
+ if (unlikely(!skb_mac_header_was_set(skb) ||
+ (skb_tail_pointer(skb) - skb_mac_header(skb)) < 2)) {
WARN_ON(1);
return cpu_to_le16(0);
}
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 2121df574262..bea14eee373e 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -392,4 +392,21 @@ struct tc_cls_u32_offload {
};
};
+/* tca flags definitions */
+#define TCA_CLS_FLAGS_SKIP_HW 1
+
+static inline bool tc_should_offload(struct net_device *dev, u32 flags)
+{
+ if (!(dev->features & NETIF_F_HW_TC))
+ return false;
+
+ if (flags & TCA_CLS_FLAGS_SKIP_HW)
+ return false;
+
+ if (!dev->netdev_ops->ndo_setup_tc)
+ return false;
+
+ return true;
+}
+
#endif
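A hedged sketch of the intended call pattern in a classifier; the surrounding variables and the ndo_setup_tc argument list are assumptions about this era's API, not prescribed by the header:

	/* sketch: only attempt hardware offload when it can succeed */
	if (tc_should_offload(dev, flags)) {
		err = dev->netdev_ops->ndo_setup_tc(dev, handle,
						    protocol, &offload);
		if (err)
			return err;
	}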
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 636a362a0e03..46e55f0202a6 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -345,6 +345,12 @@ extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
+static inline const struct Qdisc_ops *
+get_default_qdisc_ops(const struct net_device *dev, int ntx)
+{
+ return ntx < dev->real_num_tx_queues ?
+ default_qdisc_ops : &pfifo_fast_ops;
+}
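A hedged sketch of how an mq-style qdisc might use this helper to pick per-queue defaults (dev and sch are assumed from the surrounding init path; error handling omitted): real TX queues get the configured default, while queues beyond real_num_tx_queues fall back to pfifo_fast.

	/* sketch: per-queue default qdisc selection */
	unsigned int ntx;
	struct Qdisc *qdisc;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++)
		qdisc = qdisc_create_dflt(netdev_get_tx_queue(dev, ntx),
					  get_default_qdisc_ops(dev, ntx),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    ntx + 1));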
struct Qdisc_class_common {
u32 classid;
@@ -396,7 +402,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
-void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
+ unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
@@ -707,6 +714,23 @@ static inline void qdisc_reset_queue(struct Qdisc *sch)
sch->qstats.backlog = 0;
}
+static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
+ struct Qdisc **pold)
+{
+ struct Qdisc *old;
+
+ sch_tree_lock(sch);
+ old = *pold;
+ *pold = new;
+ if (old != NULL) {
+ qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
+ qdisc_reset(old);
+ }
+ sch_tree_unlock(sch);
+
+ return old;
+}
+
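A hedged usage sketch: a classful qdisc's change/graft path can swap in its new child with one call and dispose of the old one afterwards (q->qdisc is an assumed private field name):

	/* sketch: atomically swap the child under the tree lock */
	old = qdisc_replace(sch, new, &q->qdisc);
	if (old)
		qdisc_destroy(old);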
static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
struct sk_buff_head *list)
{
diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
new file mode 100644
index 000000000000..dc9a09aefb33
--- /dev/null
+++ b/include/net/tc_act/tc_ife.h
@@ -0,0 +1,61 @@
+#ifndef __NET_TC_IFE_H
+#define __NET_TC_IFE_H
+
+#include <net/act_api.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+
+#define IFE_METAHDRLEN 2
+struct tcf_ife_info {
+ struct tcf_common common;
+ u8 eth_dst[ETH_ALEN];
+ u8 eth_src[ETH_ALEN];
+ u16 eth_type;
+ u16 flags;
+ /* list of metaids allowed */
+ struct list_head metalist;
+};
+#define to_ife(a) \
+ container_of(a->priv, struct tcf_ife_info, common)
+
+struct tcf_meta_info {
+ const struct tcf_meta_ops *ops;
+ void *metaval;
+ u16 metaid;
+ struct list_head metalist;
+};
+
+struct tcf_meta_ops {
+	u16 metaid; /* Maintainer provided ID */
+	u16 metatype; /* netlink attribute type (see net/netlink.h) */
+ const char *name;
+ const char *synopsis;
+ struct list_head list;
+ int (*check_presence)(struct sk_buff *, struct tcf_meta_info *);
+ int (*encode)(struct sk_buff *, void *, struct tcf_meta_info *);
+ int (*decode)(struct sk_buff *, void *, u16 len);
+ int (*get)(struct sk_buff *skb, struct tcf_meta_info *mi);
+ int (*alloc)(struct tcf_meta_info *, void *);
+ void (*release)(struct tcf_meta_info *);
+ int (*validate)(void *val, int len);
+ struct module *owner;
+};
+
+#define MODULE_ALIAS_IFE_META(metan) MODULE_ALIAS("ifemeta" __stringify_1(metan))
+
+int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi);
+int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi);
+int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
+ const void *dval);
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval);
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval);
+int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
+int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
+int ife_validate_meta_u32(void *val, int len);
+int ife_validate_meta_u16(void *val, int len);
+void ife_release_meta_gen(struct tcf_meta_info *mi);
+int register_ife_op(struct tcf_meta_ops *mops);
+int unregister_ife_op(struct tcf_meta_ops *mops);
+
+#endif /* __NET_TC_IFE_H */
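A hedged sketch of what a metadata module built on these ops might look like; the callbacks wired up below reuse the generic helpers declared above, and module-specific encode/decode hooks are omitted for brevity:

	/* sketch: register an IFE metadata handler for skb->mark */
	static struct tcf_meta_ops ife_skbmark_ops = {
		.metaid = IFE_META_SKBMARK,
		.metatype = NLA_U32,
		.name = "skbmark",
		.synopsis = "skb mark 32 bit metadata",
		.get = ife_get_meta_u32,
		.alloc = ife_alloc_meta_u32,
		.release = ife_release_meta_gen,
		.validate = ife_validate_meta_u32,
		.owner = THIS_MODULE,
	};

	static int __init ife_skbmark_init(void)
	{
		return register_ife_op(&ife_skbmark_ops);
	}
	module_init(ife_skbmark_init);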
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index 4dce116bfd80..9ebab3a8cf0a 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -22,7 +22,7 @@ typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */
* on-the-wire Rx packet header
* - all multibyte fields should be in network byte order
*/
-struct rxrpc_header {
+struct rxrpc_wire_header {
__be32 epoch; /* client boot timestamp */
__be32 cid; /* connection and channel ID */
@@ -68,10 +68,19 @@ struct rxrpc_header {
} __packed;
-#define __rxrpc_header_off(X) offsetof(struct rxrpc_header,X)
-
extern const char *rxrpc_pkts[];
+#define RXRPC_SUPPORTED_PACKET_TYPES ( \
+ (1 << RXRPC_PACKET_TYPE_DATA) | \
+ (1 << RXRPC_PACKET_TYPE_ACK) | \
+ (1 << RXRPC_PACKET_TYPE_BUSY) | \
+ (1 << RXRPC_PACKET_TYPE_ABORT) | \
+ (1 << RXRPC_PACKET_TYPE_ACKALL) | \
+ (1 << RXRPC_PACKET_TYPE_CHALLENGE) | \
+ (1 << RXRPC_PACKET_TYPE_RESPONSE) | \
+ /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */ \
+ (1 << RXRPC_PACKET_TYPE_VERSION))
+
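The mask lends itself to a cheap validity test on ingress; a hedged sketch (the sp->hdr field names are assumed from the surrounding rxrpc code):

	/* sketch: drop packet types we do not handle */
	if (sp->hdr.type >= 32 ||
	    !(RXRPC_SUPPORTED_PACKET_TYPES & (1 << sp->hdr.type)))
		goto bad_packet;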
/*****************************************************************************/
/*
* jumbo packet secondary header
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index e2b712c90d3f..c21c38ce7450 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -343,7 +343,7 @@ void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
-void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
+int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
void (*ack)(struct hdac_bus *,
struct hdac_stream *));
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 6496f98d3d68..9221f653fee3 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -298,6 +298,17 @@ enum bpf_func_id {
* Return: csum result
*/
BPF_FUNC_csum_diff,
+
+ /**
+ * bpf_skb_[gs]et_tunnel_opt(skb, opt, size)
+ * retrieve or populate tunnel options metadata
+ * @skb: pointer to skb
+ * @opt: pointer to raw tunnel option data
+ * @size: size of @opt
+ * Return: 0 on success for set, option size for get
+ */
+ BPF_FUNC_skb_get_tunnel_opt,
+ BPF_FUNC_skb_set_tunnel_opt,
__BPF_FUNC_MAX_ID,
};
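A hedged sketch of how a tc BPF program might use the new helper; the wrapper declaration follows the usual samples/bpf convention, and the option blob layout is an assumption:

	/* sketch: copy raw tunnel options into collect-metadata state */
	static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *opt, int size) =
		(void *) BPF_FUNC_skb_set_tunnel_opt;

	int set_opt(struct __sk_buff *skb)
	{
		__u8 opt[8] = { 0x01 };	/* assumed option blob */

		return bpf_skb_set_tunnel_opt(skb, opt, sizeof(opt));
	}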
@@ -305,6 +316,7 @@ enum bpf_func_id {
/* BPF_FUNC_skb_store_bytes flags. */
#define BPF_F_RECOMPUTE_CSUM (1ULL << 0)
+#define BPF_F_INVALIDATE_HASH (1ULL << 1)
/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
* First 4 bits are for passing the header field size.
@@ -327,6 +339,10 @@ enum bpf_func_id {
#define BPF_F_FAST_STACK_CMP (1ULL << 9)
#define BPF_F_REUSE_STACKID (1ULL << 10)
+/* BPF_FUNC_skb_set_tunnel_key flags. */
+#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
+#define BPF_F_DONT_FRAGMENT (1ULL << 2)
+
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
new file mode 100644
index 000000000000..c9fee5781eb1
--- /dev/null
+++ b/include/uapi/linux/devlink.h
@@ -0,0 +1,72 @@
+/*
+ * include/uapi/linux/devlink.h - Network physical device Netlink interface
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_DEVLINK_H_
+#define _UAPI_LINUX_DEVLINK_H_
+
+#define DEVLINK_GENL_NAME "devlink"
+#define DEVLINK_GENL_VERSION 0x1
+#define DEVLINK_GENL_MCGRP_CONFIG_NAME "config"
+
+enum devlink_command {
+ /* don't change the order or add anything between, this is ABI! */
+ DEVLINK_CMD_UNSPEC,
+
+ DEVLINK_CMD_GET, /* can dump */
+ DEVLINK_CMD_SET,
+ DEVLINK_CMD_NEW,
+ DEVLINK_CMD_DEL,
+
+ DEVLINK_CMD_PORT_GET, /* can dump */
+ DEVLINK_CMD_PORT_SET,
+ DEVLINK_CMD_PORT_NEW,
+ DEVLINK_CMD_PORT_DEL,
+
+ DEVLINK_CMD_PORT_SPLIT,
+ DEVLINK_CMD_PORT_UNSPLIT,
+
+ /* add new commands above here */
+
+ __DEVLINK_CMD_MAX,
+ DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
+};
+
+enum devlink_port_type {
+ DEVLINK_PORT_TYPE_NOTSET,
+ DEVLINK_PORT_TYPE_AUTO,
+ DEVLINK_PORT_TYPE_ETH,
+ DEVLINK_PORT_TYPE_IB,
+};
+
+enum devlink_attr {
+ /* don't change the order or add anything between, this is ABI! */
+ DEVLINK_ATTR_UNSPEC,
+
+ /* bus name + dev name together are a handle for devlink entity */
+ DEVLINK_ATTR_BUS_NAME, /* string */
+ DEVLINK_ATTR_DEV_NAME, /* string */
+
+ DEVLINK_ATTR_PORT_INDEX, /* u32 */
+ DEVLINK_ATTR_PORT_TYPE, /* u16 */
+ DEVLINK_ATTR_PORT_DESIRED_TYPE, /* u16 */
+ DEVLINK_ATTR_PORT_NETDEV_IFINDEX, /* u32 */
+ DEVLINK_ATTR_PORT_NETDEV_NAME, /* string */
+ DEVLINK_ATTR_PORT_IBDEV_NAME, /* string */
+ DEVLINK_ATTR_PORT_SPLIT_COUNT, /* u32 */
+ DEVLINK_ATTR_PORT_SPLIT_GROUP, /* u32 */
+
+ /* add new attributes above here, update the policy in devlink.c */
+
+ __DEVLINK_ATTR_MAX,
+ DEVLINK_ATTR_MAX = __DEVLINK_ATTR_MAX - 1
+};
+
+#endif /* _UAPI_LINUX_DEVLINK_H_ */
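To make the attribute table concrete, a hedged kernel-side sketch of filling a port reply; the local variable names are assumptions:

	/* sketch: emit the devlink handle plus a few port attributes */
	if (nla_put_string(msg, DEVLINK_ATTR_BUS_NAME, bus_name) ||
	    nla_put_string(msg, DEVLINK_ATTR_DEV_NAME, dev_name) ||
	    nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, port_index) ||
	    nla_put_u16(msg, DEVLINK_ATTR_PORT_TYPE, DEVLINK_PORT_TYPE_ETH))
		return -EMSGSIZE;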
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 37fd6dc33de4..2835b07416b7 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -13,9 +13,14 @@
#ifndef _UAPI_LINUX_ETHTOOL_H
#define _UAPI_LINUX_ETHTOOL_H
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/if_ether.h>
+#ifndef __KERNEL__
+#include <limits.h> /* for INT_MAX */
+#endif
+
/* All structures exposed to userland should be defined such that they
* have the same layout for 32-bit and 64-bit userland.
*/
@@ -1215,7 +1220,7 @@ enum ethtool_sfeatures_retval_bits {
struct ethtool_per_queue_op {
__u32 cmd;
__u32 sub_command;
- __u32 queue_mask[DIV_ROUND_UP(MAX_NUM_QUEUE, 32)];
+ __u32 queue_mask[__KERNEL_DIV_ROUND_UP(MAX_NUM_QUEUE, 32)];
char data[];
};
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 0890b217580d..0536eefff9bf 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -144,7 +144,10 @@ struct bridge_vlan_info {
* }
* }
* [MDBA_ROUTER] = {
- * [MDBA_ROUTER_PORT]
+ * [MDBA_ROUTER_PORT] = {
+ * u32 ifindex
+ * [MDBA_ROUTER_PATTR attributes]
+ * }
* }
*/
enum {
@@ -177,6 +180,14 @@ enum {
};
#define MDBA_MDB_EATTR_MAX (__MDBA_MDB_EATTR_MAX - 1)
+/* multicast router types */
+enum {
+ MDB_RTR_TYPE_DISABLED,
+ MDB_RTR_TYPE_TEMP_QUERY,
+ MDB_RTR_TYPE_PERM,
+ MDB_RTR_TYPE_TEMP
+};
+
enum {
MDBA_ROUTER_UNSPEC,
MDBA_ROUTER_PORT,
@@ -184,6 +195,15 @@ enum {
};
#define MDBA_ROUTER_MAX (__MDBA_ROUTER_MAX - 1)
+/* router port attributes */
+enum {
+ MDBA_ROUTER_PATTR_UNSPEC,
+ MDBA_ROUTER_PATTR_TIMER,
+ MDBA_ROUTER_PATTR_TYPE,
+ __MDBA_ROUTER_PATTR_MAX
+};
+#define MDBA_ROUTER_PATTR_MAX (__MDBA_ROUTER_PATTR_MAX - 1)
+
struct br_port_msg {
__u8 family;
__u32 ifindex;
diff --git a/include/uapi/linux/kernel.h b/include/uapi/linux/kernel.h
index 321e399457f5..466073f0ce46 100644
--- a/include/uapi/linux/kernel.h
+++ b/include/uapi/linux/kernel.h
@@ -9,5 +9,6 @@
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#endif /* _UAPI_LINUX_KERNEL_H */
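Worked example of the macro, as the ethtool and mroute6 hunks elsewhere in this patch now use it: with a 256-entry set and 32-bit mask words it yields (256 + 31) / 32 = 8 (the value 256 is assumed here for illustration):

	/* sketch: eight 32-bit words cover a 256-bit interface set */
	if_mask ifs_bits[__KERNEL_DIV_ROUND_UP(256, 32)];	/* ifs_bits[8] */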
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index 1e3c8cb43bd7..625b38f65764 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -66,27 +66,33 @@ struct media_device_info {
/*
* DVB entities
*/
-#define MEDIA_ENT_F_DTV_DEMOD (MEDIA_ENT_F_BASE + 1)
-#define MEDIA_ENT_F_TS_DEMUX (MEDIA_ENT_F_BASE + 2)
-#define MEDIA_ENT_F_DTV_CA (MEDIA_ENT_F_BASE + 3)
-#define MEDIA_ENT_F_DTV_NET_DECAP (MEDIA_ENT_F_BASE + 4)
+#define MEDIA_ENT_F_DTV_DEMOD (MEDIA_ENT_F_BASE + 0x00001)
+#define MEDIA_ENT_F_TS_DEMUX (MEDIA_ENT_F_BASE + 0x00002)
+#define MEDIA_ENT_F_DTV_CA (MEDIA_ENT_F_BASE + 0x00003)
+#define MEDIA_ENT_F_DTV_NET_DECAP (MEDIA_ENT_F_BASE + 0x00004)
/*
- * Connectors
+ * I/O entities
*/
-/* It is a responsibility of the entity drivers to add connectors and links */
-#define MEDIA_ENT_F_CONN_RF (MEDIA_ENT_F_BASE + 21)
-#define MEDIA_ENT_F_CONN_SVIDEO (MEDIA_ENT_F_BASE + 22)
-#define MEDIA_ENT_F_CONN_COMPOSITE (MEDIA_ENT_F_BASE + 23)
-/* For internal test signal generators and other debug connectors */
-#define MEDIA_ENT_F_CONN_TEST (MEDIA_ENT_F_BASE + 24)
+#define MEDIA_ENT_F_IO_DTV (MEDIA_ENT_F_BASE + 0x01001)
+#define MEDIA_ENT_F_IO_VBI (MEDIA_ENT_F_BASE + 0x01002)
+#define MEDIA_ENT_F_IO_SWRADIO (MEDIA_ENT_F_BASE + 0x01003)
/*
- * I/O entities
+ * Connectors
*/
-#define MEDIA_ENT_F_IO_DTV (MEDIA_ENT_F_BASE + 31)
-#define MEDIA_ENT_F_IO_VBI (MEDIA_ENT_F_BASE + 32)
-#define MEDIA_ENT_F_IO_SWRADIO (MEDIA_ENT_F_BASE + 33)
+/* It is the responsibility of the entity drivers to add connectors and links */
+#ifdef __KERNEL__
+ /*
+ * For now, it should not be used in userspace, as some
+ * definitions may change
+ */
+
+#define MEDIA_ENT_F_CONN_RF (MEDIA_ENT_F_BASE + 0x30001)
+#define MEDIA_ENT_F_CONN_SVIDEO (MEDIA_ENT_F_BASE + 0x30002)
+#define MEDIA_ENT_F_CONN_COMPOSITE (MEDIA_ENT_F_BASE + 0x30003)
+
+#endif
/*
* Don't touch on those. The ranges MEDIA_ENT_F_OLD_BASE and
@@ -291,14 +297,14 @@ struct media_v2_entity {
__u32 id;
char name[64]; /* FIXME: move to a property? (RFC says so) */
__u32 function; /* Main function of the entity */
- __u16 reserved[12];
-};
+ __u32 reserved[6];
+} __attribute__ ((packed));
/* Should match the specific fields at media_intf_devnode */
struct media_v2_intf_devnode {
__u32 major;
__u32 minor;
-};
+} __attribute__ ((packed));
struct media_v2_interface {
__u32 id;
@@ -310,22 +316,22 @@ struct media_v2_interface {
struct media_v2_intf_devnode devnode;
__u32 raw[16];
};
-};
+} __attribute__ ((packed));
struct media_v2_pad {
__u32 id;
__u32 entity_id;
__u32 flags;
- __u16 reserved[9];
-};
+ __u32 reserved[5];
+} __attribute__ ((packed));
struct media_v2_link {
__u32 id;
__u32 source_id;
__u32 sink_id;
__u32 flags;
- __u32 reserved[5];
-};
+ __u32 reserved[6];
+} __attribute__ ((packed));
struct media_v2_topology {
__u64 topology_version;
@@ -345,7 +351,7 @@ struct media_v2_topology {
__u32 num_links;
__u32 reserved4;
__u64 ptr_links;
-};
+} __attribute__ ((packed));
static inline void __user *media_get_uptr(__u64 arg)
{
diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
index ce91215cf7e6..5062fb5751e1 100644
--- a/include/uapi/linux/mroute6.h
+++ b/include/uapi/linux/mroute6.h
@@ -1,6 +1,7 @@
#ifndef _UAPI__LINUX_MROUTE6_H
#define _UAPI__LINUX_MROUTE6_H
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sockios.h>
@@ -46,14 +47,8 @@ typedef unsigned short mifi_t;
typedef __u32 if_mask;
#define NIFBITS (sizeof(if_mask) * 8) /* bits per mask */
-#if !defined(__KERNEL__)
-#if !defined(DIV_ROUND_UP)
-#define DIV_ROUND_UP(x,y) (((x) + ((y) - 1)) / (y))
-#endif
-#endif
-
typedef struct if_set {
- if_mask ifs_bits[DIV_ROUND_UP(IF_SETSIZE, NIFBITS)];
+ if_mask ifs_bits[__KERNEL_DIV_ROUND_UP(IF_SETSIZE, NIFBITS)];
} if_set;
#define IF_SET(n, p) ((p)->ifs_bits[(n)/NIFBITS] |= (1 << ((n) % NIFBITS)))
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 5b4a4be06e2b..cc68b92124d4 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -66,14 +66,18 @@ struct nd_cmd_ars_cap {
__u64 length;
__u32 status;
__u32 max_ars_out;
+ __u32 clear_err_unit;
+ __u32 reserved;
} __packed;
struct nd_cmd_ars_start {
__u64 address;
__u64 length;
__u16 type;
- __u8 reserved[6];
+ __u8 flags;
+ __u8 reserved[5];
__u32 status;
+ __u32 scrub_time;
} __packed;
struct nd_cmd_ars_status {
@@ -81,11 +85,14 @@ struct nd_cmd_ars_status {
__u32 out_length;
__u64 address;
__u64 length;
+ __u64 restart_address;
+ __u64 restart_length;
__u16 type;
+ __u16 flags;
__u32 num_records;
struct nd_ars_record {
__u32 handle;
- __u32 flags;
+ __u32 reserved;
__u64 err_address;
__u64 length;
} __packed records[0];
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 5b7b5ebe7ca8..5a30a7563633 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1727,6 +1727,8 @@ enum nl80211_commands {
* underlying device supports these minimal RRM features:
* %NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES,
* %NL80211_FEATURE_QUIET,
+ * Or, if global RRM is supported, see:
+ * %NL80211_EXT_FEATURE_RRM
* If this flag is used, driver must add the Power Capabilities IE to the
* association request. In addition, it must also set the RRM capability
* flag in the association request's Capability Info field.
@@ -1789,6 +1791,10 @@ enum nl80211_commands {
* thus it must not specify the number of iterations, only the interval
* between scans. The scan plans are executed sequentially.
* Each scan plan is a nested attribute of &enum nl80211_sched_scan_plan.
+ * @NL80211_ATTR_PBSS: flag attribute. If set it means operate
+ * in a PBSS. Specified in %NL80211_CMD_CONNECT to request
+ * connecting to a PCP, and in %NL80211_CMD_START_AP to start
+ * a PCP instead of AP. Relevant for DMG networks only.
*
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2164,6 +2170,8 @@ enum nl80211_attrs {
NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS,
NL80211_ATTR_SCHED_SCAN_PLANS,
+ NL80211_ATTR_PBSS,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -4396,12 +4404,18 @@ enum nl80211_feature_flags {
/**
* enum nl80211_ext_feature_index - bit index of extended features.
* @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates.
+ * @NL80211_EXT_FEATURE_RRM: This driver supports RRM. When featured, userspace
+ * can request to use RRM (see %NL80211_ATTR_USE_RRM) with
+ * %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set
+ * the ASSOC_REQ_USE_RRM flag in the association request even if
+ * NL80211_FEATURE_QUIET is not advertised.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_VHT_IBSS,
+ NL80211_EXT_FEATURE_RRM,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 439873775d49..9874f5680926 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -172,6 +172,7 @@ enum {
TCA_U32_INDEV,
TCA_U32_PCNT,
TCA_U32_MARK,
+ TCA_U32_FLAGS,
__TCA_U32_MAX
};
diff --git a/include/uapi/linux/rfkill.h b/include/uapi/linux/rfkill.h
index 058757f7a733..2e00dcebebd0 100644
--- a/include/uapi/linux/rfkill.h
+++ b/include/uapi/linux/rfkill.h
@@ -59,6 +59,8 @@ enum rfkill_type {
* @RFKILL_OP_DEL: a device was removed
* @RFKILL_OP_CHANGE: a device's state changed -- userspace changes one device
* @RFKILL_OP_CHANGE_ALL: userspace changes all devices (of a type, or all)
+ * into a state, also updating the default state used for devices that
+ * are hot-plugged later.
*/
enum rfkill_operation {
RFKILL_OP_ADD = 0,
diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h
new file mode 100644
index 000000000000..d648ff66586f
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_ife.h
@@ -0,0 +1,38 @@
+#ifndef __UAPI_TC_IFE_H
+#define __UAPI_TC_IFE_H
+
+#include <linux/types.h>
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_IFE 25
+/* Flag bits; for now just encode/decode, which are mutually exclusive */
+#define IFE_ENCODE 1
+#define IFE_DECODE 0
+
+struct tc_ife {
+ tc_gen;
+ __u16 flags;
+};
+
+/* XXX: We need to encode the total number of bytes consumed */
+enum {
+ TCA_IFE_UNSPEC,
+ TCA_IFE_PARMS,
+ TCA_IFE_TM,
+ TCA_IFE_DMAC,
+ TCA_IFE_SMAC,
+ TCA_IFE_TYPE,
+ TCA_IFE_METALST,
+ __TCA_IFE_MAX
+};
+#define TCA_IFE_MAX (__TCA_IFE_MAX - 1)
+
+#define IFE_META_SKBMARK 1
+#define IFE_META_HASHID 2
+#define IFE_META_PRIO 3
+#define IFE_META_QMAP 4
+/* Can be overridden at runtime by module option */
+#define __IFE_META_MAX 5
+#define IFE_META_MAX (__IFE_META_MAX - 1)
+
+#endif
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0d58522103cd..614614821f00 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -64,8 +64,17 @@ static void remote_function(void *data)
struct task_struct *p = tfc->p;
if (p) {
- tfc->ret = -EAGAIN;
- if (task_cpu(p) != smp_processor_id() || !task_curr(p))
+ /* -EAGAIN */
+ if (task_cpu(p) != smp_processor_id())
+ return;
+
+ /*
+ * Now that we're on right CPU with IRQs disabled, we can test
+ * if we hit the right task without races.
+ */
+
+ tfc->ret = -ESRCH; /* No such (running) process */
+ if (p != current)
return;
}
@@ -92,13 +101,17 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info)
.p = p,
.func = func,
.info = info,
- .ret = -ESRCH, /* No such (running) process */
+ .ret = -EAGAIN,
};
+ int ret;
- if (task_curr(p))
- smp_call_function_single(task_cpu(p), remote_function, &data, 1);
+ do {
+ ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
+ if (!ret)
+ ret = data.ret;
+ } while (ret == -EAGAIN);
- return data.ret;
+ return ret;
}
/**
@@ -169,19 +182,6 @@ static bool is_kernel_event(struct perf_event *event)
* rely on ctx->is_active and therefore cannot use event_function_call().
* See perf_install_in_context().
*
- * This is because we need a ctx->lock serialized variable (ctx->is_active)
- * to reliably determine if a particular task/context is scheduled in. The
- * task_curr() use in task_function_call() is racy in that a remote context
- * switch is not a single atomic operation.
- *
- * As is, the situation is 'safe' because we set rq->curr before we do the
- * actual context switch. This means that task_curr() will fail early, but
- * we'll continue spinning on ctx->is_active until we've passed
- * perf_event_task_sched_out().
- *
- * Without this ctx->lock serialized variable we could have race where we find
- * the task (and hence the context) would not be active while in fact they are.
- *
* If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
*/
@@ -212,7 +212,7 @@ static int event_function(void *info)
*/
if (ctx->task) {
if (ctx->task != current) {
- ret = -EAGAIN;
+ ret = -ESRCH;
goto unlock;
}
@@ -276,10 +276,10 @@ static void event_function_call(struct perf_event *event, event_f func, void *da
return;
}
-again:
if (task == TASK_TOMBSTONE)
return;
+again:
if (!task_function_call(task, event_function, &efs))
return;
@@ -289,13 +289,15 @@ again:
* a concurrent perf_event_context_sched_out().
*/
task = ctx->task;
- if (task != TASK_TOMBSTONE) {
- if (ctx->is_active) {
- raw_spin_unlock_irq(&ctx->lock);
- goto again;
- }
- func(event, NULL, ctx, data);
+ if (task == TASK_TOMBSTONE) {
+ raw_spin_unlock_irq(&ctx->lock);
+ return;
}
+ if (ctx->is_active) {
+ raw_spin_unlock_irq(&ctx->lock);
+ goto again;
+ }
+ func(event, NULL, ctx, data);
raw_spin_unlock_irq(&ctx->lock);
}
@@ -314,6 +316,7 @@ again:
enum event_type_t {
EVENT_FLEXIBLE = 0x1,
EVENT_PINNED = 0x2,
+ EVENT_TIME = 0x4,
EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};
@@ -321,7 +324,13 @@ enum event_type_t {
* perf_sched_events : >0 events exist
* perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
*/
-struct static_key_deferred perf_sched_events __read_mostly;
+
+static void perf_sched_delayed(struct work_struct *work);
+DEFINE_STATIC_KEY_FALSE(perf_sched_events);
+static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
+static DEFINE_MUTEX(perf_sched_mutex);
+static atomic_t perf_sched_count;
+
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
@@ -1288,16 +1297,18 @@ static u64 perf_event_time(struct perf_event *event)
/*
* Update the total_time_enabled and total_time_running fields for a event.
- * The caller of this function needs to hold the ctx->lock.
*/
static void update_event_times(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
u64 run_end;
+ lockdep_assert_held(&ctx->lock);
+
if (event->state < PERF_EVENT_STATE_INACTIVE ||
event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
return;
+
/*
* in cgroup mode, time_enabled represents
* the time the event was enabled AND active
@@ -1645,7 +1656,7 @@ out:
static bool is_orphaned_event(struct perf_event *event)
{
- return event->state == PERF_EVENT_STATE_EXIT;
+ return event->state == PERF_EVENT_STATE_DEAD;
}
static inline int pmu_filter_match(struct perf_event *event)
@@ -1690,14 +1701,14 @@ event_sched_out(struct perf_event *event,
perf_pmu_disable(event->pmu);
+ event->tstamp_stopped = tstamp;
+ event->pmu->del(event, 0);
+ event->oncpu = -1;
event->state = PERF_EVENT_STATE_INACTIVE;
if (event->pending_disable) {
event->pending_disable = 0;
event->state = PERF_EVENT_STATE_OFF;
}
- event->tstamp_stopped = tstamp;
- event->pmu->del(event, 0);
- event->oncpu = -1;
if (!is_software_event(event))
cpuctx->active_oncpu--;
@@ -1732,7 +1743,6 @@ group_sched_out(struct perf_event *group_event,
}
#define DETACH_GROUP 0x01UL
-#define DETACH_STATE 0x02UL
/*
* Cross CPU call to remove a performance event
@@ -1752,8 +1762,6 @@ __perf_remove_from_context(struct perf_event *event,
if (flags & DETACH_GROUP)
perf_group_detach(event);
list_del_event(event, ctx);
- if (flags & DETACH_STATE)
- event->state = PERF_EVENT_STATE_EXIT;
if (!ctx->nr_events && ctx->is_active) {
ctx->is_active = 0;
@@ -2063,14 +2071,27 @@ static void add_event_to_ctx(struct perf_event *event,
event->tstamp_stopped = tstamp;
}
-static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx);
+static void ctx_sched_out(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx,
+ enum event_type_t event_type);
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task);
+static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
+ struct perf_event_context *ctx)
+{
+ if (!cpuctx->task_ctx)
+ return;
+
+ if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
+ return;
+
+ ctx_sched_out(ctx, cpuctx, EVENT_ALL);
+}
+
static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
struct task_struct *task)
@@ -2097,49 +2118,68 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
/*
* Cross CPU call to install and enable a performance event
*
- * Must be called with ctx->mutex held
+ * Very similar to remote_function() + event_function() but cannot assume that
+ * things like ctx->is_active and cpuctx->task_ctx are set.
*/
static int __perf_install_in_context(void *info)
{
- struct perf_event_context *ctx = info;
+ struct perf_event *event = info;
+ struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
+ bool activate = true;
+ int ret = 0;
raw_spin_lock(&cpuctx->ctx.lock);
if (ctx->task) {
raw_spin_lock(&ctx->lock);
- /*
- * If we hit the 'wrong' task, we've since scheduled and
- * everything should be sorted, nothing to do!
- */
task_ctx = ctx;
- if (ctx->task != current)
+
+ /* If we're on the wrong CPU, try again */
+ if (task_cpu(ctx->task) != smp_processor_id()) {
+ ret = -ESRCH;
goto unlock;
+ }
/*
- * If task_ctx is set, it had better be to us.
+ * If we're on the right CPU, see if the task we target is
+ * current, if not we don't have to activate the ctx, a future
+ * context switch will do that for us.
*/
- WARN_ON_ONCE(cpuctx->task_ctx != ctx && cpuctx->task_ctx);
+ if (ctx->task != current)
+ activate = false;
+ else
+ WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
+
} else if (task_ctx) {
raw_spin_lock(&task_ctx->lock);
}
- ctx_resched(cpuctx, task_ctx);
+ if (activate) {
+ ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+ add_event_to_ctx(event, ctx);
+ ctx_resched(cpuctx, task_ctx);
+ } else {
+ add_event_to_ctx(event, ctx);
+ }
+
unlock:
perf_ctx_unlock(cpuctx, task_ctx);
- return 0;
+ return ret;
}
/*
- * Attach a performance event to a context
+ * Attach a performance event to a context.
+ *
+ * Very similar to event_function_call, see comment there.
*/
static void
perf_install_in_context(struct perf_event_context *ctx,
struct perf_event *event,
int cpu)
{
- struct task_struct *task = NULL;
+ struct task_struct *task = READ_ONCE(ctx->task);
lockdep_assert_held(&ctx->mutex);
@@ -2147,40 +2187,46 @@ perf_install_in_context(struct perf_event_context *ctx,
if (event->cpu != -1)
event->cpu = cpu;
+ if (!task) {
+ cpu_function_call(cpu, __perf_install_in_context, event);
+ return;
+ }
+
+ /*
+ * Should not happen, we validate the ctx is still alive before calling.
+ */
+ if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
+ return;
+
/*
* Installing events is tricky because we cannot rely on ctx->is_active
* to be set in case this is the nr_events 0 -> 1 transition.
- *
- * So what we do is we add the event to the list here, which will allow
- * a future context switch to DTRT and then send a racy IPI. If the IPI
- * fails to hit the right task, this means a context switch must have
- * happened and that will have taken care of business.
*/
- raw_spin_lock_irq(&ctx->lock);
- task = ctx->task;
+again:
/*
- * Worse, we cannot even rely on the ctx actually existing anymore. If
- * between find_get_context() and perf_install_in_context() the task
- * went through perf_event_exit_task() its dead and we should not be
- * adding new events.
+ * Cannot use task_function_call() because we need to run on the task's
+ * CPU regardless of whether it's current or not.
*/
- if (task == TASK_TOMBSTONE) {
+ if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
+ return;
+
+ raw_spin_lock_irq(&ctx->lock);
+ task = ctx->task;
+ if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
+ /*
+ * Cannot happen because we already checked above (which also
+ * cannot happen), and we hold ctx->mutex, which serializes us
+ * against perf_event_exit_task_context().
+ */
raw_spin_unlock_irq(&ctx->lock);
return;
}
- update_context_time(ctx);
+ raw_spin_unlock_irq(&ctx->lock);
/*
- * Update cgrp time only if current cgrp matches event->cgrp.
- * Must be done before calling add_event_to_ctx().
+ * Since !ctx->is_active doesn't mean anything, we must IPI
+ * unconditionally.
*/
- update_cgrp_time_from_event(event);
- add_event_to_ctx(event, ctx);
- raw_spin_unlock_irq(&ctx->lock);
-
- if (task)
- task_function_call(task, __perf_install_in_context, ctx);
- else
- cpu_function_call(cpu, __perf_install_in_context, ctx);
+ goto again;
}
/*
@@ -2219,17 +2265,18 @@ static void __perf_event_enable(struct perf_event *event,
event->state <= PERF_EVENT_STATE_ERROR)
return;
- update_context_time(ctx);
+ if (ctx->is_active)
+ ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+
__perf_event_mark_enabled(event);
if (!ctx->is_active)
return;
if (!event_filter_match(event)) {
- if (is_cgroup_event(event)) {
- perf_cgroup_set_timestamp(current, ctx); // XXX ?
+ if (is_cgroup_event(event))
perf_cgroup_defer_enabled(event);
- }
+ ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
return;
}
@@ -2237,8 +2284,10 @@ static void __perf_event_enable(struct perf_event *event,
* If the event is in a group and isn't the group leader,
* then don't put it on unless the group is on.
*/
- if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
+ if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
+ ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
return;
+ }
task_ctx = cpuctx->task_ctx;
if (ctx->task)
@@ -2344,24 +2393,33 @@ static void ctx_sched_out(struct perf_event_context *ctx,
}
ctx->is_active &= ~event_type;
+ if (!(ctx->is_active & EVENT_ALL))
+ ctx->is_active = 0;
+
if (ctx->task) {
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
if (!ctx->is_active)
cpuctx->task_ctx = NULL;
}
- update_context_time(ctx);
- update_cgrp_time_from_cpuctx(cpuctx);
- if (!ctx->nr_active)
+ is_active ^= ctx->is_active; /* changed bits */
+
+ if (is_active & EVENT_TIME) {
+ /* update (and stop) ctx time */
+ update_context_time(ctx);
+ update_cgrp_time_from_cpuctx(cpuctx);
+ }
+
+ if (!ctx->nr_active || !(is_active & EVENT_ALL))
return;
perf_pmu_disable(ctx->pmu);
- if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
+ if (is_active & EVENT_PINNED) {
list_for_each_entry(event, &ctx->pinned_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
- if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
+ if (is_active & EVENT_FLEXIBLE) {
list_for_each_entry(event, &ctx->flexible_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
@@ -2641,18 +2699,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
perf_cgroup_sched_out(task, next);
}
-static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx)
-{
- if (!cpuctx->task_ctx)
- return;
-
- if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
- return;
-
- ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-}
-
/*
* Called with IRQs disabled
*/
@@ -2735,7 +2781,7 @@ ctx_sched_in(struct perf_event_context *ctx,
if (likely(!ctx->nr_events))
return;
- ctx->is_active |= event_type;
+ ctx->is_active |= (event_type | EVENT_TIME);
if (ctx->task) {
if (!is_active)
cpuctx->task_ctx = ctx;
@@ -2743,18 +2789,24 @@ ctx_sched_in(struct perf_event_context *ctx,
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
}
- now = perf_clock();
- ctx->timestamp = now;
- perf_cgroup_set_timestamp(task, ctx);
+ is_active ^= ctx->is_active; /* changed bits */
+
+ if (is_active & EVENT_TIME) {
+ /* start ctx time */
+ now = perf_clock();
+ ctx->timestamp = now;
+ perf_cgroup_set_timestamp(task, ctx);
+ }
+
/*
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
*/
- if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
+ if (is_active & EVENT_PINNED)
ctx_pinned_sched_in(ctx, cpuctx);
/* Then walk through the lower prio flexible groups */
- if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
+ if (is_active & EVENT_FLEXIBLE)
ctx_flexible_sched_in(ctx, cpuctx);
}
@@ -3120,6 +3172,7 @@ static void perf_event_enable_on_exec(int ctxn)
cpuctx = __get_cpu_context(ctx);
perf_ctx_lock(cpuctx, ctx);
+ ctx_sched_out(ctx, cpuctx, EVENT_TIME);
list_for_each_entry(event, &ctx->event_list, event_entry)
enabled |= event_enable_on_exec(event, ctx);
@@ -3537,12 +3590,22 @@ static void unaccount_event(struct perf_event *event)
if (has_branch_stack(event))
dec = true;
- if (dec)
- static_key_slow_dec_deferred(&perf_sched_events);
+ if (dec) {
+ if (!atomic_add_unless(&perf_sched_count, -1, 1))
+ schedule_delayed_work(&perf_sched_work, HZ);
+ }
unaccount_event_cpu(event, event->cpu);
}
+static void perf_sched_delayed(struct work_struct *work)
+{
+ mutex_lock(&perf_sched_mutex);
+ if (atomic_dec_and_test(&perf_sched_count))
+ static_branch_disable(&perf_sched_events);
+ mutex_unlock(&perf_sched_mutex);
+}
+
/*
* The following implement mutual exclusion of events on "exclusive" pmus
* (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
@@ -3752,30 +3815,42 @@ static void put_event(struct perf_event *event)
*/
int perf_event_release_kernel(struct perf_event *event)
{
- struct perf_event_context *ctx;
+ struct perf_event_context *ctx = event->ctx;
struct perf_event *child, *tmp;
+ /*
+ * If we got here through err_file: fput(event_file); we will not have
+ * attached to a context yet.
+ */
+ if (!ctx) {
+ WARN_ON_ONCE(event->attach_state &
+ (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
+ goto no_ctx;
+ }
+
if (!is_kernel_event(event))
perf_remove_from_owner(event);
ctx = perf_event_ctx_lock(event);
WARN_ON_ONCE(ctx->parent_ctx);
- perf_remove_from_context(event, DETACH_GROUP | DETACH_STATE);
- perf_event_ctx_unlock(event, ctx);
+ perf_remove_from_context(event, DETACH_GROUP);
+ raw_spin_lock_irq(&ctx->lock);
/*
- * At this point we must have event->state == PERF_EVENT_STATE_EXIT,
- * either from the above perf_remove_from_context() or through
- * perf_event_exit_event().
+ * Mark this event as STATE_DEAD, there is no external reference to it
+ * anymore.
*
- * Therefore, anybody acquiring event->child_mutex after the below
- * loop _must_ also see this, most importantly inherit_event() which
- * will avoid placing more children on the list.
+ * Anybody acquiring event->child_mutex after the below loop _must_
+ * also see this, most importantly inherit_event() which will avoid
+ * placing more children on the list.
*
* Thus this guarantees that we will in fact observe and kill _ALL_
* child events.
*/
- WARN_ON_ONCE(event->state != PERF_EVENT_STATE_EXIT);
+ event->state = PERF_EVENT_STATE_DEAD;
+ raw_spin_unlock_irq(&ctx->lock);
+
+ perf_event_ctx_unlock(event, ctx);
again:
mutex_lock(&event->child_mutex);
@@ -3830,8 +3905,8 @@ again:
}
mutex_unlock(&event->child_mutex);
- /* Must be the last reference */
- put_event(event);
+no_ctx:
+ put_event(event); /* Must be the 'last' reference */
return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
@@ -3988,7 +4063,7 @@ static bool is_event_hup(struct perf_event *event)
{
bool no_children;
- if (event->state != PERF_EVENT_STATE_EXIT)
+ if (event->state > PERF_EVENT_STATE_EXIT)
return false;
mutex_lock(&event->child_mutex);
@@ -7769,8 +7844,28 @@ static void account_event(struct perf_event *event)
if (is_cgroup_event(event))
inc = true;
- if (inc)
- static_key_slow_inc(&perf_sched_events.key);
+ if (inc) {
+ if (atomic_inc_not_zero(&perf_sched_count))
+ goto enabled;
+
+ mutex_lock(&perf_sched_mutex);
+ if (!atomic_read(&perf_sched_count)) {
+ static_branch_enable(&perf_sched_events);
+ /*
+ * Guarantee that all CPUs observe the key change and
+ * call the perf scheduling hooks before proceeding to
+ * install events that need them.
+ */
+ synchronize_sched();
+ }
+ /*
+ * Now that we have waited for the sync_sched(), allow further
+ * increments to bypass the mutex.
+ */
+ atomic_inc(&perf_sched_count);
+ mutex_unlock(&perf_sched_mutex);
+ }
+enabled:
account_event_cpu(event, event->cpu);
}
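The enable side above is an instance of a reusable pattern; a hedged, generic restatement under placeholder names: the first user flips the static branch under a mutex and waits for all CPUs to observe it, after which later users take the lock-free fast path.

	/* sketch: refcounted static-branch enable */
	static DEFINE_MUTEX(example_mutex);
	static atomic_t example_count;
	static DEFINE_STATIC_KEY_FALSE(example_key);

	static void example_get(void)
	{
		if (atomic_inc_not_zero(&example_count))
			return;		/* already enabled, fast path */

		mutex_lock(&example_mutex);
		if (!atomic_read(&example_count)) {
			static_branch_enable(&example_key);
			/* wait until every CPU sees the new key state */
			synchronize_sched();
		}
		atomic_inc(&example_count);
		mutex_unlock(&example_mutex);
	}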
@@ -8389,10 +8484,19 @@ SYSCALL_DEFINE5(perf_event_open,
if (move_group) {
gctx = group_leader->ctx;
mutex_lock_double(&gctx->mutex, &ctx->mutex);
+ if (gctx->task == TASK_TOMBSTONE) {
+ err = -ESRCH;
+ goto err_locked;
+ }
} else {
mutex_lock(&ctx->mutex);
}
+ if (ctx->task == TASK_TOMBSTONE) {
+ err = -ESRCH;
+ goto err_locked;
+ }
+
if (!perf_event_validate_size(event)) {
err = -E2BIG;
goto err_locked;
@@ -8509,7 +8613,12 @@ err_context:
perf_unpin_context(ctx);
put_ctx(ctx);
err_alloc:
- free_event(event);
+ /*
+ * If event_file is set, the fput() above will have called ->release()
+ * and that will take care of freeing the event.
+ */
+ if (!event_file)
+ free_event(event);
err_cpus:
put_online_cpus();
err_task:
@@ -8563,12 +8672,14 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
+ if (ctx->task == TASK_TOMBSTONE) {
+ err = -ESRCH;
+ goto err_unlock;
+ }
+
if (!exclusive_event_installable(event, ctx)) {
- mutex_unlock(&ctx->mutex);
- perf_unpin_context(ctx);
- put_ctx(ctx);
err = -EBUSY;
- goto err_free;
+ goto err_unlock;
}
perf_install_in_context(ctx, event, cpu);
@@ -8577,6 +8688,10 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
return event;
+err_unlock:
+ mutex_unlock(&ctx->mutex);
+ perf_unpin_context(ctx);
+ put_ctx(ctx);
err_free:
free_event(event);
err:
@@ -8695,7 +8810,7 @@ perf_event_exit_event(struct perf_event *child_event,
if (parent_event)
perf_group_detach(child_event);
list_del_event(child_event, child_ctx);
- child_event->state = PERF_EVENT_STATE_EXIT; /* see perf_event_release_kernel() */
+ child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
raw_spin_unlock_irq(&child_ctx->lock);
/*
@@ -9313,9 +9428,6 @@ void __init perf_event_init(void)
ret = init_hw_breakpoint();
WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
- /* do not patch jump label more than once per second */
- jump_label_rate_limit(&perf_sched_events, HZ);
-
/*
* Build time assertion that we keep the data_head at the intended
* location. IOW, validation we got the __reserved[] size right.
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 7a1b5c3ef14e..b981a7b023f0 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -136,8 +136,10 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
if (addr) {
*ptr = addr;
devres_add(dev, ptr);
- } else
+ } else {
devres_free(ptr);
+ return ERR_PTR(-ENXIO);
+ }
return addr;
}
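With this change callers must treat the return value as an error pointer rather than testing for NULL; a hedged caller-side sketch (the resource variable is assumed):

	/* sketch: check for ERR_PTR(), not NULL, after this patch */
	addr = devm_memremap(dev, res->start, resource_size(res), MEMREMAP_WB);
	if (IS_ERR(addr))
		return PTR_ERR(addr);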
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index cd64c979d0e1..57b939c81bce 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -420,7 +420,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
* entity.
*/
if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
- printk_deferred_once("sched: DL replenish lagged to much\n");
+ printk_deferred_once("sched: DL replenish lagged too much\n");
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime;
}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f333e57c4614..05ddc0820771 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -97,16 +97,16 @@ trace_find_event_field(struct trace_event_call *call, char *name)
struct ftrace_event_field *field;
struct list_head *head;
- field = __find_event_field(&ftrace_generic_fields, name);
+ head = trace_get_fields(call);
+ field = __find_event_field(head, name);
if (field)
return field;
- field = __find_event_field(&ftrace_common_fields, name);
+ field = __find_event_field(&ftrace_generic_fields, name);
if (field)
return field;
- head = trace_get_fields(call);
- return __find_event_field(head, name);
+ return __find_event_field(&ftrace_common_fields, name);
}
static int __trace_define_field(struct list_head *head, const char *type,
@@ -171,8 +171,10 @@ static int trace_define_generic_fields(void)
{
int ret;
- __generic_field(int, cpu, FILTER_OTHER);
- __generic_field(char *, comm, FILTER_PTR_STRING);
+ __generic_field(int, CPU, FILTER_CPU);
+ __generic_field(int, cpu, FILTER_CPU);
+ __generic_field(char *, COMM, FILTER_COMM);
+ __generic_field(char *, comm, FILTER_COMM);
return ret;
}
@@ -869,7 +871,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
* The ftrace subsystem is for showing formats only.
* They can not be enabled or disabled via the event files.
*/
- if (call->class && call->class->reg)
+ if (call->class && call->class->reg &&
+ !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
return file;
}
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index f93a219b18da..6816302542b2 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1043,13 +1043,14 @@ static int init_pred(struct filter_parse_state *ps,
return -EINVAL;
}
- if (is_string_field(field)) {
+ if (field->filter_type == FILTER_COMM) {
+ filter_build_regex(pred);
+ fn = filter_pred_comm;
+ pred->regex.field_len = TASK_COMM_LEN;
+ } else if (is_string_field(field)) {
filter_build_regex(pred);
- if (!strcmp(field->name, "comm")) {
- fn = filter_pred_comm;
- pred->regex.field_len = TASK_COMM_LEN;
- } else if (field->filter_type == FILTER_STATIC_STRING) {
+ if (field->filter_type == FILTER_STATIC_STRING) {
fn = filter_pred_string;
pred->regex.field_len = field->size;
} else if (field->filter_type == FILTER_DYN_STRING)
@@ -1072,7 +1073,7 @@ static int init_pred(struct filter_parse_state *ps,
}
pred->val = val;
- if (!strcmp(field->name, "cpu"))
+ if (field->filter_type == FILTER_CPU)
fn = filter_pred_cpu;
else
fn = select_comparison_fn(pred->op, field->size,
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 202df6cffcca..2a1abbaca10e 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -156,7 +156,11 @@ check_stack(unsigned long ip, unsigned long *stack)
for (; p < top && i < stack_trace_max.nr_entries; p++) {
if (stack_dump_trace[i] == ULONG_MAX)
break;
- if (*p == stack_dump_trace[i]) {
+ /*
+ * The READ_ONCE_NOCHECK is used to let KASAN know that
+ * this is not a stack-out-of-bounds error.
+ */
+ if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
stack_dump_trace[x] = stack_dump_trace[i++];
this_size = stack_trace_index[x++] =
(top - p) * sizeof(unsigned long);
diff --git a/mm/filemap.c b/mm/filemap.c
index 23edccecadb0..3461d97ecb30 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -446,7 +446,8 @@ int filemap_write_and_wait(struct address_space *mapping)
{
int err = 0;
- if (mapping->nrpages) {
+ if ((!dax_mapping(mapping) && mapping->nrpages) ||
+ (dax_mapping(mapping) && mapping->nrexceptional)) {
err = filemap_fdatawrite(mapping);
/*
* Even if the above returned error, the pages may be
@@ -482,13 +483,8 @@ int filemap_write_and_wait_range(struct address_space *mapping,
{
int err = 0;
- if (dax_mapping(mapping) && mapping->nrexceptional) {
- err = dax_writeback_mapping_range(mapping, lstart, lend);
- if (err)
- return err;
- }
-
- if (mapping->nrpages) {
+ if ((!dax_mapping(mapping) && mapping->nrpages) ||
+ (dax_mapping(mapping) && mapping->nrexceptional)) {
err = __filemap_fdatawrite_range(mapping, lstart, lend,
WB_SYNC_ALL);
/* See comment of filemap_write_and_wait() */
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1c317b85ea7d..e10a4fee88d2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2836,6 +2836,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
pgtable_t pgtable;
pmd_t _pmd;
bool young, write, dirty;
+ unsigned long addr;
int i;
VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
@@ -2865,7 +2866,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pmd_populate(mm, &_pmd, pgtable);
- for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+ for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
pte_t entry, *pte;
/*
* Note that NUMA hinting access restrictions are not
@@ -2886,9 +2887,9 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
}
if (dirty)
SetPageDirty(page + i);
- pte = pte_offset_map(&_pmd, haddr);
+ pte = pte_offset_map(&_pmd, addr);
BUG_ON(!pte_none(*pte));
- set_pte_at(mm, haddr, pte, entry);
+ set_pte_at(mm, addr, pte, entry);
atomic_inc(&page[i]._mapcount);
pte_unmap(pte);
}
@@ -2938,7 +2939,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
pmd_populate(mm, pmd, pgtable);
if (freeze) {
- for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+ for (i = 0; i < HPAGE_PMD_NR; i++) {
page_remove_rmap(page + i, false);
put_page(page + i);
}
diff --git a/mm/memory.c b/mm/memory.c
index 635451abc8f7..8132787ae4d5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3404,8 +3404,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(pmd_none(*pmd)) &&
unlikely(__pte_alloc(mm, vma, pmd, address)))
return VM_FAULT_OOM;
- /* if an huge pmd materialized from under us just retry later */
- if (unlikely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
+ /*
+ * If a huge pmd materialized under us just retry later. Use
+ * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
+ * didn't become pmd_trans_huge under us and then back to pmd_none, as
+ * a result of MADV_DONTNEED running immediately after a huge pmd fault
+ * in a different thread of this mm, in turn leading to a misleading
+ * pmd_trans_huge() retval. All we have to ensure is that it is a
+ * regular pmd that we can walk with pte_offset_map() and we can do that
+ * through an atomic read in C, which is what pmd_trans_unstable()
+ * provides.
+ */
+ if (unlikely(pmd_trans_unstable(pmd) || pmd_devmap(*pmd)))
return 0;
/*
* A regular pmd is established and it can't morph into a huge pmd
diff --git a/mm/migrate.c b/mm/migrate.c
index b1034f9c77e7..3ad0fea5c438 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1582,7 +1582,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
(GFP_HIGHUSER_MOVABLE |
__GFP_THISNODE | __GFP_NOMEMALLOC |
__GFP_NORETRY | __GFP_NOWARN) &
- ~(__GFP_IO | __GFP_FS), 0);
+ ~__GFP_RECLAIM, 0);
return newpage;
}
diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c
index faf65baed617..34e44c0c0836 100644
--- a/net/6lowpan/core.c
+++ b/net/6lowpan/core.c
@@ -20,7 +20,7 @@
int lowpan_register_netdevice(struct net_device *dev,
enum lowpan_lltypes lltype)
{
- int ret;
+ int i, ret;
dev->addr_len = EUI64_ADDR_LEN;
dev->type = ARPHRD_6LOWPAN;
@@ -29,6 +29,10 @@ int lowpan_register_netdevice(struct net_device *dev,
lowpan_priv(dev)->lltype = lltype;
+ spin_lock_init(&lowpan_priv(dev)->ctx.lock);
+ for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++)
+ lowpan_priv(dev)->ctx.table[i].id = i;
+
ret = register_netdevice(dev);
if (ret < 0)
return ret;
@@ -68,6 +72,32 @@ void lowpan_unregister_netdev(struct net_device *dev)
}
EXPORT_SYMBOL(lowpan_unregister_netdev);
+static int lowpan_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int i;
+
+ if (dev->type != ARPHRD_6LOWPAN)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_DOWN:
+ for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++)
+ clear_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE,
+ &lowpan_priv(dev)->ctx.table[i].flags);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block lowpan_notifier = {
+ .notifier_call = lowpan_event,
+};
+
static int __init lowpan_module_init(void)
{
int ret;
@@ -76,6 +106,12 @@ static int __init lowpan_module_init(void)
if (ret < 0)
return ret;
+ ret = register_netdevice_notifier(&lowpan_notifier);
+ if (ret < 0) {
+ lowpan_debugfs_exit();
+ return ret;
+ }
+
request_module_nowait("ipv6");
request_module_nowait("nhc_dest");
@@ -92,6 +128,7 @@ static int __init lowpan_module_init(void)
static void __exit lowpan_module_exit(void)
{
lowpan_debugfs_exit();
+ unregister_netdevice_notifier(&lowpan_notifier);
}
module_init(lowpan_module_init);
diff --git a/net/6lowpan/debugfs.c b/net/6lowpan/debugfs.c
index 88eef84df0fc..aa49ff4ce6fd 100644
--- a/net/6lowpan/debugfs.c
+++ b/net/6lowpan/debugfs.c
@@ -16,19 +16,266 @@
#include "6lowpan_i.h"
+#define LOWPAN_DEBUGFS_CTX_PFX_NUM_ARGS 8
+
static struct dentry *lowpan_debugfs;
+static int lowpan_ctx_flag_active_set(void *data, u64 val)
+{
+ struct lowpan_iphc_ctx *ctx = data;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ if (val)
+ set_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, &ctx->flags);
+ else
+ clear_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, &ctx->flags);
+
+ return 0;
+}
+
+static int lowpan_ctx_flag_active_get(void *data, u64 *val)
+{
+ *val = lowpan_iphc_ctx_is_active(data);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(lowpan_ctx_flag_active_fops,
+ lowpan_ctx_flag_active_get,
+ lowpan_ctx_flag_active_set, "%llu\n");
+
+static int lowpan_ctx_flag_c_set(void *data, u64 val)
+{
+ struct lowpan_iphc_ctx *ctx = data;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ if (val)
+ set_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);
+ else
+ clear_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);
+
+ return 0;
+}
+
+static int lowpan_ctx_flag_c_get(void *data, u64 *val)
+{
+ *val = lowpan_iphc_ctx_is_compression(data);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(lowpan_ctx_flag_c_fops, lowpan_ctx_flag_c_get,
+ lowpan_ctx_flag_c_set, "%llu\n");
+
+static int lowpan_ctx_plen_set(void *data, u64 val)
+{
+ struct lowpan_iphc_ctx *ctx = data;
+ struct lowpan_iphc_ctx_table *t =
+ container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);
+
+ if (val > 128)
+ return -EINVAL;
+
+ spin_lock_bh(&t->lock);
+ ctx->plen = val;
+ spin_unlock_bh(&t->lock);
+
+ return 0;
+}
+
+static int lowpan_ctx_plen_get(void *data, u64 *val)
+{
+ struct lowpan_iphc_ctx *ctx = data;
+ struct lowpan_iphc_ctx_table *t =
+ container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);
+
+ spin_lock_bh(&t->lock);
+ *val = ctx->plen;
+ spin_unlock_bh(&t->lock);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(lowpan_ctx_plen_fops, lowpan_ctx_plen_get,
+ lowpan_ctx_plen_set, "%llu\n");
+
+static int lowpan_ctx_pfx_show(struct seq_file *file, void *offset)
+{
+ struct lowpan_iphc_ctx *ctx = file->private;
+ struct lowpan_iphc_ctx_table *t =
+ container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);
+
+ spin_lock_bh(&t->lock);
+ seq_printf(file, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ be16_to_cpu(ctx->pfx.s6_addr16[0]),
+ be16_to_cpu(ctx->pfx.s6_addr16[1]),
+ be16_to_cpu(ctx->pfx.s6_addr16[2]),
+ be16_to_cpu(ctx->pfx.s6_addr16[3]),
+ be16_to_cpu(ctx->pfx.s6_addr16[4]),
+ be16_to_cpu(ctx->pfx.s6_addr16[5]),
+ be16_to_cpu(ctx->pfx.s6_addr16[6]),
+ be16_to_cpu(ctx->pfx.s6_addr16[7]));
+ spin_unlock_bh(&t->lock);
+
+ return 0;
+}
+
+static int lowpan_ctx_pfx_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, lowpan_ctx_pfx_show, inode->i_private);
+}
+
+static ssize_t lowpan_ctx_pfx_write(struct file *fp,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ char buf[128] = {};
+ struct seq_file *file = fp->private_data;
+ struct lowpan_iphc_ctx *ctx = file->private;
+ struct lowpan_iphc_ctx_table *t =
+ container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);
+ int status = count, n, i;
+ unsigned int addr[8];
+
+ if (copy_from_user(&buf, user_buf, min_t(size_t, sizeof(buf) - 1,
+ count))) {
+ status = -EFAULT;
+ goto out;
+ }
+
+ n = sscanf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
+ &addr[0], &addr[1], &addr[2], &addr[3], &addr[4],
+ &addr[5], &addr[6], &addr[7]);
+ if (n != LOWPAN_DEBUGFS_CTX_PFX_NUM_ARGS) {
+ status = -EINVAL;
+ goto out;
+ }
+
+ spin_lock_bh(&t->lock);
+ for (i = 0; i < 8; i++)
+ ctx->pfx.s6_addr16[i] = cpu_to_be16(addr[i] & 0xffff);
+ spin_unlock_bh(&t->lock);
+
+out:
+ return status;
+}
+
+const struct file_operations lowpan_ctx_pfx_fops = {
+ .open = lowpan_ctx_pfx_open,
+ .read = seq_read,
+ .write = lowpan_ctx_pfx_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int lowpan_dev_debugfs_ctx_init(struct net_device *dev,
+ struct dentry *ctx, u8 id)
+{
+ struct lowpan_priv *lpriv = lowpan_priv(dev);
+ struct dentry *dentry, *root;
+ char buf[32];
+
+	WARN_ON_ONCE(id >= LOWPAN_IPHC_CTX_TABLE_SIZE);
+
+ sprintf(buf, "%d", id);
+
+ root = debugfs_create_dir(buf, ctx);
+ if (!root)
+ return -EINVAL;
+
+ dentry = debugfs_create_file("active", 0644, root,
+ &lpriv->ctx.table[id],
+ &lowpan_ctx_flag_active_fops);
+ if (!dentry)
+ return -EINVAL;
+
+ dentry = debugfs_create_file("compression", 0644, root,
+ &lpriv->ctx.table[id],
+ &lowpan_ctx_flag_c_fops);
+ if (!dentry)
+ return -EINVAL;
+
+ dentry = debugfs_create_file("prefix", 0644, root,
+ &lpriv->ctx.table[id],
+ &lowpan_ctx_pfx_fops);
+ if (!dentry)
+ return -EINVAL;
+
+ dentry = debugfs_create_file("prefix_len", 0644, root,
+ &lpriv->ctx.table[id],
+ &lowpan_ctx_plen_fops);
+ if (!dentry)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int lowpan_context_show(struct seq_file *file, void *offset)
+{
+ struct lowpan_iphc_ctx_table *t = file->private;
+ int i;
+
+ seq_printf(file, "%3s|%-43s|%c\n", "cid", "prefix", 'C');
+ seq_puts(file, "-------------------------------------------------\n");
+
+ spin_lock_bh(&t->lock);
+ for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++) {
+ if (!lowpan_iphc_ctx_is_active(&t->table[i]))
+ continue;
+
+ seq_printf(file, "%3d|%39pI6c/%-3d|%d\n", t->table[i].id,
+ &t->table[i].pfx, t->table[i].plen,
+ lowpan_iphc_ctx_is_compression(&t->table[i]));
+ }
+ spin_unlock_bh(&t->lock);
+
+ return 0;
+}
+
+static int lowpan_context_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, lowpan_context_show, inode->i_private);
+}
+
+const struct file_operations lowpan_context_fops = {
+ .open = lowpan_context_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
int lowpan_dev_debugfs_init(struct net_device *dev)
{
struct lowpan_priv *lpriv = lowpan_priv(dev);
+ struct dentry *contexts, *dentry;
+ int ret, i;
+
/* creating the root */
lpriv->iface_debugfs = debugfs_create_dir(dev->name, lowpan_debugfs);
if (!lpriv->iface_debugfs)
goto fail;
+ contexts = debugfs_create_dir("contexts", lpriv->iface_debugfs);
+ if (!contexts)
+ goto remove_root;
+
+ dentry = debugfs_create_file("show", 0644, contexts,
+ &lowpan_priv(dev)->ctx,
+ &lowpan_context_fops);
+ if (!dentry)
+ goto remove_root;
+
+ for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++) {
+ ret = lowpan_dev_debugfs_ctx_init(dev, contexts, i);
+ if (ret < 0)
+ goto remove_root;
+ }
+
return 0;
+remove_root:
+ lowpan_dev_debugfs_exit(dev);
fail:
return -EINVAL;
}
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 346b5c1a9185..72172514fea0 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -56,6 +56,7 @@
/* special link-layer handling */
#include <net/mac802154.h>
+#include "6lowpan_i.h"
#include "nhc.h"
/* Values of fields within the IPHC encoding first byte */
@@ -147,6 +148,9 @@
(((a)->s6_addr16[6]) == 0) && \
(((a)->s6_addr[14]) == 0))
+#define LOWPAN_IPHC_CID_DCI(cid) (cid & 0x0f)
+#define LOWPAN_IPHC_CID_SCI(cid) ((cid & 0xf0) >> 4)
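+/* e.g. a CID byte of 0x21 selects context 2 for the source address
+ * (SCI, upper nibble) and context 1 for the destination address
+ * (DCI, lower nibble)
+ */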
+
static inline void iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr,
const void *lladdr)
{
@@ -195,6 +199,98 @@ static inline void iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr,
}
}
+static struct lowpan_iphc_ctx *
+lowpan_iphc_ctx_get_by_id(const struct net_device *dev, u8 id)
+{
+ struct lowpan_iphc_ctx *ret = &lowpan_priv(dev)->ctx.table[id];
+
+ if (!lowpan_iphc_ctx_is_active(ret))
+ return NULL;
+
+ return ret;
+}
+
+static struct lowpan_iphc_ctx *
+lowpan_iphc_ctx_get_by_addr(const struct net_device *dev,
+ const struct in6_addr *addr)
+{
+ struct lowpan_iphc_ctx *table = lowpan_priv(dev)->ctx.table;
+ struct lowpan_iphc_ctx *ret = NULL;
+ struct in6_addr addr_pfx;
+ u8 addr_plen;
+ int i;
+
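+ /* longest-prefix match over all valid compression contexts: if e.g.
+ * fd00::/8 and fd00:1::/32 are both installed and active, an address
+ * out of fd00:1::/32 matches both, but the /32 context is returned
+ */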
+ for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++) {
+ /* Check if context is valid. A context that is not valid
+ * MUST NOT be used for compression.
+ */
+ if (!lowpan_iphc_ctx_is_active(&table[i]) ||
+ !lowpan_iphc_ctx_is_compression(&table[i]))
+ continue;
+
+ ipv6_addr_prefix(&addr_pfx, addr, table[i].plen);
+
+ /* if the prefix length is below 64, the remaining bits up to the
+ * 64th bit must be zero, so compare on 64 bits; otherwise compare
+ * on table[i].plen bits
+ */
+ if (table[i].plen < 64)
+ addr_plen = 64;
+ else
+ addr_plen = table[i].plen;
+
+ if (ipv6_prefix_equal(&addr_pfx, &table[i].pfx, addr_plen)) {
+ /* remember first match */
+ if (!ret) {
+ ret = &table[i];
+ continue;
+ }
+
+ /* get the context with longest prefix len */
+ if (table[i].plen > ret->plen)
+ ret = &table[i];
+ }
+ }
+
+ return ret;
+}
+
+static struct lowpan_iphc_ctx *
+lowpan_iphc_ctx_get_by_mcast_addr(const struct net_device *dev,
+ const struct in6_addr *addr)
+{
+ struct lowpan_iphc_ctx *table = lowpan_priv(dev)->ctx.table;
+ struct lowpan_iphc_ctx *ret = NULL;
+ struct in6_addr addr_mcast, network_pfx = {};
+ int i;
+
+ /* start from the original multicast address */
+ memcpy(&addr_mcast, addr, sizeof(*addr));
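+
+ /* A matching unicast-prefix-based multicast address (RFC 3306) has
+ * the form ffXX:XXLL:PPPP:PPPP:PPPP:PPPP:GGGG:GGGG, where LL is the
+ * context prefix length and P the context prefix, e.g. for context
+ * 2001:db8::/64 a candidate is ff3e:0040:2001:0db8:0000:0000:GGGG:GGGG.
+ */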
+
+ for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++) {
+ /* Check if context is valid. A context that is not valid
+ * MUST NOT be used for compression.
+ */
+ if (!lowpan_iphc_ctx_is_active(&table[i]) ||
+ !lowpan_iphc_ctx_is_compression(&table[i]))
+ continue;
+
+ /* setting plen */
+ addr_mcast.s6_addr[3] = table[i].plen;
+ /* get network prefix to copy into multicast address */
+ ipv6_addr_prefix(&network_pfx, &table[i].pfx,
+ table[i].plen);
+ /* setting network prefix */
+ memcpy(&addr_mcast.s6_addr[4], &network_pfx, 8);
+
+ if (ipv6_addr_equal(addr, &addr_mcast)) {
+ ret = &table[i];
+ break;
+ }
+ }
+
+ return ret;
+}
+
/* Uncompress address function for source and
* destination address(non-multicast).
*
@@ -259,30 +355,59 @@ static int uncompress_addr(struct sk_buff *skb, const struct net_device *dev,
/* Uncompress address function for source context
* based address(non-multicast).
*/
-static int uncompress_context_based_src_addr(struct sk_buff *skb,
- struct in6_addr *ipaddr,
- u8 address_mode)
+static int uncompress_ctx_addr(struct sk_buff *skb,
+ const struct net_device *dev,
+ const struct lowpan_iphc_ctx *ctx,
+ struct in6_addr *ipaddr, u8 address_mode,
+ const void *lladdr)
{
+ bool fail;
+
switch (address_mode) {
- case LOWPAN_IPHC_SAM_00:
- /* unspec address ::
+ /* SAM and DAM are the same here */
+ case LOWPAN_IPHC_DAM_00:
+ fail = false;
+ /* SAM_00 -> unspec address ::
* Do nothing, address is already ::
+ *
+ * DAM_00 -> reserved, should never occur.
*/
break;
case LOWPAN_IPHC_SAM_01:
- /* TODO */
+ case LOWPAN_IPHC_DAM_01:
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
+ ipv6_addr_prefix_copy(ipaddr, &ctx->pfx, ctx->plen);
+ break;
case LOWPAN_IPHC_SAM_10:
- /* TODO */
+ case LOWPAN_IPHC_DAM_10:
+ ipaddr->s6_addr[11] = 0xFF;
+ ipaddr->s6_addr[12] = 0xFE;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
+ ipv6_addr_prefix_copy(ipaddr, &ctx->pfx, ctx->plen);
+ break;
case LOWPAN_IPHC_SAM_11:
- /* TODO */
- netdev_warn(skb->dev, "SAM value 0x%x not supported\n",
- address_mode);
- return -EINVAL;
+ case LOWPAN_IPHC_DAM_11:
+ fail = false;
+ switch (lowpan_priv(dev)->lltype) {
+ case LOWPAN_LLTYPE_IEEE802154:
+ iphc_uncompress_802154_lladdr(ipaddr, lladdr);
+ break;
+ default:
+ iphc_uncompress_eui64_lladdr(ipaddr, lladdr);
+ break;
+ }
+ ipv6_addr_prefix_copy(ipaddr, &ctx->pfx, ctx->plen);
+ break;
default:
pr_debug("Invalid sam value: 0x%x\n", address_mode);
return -EINVAL;
}
+ if (fail) {
+ pr_debug("Failed to fetch skb data\n");
+ return -EIO;
+ }
+
raw_dump_inline(NULL,
"Reconstructed context based ipv6 src addr is",
ipaddr->s6_addr, 16);
@@ -346,6 +471,30 @@ static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
return 0;
}
+static int lowpan_uncompress_multicast_ctx_daddr(struct sk_buff *skb,
+ struct lowpan_iphc_ctx *ctx,
+ struct in6_addr *ipaddr,
+ u8 address_mode)
+{
+ struct in6_addr network_pfx = {};
+ bool fail;
+
+ ipaddr->s6_addr[0] = 0xFF;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 2);
+ fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[12], 4);
+ if (fail)
+ return -EIO;
+
+ /* take prefix_len and network prefix from the context */
+ ipaddr->s6_addr[3] = ctx->plen;
+ /* get network prefix to copy into multicast address */
+ ipv6_addr_prefix(&network_pfx, &ctx->pfx, ctx->plen);
+ /* setting network prefix */
+ memcpy(&ipaddr->s6_addr[4], &network_pfx, 8);
+
+ return 0;
+}
+
/* get the ecn values from iphc tf format and set it to ipv6hdr */
static inline void lowpan_iphc_tf_set_ecn(struct ipv6hdr *hdr, const u8 *tf)
{
@@ -459,7 +608,8 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
const void *daddr, const void *saddr)
{
struct ipv6hdr hdr = {};
- u8 iphc0, iphc1;
+ struct lowpan_iphc_ctx *ci;
+ u8 iphc0, iphc1, cid = 0;
int err;
raw_dump_table(__func__, "raw skb data dump uncompressed",
@@ -469,12 +619,14 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
lowpan_fetch_skb(skb, &iphc1, sizeof(iphc1)))
return -EINVAL;
- /* another if the CID flag is set */
- if (iphc1 & LOWPAN_IPHC_CID)
- return -ENOTSUPP;
-
hdr.version = 6;
+ /* the CID defaults to 0; fetch the CID byte when the CID flag is set */
+ if (iphc1 & LOWPAN_IPHC_CID) {
+ if (lowpan_fetch_skb(skb, &cid, sizeof(cid)))
+ return -EINVAL;
+ }
+
err = lowpan_iphc_tf_decompress(skb, &hdr,
iphc0 & LOWPAN_IPHC_TF_MASK);
if (err < 0)
@@ -500,10 +652,17 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
}
if (iphc1 & LOWPAN_IPHC_SAC) {
- /* Source address context based uncompression */
+ spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+ ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_SCI(cid));
+ if (!ci) {
+ spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+ return -EINVAL;
+ }
+
pr_debug("SAC bit is set. Handle context based source address.\n");
- err = uncompress_context_based_src_addr(skb, &hdr.saddr,
- iphc1 & LOWPAN_IPHC_SAM_MASK);
+ err = uncompress_ctx_addr(skb, dev, ci, &hdr.saddr,
+ iphc1 & LOWPAN_IPHC_SAM_MASK, saddr);
+ spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
} else {
/* Source address uncompression */
pr_debug("source address stateless compression\n");
@@ -515,27 +674,52 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
if (err)
return -EINVAL;
- /* check for Multicast Compression */
- if (iphc1 & LOWPAN_IPHC_M) {
- if (iphc1 & LOWPAN_IPHC_DAC) {
- pr_debug("dest: context-based mcast compression\n");
- /* TODO: implement this */
- } else {
- err = lowpan_uncompress_multicast_daddr(skb, &hdr.daddr,
- iphc1 & LOWPAN_IPHC_DAM_MASK);
+ switch (iphc1 & (LOWPAN_IPHC_M | LOWPAN_IPHC_DAC)) {
+ case LOWPAN_IPHC_M | LOWPAN_IPHC_DAC:
+ spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+ ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid));
+ if (!ci) {
+ spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+ return -EINVAL;
+ }
- if (err)
- return -EINVAL;
+ /* multicast with context */
+ pr_debug("dest: context-based mcast compression\n");
+ err = lowpan_uncompress_multicast_ctx_daddr(skb, ci,
+ &hdr.daddr,
+ iphc1 & LOWPAN_IPHC_DAM_MASK);
+ spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+ break;
+ case LOWPAN_IPHC_M:
+ /* multicast */
+ err = lowpan_uncompress_multicast_daddr(skb, &hdr.daddr,
+ iphc1 & LOWPAN_IPHC_DAM_MASK);
+ break;
+ case LOWPAN_IPHC_DAC:
+ spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+ ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid));
+ if (!ci) {
+ spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+ return -EINVAL;
}
- } else {
+
+ /* Destination address context based uncompression */
+ pr_debug("DAC bit is set. Handle context based destination address.\n");
+ err = uncompress_ctx_addr(skb, dev, ci, &hdr.daddr,
+ iphc1 & LOWPAN_IPHC_DAM_MASK, daddr);
+ spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+ break;
+ default:
err = uncompress_addr(skb, dev, &hdr.daddr,
iphc1 & LOWPAN_IPHC_DAM_MASK, daddr);
pr_debug("dest: stateless compression mode %d dest %pI6c\n",
iphc1 & LOWPAN_IPHC_DAM_MASK, &hdr.daddr);
- if (err)
- return -EINVAL;
+ break;
}
+ if (err)
+ return -EINVAL;
+
/* Next header data uncompression */
if (iphc0 & LOWPAN_IPHC_NH) {
err = lowpan_nhc_do_uncompression(skb, dev, &hdr);
@@ -585,6 +769,58 @@ static const u8 lowpan_iphc_dam_to_sam_value[] = {
[LOWPAN_IPHC_DAM_11] = LOWPAN_IPHC_SAM_11,
};
+static u8 lowpan_compress_ctx_addr(u8 **hc_ptr, const struct in6_addr *ipaddr,
+ const struct lowpan_iphc_ctx *ctx,
+ const unsigned char *lladdr, bool sam)
+{
+ struct in6_addr tmp = {};
+ u8 dam;
+
+ /* check for SAM/DAM = 11 */
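+ /* try the address modes from most to least compressed: SAM/DAM = 11
+ * elides the address entirely (it is derivable from the context and
+ * the link-layer address), 10 carries 16 bits inline and 01 carries
+ * 64 bits inline
+ */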
+ memcpy(&tmp.s6_addr[8], lladdr, 8);
+ /* the second bit (universal/local) is flipped according to RFC 2464 */
+ tmp.s6_addr[8] ^= 0x02;
+ /* context information is always used */
+ ipv6_addr_prefix_copy(&tmp, &ctx->pfx, ctx->plen);
+ if (ipv6_addr_equal(&tmp, ipaddr)) {
+ dam = LOWPAN_IPHC_DAM_11;
+ goto out;
+ }
+
+ memset(&tmp, 0, sizeof(tmp));
+ /* check for SAM/DAM = 01 */
+ tmp.s6_addr[11] = 0xFF;
+ tmp.s6_addr[12] = 0xFE;
+ memcpy(&tmp.s6_addr[14], &ipaddr->s6_addr[14], 2);
+ /* context information is always used */
+ ipv6_addr_prefix_copy(&tmp, &ctx->pfx, ctx->plen);
+ if (ipv6_addr_equal(&tmp, ipaddr)) {
+ lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[14], 2);
+ dam = LOWPAN_IPHC_DAM_10;
+ goto out;
+ }
+
+ memset(&tmp, 0, sizeof(tmp));
+ /* check for SAM/DAM = 10, should always match */
+ memcpy(&tmp.s6_addr[8], &ipaddr->s6_addr[8], 8);
+ /* context information is always used */
+ ipv6_addr_prefix_copy(&tmp, &ctx->pfx, ctx->plen);
+ if (ipv6_addr_equal(&tmp, ipaddr)) {
+ lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[8], 8);
+ dam = LOWPAN_IPHC_DAM_01;
+ goto out;
+ }
+
+ WARN_ONCE(1, "context found but no address mode matched\n");
+ return LOWPAN_IPHC_DAM_00;
+out:
+ if (sam)
+ return lowpan_iphc_dam_to_sam_value[dam];
+ else
+ return dam;
+}
+
static u8 lowpan_compress_addr_64(u8 **hc_ptr, const struct in6_addr *ipaddr,
const unsigned char *lladdr, bool sam)
{
@@ -708,6 +944,21 @@ static u8 lowpan_iphc_tf_compress(u8 **hc_ptr, const struct ipv6hdr *hdr)
return val;
}
+static u8 lowpan_iphc_mcast_ctx_addr_compress(u8 **hc_ptr,
+ const struct lowpan_iphc_ctx *ctx,
+ const struct in6_addr *ipaddr)
+{
+ u8 data[6];
+
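+ /* DAM_00 with DAC=1 carries 6 bytes inline: the flags/scope and
+ * reserved bytes (s6_addr[1..2]) plus the 32-bit group ID
+ * (s6_addr[12..15]); prefix length and network prefix are taken
+ * from the context
+ */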
+ /* flags/scope, reserved (RIID) */
+ memcpy(data, &ipaddr->s6_addr[1], 2);
+ /* group ID */
+ memcpy(&data[2], &ipaddr->s6_addr[12], 4);
+ lowpan_push_hc_data(hc_ptr, data, 6);
+
+ return LOWPAN_IPHC_DAM_00;
+}
+
static u8 lowpan_iphc_mcast_addr_compress(u8 **hc_ptr,
const struct in6_addr *ipaddr)
{
@@ -742,10 +993,11 @@ static u8 lowpan_iphc_mcast_addr_compress(u8 **hc_ptr,
int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
const void *daddr, const void *saddr)
{
- u8 iphc0, iphc1, *hc_ptr;
+ u8 iphc0, iphc1, *hc_ptr, cid = 0;
struct ipv6hdr *hdr;
u8 head[LOWPAN_IPHC_MAX_HC_BUF_LEN] = {};
- int ret, addr_type;
+ struct lowpan_iphc_ctx *dci, *sci, dci_entry, sci_entry;
+ int ret, ipv6_daddr_type, ipv6_saddr_type;
if (skb->protocol != htons(ETH_P_IPV6))
return -EINVAL;
@@ -769,14 +1021,38 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
iphc0 = LOWPAN_DISPATCH_IPHC;
iphc1 = 0;
- /* TODO: context lookup */
-
raw_dump_inline(__func__, "saddr", saddr, EUI64_ADDR_LEN);
raw_dump_inline(__func__, "daddr", daddr, EUI64_ADDR_LEN);
raw_dump_table(__func__, "sending raw skb network uncompressed packet",
skb->data, skb->len);
+ ipv6_daddr_type = ipv6_addr_type(&hdr->daddr);
+ spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+ if (ipv6_daddr_type & IPV6_ADDR_MULTICAST)
+ dci = lowpan_iphc_ctx_get_by_mcast_addr(dev, &hdr->daddr);
+ else
+ dci = lowpan_iphc_ctx_get_by_addr(dev, &hdr->daddr);
+ if (dci) {
+ memcpy(&dci_entry, dci, sizeof(*dci));
+ cid |= dci->id;
+ }
+ spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+
+ spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+ sci = lowpan_iphc_ctx_get_by_addr(dev, &hdr->saddr);
+ if (sci) {
+ memcpy(&sci_entry, sci, sizeof(*sci));
+ cid |= (sci->id << 4);
+ }
+ spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+
+ /* a CID of zero is implicit, so the CID byte is elided */
+ if (cid) {
+ iphc1 |= LOWPAN_IPHC_CID;
+ lowpan_push_hc_data(&hc_ptr, &cid, sizeof(cid));
+ }
+
/* Traffic Class, Flow Label compression */
iphc0 |= lowpan_iphc_tf_compress(&hc_ptr, hdr);
@@ -813,39 +1089,64 @@ int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
sizeof(hdr->hop_limit));
}
- addr_type = ipv6_addr_type(&hdr->saddr);
+ ipv6_saddr_type = ipv6_addr_type(&hdr->saddr);
/* source address compression */
- if (addr_type == IPV6_ADDR_ANY) {
+ if (ipv6_saddr_type == IPV6_ADDR_ANY) {
pr_debug("source address is unspecified, setting SAC\n");
iphc1 |= LOWPAN_IPHC_SAC;
} else {
- if (addr_type & IPV6_ADDR_LINKLOCAL) {
- iphc1 |= lowpan_compress_addr_64(&hc_ptr, &hdr->saddr,
- saddr, true);
- pr_debug("source address unicast link-local %pI6c iphc1 0x%02x\n",
- &hdr->saddr, iphc1);
+ if (sci) {
+ iphc1 |= lowpan_compress_ctx_addr(&hc_ptr, &hdr->saddr,
+ &sci_entry, saddr,
+ true);
+ iphc1 |= LOWPAN_IPHC_SAC;
} else {
- pr_debug("send the full source address\n");
- lowpan_push_hc_data(&hc_ptr, hdr->saddr.s6_addr, 16);
+ if (ipv6_saddr_type & IPV6_ADDR_LINKLOCAL) {
+ iphc1 |= lowpan_compress_addr_64(&hc_ptr,
+ &hdr->saddr,
+ saddr, true);
+ pr_debug("source address unicast link-local %pI6c iphc1 0x%02x\n",
+ &hdr->saddr, iphc1);
+ } else {
+ pr_debug("send the full source address\n");
+ lowpan_push_hc_data(&hc_ptr,
+ hdr->saddr.s6_addr, 16);
+ }
}
}
- addr_type = ipv6_addr_type(&hdr->daddr);
/* destination address compression */
- if (addr_type & IPV6_ADDR_MULTICAST) {
+ if (ipv6_daddr_type & IPV6_ADDR_MULTICAST) {
pr_debug("destination address is multicast: ");
iphc1 |= LOWPAN_IPHC_M;
- iphc1 |= lowpan_iphc_mcast_addr_compress(&hc_ptr, &hdr->daddr);
+ if (dci) {
+ iphc1 |= lowpan_iphc_mcast_ctx_addr_compress(&hc_ptr,
+ &dci_entry,
+ &hdr->daddr);
+ iphc1 |= LOWPAN_IPHC_DAC;
+ } else {
+ iphc1 |= lowpan_iphc_mcast_addr_compress(&hc_ptr,
+ &hdr->daddr);
+ }
} else {
- if (addr_type & IPV6_ADDR_LINKLOCAL) {
- /* TODO: context lookup */
- iphc1 |= lowpan_compress_addr_64(&hc_ptr, &hdr->daddr,
- daddr, false);
- pr_debug("dest address unicast link-local %pI6c "
- "iphc1 0x%02x\n", &hdr->daddr, iphc1);
+ if (dci) {
+ iphc1 |= lowpan_compress_ctx_addr(&hc_ptr, &hdr->daddr,
+ &dci_entry, daddr,
+ false);
+ iphc1 |= LOWPAN_IPHC_DAC;
} else {
- pr_debug("dest address unicast %pI6c\n", &hdr->daddr);
- lowpan_push_hc_data(&hc_ptr, hdr->daddr.s6_addr, 16);
+ if (ipv6_daddr_type & IPV6_ADDR_LINKLOCAL) {
+ iphc1 |= lowpan_compress_addr_64(&hc_ptr,
+ &hdr->daddr,
+ daddr, false);
+ pr_debug("dest address unicast link-local %pI6c iphc1 0x%02x\n",
+ &hdr->daddr, iphc1);
+ } else {
+ pr_debug("dest address unicast %pI6c\n",
+ &hdr->daddr);
+ lowpan_push_hc_data(&hc_ptr,
+ hdr->daddr.s6_addr, 16);
+ }
}
}
diff --git a/net/Kconfig b/net/Kconfig
index b80efecfc1a0..2760825e53fa 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -396,6 +396,22 @@ config DST_CACHE
bool "dst cache"
default n
+config NET_DEVLINK
+ tristate "Network physical/parent device Netlink interface"
+ help
+ Network physical/parent device Netlink interface provides
+ infrastructure to support access to physical chip-wide config and
+ monitoring.
+
+config MAY_USE_DEVLINK
+ tristate
+ default m if NET_DEVLINK=m
+ default y if NET_DEVLINK=y || NET_DEVLINK=n
+ help
+ Drivers using the devlink infrastructure should have a dependency
+ on MAY_USE_DEVLINK to ensure they do not cause link errors when
+ devlink is a loadable module and the driver using it is built-in.
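+
+ A driver Kconfig entry would then typically contain (hypothetical
+ example):
+
+ config FOO_NIC
+ tristate "Foo NIC driver"
+ depends on MAY_USE_DEVLINK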
+
endif # if NET
# Used by archs to tell that they support BPF_JIT
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 2dd40e5ea030..f66930ee3c0b 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -15,6 +15,20 @@ config BATMAN_ADV
https://www.open-mesh.org/ for more information and user space
tools.
+config BATMAN_ADV_BATMAN_V
+ bool "B.A.T.M.A.N. V protocol (experimental)"
+ depends on BATMAN_ADV && CFG80211=y || (CFG80211=m && BATMAN_ADV=m)
+ default n
+ help
+ This option enables the B.A.T.M.A.N. V protocol, the successor
+ to the currently used B.A.T.M.A.N. IV protocol. The main
+ changes include the split of the OGM protocol into a neighbor
+ discovery protocol (Echo Location Protocol, ELP) and a new OGM
+ protocol, OGMv2, for flooding protocol information through the
+ network, as well as a throughput-based metric.
+ B.A.T.M.A.N. V is currently considered experimental and not
+ compatible with B.A.T.M.A.N. IV networks.
+
config BATMAN_ADV_BLA
bool "Bridge Loop Avoidance"
depends on BATMAN_ADV && INET
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 207e2af316c7..797cf2fc88c1 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -18,6 +18,9 @@
obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
batman-adv-y += bat_iv_ogm.o
+batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v.o
+batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v_elp.o
+batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v_ogm.o
batman-adv-y += bitarray.o
batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
batman-adv-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index a7485d676088..03dafd33d23b 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,6 +1,6 @@
/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
*
- * Marek Lindner
+ * Marek Lindner, Linus Lüssing
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -18,6 +18,32 @@
#ifndef _NET_BATMAN_ADV_BAT_ALGO_H_
#define _NET_BATMAN_ADV_BAT_ALGO_H_
+struct batadv_priv;
+
int batadv_iv_init(void);
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+
+int batadv_v_init(void);
+int batadv_v_mesh_init(struct batadv_priv *bat_priv);
+void batadv_v_mesh_free(struct batadv_priv *bat_priv);
+
+#else
+
+static inline int batadv_v_init(void)
+{
+ return 0;
+}
+
+static inline int batadv_v_mesh_init(struct batadv_priv *bat_priv)
+{
+ return 0;
+}
+
+static inline void batadv_v_mesh_free(struct batadv_priv *bat_priv)
+{
+}
+
+#endif /* CONFIG_BATMAN_ADV_BATMAN_V */
+
#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 5651e33ca6bd..cb2d1b9b0340 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -478,7 +478,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
skb->len + ETH_HLEN);
- batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
+ batadv_send_broadcast_skb(skb, hard_iface);
}
}
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
new file mode 100644
index 000000000000..3315b9a598af
--- /dev/null
+++ b/net/batman-adv/bat_v.c
@@ -0,0 +1,347 @@
+/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
+ *
+ * Linus Lüssing, Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "bat_algo.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/cache.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/netdevice.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "bat_v_elp.h"
+#include "bat_v_ogm.h"
+#include "hash.h"
+#include "originator.h"
+#include "packet.h"
+
+static int batadv_v_iface_enable(struct batadv_hard_iface *hard_iface)
+{
+ int ret;
+
+ ret = batadv_v_elp_iface_enable(hard_iface);
+ if (ret < 0)
+ return ret;
+
+ ret = batadv_v_ogm_iface_enable(hard_iface);
+ if (ret < 0)
+ batadv_v_elp_iface_disable(hard_iface);
+
+ /* enable link throughput auto-detection by setting the throughput
+ * override to zero
+ */
+ atomic_set(&hard_iface->bat_v.throughput_override, 0);
+
+ return ret;
+}
+
+static void batadv_v_iface_disable(struct batadv_hard_iface *hard_iface)
+{
+ batadv_v_elp_iface_disable(hard_iface);
+}
+
+static void batadv_v_iface_update_mac(struct batadv_hard_iface *hard_iface)
+{
+}
+
+static void batadv_v_primary_iface_set(struct batadv_hard_iface *hard_iface)
+{
+ batadv_v_elp_primary_iface_set(hard_iface);
+ batadv_v_ogm_primary_iface_set(hard_iface);
+}
+
+static void
+batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh)
+{
+ ewma_throughput_init(&hardif_neigh->bat_v.throughput);
+ INIT_WORK(&hardif_neigh->bat_v.metric_work,
+ batadv_v_elp_throughput_metric_update);
+}
+
+static void batadv_v_ogm_schedule(struct batadv_hard_iface *hard_iface)
+{
+}
+
+static void batadv_v_ogm_emit(struct batadv_forw_packet *forw_packet)
+{
+}
+
+/**
+ * batadv_v_orig_print_neigh - print neighbors for the originator table
+ * @orig_node: the orig_node for which the neighbors are printed
+ * @if_outgoing: outgoing interface for these entries
+ * @seq: debugfs table seq_file struct
+ *
+ * Must be called while holding an rcu lock.
+ */
+static void
+batadv_v_orig_print_neigh(struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *if_outgoing,
+ struct seq_file *seq)
+{
+ struct batadv_neigh_node *neigh_node;
+ struct batadv_neigh_ifinfo *n_ifinfo;
+
+ hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) {
+ n_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
+ if (!n_ifinfo)
+ continue;
+
+ seq_printf(seq, " %pM (%9u.%1u)",
+ neigh_node->addr,
+ n_ifinfo->bat_v.throughput / 10,
+ n_ifinfo->bat_v.throughput % 10);
+
+ batadv_neigh_ifinfo_put(n_ifinfo);
+ }
+}
+
+/**
+ * batadv_v_hardif_neigh_print - print a single ELP neighbour node
+ * @seq: neighbour table seq_file struct
+ * @hardif_neigh: hardif neighbour information
+ */
+static void
+batadv_v_hardif_neigh_print(struct seq_file *seq,
+ struct batadv_hardif_neigh_node *hardif_neigh)
+{
+ int last_secs, last_msecs;
+ u32 throughput;
+
+ last_secs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen) / 1000;
+ last_msecs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen) % 1000;
+ throughput = ewma_throughput_read(&hardif_neigh->bat_v.throughput);
+
+ seq_printf(seq, "%pM %4i.%03is (%9u.%1u) [%10s]\n",
+ hardif_neigh->addr, last_secs, last_msecs, throughput / 10,
+ throughput % 10, hardif_neigh->if_incoming->net_dev->name);
+}
+
+/**
+ * batadv_v_neigh_print - print the single hop neighbour list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @seq: neighbour table seq_file struct
+ */
+static void batadv_v_neigh_print(struct batadv_priv *bat_priv,
+ struct seq_file *seq)
+{
+ struct net_device *net_dev = (struct net_device *)seq->private;
+ struct batadv_hardif_neigh_node *hardif_neigh;
+ struct batadv_hard_iface *hard_iface;
+ int batman_count = 0;
+
+ seq_printf(seq, " %-15s %s (%11s) [%10s]\n", "Neighbor",
+ "last-seen", "throughput", "IF");
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->soft_iface != net_dev)
+ continue;
+
+ hlist_for_each_entry_rcu(hardif_neigh,
+ &hard_iface->neigh_list, list) {
+ batadv_v_hardif_neigh_print(seq, hardif_neigh);
+ batman_count++;
+ }
+ }
+ rcu_read_unlock();
+
+ if (batman_count == 0)
+ seq_puts(seq, "No batman nodes in range ...\n");
+}
+
+/**
+ * batadv_v_orig_print - print the originator table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @seq: debugfs table seq_file struct
+ * @if_outgoing: the outgoing interface for which this should be printed
+ */
+static void batadv_v_orig_print(struct batadv_priv *bat_priv,
+ struct seq_file *seq,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_neigh_node *neigh_node;
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ int last_seen_msecs, last_seen_secs;
+ struct batadv_orig_node *orig_node;
+ struct batadv_neigh_ifinfo *n_ifinfo;
+ unsigned long last_seen_jiffies;
+ struct hlist_head *head;
+ int batman_count = 0;
+ u32 i;
+
+ seq_printf(seq, " %-15s %s (%11s) %17s [%10s]: %20s ...\n",
+ "Originator", "last-seen", "throughput", "Nexthop",
+ "outgoingIF", "Potential nexthops");
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+ neigh_node = batadv_orig_router_get(orig_node,
+ if_outgoing);
+ if (!neigh_node)
+ continue;
+
+ n_ifinfo = batadv_neigh_ifinfo_get(neigh_node,
+ if_outgoing);
+ if (!n_ifinfo)
+ goto next;
+
+ last_seen_jiffies = jiffies - orig_node->last_seen;
+ last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
+ last_seen_secs = last_seen_msecs / 1000;
+ last_seen_msecs = last_seen_msecs % 1000;
+
+ seq_printf(seq, "%pM %4i.%03is (%9u.%1u) %pM [%10s]:",
+ orig_node->orig, last_seen_secs,
+ last_seen_msecs,
+ n_ifinfo->bat_v.throughput / 10,
+ n_ifinfo->bat_v.throughput % 10,
+ neigh_node->addr,
+ neigh_node->if_incoming->net_dev->name);
+
+ batadv_v_orig_print_neigh(orig_node, if_outgoing, seq);
+ seq_puts(seq, "\n");
+ batman_count++;
+
+next:
+ batadv_neigh_node_put(neigh_node);
+ if (n_ifinfo)
+ batadv_neigh_ifinfo_put(n_ifinfo);
+ }
+ rcu_read_unlock();
+ }
+
+ if (batman_count == 0)
+ seq_puts(seq, "No batman nodes in range ...\n");
+}
+
+static int batadv_v_neigh_cmp(struct batadv_neigh_node *neigh1,
+ struct batadv_hard_iface *if_outgoing1,
+ struct batadv_neigh_node *neigh2,
+ struct batadv_hard_iface *if_outgoing2)
+{
+ struct batadv_neigh_ifinfo *ifinfo1, *ifinfo2;
+
+ ifinfo1 = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
+ ifinfo2 = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
+
+ if (WARN_ON(!ifinfo1 || !ifinfo2))
+ return 0;
+
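+ /* a positive value means neigh1 provides the better metric */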
+ return ifinfo1->bat_v.throughput - ifinfo2->bat_v.throughput;
+}
+
+static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
+ struct batadv_hard_iface *if_outgoing1,
+ struct batadv_neigh_node *neigh2,
+ struct batadv_hard_iface *if_outgoing2)
+{
+ struct batadv_neigh_ifinfo *ifinfo1, *ifinfo2;
+ u32 threshold;
+
+ ifinfo1 = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
+ ifinfo2 = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
+
+ if (WARN_ON(!ifinfo1 || !ifinfo2))
+ return false;
+
+ threshold = ifinfo1->bat_v.throughput / 4;
+ threshold = ifinfo1->bat_v.throughput - threshold;
+
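+ /* the candidate is "similar or better" if its throughput is at
+ * least 75 percent of the established router's throughput
+ */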
+ return ifinfo2->bat_v.throughput > threshold;
+}
+
+static struct batadv_algo_ops batadv_batman_v __read_mostly = {
+ .name = "BATMAN_V",
+ .bat_iface_enable = batadv_v_iface_enable,
+ .bat_iface_disable = batadv_v_iface_disable,
+ .bat_iface_update_mac = batadv_v_iface_update_mac,
+ .bat_primary_iface_set = batadv_v_primary_iface_set,
+ .bat_hardif_neigh_init = batadv_v_hardif_neigh_init,
+ .bat_ogm_emit = batadv_v_ogm_emit,
+ .bat_ogm_schedule = batadv_v_ogm_schedule,
+ .bat_orig_print = batadv_v_orig_print,
+ .bat_neigh_cmp = batadv_v_neigh_cmp,
+ .bat_neigh_is_similar_or_better = batadv_v_neigh_is_sob,
+ .bat_neigh_print = batadv_v_neigh_print,
+};
+
+/**
+ * batadv_v_mesh_init - initialize the B.A.T.M.A.N. V private resources for a
+ * mesh
+ * @bat_priv: the object representing the mesh interface to initialise
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int batadv_v_mesh_init(struct batadv_priv *bat_priv)
+{
+ return batadv_v_ogm_init(bat_priv);
+}
+
+/**
+ * batadv_v_mesh_free - free the B.A.T.M.A.N. V private resources for a mesh
+ * @bat_priv: the object representing the mesh interface to free
+ */
+void batadv_v_mesh_free(struct batadv_priv *bat_priv)
+{
+ batadv_v_ogm_free(bat_priv);
+}
+
+/**
+ * batadv_v_init - B.A.T.M.A.N. V initialization function
+ *
+ * Description: Takes care of initializing all the subcomponents.
+ * It is invoked upon module load only.
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int __init batadv_v_init(void)
+{
+ int ret;
+
+ /* B.A.T.M.A.N. V echo location protocol packet */
+ ret = batadv_recv_handler_register(BATADV_ELP,
+ batadv_v_elp_packet_recv);
+ if (ret < 0)
+ return ret;
+
+ ret = batadv_recv_handler_register(BATADV_OGM2,
+ batadv_v_ogm_packet_recv);
+ if (ret < 0)
+ goto elp_unregister;
+
+ ret = batadv_algo_register(&batadv_batman_v);
+ if (ret < 0)
+ goto ogm_unregister;
+
+ return ret;
+
+ogm_unregister:
+ batadv_recv_handler_unregister(BATADV_OGM2);
+
+elp_unregister:
+ batadv_recv_handler_unregister(BATADV_ELP);
+
+ return ret;
+}
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
new file mode 100644
index 000000000000..3844e7efd0b0
--- /dev/null
+++ b/net/batman-adv/bat_v_elp.c
@@ -0,0 +1,515 @@
+/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
+ *
+ * Linus Lüssing, Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "bat_v_elp.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <net/cfg80211.h>
+
+#include "bat_algo.h"
+#include "bat_v_ogm.h"
+#include "hard-interface.h"
+#include "originator.h"
+#include "packet.h"
+#include "routing.h"
+#include "send.h"
+
+/**
+ * batadv_v_elp_start_timer - restart timer for ELP periodic work
+ * @hard_iface: the interface for which the timer has to be reset
+ */
+static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
+{
+ unsigned int msecs;
+
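+ /* pick a random point in [elp_interval - JITTER, elp_interval + JITTER)
+ * to desynchronise the ELP transmissions of neighbouring nodes
+ */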
+ msecs = atomic_read(&hard_iface->bat_v.elp_interval) - BATADV_JITTER;
+ msecs += prandom_u32() % (2 * BATADV_JITTER);
+
+ queue_delayed_work(batadv_event_workqueue, &hard_iface->bat_v.elp_wq,
+ msecs_to_jiffies(msecs));
+}
+
+/**
+ * batadv_v_elp_get_throughput - get the throughput towards a neighbour
+ * @neigh: the neighbour for which the throughput has to be obtained
+ *
+ * Return: The throughput towards the given neighbour in multiples of 100kbps
+ * (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc.).
+ */
+static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
+{
+ struct batadv_hard_iface *hard_iface = neigh->if_incoming;
+ struct ethtool_link_ksettings link_settings;
+ struct station_info sinfo;
+ u32 throughput;
+ int ret;
+
+ /* if the user specified a customised value for this interface, then
+ * return it directly
+ */
+ throughput = atomic_read(&hard_iface->bat_v.throughput_override);
+ if (throughput != 0)
+ return throughput;
+
+ /* if this is a wireless device, then ask its throughput through
+ * cfg80211 API
+ */
+ if (batadv_is_wifi_netdev(hard_iface->net_dev)) {
+ if (hard_iface->net_dev->ieee80211_ptr) {
+ ret = cfg80211_get_station(hard_iface->net_dev,
+ neigh->addr, &sinfo);
+ if (ret == -ENOENT) {
+ /* Node is not associated anymore! It would be
+ * possible to delete this neighbor. For now set
+ * the throughput metric to 0.
+ */
+ return 0;
+ }
+ if (!ret)
+ return sinfo.expected_throughput / 100;
+ }
+
+ /* unsupported WiFi driver version */
+ goto default_throughput;
+ }
+
+ /* if not a wifi interface, check if this device provides data via
+ * ethtool (e.g. an Ethernet adapter)
+ */
+ memset(&link_settings, 0, sizeof(link_settings));
+ rtnl_lock();
+ ret = __ethtool_get_link_ksettings(hard_iface->net_dev, &link_settings);
+ rtnl_unlock();
+ if (ret == 0) {
+ /* link characteristics might change over time */
+ if (link_settings.base.duplex == DUPLEX_FULL)
+ hard_iface->bat_v.flags |= BATADV_FULL_DUPLEX;
+ else
+ hard_iface->bat_v.flags &= ~BATADV_FULL_DUPLEX;
+
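+ /* ethtool reports the link speed in Mbit/s while the metric is
+ * expressed in units of 100 kbit/s, hence the factor of 10
+ */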
+ throughput = link_settings.base.speed;
+ if (throughput && (throughput != SPEED_UNKNOWN))
+ return throughput * 10;
+ }
+
+default_throughput:
+ if (!(hard_iface->bat_v.flags & BATADV_WARNING_DEFAULT)) {
+ batadv_info(hard_iface->soft_iface,
+ "WiFi driver or ethtool info does not provide information about link speeds on interface %s, therefore defaulting to hardcoded throughput values of %u.%1u Mbps. Consider overriding the throughput manually or checking your driver.\n",
+ hard_iface->net_dev->name,
+ BATADV_THROUGHPUT_DEFAULT_VALUE / 10,
+ BATADV_THROUGHPUT_DEFAULT_VALUE % 10);
+ hard_iface->bat_v.flags |= BATADV_WARNING_DEFAULT;
+ }
+
+ /* if none of the above cases apply, return the base_throughput */
+ return BATADV_THROUGHPUT_DEFAULT_VALUE;
+}
+
+/**
+ * batadv_v_elp_throughput_metric_update - worker updating the throughput metric
+ * of a single hop neighbour
+ * @work: the work queue item
+ */
+void batadv_v_elp_throughput_metric_update(struct work_struct *work)
+{
+ struct batadv_hardif_neigh_node_bat_v *neigh_bat_v;
+ struct batadv_hardif_neigh_node *neigh;
+
+ neigh_bat_v = container_of(work, struct batadv_hardif_neigh_node_bat_v,
+ metric_work);
+ neigh = container_of(neigh_bat_v, struct batadv_hardif_neigh_node,
+ bat_v);
+
+ ewma_throughput_add(&neigh->bat_v.throughput,
+ batadv_v_elp_get_throughput(neigh));
+
+ /* decrement refcounter to balance increment performed before scheduling
+ * this task
+ */
+ batadv_hardif_neigh_put(neigh);
+}
+
+/**
+ * batadv_v_elp_wifi_neigh_probe - send link probing packets to a neighbour
+ * @neigh: the neighbour to probe
+ *
+ * Sends a predefined number of unicast wifi packets to a given neighbour in
+ * order to trigger the throughput estimation on this link by the RC algorithm.
+ * Packets are sent only if there is not enough payload unicast traffic
+ * towards this neighbour.
+ *
+ * Return: True on success and false in case of error during skb preparation.
+ */
+static bool
+batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
+{
+ struct batadv_hard_iface *hard_iface = neigh->if_incoming;
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ unsigned long last_tx_diff;
+ struct sk_buff *skb;
+ int probe_len, i;
+ int elp_skb_len;
+
+ /* this probing routine is for Wifi neighbours only */
+ if (!batadv_is_wifi_netdev(hard_iface->net_dev))
+ return true;
+
+ /* probe the neighbor only if no unicast packets have been sent
+ * to it in the last 100 milliseconds: this is the rate control
+ * algorithm sampling interval (minstrel). In this way, if not
+ * enough traffic has been sent to the neighbor, batman-adv can
+ * generate 2 probe packets and push the RC algorithm to perform
+ * the sampling
+ */
+ last_tx_diff = jiffies_to_msecs(jiffies - neigh->bat_v.last_unicast_tx);
+ if (last_tx_diff <= BATADV_ELP_PROBE_MAX_TX_DIFF)
+ return true;
+
+ probe_len = max_t(int, sizeof(struct batadv_elp_packet),
+ BATADV_ELP_MIN_PROBE_SIZE);
+
+ for (i = 0; i < BATADV_ELP_PROBES_PER_NODE; i++) {
+ elp_skb_len = hard_iface->bat_v.elp_skb->len;
+ skb = skb_copy_expand(hard_iface->bat_v.elp_skb, 0,
+ probe_len - elp_skb_len,
+ GFP_ATOMIC);
+ if (!skb)
+ return false;
+
+ /* Tell the skb to get as big as the allocated space (we want
+ * the packet to be exactly that size to make the link
+ * throughput estimation effective).
+ */
+ skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len);
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Sending unicast (probe) ELP packet on interface %s to %pM\n",
+ hard_iface->net_dev->name, neigh->addr);
+
+ batadv_send_skb_packet(skb, hard_iface, neigh->addr);
+ }
+
+ return true;
+}
+
+/**
+ * batadv_v_elp_periodic_work - ELP periodic task per interface
+ * @work: work queue item
+ *
+ * Emits broadcast ELP messages at regular intervals.
+ */
+static void batadv_v_elp_periodic_work(struct work_struct *work)
+{
+ struct batadv_hardif_neigh_node *hardif_neigh;
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_hard_iface_bat_v *bat_v;
+ struct batadv_elp_packet *elp_packet;
+ struct batadv_priv *bat_priv;
+ struct sk_buff *skb;
+ u32 elp_interval;
+
+ bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
+ hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
+ bat_priv = netdev_priv(hard_iface->soft_iface);
+
+ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
+ goto out;
+
+ /* we are in the process of shutting this interface down */
+ if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
+ (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
+ goto out;
+
+ /* the interface was enabled but may not be ready yet */
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
+ goto restart_timer;
+
+ skb = skb_copy(hard_iface->bat_v.elp_skb, GFP_ATOMIC);
+ if (!skb)
+ goto restart_timer;
+
+ elp_packet = (struct batadv_elp_packet *)skb->data;
+ elp_packet->seqno = htonl(atomic_read(&hard_iface->bat_v.elp_seqno));
+ elp_interval = atomic_read(&hard_iface->bat_v.elp_interval);
+ elp_packet->elp_interval = htonl(elp_interval);
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Sending broadcast ELP packet on interface %s, seqno %u\n",
+ hard_iface->net_dev->name,
+ atomic_read(&hard_iface->bat_v.elp_seqno));
+
+ batadv_send_broadcast_skb(skb, hard_iface);
+
+ atomic_inc(&hard_iface->bat_v.elp_seqno);
+
+ /* The throughput metric is updated on each sent packet. This way, if a
+ * node is dead and no longer sends packets, batman-adv is still able to
+ * react to its death in a timely manner.
+ *
+ * The throughput metric is updated by following these steps:
+ * 1) if the hard_iface is wifi => send a number of unicast ELPs for
+ * probing/sampling to each neighbor
+ * 2) update the throughput metric value of each neighbor (note that the
+ * value retrieved in this step might be 100ms old because the
+ * probing packets at point 1) could still be in the HW queue)
+ */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(hardif_neigh, &hard_iface->neigh_list, list) {
+ if (!batadv_v_elp_wifi_neigh_probe(hardif_neigh))
+ /* if something goes wrong while probing, better to stop
+ * sending packets immediately and reschedule the task
+ */
+ break;
+
+ if (!kref_get_unless_zero(&hardif_neigh->refcount))
+ continue;
+
+ /* Reading the estimated throughput from cfg80211 is a task that
+ * may sleep and that is not allowed in an rcu protected
+ * context. Therefore schedule a task for that.
+ */
+ queue_work(batadv_event_workqueue,
+ &hardif_neigh->bat_v.metric_work);
+ }
+ rcu_read_unlock();
+
+restart_timer:
+ batadv_v_elp_start_timer(hard_iface);
+out:
+ return;
+}
+
+/**
+ * batadv_v_elp_iface_enable - setup the ELP interface private resources
+ * @hard_iface: interface for which the data has to be prepared
+ *
+ * Return: 0 on success or -ENOMEM in case of failure.
+ */
+int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_elp_packet *elp_packet;
+ unsigned char *elp_buff;
+ u32 random_seqno;
+ size_t size;
+ int res = -ENOMEM;
+
+ size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN;
+ hard_iface->bat_v.elp_skb = dev_alloc_skb(size);
+ if (!hard_iface->bat_v.elp_skb)
+ goto out;
+
+ skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
+ elp_buff = skb_push(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
+ elp_packet = (struct batadv_elp_packet *)elp_buff;
+ memset(elp_packet, 0, BATADV_ELP_HLEN);
+
+ elp_packet->packet_type = BATADV_ELP;
+ elp_packet->version = BATADV_COMPAT_VERSION;
+
+ /* randomize initial seqno to avoid collision */
+ get_random_bytes(&random_seqno, sizeof(random_seqno));
+ atomic_set(&hard_iface->bat_v.elp_seqno, random_seqno);
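+ /* ELP packets are sent every 500 milliseconds by default */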
+ atomic_set(&hard_iface->bat_v.elp_interval, 500);
+
+ /* assume full-duplex by default */
+ hard_iface->bat_v.flags |= BATADV_FULL_DUPLEX;
+
+ /* warn the user (again) if no throughput data is available */
+ hard_iface->bat_v.flags &= ~BATADV_WARNING_DEFAULT;
+
+ if (batadv_is_wifi_netdev(hard_iface->net_dev))
+ hard_iface->bat_v.flags &= ~BATADV_FULL_DUPLEX;
+
+ INIT_DELAYED_WORK(&hard_iface->bat_v.elp_wq,
+ batadv_v_elp_periodic_work);
+ batadv_v_elp_start_timer(hard_iface);
+ res = 0;
+
+out:
+ return res;
+}
+
+/**
+ * batadv_v_elp_iface_disable - release ELP interface private resources
+ * @hard_iface: interface for which the resources have to be released
+ */
+void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface)
+{
+ cancel_delayed_work_sync(&hard_iface->bat_v.elp_wq);
+
+ dev_kfree_skb(hard_iface->bat_v.elp_skb);
+ hard_iface->bat_v.elp_skb = NULL;
+}
+
+/**
+ * batadv_v_elp_primary_iface_set - change internal data to reflect the new
+ * primary interface
+ * @primary_iface: the new primary interface
+ */
+void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface)
+{
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_elp_packet *elp_packet;
+ struct sk_buff *skb;
+
+ /* update orig field of every elp iface belonging to this mesh */
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (primary_iface->soft_iface != hard_iface->soft_iface)
+ continue;
+
+ if (!hard_iface->bat_v.elp_skb)
+ continue;
+
+ skb = hard_iface->bat_v.elp_skb;
+ elp_packet = (struct batadv_elp_packet *)skb->data;
+ ether_addr_copy(elp_packet->orig,
+ primary_iface->net_dev->dev_addr);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * batadv_v_elp_neigh_update - update an ELP neighbour node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @neigh_addr: the neighbour interface address
+ * @if_incoming: the interface the packet was received through
+ * @elp_packet: the received ELP packet
+ *
+ * Updates the ELP neighbour node state with the data received within the new
+ * ELP packet.
+ */
+static void batadv_v_elp_neigh_update(struct batadv_priv *bat_priv,
+ u8 *neigh_addr,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_elp_packet *elp_packet)
+
+{
+ struct batadv_neigh_node *neigh;
+ struct batadv_orig_node *orig_neigh;
+ struct batadv_hardif_neigh_node *hardif_neigh;
+ s32 seqno_diff;
+ s32 elp_latest_seqno;
+
+ orig_neigh = batadv_v_ogm_orig_get(bat_priv, elp_packet->orig);
+ if (!orig_neigh)
+ return;
+
+ neigh = batadv_neigh_node_new(orig_neigh, if_incoming, neigh_addr);
+ if (!neigh)
+ goto orig_free;
+
+ hardif_neigh = batadv_hardif_neigh_get(if_incoming, neigh_addr);
+ if (!hardif_neigh)
+ goto neigh_free;
+
+ elp_latest_seqno = hardif_neigh->bat_v.elp_latest_seqno;
+ seqno_diff = ntohl(elp_packet->seqno) - elp_latest_seqno;
+
+ /* known or older sequence numbers are ignored. However always adopt
+ * if the router seems to have been restarted.
+ */
+ if (seqno_diff < 1 && seqno_diff > -BATADV_ELP_MAX_AGE)
+ goto hardif_free;
+
+ neigh->last_seen = jiffies;
+ hardif_neigh->last_seen = jiffies;
+ hardif_neigh->bat_v.elp_latest_seqno = ntohl(elp_packet->seqno);
+ hardif_neigh->bat_v.elp_interval = ntohl(elp_packet->elp_interval);
+
+hardif_free:
+ if (hardif_neigh)
+ batadv_hardif_neigh_put(hardif_neigh);
+neigh_free:
+ if (neigh)
+ batadv_neigh_node_put(neigh);
+orig_free:
+ if (orig_neigh)
+ batadv_orig_node_put(orig_neigh);
+}
+
+/**
+ * batadv_v_elp_packet_recv - main ELP packet handler
+ * @skb: the received packet
+ * @if_incoming: the interface this packet was received through
+ *
+ * Return: NET_RX_SUCCESS and consumes the skb if the packet was properly
+ * processed or NET_RX_DROP in case of failure.
+ */
+int batadv_v_elp_packet_recv(struct sk_buff *skb,
+ struct batadv_hard_iface *if_incoming)
+{
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_elp_packet *elp_packet;
+ struct batadv_hard_iface *primary_if;
+ struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ bool ret;
+
+ ret = batadv_check_management_packet(skb, if_incoming, BATADV_ELP_HLEN);
+ if (!ret)
+ return NET_RX_DROP;
+
+ if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
+ return NET_RX_DROP;
+
+ /* did we receive a B.A.T.M.A.N. V ELP packet on an interface
+ * that does not have B.A.T.M.A.N. V ELP enabled?
+ */
+ if (strcmp(bat_priv->bat_algo_ops->name, "BATMAN_V") != 0)
+ return NET_RX_DROP;
+
+ elp_packet = (struct batadv_elp_packet *)skb->data;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Received ELP packet from %pM seqno %u ORIG: %pM\n",
+ ethhdr->h_source, ntohl(elp_packet->seqno),
+ elp_packet->orig);
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ batadv_v_elp_neigh_update(bat_priv, ethhdr->h_source, if_incoming,
+ elp_packet);
+
+out:
+ if (primary_if)
+ batadv_hardif_put(primary_if);
+ consume_skb(skb);
+ return NET_RX_SUCCESS;
+}
diff --git a/net/batman-adv/bat_v_elp.h b/net/batman-adv/bat_v_elp.h
new file mode 100644
index 000000000000..e95f1bca0785
--- /dev/null
+++ b/net/batman-adv/bat_v_elp.h
@@ -0,0 +1,33 @@
+/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
+ *
+ * Linus Lüssing, Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+#ifndef _NET_BATMAN_ADV_BAT_V_ELP_H_
+#define _NET_BATMAN_ADV_BAT_V_ELP_H_
+
+struct sk_buff;
+struct work_struct;
+
+int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface);
+void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface);
+void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface);
+int batadv_v_elp_packet_recv(struct sk_buff *skb,
+ struct batadv_hard_iface *if_incoming);
+void batadv_v_elp_throughput_metric_update(struct work_struct *work);
+
+#endif /* _NET_BATMAN_ADV_BAT_V_ELP_H_ */
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
new file mode 100644
index 000000000000..d9bcbe6e7d65
--- /dev/null
+++ b/net/batman-adv/bat_v_ogm.c
@@ -0,0 +1,833 @@
+/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
+ *
+ * Antonio Quartulli
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "bat_v_ogm.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "hard-interface.h"
+#include "hash.h"
+#include "originator.h"
+#include "packet.h"
+#include "routing.h"
+#include "send.h"
+#include "translation-table.h"
+
+/**
+ * batadv_v_ogm_orig_get - retrieve and possibly create an originator node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the address of the originator
+ *
+ * Return: the orig_node corresponding to the specified address. If such an
+ * object does not exist, it is allocated here. Returns NULL in case of
+ * allocation failure.
+ */
+struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv,
+ const u8 *addr)
+{
+ struct batadv_orig_node *orig_node;
+ int hash_added;
+
+ orig_node = batadv_orig_hash_find(bat_priv, addr);
+ if (orig_node)
+ return orig_node;
+
+ orig_node = batadv_orig_node_new(bat_priv, addr);
+ if (!orig_node)
+ return NULL;
+
+ hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
+ batadv_choose_orig, orig_node,
+ &orig_node->hash_entry);
+ if (hash_added != 0) {
+ /* orig_node->refcounter is initialised to 2 by
+ * batadv_orig_node_new()
+ */
+ batadv_orig_node_put(orig_node);
+ batadv_orig_node_put(orig_node);
+ orig_node = NULL;
+ }
+
+ return orig_node;
+}
+
+/**
+ * batadv_v_ogm_start_timer - restart the OGM sending timer
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
+{
+ unsigned long msecs;
+
+ /* this function may be invoked in different contexts (ogm rescheduling
+ * or hard_iface activation), but the work timer should not be reset
+ */
+ if (delayed_work_pending(&bat_priv->bat_v.ogm_wq))
+ return;
+
+ msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
+ msecs += prandom_u32() % (2 * BATADV_JITTER);
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->bat_v.ogm_wq,
+ msecs_to_jiffies(msecs));
+}
+
+/**
+ * batadv_v_ogm_send_to_if - send a batman ogm using a given interface
+ * @skb: the OGM to send
+ * @hard_iface: the interface to use to send the OGM
+ */
+static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
+ return;
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
+ batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
+ skb->len + ETH_HLEN);
+
+ batadv_send_broadcast_skb(skb, hard_iface);
+}
+
+/**
+ * batadv_v_ogm_send - periodic worker broadcasting the own OGM
+ * @work: work queue item
+ */
+static void batadv_v_ogm_send(struct work_struct *work)
+{
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_priv_bat_v *bat_v;
+ struct batadv_priv *bat_priv;
+ struct batadv_ogm2_packet *ogm_packet;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned char *ogm_buff, *pkt_buff;
+ int ogm_buff_len;
+ u16 tvlv_len = 0;
+
+ bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
+ bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+
+ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
+ goto out;
+
+ ogm_buff = bat_priv->bat_v.ogm_buff;
+ ogm_buff_len = bat_priv->bat_v.ogm_buff_len;
+ /* tt changes have to be committed before the tvlv data is
+ * appended as it may alter the tt tvlv container
+ */
+ batadv_tt_local_commit_changes(bat_priv);
+ tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, &ogm_buff,
+ &ogm_buff_len,
+ BATADV_OGM2_HLEN);
+
+ bat_priv->bat_v.ogm_buff = ogm_buff;
+ bat_priv->bat_v.ogm_buff_len = ogm_buff_len;
+
+ skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + ogm_buff_len);
+ if (!skb)
+ goto reschedule;
+
+ skb_reserve(skb, ETH_HLEN);
+ pkt_buff = skb_put(skb, ogm_buff_len);
+ memcpy(pkt_buff, ogm_buff, ogm_buff_len);
+
+ ogm_packet = (struct batadv_ogm2_packet *)skb->data;
+ ogm_packet->seqno = htonl(atomic_read(&bat_priv->bat_v.ogm_seqno));
+ atomic_inc(&bat_priv->bat_v.ogm_seqno);
+ ogm_packet->tvlv_len = htons(tvlv_len);
+
+ /* broadcast on every interface */
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ continue;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Sending own OGM2 packet (originator %pM, seqno %u, throughput %u, TTL %d) on interface %s [%pM]\n",
+ ogm_packet->orig, ntohl(ogm_packet->seqno),
+ ntohl(ogm_packet->throughput), ogm_packet->ttl,
+ hard_iface->net_dev->name,
+ hard_iface->net_dev->dev_addr);
+
+ /* this skb gets consumed by batadv_v_ogm_send_to_if() */
+ skb_tmp = skb_clone(skb, GFP_ATOMIC);
+ if (!skb_tmp)
+ break;
+
+ batadv_v_ogm_send_to_if(skb_tmp, hard_iface);
+ }
+ rcu_read_unlock();
+
+ consume_skb(skb);
+
+reschedule:
+ batadv_v_ogm_start_timer(bat_priv);
+out:
+ return;
+}
+
+/**
+ * batadv_v_ogm_iface_enable - prepare an interface for B.A.T.M.A.N. V
+ * @hard_iface: the interface to prepare
+ *
+ * Takes care of scheduling own OGM sending routine for this interface.
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int batadv_v_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+
+ batadv_v_ogm_start_timer(bat_priv);
+
+ return 0;
+}
+
+/**
+ * batadv_v_ogm_primary_iface_set - set a new primary interface
+ * @primary_iface: the new primary interface
+ */
+void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(primary_iface->soft_iface);
+ struct batadv_ogm2_packet *ogm_packet;
+
+ if (!bat_priv->bat_v.ogm_buff)
+ return;
+
+ ogm_packet = (struct batadv_ogm2_packet *)bat_priv->bat_v.ogm_buff;
+ ether_addr_copy(ogm_packet->orig, primary_iface->net_dev->dev_addr);
+}
+
+/**
+ * batadv_v_ogm_orig_update - update the originator status based on the received
+ * OGM
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the originator to update
+ * @neigh_node: the neighbour the OGM has been received from (to update)
+ * @ogm2: the received OGM
+ * @if_outgoing: the interface where this OGM is going to be forwarded through
+ */
+static void
+batadv_v_ogm_orig_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node,
+ const struct batadv_ogm2_packet *ogm2,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_neigh_ifinfo *router_ifinfo = NULL, *neigh_ifinfo = NULL;
+ struct batadv_neigh_node *router = NULL;
+ s32 neigh_seq_diff;
+ u32 neigh_last_seqno;
+ u32 router_last_seqno;
+ u32 router_throughput, neigh_throughput;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Searching and updating originator entry of received packet\n");
+
+ /* if this neighbor already is our next hop there is nothing
+ * to change
+ */
+ router = batadv_orig_router_get(orig_node, if_outgoing);
+ if (router == neigh_node)
+ goto out;
+
+ /* Don't consider neighbours with worse throughput.
+ * Also switch route if this seqno is BATADV_OGM_MAX_ORIGDIFF newer than
+ * the last received seqno from our best next hop.
+ */
+ if (router) {
+ router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing);
+ neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
+
+ /* if these are not allocated, something is wrong. */
+ if (!router_ifinfo || !neigh_ifinfo)
+ goto out;
+
+ neigh_last_seqno = neigh_ifinfo->bat_v.last_seqno;
+ router_last_seqno = router_ifinfo->bat_v.last_seqno;
+ neigh_seq_diff = neigh_last_seqno - router_last_seqno;
+ router_throughput = router_ifinfo->bat_v.throughput;
+ neigh_throughput = neigh_ifinfo->bat_v.throughput;
+
+ if ((neigh_seq_diff < BATADV_OGM_MAX_ORIGDIFF) &&
+ (router_throughput >= neigh_throughput))
+ goto out;
+ }
+
+ batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node);
+
+out:
+ if (router_ifinfo)
+ batadv_neigh_ifinfo_put(router_ifinfo);
+ if (neigh_ifinfo)
+ batadv_neigh_ifinfo_put(neigh_ifinfo);
+ if (router)
+ batadv_neigh_node_put(router);
+}
+
+/**
+ * batadv_v_forward_penalty - apply a penalty to the throughput metric forwarded
+ * with B.A.T.M.A.N. V OGMs
+ * @bat_priv: the bat priv with all the soft interface information
+ * @if_incoming: the interface where the OGM has been received
+ * @if_outgoing: the interface where the OGM has to be forwarded to
+ * @throughput: the current throughput
+ *
+ * Apply a penalty on the current throughput metric value based on the
+ * characteristic of the interface where the OGM has been received. The return
+ * value is computed as follows:
+ * - throughput * 50% if the incoming and outgoing interface are the
+ * same WiFi interface and the throughput is above
+ * 1MBit/s
+ * - throughput if the outgoing interface is the default
+ * interface (i.e. this OGM is processed for the
+ * internal table and not forwarded)
+ * - throughput * hop penalty otherwise
+ *
+ * Return: the penalised throughput metric.
+ */
+static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing,
+ u32 throughput)
+{
+ int hop_penalty = atomic_read(&bat_priv->hop_penalty);
+ int hop_penalty_max = BATADV_TQ_MAX_VALUE;
+
+ /* Don't apply hop penalty in default originator table. */
+ if (if_outgoing == BATADV_IF_DEFAULT)
+ return throughput;
+
+ /* Forwarding on the same WiFi interface cuts the throughput in half
+ * due to the store & forward characteristics of WiFi.
+ * Very low throughput values are the exception.
+ */
+ if ((throughput > 10) &&
+ (if_incoming == if_outgoing) &&
+ !(if_incoming->bat_v.flags & BATADV_FULL_DUPLEX))
+ return throughput / 2;
+
+ /* hop penalty of 255 equals 100% */
+ return throughput * (hop_penalty_max - hop_penalty) / hop_penalty_max;
+}
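The three cases in the kernel-doc above map one-to-one onto the branches of batadv_v_forward_penalty(). As a hedged, self-contained sketch of the same arithmetic (assuming, as in this patch, throughput in 100 kbit/s units and BATADV_TQ_MAX_VALUE == 255):

    /* Illustrative sketch only, not part of the patch. */
    static u32 penalised_throughput(u32 throughput, u32 hop_penalty,
                                    bool same_wifi_half_duplex,
                                    bool default_table)
    {
            if (default_table)              /* internal table: no penalty */
                    return throughput;

            /* store & forward on one half-duplex WiFi radio halves the
             * rate; values of 1 Mbit/s (10) or less are left alone
             */
            if (throughput > 10 && same_wifi_half_duplex)
                    return throughput / 2;

            /* hop_penalty == 255 would zero the metric at every hop */
            return throughput * (255 - hop_penalty) / 255;
    }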
+
+/**
+ * batadv_v_ogm_forward - forward an OGM to the given outgoing interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ogm_received: previously received OGM to be forwarded
+ * @throughput: throughput to announce, may vary per outgoing interface
+ * @if_incoming: the interface on which this OGM was received
+ * @if_outgoing: the interface to which the OGM has to be forwarded
+ *
+ * Forward an OGM to an interface after having altered the throughput metric and
+ * the TTL value contained in it. The original OGM isn't modified.
+ */
+static void batadv_v_ogm_forward(struct batadv_priv *bat_priv,
+ const struct batadv_ogm2_packet *ogm_received,
+ u32 throughput,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_ogm2_packet *ogm_forward;
+ unsigned char *skb_buff;
+ struct sk_buff *skb;
+ size_t packet_len;
+ u16 tvlv_len;
+
+ if (ogm_received->ttl <= 1) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
+ return;
+ }
+
+ tvlv_len = ntohs(ogm_received->tvlv_len);
+
+ packet_len = BATADV_OGM2_HLEN + tvlv_len;
+ skb = netdev_alloc_skb_ip_align(if_outgoing->net_dev,
+ ETH_HLEN + packet_len);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, ETH_HLEN);
+ skb_buff = skb_put(skb, packet_len);
+ memcpy(skb_buff, ogm_received, packet_len);
+
+ /* apply forward penalty */
+ ogm_forward = (struct batadv_ogm2_packet *)skb_buff;
+ ogm_forward->throughput = htonl(throughput);
+ ogm_forward->ttl--;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Forwarding OGM2 packet on %s: throughput %u, ttl %u, received via %s\n",
+ if_outgoing->net_dev->name, throughput, ogm_forward->ttl,
+ if_incoming->net_dev->name);
+
+ batadv_v_ogm_send_to_if(skb, if_outgoing);
+}
+
+/**
+ * batadv_v_ogm_metric_update - update route metric based on OGM
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ogm2: OGM2 structure
+ * @orig_node: Originator structure for which the OGM has been received
+ * @neigh_node: the neigh_node through which the OGM has been received
+ * @if_incoming: the interface where this packet was received
+ * @if_outgoing: the interface for which the packet should be considered
+ *
+ * Return:
+ * 1 if the OGM is new,
+ * 0 if it is not new but valid,
+ * <0 on error (e.g. old OGM)
+ */
+static int batadv_v_ogm_metric_update(struct batadv_priv *bat_priv,
+ const struct batadv_ogm2_packet *ogm2,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_orig_ifinfo *orig_ifinfo = NULL;
+ struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
+ bool protection_started = false;
+ int ret = -EINVAL;
+ u32 path_throughput;
+ s32 seq_diff;
+
+ orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
+ if (!orig_ifinfo)
+ goto out;
+
+ seq_diff = ntohl(ogm2->seqno) - orig_ifinfo->last_real_seqno;
+
+ if (!hlist_empty(&orig_node->neigh_list) &&
+ batadv_window_protected(bat_priv, seq_diff,
+ BATADV_OGM_MAX_AGE,
+ &orig_ifinfo->batman_seqno_reset,
+ &protection_started)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: packet within window protection time from %pM\n",
+ ogm2->orig);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Last reset: %ld, %ld\n",
+ orig_ifinfo->batman_seqno_reset, jiffies);
+ goto out;
+ }
+
+ /* drop packets with old seqnos; however, accept the first packet after
+ * a host has been rebooted.
+ */
+ if ((seq_diff < 0) && !protection_started)
+ goto out;
+
+ neigh_node->last_seen = jiffies;
+
+ orig_node->last_seen = jiffies;
+
+ orig_ifinfo->last_real_seqno = ntohl(ogm2->seqno);
+ orig_ifinfo->last_ttl = ogm2->ttl;
+
+ neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
+ if (!neigh_ifinfo)
+ goto out;
+
+ path_throughput = batadv_v_forward_penalty(bat_priv, if_incoming,
+ if_outgoing,
+ ntohl(ogm2->throughput));
+ neigh_ifinfo->bat_v.throughput = path_throughput;
+ neigh_ifinfo->bat_v.last_seqno = ntohl(ogm2->seqno);
+ neigh_ifinfo->last_ttl = ogm2->ttl;
+
+ if (seq_diff > 0 || protection_started)
+ ret = 1;
+ else
+ ret = 0;
+out:
+ if (orig_ifinfo)
+ batadv_orig_ifinfo_put(orig_ifinfo);
+ if (neigh_ifinfo)
+ batadv_neigh_ifinfo_put(neigh_ifinfo);
+
+ return ret;
+}
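The signed seq_diff used above is what keeps the window check correct across 32-bit sequence number wraparound; a brief hedged illustration:

    /* Illustration only: u32 subtraction stored in an s32 yields the
     * signed distance between two seqnos even across the wrap.
     */
    u32 last_real_seqno = 0xfffffffe;       /* shortly before the wrap */
    u32 received_seqno  = 0x00000001;       /* shortly after the wrap */
    s32 seq_diff = received_seqno - last_real_seqno;  /* == 3: newer */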
+
+/**
+ * batadv_v_ogm_route_update - update routes based on OGM
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ethhdr: the Ethernet header of the OGM2
+ * @ogm2: OGM2 structure
+ * @orig_node: Originator structure for which the OGM has been received
+ * @neigh_node: the neigh_node through which the OGM has been received
+ * @if_incoming: the interface where this packet was received
+ * @if_outgoing: the interface for which the packet should be considered
+ */
+static void batadv_v_ogm_route_update(struct batadv_priv *bat_priv,
+ const struct ethhdr *ethhdr,
+ const struct batadv_ogm2_packet *ogm2,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_neigh_node *router = NULL;
+ struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
+ struct batadv_orig_node *orig_neigh_node = NULL;
+ struct batadv_orig_ifinfo *orig_ifinfo = NULL;
+ struct batadv_neigh_node *orig_neigh_router = NULL;
+
+ neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
+ if (!neigh_ifinfo)
+ goto out;
+
+ orig_neigh_node = batadv_v_ogm_orig_get(bat_priv, ethhdr->h_source);
+ if (!orig_neigh_node)
+ goto out;
+
+ orig_neigh_router = batadv_orig_router_get(orig_neigh_node,
+ if_outgoing);
+
+ /* drop packet if sender is not a direct neighbor and if we
+ * don't route towards it
+ */
+ router = batadv_orig_router_get(orig_node, if_outgoing);
+ if (router && router->orig_node != orig_node && !orig_neigh_router) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: OGM via unknown neighbor!\n");
+ goto out;
+ }
+
+ if (router)
+ batadv_neigh_node_put(router);
+
+ /* Update routes, and check if the OGM is from the best next hop */
+ batadv_v_ogm_orig_update(bat_priv, orig_node, neigh_node, ogm2,
+ if_outgoing);
+
+ orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
+ if (!orig_ifinfo)
+ goto out;
+
+ /* don't forward the same seqno twice on one interface */
+ if (orig_ifinfo->last_seqno_forwarded == ntohl(ogm2->seqno))
+ goto out;
+
+ /* acquire possibly updated router */
+ router = batadv_orig_router_get(orig_node, if_outgoing);
+
+ /* strict rule: forward packets coming from the best next hop only */
+ if (neigh_node != router)
+ goto out;
+
+ /* only forward for specific interface, not for the default one. */
+ if (if_outgoing != BATADV_IF_DEFAULT) {
+ orig_ifinfo->last_seqno_forwarded = ntohl(ogm2->seqno);
+ batadv_v_ogm_forward(bat_priv, ogm2,
+ neigh_ifinfo->bat_v.throughput,
+ if_incoming, if_outgoing);
+ }
+
+out:
+ if (orig_ifinfo)
+ batadv_orig_ifinfo_put(orig_ifinfo);
+ if (router)
+ batadv_neigh_node_put(router);
+ if (orig_neigh_router)
+ batadv_neigh_node_put(orig_neigh_router);
+ if (orig_neigh_node)
+ batadv_orig_node_put(orig_neigh_node);
+ if (neigh_ifinfo)
+ batadv_neigh_ifinfo_put(neigh_ifinfo);
+}
+
+/**
+ * batadv_v_ogm_process_per_outif - process a batman v OGM for an outgoing if
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ethhdr: the Ethernet header of the OGM2
+ * @ogm2: OGM2 structure
+ * @orig_node: Originator structure for which the OGM has been received
+ * @neigh_node: the neigh_node through which the OGM has been received
+ * @if_incoming: the interface where this packet was received
+ * @if_outgoing: the interface for which the packet should be considered
+ */
+static void
+batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
+ const struct ethhdr *ethhdr,
+ const struct batadv_ogm2_packet *ogm2,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing)
+{
+ int seqno_age;
+
+ /* first, update the metric with the appropriate sanity checks */
+ seqno_age = batadv_v_ogm_metric_update(bat_priv, ogm2, orig_node,
+ neigh_node, if_incoming,
+ if_outgoing);
+
+ /* outdated sequence numbers are to be discarded */
+ if (seqno_age < 0)
+ return;
+
+ /* only unknown & newer OGMs contain TVLVs we are interested in */
+ if ((seqno_age > 0) && (if_outgoing == BATADV_IF_DEFAULT))
+ batadv_tvlv_containers_process(bat_priv, true, orig_node,
+ NULL, NULL,
+ (unsigned char *)(ogm2 + 1),
+ ntohs(ogm2->tvlv_len));
+
+ /* if the metric update went through, update routes if needed */
+ batadv_v_ogm_route_update(bat_priv, ethhdr, ogm2, orig_node,
+ neigh_node, if_incoming, if_outgoing);
+}
+
+/**
+ * batadv_v_ogm_aggr_packet - checks if there is another OGM aggregated
+ * @buff_pos: current position in the skb
+ * @packet_len: total length of the skb
+ * @tvlv_len: tvlv length of the previously considered OGM
+ *
+ * Return: true if there is enough space for another OGM, false otherwise.
+ */
+static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
+ __be16 tvlv_len)
+{
+ int next_buff_pos = 0;
+
+ next_buff_pos += buff_pos + BATADV_OGM2_HLEN;
+ next_buff_pos += ntohs(tvlv_len);
+
+ return (next_buff_pos <= packet_len) &&
+ (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
+}
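A hedged worked example of this bounds check (BATADV_OGM2_HLEN is 20 bytes, per the struct batadv_ogm2_packet definition later in this patch; BATADV_MAX_AGGREGATION_BYTES is assumed large enough not to matter here):

    /* Suppose skb_headlen(skb) == 52 and the buffer carries two OGMs,
     * the first with tvlv_len == 12, the second with tvlv_len == 0:
     *
     *   1st check: buff_pos = 0,  next = 0 + 20 + 12 = 32 <= 52 -> process
     *   2nd check: buff_pos = 32, next = 32 + 20 + 0  = 52 <= 52 -> process
     *   3rd check: buff_pos = 52, next = 52 + 20 + ...     > 52 -> stop
     */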
+
+/**
+ * batadv_v_ogm_process - process an incoming batman v OGM
+ * @skb: the skb containing the OGM
+ * @ogm_offset: offset to the OGM which should be processed (for aggregates)
+ * @if_incoming: the interface where this packet was received
+ */
+static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
+ struct batadv_hard_iface *if_incoming)
+{
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct ethhdr *ethhdr;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_hardif_neigh_node *hardif_neigh = NULL;
+ struct batadv_neigh_node *neigh_node = NULL;
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_ogm2_packet *ogm_packet;
+ u32 ogm_throughput, link_throughput, path_throughput;
+
+ ethhdr = eth_hdr(skb);
+ ogm_packet = (struct batadv_ogm2_packet *)(skb->data + ogm_offset);
+
+ ogm_throughput = ntohl(ogm_packet->throughput);
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Received OGM2 packet via NB: %pM, IF: %s [%pM] (from OG: %pM, seqno %u, troughput %u, TTL %u, V %u, tvlv_len %u)\n",
+ ethhdr->h_source, if_incoming->net_dev->name,
+ if_incoming->net_dev->dev_addr, ogm_packet->orig,
+ ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl,
+ ogm_packet->version, ntohs(ogm_packet->tvlv_len));
+
+ /* If the throughput metric is 0, immediately drop the packet. No need to
+ * create orig_node / neigh_node for an unusable route.
+ */
+ if (ogm_throughput == 0) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet with troughput metric of 0\n");
+ return;
+ }
+
+ /* require ELP packets to be received from this neighbor first */
+ hardif_neigh = batadv_hardif_neigh_get(if_incoming, ethhdr->h_source);
+ if (!hardif_neigh) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: OGM via unknown neighbor!\n");
+ goto out;
+ }
+
+ orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig);
+ if (!orig_node)
+ goto out;
+
+ neigh_node = batadv_neigh_node_new(orig_node, if_incoming,
+ ethhdr->h_source);
+ if (!neigh_node)
+ goto out;
+
+ /* Update the received throughput metric to match the link
+ * characteristic:
+ * - If this OGM traveled one hop so far (emitted by a single hop
+ * neighbor) the path throughput metric equals the link throughput.
+ * - For OGMs traversing more than one hop the path throughput metric is
+ * the smaller of the path throughput and the link throughput.
+ */
+ link_throughput = ewma_throughput_read(&hardif_neigh->bat_v.throughput);
+ path_throughput = min_t(u32, link_throughput, ogm_throughput);
+ ogm_packet->throughput = htonl(path_throughput);
+
+ batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet, orig_node,
+ neigh_node, if_incoming,
+ BATADV_IF_DEFAULT);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
+ continue;
+
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ continue;
+
+ batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet,
+ orig_node, neigh_node,
+ if_incoming, hard_iface);
+ }
+ rcu_read_unlock();
+out:
+ if (orig_node)
+ batadv_orig_node_put(orig_node);
+ if (neigh_node)
+ batadv_neigh_node_put(neigh_node);
+ if (hardif_neigh)
+ batadv_hardif_neigh_put(hardif_neigh);
+}
+
+/**
+ * batadv_v_ogm_packet_recv - OGM2 receiving handler
+ * @skb: the received OGM
+ * @if_incoming: the interface where this OGM has been received
+ *
+ * Return: NET_RX_SUCCESS and consumes the skb on success, or NET_RX_DROP
+ * (without freeing the skb) on failure
+ */
+int batadv_v_ogm_packet_recv(struct sk_buff *skb,
+ struct batadv_hard_iface *if_incoming)
+{
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_ogm2_packet *ogm_packet;
+ struct ethhdr *ethhdr = eth_hdr(skb);
+ int ogm_offset;
+ u8 *packet_pos;
+ int ret = NET_RX_DROP;
+
+ /* did we receive an OGM2 packet on an interface that does not have
+ * B.A.T.M.A.N. V enabled?
+ */
+ if (strcmp(bat_priv->bat_algo_ops->name, "BATMAN_V") != 0)
+ return NET_RX_DROP;
+
+ if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN))
+ return NET_RX_DROP;
+
+ if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
+ return NET_RX_DROP;
+
+ ogm_packet = (struct batadv_ogm2_packet *)skb->data;
+
+ if (batadv_is_my_mac(bat_priv, ogm_packet->orig))
+ return NET_RX_DROP;
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
+ batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
+ skb->len + ETH_HLEN);
+
+ ogm_offset = 0;
+ ogm_packet = (struct batadv_ogm2_packet *)skb->data;
+
+ while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
+ ogm_packet->tvlv_len)) {
+ batadv_v_ogm_process(skb, ogm_offset, if_incoming);
+
+ ogm_offset += BATADV_OGM2_HLEN;
+ ogm_offset += ntohs(ogm_packet->tvlv_len);
+
+ packet_pos = skb->data + ogm_offset;
+ ogm_packet = (struct batadv_ogm2_packet *)packet_pos;
+ }
+
+ ret = NET_RX_SUCCESS;
+ consume_skb(skb);
+
+ return ret;
+}
+
+/**
+ * batadv_v_ogm_init - initialise the OGM2 engine
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or a negative error code in case of failure
+ */
+int batadv_v_ogm_init(struct batadv_priv *bat_priv)
+{
+ struct batadv_ogm2_packet *ogm_packet;
+ unsigned char *ogm_buff;
+ u32 random_seqno;
+
+ bat_priv->bat_v.ogm_buff_len = BATADV_OGM2_HLEN;
+ ogm_buff = kzalloc(bat_priv->bat_v.ogm_buff_len, GFP_ATOMIC);
+ if (!ogm_buff)
+ return -ENOMEM;
+
+ bat_priv->bat_v.ogm_buff = ogm_buff;
+ ogm_packet = (struct batadv_ogm2_packet *)ogm_buff;
+ ogm_packet->packet_type = BATADV_OGM2;
+ ogm_packet->version = BATADV_COMPAT_VERSION;
+ ogm_packet->ttl = BATADV_TTL;
+ ogm_packet->flags = BATADV_NO_FLAGS;
+ ogm_packet->throughput = htonl(BATADV_THROUGHPUT_MAX_VALUE);
+
+ /* randomize initial seqno to avoid collision */
+ get_random_bytes(&random_seqno, sizeof(random_seqno));
+ atomic_set(&bat_priv->bat_v.ogm_seqno, random_seqno);
+ INIT_DELAYED_WORK(&bat_priv->bat_v.ogm_wq, batadv_v_ogm_send);
+
+ return 0;
+}
+
+/**
+ * batadv_v_ogm_free - free OGM private resources
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_v_ogm_free(struct batadv_priv *bat_priv)
+{
+ cancel_delayed_work_sync(&bat_priv->bat_v.ogm_wq);
+
+ kfree(bat_priv->bat_v.ogm_buff);
+ bat_priv->bat_v.ogm_buff = NULL;
+ bat_priv->bat_v.ogm_buff_len = 0;
+}
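Taken together, the functions in this file form a small engine with a clear lifecycle; a hedged sketch of the intended call order (error handling trimmed):

    /* Sketch only: bring-up and tear-down of the OGM2 engine */
    int err = batadv_v_ogm_init(bat_priv);  /* allocate and seed ogm_buff */
    if (err < 0)
            return err;

    /* each interface joining the mesh (re)arms the periodic worker */
    batadv_v_ogm_iface_enable(hard_iface);

    /* ...batadv_v_ogm_send() now fires periodically from ogm_wq... */

    batadv_v_ogm_free(bat_priv);            /* cancel worker, free buffer */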
diff --git a/net/batman-adv/bat_v_ogm.h b/net/batman-adv/bat_v_ogm.h
new file mode 100644
index 000000000000..d849c75ada0e
--- /dev/null
+++ b/net/batman-adv/bat_v_ogm.h
@@ -0,0 +1,36 @@
+/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
+ *
+ * Antonio Quartulli
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _BATMAN_ADV_BATADV_V_OGM_H_
+#define _BATMAN_ADV_BATADV_V_OGM_H_
+
+#include <linux/types.h>
+
+struct batadv_hard_iface;
+struct batadv_priv;
+struct sk_buff;
+
+int batadv_v_ogm_init(struct batadv_priv *bat_priv);
+void batadv_v_ogm_free(struct batadv_priv *bat_priv);
+int batadv_v_ogm_iface_enable(struct batadv_hard_iface *hard_iface);
+struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv,
+ const u8 *addr);
+void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface);
+int batadv_v_ogm_packet_recv(struct sk_buff *skb,
+ struct batadv_hard_iface *if_incoming);
+
+#endif /* _BATMAN_ADV_BATADV_V_OGM_H_ */
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 4c9b69d465a6..e96d7c745b4a 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -654,9 +654,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
goto free_neigh;
}
- send_status = batadv_send_skb_packet(tmp_skb,
- neigh_node->if_incoming,
- neigh_node->addr);
+ send_status = batadv_send_unicast_skb(tmp_skb, neigh_node);
if (send_status == NET_XMIT_SUCCESS) {
/* count the sent packet */
switch (packet_subtype) {
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index adb9c3989add..e6956d0746a2 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -378,8 +378,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
skb->len + ETH_HLEN);
packet->ttl--;
- batadv_send_skb_packet(skb, neigh_node->if_incoming,
- neigh_node->addr);
+ batadv_send_unicast_skb(skb, neigh_node);
ret = true;
}
@@ -486,8 +485,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
skb_fragment->len + ETH_HLEN);
- batadv_send_skb_packet(skb_fragment, neigh_node->if_incoming,
- neigh_node->addr);
+ batadv_send_unicast_skb(skb_fragment, neigh_node);
frag_header.no++;
/* The initial check in this function should cover this case */
@@ -506,7 +504,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
skb->len + ETH_HLEN);
- batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_unicast_skb(skb, neigh_node);
ret = true;
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 5ee04f7140af..4423047889e1 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -40,8 +40,8 @@
*
* Return: false on parse error and true otherwise.
*/
-static bool batadv_parse_throughput(struct net_device *net_dev, char *buff,
- const char *description, u32 *throughput)
+bool batadv_parse_throughput(struct net_device *net_dev, char *buff,
+ const char *description, u32 *throughput)
{
enum batadv_bandwidth_units bw_unit_type = BATADV_BW_UNIT_KBIT;
u64 lthroughput;
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index b58346350024..8a5e1ddf1175 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -49,5 +49,7 @@ ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv);
void batadv_gw_init(struct batadv_priv *bat_priv);
void batadv_gw_free(struct batadv_priv *bat_priv);
+bool batadv_parse_throughput(struct net_device *net_dev, char *buff,
+ const char *description, u32 *throughput);
#endif /* _NET_BATMAN_ADV_GATEWAY_COMMON_H_ */
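With batadv_parse_throughput() now exported, callers outside gateway_common.c can reuse the parser; a hedged usage sketch (the 100 kbit/s unit convention is inferred from the "%u.%u MBit" sysfs output elsewhere in this patch):

    u32 throughput;

    /* Accepts values such as "10mbit" or "5000kbit"; on success the
     * result is stored in 100 kbit/s units, so "10mbit" yields 100.
     */
    if (!batadv_parse_throughput(net_dev, buff, "throughput_override",
                                 &throughput))
            return -EINVAL;  /* the parser already logged the error */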
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 6268f08b7154..14d0013b387e 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -278,7 +278,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
ether_addr_copy(icmp_header->orig, primary_if->net_dev->dev_addr);
- batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_unicast_skb(skb, neigh_node);
goto out;
dst_unreach:
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index e3d7051747b0..d64ddb961979 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -87,6 +87,7 @@ static int __init batadv_init(void)
batadv_recv_handler_init();
+ batadv_v_init();
batadv_iv_init();
batadv_nc_init();
@@ -159,6 +160,10 @@ int batadv_mesh_init(struct net_device *soft_iface)
INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);
+ ret = batadv_v_mesh_init(bat_priv);
+ if (ret < 0)
+ goto err;
+
ret = batadv_originator_init(bat_priv);
if (ret < 0)
goto err;
@@ -201,6 +206,8 @@ void batadv_mesh_free(struct net_device *soft_iface)
batadv_purge_outstanding_packets(bat_priv, NULL);
batadv_gw_node_free(bat_priv);
+
+ batadv_v_mesh_free(bat_priv);
batadv_nc_mesh_free(bat_priv);
batadv_dat_free(bat_priv);
batadv_bla_free(bat_priv);
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 8c01f54c61f3..db4533631834 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -24,12 +24,13 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2016.0"
+#define BATADV_SOURCE_VERSION "2016.1"
#endif
/* B.A.T.M.A.N. parameters */
#define BATADV_TQ_MAX_VALUE 255
+#define BATADV_THROUGHPUT_MAX_VALUE 0xFFFFFFFF
#define BATADV_JITTER 20
/* Time To Live of broadcast messages */
@@ -60,6 +61,15 @@
#define BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
#define BATADV_TQ_TOTAL_BIDRECT_LIMIT 1
+/* B.A.T.M.A.N. V */
+#define BATADV_THROUGHPUT_DEFAULT_VALUE 10 /* 1 Mbps */
+#define BATADV_ELP_PROBES_PER_NODE 2
+#define BATADV_ELP_MIN_PROBE_SIZE 200 /* bytes */
+#define BATADV_ELP_PROBE_MAX_TX_DIFF 100 /* milliseconds */
+#define BATADV_ELP_MAX_AGE 64
+#define BATADV_OGM_MAX_ORIGDIFF 5
+#define BATADV_OGM_MAX_AGE 64
+
/* number of OGMs sent with the last tt diff */
#define BATADV_TT_OGM_APPEND_MAX 3
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index d253bb23e2ac..b41719b6487a 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -575,9 +575,7 @@ batadv_nc_hash_find(struct batadv_hashtable *hash,
*/
static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
{
- batadv_send_skb_packet(nc_packet->skb,
- nc_packet->neigh_node->if_incoming,
- nc_packet->nc_path->next_hop);
+ batadv_send_unicast_skb(nc_packet->skb, nc_packet->neigh_node);
nc_packet->skb = NULL;
batadv_nc_packet_free(nc_packet);
}
@@ -1067,11 +1065,11 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
struct batadv_unicast_packet *packet1;
struct batadv_unicast_packet *packet2;
struct batadv_coded_packet *coded_packet;
- struct batadv_neigh_node *neigh_tmp, *router_neigh;
- struct batadv_neigh_node *router_coding = NULL;
+ struct batadv_neigh_node *neigh_tmp, *router_neigh, *first_dest;
+ struct batadv_neigh_node *router_coding = NULL, *second_dest;
struct batadv_neigh_ifinfo *router_neigh_ifinfo = NULL;
struct batadv_neigh_ifinfo *router_coding_ifinfo = NULL;
- u8 *first_source, *first_dest, *second_source, *second_dest;
+ u8 *first_source, *second_source;
__be32 packet_id1, packet_id2;
size_t count;
bool res = false;
@@ -1114,9 +1112,9 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
*/
if (tq_weighted_neigh >= tq_weighted_coding) {
/* Destination from nc_packet is selected for MAC-header */
- first_dest = nc_packet->nc_path->next_hop;
+ first_dest = nc_packet->neigh_node;
first_source = nc_packet->nc_path->prev_hop;
- second_dest = neigh_node->addr;
+ second_dest = neigh_node;
second_source = ethhdr->h_source;
packet1 = (struct batadv_unicast_packet *)nc_packet->skb->data;
packet2 = (struct batadv_unicast_packet *)skb->data;
@@ -1125,9 +1123,9 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
skb->data + sizeof(*packet2));
} else {
/* Destination for skb is selected for MAC-header */
- first_dest = neigh_node->addr;
+ first_dest = neigh_node;
first_source = ethhdr->h_source;
- second_dest = nc_packet->nc_path->next_hop;
+ second_dest = nc_packet->neigh_node;
second_source = nc_packet->nc_path->prev_hop;
packet1 = (struct batadv_unicast_packet *)skb->data;
packet2 = (struct batadv_unicast_packet *)nc_packet->skb->data;
@@ -1169,7 +1167,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
coded_packet->first_ttvn = packet1->ttvn;
/* Info about second unicast packet */
- ether_addr_copy(coded_packet->second_dest, second_dest);
+ ether_addr_copy(coded_packet->second_dest, second_dest->addr);
ether_addr_copy(coded_packet->second_source, second_source);
ether_addr_copy(coded_packet->second_orig_dest, packet2->dest);
coded_packet->second_crc = packet_id2;
@@ -1224,7 +1222,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
batadv_nc_packet_free(nc_packet);
/* Send the coded packet and return true */
- batadv_send_skb_packet(skb_dest, neigh_node->if_incoming, first_dest);
+ batadv_send_unicast_skb(skb_dest, first_dest);
res = true;
out:
if (router_neigh)
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index e7f915181aba..8a8d7ca1a5cf 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -26,6 +26,8 @@
* @BATADV_IV_OGM: originator messages for B.A.T.M.A.N. IV
* @BATADV_BCAST: broadcast packets carrying broadcast payload
* @BATADV_CODED: network coded packets
+ * @BATADV_ELP: echo location packets for B.A.T.M.A.N. V
+ * @BATADV_OGM2: originator messages for B.A.T.M.A.N. V
*
* @BATADV_UNICAST: unicast packets carrying unicast payload traffic
* @BATADV_UNICAST_FRAG: unicast packets carrying a fragment of the original
@@ -40,6 +42,8 @@ enum batadv_packettype {
BATADV_IV_OGM = 0x00,
BATADV_BCAST = 0x01,
BATADV_CODED = 0x02,
+ BATADV_ELP = 0x03,
+ BATADV_OGM2 = 0x04,
/* 0x40 - 0x7f: unicast */
#define BATADV_UNICAST_MIN 0x40
BATADV_UNICAST = 0x40,
@@ -235,6 +239,51 @@ struct batadv_ogm_packet {
#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
/**
+ * struct batadv_ogm2_packet - ogm2 (routing protocol) packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @flags: reserved for routing relevant flags - currently always 0
+ * @seqno: sequence number
+ * @orig: originator mac address
+ * @tvlv_len: length of the appended tvlv buffer (in bytes)
+ * @throughput: the currently flooded path throughput
+ */
+struct batadv_ogm2_packet {
+ u8 packet_type;
+ u8 version;
+ u8 ttl;
+ u8 flags;
+ __be32 seqno;
+ u8 orig[ETH_ALEN];
+ __be16 tvlv_len;
+ __be32 throughput;
+ /* __packed is not needed as the struct size is divisible by 4,
+ * and the largest data type in this struct has a size of 4.
+ */
+};
+
+#define BATADV_OGM2_HLEN sizeof(struct batadv_ogm2_packet)
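The claim in the comment above holds only if the structure really has no padding; a hedged compile-time guard (hypothetical, not part of this patch) would pin the assumption down:

    /* 4 x u8 + __be32 + 6-byte MAC + __be16 + __be32 = 20 bytes */
    BUILD_BUG_ON(sizeof(struct batadv_ogm2_packet) != 20);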
+
+/**
+ * struct batadv_elp_packet - elp (neighbor discovery) packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @orig: originator mac address
+ * @seqno: sequence number
+ * @elp_interval: currently used ELP sending interval in ms
+ */
+struct batadv_elp_packet {
+ u8 packet_type;
+ u8 version;
+ u8 orig[ETH_ALEN];
+ __be32 seqno;
+ __be32 elp_interval;
+};
+
+#define BATADV_ELP_HLEN sizeof(struct batadv_elp_packet)
+
+/**
* struct batadv_icmp_header - common members among all the ICMP packets
* @packet_type: batman-adv packet type, part of the general header
* @version: batman-adv protocol version, part of the general header
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index caff32cf6fe7..3ce06e0a91b1 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -49,16 +49,30 @@
static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
-/* send out an already prepared packet to the given address via the
- * specified batman interface
+/**
+ * batadv_send_skb_packet - send an already prepared packet
+ * @skb: the packet to send
+ * @hard_iface: the interface to use to send the packet
+ * @dst_addr: the destination MAC address of the packet
+ *
+ * Send out an already prepared packet to the given destination address via
+ * the specified hard interface. The interface must be active, otherwise the
+ * packet is dropped.
+ *
+ * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
+ * otherwise
*/
int batadv_send_skb_packet(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface,
const u8 *dst_addr)
{
- struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_priv *bat_priv;
struct ethhdr *ethhdr;
+ bat_priv = netdev_priv(hard_iface->soft_iface);
+
if (hard_iface->if_status != BATADV_IF_ACTIVE)
goto send_skb_err;
@@ -100,6 +114,35 @@ send_skb_err:
return NET_XMIT_DROP;
}
+int batadv_send_broadcast_skb(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface)
+{
+ return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
+}
+
+int batadv_send_unicast_skb(struct sk_buff *skb,
+ struct batadv_neigh_node *neigh)
+{
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ struct batadv_hardif_neigh_node *hardif_neigh;
+#endif
+ int ret;
+
+ ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);
+
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);
+
+ if ((hardif_neigh) && (ret != NET_XMIT_DROP))
+ hardif_neigh->bat_v.last_unicast_tx = jiffies;
+
+ if (hardif_neigh)
+ batadv_hardif_neigh_put(hardif_neigh);
+#endif
+
+ return ret;
+}
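These wrappers give call sites one obvious entry point each; a hedged before/after sketch of the conversion applied throughout this patch:

    /* before: each caller spelled out interface and address */
    batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);

    /* after: the unicast variant also refreshes bat_v.last_unicast_tx
     * when B.A.T.M.A.N. V is compiled in
     */
    batadv_send_unicast_skb(skb, neigh_node);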
+
/**
* batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
* @skb: Packet to be transmitted.
@@ -146,8 +189,7 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
ret = NET_XMIT_POLICED;
} else {
- batadv_send_skb_packet(skb, neigh_node->if_incoming,
- neigh_node->addr);
+ batadv_send_unicast_skb(skb, neigh_node);
ret = NET_XMIT_SUCCESS;
}
@@ -538,8 +580,7 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
/* send a copy of the saved skb */
skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
if (skb1)
- batadv_send_skb_packet(skb1, hard_iface,
- batadv_broadcast_addr);
+ batadv_send_broadcast_skb(skb1, hard_iface);
}
rcu_read_unlock();
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 7ff95cada2e7..6fd7270d8ce6 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -28,12 +28,16 @@
struct sk_buff;
struct work_struct;
-int batadv_send_skb_packet(struct sk_buff *skb,
- struct batadv_hard_iface *hard_iface,
- const u8 *dst_addr);
int batadv_send_skb_to_orig(struct sk_buff *skb,
struct batadv_orig_node *orig_node,
struct batadv_hard_iface *recv_if);
+int batadv_send_skb_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface,
+ const u8 *dst_addr);
+int batadv_send_broadcast_skb(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface);
+int batadv_send_unicast_skb(struct sk_buff *skb,
+ struct batadv_neigh_node *neigh_node);
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface);
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
const struct sk_buff *skb,
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 4d70d4413e40..e7cf51333a36 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -242,6 +242,55 @@ ssize_t batadv_show_vlan_##_name(struct kobject *kobj, \
static BATADV_ATTR_VLAN(_name, _mode, batadv_show_vlan_##_name, \
batadv_store_vlan_##_name)
+#define BATADV_ATTR_HIF_STORE_UINT(_name, _var, _min, _max, _post_func) \
+ssize_t batadv_store_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff, \
+ size_t count) \
+{ \
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
+ struct batadv_hard_iface *hard_iface; \
+ ssize_t length; \
+ \
+ hard_iface = batadv_hardif_get_by_netdev(net_dev); \
+ if (!hard_iface) \
+ return 0; \
+ \
+ length = __batadv_store_uint_attr(buff, count, _min, _max, \
+ _post_func, attr, \
+ &hard_iface->_var, net_dev); \
+ \
+ batadv_hardif_put(hard_iface); \
+ return length; \
+}
+
+#define BATADV_ATTR_HIF_SHOW_UINT(_name, _var) \
+ssize_t batadv_show_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff) \
+{ \
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
+ struct batadv_hard_iface *hard_iface; \
+ ssize_t length; \
+ \
+ hard_iface = batadv_hardif_get_by_netdev(net_dev); \
+ if (!hard_iface) \
+ return 0; \
+ \
+ length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_var)); \
+ \
+ batadv_hardif_put(hard_iface); \
+ return length; \
+}
+
+/* Use this, if you are going to set [name] in hard_iface to an
+ * unsigned integer value
+ */
+#define BATADV_ATTR_HIF_UINT(_name, _var, _mode, _min, _max, _post_func)\
+ static BATADV_ATTR_HIF_STORE_UINT(_name, _var, _min, \
+ _max, _post_func) \
+ static BATADV_ATTR_HIF_SHOW_UINT(_name, _var) \
+ static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
+ batadv_store_##_name)
+
static int batadv_store_bool_attr(char *buff, size_t count,
struct net_device *net_dev,
const char *attr_name, atomic_t *attr,
@@ -868,13 +917,94 @@ static ssize_t batadv_show_iface_status(struct kobject *kobj,
return length;
}
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+
+/**
+ * batadv_store_throughput_override - parse and store throughput override
+ * entered by the user
+ * @kobj: kobject representing the private mesh sysfs directory
+ * @attr: the batman-adv attribute the user is interacting with
+ * @buff: the buffer containing the user data
+ * @count: number of bytes in the buffer
+ *
+ * Return: 'count' on success or a negative error code in case of failure
+ */
+static ssize_t batadv_store_throughput_override(struct kobject *kobj,
+ struct attribute *attr,
+ char *buff, size_t count)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+ struct batadv_hard_iface *hard_iface;
+ u32 tp_override;
+ u32 old_tp_override;
+ bool ret;
+
+ hard_iface = batadv_hardif_get_by_netdev(net_dev);
+ if (!hard_iface)
+ return -EINVAL;
+
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ ret = batadv_parse_throughput(net_dev, buff, "throughput_override",
+ &tp_override);
+ if (!ret)
+ goto out;
+
+ old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
+ if (old_tp_override == tp_override)
+ goto out;
+
+ batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n",
+ "throughput_override",
+ old_tp_override / 10, old_tp_override % 10,
+ tp_override / 10, tp_override % 10);
+
+ atomic_set(&hard_iface->bat_v.throughput_override, tp_override);
+
+out:
+ batadv_hardif_put(hard_iface);
+ return count;
+}
+
+static ssize_t batadv_show_throughput_override(struct kobject *kobj,
+ struct attribute *attr,
+ char *buff)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+ struct batadv_hard_iface *hard_iface;
+ u32 tp_override;
+
+ hard_iface = batadv_hardif_get_by_netdev(net_dev);
+ if (!hard_iface)
+ return -EINVAL;
+
+ tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
+
+ batadv_hardif_put(hard_iface);
+ return sprintf(buff, "%u.%u MBit\n", tp_override / 10,
+ tp_override % 10);
+}
+
+#endif
+
static BATADV_ATTR(mesh_iface, S_IRUGO | S_IWUSR, batadv_show_mesh_iface,
batadv_store_mesh_iface);
static BATADV_ATTR(iface_status, S_IRUGO, batadv_show_iface_status, NULL);
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+BATADV_ATTR_HIF_UINT(elp_interval, bat_v.elp_interval, S_IRUGO | S_IWUSR,
+ 2 * BATADV_JITTER, INT_MAX, NULL);
+static BATADV_ATTR(throughput_override, S_IRUGO | S_IWUSR,
+ batadv_show_throughput_override,
+ batadv_store_throughput_override);
+#endif
static struct batadv_attribute *batadv_batman_attrs[] = {
&batadv_attr_mesh_iface,
&batadv_attr_iface_status,
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ &batadv_attr_elp_interval,
+ &batadv_attr_throughput_override,
+#endif
NULL,
};
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 612de23178e6..9abfb3e73c34 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -22,6 +22,7 @@
#error only "main.h" can be included directly
#endif
+#include <linux/average.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/if_ether.h>
@@ -86,6 +87,36 @@ struct batadv_hard_iface_bat_iv {
};
/**
+ * enum batadv_v_hard_iface_flags - interface flags useful to B.A.T.M.A.N. V
+ * @BATADV_FULL_DUPLEX: tells if the connection over this link is full-duplex
+ * @BATADV_WARNING_DEFAULT: tells whether we have warned the user that no
+ * throughput data is available for this interface and that default values are
+ * assumed.
+ */
+enum batadv_v_hard_iface_flags {
+ BATADV_FULL_DUPLEX = BIT(0),
+ BATADV_WARNING_DEFAULT = BIT(1),
+};
+
+/**
+ * struct batadv_hard_iface_bat_v - per hard-interface B.A.T.M.A.N. V data
+ * @elp_interval: time interval between two ELP transmissions
+ * @elp_seqno: current ELP sequence number
+ * @elp_skb: base skb containing the ELP message to send
+ * @elp_wq: workqueue used to schedule ELP transmissions
+ * @throughput_override: throughput override to disable link auto-detection
+ * @flags: interface specific flags
+ */
+struct batadv_hard_iface_bat_v {
+ atomic_t elp_interval;
+ atomic_t elp_seqno;
+ struct sk_buff *elp_skb;
+ struct delayed_work elp_wq;
+ atomic_t throughput_override;
+ u8 flags;
+};
+
+/**
* struct batadv_hard_iface - network device known to batman-adv
* @list: list node for batadv_hardif_list
* @if_num: identificator of the interface
@@ -99,6 +130,7 @@ struct batadv_hard_iface_bat_iv {
* @soft_iface: the batman-adv interface which uses this network interface
* @rcu: struct used for freeing in an RCU-safe manner
* @bat_iv: per hard-interface B.A.T.M.A.N. IV data
+ * @bat_v: per hard-interface B.A.T.M.A.N. V data
* @cleanup_work: work queue callback item for hard-interface deinit
* @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
* @neigh_list: list of unique single hop neighbors via this interface
@@ -116,6 +148,9 @@ struct batadv_hard_iface {
struct net_device *soft_iface;
struct rcu_head rcu;
struct batadv_hard_iface_bat_iv bat_iv;
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ struct batadv_hard_iface_bat_v bat_v;
+#endif
struct work_struct cleanup_work;
struct dentry *debug_dir;
struct hlist_head neigh_list;
@@ -130,6 +165,7 @@ struct batadv_hard_iface {
* @router: router that should be used to reach this originator
* @last_real_seqno: last and best known sequence number
* @last_ttl: ttl of last received packet
+ * @last_seqno_forwarded: seqno of the OGM which was forwarded last
* @batman_seqno_reset: time when the batman seqno window was reset
* @refcount: number of contexts the object is used
* @rcu: struct used for freeing in an RCU-safe manner
@@ -140,6 +176,7 @@ struct batadv_orig_ifinfo {
struct batadv_neigh_node __rcu *router; /* rcu protected pointer */
u32 last_real_seqno;
u8 last_ttl;
+ u32 last_seqno_forwarded;
unsigned long batman_seqno_reset;
struct kref refcount;
struct rcu_head rcu;
@@ -346,12 +383,32 @@ struct batadv_gw_node {
struct rcu_head rcu;
};
+DECLARE_EWMA(throughput, 1024, 8)
+
+/**
+ * struct batadv_hardif_neigh_node_bat_v - B.A.T.M.A.N. V private neighbor
+ * information
+ * @throughput: ewma link throughput towards this neighbor
+ * @elp_interval: time interval between two ELP transmissions
+ * @elp_latest_seqno: latest and best known ELP sequence number
+ * @last_unicast_tx: when the last unicast packet has been sent to this neighbor
+ * @metric_work: work queue callback item for metric update
+ */
+struct batadv_hardif_neigh_node_bat_v {
+ struct ewma_throughput throughput;
+ u32 elp_interval;
+ u32 elp_latest_seqno;
+ unsigned long last_unicast_tx;
+ struct work_struct metric_work;
+};
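DECLARE_EWMA(throughput, 1024, 8) above generates the ewma_throughput_init/add/read helpers used for this field; a hedged sketch of their use (per the <linux/average.h> of this era, 1024 is the internal precision factor and 8 the inverse weight of each new sample):

    struct ewma_throughput ewma;
    u32 avg;

    ewma_throughput_init(&ewma);
    ewma_throughput_add(&ewma, 100);    /* samples in 100 kbit/s units */
    ewma_throughput_add(&ewma, 50);     /* each new sample weighs 1/8 */
    avg = ewma_throughput_read(&ewma);  /* smoothed link throughput */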
+
/**
* struct batadv_hardif_neigh_node - unique neighbor per hard-interface
* @list: list node for batadv_hard_iface::neigh_list
* @addr: the MAC address of the neighboring interface
* @if_incoming: pointer to incoming hard-interface
* @last_seen: when last packet via this neighbor was received
+ * @bat_v: B.A.T.M.A.N. V private data
* @refcount: number of contexts the object is used
* @rcu: struct used for freeing in a RCU-safe manner
*/
@@ -360,6 +417,9 @@ struct batadv_hardif_neigh_node {
u8 addr[ETH_ALEN];
struct batadv_hard_iface *if_incoming;
unsigned long last_seen;
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ struct batadv_hardif_neigh_node_bat_v bat_v;
+#endif
struct kref refcount;
struct rcu_head rcu;
};
@@ -407,10 +467,22 @@ struct batadv_neigh_ifinfo_bat_iv {
};
/**
+ * struct batadv_neigh_ifinfo_bat_v - neighbor information per outgoing
+ * interface for B.A.T.M.A.N. V
+ * @throughput: last throughput metric received from originator via this neigh
+ * @last_seqno: last sequence number known for this neighbor
+ */
+struct batadv_neigh_ifinfo_bat_v {
+ u32 throughput;
+ u32 last_seqno;
+};
+
+/**
* struct batadv_neigh_ifinfo - neighbor information per outgoing interface
* @list: list node for batadv_neigh_node::ifinfo_list
* @if_outgoing: pointer to outgoing hard-interface
* @bat_iv: B.A.T.M.A.N. IV private structure
+ * @bat_v: B.A.T.M.A.N. V private data
* @last_ttl: last received ttl from this neigh node
* @refcount: number of contexts the object is used
* @rcu: struct used for freeing in a RCU-safe manner
@@ -419,6 +491,9 @@ struct batadv_neigh_ifinfo {
struct hlist_node list;
struct batadv_hard_iface *if_outgoing;
struct batadv_neigh_ifinfo_bat_iv bat_iv;
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ struct batadv_neigh_ifinfo_bat_v bat_v;
+#endif
u8 last_ttl;
struct kref refcount;
struct rcu_head rcu;
@@ -751,6 +826,20 @@ struct batadv_softif_vlan {
};
/**
+ * struct batadv_priv_bat_v - B.A.T.M.A.N. V per soft-interface private data
+ * @ogm_buff: buffer holding the OGM packet
+ * @ogm_buff_len: length of the OGM packet buffer
+ * @ogm_seqno: OGM sequence number - used to identify each OGM
+ * @ogm_wq: workqueue used to schedule OGM transmissions
+ */
+struct batadv_priv_bat_v {
+ unsigned char *ogm_buff;
+ int ogm_buff_len;
+ atomic_t ogm_seqno;
+ struct delayed_work ogm_wq;
+};
+
+/**
* struct batadv_priv - per mesh interface data
* @mesh_state: current status of the mesh (inactive/active/deactivating)
* @soft_iface: net device which holds this struct as private data
@@ -804,6 +893,7 @@ struct batadv_softif_vlan {
* @mcast: multicast data
* @network_coding: bool indicating whether network coding is enabled
* @nc: network coding data
+ * @bat_v: B.A.T.M.A.N. V per soft-interface private data
*/
struct batadv_priv {
atomic_t mesh_state;
@@ -869,6 +959,9 @@ struct batadv_priv {
atomic_t network_coding;
struct batadv_priv_nc nc;
#endif /* CONFIG_BATMAN_ADV_NC */
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ struct batadv_priv_bat_v bat_v;
+#endif
};
/**
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 95d1a66ba03a..06c31b9a68b0 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -69,6 +69,15 @@ config BT_6LOWPAN
help
IPv6 compression over Bluetooth Low Energy.
+config BT_LEDS
+ bool "Enable LED triggers"
+ depends on BT
+ depends on LEDS_CLASS
+ select LEDS_TRIGGERS
+ help
+ This option selects a few LED triggers for different
+ Bluetooth events.
+
config BT_SELFTEST
bool "Bluetooth self testing support"
depends on BT && DEBUG_KERNEL
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 2b15ae8c1def..b3ff12eb9b6d 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -17,6 +17,7 @@ bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
bluetooth-$(CONFIG_BT_BREDR) += sco.o
bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
+bluetooth-$(CONFIG_BT_LEDS) += leds.o
bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 883c821a9e78..2713fc86e85a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -40,6 +40,7 @@
#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
+#include "leds.h"
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
@@ -1395,6 +1396,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
set_bit(HCI_UP, &hdev->flags);
hci_sock_dev_event(hdev, HCI_DEV_UP);
+ hci_leds_update_powered(hdev, true);
if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
!hci_dev_test_flag(hdev, HCI_CONFIG) &&
!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
@@ -1532,6 +1534,8 @@ int hci_dev_do_close(struct hci_dev *hdev)
return 0;
}
+ hci_leds_update_powered(hdev, false);
+
/* Flush RX and TX works */
flush_work(&hdev->tx_work);
flush_work(&hdev->rx_work);
@@ -2017,6 +2021,7 @@ static void hci_power_on(struct work_struct *work)
if (test_bit(HCI_UP, &hdev->flags) &&
hci_dev_test_flag(hdev, HCI_MGMT) &&
hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
+ cancel_delayed_work(&hdev->power_off);
hci_req_sync_lock(hdev);
err = __hci_req_hci_power_on(hdev);
hci_req_sync_unlock(hdev);
@@ -3067,6 +3072,8 @@ int hci_register_dev(struct hci_dev *hdev)
if (error < 0)
goto err_wqueue;
+ hci_leds_init(hdev);
+
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
hdev);
diff --git a/net/bluetooth/leds.c b/net/bluetooth/leds.c
new file mode 100644
index 000000000000..8319c8440c89
--- /dev/null
+++ b/net/bluetooth/leds.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2015, Heiner Kallweit <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "leds.h"
+
+struct hci_basic_led_trigger {
+ struct led_trigger led_trigger;
+ struct hci_dev *hdev;
+};
+
+#define to_hci_basic_led_trigger(arg) container_of(arg, \
+ struct hci_basic_led_trigger, led_trigger)
+
+void hci_leds_update_powered(struct hci_dev *hdev, bool enabled)
+{
+ if (hdev->power_led)
+ led_trigger_event(hdev->power_led,
+ enabled ? LED_FULL : LED_OFF);
+}
+
+static void power_activate(struct led_classdev *led_cdev)
+{
+ struct hci_basic_led_trigger *htrig;
+ bool powered;
+
+ htrig = to_hci_basic_led_trigger(led_cdev->trigger);
+ powered = test_bit(HCI_UP, &htrig->hdev->flags);
+
+ led_trigger_event(led_cdev->trigger, powered ? LED_FULL : LED_OFF);
+}
+
+static struct led_trigger *led_allocate_basic(struct hci_dev *hdev,
+ void (*activate)(struct led_classdev *led_cdev),
+ const char *name)
+{
+ struct hci_basic_led_trigger *htrig;
+
+ htrig = devm_kzalloc(&hdev->dev, sizeof(*htrig), GFP_KERNEL);
+ if (!htrig)
+ return NULL;
+
+ htrig->hdev = hdev;
+ htrig->led_trigger.activate = activate;
+ htrig->led_trigger.name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ "%s-%s", hdev->name,
+ name);
+ if (!htrig->led_trigger.name)
+ goto err_alloc;
+
+ if (devm_led_trigger_register(&hdev->dev, &htrig->led_trigger))
+ goto err_register;
+
+ return &htrig->led_trigger;
+
+err_register:
+ devm_kfree(&hdev->dev, (void *)htrig->led_trigger.name);
+err_alloc:
+ devm_kfree(&hdev->dev, htrig);
+ return NULL;
+}
+
+void hci_leds_init(struct hci_dev *hdev)
+{
+ /* initialize power_led */
+ hdev->power_led = led_allocate_basic(hdev, power_activate, "power");
+}
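A trigger registered this way is visible to the LED subsystem under the generated name (e.g. "hci0-power" for the first controller); a hedged sketch of a board-level consumer (struct gpio_led is from <linux/leds.h>; the LED name is hypothetical):

    /* hypothetical LED that mirrors hci0's powered state */
    static const struct gpio_led bt_power_led = {
            .name            = "bt:blue:power",
            .default_trigger = "hci0-power",  /* "<hdev name>-power" */
    };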
diff --git a/net/bluetooth/leds.h b/net/bluetooth/leds.h
new file mode 100644
index 000000000000..a9c4d6ea01cf
--- /dev/null
+++ b/net/bluetooth/leds.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2015, Heiner Kallweit <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if IS_ENABLED(CONFIG_BT_LEDS)
+void hci_leds_update_powered(struct hci_dev *hdev, bool enabled);
+void hci_leds_init(struct hci_dev *hdev);
+#else
+static inline void hci_leds_update_powered(struct hci_dev *hdev,
+ bool enabled) {}
+static inline void hci_leds_init(struct hci_dev *hdev) {}
+#endif
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 82e3e9705017..dcea4f4c62b3 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -723,6 +723,8 @@ int br_fdb_dump(struct sk_buff *skb,
struct net_bridge_fdb_entry *f;
hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
+ int err;
+
if (idx < cb->args[0])
goto skip;
@@ -741,12 +743,15 @@ int br_fdb_dump(struct sk_buff *skb,
if (!filter_dev && f->dst)
goto skip;
- if (fdb_fill_info(skb, br, f,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH,
- NLM_F_MULTI) < 0)
+ err = fdb_fill_info(skb, br, f,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH,
+ NLM_F_MULTI);
+ if (err < 0) {
+ cb->args[1] = err;
break;
+ }
skip:
++idx;
}
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index fcdb86dd5a23..f47759f05b6d 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -44,7 +44,6 @@ int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb
skb_push(skb, ETH_HLEN);
br_drop_fake_rtable(skb);
- skb_sender_cpu_clear(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL &&
(skb->protocol == htons(ETH_P_8021Q) ||
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index b37a1cc97d98..a73df3315df9 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -223,6 +223,31 @@ static void destroy_nbp_rcu(struct rcu_head *head)
destroy_nbp(p);
}
+static unsigned get_max_headroom(struct net_bridge *br)
+{
+ unsigned max_headroom = 0;
+ struct net_bridge_port *p;
+
+ list_for_each_entry(p, &br->port_list, list) {
+ unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);
+
+ if (dev_headroom > max_headroom)
+ max_headroom = dev_headroom;
+ }
+
+ return max_headroom;
+}
+
+static void update_headroom(struct net_bridge *br, int new_hr)
+{
+ struct net_bridge_port *p;
+
+ list_for_each_entry(p, &br->port_list, list)
+ netdev_set_rx_headroom(p->dev, new_hr);
+
+ br->dev->needed_headroom = new_hr;
+}
+
/* Delete port(interface) from bridge is done in two steps.
* via RCU. First step, marks device as down. That deletes
* all the timers and stops new packets from flowing through.
@@ -248,6 +273,9 @@ static void del_nbp(struct net_bridge_port *p)
br_ifinfo_notify(RTM_DELLINK, p);
list_del_rcu(&p->list);
+ if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
+ update_headroom(br, get_max_headroom(br));
+ netdev_reset_rx_headroom(dev);
nbp_vlan_flush(p);
br_fdb_delete_by_port(br, p, 0, 1);
@@ -438,6 +466,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
{
struct net_bridge_port *p;
int err = 0;
+ unsigned br_hr, dev_hr;
bool changed_addr;
/* Don't allow bridging non-ethernet like devices, or DSA-enabled
@@ -505,8 +534,12 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
netdev_update_features(br->dev);
- if (br->dev->needed_headroom < dev->needed_headroom)
- br->dev->needed_headroom = dev->needed_headroom;
+ br_hr = br->dev->needed_headroom;
+ dev_hr = netdev_get_fwd_headroom(dev);
+ if (br_hr < dev_hr)
+ update_headroom(br, dev_hr);
+ else
+ netdev_set_rx_headroom(dev, br_hr);
if (br_fdb_insert(br, p, dev->dev_addr, 0))
netdev_err(dev, "failed insert local address bridge forwarding table\n");
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 73786e2fe065..253bc77eda3b 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -20,7 +20,7 @@ static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p;
- struct nlattr *nest;
+ struct nlattr *nest, *port_nest;
if (!br->multicast_router || hlist_empty(&br->router_list))
return 0;
@@ -30,8 +30,20 @@ static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
return -EMSGSIZE;
hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
- if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex))
+ if (!p)
+ continue;
+ port_nest = nla_nest_start(skb, MDBA_ROUTER_PORT);
+ if (!port_nest)
goto fail;
+ if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
+ nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
+ br_timer_value(&p->multicast_router_timer)) ||
+ nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
+ p->multicast_router)) {
+ nla_nest_cancel(skb, port_nest);
+ goto fail;
+ }
+ nla_nest_end(skb, port_nest);
}
nla_nest_end(skb, nest);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 8b6e4249be1b..a4c15df2b792 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -759,13 +759,17 @@ static void br_multicast_router_expired(unsigned long data)
struct net_bridge *br = port->br;
spin_lock(&br->multicast_lock);
- if (port->multicast_router != 1 ||
+ if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
+ port->multicast_router == MDB_RTR_TYPE_PERM ||
timer_pending(&port->multicast_router_timer) ||
hlist_unhashed(&port->rlist))
goto out;
hlist_del_init_rcu(&port->rlist);
br_rtr_notify(br->dev, port, RTM_DELMDB);
+ /* Don't allow timer refresh if the router expired */
+ if (port->multicast_router == MDB_RTR_TYPE_TEMP)
+ port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
out:
spin_unlock(&br->multicast_lock);
@@ -912,7 +916,7 @@ static void br_ip6_multicast_port_query_expired(unsigned long data)
void br_multicast_add_port(struct net_bridge_port *port)
{
- port->multicast_router = 1;
+ port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
(unsigned long)port);
@@ -959,7 +963,8 @@ void br_multicast_enable_port(struct net_bridge_port *port)
#if IS_ENABLED(CONFIG_IPV6)
br_multicast_enable(&port->ip6_own_query);
#endif
- if (port->multicast_router == 2 && hlist_unhashed(&port->rlist))
+ if (port->multicast_router == MDB_RTR_TYPE_PERM &&
+ hlist_unhashed(&port->rlist))
br_multicast_add_router(br, port);
out:
@@ -980,6 +985,9 @@ void br_multicast_disable_port(struct net_bridge_port *port)
if (!hlist_unhashed(&port->rlist)) {
hlist_del_init_rcu(&port->rlist);
br_rtr_notify(br->dev, port, RTM_DELMDB);
+ /* Don't allow timer refresh if disabling */
+ if (port->multicast_router == MDB_RTR_TYPE_TEMP)
+ port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
del_timer(&port->multicast_router_timer);
del_timer(&port->ip4_own_query.timer);
@@ -1227,13 +1235,14 @@ static void br_multicast_mark_router(struct net_bridge *br,
unsigned long now = jiffies;
if (!port) {
- if (br->multicast_router == 1)
+ if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY)
mod_timer(&br->multicast_router_timer,
now + br->multicast_querier_interval);
return;
}
- if (port->multicast_router != 1)
+ if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
+ port->multicast_router == MDB_RTR_TYPE_PERM)
return;
br_multicast_add_router(br, port);
@@ -1713,7 +1722,7 @@ void br_multicast_init(struct net_bridge *br)
br->hash_elasticity = 4;
br->hash_max = 512;
- br->multicast_router = 1;
+ br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
br->multicast_querier = 0;
br->multicast_query_use_ifaddr = 0;
br->multicast_last_member_count = 2;
@@ -1823,11 +1832,11 @@ int br_multicast_set_router(struct net_bridge *br, unsigned long val)
spin_lock_bh(&br->multicast_lock);
switch (val) {
- case 0:
- case 2:
+ case MDB_RTR_TYPE_DISABLED:
+ case MDB_RTR_TYPE_PERM:
del_timer(&br->multicast_router_timer);
/* fall through */
- case 1:
+ case MDB_RTR_TYPE_TEMP_QUERY:
br->multicast_router = val;
err = 0;
break;
@@ -1838,37 +1847,53 @@ int br_multicast_set_router(struct net_bridge *br, unsigned long val)
return err;
}
+static void __del_port_router(struct net_bridge_port *p)
+{
+ if (hlist_unhashed(&p->rlist))
+ return;
+ hlist_del_init_rcu(&p->rlist);
+ br_rtr_notify(p->br->dev, p, RTM_DELMDB);
+}
+
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
struct net_bridge *br = p->br;
+ unsigned long now = jiffies;
int err = -EINVAL;
spin_lock(&br->multicast_lock);
-
- switch (val) {
- case 0:
- case 1:
- case 2:
- p->multicast_router = val;
+ if (p->multicast_router == val) {
+ /* Refresh the temp router port timer */
+ if (p->multicast_router == MDB_RTR_TYPE_TEMP)
+ mod_timer(&p->multicast_router_timer,
+ now + br->multicast_querier_interval);
err = 0;
-
- if (val < 2 && !hlist_unhashed(&p->rlist)) {
- hlist_del_init_rcu(&p->rlist);
- br_rtr_notify(br->dev, p, RTM_DELMDB);
- }
-
- if (val == 1)
- break;
-
+ goto unlock;
+ }
+ switch (val) {
+ case MDB_RTR_TYPE_DISABLED:
+ p->multicast_router = MDB_RTR_TYPE_DISABLED;
+ __del_port_router(p);
+ del_timer(&p->multicast_router_timer);
+ break;
+ case MDB_RTR_TYPE_TEMP_QUERY:
+ p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
+ __del_port_router(p);
+ break;
+ case MDB_RTR_TYPE_PERM:
+ p->multicast_router = MDB_RTR_TYPE_PERM;
del_timer(&p->multicast_router_timer);
-
- if (val == 0)
- break;
-
br_multicast_add_router(br, p);
break;
+ case MDB_RTR_TYPE_TEMP:
+ p->multicast_router = MDB_RTR_TYPE_TEMP;
+ br_multicast_mark_router(br, p);
+ break;
+ default:
+ goto unlock;
}
-
+ err = 0;
+unlock:
spin_unlock(&br->multicast_lock);
return err;
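
For reference while reading the conversion above: the numeric router-port
values being replaced map onto an enum added to
include/uapi/linux/if_bridge.h earlier in this series. The values below are
inferred from the old 0/1/2 case labels in these hunks; only
MDB_RTR_TYPE_TEMP is genuinely new:

enum {
	MDB_RTR_TYPE_DISABLED,   /* was 0: never a router port */
	MDB_RTR_TYPE_TEMP_QUERY, /* was 1: learnt from queries (default) */
	MDB_RTR_TYPE_PERM,       /* was 2: permanent, no timer */
	MDB_RTR_TYPE_TEMP,       /* new: temporary, refreshed while set */
};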
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 9cfedf565f5b..9382619a405b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1197,6 +1197,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
return new_piece;
}
+static size_t sizeof_footer(struct ceph_connection *con)
+{
+ return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
+ sizeof(struct ceph_msg_footer) :
+ sizeof(struct ceph_msg_footer_old);
+}
+
static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
BUG_ON(!msg);
@@ -2335,9 +2342,9 @@ static int read_partial_message(struct ceph_connection *con)
ceph_pr_addr(&con->peer_addr.in_addr),
seq, con->in_seq + 1);
con->in_base_pos = -front_len - middle_len - data_len -
- sizeof(m->footer);
+ sizeof_footer(con);
con->in_tag = CEPH_MSGR_TAG_READY;
- return 0;
+ return 1;
} else if ((s64)seq - (s64)con->in_seq > 1) {
pr_err("read_partial_message bad seq %lld expected %lld\n",
seq, con->in_seq + 1);
@@ -2360,10 +2367,10 @@ static int read_partial_message(struct ceph_connection *con)
/* skip this message */
dout("alloc_msg said skip message\n");
con->in_base_pos = -front_len - middle_len - data_len -
- sizeof(m->footer);
+ sizeof_footer(con);
con->in_tag = CEPH_MSGR_TAG_READY;
con->in_seq++;
- return 0;
+ return 1;
}
BUG_ON(!con->in_msg);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 3534e12683d3..5bc053778fed 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2853,8 +2853,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
mutex_lock(&osdc->request_mutex);
req = __lookup_request(osdc, tid);
if (!req) {
- pr_warn("%s osd%d tid %llu unknown, skipping\n",
- __func__, osd->o_osd, tid);
+ dout("%s osd%d tid %llu unknown, skipping\n", __func__,
+ osd->o_osd, tid);
m = NULL;
*skip = 1;
goto out;
diff --git a/net/core/Makefile b/net/core/Makefile
index 7a8fb8aef992..014422e2561f 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
obj-$(CONFIG_DST_CACHE) += dst_cache.o
+obj-$(CONFIG_NET_DEVLINK) += devlink.o
diff --git a/net/core/devlink.c b/net/core/devlink.c
new file mode 100644
index 000000000000..590fa561cb7f
--- /dev/null
+++ b/net/core/devlink.c
@@ -0,0 +1,738 @@
+/*
+ * net/core/devlink.c - Network physical/parent device Netlink interface
+ *
+ * Heavily inspired by net/wireless/
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <rdma/ib_verbs.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#include <net/rtnetlink.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <net/devlink.h>
+
+static LIST_HEAD(devlink_list);
+
+/* devlink_mutex
+ *
+ * An overall lock guarding every operation coming from userspace.
+ * It also guards devlink devices list and it is taken when
+ * driver registers/unregisters it.
+ */
+static DEFINE_MUTEX(devlink_mutex);
+
+/* devlink_port_mutex
+ *
+ * Shared lock to guard lists of ports in all devlink devices.
+ */
+static DEFINE_MUTEX(devlink_port_mutex);
+
+static struct net *devlink_net(const struct devlink *devlink)
+{
+ return read_pnet(&devlink->_net);
+}
+
+static void devlink_net_set(struct devlink *devlink, struct net *net)
+{
+ write_pnet(&devlink->_net, net);
+}
+
+static struct devlink *devlink_get_from_attrs(struct net *net,
+ struct nlattr **attrs)
+{
+ struct devlink *devlink;
+ char *busname;
+ char *devname;
+
+ if (!attrs[DEVLINK_ATTR_BUS_NAME] || !attrs[DEVLINK_ATTR_DEV_NAME])
+ return ERR_PTR(-EINVAL);
+
+ busname = nla_data(attrs[DEVLINK_ATTR_BUS_NAME]);
+ devname = nla_data(attrs[DEVLINK_ATTR_DEV_NAME]);
+
+ list_for_each_entry(devlink, &devlink_list, list) {
+ if (strcmp(devlink->dev->bus->name, busname) == 0 &&
+ strcmp(dev_name(devlink->dev), devname) == 0 &&
+ net_eq(devlink_net(devlink), net))
+ return devlink;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct devlink *devlink_get_from_info(struct genl_info *info)
+{
+ return devlink_get_from_attrs(genl_info_net(info), info->attrs);
+}
+
+static struct devlink_port *devlink_port_get_by_index(struct devlink *devlink,
+ int port_index)
+{
+ struct devlink_port *devlink_port;
+
+ list_for_each_entry(devlink_port, &devlink->port_list, list) {
+ if (devlink_port->index == port_index)
+ return devlink_port;
+ }
+ return NULL;
+}
+
+static bool devlink_port_index_exists(struct devlink *devlink, int port_index)
+{
+ return devlink_port_get_by_index(devlink, port_index);
+}
+
+static struct devlink_port *devlink_port_get_from_attrs(struct devlink *devlink,
+ struct nlattr **attrs)
+{
+ if (attrs[DEVLINK_ATTR_PORT_INDEX]) {
+ u32 port_index = nla_get_u32(attrs[DEVLINK_ATTR_PORT_INDEX]);
+ struct devlink_port *devlink_port;
+
+ devlink_port = devlink_port_get_by_index(devlink, port_index);
+ if (!devlink_port)
+ return ERR_PTR(-ENODEV);
+ return devlink_port;
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static struct devlink_port *devlink_port_get_from_info(struct devlink *devlink,
+ struct genl_info *info)
+{
+ return devlink_port_get_from_attrs(devlink, info->attrs);
+}
+
+#define DEVLINK_NL_FLAG_NEED_PORT BIT(0)
+
+static int devlink_nl_pre_doit(const struct genl_ops *ops,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink;
+
+ mutex_lock(&devlink_mutex);
+ devlink = devlink_get_from_info(info);
+ if (IS_ERR(devlink)) {
+ mutex_unlock(&devlink_mutex);
+ return PTR_ERR(devlink);
+ }
+ info->user_ptr[0] = devlink;
+ if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
+ struct devlink_port *devlink_port;
+
+ mutex_lock(&devlink_port_mutex);
+ devlink_port = devlink_port_get_from_info(devlink, info);
+ if (IS_ERR(devlink_port)) {
+ mutex_unlock(&devlink_port_mutex);
+ mutex_unlock(&devlink_mutex);
+ return PTR_ERR(devlink_port);
+ }
+ info->user_ptr[1] = devlink_port;
+ }
+ return 0;
+}
+
+static void devlink_nl_post_doit(const struct genl_ops *ops,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT)
+ mutex_unlock(&devlink_port_mutex);
+ mutex_unlock(&devlink_mutex);
+}
+
+static struct genl_family devlink_nl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = DEVLINK_GENL_NAME,
+ .version = DEVLINK_GENL_VERSION,
+ .maxattr = DEVLINK_ATTR_MAX,
+ .netnsok = true,
+ .pre_doit = devlink_nl_pre_doit,
+ .post_doit = devlink_nl_post_doit,
+};
+
+enum devlink_multicast_groups {
+ DEVLINK_MCGRP_CONFIG,
+};
+
+static const struct genl_multicast_group devlink_nl_mcgrps[] = {
+ [DEVLINK_MCGRP_CONFIG] = { .name = DEVLINK_GENL_MCGRP_CONFIG_NAME },
+};
+
+static int devlink_nl_put_handle(struct sk_buff *msg, struct devlink *devlink)
+{
+ if (nla_put_string(msg, DEVLINK_ATTR_BUS_NAME, devlink->dev->bus->name))
+ return -EMSGSIZE;
+ if (nla_put_string(msg, DEVLINK_ATTR_DEV_NAME, dev_name(devlink->dev)))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags)
+{
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static void devlink_notify(struct devlink *devlink, enum devlink_command cmd)
+{
+ struct sk_buff *msg;
+ int err;
+
+ WARN_ON(cmd != DEVLINK_CMD_NEW && cmd != DEVLINK_CMD_DEL);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ err = devlink_nl_fill(msg, devlink, cmd, 0, 0, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+ msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags)
+{
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
+ goto nla_put_failure;
+ if (nla_put_u16(msg, DEVLINK_ATTR_PORT_TYPE, devlink_port->type))
+ goto nla_put_failure;
+ if (devlink_port->desired_type != DEVLINK_PORT_TYPE_NOTSET &&
+ nla_put_u16(msg, DEVLINK_ATTR_PORT_DESIRED_TYPE,
+ devlink_port->desired_type))
+ goto nla_put_failure;
+ if (devlink_port->type == DEVLINK_PORT_TYPE_ETH) {
+ struct net_device *netdev = devlink_port->type_dev;
+
+ if (netdev &&
+ (nla_put_u32(msg, DEVLINK_ATTR_PORT_NETDEV_IFINDEX,
+ netdev->ifindex) ||
+ nla_put_string(msg, DEVLINK_ATTR_PORT_NETDEV_NAME,
+ netdev->name)))
+ goto nla_put_failure;
+ }
+ if (devlink_port->type == DEVLINK_PORT_TYPE_IB) {
+ struct ib_device *ibdev = devlink_port->type_dev;
+
+ if (ibdev &&
+ nla_put_string(msg, DEVLINK_ATTR_PORT_IBDEV_NAME,
+ ibdev->name))
+ goto nla_put_failure;
+ }
+ if (devlink_port->split &&
+ nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_GROUP,
+ devlink_port->split_group))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static void devlink_port_notify(struct devlink_port *devlink_port,
+ enum devlink_command cmd)
+{
+ struct devlink *devlink = devlink_port->devlink;
+ struct sk_buff *msg;
+ int err;
+
+ if (!devlink_port->registered)
+ return;
+
+ WARN_ON(cmd != DEVLINK_CMD_PORT_NEW && cmd != DEVLINK_CMD_PORT_DEL);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ err = devlink_nl_port_fill(msg, devlink, devlink_port, cmd, 0, 0, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+ msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+static int devlink_nl_cmd_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct sk_buff *msg;
+ int err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
+ info->snd_portid, info->snd_seq, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct devlink *devlink;
+ int start = cb->args[0];
+ int idx = 0;
+ int err;
+
+ mutex_lock(&devlink_mutex);
+ list_for_each_entry(devlink, &devlink_list, list) {
+ if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+ continue;
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI);
+ if (err)
+ goto out;
+ idx++;
+ }
+out:
+ mutex_unlock(&devlink_mutex);
+
+ cb->args[0] = idx;
+ return msg->len;
+}
+
+static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_port *devlink_port = info->user_ptr[1];
+ struct sk_buff *msg;
+ int err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_port_fill(msg, devlink, devlink_port,
+ DEVLINK_CMD_PORT_NEW,
+ info->snd_portid, info->snd_seq, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct devlink *devlink;
+ struct devlink_port *devlink_port;
+ int start = cb->args[0];
+ int idx = 0;
+ int err;
+
+ mutex_lock(&devlink_mutex);
+ mutex_lock(&devlink_port_mutex);
+ list_for_each_entry(devlink, &devlink_list, list) {
+ if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+ continue;
+ list_for_each_entry(devlink_port, &devlink->port_list, list) {
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_port_fill(msg, devlink, devlink_port,
+ DEVLINK_CMD_PORT_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI);
+ if (err)
+ goto out;
+ idx++;
+ }
+ }
+out:
+ mutex_unlock(&devlink_port_mutex);
+ mutex_unlock(&devlink_mutex);
+
+ cb->args[0] = idx;
+ return msg->len;
+}
+
+static int devlink_port_type_set(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ enum devlink_port_type port_type)
+{
+ int err;
+
+ if (devlink->ops && devlink->ops->port_type_set) {
+ if (port_type == DEVLINK_PORT_TYPE_NOTSET)
+ return -EINVAL;
+ err = devlink->ops->port_type_set(devlink_port, port_type);
+ if (err)
+ return err;
+ devlink_port->desired_type = port_type;
+ devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_port *devlink_port = info->user_ptr[1];
+ int err;
+
+ if (info->attrs[DEVLINK_ATTR_PORT_TYPE]) {
+ enum devlink_port_type port_type;
+
+ port_type = nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_TYPE]);
+ err = devlink_port_type_set(devlink, devlink_port, port_type);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int devlink_port_split(struct devlink *devlink,
+ u32 port_index, u32 count)
+{
+ if (devlink->ops && devlink->ops->port_split)
+ return devlink->ops->port_split(devlink, port_index, count);
+ return -EOPNOTSUPP;
+}
+
+static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ u32 port_index;
+ u32 count;
+
+ if (!info->attrs[DEVLINK_ATTR_PORT_INDEX] ||
+ !info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT])
+ return -EINVAL;
+
+ port_index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+ count = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT]);
+ return devlink_port_split(devlink, port_index, count);
+}
+
+static int devlink_port_unsplit(struct devlink *devlink, u32 port_index)
+{
+ if (devlink->ops && devlink->ops->port_unsplit)
+ return devlink->ops->port_unsplit(devlink, port_index);
+ return -EOPNOTSUPP;
+}
+
+static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ u32 port_index;
+
+ if (!info->attrs[DEVLINK_ATTR_PORT_INDEX])
+ return -EINVAL;
+
+ port_index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+ return devlink_port_unsplit(devlink, port_index);
+}
+
+static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_PORT_TYPE] = { .type = NLA_U16 },
+ [DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
+};
+
+static const struct genl_ops devlink_nl_ops[] = {
+ {
+ .cmd = DEVLINK_CMD_GET,
+ .doit = devlink_nl_cmd_get_doit,
+ .dumpit = devlink_nl_cmd_get_dumpit,
+ .policy = devlink_nl_policy,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_GET,
+ .doit = devlink_nl_cmd_port_get_doit,
+ .dumpit = devlink_nl_cmd_port_get_dumpit,
+ .policy = devlink_nl_policy,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_SET,
+ .doit = devlink_nl_cmd_port_set_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_SPLIT,
+ .doit = devlink_nl_cmd_port_split_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_UNSPLIT,
+ .doit = devlink_nl_cmd_port_unsplit_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+};
+
+/**
+ * devlink_alloc - Allocate new devlink instance resources
+ *
+ * @ops: devlink operations implemented by the driver
+ * @priv_size: size of user private data
+ *
+ * Allocate a new devlink instance together with priv_size bytes of
+ * driver-private data.
+ */
+struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
+{
+ struct devlink *devlink;
+
+ devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
+ if (!devlink)
+ return NULL;
+ devlink->ops = ops;
+ devlink_net_set(devlink, &init_net);
+ INIT_LIST_HEAD(&devlink->port_list);
+ return devlink;
+}
+EXPORT_SYMBOL_GPL(devlink_alloc);
+
+/**
+ * devlink_register - Register devlink instance
+ *
+ * @devlink: devlink
+ */
+int devlink_register(struct devlink *devlink, struct device *dev)
+{
+ mutex_lock(&devlink_mutex);
+ devlink->dev = dev;
+ list_add_tail(&devlink->list, &devlink_list);
+ devlink_notify(devlink, DEVLINK_CMD_NEW);
+ mutex_unlock(&devlink_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_register);
+
+/**
+ * devlink_unregister - Unregister devlink instance
+ *
+ * @devlink: devlink
+ */
+void devlink_unregister(struct devlink *devlink)
+{
+ mutex_lock(&devlink_mutex);
+ devlink_notify(devlink, DEVLINK_CMD_DEL);
+ list_del(&devlink->list);
+ mutex_unlock(&devlink_mutex);
+}
+EXPORT_SYMBOL_GPL(devlink_unregister);
+
+/**
+ * devlink_free - Free devlink instance resources
+ *
+ * @devlink: devlink
+ */
+void devlink_free(struct devlink *devlink)
+{
+ kfree(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_free);
+
+/**
+ * devlink_port_register - Register devlink port
+ *
+ * @devlink: devlink
+ * @devlink_port: devlink port
+ * @port_index: index of the port, unique within the devlink instance
+ *
+ * Register a devlink port with the provided port index. The driver may
+ * use any indexing scheme, even a hardware-related one. The devlink_port
+ * structure is meant to be embedded in the driver's private structure.
+ * Note that the caller should take care of zeroing the devlink_port
+ * structure.
+ */
+int devlink_port_register(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index)
+{
+ mutex_lock(&devlink_port_mutex);
+ if (devlink_port_index_exists(devlink, port_index)) {
+ mutex_unlock(&devlink_port_mutex);
+ return -EEXIST;
+ }
+ devlink_port->devlink = devlink;
+ devlink_port->index = port_index;
+ devlink_port->type = DEVLINK_PORT_TYPE_NOTSET;
+ devlink_port->registered = true;
+ list_add_tail(&devlink_port->list, &devlink->port_list);
+ mutex_unlock(&devlink_port_mutex);
+ devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_port_register);
+
+/**
+ * devlink_port_unregister - Unregister devlink port
+ *
+ * @devlink_port: devlink port
+ */
+void devlink_port_unregister(struct devlink_port *devlink_port)
+{
+ devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
+ mutex_lock(&devlink_port_mutex);
+ list_del(&devlink_port->list);
+ mutex_unlock(&devlink_port_mutex);
+}
+EXPORT_SYMBOL_GPL(devlink_port_unregister);
+
+static void __devlink_port_type_set(struct devlink_port *devlink_port,
+ enum devlink_port_type type,
+ void *type_dev)
+{
+ devlink_port->type = type;
+ devlink_port->type_dev = type_dev;
+ devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+}
+
+/**
+ * devlink_port_type_eth_set - Set port type to Ethernet
+ *
+ * @devlink_port: devlink port
+ * @netdev: related netdevice
+ */
+void devlink_port_type_eth_set(struct devlink_port *devlink_port,
+ struct net_device *netdev)
+{
+ return __devlink_port_type_set(devlink_port,
+ DEVLINK_PORT_TYPE_ETH, netdev);
+}
+EXPORT_SYMBOL_GPL(devlink_port_type_eth_set);
+
+/**
+ * devlink_port_type_ib_set - Set port type to InfiniBand
+ *
+ * @devlink_port: devlink port
+ * @ibdev: related IB device
+ */
+void devlink_port_type_ib_set(struct devlink_port *devlink_port,
+ struct ib_device *ibdev)
+{
+ return __devlink_port_type_set(devlink_port,
+ DEVLINK_PORT_TYPE_IB, ibdev);
+}
+EXPORT_SYMBOL_GPL(devlink_port_type_ib_set);
+
+/**
+ * devlink_port_type_clear - Clear port type
+ *
+ * @devlink_port: devlink port
+ */
+void devlink_port_type_clear(struct devlink_port *devlink_port)
+{
+ return __devlink_port_type_set(devlink_port,
+ DEVLINK_PORT_TYPE_NOTSET, NULL);
+}
+EXPORT_SYMBOL_GPL(devlink_port_type_clear);
+
+/**
+ * devlink_port_split_set - Set port is split
+ *
+ * @devlink_port: devlink port
+ * @split_group: split group - identifies group split port is part of
+ */
+void devlink_port_split_set(struct devlink_port *devlink_port,
+ u32 split_group)
+{
+ devlink_port->split = true;
+ devlink_port->split_group = split_group;
+ devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+}
+EXPORT_SYMBOL_GPL(devlink_port_split_set);
+
+static int __init devlink_module_init(void)
+{
+ return genl_register_family_with_ops_groups(&devlink_nl_family,
+ devlink_nl_ops,
+ devlink_nl_mcgrps);
+}
+
+static void __exit devlink_module_exit(void)
+{
+ genl_unregister_family(&devlink_nl_family);
+}
+
+module_init(devlink_module_init);
+module_exit(devlink_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <[email protected]>");
+MODULE_DESCRIPTION("Network physical device Netlink interface");
+MODULE_ALIAS_GENL_FAMILY(DEVLINK_GENL_NAME);
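
Taken together, the intended driver-side flow is roughly the sketch below.
The devlink_* calls are the ones defined in this file (plus devlink_priv()
from the matching include/net/devlink.h header); all foo_* names are
invented:

static const struct devlink_ops foo_devlink_ops = {
	/* .port_type_set, .port_split and .port_unsplit are all optional */
};

static int foo_probe(struct device *dev, struct net_device *netdev)
{
	struct foo_priv *priv;
	struct devlink *dl;
	int err;

	dl = devlink_alloc(&foo_devlink_ops, sizeof(*priv));
	if (!dl)
		return -ENOMEM;
	priv = devlink_priv(dl);

	err = devlink_register(dl, dev);
	if (err)
		goto err_free;

	/* priv->port is a struct devlink_port, zeroed by devlink_alloc() */
	err = devlink_port_register(dl, &priv->port, 0);
	if (err)
		goto err_unregister;
	devlink_port_type_eth_set(&priv->port, netdev);
	return 0;

err_unregister:
	devlink_unregister(dl);
err_free:
	devlink_free(dl);
	return err;
}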
diff --git a/net/core/filter.c b/net/core/filter.c
index a3aba15a8025..a66dc03c261f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1353,7 +1353,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
unsigned int len = (unsigned int) r4;
void *ptr;
- if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM)))
+ if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
return -EINVAL;
/* bpf verifier guarantees that:
@@ -1384,11 +1384,13 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
if (flags & BPF_F_RECOMPUTE_CSUM)
skb_postpush_rcsum(skb, ptr, len);
+ if (flags & BPF_F_INVALIDATE_HASH)
+ skb_clear_hash(skb);
return 0;
}
-const struct bpf_func_proto bpf_skb_store_bytes_proto = {
+static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
.func = bpf_skb_store_bytes,
.gpl_only = false,
.ret_type = RET_INTEGER,
@@ -1419,7 +1421,7 @@ static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
return 0;
}
-const struct bpf_func_proto bpf_skb_load_bytes_proto = {
+static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
.func = bpf_skb_load_bytes,
.gpl_only = false,
.ret_type = RET_INTEGER,
@@ -1447,6 +1449,12 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
return -EFAULT;
switch (flags & BPF_F_HDR_FIELD_MASK) {
+ case 0:
+ if (unlikely(from != 0))
+ return -EINVAL;
+
+ csum_replace_by_diff(ptr, to);
+ break;
case 2:
csum_replace2(ptr, from, to);
break;
@@ -1464,7 +1472,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
return 0;
}
-const struct bpf_func_proto bpf_l3_csum_replace_proto = {
+static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
.func = bpf_l3_csum_replace,
.gpl_only = false,
.ret_type = RET_INTEGER,
@@ -1523,7 +1531,7 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
return 0;
}
-const struct bpf_func_proto bpf_l4_csum_replace_proto = {
+static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
.func = bpf_l4_csum_replace,
.gpl_only = false,
.ret_type = RET_INTEGER,
@@ -1562,7 +1570,7 @@ static u64 bpf_csum_diff(u64 r1, u64 from_size, u64 r3, u64 to_size, u64 seed)
return csum_partial(sp->diff, diff_size, seed);
}
-const struct bpf_func_proto bpf_csum_diff_proto = {
+static const struct bpf_func_proto bpf_csum_diff_proto = {
.func = bpf_csum_diff,
.gpl_only = false,
.ret_type = RET_INTEGER,
@@ -1597,11 +1605,10 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
}
skb2->dev = dev;
- skb_sender_cpu_clear(skb2);
return dev_queue_xmit(skb2);
}
-const struct bpf_func_proto bpf_clone_redirect_proto = {
+static const struct bpf_func_proto bpf_clone_redirect_proto = {
.func = bpf_clone_redirect,
.gpl_only = false,
.ret_type = RET_INTEGER,
@@ -1650,11 +1657,10 @@ int skb_do_redirect(struct sk_buff *skb)
}
skb->dev = dev;
- skb_sender_cpu_clear(skb);
return dev_queue_xmit(skb);
}
-const struct bpf_func_proto bpf_redirect_proto = {
+static const struct bpf_func_proto bpf_redirect_proto = {
.func = bpf_redirect,
.gpl_only = false,
.ret_type = RET_INTEGER,
@@ -1793,7 +1799,7 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
return 0;
}
-const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
+static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
.func = bpf_skb_get_tunnel_key,
.gpl_only = false,
.ret_type = RET_INTEGER,
@@ -1803,6 +1809,32 @@ const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
.arg4_type = ARG_ANYTHING,
};
+static u64 bpf_skb_get_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
+{
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+ u8 *to = (u8 *) (long) r2;
+ const struct ip_tunnel_info *info = skb_tunnel_info(skb);
+
+ if (unlikely(!info ||
+ !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT)))
+ return -ENOENT;
+ if (unlikely(size < info->options_len))
+ return -ENOMEM;
+
+ ip_tunnel_info_opts_get(to, info);
+
+ return info->options_len;
+}
+
+static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
+ .func = bpf_skb_get_tunnel_opt,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_STACK,
+ .arg3_type = ARG_CONST_STACK_SIZE,
+};
+
static struct metadata_dst __percpu *md_dst;
static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
@@ -1813,7 +1845,8 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
u8 compat[sizeof(struct bpf_tunnel_key)];
struct ip_tunnel_info *info;
- if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6)))
+ if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
+ BPF_F_DONT_FRAGMENT)))
return -EINVAL;
if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
switch (size) {
@@ -1837,7 +1870,10 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
info = &md->u.tun_info;
info->mode = IP_TUNNEL_INFO_TX;
- info->key.tun_flags = TUNNEL_KEY;
+ info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
+ if (flags & BPF_F_DONT_FRAGMENT)
+ info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
+
info->key.tun_id = cpu_to_be64(from->tunnel_id);
info->key.tos = from->tunnel_tos;
info->key.ttl = from->tunnel_ttl;
@@ -1848,12 +1884,14 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
sizeof(from->remote_ipv6));
} else {
info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
+ if (flags & BPF_F_ZERO_CSUM_TX)
+ info->key.tun_flags &= ~TUNNEL_CSUM;
}
return 0;
}
-const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
+static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
.func = bpf_skb_set_tunnel_key,
.gpl_only = false,
.ret_type = RET_INTEGER,
@@ -1863,17 +1901,58 @@ const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
.arg4_type = ARG_ANYTHING,
};
-static const struct bpf_func_proto *bpf_get_skb_set_tunnel_key_proto(void)
+#define BPF_TUNLEN_MAX 255
+
+static u64 bpf_skb_set_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
+{
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+ u8 *from = (u8 *) (long) r2;
+ struct ip_tunnel_info *info = skb_tunnel_info(skb);
+ const struct metadata_dst *md = this_cpu_ptr(md_dst);
+
+ if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
+ return -EINVAL;
+ if (unlikely(size > BPF_TUNLEN_MAX))
+ return -ENOMEM;
+
+ ip_tunnel_info_opts_set(info, from, size);
+
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
+ .func = bpf_skb_set_tunnel_opt,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_STACK,
+ .arg3_type = ARG_CONST_STACK_SIZE,
+};
+
+static const struct bpf_func_proto *
+bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
{
if (!md_dst) {
- /* race is not possible, since it's called from
- * verifier that is holding verifier mutex
+ BUILD_BUG_ON(FIELD_SIZEOF(struct ip_tunnel_info,
+ options_len) != 1);
+
+ /* Race is not possible, since it's called from verifier
+ * that is holding verifier mutex.
*/
- md_dst = metadata_dst_alloc_percpu(0, GFP_KERNEL);
+ md_dst = metadata_dst_alloc_percpu(BPF_TUNLEN_MAX,
+ GFP_KERNEL);
if (!md_dst)
return NULL;
}
- return &bpf_skb_set_tunnel_key_proto;
+
+ switch (which) {
+ case BPF_FUNC_skb_set_tunnel_key:
+ return &bpf_skb_set_tunnel_key_proto;
+ case BPF_FUNC_skb_set_tunnel_opt:
+ return &bpf_skb_set_tunnel_opt_proto;
+ default:
+ return NULL;
+ }
}
static const struct bpf_func_proto *
@@ -1927,7 +2006,11 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_skb_get_tunnel_key:
return &bpf_skb_get_tunnel_key_proto;
case BPF_FUNC_skb_set_tunnel_key:
- return bpf_get_skb_set_tunnel_key_proto();
+ return bpf_get_skb_set_tunnel_proto(func_id);
+ case BPF_FUNC_skb_get_tunnel_opt:
+ return &bpf_skb_get_tunnel_opt_proto;
+ case BPF_FUNC_skb_set_tunnel_opt:
+ return bpf_get_skb_set_tunnel_proto(func_id);
case BPF_FUNC_redirect:
return &bpf_redirect_proto;
case BPF_FUNC_get_route_realm:
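
From the program side, the new helpers and flags compose as in this hedged
sketch of a tc classifier attached to a collect-metadata tunnel device
(helper wrappers as in samples/bpf/bpf_helpers.h are assumed; addresses and
option bytes are made up):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("classifier")
int set_tunnel(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};
	__u8 opt[8] = { 0xde, 0xad, 0xbe, 0xef };

	key.tunnel_id = 42;
	key.remote_ipv4 = 0xac100164;	/* 172.16.1.100, host byte order */
	key.tunnel_ttl = 64;

	/* DF on the outer header, no UDP checksum on transmit */
	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
				   BPF_F_ZERO_CSUM_TX | BPF_F_DONT_FRAGMENT))
		return TC_ACT_SHOT;

	/* size must be a multiple of 4, at most BPF_TUNLEN_MAX (255) */
	if (bpf_skb_set_tunnel_opt(skb, opt, sizeof(opt)))
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";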
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1474cfd2dc1c..20999aa596dd 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2856,7 +2856,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
*vlan_encapsulated_proto = htons(ETH_P_IP);
}
- skb_set_mac_header(skb, 0);
+ skb_reset_mac_header(skb);
skb_set_network_header(skb, skb->len);
iph = (struct iphdr *) skb_put(skb, sizeof(struct iphdr));
@@ -2983,7 +2983,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
*vlan_encapsulated_proto = htons(ETH_P_IPV6);
}
- skb_set_mac_header(skb, 0);
+ skb_reset_mac_header(skb);
skb_set_network_header(skb, skb->len);
iph = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 62737f437c8e..d2d9e5ebf58e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1391,15 +1391,6 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
[IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
};
-static const struct nla_policy ifla_vf_stats_policy[IFLA_VF_STATS_MAX + 1] = {
- [IFLA_VF_STATS_RX_PACKETS] = { .type = NLA_U64 },
- [IFLA_VF_STATS_TX_PACKETS] = { .type = NLA_U64 },
- [IFLA_VF_STATS_RX_BYTES] = { .type = NLA_U64 },
- [IFLA_VF_STATS_TX_BYTES] = { .type = NLA_U64 },
- [IFLA_VF_STATS_BROADCAST] = { .type = NLA_U64 },
- [IFLA_VF_STATS_MULTICAST] = { .type = NLA_U64 },
-};
-
static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
[IFLA_PORT_VF] = { .type = NLA_U32 },
[IFLA_PORT_PROFILE] = { .type = NLA_STRING,
@@ -2979,6 +2970,7 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb,
nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc);
out:
netif_addr_unlock_bh(dev);
+ cb->args[1] = err;
return idx;
}
EXPORT_SYMBOL(ndo_dflt_fdb_dump);
@@ -3012,6 +3004,7 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
ops = br_dev->netdev_ops;
}
+ cb->args[1] = 0;
for_each_netdev(net, dev) {
if (brport_idx && (dev->ifindex != brport_idx))
continue;
@@ -3039,12 +3032,16 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
idx = cops->ndo_fdb_dump(skb, cb, br_dev, dev,
idx);
}
+ if (cb->args[1] == -EMSGSIZE)
+ break;
if (dev->netdev_ops->ndo_fdb_dump)
idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, NULL,
idx);
else
idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
+ if (cb->args[1] == -EMSGSIZE)
+ break;
cops = NULL;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 488566b09c6d..9d7be61e5e6b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3023,6 +3023,24 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
EXPORT_SYMBOL_GPL(skb_append_pagefrags);
/**
+ * skb_push_rcsum - push skb and update receive checksum
+ * @skb: buffer to update
+ * @len: length of data pushed
+ *
+ * This function performs an skb_push on the packet and updates
+ * the CHECKSUM_COMPLETE checksum. It should be used on
+ * receive path processing instead of skb_push unless you know
+ * that the checksum difference is zero (e.g., a valid IP header)
+ * or you are setting ip_summed to CHECKSUM_NONE.
+ */
+static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
+{
+ skb_push(skb, len);
+ skb_postpush_rcsum(skb, skb->data, len);
+ return skb->data;
+}
+
+/**
* skb_pull_rcsum - pull skb and update receive checksum
* @skb: buffer to update
* @len: length of data pulled
@@ -4167,9 +4185,9 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
if (!pskb_may_pull(skb_chk, offset))
goto err;
- __skb_pull(skb_chk, offset);
+ skb_pull_rcsum(skb_chk, offset);
ret = skb_chkf(skb_chk);
- __skb_push(skb_chk, offset);
+ skb_push_rcsum(skb_chk, offset);
if (ret)
goto err;
@@ -4302,7 +4320,6 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
skb->skb_iif = 0;
skb->ignore_df = 0;
skb_dst_drop(skb);
- skb_sender_cpu_clear(skb);
secpath_reset(skb);
nf_reset(skb);
nf_reset_trace(skb);
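
Why swapping the bare __skb_pull()/__skb_push() pair for the rcsum variants
is a fix rather than a cleanup: with CHECKSUM_COMPLETE, skb->csum must cover
exactly the bytes currently between skb->data and the tail. Schematically
(my paraphrase of the invariant, not code from this patch):

	/* CHECKSUM_COMPLETE invariant: skb->csum == csum(skb->data, skb->len) */
	skb_pull_rcsum(skb_chk, offset);   /* data += offset, csum -= csum of pulled bytes */
	ret = skb_chkf(skb_chk);           /* callback sees a consistent skb */
	skb_push_rcsum(skb_chk, offset);   /* data -= offset, csum += csum of pushed bytes */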
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index cde29239b60d..27bf03d11670 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -317,6 +317,24 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state)
return ret;
}
+static int dsa_slave_vlan_filtering(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+
+ /* bridge skips -EOPNOTSUPP, so skip the prepare phase */
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (ds->drv->port_vlan_filtering)
+ return ds->drv->port_vlan_filtering(ds, p->port,
+ attr->u.vlan_filtering);
+
+ return 0;
+}
+
static int dsa_slave_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans)
@@ -333,6 +351,9 @@ static int dsa_slave_port_attr_set(struct net_device *dev,
ret = ds->drv->port_stp_update(ds, p->port,
attr->u.stp_state);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ ret = dsa_slave_vlan_filtering(dev, attr, trans);
+ break;
default:
ret = -EOPNOTSUPP;
break;
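
On the switch-driver side this maps to one new hook in dsa_switch_driver. A
hedged sketch of an implementation; the signature follows the
dsa_slave_vlan_filtering() caller above, while the register names are
invented:

static int foo_port_vlan_filtering(struct dsa_switch *ds, int port,
				   bool vlan_filtering)
{
	u16 mode = vlan_filtering ? FOO_8021Q_MODE_SECURE
				  : FOO_8021Q_MODE_DISABLED;

	/* flip the per-port 802.1Q enforcement mode in hardware */
	return foo_port_write(ds, port, FOO_REG_8021Q_CTRL, mode);
}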
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 737c87a2a41e..0023c9048812 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -207,7 +207,7 @@ static int lowpan_device_event(struct notifier_block *unused,
struct net_device *wdev = netdev_notifier_info_to_dev(ptr);
if (wdev->type != ARPHRD_IEEE802154)
- goto out;
+ return NOTIFY_DONE;
switch (event) {
case NETDEV_UNREGISTER:
@@ -219,11 +219,10 @@ static int lowpan_device_event(struct notifier_block *unused,
lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL);
break;
default:
- break;
+ return NOTIFY_DONE;
}
-out:
- return NOTIFY_DONE;
+ return NOTIFY_OK;
}
static struct notifier_block lowpan_dev_notifier = {
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 209d1ed28954..0cc923f83e10 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1380,6 +1380,32 @@ out:
return pp;
}
+#define SECONDS_PER_DAY 86400
+
+/**
+ * inet_current_timestamp - Return IP network timestamp
+ *
+ * Return milliseconds since midnight in network byte order.
+ */
+__be32 inet_current_timestamp(void)
+{
+ u32 secs;
+ u32 msecs;
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+
+ /* Get secs since midnight. */
+ (void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs);
+ /* Convert to msecs. */
+ msecs = secs * MSEC_PER_SEC;
+ /* Convert nsec to msec. */
+ msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC;
+
+ /* Convert to network byte order. */
+ return htonl(msecs);
+}
+EXPORT_SYMBOL(inet_current_timestamp);
+
int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
if (sk->sk_family == AF_INET)
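
A worked example of the value inet_current_timestamp() returns (my
arithmetic, not from the patch), matching the 32-bit big-endian
"milliseconds since midnight UT" format that RFC 792 timestamp messages and
the IP timestamp option expect:

	/* at 01:02:03.004 UTC:
	 *   secs  = 3723                     (seconds after midnight)
	 *   msecs = 3723 * 1000 + 4 = 3723004
	 *   wire  = htonl(3723004), i.e. bytes 00 38 ce fc on the wire
	 */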
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index c102eb5ac55c..c34c7544d1db 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -665,7 +665,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
*/
if (!in_dev)
- goto out;
+ goto out_free_skb;
arp = arp_hdr(skb);
@@ -673,7 +673,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
default:
if (arp->ar_pro != htons(ETH_P_IP) ||
htons(dev_type) != arp->ar_hrd)
- goto out;
+ goto out_free_skb;
break;
case ARPHRD_ETHER:
case ARPHRD_FDDI:
@@ -690,17 +690,17 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
arp->ar_pro != htons(ETH_P_IP))
- goto out;
+ goto out_free_skb;
break;
case ARPHRD_AX25:
if (arp->ar_pro != htons(AX25_P_IP) ||
arp->ar_hrd != htons(ARPHRD_AX25))
- goto out;
+ goto out_free_skb;
break;
case ARPHRD_NETROM:
if (arp->ar_pro != htons(AX25_P_IP) ||
arp->ar_hrd != htons(ARPHRD_NETROM))
- goto out;
+ goto out_free_skb;
break;
}
@@ -708,7 +708,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
if (arp->ar_op != htons(ARPOP_REPLY) &&
arp->ar_op != htons(ARPOP_REQUEST))
- goto out;
+ goto out_free_skb;
/*
* Extract fields
@@ -733,7 +733,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
*/
if (ipv4_is_multicast(tip) ||
(!IN_DEV_ROUTE_LOCALNET(in_dev) && ipv4_is_loopback(tip)))
- goto out;
+ goto out_free_skb;
/*
* For some 802.11 wireless deployments (and possibly other networks),
@@ -741,7 +741,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
* and thus should not be accepted.
*/
if (sip == tip && IN_DEV_ORCONF(in_dev, DROP_GRATUITOUS_ARP))
- goto out;
+ goto out_free_skb;
/*
* Special case: We must set Frame Relay source Q.922 address
@@ -778,7 +778,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
!arp_ignore(in_dev, sip, tip))
arp_send_dst(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip,
sha, dev->dev_addr, sha, reply_dst);
- goto out;
+ goto out_consume_skb;
}
if (arp->ar_op == htons(ARPOP_REQUEST) &&
@@ -803,7 +803,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
neigh_release(n);
}
}
- goto out;
+ goto out_consume_skb;
} else if (IN_DEV_FORWARD(in_dev)) {
if (addr_type == RTN_UNICAST &&
(arp_fwd_proxy(in_dev, dev, rt) ||
@@ -826,7 +826,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
in_dev->arp_parms, skb);
goto out_free_dst;
}
- goto out;
+ goto out_consume_skb;
}
}
}
@@ -876,11 +876,16 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
neigh_release(n);
}
-out:
+out_consume_skb:
consume_skb(skb);
+
out_free_dst:
dst_release(reply_dst);
- return 0;
+ return NET_RX_SUCCESS;
+
+out_free_skb:
+ kfree_skb(skb);
+ return NET_RX_DROP;
}
static void parp_redo(struct sk_buff *skb)
@@ -924,11 +929,11 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
consumeskb:
consume_skb(skb);
- return 0;
+ return NET_RX_SUCCESS;
freeskb:
kfree_skb(skb);
out_of_mem:
- return 0;
+ return NET_RX_DROP;
}
/*
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 36e26977c908..6333489771ed 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -931,7 +931,6 @@ static bool icmp_echo(struct sk_buff *skb)
*/
static bool icmp_timestamp(struct sk_buff *skb)
{
- struct timespec tv;
struct icmp_bxm icmp_param;
/*
* Too short.
@@ -942,9 +941,7 @@ static bool icmp_timestamp(struct sk_buff *skb)
/*
* Fill in the current time as ms since midnight UT:
*/
- getnstimeofday(&tv);
- icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC +
- tv.tv_nsec / NSEC_PER_MSEC);
+ icmp_param.data.times[1] = inet_current_timestamp();
icmp_param.data.times[2] = icmp_param.data.times[1];
if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4))
BUG();
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 2aea9f1a2a31..9b4ca87f70ba 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -350,9 +350,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
skb_dst_set(skb, &rt->dst);
skb->dev = dev;
- skb->reserved_tailroom = skb_end_offset(skb) -
- min(mtu, skb_end_offset(skb));
skb_reserve(skb, hlen);
+ skb_tailroom_reserve(skb, mtu, tlen);
skb_reset_network_header(skb);
pip = ip_hdr(skb);
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index da0d7ce85844..af18f1e4889e 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -71,7 +71,6 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *s
if (unlikely(opt->optlen))
ip_forward_options(skb);
- skb_sender_cpu_clear(skb);
return dst_output(net, sk, skb);
}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 202437d6087b..31936d387cfd 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -527,11 +527,12 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
+ struct rtable *rt = NULL;
struct flowi4 fl;
- struct rtable *rt;
int min_headroom;
int tunnel_hlen;
__be16 df, flags;
+ bool use_cache;
int err;
tun_info = skb_tunnel_info(skb);
@@ -540,13 +541,14 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
goto err_free_skb;
key = &tun_info->key;
- rt = !skb->mark ? dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr) :
- NULL;
+ use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
+ if (use_cache)
+ rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr);
if (!rt) {
rt = gre_get_rt(skb, dev, &fl, key);
if (IS_ERR(rt))
goto err_free_skb;
- if (!skb->mark)
+ if (use_cache)
dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
fl.saddr);
}
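
ip_tunnel_dst_cache_usable() is introduced elsewhere in this same fix; based
on the skb->mark behavior it replaces here and the TUNNEL_NOCACHE flag set
by the bpf_skb_set_tunnel_key() hunk in net/core/filter.c above, it is
presumably defined along these lines (a sketch, not the authoritative
definition):

static inline bool ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
					      const struct ip_tunnel_info *info)
{
	if (skb->mark)		/* mark may steer to a different route */
		return false;
	if (!info)
		return true;
	if (info->key.tun_flags & TUNNEL_NOCACHE)
		return false;	/* e.g. keys rewritten per packet by bpf */

	return true;
}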
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index bd246792360b..4d158ff1def1 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -58,10 +58,9 @@ void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
if (opt->ts_needaddr)
ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, skb, rt);
if (opt->ts_needtime) {
- struct timespec tv;
__be32 midtime;
- getnstimeofday(&tv);
- midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
+
+ midtime = inet_current_timestamp();
memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4);
}
return;
@@ -415,11 +414,10 @@ int ip_options_compile(struct net *net,
break;
}
if (timeptr) {
- struct timespec tv;
- u32 midtime;
- getnstimeofday(&tv);
- midtime = (tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC;
- put_unaligned_be32(midtime, timeptr);
+ __be32 midtime;
+
+ midtime = inet_current_timestamp();
+ memcpy(timeptr, &midtime, 4);
opt->is_changed = 1;
}
} else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) {
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index f734c42acdaf..124bf0a66328 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1233,13 +1233,16 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
if (!skb)
return -EINVAL;
- cork->length += size;
if ((size + skb->len > mtu) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return -EOPNOTSUPP;
+
skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
}
+ cork->length += size;
while (size > 0) {
if (skb_is_gso(skb)) {
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index dff8a05739a2..6aad0192443d 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -607,6 +607,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
connected = (tunnel->parms.iph.daddr != 0);
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
dst = tnl_params->daddr;
if (dst == 0) {
/* NBMA tunnel */
@@ -706,7 +708,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
tunnel->err_count--;
- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
dst_link_failure(skb);
} else
tunnel->err_count = 0;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 76dce90c4581..cf9700b1a106 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -1142,13 +1142,6 @@ static int ping_v4_seq_show(struct seq_file *seq, void *v)
return 0;
}
-static const struct seq_operations ping_v4_seq_ops = {
- .show = ping_v4_seq_show,
- .start = ping_v4_seq_start,
- .next = ping_seq_next,
- .stop = ping_seq_stop,
-};
-
static int ping_seq_open(struct inode *inode, struct file *file)
{
struct ping_seq_afinfo *afinfo = PDE_DATA(inode);
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index c26241f3057b..7b7eec439906 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -551,7 +551,7 @@ reset:
*/
if (crtt > tp->srtt_us) {
/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
- crtt /= 8 * USEC_PER_MSEC;
+ crtt /= 8 * USEC_PER_SEC / HZ;
inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
} else if (tp->srtt_us == 0) {
/* RFC6298: 5.7 We've failed to get a valid RTT sample from
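
The unit bug fixed here is easiest to see with numbers (my arithmetic).
crtt, like tp->srtt_us, holds microseconds left-shifted by 3, while
icsk_rto is in jiffies:

	/* cached RTT = 200 ms, so crtt = 200000 << 3 = 1600000
	 *
	 * new: crtt /= 8 * USEC_PER_SEC / HZ
	 *   HZ=1000: 1600000 / 8000  = 200 jiffies = 200 ms  (correct)
	 *   HZ=250:  1600000 / 32000 =  50 jiffies = 200 ms  (correct)
	 *
	 * old: crtt /= 8 * USEC_PER_MSEC (= 8000)
	 *   only matched by accident when HZ == 1000; at HZ == 250 the
	 *   resulting 200 jiffies meant 800 ms, four times too long.
	 */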
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index fadd8b978951..ae90e4b34bd3 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -452,7 +452,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->rcv_wup = newtp->copied_seq =
newtp->rcv_nxt = treq->rcv_isn + 1;
- newtp->segs_in = 0;
+ newtp->segs_in = 1;
newtp->snd_sml = newtp->snd_una =
newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
@@ -812,6 +812,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
int ret = 0;
int state = child->sk_state;
+ tcp_sk(child)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
if (!sock_owned_by_user(child)) {
ret = tcp_rcv_state_process(child, skb);
/* Wakeup parent, send SIGIO */
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index ebf5ff57526e..f6c50af24a64 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -187,13 +187,13 @@ static int tcpprobe_sprint(char *tbuf, int n)
{
const struct tcp_log *p
= tcp_probe.log + tcp_probe.tail;
- struct timespec tv
- = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
+ struct timespec64 ts
+ = ktime_to_timespec64(ktime_sub(p->tstamp, tcp_probe.start));
return scnprintf(tbuf, n,
"%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n",
- (unsigned long)tv.tv_sec,
- (unsigned long)tv.tv_nsec,
+ (unsigned long)ts.tv_sec,
+ (unsigned long)ts.tv_nsec,
&p->src, &p->dst, p->length, p->snd_nxt, p->snd_una,
p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd);
}
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 0ec08814f37d..96599d1a1318 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -89,6 +89,8 @@ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb
uh->source = src_port;
uh->len = htons(skb->len);
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
udp_set_csum(nocheck, skb, src, dst, skb->len);
iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet);
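
The same one-line memset recurs in the ip_tunnel.c hunk above and in the
ip6_gre.c/ip6_tunnel.c hunks below, and the shared cause is worth spelling
out: IPCB() is only a cast over skb->cb, so whatever the inner packet's
receive path parsed into it (notably IP options) is still sitting there when
the tunnel starts building the outer header:

	/* from include/net/ip.h */
	#define IPCB(skb) ((struct inet_skb_parm *)((skb)->cb))

	/* hence, before handing the skb to the outer IP output path: */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));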
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a2d6f6c242af..8c0dab2de5c9 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3170,9 +3170,35 @@ static void addrconf_gre_config(struct net_device *dev)
}
#endif
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+/* If the host route is cached on the addr struct, make sure it is associated
+ * with the proper table; e.g., enslavement can change, and if so the cached
+ * host route needs to move to the new table.
+ */
+static void l3mdev_check_host_rt(struct inet6_dev *idev,
+ struct inet6_ifaddr *ifp)
+{
+ if (ifp->rt) {
+ u32 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
+
+ if (tb_id != ifp->rt->rt6i_table->tb6_id) {
+ ip6_del_rt(ifp->rt);
+ ifp->rt = NULL;
+ }
+ }
+}
+#else
+static void l3mdev_check_host_rt(struct inet6_dev *idev,
+ struct inet6_ifaddr *ifp)
+{
+}
+#endif
+
static int fixup_permanent_addr(struct inet6_dev *idev,
struct inet6_ifaddr *ifp)
{
+ l3mdev_check_host_rt(idev, ifp);
+
if (!ifp->rt) {
struct rt6_info *rt;
@@ -5293,12 +5319,10 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
if (rt)
ip6_del_rt(rt);
}
- if (ifp->rt) {
- dst_hold(&ifp->rt->dst);
- ip6_del_rt(ifp->rt);
- ifp->rt = NULL;
- }
+ dst_hold(&ifp->rt->dst);
+
+ ip6_del_rt(ifp->rt);
rt_genid_bump_ipv6(net);
break;
}
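
One reading aid for the new l3mdev_check_host_rt() above: the table-id line
uses the GCC "?:" extension, where x ? : y evaluates x once and falls back
to y only when x is zero. It is shorthand for:

	u32 tb_id = l3mdev_fib_table(idev->dev);

	if (!tb_id)
		tb_id = RT6_TABLE_LOCAL;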
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 5c5d23e59da5..9508a20fbf61 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -257,7 +257,11 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
*fragoff = _frag_off;
return hp->nexthdr;
}
- return -ENOENT;
+ if (!found)
+ return -ENOENT;
+ if (fragoff)
+ *fragoff = _frag_off;
+ break;
}
hdrlen = 8;
} else if (nexthdr == NEXTHDR_AUTH) {
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index f7c9560b75fa..4e636e60a360 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -777,6 +777,8 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
__u32 mtu;
int err;
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
encap_limit = t->parms.encap_limit;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index a163102f1803..9428345d3a07 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -332,7 +332,6 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
static inline int ip6_forward_finish(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
- skb_sender_cpu_clear(skb);
return dst_output(net, sk, skb);
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 3f3aabd2f07b..eb2ac4bb09ce 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1089,6 +1089,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
tproto = ACCESS_ONCE(t->parms.proto);
if (tproto != IPPROTO_IPIP && tproto != 0)
return -1;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 5ee56d0a8699..d64ee7e83664 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1574,9 +1574,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
return NULL;
skb->priority = TC_PRIO_CONTROL;
- skb->reserved_tailroom = skb_end_offset(skb) -
- min(mtu, skb_end_offset(skb));
skb_reserve(skb, hlen);
+ skb_tailroom_reserve(skb, mtu, tlen);
if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
/* <draft-ietf-magma-mld-source-05.txt>:
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 0711f8fe4d44..fd25e447a5fa 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -922,11 +922,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
ret = udpv6_queue_rcv_skb(sk, skb);
sock_put(sk);
- /* a return value > 0 means to resubmit the input, but
- * it wants the return to be -protocol, or 0
- */
+ /* a return value > 0 means to resubmit the input */
if (ret > 0)
- return -ret;
+ return ret;
return 0;
}
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 10ad4ac1fa0b..3a8f881b22f1 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -7,6 +7,7 @@
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007, Michael Wu <[email protected]>
* Copyright 2007-2010, Intel Corporation
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -61,16 +62,25 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
{
struct ieee80211_local *local = sta->local;
struct tid_ampdu_rx *tid_rx;
+ struct ieee80211_ampdu_params params = {
+ .sta = &sta->sta,
+ .action = IEEE80211_AMPDU_RX_STOP,
+ .tid = tid,
+ .amsdu = false,
+ .timeout = 0,
+ .ssn = 0,
+ };
lockdep_assert_held(&sta->ampdu_mlme.mtx);
tid_rx = rcu_dereference_protected(sta->ampdu_mlme.tid_rx[tid],
lockdep_is_held(&sta->ampdu_mlme.mtx));
- if (!tid_rx)
+ if (!test_bit(tid, sta->ampdu_mlme.agg_session_valid))
return;
RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
+ __clear_bit(tid, sta->ampdu_mlme.agg_session_valid);
ht_dbg(sta->sdata,
"Rx BA session stop requested for %pM tid %u %s reason: %d\n",
@@ -78,8 +88,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
initiator == WLAN_BACK_RECIPIENT ? "recipient" : "inititator",
(int)reason);
- if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
- &sta->sta, tid, NULL, 0, false))
+ if (drv_ampdu_action(local, sta->sdata, &params))
sdata_info(sta->sdata,
"HW problem - can not stop rx aggregation for %pM tid %d\n",
sta->sta.addr, tid);
@@ -89,6 +98,13 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
ieee80211_send_delba(sta->sdata, sta->sta.addr,
tid, WLAN_BACK_RECIPIENT, reason);
+ /*
+ * return here in case tid_rx is not assigned - which will happen if
+ * IEEE80211_HW_SUPPORTS_REORDERING_BUFFER is set.
+ */
+ if (!tid_rx)
+ return;
+
del_timer_sync(&tid_rx->session_timer);
/* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */
@@ -237,6 +253,15 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
{
struct ieee80211_local *local = sta->sdata->local;
struct tid_ampdu_rx *tid_agg_rx;
+ struct ieee80211_ampdu_params params = {
+ .sta = &sta->sta,
+ .action = IEEE80211_AMPDU_RX_START,
+ .tid = tid,
+ .amsdu = false,
+ .timeout = timeout,
+ .ssn = start_seq_num,
+ };
+
int i, ret = -EOPNOTSUPP;
u16 status = WLAN_STATUS_REQUEST_DECLINED;
@@ -275,11 +300,12 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
/* make sure the size doesn't exceed the maximum supported by the hw */
if (buf_size > local->hw.max_rx_aggregation_subframes)
buf_size = local->hw.max_rx_aggregation_subframes;
+ params.buf_size = buf_size;
/* examine state machine */
mutex_lock(&sta->ampdu_mlme.mtx);
- if (sta->ampdu_mlme.tid_rx[tid]) {
+ if (test_bit(tid, sta->ampdu_mlme.agg_session_valid)) {
ht_dbg_ratelimited(sta->sdata,
"unexpected AddBA Req from %pM on tid %u\n",
sta->sta.addr, tid);
@@ -290,8 +316,18 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
false);
}
+ if (ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) {
+ ret = drv_ampdu_action(local, sta->sdata, &params);
+ ht_dbg(sta->sdata,
+ "Rx A-MPDU request on %pM tid %d result %d\n",
+ sta->sta.addr, tid, ret);
+ if (!ret)
+ status = WLAN_STATUS_SUCCESS;
+ goto end;
+ }
+
/* prepare A-MPDU MLME for Rx aggregation */
- tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
+ tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL);
if (!tid_agg_rx)
goto end;
@@ -322,8 +358,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
for (i = 0; i < buf_size; i++)
__skb_queue_head_init(&tid_agg_rx->reorder_buf[i]);
- ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
- &sta->sta, tid, &start_seq_num, 0, false);
+ ret = drv_ampdu_action(local, sta->sdata, &params);
ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n",
sta->sta.addr, tid, ret);
if (ret) {
@@ -341,6 +376,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
tid_agg_rx->timeout = timeout;
tid_agg_rx->stored_mpdu_num = 0;
tid_agg_rx->auto_seq = auto_seq;
+ tid_agg_rx->reorder_buf_filtered = 0;
status = WLAN_STATUS_SUCCESS;
/* activate it for RX */
@@ -352,6 +388,8 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
}
end:
+ if (status == WLAN_STATUS_SUCCESS)
+ __set_bit(tid, sta->ampdu_mlme.agg_session_valid);
mutex_unlock(&sta->ampdu_mlme.mtx);
end_no_lock:
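
Across agg-rx.c and agg-tx.c, this series collapses the six trailing arguments of drv_ampdu_action() into one parameter struct. Reconstructed from the initializers in the hunks above (the field order here is a guess), the type added to include/net/mac80211.h looks roughly like:

struct ieee80211_ampdu_params {
	enum ieee80211_ampdu_mlme_action action;
	struct ieee80211_sta *sta;
	u16 tid;
	u16 ssn;
	u8 buf_size;
	bool amsdu;
	u16 timeout;
};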
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index ff757181b0a8..4932e9f243a2 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -7,6 +7,7 @@
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007, Michael Wu <[email protected]>
* Copyright 2007-2010, Intel Corporation
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -295,7 +296,14 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
{
struct ieee80211_local *local = sta->local;
struct tid_ampdu_tx *tid_tx;
- enum ieee80211_ampdu_mlme_action action;
+ struct ieee80211_ampdu_params params = {
+ .sta = &sta->sta,
+ .tid = tid,
+ .buf_size = 0,
+ .amsdu = false,
+ .timeout = 0,
+ .ssn = 0,
+ };
int ret;
lockdep_assert_held(&sta->ampdu_mlme.mtx);
@@ -304,10 +312,10 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
case AGG_STOP_DECLINED:
case AGG_STOP_LOCAL_REQUEST:
case AGG_STOP_PEER_REQUEST:
- action = IEEE80211_AMPDU_TX_STOP_CONT;
+ params.action = IEEE80211_AMPDU_TX_STOP_CONT;
break;
case AGG_STOP_DESTROY_STA:
- action = IEEE80211_AMPDU_TX_STOP_FLUSH;
+ params.action = IEEE80211_AMPDU_TX_STOP_FLUSH;
break;
default:
WARN_ON_ONCE(1);
@@ -330,9 +338,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
spin_unlock_bh(&sta->lock);
if (reason != AGG_STOP_DESTROY_STA)
return -EALREADY;
- ret = drv_ampdu_action(local, sta->sdata,
- IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
- &sta->sta, tid, NULL, 0, false);
+ params.action = IEEE80211_AMPDU_TX_STOP_FLUSH_CONT;
+ ret = drv_ampdu_action(local, sta->sdata, &params);
WARN_ON_ONCE(ret);
return 0;
}
@@ -381,8 +388,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
WLAN_BACK_INITIATOR;
tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;
- ret = drv_ampdu_action(local, sta->sdata, action,
- &sta->sta, tid, NULL, 0, false);
+ ret = drv_ampdu_action(local, sta->sdata, &params);
/* HW shall not deny going back to legacy */
if (WARN_ON(ret)) {
@@ -445,7 +451,14 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
struct tid_ampdu_tx *tid_tx;
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- u16 start_seq_num;
+ struct ieee80211_ampdu_params params = {
+ .sta = &sta->sta,
+ .action = IEEE80211_AMPDU_TX_START,
+ .tid = tid,
+ .buf_size = 0,
+ .amsdu = false,
+ .timeout = 0,
+ };
int ret;
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
@@ -467,10 +480,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
*/
synchronize_net();
- start_seq_num = sta->tid_seq[tid] >> 4;
-
- ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
- &sta->sta, tid, &start_seq_num, 0, false);
+ params.ssn = sta->tid_seq[tid] >> 4;
+ ret = drv_ampdu_action(local, sdata, &params);
if (ret) {
ht_dbg(sdata,
"BA request denied - HW unavailable for %pM tid %d\n",
@@ -499,7 +510,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
/* send AddBA request */
ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
- tid_tx->dialog_token, start_seq_num,
+ tid_tx->dialog_token, params.ssn,
IEEE80211_MAX_AMPDU_BUF,
tid_tx->timeout);
}
@@ -684,18 +695,24 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
struct sta_info *sta, u16 tid)
{
struct tid_ampdu_tx *tid_tx;
+ struct ieee80211_ampdu_params params = {
+ .sta = &sta->sta,
+ .action = IEEE80211_AMPDU_TX_OPERATIONAL,
+ .tid = tid,
+ .timeout = 0,
+ .ssn = 0,
+ };
lockdep_assert_held(&sta->ampdu_mlme.mtx);
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+ params.buf_size = tid_tx->buf_size;
+ params.amsdu = tid_tx->amsdu;
ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
sta->sta.addr, tid);
- drv_ampdu_action(local, sta->sdata,
- IEEE80211_AMPDU_TX_OPERATIONAL,
- &sta->sta, tid, NULL, tid_tx->buf_size,
- tid_tx->amsdu);
+ drv_ampdu_action(local, sta->sdata, &params);
/*
* synchronize with TX path, while splicing the TX path
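
On the driver side, every .ampdu_action implementation now receives the same struct. A hypothetical callback under the new signature might look like the sketch below; the demo_* helpers are invented for illustration, while the mac80211 types and ieee80211_start_tx_ba_cb_irqsafe() are real:

static int demo_ampdu_action(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_ampdu_params *params)
{
	switch (params->action) {
	case IEEE80211_AMPDU_RX_START:
		/* hypothetical hardware hook */
		return demo_rx_agg_start(hw, params->sta, params->tid,
					 params->ssn, params->buf_size);
	case IEEE80211_AMPDU_RX_STOP:
		return demo_rx_agg_stop(hw, params->sta, params->tid);
	case IEEE80211_AMPDU_TX_START:
		ieee80211_start_tx_ba_cb_irqsafe(vif, params->sta->addr,
						 params->tid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}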
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 166a29fe6c35..fe1704c4e8fb 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -339,8 +339,9 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_TKIP:
- iv32 = key->u.tkip.tx.iv32;
- iv16 = key->u.tkip.tx.iv16;
+ pn64 = atomic64_read(&key->conf.tx_pn);
+ iv32 = TKIP_PN_TO_IV32(pn64);
+ iv16 = TKIP_PN_TO_IV16(pn64);
if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
@@ -1131,6 +1132,34 @@ static int sta_apply_parameters(struct ieee80211_local *local,
sta->sta.max_sp = params->max_sp;
}
+ /* The sender might not have sent the last bit, consider it to be 0 */
+ if (params->ext_capab_len >= 8) {
+ u8 val = (params->ext_capab[7] &
+ WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB) >> 7;
+
+ /* we did get all the bits, take the MSB as well */
+ if (params->ext_capab_len >= 9) {
+ u8 val_msb = params->ext_capab[8] &
+ WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB;
+ val_msb <<= 1;
+ val |= val_msb;
+ }
+
+ switch (val) {
+ case 1:
+ sta->sta.max_amsdu_subframes = 32;
+ break;
+ case 2:
+ sta->sta.max_amsdu_subframes = 16;
+ break;
+ case 3:
+ sta->sta.max_amsdu_subframes = 8;
+ break;
+ default:
+ sta->sta.max_amsdu_subframes = 0;
+ }
+ }
+
/*
* cfg80211 validates this (1-2007) and allows setting the AID
* only when creating a new station entry
@@ -1160,6 +1189,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
params->ht_capa, sta);
+ /* VHT can override some HT caps such as the A-MSDU max length */
if (params->vht_capa)
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
params->vht_capa, sta);
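
The sta_apply_parameters() hunk above decodes a two-bit "max MSDUs in A-MSDU" field that straddles extended-capability octets 8 and 9. A standalone rework of that decode for illustration; the 0x80/0x01 masks stand in for the WLAN_EXT_CAPA8/9 defines and match the shifts used above:

#include <stdint.h>
#include <stdio.h>

static unsigned int max_amsdu_subframes(const uint8_t *ext_capab, int len)
{
	uint8_t val;

	if (len < 8)
		return 0;			/* field absent: unrestricted */
	val = (ext_capab[7] & 0x80) >> 7;	/* LSB */
	if (len >= 9)				/* MSB only if it was sent */
		val |= (ext_capab[8] & 0x01) << 1;

	switch (val) {
	case 1: return 32;
	case 2: return 16;
	case 3: return 8;
	default: return 0;			/* 0 = unrestricted */
	}
}

int main(void)
{
	uint8_t caps[9] = { 0, 0, 0, 0, 0, 0, 0, 0x80, 0x01 };

	printf("max A-MSDU subframes: %u\n", max_amsdu_subframes(caps, 9));
	return 0;
}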
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 1d1b9b7bdefe..283981108ca8 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -231,7 +231,7 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
!(sta->sdata->bss && sta->sdata->bss == sdata->bss))
continue;
- if (!sta->uploaded)
+ if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC))
continue;
max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 3e24d0ddb51b..4ab5c522ceee 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -126,6 +126,7 @@ static const char *hw_flag_names[] = {
FLAG(SUPPORTS_AMSDU_IN_AMPDU),
FLAG(BEACON_TX_STATUS),
FLAG(NEEDS_UNIQUE_STA_ADDR),
+ FLAG(SUPPORTS_REORDERING_BUFFER),
#undef FLAG
};
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 7961e7d0b61e..a2ef95f16f11 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -132,9 +132,10 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
len = scnprintf(buf, sizeof(buf), "\n");
break;
case WLAN_CIPHER_SUITE_TKIP:
+ pn = atomic64_read(&key->conf.tx_pn);
len = scnprintf(buf, sizeof(buf), "%08x %04x\n",
- key->u.tkip.tx.iv32,
- key->u.tkip.tx.iv16);
+ TKIP_PN_TO_IV32(pn),
+ TKIP_PN_TO_IV16(pn));
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
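
The TKIP hunks above replace the per-key iv32/iv16 TX counters with a single atomic 48-bit packet number that is split back into IVs on read. A small standalone check of that split; the macro bodies are paraphrased from net/mac80211/tkip.h and may differ slightly in the tree:

#include <stdint.h>
#include <stdio.h>

#define TKIP_PN_TO_IV16(pn) ((uint16_t)((pn) & 0xffff))
#define TKIP_PN_TO_IV32(pn) ((uint32_t)(((pn) >> 16) & 0xffffffff))

int main(void)
{
	uint64_t pn = 0x0000123456789abcULL;	/* example 48-bit TX PN */

	/* prints iv32=12345678 iv16=9abc */
	printf("iv32=%08x iv16=%04x\n",
	       TKIP_PN_TO_IV32(pn), TKIP_PN_TO_IV16(pn));
	return 0;
}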
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index ca1fe5576103..c258f1041d33 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -284,9 +284,7 @@ int drv_switch_vif_chanctx(struct ieee80211_local *local,
int drv_ampdu_action(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid,
- u16 *ssn, u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
int ret = -EOPNOTSUPP;
@@ -296,12 +294,10 @@ int drv_ampdu_action(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return -EIO;
- trace_drv_ampdu_action(local, sdata, action, sta, tid,
- ssn, buf_size, amsdu);
+ trace_drv_ampdu_action(local, sdata, params);
if (local->ops->ampdu_action)
- ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
- sta, tid, ssn, buf_size, amsdu);
+ ret = local->ops->ampdu_action(&local->hw, &sdata->vif, params);
trace_drv_return_int(local, ret);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 154ce4b13406..18b0d65baff0 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -585,9 +585,7 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
int drv_ampdu_action(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid,
- u16 *ssn, u8 buf_size, bool amsdu);
+ struct ieee80211_ampdu_params *params);
static inline int drv_get_survey(struct ieee80211_local *local, int idx,
struct survey_info *survey)
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 7a76ce639d58..f4a528773563 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -230,6 +230,11 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
/* set Rx highest rate */
ht_cap.mcs.rx_highest = ht_cap_ie->mcs.rx_highest;
+ if (ht_cap.cap & IEEE80211_HT_CAP_MAX_AMSDU)
+ sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_7935;
+ else
+ sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_3839;
+
apply:
changed = memcmp(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 978d3bc31df7..fc3238376b39 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -7,6 +7,7 @@
* Copyright 2007, Michael Wu <[email protected]>
* Copyright 2009, Johannes Berg <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1050,9 +1051,8 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
struct cfg80211_chan_def chandef;
enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth;
- ieee80211_ht_oper_to_chandef(channel,
- elems->ht_operation,
- &chandef);
+ cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+ ieee80211_chandef_ht_oper(elems->ht_operation, &chandef);
memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie));
rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
@@ -1066,9 +1066,8 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
struct ieee80211_vht_cap cap_ie;
struct ieee80211_sta_vht_cap cap = sta->sta.vht_cap;
- ieee80211_vht_oper_to_chandef(channel,
- elems->vht_operation,
- &chandef);
+ ieee80211_chandef_vht_oper(elems->vht_operation,
+ &chandef);
memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie));
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
&cap_ie, sta);
@@ -1485,14 +1484,21 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
- num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
- &ifibss->chandef,
- channels,
- ARRAY_SIZE(channels));
scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
- ieee80211_request_ibss_scan(sdata, ifibss->ssid,
- ifibss->ssid_len, channels, num,
- scan_width);
+
+ if (ifibss->fixed_channel) {
+ num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
+ &ifibss->chandef,
+ channels,
+ ARRAY_SIZE(channels));
+ ieee80211_request_ibss_scan(sdata, ifibss->ssid,
+ ifibss->ssid_len, channels,
+ num, scan_width);
+ } else {
+ ieee80211_request_ibss_scan(sdata, ifibss->ssid,
+ ifibss->ssid_len, NULL,
+ 0, scan_width);
+ }
} else {
int interval = IEEE80211_SCAN_INTERVAL;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index b84f6aa32c08..804575ff7af5 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -92,7 +92,7 @@ struct ieee80211_fragment_entry {
u16 extra_len;
u16 last_frag;
u8 rx_queue;
- bool ccmp; /* Whether fragments were encrypted with CCMP */
+ bool check_sequential_pn; /* needed for CCMP/GCMP */
u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
};
@@ -716,7 +716,6 @@ struct ieee80211_if_mesh {
* back to wireless media and to the local net stack.
* @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
* @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver
- * @IEEE80211_SDATA_MU_MIMO_OWNER: indicates interface owns MU-MIMO capability
*/
enum ieee80211_sub_if_data_flags {
IEEE80211_SDATA_ALLMULTI = BIT(0),
@@ -724,7 +723,6 @@ enum ieee80211_sub_if_data_flags {
IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3),
IEEE80211_SDATA_DISCONNECT_RESUME = BIT(4),
IEEE80211_SDATA_IN_DRIVER = BIT(5),
- IEEE80211_SDATA_MU_MIMO_OWNER = BIT(6),
};
/**
@@ -804,6 +802,7 @@ enum txq_info_flags {
struct txq_info {
struct sk_buff_head queue;
unsigned long flags;
+ unsigned long byte_cnt;
/* keep last! */
struct ieee80211_txq txq;
@@ -1466,7 +1465,13 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
{
WARN_ON_ONCE(status->flag & RX_FLAG_MACTIME_START &&
status->flag & RX_FLAG_MACTIME_END);
- return status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END);
+ if (status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END))
+ return true;
+ /* can't handle HT/VHT preamble yet */
+ if (status->flag & RX_FLAG_MACTIME_PLCP_START &&
+ !(status->flag & (RX_FLAG_HT | RX_FLAG_VHT)))
+ return true;
+ return false;
}
u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
@@ -1714,6 +1719,8 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta);
enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
void ieee80211_sta_set_rx_nss(struct sta_info *sta);
+void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt);
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, u8 opmode,
enum ieee80211_band band);
@@ -1829,20 +1836,6 @@ static inline void ieee802_11_parse_elems(const u8 *start, size_t len,
ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0);
}
-static inline bool ieee80211_rx_reorder_ready(struct sk_buff_head *frames)
-{
- struct sk_buff *tail = skb_peek_tail(frames);
- struct ieee80211_rx_status *status;
-
- if (!tail)
- return false;
-
- status = IEEE80211_SKB_RXCB(tail);
- if (status->flag & RX_FLAG_AMSDU_MORE)
- return false;
-
- return true;
-}
extern const int ieee802_1d_to_ac[8];
@@ -1986,12 +1979,10 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
/* channel management */
-void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
- const struct ieee80211_ht_operation *ht_oper,
- struct cfg80211_chan_def *chandef);
-void ieee80211_vht_oper_to_chandef(struct ieee80211_channel *control_chan,
- const struct ieee80211_vht_operation *oper,
- struct cfg80211_chan_def *chandef);
+bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
+ struct cfg80211_chan_def *chandef);
+bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper,
+ struct cfg80211_chan_def *chandef);
u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
int __must_check
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index c9e325d2e120..453b4e741780 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -977,7 +977,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
if (sdata->vif.txq) {
struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+ spin_lock_bh(&txqi->queue.lock);
ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
+ txqi->byte_cnt = 0;
+ spin_unlock_bh(&txqi->queue.lock);
+
atomic_set(&sdata->txqs_len[txqi->txq.ac], 0);
}
@@ -1271,6 +1275,16 @@ static void ieee80211_iface_work(struct work_struct *work)
}
}
mutex_unlock(&local->sta_mtx);
+ } else if (ieee80211_is_action(mgmt->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_VHT) {
+ switch (mgmt->u.action.u.vht_group_notif.action_code) {
+ case WLAN_VHT_ACTION_GROUPID_MGMT:
+ ieee80211_process_mu_groups(sdata, mgmt);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
} else if (ieee80211_is_data_qos(mgmt->frame_control)) {
struct ieee80211_hdr *hdr = (void *)mgmt;
/*
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 5e5bc599da4c..3df7b0392d30 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -932,50 +932,6 @@ void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid,
}
EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_notify);
-void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
- struct ieee80211_key_seq *seq)
-{
- struct ieee80211_key *key;
- u64 pn64;
-
- if (WARN_ON(!(keyconf->flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
- return;
-
- key = container_of(keyconf, struct ieee80211_key, conf);
-
- switch (key->conf.cipher) {
- case WLAN_CIPHER_SUITE_TKIP:
- seq->tkip.iv32 = key->u.tkip.tx.iv32;
- seq->tkip.iv16 = key->u.tkip.tx.iv16;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_CCMP_256:
- case WLAN_CIPHER_SUITE_AES_CMAC:
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
- offsetof(typeof(*seq), aes_cmac));
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
- offsetof(typeof(*seq), aes_gmac));
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
- BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
- offsetof(typeof(*seq), gcmp));
- pn64 = atomic64_read(&key->conf.tx_pn);
- seq->ccmp.pn[5] = pn64;
- seq->ccmp.pn[4] = pn64 >> 8;
- seq->ccmp.pn[3] = pn64 >> 16;
- seq->ccmp.pn[2] = pn64 >> 24;
- seq->ccmp.pn[1] = pn64 >> 32;
- seq->ccmp.pn[0] = pn64 >> 40;
- break;
- default:
- WARN_ON(1);
- }
-}
-EXPORT_SYMBOL(ieee80211_get_key_tx_seq);
-
void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
int tid, struct ieee80211_key_seq *seq)
{
@@ -1029,48 +985,6 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
}
EXPORT_SYMBOL(ieee80211_get_key_rx_seq);
-void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
- struct ieee80211_key_seq *seq)
-{
- struct ieee80211_key *key;
- u64 pn64;
-
- key = container_of(keyconf, struct ieee80211_key, conf);
-
- switch (key->conf.cipher) {
- case WLAN_CIPHER_SUITE_TKIP:
- key->u.tkip.tx.iv32 = seq->tkip.iv32;
- key->u.tkip.tx.iv16 = seq->tkip.iv16;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_CCMP_256:
- case WLAN_CIPHER_SUITE_AES_CMAC:
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
- offsetof(typeof(*seq), aes_cmac));
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
- offsetof(typeof(*seq), aes_gmac));
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
- BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
- offsetof(typeof(*seq), gcmp));
- pn64 = (u64)seq->ccmp.pn[5] |
- ((u64)seq->ccmp.pn[4] << 8) |
- ((u64)seq->ccmp.pn[3] << 16) |
- ((u64)seq->ccmp.pn[2] << 24) |
- ((u64)seq->ccmp.pn[1] << 32) |
- ((u64)seq->ccmp.pn[0] << 40);
- atomic64_set(&key->conf.tx_pn, pn64);
- break;
- default:
- WARN_ON(1);
- break;
- }
-}
-EXPORT_SYMBOL_GPL(ieee80211_set_key_tx_seq);
-
void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
int tid, struct ieee80211_key_seq *seq)
{
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 9951ef06323e..4aa20cef0859 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -44,13 +44,17 @@ enum ieee80211_internal_tkip_state {
};
struct tkip_ctx {
- u32 iv32; /* current iv32 */
- u16 iv16; /* current iv16 */
u16 p1k[5]; /* p1k cache */
u32 p1k_iv32; /* iv32 for which p1k computed */
enum ieee80211_internal_tkip_state state;
};
+struct tkip_ctx_rx {
+ struct tkip_ctx ctx;
+ u32 iv32; /* current iv32 */
+ u16 iv16; /* current iv16 */
+};
+
struct ieee80211_key {
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
@@ -71,7 +75,7 @@ struct ieee80211_key {
struct tkip_ctx tx;
/* last received RSC */
- struct tkip_ctx rx[IEEE80211_NUM_TIDS];
+ struct tkip_ctx_rx rx[IEEE80211_NUM_TIDS];
/* number of mic failures */
u32 mic_failures;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6f85b6ab8e51..d32cefcb63b0 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -91,11 +91,10 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
if (sdata->vif.bss_conf.basic_rates != basic_rates)
return false;
- ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
- ie->ht_operation, &sta_chan_def);
-
- ieee80211_vht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
- ie->vht_operation, &sta_chan_def);
+ cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chandef.chan,
+ NL80211_CHAN_NO_HT);
+ ieee80211_chandef_ht_oper(ie->ht_operation, &sta_chan_def);
+ ieee80211_chandef_vht_oper(ie->vht_operation, &sta_chan_def);
if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
&sta_chan_def))
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 4a8019f79fb2..87c017a3b1ce 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -137,8 +137,6 @@ struct mesh_path {
* @copy_node: function to copy nodes of the table
* @size_order: determines size of the table, there will be 2^size_order hash
* buckets
- * @mean_chain_len: maximum average length for the hash buckets' list, if it is
- * reached, the table will grow
* @known_gates: list of known mesh gates and their mpaths by the station. The
* gate's mpath may or may not be resolved and active.
*
@@ -154,7 +152,6 @@ struct mesh_table {
void (*free_node) (struct hlist_node *p, bool free_leafs);
int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
int size_order;
- int mean_chain_len;
struct hlist_head *known_gates;
spinlock_t gates_lock;
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index c6be0b4f4058..5b6aec1a0630 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -205,9 +205,9 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- skb_set_mac_header(skb, 0);
- skb_set_network_header(skb, 0);
- skb_set_transport_header(skb, 0);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
skb_set_queue_mapping(skb, IEEE80211_AC_VO);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index dadf8dc6f1cf..2ba7aa56b11c 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -55,16 +55,21 @@ int mpp_paths_generation;
static DEFINE_RWLOCK(pathtbl_resize_lock);
+static inline struct mesh_table *resize_dereference_paths(
+ struct mesh_table __rcu *table)
+{
+ return rcu_dereference_protected(table,
+ lockdep_is_held(&pathtbl_resize_lock));
+}
+
static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
- return rcu_dereference_protected(mesh_paths,
- lockdep_is_held(&pathtbl_resize_lock));
+ return resize_dereference_paths(mesh_paths);
}
static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
- return rcu_dereference_protected(mpp_paths,
- lockdep_is_held(&pathtbl_resize_lock));
+ return resize_dereference_paths(mpp_paths);
}
/*
@@ -160,11 +165,10 @@ static int mesh_table_grow(struct mesh_table *oldtbl,
int i;
if (atomic_read(&oldtbl->entries)
- < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
+ < MEAN_CHAIN_LEN * (oldtbl->hash_mask + 1))
return -EAGAIN;
newtbl->free_node = oldtbl->free_node;
- newtbl->mean_chain_len = oldtbl->mean_chain_len;
newtbl->copy_node = oldtbl->copy_node;
newtbl->known_gates = oldtbl->known_gates;
atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
@@ -585,7 +589,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
hlist_add_head_rcu(&new_node->list, bucket);
if (atomic_inc_return(&tbl->entries) >=
- tbl->mean_chain_len * (tbl->hash_mask + 1))
+ MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
grow = 1;
mesh_paths_generation++;
@@ -714,7 +718,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
hlist_add_head_rcu(&new_node->list, bucket);
if (atomic_inc_return(&tbl->entries) >=
- tbl->mean_chain_len * (tbl->hash_mask + 1))
+ MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
grow = 1;
spin_unlock(&tbl->hashwlock[hash_idx]);
@@ -835,6 +839,29 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
rcu_read_unlock();
}
+static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
+ const u8 *proxy)
+{
+ struct mesh_table *tbl;
+ struct mesh_path *mpp;
+ struct mpath_node *node;
+ int i;
+
+ rcu_read_lock();
+ read_lock_bh(&pathtbl_resize_lock);
+ tbl = resize_dereference_mpp_paths();
+ for_each_mesh_entry(tbl, node, i) {
+ mpp = node->mpath;
+ if (ether_addr_equal(mpp->mpp, proxy)) {
+ spin_lock(&tbl->hashwlock[i]);
+ __mesh_path_del(tbl, node);
+ spin_unlock(&tbl->hashwlock[i]);
+ }
+ }
+ read_unlock_bh(&pathtbl_resize_lock);
+ rcu_read_unlock();
+}
+
static void table_flush_by_iface(struct mesh_table *tbl,
struct ieee80211_sub_if_data *sdata)
{
@@ -876,14 +903,17 @@ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
}
/**
- * mesh_path_del - delete a mesh path from the table
+ * table_path_del - delete a path from the mesh or mpp table
*
- * @addr: dst address (ETH_ALEN length)
+ * @tbl: mesh or mpp path table
* @sdata: local subif
+ * @addr: dst address (ETH_ALEN length)
*
* Returns: 0 if successful
*/
-int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
+static int table_path_del(struct mesh_table __rcu *rcu_tbl,
+ struct ieee80211_sub_if_data *sdata,
+ const u8 *addr)
{
struct mesh_table *tbl;
struct mesh_path *mpath;
@@ -892,8 +922,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
int hash_idx;
int err = 0;
- read_lock_bh(&pathtbl_resize_lock);
- tbl = resize_dereference_mesh_paths();
+ tbl = resize_dereference_paths(rcu_tbl);
hash_idx = mesh_table_hash(addr, sdata, tbl);
bucket = &tbl->hash_buckets[hash_idx];
@@ -909,9 +938,50 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
err = -ENXIO;
enddel:
- mesh_paths_generation++;
spin_unlock(&tbl->hashwlock[hash_idx]);
+ return err;
+}
+
+/**
+ * mesh_path_del - delete a mesh path from the table
+ *
+ * @addr: dst address (ETH_ALEN length)
+ * @sdata: local subif
+ *
+ * Returns: 0 if successful
+ */
+int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
+{
+ int err = 0;
+
+ /* flush relevant mpp entries first */
+ mpp_flush_by_proxy(sdata, addr);
+
+ read_lock_bh(&pathtbl_resize_lock);
+ err = table_path_del(mesh_paths, sdata, addr);
+ mesh_paths_generation++;
read_unlock_bh(&pathtbl_resize_lock);
+
+ return err;
+}
+
+/**
+ * mpp_path_del - delete a mesh proxy path from the table
+ *
+ * @addr: dst address (ETH_ALEN length)
+ * @sdata: local subif
+ *
+ * Returns: 0 if successful
+ */
+static int mpp_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
+{
+ int err = 0;
+
+ read_lock_bh(&pathtbl_resize_lock);
+ err = table_path_del(mpp_paths, sdata, addr);
+ mpp_paths_generation++;
+ read_unlock_bh(&pathtbl_resize_lock);
+
return err;
}
@@ -1076,7 +1146,6 @@ int mesh_pathtbl_init(void)
return -ENOMEM;
tbl_path->free_node = &mesh_path_node_free;
tbl_path->copy_node = &mesh_path_node_copy;
- tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
if (!tbl_path->known_gates) {
ret = -ENOMEM;
@@ -1092,7 +1161,6 @@ int mesh_pathtbl_init(void)
}
tbl_mpp->free_node = &mesh_path_node_free;
tbl_mpp->copy_node = &mesh_path_node_copy;
- tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
if (!tbl_mpp->known_gates) {
ret = -ENOMEM;
@@ -1131,6 +1199,17 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
mesh_path_del(mpath->sdata, mpath->dst);
}
+
+ tbl = rcu_dereference(mpp_paths);
+ for_each_mesh_entry(tbl, node, i) {
+ if (node->mpath->sdata != sdata)
+ continue;
+ mpath = node->mpath;
+ if ((!(mpath->flags & MESH_PATH_FIXED)) &&
+ time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
+ mpp_path_del(mpath->sdata, mpath->dst);
+ }
+
rcu_read_unlock();
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index bd3d55eb21d4..a07e93c21c9e 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -976,6 +976,10 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
mpl_dbg(sdata, "Mesh plink error: no more free plinks\n");
goto out;
}
+
+ /* new matching peer */
+ event = OPN_ACPT;
+ goto out;
} else {
if (!test_sta_flag(sta, WLAN_STA_AUTH)) {
mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n");
@@ -985,12 +989,6 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
goto out;
}
- /* new matching peer */
- if (!sta) {
- event = OPN_ACPT;
- goto out;
- }
-
switch (ftype) {
case WLAN_SP_MESH_PEERING_OPEN:
if (!matches_local)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index bfbb1acafdd1..281b8d6e5109 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -6,7 +6,7 @@
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007, Michael Wu <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -196,16 +196,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
/* check 40 MHz support, if we have it */
if (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
- switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- chandef->width = NL80211_CHAN_WIDTH_40;
- chandef->center_freq1 += 10;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- chandef->width = NL80211_CHAN_WIDTH_40;
- chandef->center_freq1 -= 10;
- break;
- }
+ ieee80211_chandef_ht_oper(ht_oper, chandef);
} else {
/* 40 MHz (and 80 MHz) must be supported for VHT */
ret = IEEE80211_STA_DISABLE_VHT;
@@ -219,35 +210,11 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
goto out;
}
- vht_chandef.chan = channel;
- vht_chandef.center_freq1 =
- ieee80211_channel_to_frequency(vht_oper->center_freq_seg1_idx,
- channel->band);
- vht_chandef.center_freq2 = 0;
-
- switch (vht_oper->chan_width) {
- case IEEE80211_VHT_CHANWIDTH_USE_HT:
- vht_chandef.width = chandef->width;
- vht_chandef.center_freq1 = chandef->center_freq1;
- break;
- case IEEE80211_VHT_CHANWIDTH_80MHZ:
- vht_chandef.width = NL80211_CHAN_WIDTH_80;
- break;
- case IEEE80211_VHT_CHANWIDTH_160MHZ:
- vht_chandef.width = NL80211_CHAN_WIDTH_160;
- break;
- case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
- vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
- vht_chandef.center_freq2 =
- ieee80211_channel_to_frequency(
- vht_oper->center_freq_seg2_idx,
- channel->band);
- break;
- default:
+ vht_chandef = *chandef;
+ if (!ieee80211_chandef_vht_oper(vht_oper, &vht_chandef)) {
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
- "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
- vht_oper->chan_width);
+ "AP VHT information is invalid, disable VHT\n");
ret = IEEE80211_STA_DISABLE_VHT;
goto out;
}
@@ -592,7 +559,7 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sub_if_data *other;
list_for_each_entry_rcu(other, &local->interfaces, list) {
- if (other->flags & IEEE80211_SDATA_MU_MIMO_OWNER) {
+ if (other->vif.mu_mimo_owner) {
disable_mu_mimo = true;
break;
}
@@ -600,7 +567,7 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
if (disable_mu_mimo)
cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
else
- sdata->flags |= IEEE80211_SDATA_MU_MIMO_OWNER;
+ sdata->vif.mu_mimo_owner = true;
}
mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
@@ -1638,8 +1605,7 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
void ieee80211_dfs_cac_timer_work(struct work_struct *work)
{
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
+ struct delayed_work *delayed_work = to_delayed_work(work);
struct ieee80211_sub_if_data *sdata =
container_of(delayed_work, struct ieee80211_sub_if_data,
dfs_cac_timer_work);
@@ -2079,7 +2045,14 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa));
memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask));
- sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
+
+ /* reset MU-MIMO ownership and group data */
+ memset(sdata->vif.bss_conf.mu_group.membership, 0,
+ sizeof(sdata->vif.bss_conf.mu_group.membership));
+ memset(sdata->vif.bss_conf.mu_group.position, 0,
+ sizeof(sdata->vif.bss_conf.mu_group.position));
+ changed |= BSS_CHANGED_MU_GROUPS;
+ sdata->vif.mu_mimo_owner = false;
sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
@@ -2536,7 +2509,8 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
eth_zero_addr(sdata->u.mgd.bssid);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
sdata->u.mgd.flags = 0;
- sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
+ sdata->vif.mu_mimo_owner = false;
+
mutex_lock(&sdata->local->mtx);
ieee80211_vif_release_channel(sdata);
mutex_unlock(&sdata->local->mtx);
@@ -3571,6 +3545,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
elems.ht_cap_elem, elems.ht_operation,
elems.vht_operation, bssid, &changed)) {
mutex_unlock(&local->sta_mtx);
+ sdata_info(sdata,
+ "failed to follow AP %pM bandwidth change, disconnect\n",
+ bssid);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DEAUTH_LEAVING,
true, deauth_buf);
@@ -3946,11 +3923,9 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
* We actually lost the connection ... or did we?
* Let's make sure!
*/
- wiphy_debug(local->hw.wiphy,
- "%s: No probe response from AP %pM"
- " after %dms, disconnecting.\n",
- sdata->name,
- bssid, probe_wait_ms);
+ mlme_dbg(sdata,
+ "No probe response from AP %pM after %dms, disconnecting.\n",
+ bssid, probe_wait_ms);
ieee80211_sta_connection_lost(sdata, bssid,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false);
@@ -4536,6 +4511,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
if (ifmgd->associated) {
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+ sdata_info(sdata,
+ "disconnect from AP %pM for new auth to %pM\n",
+ ifmgd->associated->bssid, req->bss->bssid);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_UNSPECIFIED,
false, frame_buf);
@@ -4604,6 +4582,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
if (ifmgd->associated) {
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+ sdata_info(sdata,
+ "disconnect from AP %pM for new assoc to %pM\n",
+ ifmgd->associated->bssid, req->bss->bssid);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_UNSPECIFIED,
false, frame_buf);
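
The mlme.c hunk above drops the open-coded secondary-channel-offset switch in favour of ieee80211_chandef_ht_oper(). The underlying arithmetic is simple: a 40 MHz channel's center sits 10 MHz above or below the 20 MHz control channel. A minimal worked example mirroring the removed switch:

#include <stdio.h>

enum sec_offset { SEC_NONE, SEC_ABOVE, SEC_BELOW };

static int ht40_center_freq(int control_freq, enum sec_offset off)
{
	switch (off) {
	case SEC_ABOVE: return control_freq + 10;
	case SEC_BELOW: return control_freq - 10;
	default:        return control_freq;	/* stay at 20 MHz */
	}
}

int main(void)
{
	/* channel 36 (5180 MHz) with secondary above -> center 5190 MHz */
	printf("center_freq1 = %d MHz\n", ht40_center_freq(5180, SEC_ABOVE));
	return 0;
}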
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 3ece7d1034c8..b54f398cda5d 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -711,7 +711,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
* computing cur_tp
*/
tmp_mrs = &mi->r[idx].stats;
- tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma);
+ tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10;
tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;
return tmp_cur_tp;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 3928dbd24e25..370d677b547b 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -414,15 +414,16 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
(max_tp_group != MINSTREL_CCK_GROUP))
return;
+ max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
+ max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
+ max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma;
+
if (mrs->prob_ewma > MINSTREL_FRAC(75, 100)) {
cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx,
mrs->prob_ewma);
if (cur_tp_avg > tmp_tp_avg)
mi->max_prob_rate = index;
- max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
- max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
- max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma;
max_gpr_tp_avg = minstrel_ht_get_tp_avg(mi, max_gpr_group,
max_gpr_idx,
max_gpr_prob);
@@ -431,7 +432,7 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
} else {
if (mrs->prob_ewma > tmp_prob)
mi->max_prob_rate = index;
- if (mrs->prob_ewma > mg->rates[mg->max_group_prob_rate].prob_ewma)
+ if (mrs->prob_ewma > max_gpr_prob)
mg->max_group_prob_rate = index;
}
}
@@ -691,7 +692,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
if (likely(sta->ampdu_mlme.tid_tx[tid]))
return;
- ieee80211_start_tx_ba_session(pubsta, tid, 5000);
+ ieee80211_start_tx_ba_session(pubsta, tid, 0);
}
static void
@@ -871,7 +872,7 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
* - if station is in dynamic SMPS (and streams > 1)
* - for fallback rates, to increase chances of getting through
*/
- if (offset > 0 &&
+ if (offset > 0 ||
(mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC &&
group->streams > 1)) {
ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
@@ -1334,7 +1335,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
prob = mi->groups[i].rates[j].prob_ewma;
/* convert tp_avg from pkt per second in kbps */
- tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * AVG_PKT_SIZE * 8 / 1024;
+ tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
+ tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024;
return tp_avg;
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index bc081850ac0e..dc27becb9b71 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -4,6 +4,7 @@
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007-2010 Johannes Berg <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,6 +19,7 @@
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
+#include <linux/bitops.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>
@@ -122,7 +124,8 @@ static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
hdr = (void *)(skb->data + rtap_vendor_space);
if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
- RX_FLAG_FAILED_PLCP_CRC))
+ RX_FLAG_FAILED_PLCP_CRC |
+ RX_FLAG_ONLY_MONITOR))
return true;
if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
@@ -507,7 +510,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
return NULL;
}
- if (!local->monitors) {
+ if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
if (should_drop_frame(origskb, present_fcs_len,
rtap_vendor_space)) {
dev_kfree_skb(origskb);
@@ -797,6 +800,26 @@ static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
}
+static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
+ int index)
+{
+ struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
+ struct sk_buff *tail = skb_peek_tail(frames);
+ struct ieee80211_rx_status *status;
+
+ if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
+ return true;
+
+ if (!tail)
+ return false;
+
+ status = IEEE80211_SKB_RXCB(tail);
+ if (status->flag & RX_FLAG_AMSDU_MORE)
+ return false;
+
+ return true;
+}
+
static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_rx *tid_agg_rx,
int index,
@@ -811,7 +834,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
if (skb_queue_empty(skb_list))
goto no_frame;
- if (!ieee80211_rx_reorder_ready(skb_list)) {
+ if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
__skb_queue_purge(skb_list);
goto no_frame;
}
@@ -825,6 +848,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
}
no_frame:
+ tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
}
@@ -865,7 +889,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
/* release the buffer until next missing frame */
index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
- if (!ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index]) &&
+ if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
tid_agg_rx->stored_mpdu_num) {
/*
* No buffers ready to be released, but check whether any
@@ -874,8 +898,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
int skipped = 1;
for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
j = (j + 1) % tid_agg_rx->buf_size) {
- if (!ieee80211_rx_reorder_ready(
- &tid_agg_rx->reorder_buf[j])) {
+ if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
skipped++;
continue;
}
@@ -902,8 +925,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
skipped) & IEEE80211_SN_MASK;
skipped = 0;
}
- } else while (ieee80211_rx_reorder_ready(
- &tid_agg_rx->reorder_buf[index])) {
+ } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
frames);
index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
@@ -914,8 +936,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
for (; j != (index - 1) % tid_agg_rx->buf_size;
j = (j + 1) % tid_agg_rx->buf_size) {
- if (ieee80211_rx_reorder_ready(
- &tid_agg_rx->reorder_buf[j]))
+ if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
break;
}
@@ -986,7 +1007,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
index = mpdu_seq_num % tid_agg_rx->buf_size;
/* check if we already stored this frame */
- if (ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index])) {
+ if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
dev_kfree_skb(skb);
goto out;
}
@@ -1099,6 +1120,9 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+ if (status->flag & RX_FLAG_DUP_VALIDATED)
+ return RX_CONTINUE;
+
/*
* Drop duplicate 802.11 retransmissions
* (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
@@ -1753,7 +1777,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
entry->seq = seq;
entry->rx_queue = rx_queue;
entry->last_frag = frag;
- entry->ccmp = 0;
+ entry->check_sequential_pn = false;
entry->extra_len = 0;
return entry;
@@ -1849,15 +1873,27 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
rx->seqno_idx, &(rx->skb));
if (rx->key &&
(rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
- rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) &&
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
ieee80211_has_protected(fc)) {
int queue = rx->security_idx;
- /* Store CCMP PN so that we can verify that the next
- * fragment has a sequential PN value. */
- entry->ccmp = 1;
+
+ /* Store CCMP/GCMP PN so that we can verify that the
+ * next fragment has a sequential PN value.
+ */
+ entry->check_sequential_pn = true;
memcpy(entry->last_pn,
rx->key->u.ccmp.rx_pn[queue],
IEEE80211_CCMP_PN_LEN);
+ BUILD_BUG_ON(offsetof(struct ieee80211_key,
+ u.ccmp.rx_pn) !=
+ offsetof(struct ieee80211_key,
+ u.gcmp.rx_pn));
+ BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
+ sizeof(rx->key->u.gcmp.rx_pn[queue]));
+ BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
+ IEEE80211_GCMP_PN_LEN);
}
return RX_QUEUED;
}
@@ -1872,15 +1908,21 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
return RX_DROP_MONITOR;
}
- /* Verify that MPDUs within one MSDU have sequential PN values.
- * (IEEE 802.11i, 8.3.3.4.5) */
- if (entry->ccmp) {
+ /* "The receiver shall discard MSDUs and MMPDUs whose constituent
+ * MPDU PN values are not incrementing in steps of 1."
+ * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
+ * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
+ */
+ if (entry->check_sequential_pn) {
int i;
u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
int queue;
+
if (!rx->key ||
(rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
- rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256))
+ rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
+ rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
+ rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
return RX_DROP_UNUSABLE;
memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
@@ -2199,9 +2241,6 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
skb->dev = dev;
__skb_queue_head_init(&frame_list);
- if (skb_linearize(skb))
- return RX_DROP_UNUSABLE;
-
ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
rx->sdata->vif.type,
rx->local->hw.extra_tx_headroom, true);
@@ -2231,7 +2270,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
struct ieee80211_local *local = rx->local;
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- u16 q, hdrlen;
+ u16 ac, q, hdrlen;
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -2290,6 +2329,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
spin_lock_bh(&mppath->state_lock);
if (!ether_addr_equal(mppath->mpp, mpp_addr))
memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
+ mppath->exp_time = jiffies;
spin_unlock_bh(&mppath->state_lock);
}
rcu_read_unlock();
@@ -2300,7 +2340,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
ether_addr_equal(sdata->vif.addr, hdr->addr3))
return RX_CONTINUE;
- q = ieee80211_select_queue_80211(sdata, skb, hdr);
+ ac = ieee80211_select_queue_80211(sdata, skb, hdr);
+ q = sdata->vif.hw_queue[ac];
if (ieee80211_queue_stopped(&local->hw, q)) {
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
return RX_DROP_MONITOR;
@@ -2738,6 +2779,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
opmode, status->band);
goto handled;
}
+ case WLAN_VHT_ACTION_GROUPID_MGMT: {
+ if (len < IEEE80211_MIN_ACTION_SIZE + 25)
+ goto invalid;
+ goto queue;
+ }
default:
break;
}
@@ -3073,7 +3119,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
false);
- skb_set_mac_header(skb, 0);
+ skb_reset_mac_header(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
@@ -3275,6 +3321,85 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
ieee80211_rx_handlers(&rx, &frames);
}
+void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+ u16 ssn, u64 filtered,
+ u16 received_mpdus)
+{
+ struct sta_info *sta;
+ struct tid_ampdu_rx *tid_agg_rx;
+ struct sk_buff_head frames;
+ struct ieee80211_rx_data rx = {
+ /* This is OK -- must be QoS data frame */
+ .security_idx = tid,
+ .seqno_idx = tid,
+ };
+ int i, diff;
+
+ if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
+ return;
+
+ __skb_queue_head_init(&frames);
+
+ sta = container_of(pubsta, struct sta_info, sta);
+
+ rx.sta = sta;
+ rx.sdata = sta->sdata;
+ rx.local = sta->local;
+
+ rcu_read_lock();
+ tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
+ if (!tid_agg_rx)
+ goto out;
+
+ spin_lock_bh(&tid_agg_rx->reorder_lock);
+
+ if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
+ int release;
+
+ /* release all frames in the reorder buffer */
+ release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
+ IEEE80211_SN_MODULO;
+ ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
+ release, &frames);
+ /* update ssn to match received ssn */
+ tid_agg_rx->head_seq_num = ssn;
+ } else {
+ ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
+ &frames);
+ }
+
+ /* handle the case that received ssn is behind the mac ssn.
+ * it can be tid_agg_rx->buf_size behind and still be valid */
+ diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
+ if (diff >= tid_agg_rx->buf_size) {
+ tid_agg_rx->reorder_buf_filtered = 0;
+ goto release;
+ }
+ filtered = filtered >> diff;
+ ssn += diff;
+
+ /* update bitmap */
+ for (i = 0; i < tid_agg_rx->buf_size; i++) {
+ int index = (ssn + i) % tid_agg_rx->buf_size;
+
+ tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
+ if (filtered & BIT_ULL(i))
+ tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
+ }
+
+ /* now process also frames that the filter marking released */
+ ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
+
+release:
+ spin_unlock_bh(&tid_agg_rx->reorder_lock);
+
+ ieee80211_rx_handlers(&rx, &frames);
+
+ out:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
+
/* main receive path */
static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
@@ -3366,6 +3491,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
return false;
/* ignore action frames to TDLS-peers */
if (ieee80211_is_action(hdr->frame_control) &&
+ !is_broadcast_ether_addr(bssid) &&
!ether_addr_equal(bssid, hdr->addr1))
return false;
}
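
ieee80211_mark_rx_ba_filtered_frames() above keeps a 64-bit reorder_buf_filtered bitmap so that slots holding firmware-filtered frames count as "ready" for release even though they contain no skb. A userspace model of the bitmap-update step, assuming buf_size <= 64 as the u64 implies:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
	uint64_t filtered = 0;
	unsigned int buf_size = 64, ssn = 100, head = 102;
	unsigned int diff = (head - ssn) & 0xfff;	/* seq-number math */
	uint64_t fw_bitmap = 0x5;	/* fw filtered slots ssn+0 and ssn+2 */
	unsigned int i;

	fw_bitmap >>= diff;	/* drop slots the head already passed */
	ssn += diff;
	for (i = 0; i < buf_size; i++) {
		unsigned int index = (ssn + i) % buf_size;

		if (fw_bitmap & BIT_ULL(i))
			filtered |= BIT_ULL(index);
	}
	/* only the ssn+2 slot survives the shift */
	printf("filtered bitmap: %#llx\n", (unsigned long long)filtered);
	return 0;
}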
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index a4a4f89d3ba0..d20bab5c146c 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -116,6 +116,7 @@ static void __cleanup_single_sta(struct sta_info *sta)
ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
atomic_sub(n, &sdata->txqs_len[txqi->txq.ac]);
+ txqi->byte_cnt = 0;
}
}
@@ -498,11 +499,17 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
{
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct station_info sinfo;
+ struct station_info *sinfo;
int err = 0;
lockdep_assert_held(&local->sta_mtx);
+ sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
+ if (!sinfo) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
/* check if STA exists already */
if (sta_info_get_bss(sdata, sta->sta.addr)) {
err = -EEXIST;
@@ -530,14 +537,12 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
/* accept BA sessions now */
clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
- ieee80211_recalc_min_chandef(sdata);
ieee80211_sta_debugfs_add(sta);
rate_control_add_sta_debugfs(sta);
- memset(&sinfo, 0, sizeof(sinfo));
- sinfo.filled = 0;
- sinfo.generation = local->sta_generation;
- cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+ sinfo->generation = local->sta_generation;
+ cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
+ kfree(sinfo);
sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
@@ -557,6 +562,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
__cleanup_single_sta(sta);
out_err:
mutex_unlock(&local->sta_mtx);
+ kfree(sinfo);
rcu_read_lock();
return err;
}
@@ -898,7 +904,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
{
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct station_info sinfo = {};
+ struct station_info *sinfo;
int ret;
/*
@@ -936,12 +942,14 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
- sta_set_sinfo(sta, &sinfo);
- cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+ sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
+ if (sinfo)
+ sta_set_sinfo(sta, sinfo);
+ cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
+ kfree(sinfo);
rate_control_remove_sta_debugfs(sta);
ieee80211_sta_debugfs_remove(sta);
- ieee80211_recalc_min_chandef(sdata);
cleanup_single_sta(sta);
}
@@ -1808,14 +1816,17 @@ int sta_info_move_state(struct sta_info *sta,
clear_bit(WLAN_STA_AUTH, &sta->_flags);
break;
case IEEE80211_STA_AUTH:
- if (sta->sta_state == IEEE80211_STA_NONE)
+ if (sta->sta_state == IEEE80211_STA_NONE) {
set_bit(WLAN_STA_AUTH, &sta->_flags);
- else if (sta->sta_state == IEEE80211_STA_ASSOC)
+ } else if (sta->sta_state == IEEE80211_STA_ASSOC) {
clear_bit(WLAN_STA_ASSOC, &sta->_flags);
+ ieee80211_recalc_min_chandef(sta->sdata);
+ }
break;
case IEEE80211_STA_ASSOC:
if (sta->sta_state == IEEE80211_STA_AUTH) {
set_bit(WLAN_STA_ASSOC, &sta->_flags);
+ ieee80211_recalc_min_chandef(sta->sdata);
} else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
(sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index d6051629ed15..053f5c4fa495 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -1,6 +1,7 @@
/*
* Copyright 2002-2005, Devicescape Software, Inc.
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -167,6 +168,8 @@ struct tid_ampdu_tx {
*
* @reorder_buf: buffer to reorder incoming aggregated MPDUs. An MPDU may be an
* A-MSDU with individually reported subframes.
+ * @reorder_buf_filtered: bitmap indicating where there are filtered frames in
+ * the reorder buffer that should be ignored when releasing frames
* @reorder_time: jiffies when skb was added
* @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
* @reorder_timer: releases expired frames from the reorder buffer.
@@ -194,6 +197,7 @@ struct tid_ampdu_tx {
struct tid_ampdu_rx {
struct rcu_head rcu_head;
spinlock_t reorder_lock;
+ u64 reorder_buf_filtered;
struct sk_buff_head *reorder_buf;
unsigned long *reorder_time;
struct timer_list session_timer;
@@ -212,20 +216,21 @@ struct tid_ampdu_rx {
/**
* struct sta_ampdu_mlme - STA aggregation information.
*
+ * @mtx: mutex to protect all TX data (except non-NULL assignments
+ * to tid_tx[idx], which are protected by the sta spinlock)
+ * tid_start_tx is also protected by sta->lock.
* @tid_rx: aggregation info for Rx per TID -- RCU protected
- * @tid_tx: aggregation info for Tx per TID
- * @tid_start_tx: sessions where start was requested
- * @addba_req_num: number of times addBA request has been sent.
- * @last_addba_req_time: timestamp of the last addBA request.
- * @dialog_token_allocator: dialog token enumerator for each new session;
- * @work: work struct for starting/stopping aggregation
* @tid_rx_timer_expired: bitmap indicating on which TIDs the
* RX timer expired until the work for it runs
* @tid_rx_stop_requested: bitmap indicating which BA sessions per TID the
* driver requested to close until the work for it runs
- * @mtx: mutex to protect all TX data (except non-NULL assignments
- * to tid_tx[idx], which are protected by the sta spinlock)
- * tid_start_tx is also protected by sta->lock.
+ * @agg_session_valid: bitmap indicating which TIDs have an rx BA session open
+ * @work: work struct for starting/stopping aggregation
+ * @tid_tx: aggregation info for Tx per TID
+ * @tid_start_tx: sessions where start was requested
+ * @last_addba_req_time: timestamp of the last addBA request.
+ * @addba_req_num: number of times addBA request has been sent.
+ * @dialog_token_allocator: dialog token enumerator for each new session;
*/
struct sta_ampdu_mlme {
struct mutex mtx;
@@ -233,6 +238,7 @@ struct sta_ampdu_mlme {
struct tid_ampdu_rx __rcu *tid_rx[IEEE80211_NUM_TIDS];
unsigned long tid_rx_timer_expired[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
unsigned long tid_rx_stop_requested[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
+ unsigned long agg_session_valid[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
/* tx */
struct work_struct work;
struct tid_ampdu_tx __rcu *tid_tx[IEEE80211_NUM_TIDS];
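
An illustrative sketch of how the new u64 bitmap pairs with the reorder buffer, assuming (as the type implies) a window of at most 64 slots; 'index' stands for the slot a given sequence number maps to:

        tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);    /* mark */

        if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
                ; /* no skb to release here - the HW filter consumed it */
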
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 6101deb805a8..8b1b2ea03eb5 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -697,7 +697,7 @@ void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
rtap_len, shift);
/* XXX: is this sufficient for BPF? */
- skb_set_mac_header(skb, 0);
+ skb_reset_mac_header(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
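
The skb_reset_mac_header() conversions in this patch are behavior-preserving: resetting is simply the offset-0 case of setting. Simplified versions of the two helpers (the offset-based skbuff variant, for illustration):

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
        skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
        skb_reset_mac_header(skb);
        skb->mac_header += offset;
}
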
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 0ae207771a58..b3622823bad2 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -1,6 +1,7 @@
/*
* Copyright 2002-2004, Instant802 Networks, Inc.
* Copyright 2005, Devicescape Software, Inc.
+ * Copyright (C) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -142,15 +143,14 @@ static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
/* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets
* of the IV. Returns pointer to the octet following IVs (i.e., beginning of
* the packet payload). */
-u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key)
+u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key_conf *keyconf, u64 pn)
{
- lockdep_assert_held(&key->u.tkip.txlock);
-
- pos = write_tkip_iv(pos, key->u.tkip.tx.iv16);
- *pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */;
- put_unaligned_le32(key->u.tkip.tx.iv32, pos);
+ pos = write_tkip_iv(pos, TKIP_PN_TO_IV16(pn));
+ *pos++ = (keyconf->keyidx << 6) | (1 << 5) /* Ext IV */;
+ put_unaligned_le32(TKIP_PN_TO_IV32(pn), pos);
return pos + 4;
}
+EXPORT_SYMBOL_GPL(ieee80211_tkip_add_iv);
static void ieee80211_compute_tkip_p1k(struct ieee80211_key *key, u32 iv32)
{
@@ -250,6 +250,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
u8 rc4key[16], keyid, *pos = payload;
int res;
const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
+ struct tkip_ctx_rx *rx_ctx = &key->u.tkip.rx[queue];
if (payload_len < 12)
return -1;
@@ -265,37 +266,36 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
if ((keyid >> 6) != key->conf.keyidx)
return TKIP_DECRYPT_INVALID_KEYIDX;
- if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT &&
- (iv32 < key->u.tkip.rx[queue].iv32 ||
- (iv32 == key->u.tkip.rx[queue].iv32 &&
- iv16 <= key->u.tkip.rx[queue].iv16)))
+ if (rx_ctx->ctx.state != TKIP_STATE_NOT_INIT &&
+ (iv32 < rx_ctx->iv32 ||
+ (iv32 == rx_ctx->iv32 && iv16 <= rx_ctx->iv16)))
return TKIP_DECRYPT_REPLAY;
if (only_iv) {
res = TKIP_DECRYPT_OK;
- key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
+ rx_ctx->ctx.state = TKIP_STATE_PHASE1_HW_UPLOADED;
goto done;
}
- if (key->u.tkip.rx[queue].state == TKIP_STATE_NOT_INIT ||
- key->u.tkip.rx[queue].iv32 != iv32) {
+ if (rx_ctx->ctx.state == TKIP_STATE_NOT_INIT ||
+ rx_ctx->iv32 != iv32) {
/* IV16 wrapped around - perform TKIP phase 1 */
- tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32);
+ tkip_mixing_phase1(tk, &rx_ctx->ctx, ta, iv32);
}
if (key->local->ops->update_tkip_key &&
key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
- key->u.tkip.rx[queue].state != TKIP_STATE_PHASE1_HW_UPLOADED) {
+ rx_ctx->ctx.state != TKIP_STATE_PHASE1_HW_UPLOADED) {
struct ieee80211_sub_if_data *sdata = key->sdata;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(key->sdata->bss,
struct ieee80211_sub_if_data, u.ap);
drv_update_tkip_key(key->local, sdata, &key->conf, key->sta,
- iv32, key->u.tkip.rx[queue].p1k);
- key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
+ iv32, rx_ctx->ctx.p1k);
+ rx_ctx->ctx.state = TKIP_STATE_PHASE1_HW_UPLOADED;
}
- tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key);
+ tkip_mixing_phase2(tk, &rx_ctx->ctx, iv16, rc4key);
res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12);
done:
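
With the TX IV pair replaced by a single 64-bit packet number, the IV halves are just bitfields of the PN. A worked example, with macro definitions assumed to match the usage above:

#define TKIP_PN_TO_IV16(pn)     ((u16)((pn) & 0xffff))
#define TKIP_PN_TO_IV32(pn)     ((u32)(((pn) >> 16) & 0xffffffff))

        u64 pn = 0x0123456789abULL;     /* 48-bit TKIP packet number */
        u16 iv16 = TKIP_PN_TO_IV16(pn); /* 0x89ab */
        u32 iv32 = TKIP_PN_TO_IV32(pn); /* 0x01234567 */
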
diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h
index e3ecb659b90a..a1bcbfbefe7c 100644
--- a/net/mac80211/tkip.h
+++ b/net/mac80211/tkip.h
@@ -13,8 +13,6 @@
#include <linux/crypto.h>
#include "key.h"
-u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key);
-
int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm,
struct ieee80211_key *key,
struct sk_buff *skb,
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index a6b4442776a0..2b0a17ee907a 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -80,7 +80,23 @@
#define KEY_PR_FMT " cipher:0x%x, flags=%#x, keyidx=%d, hw_key_idx=%d"
#define KEY_PR_ARG __entry->cipher, __entry->flags, __entry->keyidx, __entry->hw_key_idx
-
+#define AMPDU_ACTION_ENTRY __field(enum ieee80211_ampdu_mlme_action, \
+ ieee80211_ampdu_mlme_action) \
+ STA_ENTRY \
+ __field(u16, tid) \
+ __field(u16, ssn) \
+ __field(u8, buf_size) \
+ __field(bool, amsdu) \
+ __field(u16, timeout)
+#define AMPDU_ACTION_ASSIGN STA_NAMED_ASSIGN(params->sta); \
+ __entry->tid = params->tid; \
+ __entry->ssn = params->ssn; \
+ __entry->buf_size = params->buf_size; \
+ __entry->amsdu = params->amsdu; \
+ __entry->timeout = params->timeout;
+#define AMPDU_ACTION_PR_FMT STA_PR_FMT " tid %d, ssn %d, buf_size %u, amsdu %d, timeout %d"
+#define AMPDU_ACTION_PR_ARG STA_PR_ARG, __entry->tid, __entry->ssn, \
+ __entry->buf_size, __entry->amsdu, __entry->timeout
/*
* Tracing for driver callbacks.
@@ -970,38 +986,25 @@ DEFINE_EVENT(local_only_evt, drv_tx_last_beacon,
TRACE_EVENT(drv_ampdu_action,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid,
- u16 *ssn, u8 buf_size, bool amsdu),
+ struct ieee80211_ampdu_params *params),
- TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size, amsdu),
+ TP_ARGS(local, sdata, params),
TP_STRUCT__entry(
LOCAL_ENTRY
- STA_ENTRY
- __field(u32, action)
- __field(u16, tid)
- __field(u16, ssn)
- __field(u8, buf_size)
- __field(bool, amsdu)
VIF_ENTRY
+ AMPDU_ACTION_ENTRY
),
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
- STA_ASSIGN;
- __entry->action = action;
- __entry->tid = tid;
- __entry->ssn = ssn ? *ssn : 0;
- __entry->buf_size = buf_size;
- __entry->amsdu = amsdu;
+ AMPDU_ACTION_ASSIGN;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d amsdu:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action,
- __entry->tid, __entry->buf_size, __entry->amsdu
+ LOCAL_PR_FMT VIF_PR_FMT AMPDU_ACTION_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, AMPDU_ACTION_PR_ARG
)
);
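
The AMPDU_ACTION_* macros factor the per-session fields out of the event so further tracepoints can reuse them. A hypothetical second consumer would look like this (illustrative only, not part of the patch):

TRACE_EVENT(drv_ampdu_action_done,      /* hypothetical event name */
        TP_PROTO(struct ieee80211_local *local,
                 struct ieee80211_ampdu_params *params),
        TP_ARGS(local, params),
        TP_STRUCT__entry(
                LOCAL_ENTRY
                AMPDU_ACTION_ENTRY
        ),
        TP_fast_assign(
                LOCAL_ASSIGN;
                AMPDU_ACTION_ASSIGN;
        ),
        TP_printk(
                LOCAL_PR_FMT AMPDU_ACTION_PR_FMT,
                LOCAL_PR_ARG, AMPDU_ACTION_PR_ARG
        )
);
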
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3311ce0f3d6c..62ad5321257d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -710,6 +710,10 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
info->control.short_preamble = txrc.short_preamble;
+ /* don't ask rate control when rate already injected via radiotap */
+ if (info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)
+ return TX_CONTINUE;
+
if (tx->sta)
assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
@@ -1266,7 +1270,11 @@ static void ieee80211_drv_tx(struct ieee80211_local *local,
if (atomic_read(&sdata->txqs_len[ac]) >= local->hw.txq_ac_max_pending)
netif_stop_subqueue(sdata->dev, ac);
- skb_queue_tail(&txqi->queue, skb);
+ spin_lock_bh(&txqi->queue.lock);
+ txqi->byte_cnt += skb->len;
+ __skb_queue_tail(&txqi->queue, skb);
+ spin_unlock_bh(&txqi->queue.lock);
+
drv_wake_tx_queue(local, txqi);
return;
@@ -1294,6 +1302,8 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
if (!skb)
goto out;
+ txqi->byte_cnt -= skb->len;
+
atomic_dec(&sdata->txqs_len[ac]);
if (__netif_subqueue_stopped(sdata->dev, ac))
ieee80211_propagate_queue_wake(local, sdata->vif.hw_queue[ac]);
@@ -1665,15 +1675,24 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
ieee80211_tx(sdata, sta, skb, false);
}
-static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
+static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local,
+ struct sk_buff *skb)
{
struct ieee80211_radiotap_iterator iterator;
struct ieee80211_radiotap_header *rthdr =
(struct ieee80211_radiotap_header *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_supported_band *sband =
+ local->hw.wiphy->bands[info->band];
int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
NULL);
u16 txflags;
+ u16 rate = 0;
+ bool rate_found = false;
+ u8 rate_retries = 0;
+ u16 rate_flags = 0;
+ u8 mcs_known, mcs_flags;
+ int i;
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
IEEE80211_TX_CTL_DONTFRAG;
@@ -1724,6 +1743,35 @@ static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
info->flags |= IEEE80211_TX_CTL_NO_ACK;
break;
+ case IEEE80211_RADIOTAP_RATE:
+ rate = *iterator.this_arg;
+ rate_flags = 0;
+ rate_found = true;
+ break;
+
+ case IEEE80211_RADIOTAP_DATA_RETRIES:
+ rate_retries = *iterator.this_arg;
+ break;
+
+ case IEEE80211_RADIOTAP_MCS:
+ mcs_known = iterator.this_arg[0];
+ mcs_flags = iterator.this_arg[1];
+ if (!(mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_MCS))
+ break;
+
+ rate_found = true;
+ rate = iterator.this_arg[2];
+ rate_flags = IEEE80211_TX_RC_MCS;
+
+ if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_GI &&
+ mcs_flags & IEEE80211_RADIOTAP_MCS_SGI)
+ rate_flags |= IEEE80211_TX_RC_SHORT_GI;
+
+ if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_BW &&
+ mcs_flags & IEEE80211_RADIOTAP_MCS_BW_40)
+ rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+
/*
* Please update the file
* Documentation/networking/mac80211-injection.txt
@@ -1738,6 +1786,32 @@ static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
return false;
+ if (rate_found) {
+ info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;
+
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ info->control.rates[i].idx = -1;
+ info->control.rates[i].flags = 0;
+ info->control.rates[i].count = 0;
+ }
+
+ if (rate_flags & IEEE80211_TX_RC_MCS) {
+ info->control.rates[0].idx = rate;
+ } else {
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (rate * 5 != sband->bitrates[i].bitrate)
+ continue;
+
+ info->control.rates[0].idx = i;
+ break;
+ }
+ }
+
+ info->control.rates[0].flags = rate_flags;
+ info->control.rates[0].count = min_t(u8, rate_retries + 1,
+ local->hw.max_rate_tries);
+ }
+
/*
* remove the radiotap header
* iterator->_max_length was sanity-checked against
@@ -1818,10 +1892,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_CTL_INJECTED;
- /* process and remove the injection radiotap header */
- if (!ieee80211_parse_tx_radiotap(skb))
- goto fail;
-
rcu_read_lock();
/*
@@ -1883,6 +1953,11 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
goto fail_rcu;
info->band = chandef->chan->band;
+
+ /* process and remove the injection radiotap header */
+ if (!ieee80211_parse_tx_radiotap(local, skb))
+ goto fail_rcu;
+
ieee80211_xmit(sdata, NULL, skb);
rcu_read_unlock();
@@ -2099,8 +2174,11 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
mpp_lookup = true;
}
- if (mpp_lookup)
+ if (mpp_lookup) {
mppath = mpp_path_lookup(sdata, skb->data);
+ if (mppath)
+ mppath->exp_time = jiffies;
+ }
if (mppath && mpath)
mesh_path_del(mpath->sdata, mpath->dst);
@@ -2380,7 +2458,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
/* Update skb pointers to various headers since this modified frame
* is going to go through Linux networking code that may potentially
* need things like pointer to IP header. */
- skb_set_mac_header(skb, 0);
+ skb_reset_mac_header(skb);
skb_set_network_header(skb, nh_pos);
skb_set_transport_header(skb, h_pos);
@@ -3895,9 +3973,9 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
{
int ac = ieee802_1d_to_ac[tid & 7];
- skb_set_mac_header(skb, 0);
- skb_set_network_header(skb, 0);
- skb_set_transport_header(skb, 0);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
skb_set_queue_mapping(skb, ac);
skb->priority = tid;
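
For the rate-injection support added above, a hedged sketch of the userspace side: a minimal radiotap header carrying only a RATE field, to be prepended to an 802.11 frame written to a monitor interface. The rate octet is in 500 kbit/s units; the parser matches rate * 5 against the band's bitrate table (which is in 100 kbit/s units):

/* 8-byte fixed radiotap header plus one rate octet */
static const unsigned char rtap_hdr[] = {
        0x00,                   /* it_version */
        0x00,                   /* padding */
        0x09, 0x00,             /* it_len = 9, little endian */
        0x04, 0x00, 0x00, 0x00, /* it_present: bit 2 = RATE */
        0x18,                   /* 24 * 500 kbit/s = 12 Mbit/s */
};

The IEEE80211_TX_CTRL_RATE_INJECT flag then keeps rate control from overriding the choice, as the ieee80211_tx_h_rate_ctrl() hunk shows.
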
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 58f58bd5202f..7390de4946a9 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -4,7 +4,7 @@
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007 Johannes Berg <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2015-2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1928,6 +1928,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
BSS_CHANGED_IDLE |
BSS_CHANGED_TXPOWER;
+ if (sdata->vif.mu_mimo_owner)
+ changed |= BSS_CHANGED_MU_GROUPS;
+
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
changed |= BSS_CHANGED_ASSOC |
@@ -2371,10 +2374,23 @@ u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
switch (chandef->width) {
case NL80211_CHAN_WIDTH_160:
- vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_160MHZ;
+ /*
+ * Convert 160 MHz channel width to new style as interop
+ * workaround.
+ */
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ vht_oper->center_freq_seg2_idx = vht_oper->center_freq_seg1_idx;
+ if (chandef->chan->center_freq < chandef->center_freq1)
+ vht_oper->center_freq_seg1_idx -= 8;
+ else
+ vht_oper->center_freq_seg1_idx += 8;
break;
case NL80211_CHAN_WIDTH_80P80:
- vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
+ /*
+ * Convert 80+80 MHz channel width to new style as interop
+ * workaround.
+ */
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
break;
case NL80211_CHAN_WIDTH_80:
vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
@@ -2390,17 +2406,13 @@ u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
return pos + sizeof(struct ieee80211_vht_operation);
}
-void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
- const struct ieee80211_ht_operation *ht_oper,
- struct cfg80211_chan_def *chandef)
+bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
+ struct cfg80211_chan_def *chandef)
{
enum nl80211_channel_type channel_type;
- if (!ht_oper) {
- cfg80211_chandef_create(chandef, control_chan,
- NL80211_CHAN_NO_HT);
- return;
- }
+ if (!ht_oper)
+ return false;
switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
case IEEE80211_HT_PARAM_CHA_SEC_NONE:
@@ -2414,42 +2426,66 @@ void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
break;
default:
channel_type = NL80211_CHAN_NO_HT;
+ return false;
}
- cfg80211_chandef_create(chandef, control_chan, channel_type);
+ cfg80211_chandef_create(chandef, chandef->chan, channel_type);
+ return true;
}
-void ieee80211_vht_oper_to_chandef(struct ieee80211_channel *control_chan,
- const struct ieee80211_vht_operation *oper,
- struct cfg80211_chan_def *chandef)
+bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper,
+ struct cfg80211_chan_def *chandef)
{
+ struct cfg80211_chan_def new = *chandef;
+ int cf1, cf2;
+
if (!oper)
- return;
+ return false;
- chandef->chan = control_chan;
+ cf1 = ieee80211_channel_to_frequency(oper->center_freq_seg1_idx,
+ chandef->chan->band);
+ cf2 = ieee80211_channel_to_frequency(oper->center_freq_seg2_idx,
+ chandef->chan->band);
switch (oper->chan_width) {
case IEEE80211_VHT_CHANWIDTH_USE_HT:
break;
case IEEE80211_VHT_CHANWIDTH_80MHZ:
- chandef->width = NL80211_CHAN_WIDTH_80;
+ new.width = NL80211_CHAN_WIDTH_80;
+ new.center_freq1 = cf1;
+ /* If needed, adjust based on the newer interop workaround. */
+ if (oper->center_freq_seg2_idx) {
+ unsigned int diff;
+
+ diff = abs(oper->center_freq_seg2_idx -
+ oper->center_freq_seg1_idx);
+ if (diff == 8) {
+ new.width = NL80211_CHAN_WIDTH_160;
+ new.center_freq1 = cf2;
+ } else if (diff > 8) {
+ new.width = NL80211_CHAN_WIDTH_80P80;
+ new.center_freq2 = cf2;
+ }
+ }
break;
case IEEE80211_VHT_CHANWIDTH_160MHZ:
- chandef->width = NL80211_CHAN_WIDTH_160;
+ new.width = NL80211_CHAN_WIDTH_160;
+ new.center_freq1 = cf1;
break;
case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
- chandef->width = NL80211_CHAN_WIDTH_80P80;
+ new.width = NL80211_CHAN_WIDTH_80P80;
+ new.center_freq1 = cf1;
+ new.center_freq2 = cf2;
break;
default:
- break;
+ return false;
}
- chandef->center_freq1 =
- ieee80211_channel_to_frequency(oper->center_freq_seg1_idx,
- control_chan->band);
- chandef->center_freq2 =
- ieee80211_channel_to_frequency(oper->center_freq_seg2_idx,
- control_chan->band);
+ if (!cfg80211_chandef_valid(&new))
+ return false;
+
+ *chandef = new;
+ return true;
}
int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
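
A worked example of the interop encoding and decoding above (channel numbers illustrative):

/*
 * 160 MHz BSS, control channel 36 (5180 MHz), centered on channel 50
 * (5250 MHz).
 *
 * Building the IE:  chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ
 *                   seg2 = 50           (the real 160 MHz center)
 *                   seg1 = 50 - 8 = 42  (control channel below center,
 *                                        so the -8 branch is taken)
 *
 * Parsing the IE:   |seg2 - seg1| == 8  -> NL80211_CHAN_WIDTH_160,
 *                                          center_freq1 = freq(50)
 *                   |seg2 - seg1| > 8   -> NL80211_CHAN_WIDTH_80P80,
 *                                          center_freq2 = freq(seg2)
 */
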
@@ -2672,6 +2708,18 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
sband = local->hw.wiphy->bands[status->band];
bitrate = sband->bitrates[status->rate_idx].bitrate;
ri.legacy = DIV_ROUND_UP(bitrate, (1 << shift));
+
+ if (status->flag & RX_FLAG_MACTIME_PLCP_START) {
+ /* TODO: handle HT/VHT preambles */
+ if (status->band == IEEE80211_BAND_5GHZ) {
+ ts += 20 << shift;
+ mpdu_offset += 2;
+ } else if (status->flag & RX_FLAG_SHORTPRE) {
+ ts += 96;
+ } else {
+ ts += 192;
+ }
+ }
}
rate = cfg80211_calculate_bitrate(&ri);
@@ -3357,3 +3405,17 @@ void ieee80211_init_tx_queue(struct ieee80211_sub_if_data *sdata,
txqi->txq.ac = IEEE80211_AC_BE;
}
}
+
+void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
+ unsigned long *frame_cnt,
+ unsigned long *byte_cnt)
+{
+ struct txq_info *txqi = to_txq_info(txq);
+
+ if (frame_cnt)
+ *frame_cnt = txqi->queue.qlen;
+
+ if (byte_cnt)
+ *byte_cnt = txqi->byte_cnt;
+}
+EXPORT_SYMBOL(ieee80211_txq_get_depth);
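
A hypothetical driver usage of the new export; either out-pointer may be NULL when only one figure is wanted:

        unsigned long frame_cnt, byte_cnt;

        ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
        if (byte_cnt < MYDRV_AGG_BYTE_THRESHOLD)        /* hypothetical */
                ; /* e.g. hold off and let more frames accumulate */
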
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index c38b2f07a919..89e04d55aa18 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -1,6 +1,9 @@
/*
* VHT handling
*
+ * Portions of this file
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -278,6 +281,23 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
}
sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
+
+ /* If HT IE reported 3839 bytes only, stay with that size. */
+ if (sta->sta.max_amsdu_len == IEEE80211_MAX_MPDU_LEN_HT_3839)
+ return;
+
+ switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
+ sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454;
+ break;
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
+ sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_7991;
+ break;
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895:
+ default:
+ sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_3895;
+ break;
+ }
}
enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
@@ -425,6 +445,43 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
return changed;
}
+void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt)
+{
+ struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+
+ if (!sdata->vif.mu_mimo_owner)
+ return;
+
+ if (!memcmp(mgmt->u.action.u.vht_group_notif.position,
+ bss_conf->mu_group.position, WLAN_USER_POSITION_LEN) &&
+ !memcmp(mgmt->u.action.u.vht_group_notif.membership,
+ bss_conf->mu_group.membership, WLAN_MEMBERSHIP_LEN))
+ return;
+
+ memcpy(bss_conf->mu_group.membership,
+ mgmt->u.action.u.vht_group_notif.membership,
+ WLAN_MEMBERSHIP_LEN);
+ memcpy(bss_conf->mu_group.position,
+ mgmt->u.action.u.vht_group_notif.position,
+ WLAN_USER_POSITION_LEN);
+
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_MU_GROUPS);
+}
+
+void ieee80211_update_mu_groups(struct ieee80211_vif *vif,
+ const u8 *membership, const u8 *position)
+{
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ if (WARN_ON_ONCE(!vif->mu_mimo_owner))
+ return;
+
+ memcpy(bss_conf->mu_group.membership, membership, WLAN_MEMBERSHIP_LEN);
+ memcpy(bss_conf->mu_group.position, position, WLAN_USER_POSITION_LEN);
+}
+EXPORT_SYMBOL_GPL(ieee80211_update_mu_groups);
+
void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, u8 opmode,
enum ieee80211_band band)
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index d824c38971ed..18848258adde 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -1,6 +1,7 @@
/*
* Copyright 2002-2004, Instant802 Networks, Inc.
* Copyright 2008, Jouni Malinen <[email protected]>
+ * Copyright (C) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -183,7 +184,6 @@ mic_fail_no_key:
return RX_DROP_UNUSABLE;
}
-
static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -191,6 +191,7 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
unsigned int hdrlen;
int len, tail;
+ u64 pn;
u8 *pos;
if (info->control.hw_key &&
@@ -222,12 +223,8 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
return 0;
/* Increase IV for the frame */
- spin_lock(&key->u.tkip.txlock);
- key->u.tkip.tx.iv16++;
- if (key->u.tkip.tx.iv16 == 0)
- key->u.tkip.tx.iv32++;
- pos = ieee80211_tkip_add_iv(pos, key);
- spin_unlock(&key->u.tkip.txlock);
+ pn = atomic64_inc_return(&key->conf.tx_pn);
+ pos = ieee80211_tkip_add_iv(pos, &key->conf, pn);
/* hwaccel - with software IV */
if (info->control.hw_key)
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index e8cab5bb80c6..87da85ae5a6b 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -218,7 +218,6 @@ void ieee802154_unregister_hw(struct ieee802154_hw *hw)
tasklet_kill(&local->tasklet);
flush_workqueue(local->workqueue);
- destroy_workqueue(local->workqueue);
rtnl_lock();
@@ -226,6 +225,7 @@ void ieee802154_unregister_hw(struct ieee802154_hw *hw)
rtnl_unlock();
+ destroy_workqueue(local->workqueue);
wpan_phy_unregister(local->phy);
}
EXPORT_SYMBOL(ieee802154_unregister_hw);
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index a3f5cd9b3c4c..dc196a0f501d 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -531,8 +531,6 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
if (ret == NF_ACCEPT) {
nf_reset(skb);
skb_forward_csum(skb);
- if (!skb->sk)
- skb_sender_cpu_clear(skb);
}
return ret;
}
@@ -573,8 +571,6 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
if (!local) {
skb_forward_csum(skb);
- if (!skb->sk)
- skb_sender_cpu_clear(skb);
NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
NULL, skb_dst(skb)->dev, dst_output);
} else
@@ -595,8 +591,6 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
if (!local) {
ip_vs_drop_early_demux_sk(skb);
skb_forward_csum(skb);
- if (!skb->sk)
- skb_sender_cpu_clear(skb);
NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
NULL, skb_dst(skb)->dev, dst_output);
} else
diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c
index 8414ee1a0319..7ec69723940f 100644
--- a/net/netfilter/nf_dup_netdev.c
+++ b/net/netfilter/nf_dup_netdev.c
@@ -31,7 +31,6 @@ void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif)
skb_push(skb, skb->mac_len);
skb->dev = dev;
- skb_sender_cpu_clear(skb);
dev_queue_xmit(skb);
}
EXPORT_SYMBOL_GPL(nf_dup_netdev_egress);
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index f0cb92f3ddaf..ada67422234b 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -55,8 +55,8 @@ struct netlbl_domhsh_tbl {
static DEFINE_SPINLOCK(netlbl_domhsh_lock);
#define netlbl_domhsh_rcu_deref(p) \
rcu_dereference_check(p, lockdep_is_held(&netlbl_domhsh_lock))
-static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL;
-static struct netlbl_dom_map *netlbl_domhsh_def = NULL;
+static struct netlbl_domhsh_tbl *netlbl_domhsh;
+static struct netlbl_dom_map *netlbl_domhsh_def;
/*
* Domain Hash Table Helper Functions
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index b0380927f05f..9eaa9a1e8629 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -116,11 +116,11 @@ struct netlbl_unlhsh_walk_arg {
static DEFINE_SPINLOCK(netlbl_unlhsh_lock);
#define netlbl_unlhsh_rcu_deref(p) \
rcu_dereference_check(p, lockdep_is_held(&netlbl_unlhsh_lock))
-static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL;
-static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL;
+static struct netlbl_unlhsh_tbl *netlbl_unlhsh;
+static struct netlbl_unlhsh_iface *netlbl_unlhsh_def;
/* Accept unlabeled packets flag */
-static u8 netlabel_unlabel_acceptflg = 0;
+static u8 netlabel_unlabel_acceptflg;
/* NetLabel Generic NETLINK unlabeled family */
static struct genl_family netlbl_unlabel_gnl_family = {
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index c4e8455d5d56..e6a7d494df24 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1908,6 +1908,29 @@ static struct vport *lookup_vport(struct net *net,
return ERR_PTR(-EINVAL);
}
+/* Called with ovs_mutex */
+static void update_headroom(struct datapath *dp)
+{
+ unsigned dev_headroom, max_headroom = 0;
+ struct net_device *dev;
+ struct vport *vport;
+ int i;
+
+ for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
+ hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
+ dev = vport->dev;
+ dev_headroom = netdev_get_fwd_headroom(dev);
+ if (dev_headroom > max_headroom)
+ max_headroom = dev_headroom;
+ }
+ }
+
+ dp->max_headroom = max_headroom;
+ for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
+ hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node)
+ netdev_set_rx_headroom(vport->dev, max_headroom);
+}
+
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
@@ -1973,6 +1996,12 @@ restart:
err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
info->snd_seq, 0, OVS_VPORT_CMD_NEW);
+
+ if (netdev_get_fwd_headroom(vport->dev) > dp->max_headroom)
+ update_headroom(dp);
+ else
+ netdev_set_rx_headroom(vport->dev, dp->max_headroom);
+
BUG_ON(err < 0);
ovs_unlock();
@@ -2039,8 +2068,10 @@ exit_unlock_free:
static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
+ bool must_update_headroom = false;
struct nlattr **a = info->attrs;
struct sk_buff *reply;
+ struct datapath *dp;
struct vport *vport;
int err;
@@ -2062,7 +2093,16 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
info->snd_seq, 0, OVS_VPORT_CMD_DEL);
BUG_ON(err < 0);
+
+ /* the vport deletion may trigger dp headroom update */
+ dp = vport->dp;
+ if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
+ must_update_headroom = true;
+ netdev_reset_rx_headroom(vport->dev);
ovs_dp_detach_port(vport);
+
+ if (must_update_headroom)
+ update_headroom(dp);
ovs_unlock();
ovs_notify(&dp_vport_genl_family, reply, info);
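
A worked scenario for the headroom tracking above (numbers hypothetical):

/*
 * A datapath holds one internal port (fwd headroom 0) and gains a
 * tunnel vport whose transmit path needs 50 bytes of encapsulation:
 *
 *   ovs_vport_cmd_new():
 *     netdev_get_fwd_headroom(tunnel_dev) = 50 > dp->max_headroom (0)
 *       -> update_headroom(dp): dp->max_headroom = 50
 *       -> netdev_set_rx_headroom(internal_dev, 50)
 *
 * skbs received on the internal port now already reserve 50 bytes, so
 * forwarding into the tunnel avoids reallocating every packet. When
 * the tunnel vport is deleted, its headroom equals dp->max_headroom,
 * so update_headroom() recomputes the maximum over the remaining ports.
 */
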
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 67bdecd9fdc1..427e39a045cf 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -68,6 +68,8 @@ struct dp_stats_percpu {
* ovs_mutex and RCU.
* @stats_percpu: Per-CPU datapath statistics.
* @net: Reference to net namespace.
+ * @max_headroom: the maximum headroom of all vports in this datapath; it will
+ * be used by all the internal vports in this dp.
*
* Context: See the comment on locking at the top of datapath.c for additional
* locking information.
@@ -89,6 +91,8 @@ struct datapath {
possible_net_t net;
u32 user_features;
+
+ u32 max_headroom;
};
/**
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index ec76398a792f..83a5534abd31 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -138,6 +138,11 @@ internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
return stats;
}
+void internal_set_rx_headroom(struct net_device *dev, int new_hr)
+{
+ dev->needed_headroom = new_hr;
+}
+
static const struct net_device_ops internal_dev_netdev_ops = {
.ndo_open = internal_dev_open,
.ndo_stop = internal_dev_stop,
@@ -145,6 +150,7 @@ static const struct net_device_ops internal_dev_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = internal_dev_change_mtu,
.ndo_get_stats64 = internal_get_stats,
+ .ndo_set_rx_headroom = internal_set_rx_headroom,
};
static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
@@ -158,7 +164,8 @@ static void do_setup(struct net_device *netdev)
netdev->netdev_ops = &internal_dev_netdev_ops;
netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
- netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH;
+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
+ IFF_PHONY_HEADROOM;
netdev->destructor = internal_dev_destructor;
netdev->ethtool_ops = &internal_dev_ethtool_ops;
netdev->rtnl_link_ops = &internal_dev_link_ops;
@@ -199,6 +206,7 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
err = -ENOMEM;
goto error_free_netdev;
}
+ vport->dev->needed_headroom = vport->dp->max_headroom;
dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
internal_dev = internal_dev_priv(vport->dev);
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index f2c670ba7b9b..bffde4b46c5d 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -4,14 +4,13 @@ config RDS
depends on INET
---help---
The RDS (Reliable Datagram Sockets) protocol provides reliable,
- sequenced delivery of datagrams over Infiniband, iWARP,
- or TCP.
+ sequenced delivery of datagrams over Infiniband or TCP.
config RDS_RDMA
- tristate "RDS over Infiniband and iWARP"
+ tristate "RDS over Infiniband"
depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
---help---
- Allow RDS to use Infiniband and iWARP as a transport.
+ Allow RDS to use Infiniband as a transport.
This transport supports RDMA operations.
config RDS_TCP
diff --git a/net/rds/Makefile b/net/rds/Makefile
index 56d3f6023ced..0e72bec1529f 100644
--- a/net/rds/Makefile
+++ b/net/rds/Makefile
@@ -6,9 +6,7 @@ rds-y := af_rds.o bind.o cong.o connection.o info.o message.o \
obj-$(CONFIG_RDS_RDMA) += rds_rdma.o
rds_rdma-y := rdma_transport.o \
ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \
- ib_sysctl.o ib_rdma.o \
- iw.o iw_cm.o iw_recv.o iw_ring.o iw_send.o iw_stats.o \
- iw_sysctl.o iw_rdma.o
+ ib_sysctl.o ib_rdma.o ib_fmr.o ib_frmr.o
obj-$(CONFIG_RDS_TCP) += rds_tcp.o
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index b5476aebd68d..6beaeb1138f3 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -277,6 +277,27 @@ static int rds_set_transport(struct rds_sock *rs, char __user *optval,
return rs->rs_transport ? 0 : -ENOPROTOOPT;
}
+static int rds_enable_recvtstamp(struct sock *sk, char __user *optval,
+ int optlen)
+{
+ int val, valbool;
+
+ if (optlen != sizeof(int))
+ return -EFAULT;
+
+ if (get_user(val, (int __user *)optval))
+ return -EFAULT;
+
+ valbool = val ? 1 : 0;
+
+ if (valbool)
+ sock_set_flag(sk, SOCK_RCVTSTAMP);
+ else
+ sock_reset_flag(sk, SOCK_RCVTSTAMP);
+
+ return 0;
+}
+
static int rds_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
@@ -312,6 +333,11 @@ static int rds_setsockopt(struct socket *sock, int level, int optname,
ret = rds_set_transport(rs, optval, optlen);
release_sock(sock->sk);
break;
+ case SO_TIMESTAMP:
+ lock_sock(sock->sk);
+ ret = rds_enable_recvtstamp(sock->sk, optval, optlen);
+ release_sock(sock->sk);
+ break;
default:
ret = -ENOPROTOOPT;
}
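
A hypothetical userspace sketch for the new option (error handling elided): enable SO_TIMESTAMP on an RDS socket, then pull the receive time out of the SCM_TIMESTAMP control message delivered with each datagram.

#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>

/* 'fd' is assumed to be a bound RDS socket and 'msg' a msghdr set up
 * with msg_control space for the timestamp cmsg */
static void rds_recv_with_tstamp(int fd, struct msghdr *msg,
                                 struct timeval *tv)
{
        struct cmsghdr *cmsg;
        int on = 1;

        setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
        recvmsg(fd, msg, 0);
        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg))
                if (cmsg->cmsg_level == SOL_SOCKET &&
                    cmsg->cmsg_type == SCM_TIMESTAMP)
                        memcpy(tv, CMSG_DATA(cmsg), sizeof(*tv));
}
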
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 9481d55ff6cb..b5342fddaf98 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -42,15 +42,16 @@
#include "rds.h"
#include "ib.h"
+#include "ib_mr.h"
-unsigned int rds_ib_fmr_1m_pool_size = RDS_FMR_1M_POOL_SIZE;
-unsigned int rds_ib_fmr_8k_pool_size = RDS_FMR_8K_POOL_SIZE;
+unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE;
+unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE;
unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
-module_param(rds_ib_fmr_1m_pool_size, int, 0444);
-MODULE_PARM_DESC(rds_ib_fmr_1m_pool_size, " Max number of 1M fmr per HCA");
-module_param(rds_ib_fmr_8k_pool_size, int, 0444);
-MODULE_PARM_DESC(rds_ib_fmr_8k_pool_size, " Max number of 8K fmr per HCA");
+module_param(rds_ib_mr_1m_pool_size, int, 0444);
+MODULE_PARM_DESC(rds_ib_mr_1m_pool_size, " Max number of 1M mr per HCA");
+module_param(rds_ib_mr_8k_pool_size, int, 0444);
+MODULE_PARM_DESC(rds_ib_mr_8k_pool_size, " Max number of 8K mr per HCA");
module_param(rds_ib_retry_count, int, 0444);
MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");
@@ -139,14 +140,20 @@ static void rds_ib_add_one(struct ib_device *device)
rds_ibdev->max_wrs = device->attrs.max_qp_wr;
rds_ibdev->max_sge = min(device->attrs.max_sge, RDS_IB_MAX_SGE);
+ rds_ibdev->has_fr = (device->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS);
+ rds_ibdev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
+ device->map_phys_fmr && device->unmap_fmr);
+ rds_ibdev->use_fastreg = (rds_ibdev->has_fr && !rds_ibdev->has_fmr);
+
rds_ibdev->fmr_max_remaps = device->attrs.max_map_per_fmr?: 32;
- rds_ibdev->max_1m_fmrs = device->attrs.max_mr ?
+ rds_ibdev->max_1m_mrs = device->attrs.max_mr ?
min_t(unsigned int, (device->attrs.max_mr / 2),
- rds_ib_fmr_1m_pool_size) : rds_ib_fmr_1m_pool_size;
+ rds_ib_mr_1m_pool_size) : rds_ib_mr_1m_pool_size;
- rds_ibdev->max_8k_fmrs = device->attrs.max_mr ?
+ rds_ibdev->max_8k_mrs = device->attrs.max_mr ?
min_t(unsigned int, ((device->attrs.max_mr / 2) * RDS_MR_8K_SCALE),
- rds_ib_fmr_8k_pool_size) : rds_ib_fmr_8k_pool_size;
+ rds_ib_mr_8k_pool_size) : rds_ib_mr_8k_pool_size;
rds_ibdev->max_initiator_depth = device->attrs.max_qp_init_rd_atom;
rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom;
@@ -172,10 +179,14 @@ static void rds_ib_add_one(struct ib_device *device)
goto put_dev;
}
- rdsdebug("RDS/IB: max_mr = %d, max_wrs = %d, max_sge = %d, fmr_max_remaps = %d, max_1m_fmrs = %d, max_8k_fmrs = %d\n",
+ rdsdebug("RDS/IB: max_mr = %d, max_wrs = %d, max_sge = %d, fmr_max_remaps = %d, max_1m_mrs = %d, max_8k_mrs = %d\n",
device->attrs.max_fmr, rds_ibdev->max_wrs, rds_ibdev->max_sge,
- rds_ibdev->fmr_max_remaps, rds_ibdev->max_1m_fmrs,
- rds_ibdev->max_8k_fmrs);
+ rds_ibdev->fmr_max_remaps, rds_ibdev->max_1m_mrs,
+ rds_ibdev->max_8k_mrs);
+
+ pr_info("RDS/IB: %s: %s supported and preferred\n",
+ device->name,
+ rds_ibdev->use_fastreg ? "FRMR" : "FMR");
INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
INIT_LIST_HEAD(&rds_ibdev->conn_list);
@@ -364,7 +375,7 @@ void rds_ib_exit(void)
rds_ib_sysctl_exit();
rds_ib_recv_exit();
rds_trans_unregister(&rds_ib_transport);
- rds_ib_fmr_exit();
+ rds_ib_mr_exit();
}
struct rds_transport rds_ib_transport = {
@@ -400,13 +411,13 @@ int rds_ib_init(void)
INIT_LIST_HEAD(&rds_ib_devices);
- ret = rds_ib_fmr_init();
+ ret = rds_ib_mr_init();
if (ret)
goto out;
ret = ib_register_client(&rds_ib_client);
if (ret)
- goto out_fmr_exit;
+ goto out_mr_exit;
ret = rds_ib_sysctl_init();
if (ret)
@@ -430,8 +441,8 @@ out_sysctl:
rds_ib_sysctl_exit();
out_ibreg:
rds_ib_unregister_client();
-out_fmr_exit:
- rds_ib_fmr_exit();
+out_mr_exit:
+ rds_ib_mr_exit();
out:
return ret;
}
diff --git a/net/rds/ib.h b/net/rds/ib.h
index b3fdebb57460..627fb79aee65 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -9,17 +9,12 @@
#include "rds.h"
#include "rdma_transport.h"
-#define RDS_FMR_1M_POOL_SIZE (8192 / 2)
-#define RDS_FMR_1M_MSG_SIZE 256
-#define RDS_FMR_8K_MSG_SIZE 2
-#define RDS_MR_8K_SCALE (256 / (RDS_FMR_8K_MSG_SIZE + 1))
-#define RDS_FMR_8K_POOL_SIZE (RDS_MR_8K_SCALE * (8192 / 2))
-
#define RDS_IB_MAX_SGE 8
#define RDS_IB_RECV_SGE 2
#define RDS_IB_DEFAULT_RECV_WR 1024
#define RDS_IB_DEFAULT_SEND_WR 256
+#define RDS_IB_DEFAULT_FR_WR 512
#define RDS_IB_DEFAULT_RETRY_COUNT 2
@@ -28,7 +23,6 @@
#define RDS_IB_RECYCLE_BATCH_COUNT 32
#define RDS_IB_WC_MAX 32
-#define RDS_IB_SEND_OP BIT_ULL(63)
extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;
@@ -129,6 +123,9 @@ struct rds_ib_connection {
struct ib_wc i_send_wc[RDS_IB_WC_MAX];
struct ib_wc i_recv_wc[RDS_IB_WC_MAX];
+ /* To control the number of wrs from fastreg */
+ atomic_t i_fastreg_wrs;
+
/* interrupt handling */
struct tasklet_struct i_send_tasklet;
struct tasklet_struct i_recv_tasklet;
@@ -207,12 +204,16 @@ struct rds_ib_device {
struct list_head conn_list;
struct ib_device *dev;
struct ib_pd *pd;
- unsigned int max_fmrs;
+ bool has_fmr;
+ bool has_fr;
+ bool use_fastreg;
+
+ unsigned int max_mrs;
struct rds_ib_mr_pool *mr_1m_pool;
struct rds_ib_mr_pool *mr_8k_pool;
unsigned int fmr_max_remaps;
- unsigned int max_8k_fmrs;
- unsigned int max_1m_fmrs;
+ unsigned int max_8k_mrs;
+ unsigned int max_1m_mrs;
int max_sge;
unsigned int max_wrs;
unsigned int max_initiator_depth;
@@ -266,6 +267,8 @@ struct rds_ib_statistics {
uint64_t s_ib_rdma_mr_1m_pool_flush;
uint64_t s_ib_rdma_mr_1m_pool_wait;
uint64_t s_ib_rdma_mr_1m_pool_depleted;
+ uint64_t s_ib_rdma_mr_8k_reused;
+ uint64_t s_ib_rdma_mr_1m_reused;
uint64_t s_ib_atomic_cswp;
uint64_t s_ib_atomic_fadd;
};
@@ -317,8 +320,6 @@ struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;
-extern unsigned int rds_ib_fmr_1m_pool_size;
-extern unsigned int rds_ib_fmr_8k_pool_size;
extern unsigned int rds_ib_retry_count;
extern spinlock_t ib_nodev_conns_lock;
@@ -348,17 +349,7 @@ int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
-struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_dev,
- int npages);
-void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
-void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
-void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
- struct rds_sock *rs, u32 *key_ret);
-void rds_ib_sync_mr(void *trans_private, int dir);
-void rds_ib_free_mr(void *trans_private, int invalidate);
-void rds_ib_flush_mrs(void);
-int rds_ib_fmr_init(void);
-void rds_ib_fmr_exit(void);
+void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
/* ib_recv.c */
int rds_ib_recv_init(void);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index da5a7fb98c77..8764970f0c24 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -236,12 +236,10 @@ static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
tasklet_schedule(&ic->i_recv_tasklet);
}
-static void poll_cq(struct rds_ib_connection *ic, struct ib_cq *cq,
- struct ib_wc *wcs,
- struct rds_ib_ack_state *ack_state)
+static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
+ struct ib_wc *wcs)
{
- int nr;
- int i;
+ int nr, i;
struct ib_wc *wc;
while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
@@ -251,10 +249,12 @@ static void poll_cq(struct rds_ib_connection *ic, struct ib_cq *cq,
(unsigned long long)wc->wr_id, wc->status,
wc->byte_len, be32_to_cpu(wc->ex.imm_data));
- if (wc->wr_id & RDS_IB_SEND_OP)
+ if (wc->wr_id <= ic->i_send_ring.w_nr ||
+ wc->wr_id == RDS_IB_ACK_WR_ID)
rds_ib_send_cqe_handler(ic, wc);
else
- rds_ib_recv_cqe_handler(ic, wc, ack_state);
+ rds_ib_mr_cqe_handler(ic, wc);
+
}
}
}
@@ -263,14 +263,12 @@ static void rds_ib_tasklet_fn_send(unsigned long data)
{
struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
struct rds_connection *conn = ic->conn;
- struct rds_ib_ack_state state;
rds_ib_stats_inc(s_ib_tasklet_call);
- memset(&state, 0, sizeof(state));
- poll_cq(ic, ic->i_send_cq, ic->i_send_wc, &state);
+ poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
- poll_cq(ic, ic->i_send_cq, ic->i_send_wc, &state);
+ poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
if (rds_conn_up(conn) &&
(!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
@@ -278,6 +276,25 @@ static void rds_ib_tasklet_fn_send(unsigned long data)
rds_send_xmit(ic->conn);
}
+static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
+ struct ib_wc *wcs,
+ struct rds_ib_ack_state *ack_state)
+{
+ int nr, i;
+ struct ib_wc *wc;
+
+ while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
+ for (i = 0; i < nr; i++) {
+ wc = wcs + i;
+ rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
+ (unsigned long long)wc->wr_id, wc->status,
+ wc->byte_len, be32_to_cpu(wc->ex.imm_data));
+
+ rds_ib_recv_cqe_handler(ic, wc, ack_state);
+ }
+ }
+}
+
static void rds_ib_tasklet_fn_recv(unsigned long data)
{
struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
@@ -291,9 +308,9 @@ static void rds_ib_tasklet_fn_recv(unsigned long data)
rds_ib_stats_inc(s_ib_tasklet_call);
memset(&state, 0, sizeof(state));
- poll_cq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
+ poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
- poll_cq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
+ poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
if (state.ack_next_valid)
rds_ib_set_ack(ic, state.ack_next, state.ack_required);
@@ -351,7 +368,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
struct ib_qp_init_attr attr;
struct ib_cq_init_attr cq_attr = {};
struct rds_ib_device *rds_ibdev;
- int ret;
+ int ret, fr_queue_space;
/*
* It's normal to see a null device if an incoming connection races
@@ -361,6 +378,12 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!rds_ibdev)
return -EOPNOTSUPP;
+ /* The fr_queue_space is currently set to 512, to add extra space on
+ * the completion and send queues. This extra space is used for FRMR
+ * registration and invalidation work requests.
+ */
+ fr_queue_space = (rds_ibdev->use_fastreg ? RDS_IB_DEFAULT_FR_WR : 0);
+
/* add the conn now so that connection establishment has the dev */
rds_ib_add_conn(rds_ibdev, conn);
@@ -372,7 +395,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
/* Protection domain and memory range */
ic->i_pd = rds_ibdev->pd;
- cq_attr.cqe = ic->i_send_ring.w_nr + 1;
+ cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;
ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
rds_ib_cq_event_handler, conn,
@@ -412,7 +435,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
attr.event_handler = rds_ib_qp_event_handler;
attr.qp_context = conn;
/* + 1 to allow for the single ack message */
- attr.cap.max_send_wr = ic->i_send_ring.w_nr + 1;
+ attr.cap.max_send_wr = ic->i_send_ring.w_nr + fr_queue_space + 1;
attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
attr.cap.max_send_sge = rds_ibdev->max_sge;
attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
@@ -420,6 +443,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
attr.qp_type = IB_QPT_RC;
attr.send_cq = ic->i_send_cq;
attr.recv_cq = ic->i_recv_cq;
+ atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
/*
* XXX this can fail if max_*_wr is too large? Are we supposed
@@ -739,7 +763,8 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
*/
wait_event(rds_ib_ring_empty_wait,
rds_ib_ring_empty(&ic->i_recv_ring) &&
- (atomic_read(&ic->i_signaled_sends) == 0));
+ (atomic_read(&ic->i_signaled_sends) == 0) &&
+ (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR));
tasklet_kill(&ic->i_send_tasklet);
tasklet_kill(&ic->i_recv_tasklet);
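
A sizing sketch with the defaults above (send ring w_nr = 256 from RDS_IB_DEFAULT_SEND_WR, fr_queue_space = 512 when fastreg is in use):

/*
 *   cq_attr.cqe          = 256 + 512 + 1
 *   attr.cap.max_send_wr = 256 + 512 + 1   (the +1 is the lone ack WR)
 *
 * Data WRs carry wr_id <= w_nr (ring slots) or RDS_IB_ACK_WR_ID, so
 * poll_scq() can route every other completion to rds_ib_mr_cqe_handler();
 * i_fastreg_wrs throttles registration/invalidation WRs to at most 512
 * in flight, and shutdown waits for the counter to return to 512.
 */
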
diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
new file mode 100644
index 000000000000..4fe8f4fec4ee
--- /dev/null
+++ b/net/rds/ib_fmr.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2016 Oracle. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ib_mr.h"
+
+struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
+{
+ struct rds_ib_mr_pool *pool;
+ struct rds_ib_mr *ibmr = NULL;
+ struct rds_ib_fmr *fmr;
+ int err = 0;
+
+ if (npages <= RDS_MR_8K_MSG_SIZE)
+ pool = rds_ibdev->mr_8k_pool;
+ else
+ pool = rds_ibdev->mr_1m_pool;
+
+ ibmr = rds_ib_try_reuse_ibmr(pool);
+ if (ibmr)
+ return ibmr;
+
+ ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
+ rdsibdev_to_node(rds_ibdev));
+ if (!ibmr) {
+ err = -ENOMEM;
+ goto out_no_cigar;
+ }
+
+ fmr = &ibmr->u.fmr;
+ fmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
+ (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_ATOMIC),
+ &pool->fmr_attr);
+ if (IS_ERR(fmr->fmr)) {
+ err = PTR_ERR(fmr->fmr);
+ fmr->fmr = NULL;
+ pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, err);
+ goto out_no_cigar;
+ }
+
+ ibmr->pool = pool;
+ if (pool->pool_type == RDS_IB_MR_8K_POOL)
+ rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
+ else
+ rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);
+
+ return ibmr;
+
+out_no_cigar:
+ if (ibmr) {
+ if (fmr->fmr)
+ ib_dealloc_fmr(fmr->fmr);
+ kfree(ibmr);
+ }
+ atomic_dec(&pool->item_count);
+ return ERR_PTR(err);
+}
+
+int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
+ struct scatterlist *sg, unsigned int nents)
+{
+ struct ib_device *dev = rds_ibdev->dev;
+ struct rds_ib_fmr *fmr = &ibmr->u.fmr;
+ struct scatterlist *scat = sg;
+ u64 io_addr = 0;
+ u64 *dma_pages;
+ u32 len;
+ int page_cnt, sg_dma_len;
+ int i, j;
+ int ret;
+
+ sg_dma_len = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
+ if (unlikely(!sg_dma_len)) {
+ pr_warn("RDS/IB: %s failed!\n", __func__);
+ return -EBUSY;
+ }
+
+ len = 0;
+ page_cnt = 0;
+
+ for (i = 0; i < sg_dma_len; ++i) {
+ unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
+ u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
+
+ if (dma_addr & ~PAGE_MASK) {
+ if (i > 0)
+ return -EINVAL;
+ else
+ ++page_cnt;
+ }
+ if ((dma_addr + dma_len) & ~PAGE_MASK) {
+ if (i < sg_dma_len - 1)
+ return -EINVAL;
+ else
+ ++page_cnt;
+ }
+
+ len += dma_len;
+ }
+
+ page_cnt += len >> PAGE_SHIFT;
+ if (page_cnt > ibmr->pool->fmr_attr.max_pages)
+ return -EINVAL;
+
+ dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
+ rdsibdev_to_node(rds_ibdev));
+ if (!dma_pages)
+ return -ENOMEM;
+
+ page_cnt = 0;
+ for (i = 0; i < sg_dma_len; ++i) {
+ unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
+ u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
+
+ for (j = 0; j < dma_len; j += PAGE_SIZE)
+ dma_pages[page_cnt++] =
+ (dma_addr & PAGE_MASK) + j;
+ }
+
+ ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
+ if (ret)
+ goto out;
+
+ /* The remap succeeded, so we can safely tear down the
+ * old mapping.
+ */
+ rds_ib_teardown_mr(ibmr);
+
+ ibmr->sg = scat;
+ ibmr->sg_len = nents;
+ ibmr->sg_dma_len = sg_dma_len;
+ ibmr->remap_count++;
+
+ if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
+ rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
+ else
+ rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
+ ret = 0;
+
+out:
+ kfree(dma_pages);
+
+ return ret;
+}
+
+struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *rds_ibdev,
+ struct scatterlist *sg,
+ unsigned long nents,
+ u32 *key)
+{
+ struct rds_ib_mr *ibmr = NULL;
+ struct rds_ib_fmr *fmr;
+ int ret;
+
+ ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
+ if (IS_ERR(ibmr))
+ return ibmr;
+
+ ibmr->device = rds_ibdev;
+ fmr = &ibmr->u.fmr;
+ ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
+ if (ret == 0)
+ *key = fmr->fmr->rkey;
+ else
+ rds_ib_free_mr(ibmr, 0);
+
+ return ibmr;
+}
+
+void rds_ib_unreg_fmr(struct list_head *list, unsigned int *nfreed,
+ unsigned long *unpinned, unsigned int goal)
+{
+ struct rds_ib_mr *ibmr, *next;
+ struct rds_ib_fmr *fmr;
+ LIST_HEAD(fmr_list);
+ int ret = 0;
+ unsigned int freed = *nfreed;
+
+ /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
+ list_for_each_entry(ibmr, list, unmap_list) {
+ fmr = &ibmr->u.fmr;
+ list_add(&fmr->fmr->list, &fmr_list);
+ }
+
+ ret = ib_unmap_fmr(&fmr_list);
+ if (ret)
+ pr_warn("RDS/IB: FMR invalidation failed (err=%d)\n", ret);
+
+ /* Now we can destroy the DMA mapping and unpin any pages */
+ list_for_each_entry_safe(ibmr, next, list, unmap_list) {
+ fmr = &ibmr->u.fmr;
+ *unpinned += ibmr->sg_len;
+ __rds_ib_teardown_mr(ibmr);
+ if (freed < goal ||
+ ibmr->remap_count >= ibmr->pool->fmr_attr.max_maps) {
+ if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
+ rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
+ else
+ rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
+ list_del(&ibmr->unmap_list);
+ ib_dealloc_fmr(fmr->fmr);
+ kfree(ibmr);
+ freed++;
+ }
+ }
+ *nfreed = freed;
+}
+
+void rds_ib_free_fmr_list(struct rds_ib_mr *ibmr)
+{
+ struct rds_ib_mr_pool *pool = ibmr->pool;
+
+ if (ibmr->remap_count >= pool->fmr_attr.max_maps)
+ llist_add(&ibmr->llnode, &pool->drop_list);
+ else
+ llist_add(&ibmr->llnode, &pool->free_list);
+}
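
A worked example of the alignment rules in rds_ib_map_fmr() above, assuming 4 KiB pages and a three-segment sg list (addresses hypothetical):

/*
 *   seg0: addr 0x10000800, len 0x0800  - may start unaligned (i == 0)
 *   seg1: addr 0x20000000, len 0x2000  - must be fully page aligned
 *   seg2: addr 0x30000000, len 0x0400  - may end unaligned (last seg)
 *
 * Any other unaligned edge returns -EINVAL. The first pass counts
 *
 *   page_cnt = 1 (seg0 head) + 1 (seg2 tail)
 *            + ((0x800 + 0x2000 + 0x400) >> PAGE_SHIFT) = 2 + 2 = 4
 *
 * and the second pass emits the page-masked addresses 0x10000000,
 * 0x20000000, 0x20001000 and 0x30000000 into dma_pages[].
 */
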
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
new file mode 100644
index 000000000000..93ff038ea9d1
--- /dev/null
+++ b/net/rds/ib_frmr.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2016 Oracle. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ib_mr.h"
+
+static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
+ int npages)
+{
+ struct rds_ib_mr_pool *pool;
+ struct rds_ib_mr *ibmr = NULL;
+ struct rds_ib_frmr *frmr;
+ int err = 0;
+
+ if (npages <= RDS_MR_8K_MSG_SIZE)
+ pool = rds_ibdev->mr_8k_pool;
+ else
+ pool = rds_ibdev->mr_1m_pool;
+
+ ibmr = rds_ib_try_reuse_ibmr(pool);
+ if (ibmr)
+ return ibmr;
+
+ ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
+ rdsibdev_to_node(rds_ibdev));
+ if (!ibmr) {
+ err = -ENOMEM;
+ goto out_no_cigar;
+ }
+
+ frmr = &ibmr->u.frmr;
+ frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,
+ pool->fmr_attr.max_pages);
+ if (IS_ERR(frmr->mr)) {
+ pr_warn("RDS/IB: %s failed to allocate MR", __func__);
+ goto out_no_cigar;
+ }
+
+ ibmr->pool = pool;
+ if (pool->pool_type == RDS_IB_MR_8K_POOL)
+ rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
+ else
+ rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);
+
+ if (atomic_read(&pool->item_count) > pool->max_items_soft)
+ pool->max_items_soft = pool->max_items;
+
+ frmr->fr_state = FRMR_IS_FREE;
+ return ibmr;
+
+out_no_cigar:
+ kfree(ibmr);
+ atomic_dec(&pool->item_count);
+ return ERR_PTR(err);
+}
+
+static void rds_ib_free_frmr(struct rds_ib_mr *ibmr, bool drop)
+{
+ struct rds_ib_mr_pool *pool = ibmr->pool;
+
+ if (drop)
+ llist_add(&ibmr->llnode, &pool->drop_list);
+ else
+ llist_add(&ibmr->llnode, &pool->free_list);
+ atomic_add(ibmr->sg_len, &pool->free_pinned);
+ atomic_inc(&pool->dirty_count);
+
+ /* If we've pinned too many pages, request a flush */
+ if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
+ atomic_read(&pool->dirty_count) >= pool->max_items / 5)
+ queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+}
+
+static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
+{
+ struct rds_ib_frmr *frmr = &ibmr->u.frmr;
+ struct ib_send_wr *failed_wr;
+ struct ib_reg_wr reg_wr;
+ int ret;
+
+ while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
+ atomic_inc(&ibmr->ic->i_fastreg_wrs);
+ cpu_relax();
+ }
+
+ ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len, PAGE_SIZE);
+ if (unlikely(ret != ibmr->sg_len))
+ return ret < 0 ? ret : -EINVAL;
+
+ /* Post the fast-registration WR (IB_WR_REG_MR) covering every page
+ * in the sg list. The key's low byte is a rolling 8-bit counter,
+ * bumped on each remap, which keeps successive registrations of
+ * this MR distinguishable (see the key-rolling sketch after this
+ * file's diff).
+ */
+ ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
+ frmr->fr_state = FRMR_IS_INUSE;
+
+ memset(&reg_wr, 0, sizeof(reg_wr));
+ reg_wr.wr.wr_id = (unsigned long)(void *)ibmr;
+ reg_wr.wr.opcode = IB_WR_REG_MR;
+ reg_wr.wr.num_sge = 0;
+ reg_wr.mr = frmr->mr;
+ reg_wr.key = frmr->mr->rkey;
+ reg_wr.access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+ reg_wr.wr.send_flags = IB_SEND_SIGNALED;
+
+ failed_wr = &reg_wr.wr;
+ ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, &failed_wr);
+ WARN_ON(failed_wr != &reg_wr.wr);
+ if (unlikely(ret)) {
+ /* Failure here can be because of -ENOMEM as well */
+ frmr->fr_state = FRMR_IS_STALE;
+ atomic_inc(&ibmr->ic->i_fastreg_wrs);
+ if (printk_ratelimit())
+ pr_warn("RDS/IB: %s returned error(%d)\n",
+ __func__, ret);
+ }
+ return ret;
+}
+
+static int rds_ib_map_frmr(struct rds_ib_device *rds_ibdev,
+ struct rds_ib_mr_pool *pool,
+ struct rds_ib_mr *ibmr,
+ struct scatterlist *sg, unsigned int sg_len)
+{
+ struct ib_device *dev = rds_ibdev->dev;
+ struct rds_ib_frmr *frmr = &ibmr->u.frmr;
+ int i;
+ u32 len;
+ int ret = 0;
+
+ /* Tear down any old mapping on this ibmr before filling it with
+ * the new sg values.
+ */
+ rds_ib_teardown_mr(ibmr);
+
+ ibmr->sg = sg;
+ ibmr->sg_len = sg_len;
+ WARN_ON(ibmr->sg_dma_len);
+ ibmr->sg_dma_len = 0;
+ ibmr->sg_dma_len = ib_dma_map_sg(dev, ibmr->sg, ibmr->sg_len,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!ibmr->sg_dma_len)) {
+ pr_warn("RDS/IB: %s failed!\n", __func__);
+ return -EBUSY;
+ }
+
+ frmr->sg_byte_len = 0;
+ frmr->dma_npages = 0;
+ len = 0;
+
+ ret = -EINVAL;
+ for (i = 0; i < ibmr->sg_dma_len; ++i) {
+ unsigned int dma_len = ib_sg_dma_len(dev, &ibmr->sg[i]);
+ u64 dma_addr = ib_sg_dma_address(dev, &ibmr->sg[i]);
+
+ frmr->sg_byte_len += dma_len;
+ if (dma_addr & ~PAGE_MASK) {
+ if (i > 0)
+ goto out_unmap;
+ else
+ ++frmr->dma_npages;
+ }
+
+ if ((dma_addr + dma_len) & ~PAGE_MASK) {
+ if (i < ibmr->sg_dma_len - 1)
+ goto out_unmap;
+ else
+ ++frmr->dma_npages;
+ }
+
+ len += dma_len;
+ }
+ frmr->dma_npages += len >> PAGE_SHIFT;
+
+ if (frmr->dma_npages > ibmr->pool->fmr_attr.max_pages) {
+ ret = -EMSGSIZE;
+ goto out_unmap;
+ }
+
+ ret = rds_ib_post_reg_frmr(ibmr);
+ if (ret)
+ goto out_unmap;
+
+ if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
+ rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
+ else
+ rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
+
+ return ret;
+
+out_unmap:
+ ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len,
+ DMA_BIDIRECTIONAL);
+ ibmr->sg_dma_len = 0;
+ return ret;
+}
+
+static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
+{
+ struct ib_send_wr *s_wr, *failed_wr;
+ struct rds_ib_frmr *frmr = &ibmr->u.frmr;
+ struct rdma_cm_id *i_cm_id = ibmr->ic->i_cm_id;
+ int ret = -EINVAL;
+
+ if (!i_cm_id || !i_cm_id->qp || !frmr->mr)
+ goto out;
+
+ if (frmr->fr_state != FRMR_IS_INUSE)
+ goto out;
+
+ while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
+ atomic_inc(&ibmr->ic->i_fastreg_wrs);
+ cpu_relax();
+ }
+
+ frmr->fr_inv = true;
+ s_wr = &frmr->fr_wr;
+
+ memset(s_wr, 0, sizeof(*s_wr));
+ s_wr->wr_id = (unsigned long)(void *)ibmr;
+ s_wr->opcode = IB_WR_LOCAL_INV;
+ s_wr->ex.invalidate_rkey = frmr->mr->rkey;
+ s_wr->send_flags = IB_SEND_SIGNALED;
+
+ failed_wr = s_wr;
+ ret = ib_post_send(i_cm_id->qp, s_wr, &failed_wr);
+ WARN_ON(failed_wr != s_wr);
+ if (unlikely(ret)) {
+ frmr->fr_state = FRMR_IS_STALE;
+ frmr->fr_inv = false;
+ atomic_inc(&ibmr->ic->i_fastreg_wrs);
+ pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
+ goto out;
+ }
+out:
+ return ret;
+}
+
+void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
+{
+ struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id;
+ struct rds_ib_frmr *frmr = &ibmr->u.frmr;
+
+ if (wc->status != IB_WC_SUCCESS) {
+ frmr->fr_state = FRMR_IS_STALE;
+ if (rds_conn_up(ic->conn))
+ rds_ib_conn_error(ic->conn,
+ "frmr completion <%pI4,%pI4> status %u(%s), vendor_err 0x%x, disconnecting and reconnecting\n",
+ &ic->conn->c_laddr,
+ &ic->conn->c_faddr,
+ wc->status,
+ ib_wc_status_msg(wc->status),
+ wc->vendor_err);
+ }
+
+ if (frmr->fr_inv) {
+ frmr->fr_state = FRMR_IS_FREE;
+ frmr->fr_inv = false;
+ }
+
+ atomic_inc(&ic->i_fastreg_wrs);
+}
+
+void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
+ unsigned long *unpinned, unsigned int goal)
+{
+ struct rds_ib_mr *ibmr, *next;
+ struct rds_ib_frmr *frmr;
+ int ret = 0;
+ unsigned int freed = *nfreed;
+
+ /* Post a LOCAL_INV WR for each MR that still has a DMA mapping;
+ * completions are handled asynchronously by rds_ib_mr_cqe_handler().
+ */
+ list_for_each_entry(ibmr, list, unmap_list) {
+ if (ibmr->sg_dma_len)
+ ret |= rds_ib_post_inv(ibmr);
+ }
+ if (ret)
+ pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, ret);
+
+ /* Now we can destroy the DMA mapping and unpin any pages */
+ list_for_each_entry_safe(ibmr, next, list, unmap_list) {
+ *unpinned += ibmr->sg_len;
+ frmr = &ibmr->u.frmr;
+ __rds_ib_teardown_mr(ibmr);
+ if (freed < goal || frmr->fr_state == FRMR_IS_STALE) {
+ /* Don't de-allocate if the MR is not free yet */
+ if (frmr->fr_state == FRMR_IS_INUSE)
+ continue;
+
+ if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
+ rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
+ else
+ rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
+ list_del(&ibmr->unmap_list);
+ if (frmr->mr)
+ ib_dereg_mr(frmr->mr);
+ kfree(ibmr);
+ freed++;
+ }
+ }
+ *nfreed = freed;
+}
+
+struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
+ struct rds_ib_connection *ic,
+ struct scatterlist *sg,
+ unsigned long nents, u32 *key)
+{
+ struct rds_ib_mr *ibmr = NULL;
+ struct rds_ib_frmr *frmr;
+ int ret;
+
+ do {
+ if (ibmr)
+ rds_ib_free_frmr(ibmr, true);
+ ibmr = rds_ib_alloc_frmr(rds_ibdev, nents);
+ if (IS_ERR(ibmr))
+ return ibmr;
+ frmr = &ibmr->u.frmr;
+ } while (frmr->fr_state != FRMR_IS_FREE);
+
+ ibmr->ic = ic;
+ ibmr->device = rds_ibdev;
+ ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents);
+ if (ret == 0) {
+ *key = frmr->mr->rkey;
+ } else {
+ rds_ib_free_frmr(ibmr, false);
+ ibmr = ERR_PTR(ret);
+ }
+
+ return ibmr;
+}
+
+void rds_ib_free_frmr_list(struct rds_ib_mr *ibmr)
+{
+ struct rds_ib_mr_pool *pool = ibmr->pool;
+ struct rds_ib_frmr *frmr = &ibmr->u.frmr;
+
+ if (frmr->fr_state == FRMR_IS_STALE)
+ llist_add(&ibmr->llnode, &pool->drop_list);
+ else
+ llist_add(&ibmr->llnode, &pool->free_list);
+}
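
The rolling-key trick in rds_ib_post_reg_frmr() above is worth unpacking: ib_update_fast_reg_key() rewrites only the low byte of the MR's local and remote keys, so stamping remap_count's low byte into the MR on every registration lets a completion for a stale mapping be told apart from the current one. A sketch of what the helper does (this mirrors the in-tree implementation; reproduced purely for illustration):

	static void demo_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
	{
		/* The HCA-assigned high 24 bits stay fixed; only the
		 * low byte rolls with each remap.
		 */
		mr->lkey = (mr->lkey & 0xffffff00) | newkey;
		mr->rkey = (mr->rkey & 0xffffff00) | newkey;
	}

Since the counter is 8 bits wide it wraps after 256 remaps, so successive keys are distinguishable rather than globally unique.
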
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h
new file mode 100644
index 000000000000..1c754f4acbe5
--- /dev/null
+++ b/net/rds/ib_mr.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2016 Oracle. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _RDS_IB_MR_H
+#define _RDS_IB_MR_H
+
+#include <linux/kernel.h>
+
+#include "rds.h"
+#include "ib.h"
+
+#define RDS_MR_1M_POOL_SIZE (8192 / 2)
+#define RDS_MR_1M_MSG_SIZE 256
+#define RDS_MR_8K_MSG_SIZE 2
+#define RDS_MR_8K_SCALE (256 / (RDS_MR_8K_MSG_SIZE + 1))
+#define RDS_MR_8K_POOL_SIZE (RDS_MR_8K_SCALE * (8192 / 2))
+
+struct rds_ib_fmr {
+ struct ib_fmr *fmr;
+ u64 *dma;
+};
+
+enum rds_ib_fr_state {
+ FRMR_IS_FREE, /* mr invalidated & ready for use */
+ FRMR_IS_INUSE, /* mr is in use or was used; can be invalidated */
+ FRMR_IS_STALE, /* Stale MR and needs to be dropped */
+};
+
+struct rds_ib_frmr {
+ struct ib_mr *mr;
+ enum rds_ib_fr_state fr_state;
+ bool fr_inv;
+ struct ib_send_wr fr_wr;
+ unsigned int dma_npages;
+ unsigned int sg_byte_len;
+};
+
+/* This is stored as mr->r_trans_private. */
+struct rds_ib_mr {
+ struct rds_ib_device *device;
+ struct rds_ib_mr_pool *pool;
+ struct rds_ib_connection *ic;
+
+ struct llist_node llnode;
+
+ /* unmap_list is for freeing */
+ struct list_head unmap_list;
+ unsigned int remap_count;
+
+ struct scatterlist *sg;
+ unsigned int sg_len;
+ int sg_dma_len;
+
+ union {
+ struct rds_ib_fmr fmr;
+ struct rds_ib_frmr frmr;
+ } u;
+};
+
+/* Our own little MR pool */
+struct rds_ib_mr_pool {
+ unsigned int pool_type;
+ struct mutex flush_lock; /* serialize fmr invalidate */
+ struct delayed_work flush_worker; /* flush worker */
+
+ atomic_t item_count; /* total # of MRs */
+ atomic_t dirty_count; /* # of dirty MRs */
+
+ struct llist_head drop_list; /* MRs that hit max_maps or went stale */
+ struct llist_head free_list; /* unused MRs */
+ struct llist_head clean_list; /* unused & unmapped MRs */
+ wait_queue_head_t flush_wait;
+
+ atomic_t free_pinned; /* memory pinned by free MRs */
+ unsigned long max_items;
+ unsigned long max_items_soft;
+ unsigned long max_free_pinned;
+ struct ib_fmr_attr fmr_attr;
+ bool use_fastreg;
+};
+
+extern struct workqueue_struct *rds_ib_mr_wq;
+extern unsigned int rds_ib_mr_1m_pool_size;
+extern unsigned int rds_ib_mr_8k_pool_size;
+extern bool prefer_frmr;
+
+struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_dev,
+ int npages);
+void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
+ struct rds_info_rdma_connection *iinfo);
+void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
+void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
+ struct rds_sock *rs, u32 *key_ret);
+void rds_ib_sync_mr(void *trans_private, int dir);
+void rds_ib_free_mr(void *trans_private, int invalidate);
+void rds_ib_flush_mrs(void);
+int rds_ib_mr_init(void);
+void rds_ib_mr_exit(void);
+
+void __rds_ib_teardown_mr(struct rds_ib_mr *);
+void rds_ib_teardown_mr(struct rds_ib_mr *);
+struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *, int);
+int rds_ib_map_fmr(struct rds_ib_device *, struct rds_ib_mr *,
+ struct scatterlist *, unsigned int);
+struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *);
+int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *, int, struct rds_ib_mr **);
+struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *, struct scatterlist *,
+ unsigned long, u32 *);
+struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *);
+void rds_ib_unreg_fmr(struct list_head *, unsigned int *,
+ unsigned long *, unsigned int);
+void rds_ib_free_fmr_list(struct rds_ib_mr *);
+struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
+ struct rds_ib_connection *ic,
+ struct scatterlist *sg,
+ unsigned long nents, u32 *key);
+void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
+ unsigned long *unpinned, unsigned int goal);
+void rds_ib_free_frmr_list(struct rds_ib_mr *);
+#endif
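
A worked expansion of the sizing macros above makes the scale relationship concrete:

	RDS_MR_8K_SCALE     = 256 / (2 + 1)   = 85
	RDS_MR_8K_POOL_SIZE = 85 * (8192 / 2) = 348160 MRs
	RDS_MR_1M_POOL_SIZE = 8192 / 2        = 4096 MRs

With fmr_attr.max_pages of 3 for the 8K pool and 257 for the 1M pool (the +1 added in rds_ib_create_mr_pool() to allow for unaligned MRs), each pool tops out at roughly a million pinned pages (348160 * 3 vs 4096 * 257), so neither message-size class can crowd the other out of pinned memory.
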
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index a2340748ec86..f7164ac1ffc1 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -35,78 +35,13 @@
#include <linux/rculist.h>
#include <linux/llist.h>
-#include "rds.h"
-#include "ib.h"
+#include "ib_mr.h"
+
+struct workqueue_struct *rds_ib_mr_wq;
static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0
-/*
- * This is stored as mr->r_trans_private.
- */
-struct rds_ib_mr {
- struct rds_ib_device *device;
- struct rds_ib_mr_pool *pool;
- struct ib_fmr *fmr;
-
- struct llist_node llnode;
-
- /* unmap_list is for freeing */
- struct list_head unmap_list;
- unsigned int remap_count;
-
- struct scatterlist *sg;
- unsigned int sg_len;
- u64 *dma;
- int sg_dma_len;
-};
-
-/*
- * Our own little FMR pool
- */
-struct rds_ib_mr_pool {
- unsigned int pool_type;
- struct mutex flush_lock; /* serialize fmr invalidate */
- struct delayed_work flush_worker; /* flush worker */
-
- atomic_t item_count; /* total # of MRs */
- atomic_t dirty_count; /* # dirty of MRs */
-
- struct llist_head drop_list; /* MRs that have reached their max_maps limit */
- struct llist_head free_list; /* unused MRs */
- struct llist_head clean_list; /* global unused & unamapped MRs */
- wait_queue_head_t flush_wait;
-
- atomic_t free_pinned; /* memory pinned by free MRs */
- unsigned long max_items;
- unsigned long max_items_soft;
- unsigned long max_free_pinned;
- struct ib_fmr_attr fmr_attr;
-};
-
-static struct workqueue_struct *rds_ib_fmr_wq;
-
-int rds_ib_fmr_init(void)
-{
- rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
- if (!rds_ib_fmr_wq)
- return -ENOMEM;
- return 0;
-}
-
-/* By the time this is called all the IB devices should have been torn down and
- * had their pools freed. As each pool is freed its work struct is waited on,
- * so the pool flushing work queue should be idle by the time we get here.
- */
-void rds_ib_fmr_exit(void)
-{
- destroy_workqueue(rds_ib_fmr_wq);
-}
-
-static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
-static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
-static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
-
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
struct rds_ib_device *rds_ibdev;
@@ -235,41 +170,6 @@ void rds_ib_destroy_nodev_conns(void)
rds_conn_destroy(ic->conn);
}
-struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
- int pool_type)
-{
- struct rds_ib_mr_pool *pool;
-
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- if (!pool)
- return ERR_PTR(-ENOMEM);
-
- pool->pool_type = pool_type;
- init_llist_head(&pool->free_list);
- init_llist_head(&pool->drop_list);
- init_llist_head(&pool->clean_list);
- mutex_init(&pool->flush_lock);
- init_waitqueue_head(&pool->flush_wait);
- INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
-
- if (pool_type == RDS_IB_MR_1M_POOL) {
- /* +1 allows for unaligned MRs */
- pool->fmr_attr.max_pages = RDS_FMR_1M_MSG_SIZE + 1;
- pool->max_items = RDS_FMR_1M_POOL_SIZE;
- } else {
- /* pool_type == RDS_IB_MR_8K_POOL */
- pool->fmr_attr.max_pages = RDS_FMR_8K_MSG_SIZE + 1;
- pool->max_items = RDS_FMR_8K_POOL_SIZE;
- }
-
- pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
- pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
- pool->fmr_attr.page_shift = PAGE_SHIFT;
- pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
-
- return pool;
-}
-
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;
@@ -278,16 +178,7 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}
-void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
-{
- cancel_delayed_work_sync(&pool->flush_worker);
- rds_ib_flush_mr_pool(pool, 1, NULL);
- WARN_ON(atomic_read(&pool->item_count));
- WARN_ON(atomic_read(&pool->free_pinned));
- kfree(pool);
-}
-
-static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
+struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
struct rds_ib_mr *ibmr = NULL;
struct llist_node *ret;
@@ -297,8 +188,13 @@ static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
flag = this_cpu_ptr(&clean_list_grace);
set_bit(CLEAN_LIST_BUSY_BIT, flag);
ret = llist_del_first(&pool->clean_list);
- if (ret)
+ if (ret) {
ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
+ if (pool->pool_type == RDS_IB_MR_8K_POOL)
+ rds_ib_stats_inc(s_ib_rdma_mr_8k_reused);
+ else
+ rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
+ }
clear_bit(CLEAN_LIST_BUSY_BIT, flag);
preempt_enable();
@@ -317,190 +213,6 @@ static inline void wait_clean_list_grace(void)
}
}
-static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev,
- int npages)
-{
- struct rds_ib_mr_pool *pool;
- struct rds_ib_mr *ibmr = NULL;
- int err = 0, iter = 0;
-
- if (npages <= RDS_FMR_8K_MSG_SIZE)
- pool = rds_ibdev->mr_8k_pool;
- else
- pool = rds_ibdev->mr_1m_pool;
-
- if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
- queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
-
- /* Switch pools if one of the pool is reaching upper limit */
- if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
- if (pool->pool_type == RDS_IB_MR_8K_POOL)
- pool = rds_ibdev->mr_1m_pool;
- else
- pool = rds_ibdev->mr_8k_pool;
- }
-
- while (1) {
- ibmr = rds_ib_reuse_fmr(pool);
- if (ibmr)
- return ibmr;
-
- /* No clean MRs - now we have the choice of either
- * allocating a fresh MR up to the limit imposed by the
- * driver, or flush any dirty unused MRs.
- * We try to avoid stalling in the send path if possible,
- * so we allocate as long as we're allowed to.
- *
- * We're fussy with enforcing the FMR limit, though. If the driver
- * tells us we can't use more than N fmrs, we shouldn't start
- * arguing with it */
- if (atomic_inc_return(&pool->item_count) <= pool->max_items)
- break;
-
- atomic_dec(&pool->item_count);
-
- if (++iter > 2) {
- if (pool->pool_type == RDS_IB_MR_8K_POOL)
- rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
- else
- rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
- return ERR_PTR(-EAGAIN);
- }
-
- /* We do have some empty MRs. Flush them out. */
- if (pool->pool_type == RDS_IB_MR_8K_POOL)
- rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
- else
- rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);
- rds_ib_flush_mr_pool(pool, 0, &ibmr);
- if (ibmr)
- return ibmr;
- }
-
- ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
- if (!ibmr) {
- err = -ENOMEM;
- goto out_no_cigar;
- }
-
- ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
- (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE|
- IB_ACCESS_REMOTE_ATOMIC),
- &pool->fmr_attr);
- if (IS_ERR(ibmr->fmr)) {
- err = PTR_ERR(ibmr->fmr);
- ibmr->fmr = NULL;
- printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
- goto out_no_cigar;
- }
-
- ibmr->pool = pool;
- if (pool->pool_type == RDS_IB_MR_8K_POOL)
- rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
- else
- rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);
-
- return ibmr;
-
-out_no_cigar:
- if (ibmr) {
- if (ibmr->fmr)
- ib_dealloc_fmr(ibmr->fmr);
- kfree(ibmr);
- }
- atomic_dec(&pool->item_count);
- return ERR_PTR(err);
-}
-
-static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
- struct scatterlist *sg, unsigned int nents)
-{
- struct ib_device *dev = rds_ibdev->dev;
- struct scatterlist *scat = sg;
- u64 io_addr = 0;
- u64 *dma_pages;
- u32 len;
- int page_cnt, sg_dma_len;
- int i, j;
- int ret;
-
- sg_dma_len = ib_dma_map_sg(dev, sg, nents,
- DMA_BIDIRECTIONAL);
- if (unlikely(!sg_dma_len)) {
- printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
- return -EBUSY;
- }
-
- len = 0;
- page_cnt = 0;
-
- for (i = 0; i < sg_dma_len; ++i) {
- unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
- u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
-
- if (dma_addr & ~PAGE_MASK) {
- if (i > 0)
- return -EINVAL;
- else
- ++page_cnt;
- }
- if ((dma_addr + dma_len) & ~PAGE_MASK) {
- if (i < sg_dma_len - 1)
- return -EINVAL;
- else
- ++page_cnt;
- }
-
- len += dma_len;
- }
-
- page_cnt += len >> PAGE_SHIFT;
- if (page_cnt > ibmr->pool->fmr_attr.max_pages)
- return -EINVAL;
-
- dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
- rdsibdev_to_node(rds_ibdev));
- if (!dma_pages)
- return -ENOMEM;
-
- page_cnt = 0;
- for (i = 0; i < sg_dma_len; ++i) {
- unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
- u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
-
- for (j = 0; j < dma_len; j += PAGE_SIZE)
- dma_pages[page_cnt++] =
- (dma_addr & PAGE_MASK) + j;
- }
-
- ret = ib_map_phys_fmr(ibmr->fmr,
- dma_pages, page_cnt, io_addr);
- if (ret)
- goto out;
-
- /* Success - we successfully remapped the MR, so we can
- * safely tear down the old mapping. */
- rds_ib_teardown_mr(ibmr);
-
- ibmr->sg = scat;
- ibmr->sg_len = nents;
- ibmr->sg_dma_len = sg_dma_len;
- ibmr->remap_count++;
-
- if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
- rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
- else
- rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
- ret = 0;
-
-out:
- kfree(dma_pages);
-
- return ret;
-}
-
void rds_ib_sync_mr(void *trans_private, int direction)
{
struct rds_ib_mr *ibmr = trans_private;
@@ -518,7 +230,7 @@ void rds_ib_sync_mr(void *trans_private, int direction)
}
}
-static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
+void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
struct rds_ib_device *rds_ibdev = ibmr->device;
@@ -549,7 +261,7 @@ static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
}
}
-static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
+void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
unsigned int pinned = ibmr->sg_len;
@@ -623,17 +335,15 @@ static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
* If the number of MRs allocated exceeds the limit, we also try
* to free as many MRs as needed to get back to this limit.
*/
-static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
- int free_all, struct rds_ib_mr **ibmr_ret)
+int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
+ int free_all, struct rds_ib_mr **ibmr_ret)
{
- struct rds_ib_mr *ibmr, *next;
+ struct rds_ib_mr *ibmr;
struct llist_node *clean_nodes;
struct llist_node *clean_tail;
LIST_HEAD(unmap_list);
- LIST_HEAD(fmr_list);
unsigned long unpinned = 0;
unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;
- int ret = 0;
if (pool->pool_type == RDS_IB_MR_8K_POOL)
rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
@@ -643,7 +353,7 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
if (ibmr_ret) {
DEFINE_WAIT(wait);
while (!mutex_trylock(&pool->flush_lock)) {
- ibmr = rds_ib_reuse_fmr(pool);
+ ibmr = rds_ib_reuse_mr(pool);
if (ibmr) {
*ibmr_ret = ibmr;
finish_wait(&pool->flush_wait, &wait);
@@ -655,7 +365,7 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
if (llist_empty(&pool->clean_list))
schedule();
- ibmr = rds_ib_reuse_fmr(pool);
+ ibmr = rds_ib_reuse_mr(pool);
if (ibmr) {
*ibmr_ret = ibmr;
finish_wait(&pool->flush_wait, &wait);
@@ -667,7 +377,7 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
mutex_lock(&pool->flush_lock);
if (ibmr_ret) {
- ibmr = rds_ib_reuse_fmr(pool);
+ ibmr = rds_ib_reuse_mr(pool);
if (ibmr) {
*ibmr_ret = ibmr;
goto out;
@@ -687,30 +397,10 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
if (list_empty(&unmap_list))
goto out;
- /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
- list_for_each_entry(ibmr, &unmap_list, unmap_list)
- list_add(&ibmr->fmr->list, &fmr_list);
-
- ret = ib_unmap_fmr(&fmr_list);
- if (ret)
- printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);
-
- /* Now we can destroy the DMA mapping and unpin any pages */
- list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
- unpinned += ibmr->sg_len;
- __rds_ib_teardown_mr(ibmr);
- if (nfreed < free_goal ||
- ibmr->remap_count >= pool->fmr_attr.max_maps) {
- if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
- rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
- else
- rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
- list_del(&ibmr->unmap_list);
- ib_dealloc_fmr(ibmr->fmr);
- kfree(ibmr);
- nfreed++;
- }
- }
+ if (pool->use_fastreg)
+ rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);
+ else
+ rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);
if (!list_empty(&unmap_list)) {
/* we have to make sure that none of the things we're about
@@ -743,7 +433,47 @@ out:
if (waitqueue_active(&pool->flush_wait))
wake_up(&pool->flush_wait);
out_nolock:
- return ret;
+ return 0;
+}
+
+struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
+{
+ struct rds_ib_mr *ibmr = NULL;
+ int iter = 0;
+
+ if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
+ queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+
+ while (1) {
+ ibmr = rds_ib_reuse_mr(pool);
+ if (ibmr)
+ return ibmr;
+
+ if (atomic_inc_return(&pool->item_count) <= pool->max_items)
+ break;
+
+ atomic_dec(&pool->item_count);
+
+ if (++iter > 2) {
+ if (pool->pool_type == RDS_IB_MR_8K_POOL)
+ rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
+ else
+ rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ /* We do have some empty MRs. Flush them out. */
+ if (pool->pool_type == RDS_IB_MR_8K_POOL)
+ rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
+ else
+ rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);
+
+ rds_ib_flush_mr_pool(pool, 0, &ibmr);
+ if (ibmr)
+ return ibmr;
+ }
+
+ return ibmr;
}
static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
@@ -762,10 +492,10 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);
/* Return it to the pool's free list */
- if (ibmr->remap_count >= pool->fmr_attr.max_maps)
- llist_add(&ibmr->llnode, &pool->drop_list);
+ if (rds_ibdev->use_fastreg)
+ rds_ib_free_frmr_list(ibmr);
else
- llist_add(&ibmr->llnode, &pool->free_list);
+ rds_ib_free_fmr_list(ibmr);
atomic_add(ibmr->sg_len, &pool->free_pinned);
atomic_inc(&pool->dirty_count);
@@ -773,7 +503,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
/* If we've pinned too many pages, request a flush */
if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
atomic_read(&pool->dirty_count) >= pool->max_items / 5)
- queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+ queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
if (invalidate) {
if (likely(!in_interrupt())) {
@@ -782,7 +512,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
/* We get here if the user created a MR marked
* as use_once and invalidate at the same time.
*/
- queue_delayed_work(rds_ib_fmr_wq,
+ queue_delayed_work(rds_ib_mr_wq,
&pool->flush_worker, 10);
}
}
@@ -810,6 +540,7 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
{
struct rds_ib_device *rds_ibdev;
struct rds_ib_mr *ibmr = NULL;
+ struct rds_ib_connection *ic = rs->rs_conn->c_transport_data;
int ret;
rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
@@ -823,29 +554,81 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
goto out;
}
- ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
- if (IS_ERR(ibmr)) {
- rds_ib_dev_put(rds_ibdev);
- return ibmr;
- }
-
- ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
- if (ret == 0)
- *key_ret = ibmr->fmr->rkey;
+ if (rds_ibdev->use_fastreg)
+ ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
else
- printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);
-
- ibmr->device = rds_ibdev;
- rds_ibdev = NULL;
+ ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
+ /* On success the MR owns the device ref; on error drop it below */
+ if (!IS_ERR_OR_NULL(ibmr))
+ rds_ibdev = NULL;
out:
- if (ret) {
- if (ibmr)
- rds_ib_free_mr(ibmr, 0);
- ibmr = ERR_PTR(ret);
- }
+ if (!ibmr)
+ pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
+
if (rds_ibdev)
rds_ib_dev_put(rds_ibdev);
+
return ibmr;
}
+void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
+{
+ cancel_delayed_work_sync(&pool->flush_worker);
+ rds_ib_flush_mr_pool(pool, 1, NULL);
+ WARN_ON(atomic_read(&pool->item_count));
+ WARN_ON(atomic_read(&pool->free_pinned));
+ kfree(pool);
+}
+
+struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
+ int pool_type)
+{
+ struct rds_ib_mr_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->pool_type = pool_type;
+ init_llist_head(&pool->free_list);
+ init_llist_head(&pool->drop_list);
+ init_llist_head(&pool->clean_list);
+ mutex_init(&pool->flush_lock);
+ init_waitqueue_head(&pool->flush_wait);
+ INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
+
+ if (pool_type == RDS_IB_MR_1M_POOL) {
+ /* +1 allows for unaligned MRs */
+ pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1;
+ pool->max_items = RDS_MR_1M_POOL_SIZE;
+ } else {
+ /* pool_type == RDS_IB_MR_8K_POOL */
+ pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1;
+ pool->max_items = RDS_MR_8K_POOL_SIZE;
+ }
+
+ pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
+ pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
+ pool->fmr_attr.page_shift = PAGE_SHIFT;
+ pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
+ pool->use_fastreg = rds_ibdev->use_fastreg;
+
+ return pool;
+}
+
+int rds_ib_mr_init(void)
+{
+ rds_ib_mr_wq = create_workqueue("rds_mr_flushd");
+ if (!rds_ib_mr_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+/* By the time this is called all the IB devices should have been torn down and
+ * had their pools freed. As each pool is freed its work struct is waited on,
+ * so the pool flushing work queue should be idle by the time we get here.
+ */
+void rds_ib_mr_exit(void)
+{
+ destroy_workqueue(rds_ib_mr_wq);
+}
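
Both free paths funnel through the deferred-flush machinery this file keeps: the fast path only bumps counters and kicks a delayed work item, so bursts of frees coalesce into a single pool flush. A self-contained sketch of the pattern, with demo_ names standing in for the real pool and rds_ib_mr_pool_flush_worker():

	#include <linux/workqueue.h>
	#include <linux/atomic.h>

	struct demo_pool {
		struct delayed_work flush_worker;
		atomic_t dirty_count;
		unsigned long max_items;
	};

	static void demo_flush_worker(struct work_struct *work)
	{
		struct demo_pool *pool = container_of(to_delayed_work(work),
						      struct demo_pool,
						      flush_worker);

		/* Stand-in for rds_ib_flush_mr_pool(pool, 0, NULL). */
		atomic_set(&pool->dirty_count, 0);
	}

	static void demo_mr_freed(struct workqueue_struct *wq,
				  struct demo_pool *pool)
	{
		/* Same 20% threshold the code above uses: schedule the
		 * flush 10 jiffies out so nearby frees batch up.
		 */
		if (atomic_inc_return(&pool->dirty_count) >= pool->max_items / 5)
			queue_delayed_work(wq, &pool->flush_worker, 10);
	}
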
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index eac30bf486d7..f27d2c82b036 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -195,7 +195,7 @@ void rds_ib_send_init_ring(struct rds_ib_connection *ic)
send->s_op = NULL;
- send->s_wr.wr_id = i | RDS_IB_SEND_OP;
+ send->s_wr.wr_id = i;
send->s_wr.sg_list = send->s_sge;
send->s_wr.ex.imm_data = 0;
@@ -263,9 +263,7 @@ void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
oldest = rds_ib_ring_oldest(&ic->i_send_ring);
- completed = rds_ib_ring_completed(&ic->i_send_ring,
- (wc->wr_id & ~RDS_IB_SEND_OP),
- oldest);
+ completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);
for (i = 0; i < completed; i++) {
send = &ic->i_sends[oldest];
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index d77e04473056..7e78dca1f252 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -73,6 +73,8 @@ static const char *const rds_ib_stat_names[] = {
"ib_rdma_mr_1m_pool_flush",
"ib_rdma_mr_1m_pool_wait",
"ib_rdma_mr_1m_pool_depleted",
+ "ib_rdma_mr_8k_reused",
+ "ib_rdma_mr_1m_reused",
"ib_atomic_cswp",
"ib_atomic_fadd",
};
diff --git a/net/rds/iw.c b/net/rds/iw.c
deleted file mode 100644
index f4a9fff829e0..000000000000
--- a/net/rds/iw.c
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * Copyright (c) 2006 Oracle. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/kernel.h>
-#include <linux/in.h>
-#include <linux/if.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-#include "rds.h"
-#include "iw.h"
-
-unsigned int fastreg_pool_size = RDS_FASTREG_POOL_SIZE;
-unsigned int fastreg_message_size = RDS_FASTREG_SIZE + 1; /* +1 allows for unaligned MRs */
-
-module_param(fastreg_pool_size, int, 0444);
-MODULE_PARM_DESC(fastreg_pool_size, " Max number of fastreg MRs per device");
-module_param(fastreg_message_size, int, 0444);
-MODULE_PARM_DESC(fastreg_message_size, " Max size of a RDMA transfer (fastreg MRs)");
-
-struct list_head rds_iw_devices;
-
-/* NOTE: if also grabbing iwdev lock, grab this first */
-DEFINE_SPINLOCK(iw_nodev_conns_lock);
-LIST_HEAD(iw_nodev_conns);
-
-static void rds_iw_add_one(struct ib_device *device)
-{
- struct rds_iw_device *rds_iwdev;
-
- /* Only handle iwarp devices */
- if (device->node_type != RDMA_NODE_RNIC)
- return;
-
- rds_iwdev = kmalloc(sizeof *rds_iwdev, GFP_KERNEL);
- if (!rds_iwdev)
- return;
-
- spin_lock_init(&rds_iwdev->spinlock);
-
- rds_iwdev->dma_local_lkey = !!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY);
- rds_iwdev->max_wrs = device->attrs.max_qp_wr;
- rds_iwdev->max_sge = min(device->attrs.max_sge, RDS_IW_MAX_SGE);
-
- rds_iwdev->dev = device;
- rds_iwdev->pd = ib_alloc_pd(device);
- if (IS_ERR(rds_iwdev->pd))
- goto free_dev;
-
- if (!rds_iwdev->dma_local_lkey) {
- rds_iwdev->mr = ib_get_dma_mr(rds_iwdev->pd,
- IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(rds_iwdev->mr))
- goto err_pd;
- } else
- rds_iwdev->mr = NULL;
-
- rds_iwdev->mr_pool = rds_iw_create_mr_pool(rds_iwdev);
- if (IS_ERR(rds_iwdev->mr_pool)) {
- rds_iwdev->mr_pool = NULL;
- goto err_mr;
- }
-
- INIT_LIST_HEAD(&rds_iwdev->cm_id_list);
- INIT_LIST_HEAD(&rds_iwdev->conn_list);
- list_add_tail(&rds_iwdev->list, &rds_iw_devices);
-
- ib_set_client_data(device, &rds_iw_client, rds_iwdev);
- return;
-
-err_mr:
- if (rds_iwdev->mr)
- ib_dereg_mr(rds_iwdev->mr);
-err_pd:
- ib_dealloc_pd(rds_iwdev->pd);
-free_dev:
- kfree(rds_iwdev);
-}
-
-static void rds_iw_remove_one(struct ib_device *device, void *client_data)
-{
- struct rds_iw_device *rds_iwdev = client_data;
- struct rds_iw_cm_id *i_cm_id, *next;
-
- if (!rds_iwdev)
- return;
-
- spin_lock_irq(&rds_iwdev->spinlock);
- list_for_each_entry_safe(i_cm_id, next, &rds_iwdev->cm_id_list, list) {
- list_del(&i_cm_id->list);
- kfree(i_cm_id);
- }
- spin_unlock_irq(&rds_iwdev->spinlock);
-
- rds_iw_destroy_conns(rds_iwdev);
-
- if (rds_iwdev->mr_pool)
- rds_iw_destroy_mr_pool(rds_iwdev->mr_pool);
-
- if (rds_iwdev->mr)
- ib_dereg_mr(rds_iwdev->mr);
-
- ib_dealloc_pd(rds_iwdev->pd);
-
- list_del(&rds_iwdev->list);
- kfree(rds_iwdev);
-}
-
-struct ib_client rds_iw_client = {
- .name = "rds_iw",
- .add = rds_iw_add_one,
- .remove = rds_iw_remove_one
-};
-
-static int rds_iw_conn_info_visitor(struct rds_connection *conn,
- void *buffer)
-{
- struct rds_info_rdma_connection *iinfo = buffer;
- struct rds_iw_connection *ic;
-
- /* We will only ever look at IB transports */
- if (conn->c_trans != &rds_iw_transport)
- return 0;
-
- iinfo->src_addr = conn->c_laddr;
- iinfo->dst_addr = conn->c_faddr;
-
- memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
- memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
- if (rds_conn_state(conn) == RDS_CONN_UP) {
- struct rds_iw_device *rds_iwdev;
- struct rdma_dev_addr *dev_addr;
-
- ic = conn->c_transport_data;
- dev_addr = &ic->i_cm_id->route.addr.dev_addr;
-
- rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
- rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
-
- rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
- iinfo->max_send_wr = ic->i_send_ring.w_nr;
- iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
- iinfo->max_send_sge = rds_iwdev->max_sge;
- rds_iw_get_mr_info(rds_iwdev, iinfo);
- }
- return 1;
-}
-
-static void rds_iw_ic_info(struct socket *sock, unsigned int len,
- struct rds_info_iterator *iter,
- struct rds_info_lengths *lens)
-{
- rds_for_each_conn_info(sock, len, iter, lens,
- rds_iw_conn_info_visitor,
- sizeof(struct rds_info_rdma_connection));
-}
-
-
-/*
- * Early RDS/IB was built to only bind to an address if there is an IPoIB
- * device with that address set.
- *
- * If it were me, I'd advocate for something more flexible. Sending and
- * receiving should be device-agnostic. Transports would try and maintain
- * connections between peers who have messages queued. Userspace would be
- * allowed to influence which paths have priority. We could call userspace
- * asserting this policy "routing".
- */
-static int rds_iw_laddr_check(struct net *net, __be32 addr)
-{
- int ret;
- struct rdma_cm_id *cm_id;
- struct sockaddr_in sin;
-
- /* Create a CMA ID and try to bind it. This catches both
- * IB and iWARP capable NICs.
- */
- cm_id = rdma_create_id(&init_net, NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
- if (IS_ERR(cm_id))
- return PTR_ERR(cm_id);
-
- memset(&sin, 0, sizeof(sin));
- sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = addr;
-
- /* rdma_bind_addr will only succeed for IB & iWARP devices */
- ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
- /* due to this, we will claim to support IB devices unless we
- check node_type. */
- if (ret || !cm_id->device ||
- cm_id->device->node_type != RDMA_NODE_RNIC)
- ret = -EADDRNOTAVAIL;
-
- rdsdebug("addr %pI4 ret %d node type %d\n",
- &addr, ret,
- cm_id->device ? cm_id->device->node_type : -1);
-
- rdma_destroy_id(cm_id);
-
- return ret;
-}
-
-void rds_iw_exit(void)
-{
- rds_info_deregister_func(RDS_INFO_IWARP_CONNECTIONS, rds_iw_ic_info);
- rds_iw_destroy_nodev_conns();
- ib_unregister_client(&rds_iw_client);
- rds_iw_sysctl_exit();
- rds_iw_recv_exit();
- rds_trans_unregister(&rds_iw_transport);
-}
-
-struct rds_transport rds_iw_transport = {
- .laddr_check = rds_iw_laddr_check,
- .xmit_complete = rds_iw_xmit_complete,
- .xmit = rds_iw_xmit,
- .xmit_rdma = rds_iw_xmit_rdma,
- .recv = rds_iw_recv,
- .conn_alloc = rds_iw_conn_alloc,
- .conn_free = rds_iw_conn_free,
- .conn_connect = rds_iw_conn_connect,
- .conn_shutdown = rds_iw_conn_shutdown,
- .inc_copy_to_user = rds_iw_inc_copy_to_user,
- .inc_free = rds_iw_inc_free,
- .cm_initiate_connect = rds_iw_cm_initiate_connect,
- .cm_handle_connect = rds_iw_cm_handle_connect,
- .cm_connect_complete = rds_iw_cm_connect_complete,
- .stats_info_copy = rds_iw_stats_info_copy,
- .exit = rds_iw_exit,
- .get_mr = rds_iw_get_mr,
- .sync_mr = rds_iw_sync_mr,
- .free_mr = rds_iw_free_mr,
- .flush_mrs = rds_iw_flush_mrs,
- .t_owner = THIS_MODULE,
- .t_name = "iwarp",
- .t_type = RDS_TRANS_IWARP,
- .t_prefer_loopback = 1,
-};
-
-int rds_iw_init(void)
-{
- int ret;
-
- INIT_LIST_HEAD(&rds_iw_devices);
-
- ret = ib_register_client(&rds_iw_client);
- if (ret)
- goto out;
-
- ret = rds_iw_sysctl_init();
- if (ret)
- goto out_ibreg;
-
- ret = rds_iw_recv_init();
- if (ret)
- goto out_sysctl;
-
- ret = rds_trans_register(&rds_iw_transport);
- if (ret)
- goto out_recv;
-
- rds_info_register_func(RDS_INFO_IWARP_CONNECTIONS, rds_iw_ic_info);
-
- goto out;
-
-out_recv:
- rds_iw_recv_exit();
-out_sysctl:
- rds_iw_sysctl_exit();
-out_ibreg:
- ib_unregister_client(&rds_iw_client);
-out:
- return ret;
-}
-
-MODULE_LICENSE("GPL");
-
diff --git a/net/rds/iw.h b/net/rds/iw.h
deleted file mode 100644
index 5af01d1758b3..000000000000
--- a/net/rds/iw.h
+++ /dev/null
@@ -1,398 +0,0 @@
-#ifndef _RDS_IW_H
-#define _RDS_IW_H
-
-#include <linux/interrupt.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/rdma_cm.h>
-#include "rds.h"
-#include "rdma_transport.h"
-
-#define RDS_FASTREG_SIZE 20
-#define RDS_FASTREG_POOL_SIZE 2048
-
-#define RDS_IW_MAX_SGE 8
-#define RDS_IW_RECV_SGE 2
-
-#define RDS_IW_DEFAULT_RECV_WR 1024
-#define RDS_IW_DEFAULT_SEND_WR 256
-
-#define RDS_IW_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */
-
-extern struct list_head rds_iw_devices;
-
-/*
- * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
- * try and minimize the amount of memory tied up both the device and
- * socket receive queues.
- */
-/* page offset of the final full frag that fits in the page */
-#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
-struct rds_page_frag {
- struct list_head f_item;
- struct page *f_page;
- unsigned long f_offset;
- dma_addr_t f_mapped;
-};
-
-struct rds_iw_incoming {
- struct list_head ii_frags;
- struct rds_incoming ii_inc;
-};
-
-struct rds_iw_connect_private {
- /* Add new fields at the end, and don't permute existing fields. */
- __be32 dp_saddr;
- __be32 dp_daddr;
- u8 dp_protocol_major;
- u8 dp_protocol_minor;
- __be16 dp_protocol_minor_mask; /* bitmask */
- __be32 dp_reserved1;
- __be64 dp_ack_seq;
- __be32 dp_credit; /* non-zero enables flow ctl */
-};
-
-struct rds_iw_scatterlist {
- struct scatterlist *list;
- unsigned int len;
- int dma_len;
- unsigned int dma_npages;
- unsigned int bytes;
-};
-
-struct rds_iw_mapping {
- spinlock_t m_lock; /* protect the mapping struct */
- struct list_head m_list;
- struct rds_iw_mr *m_mr;
- uint32_t m_rkey;
- struct rds_iw_scatterlist m_sg;
-};
-
-struct rds_iw_send_work {
- struct rds_message *s_rm;
-
- /* We should really put these into a union: */
- struct rm_rdma_op *s_op;
- struct rds_iw_mapping *s_mapping;
- struct ib_mr *s_mr;
- unsigned char s_remap_count;
-
- union {
- struct ib_send_wr s_send_wr;
- struct ib_rdma_wr s_rdma_wr;
- struct ib_reg_wr s_reg_wr;
- };
- struct ib_sge s_sge[RDS_IW_MAX_SGE];
- unsigned long s_queued;
-};
-
-struct rds_iw_recv_work {
- struct rds_iw_incoming *r_iwinc;
- struct rds_page_frag *r_frag;
- struct ib_recv_wr r_wr;
- struct ib_sge r_sge[2];
-};
-
-struct rds_iw_work_ring {
- u32 w_nr;
- u32 w_alloc_ptr;
- u32 w_alloc_ctr;
- u32 w_free_ptr;
- atomic_t w_free_ctr;
-};
-
-struct rds_iw_device;
-
-struct rds_iw_connection {
-
- struct list_head iw_node;
- struct rds_iw_device *rds_iwdev;
- struct rds_connection *conn;
-
- /* alphabet soup, IBTA style */
- struct rdma_cm_id *i_cm_id;
- struct ib_pd *i_pd;
- struct ib_mr *i_mr;
- struct ib_cq *i_send_cq;
- struct ib_cq *i_recv_cq;
-
- /* tx */
- struct rds_iw_work_ring i_send_ring;
- struct rds_message *i_rm;
- struct rds_header *i_send_hdrs;
- u64 i_send_hdrs_dma;
- struct rds_iw_send_work *i_sends;
-
- /* rx */
- struct tasklet_struct i_recv_tasklet;
- struct mutex i_recv_mutex;
- struct rds_iw_work_ring i_recv_ring;
- struct rds_iw_incoming *i_iwinc;
- u32 i_recv_data_rem;
- struct rds_header *i_recv_hdrs;
- u64 i_recv_hdrs_dma;
- struct rds_iw_recv_work *i_recvs;
- struct rds_page_frag i_frag;
- u64 i_ack_recv; /* last ACK received */
-
- /* sending acks */
- unsigned long i_ack_flags;
-#ifdef KERNEL_HAS_ATOMIC64
- atomic64_t i_ack_next; /* next ACK to send */
-#else
- spinlock_t i_ack_lock; /* protect i_ack_next */
- u64 i_ack_next; /* next ACK to send */
-#endif
- struct rds_header *i_ack;
- struct ib_send_wr i_ack_wr;
- struct ib_sge i_ack_sge;
- u64 i_ack_dma;
- unsigned long i_ack_queued;
-
- /* Flow control related information
- *
- * Our algorithm uses a pair variables that we need to access
- * atomically - one for the send credits, and one posted
- * recv credits we need to transfer to remote.
- * Rather than protect them using a slow spinlock, we put both into
- * a single atomic_t and update it using cmpxchg
- */
- atomic_t i_credits;
-
- /* Protocol version specific information */
- unsigned int i_flowctl:1; /* enable/disable flow ctl */
- unsigned int i_dma_local_lkey:1;
- unsigned int i_fastreg_posted:1; /* fastreg posted on this connection */
- /* Batched completions */
- unsigned int i_unsignaled_wrs;
- long i_unsignaled_bytes;
-};
-
-/* This assumes that atomic_t is at least 32 bits */
-#define IB_GET_SEND_CREDITS(v) ((v) & 0xffff)
-#define IB_GET_POST_CREDITS(v) ((v) >> 16)
-#define IB_SET_SEND_CREDITS(v) ((v) & 0xffff)
-#define IB_SET_POST_CREDITS(v) ((v) << 16)
-
-struct rds_iw_cm_id {
- struct list_head list;
- struct rdma_cm_id *cm_id;
-};
-
-struct rds_iw_device {
- struct list_head list;
- struct list_head cm_id_list;
- struct list_head conn_list;
- struct ib_device *dev;
- struct ib_pd *pd;
- struct ib_mr *mr;
- struct rds_iw_mr_pool *mr_pool;
- int max_sge;
- unsigned int max_wrs;
- unsigned int dma_local_lkey:1;
- spinlock_t spinlock; /* protect the above */
-};
-
-/* bits for i_ack_flags */
-#define IB_ACK_IN_FLIGHT 0
-#define IB_ACK_REQUESTED 1
-
-/* Magic WR_ID for ACKs */
-#define RDS_IW_ACK_WR_ID ((u64)0xffffffffffffffffULL)
-#define RDS_IW_REG_WR_ID ((u64)0xefefefefefefefefULL)
-#define RDS_IW_LOCAL_INV_WR_ID ((u64)0xdfdfdfdfdfdfdfdfULL)
-
-struct rds_iw_statistics {
- uint64_t s_iw_connect_raced;
- uint64_t s_iw_listen_closed_stale;
- uint64_t s_iw_tx_cq_call;
- uint64_t s_iw_tx_cq_event;
- uint64_t s_iw_tx_ring_full;
- uint64_t s_iw_tx_throttle;
- uint64_t s_iw_tx_sg_mapping_failure;
- uint64_t s_iw_tx_stalled;
- uint64_t s_iw_tx_credit_updates;
- uint64_t s_iw_rx_cq_call;
- uint64_t s_iw_rx_cq_event;
- uint64_t s_iw_rx_ring_empty;
- uint64_t s_iw_rx_refill_from_cq;
- uint64_t s_iw_rx_refill_from_thread;
- uint64_t s_iw_rx_alloc_limit;
- uint64_t s_iw_rx_credit_updates;
- uint64_t s_iw_ack_sent;
- uint64_t s_iw_ack_send_failure;
- uint64_t s_iw_ack_send_delayed;
- uint64_t s_iw_ack_send_piggybacked;
- uint64_t s_iw_ack_received;
- uint64_t s_iw_rdma_mr_alloc;
- uint64_t s_iw_rdma_mr_free;
- uint64_t s_iw_rdma_mr_used;
- uint64_t s_iw_rdma_mr_pool_flush;
- uint64_t s_iw_rdma_mr_pool_wait;
- uint64_t s_iw_rdma_mr_pool_depleted;
-};
-
-extern struct workqueue_struct *rds_iw_wq;
-
-/*
- * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
- * doesn't define it.
- */
-static inline void rds_iw_dma_sync_sg_for_cpu(struct ib_device *dev,
- struct scatterlist *sg, unsigned int sg_dma_len, int direction)
-{
- unsigned int i;
-
- for (i = 0; i < sg_dma_len; ++i) {
- ib_dma_sync_single_for_cpu(dev,
- ib_sg_dma_address(dev, &sg[i]),
- ib_sg_dma_len(dev, &sg[i]),
- direction);
- }
-}
-#define ib_dma_sync_sg_for_cpu rds_iw_dma_sync_sg_for_cpu
-
-static inline void rds_iw_dma_sync_sg_for_device(struct ib_device *dev,
- struct scatterlist *sg, unsigned int sg_dma_len, int direction)
-{
- unsigned int i;
-
- for (i = 0; i < sg_dma_len; ++i) {
- ib_dma_sync_single_for_device(dev,
- ib_sg_dma_address(dev, &sg[i]),
- ib_sg_dma_len(dev, &sg[i]),
- direction);
- }
-}
-#define ib_dma_sync_sg_for_device rds_iw_dma_sync_sg_for_device
-
-static inline u32 rds_iw_local_dma_lkey(struct rds_iw_connection *ic)
-{
- return ic->i_dma_local_lkey ? ic->i_cm_id->device->local_dma_lkey : ic->i_mr->lkey;
-}
-
-/* ib.c */
-extern struct rds_transport rds_iw_transport;
-extern struct ib_client rds_iw_client;
-
-extern unsigned int fastreg_pool_size;
-extern unsigned int fastreg_message_size;
-
-extern spinlock_t iw_nodev_conns_lock;
-extern struct list_head iw_nodev_conns;
-
-/* ib_cm.c */
-int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp);
-void rds_iw_conn_free(void *arg);
-int rds_iw_conn_connect(struct rds_connection *conn);
-void rds_iw_conn_shutdown(struct rds_connection *conn);
-void rds_iw_state_change(struct sock *sk);
-int rds_iw_listen_init(void);
-void rds_iw_listen_stop(void);
-void __rds_iw_conn_error(struct rds_connection *conn, const char *, ...);
-int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
- struct rdma_cm_event *event);
-int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id);
-void rds_iw_cm_connect_complete(struct rds_connection *conn,
- struct rdma_cm_event *event);
-
-
-#define rds_iw_conn_error(conn, fmt...) \
- __rds_iw_conn_error(conn, KERN_WARNING "RDS/IW: " fmt)
-
-/* ib_rdma.c */
-int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
-void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
-void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
-void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock);
-static inline void rds_iw_destroy_nodev_conns(void)
-{
- __rds_iw_destroy_conns(&iw_nodev_conns, &iw_nodev_conns_lock);
-}
-static inline void rds_iw_destroy_conns(struct rds_iw_device *rds_iwdev)
-{
- __rds_iw_destroy_conns(&rds_iwdev->conn_list, &rds_iwdev->spinlock);
-}
-struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *);
-void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo);
-void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *);
-void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
- struct rds_sock *rs, u32 *key_ret);
-void rds_iw_sync_mr(void *trans_private, int dir);
-void rds_iw_free_mr(void *trans_private, int invalidate);
-void rds_iw_flush_mrs(void);
-
-/* ib_recv.c */
-int rds_iw_recv_init(void);
-void rds_iw_recv_exit(void);
-int rds_iw_recv(struct rds_connection *conn);
-int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
- gfp_t page_gfp, int prefill);
-void rds_iw_inc_free(struct rds_incoming *inc);
-int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
-void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
-void rds_iw_recv_tasklet_fn(unsigned long data);
-void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
-void rds_iw_recv_clear_ring(struct rds_iw_connection *ic);
-void rds_iw_recv_init_ack(struct rds_iw_connection *ic);
-void rds_iw_attempt_ack(struct rds_iw_connection *ic);
-void rds_iw_ack_send_complete(struct rds_iw_connection *ic);
-u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic);
-
-/* ib_ring.c */
-void rds_iw_ring_init(struct rds_iw_work_ring *ring, u32 nr);
-void rds_iw_ring_resize(struct rds_iw_work_ring *ring, u32 nr);
-u32 rds_iw_ring_alloc(struct rds_iw_work_ring *ring, u32 val, u32 *pos);
-void rds_iw_ring_free(struct rds_iw_work_ring *ring, u32 val);
-void rds_iw_ring_unalloc(struct rds_iw_work_ring *ring, u32 val);
-int rds_iw_ring_empty(struct rds_iw_work_ring *ring);
-int rds_iw_ring_low(struct rds_iw_work_ring *ring);
-u32 rds_iw_ring_oldest(struct rds_iw_work_ring *ring);
-u32 rds_iw_ring_completed(struct rds_iw_work_ring *ring, u32 wr_id, u32 oldest);
-extern wait_queue_head_t rds_iw_ring_empty_wait;
-
-/* ib_send.c */
-void rds_iw_xmit_complete(struct rds_connection *conn);
-int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
- unsigned int hdr_off, unsigned int sg, unsigned int off);
-void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
-void rds_iw_send_init_ring(struct rds_iw_connection *ic);
-void rds_iw_send_clear_ring(struct rds_iw_connection *ic);
-int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
-void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
-void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
-int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
- u32 *adv_credits, int need_posted, int max_posted);
-
-/* ib_stats.c */
-DECLARE_PER_CPU(struct rds_iw_statistics, rds_iw_stats);
-#define rds_iw_stats_inc(member) rds_stats_inc_which(rds_iw_stats, member)
-unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter,
- unsigned int avail);
-
-/* ib_sysctl.c */
-int rds_iw_sysctl_init(void);
-void rds_iw_sysctl_exit(void);
-extern unsigned long rds_iw_sysctl_max_send_wr;
-extern unsigned long rds_iw_sysctl_max_recv_wr;
-extern unsigned long rds_iw_sysctl_max_unsig_wrs;
-extern unsigned long rds_iw_sysctl_max_unsig_bytes;
-extern unsigned long rds_iw_sysctl_max_recv_allocation;
-extern unsigned int rds_iw_sysctl_flow_control;
-
-/*
- * Helper functions for getting/setting the header and data SGEs in
- * RDS packets (not RDMA)
- */
-static inline struct ib_sge *
-rds_iw_header_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
-{
- return &sge[0];
-}
-
-static inline struct ib_sge *
-rds_iw_data_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
-{
- return &sge[1];
-}
-
-#endif
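
One idea from the deleted header is worth spelling out before it goes: the flow-control comment above packs send credits and posted-receive credits into the two 16-bit halves of a single atomic_t so both can be updated together with cmpxchg rather than under a spinlock. A sketch of the claim path under that scheme, using the IB_GET_/IB_SET_ macros being removed (the function name is illustrative; the real logic lived in rds_iw_send_grab_credits(), which also handles partial grabs):

	static unsigned int demo_grab_send_credits(atomic_t *credits,
						   unsigned int wanted)
	{
		int oldval, newval;

		do {
			oldval = atomic_read(credits);
			if (IB_GET_SEND_CREDITS(oldval) < wanted)
				return 0;	/* too few send credits posted */
			/* Send credits occupy the low 16 bits, so a plain
			 * subtraction debits them without disturbing the
			 * post credits in the high half.
			 */
			newval = oldval - IB_SET_SEND_CREDITS(wanted);
		} while (atomic_cmpxchg(credits, oldval, newval) != oldval);

		return wanted;
	}
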
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
deleted file mode 100644
index aea4c911bc76..000000000000
--- a/net/rds/iw_cm.c
+++ /dev/null
@@ -1,769 +0,0 @@
-/*
- * Copyright (c) 2006 Oracle. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/kernel.h>
-#include <linux/in.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/ratelimit.h>
-
-#include "rds.h"
-#include "iw.h"
-
-/*
- * Set the selected protocol version
- */
-static void rds_iw_set_protocol(struct rds_connection *conn, unsigned int version)
-{
- conn->c_version = version;
-}
-
-/*
- * Set up flow control
- */
-static void rds_iw_set_flow_control(struct rds_connection *conn, u32 credits)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
-
- if (rds_iw_sysctl_flow_control && credits != 0) {
- /* We're doing flow control */
- ic->i_flowctl = 1;
- rds_iw_send_add_credits(conn, credits);
- } else {
- ic->i_flowctl = 0;
- }
-}
-
-/*
- * Connection established.
- * We get here for both outgoing and incoming connection.
- */
-void rds_iw_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
-{
- const struct rds_iw_connect_private *dp = NULL;
- struct rds_iw_connection *ic = conn->c_transport_data;
- struct rds_iw_device *rds_iwdev;
- int err;
-
- if (event->param.conn.private_data_len) {
- dp = event->param.conn.private_data;
-
- rds_iw_set_protocol(conn,
- RDS_PROTOCOL(dp->dp_protocol_major,
- dp->dp_protocol_minor));
- rds_iw_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
- }
-
- /* update ib_device with this local ipaddr & conn */
- rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
- err = rds_iw_update_cm_id(rds_iwdev, ic->i_cm_id);
- if (err)
- printk(KERN_ERR "rds_iw_update_ipaddr failed (%d)\n", err);
- rds_iw_add_conn(rds_iwdev, conn);
-
- /* If the peer gave us the last packet it saw, process this as if
- * we had received a regular ACK. */
- if (dp && dp->dp_ack_seq)
- rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);
-
- printk(KERN_NOTICE "RDS/IW: connected to %pI4<->%pI4 version %u.%u%s\n",
- &conn->c_laddr, &conn->c_faddr,
- RDS_PROTOCOL_MAJOR(conn->c_version),
- RDS_PROTOCOL_MINOR(conn->c_version),
- ic->i_flowctl ? ", flow control" : "");
-
- rds_connect_complete(conn);
-}
-
-static void rds_iw_cm_fill_conn_param(struct rds_connection *conn,
- struct rdma_conn_param *conn_param,
- struct rds_iw_connect_private *dp,
- u32 protocol_version)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
-
- memset(conn_param, 0, sizeof(struct rdma_conn_param));
- /* XXX tune these? */
- conn_param->responder_resources = 1;
- conn_param->initiator_depth = 1;
-
- if (dp) {
- memset(dp, 0, sizeof(*dp));
- dp->dp_saddr = conn->c_laddr;
- dp->dp_daddr = conn->c_faddr;
- dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
- dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
- dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IW_SUPPORTED_PROTOCOLS);
- dp->dp_ack_seq = rds_iw_piggyb_ack(ic);
-
- /* Advertise flow control */
- if (ic->i_flowctl) {
- unsigned int credits;
-
- credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
- dp->dp_credit = cpu_to_be32(credits);
- atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
- }
-
- conn_param->private_data = dp;
- conn_param->private_data_len = sizeof(*dp);
- }
-}
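The atomic i_credits consulted above multiplexes two counters, which is why
the code goes through IB_GET_POST_CREDITS()/IB_SET_POST_CREDITS() rather than
reading the atomic directly. The real macros live in iw.h, which is not part
of this hunk; the sketch below shows one plausible packing and is an
assumption, not the file's actual definition:

	/* Assumed packing: send credits in the low 16 bits, posted receive
	 * buffer credits in the high 16 bits, so both halves can be updated
	 * with a single atomic operation. */
	#define IW_GET_SEND_CREDITS(v)	((v) & 0xffff)
	#define IW_GET_POST_CREDITS(v)	((v) >> 16)
	#define IW_SET_SEND_CREDITS(v)	((v) & 0xffff)
	#define IW_SET_POST_CREDITS(v)	((v) << 16)

Under such a layout, the atomic_sub(IB_SET_POST_CREDITS(credits), ...) above
removes the advertised amount from the post-credit half without disturbing
the send-credit half.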
-
-static void rds_iw_cq_event_handler(struct ib_event *event, void *data)
-{
- rdsdebug("event %u data %p\n", event->event, data);
-}
-
-static void rds_iw_qp_event_handler(struct ib_event *event, void *data)
-{
- struct rds_connection *conn = data;
- struct rds_iw_connection *ic = conn->c_transport_data;
-
- rdsdebug("conn %p ic %p event %u\n", conn, ic, event->event);
-
- switch (event->event) {
- case IB_EVENT_COMM_EST:
- rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
- break;
- case IB_EVENT_QP_REQ_ERR:
- case IB_EVENT_QP_FATAL:
- default:
- rdsdebug("Fatal QP Event %u "
- "- connection %pI4->%pI4, reconnecting\n",
- event->event, &conn->c_laddr,
- &conn->c_faddr);
- rds_conn_drop(conn);
- break;
- }
-}
-
-/*
- * Create a QP
- */
-static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr,
- struct rds_iw_device *rds_iwdev,
- struct rds_iw_work_ring *send_ring,
- void (*send_cq_handler)(struct ib_cq *, void *),
- struct rds_iw_work_ring *recv_ring,
- void (*recv_cq_handler)(struct ib_cq *, void *),
- void *context)
-{
- struct ib_device *dev = rds_iwdev->dev;
- struct ib_cq_init_attr cq_attr = {};
- unsigned int send_size, recv_size;
- int ret;
-
- /* The offset of 1 is to accommodate the additional ACK WR. */
- send_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_send_wr + 1);
- recv_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_recv_wr + 1);
- rds_iw_ring_resize(send_ring, send_size - 1);
- rds_iw_ring_resize(recv_ring, recv_size - 1);
-
- memset(attr, 0, sizeof(*attr));
- attr->event_handler = rds_iw_qp_event_handler;
- attr->qp_context = context;
- attr->cap.max_send_wr = send_size;
- attr->cap.max_recv_wr = recv_size;
- attr->cap.max_send_sge = rds_iwdev->max_sge;
- attr->cap.max_recv_sge = RDS_IW_RECV_SGE;
- attr->sq_sig_type = IB_SIGNAL_REQ_WR;
- attr->qp_type = IB_QPT_RC;
-
- cq_attr.cqe = send_size;
- attr->send_cq = ib_create_cq(dev, send_cq_handler,
- rds_iw_cq_event_handler,
- context, &cq_attr);
- if (IS_ERR(attr->send_cq)) {
- ret = PTR_ERR(attr->send_cq);
- attr->send_cq = NULL;
- rdsdebug("ib_create_cq send failed: %d\n", ret);
- goto out;
- }
-
- cq_attr.cqe = recv_size;
- attr->recv_cq = ib_create_cq(dev, recv_cq_handler,
- rds_iw_cq_event_handler,
- context, &cq_attr);
- if (IS_ERR(attr->recv_cq)) {
- ret = PTR_ERR(attr->recv_cq);
- attr->recv_cq = NULL;
- rdsdebug("ib_create_cq send failed: %d\n", ret);
- goto out;
- }
-
- ret = ib_req_notify_cq(attr->send_cq, IB_CQ_NEXT_COMP);
- if (ret) {
- rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
- goto out;
- }
-
- ret = ib_req_notify_cq(attr->recv_cq, IB_CQ_SOLICITED);
- if (ret) {
- rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
- goto out;
- }
-
-out:
- if (ret) {
- if (attr->send_cq)
- ib_destroy_cq(attr->send_cq);
- if (attr->recv_cq)
- ib_destroy_cq(attr->recv_cq);
- }
- return ret;
-}
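To make the sizing arithmetic above concrete, here is a worked example; the
sysctl value of 256 is only assumed for illustration:

	/* With rds_iw_sysctl_max_send_wr == 256 and a device limit of
	 * 16384 WRs:
	 *   send_size = min(16384, 256 + 1) = 257  (QP slots requested)
	 *   ring size = send_size - 1       = 256  (slots under ring accounting)
	 * The slot left over is reserved for the out-of-ring ACK WR that
	 * iw_recv.c posts with wr_id RDS_IW_ACK_WR_ID. */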
-
-/*
- * This needs to be very careful to not leave IS_ERR pointers around for
- * cleanup to trip over.
- */
-static int rds_iw_setup_qp(struct rds_connection *conn)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
- struct ib_device *dev = ic->i_cm_id->device;
- struct ib_qp_init_attr attr;
- struct rds_iw_device *rds_iwdev;
- int ret;
-
- /* rds_iw_add_one creates a rds_iw_device object per IB device,
- * and allocates a protection domain, memory range and MR pool
- * for each. If that fails for any reason, it will not register
- * the rds_iwdev at all.
- */
- rds_iwdev = ib_get_client_data(dev, &rds_iw_client);
- if (!rds_iwdev) {
- printk_ratelimited(KERN_NOTICE "RDS/IW: No client_data for device %s\n",
- dev->name);
- return -EOPNOTSUPP;
- }
-
- /* Protection domain and memory range */
- ic->i_pd = rds_iwdev->pd;
- ic->i_mr = rds_iwdev->mr;
-
- ret = rds_iw_init_qp_attrs(&attr, rds_iwdev,
- &ic->i_send_ring, rds_iw_send_cq_comp_handler,
- &ic->i_recv_ring, rds_iw_recv_cq_comp_handler,
- conn);
- if (ret < 0)
- goto out;
-
- ic->i_send_cq = attr.send_cq;
- ic->i_recv_cq = attr.recv_cq;
-
- /*
- * XXX this can fail if max_*_wr is too large? Are we supposed
- * to back off until we get a value that the hardware can support?
- */
- ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
- if (ret) {
- rdsdebug("rdma_create_qp failed: %d\n", ret);
- goto out;
- }
-
- ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
- ic->i_send_ring.w_nr *
- sizeof(struct rds_header),
- &ic->i_send_hdrs_dma, GFP_KERNEL);
- if (!ic->i_send_hdrs) {
- ret = -ENOMEM;
- rdsdebug("ib_dma_alloc_coherent send failed\n");
- goto out;
- }
-
- ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
- ic->i_recv_ring.w_nr *
- sizeof(struct rds_header),
- &ic->i_recv_hdrs_dma, GFP_KERNEL);
- if (!ic->i_recv_hdrs) {
- ret = -ENOMEM;
- rdsdebug("ib_dma_alloc_coherent recv failed\n");
- goto out;
- }
-
- ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
- &ic->i_ack_dma, GFP_KERNEL);
- if (!ic->i_ack) {
- ret = -ENOMEM;
- rdsdebug("ib_dma_alloc_coherent ack failed\n");
- goto out;
- }
-
- ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_iw_send_work));
- if (!ic->i_sends) {
- ret = -ENOMEM;
- rdsdebug("send allocation failed\n");
- goto out;
- }
- rds_iw_send_init_ring(ic);
-
- ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_iw_recv_work));
- if (!ic->i_recvs) {
- ret = -ENOMEM;
- rdsdebug("recv allocation failed\n");
- goto out;
- }
-
- rds_iw_recv_init_ring(ic);
- rds_iw_recv_init_ack(ic);
-
- /* Post receive buffers - as a side effect, this will update
- * the posted credit count. */
- rds_iw_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);
-
- rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr,
- ic->i_send_cq, ic->i_recv_cq);
-
-out:
- return ret;
-}
-
-static u32 rds_iw_protocol_compatible(const struct rds_iw_connect_private *dp)
-{
- u16 common;
- u32 version = 0;
-
- /* rdma_cm private data is odd - when there is any private data in the
- * request, we will be given a pretty large buffer without telling us the
- * original size. The only way to tell the difference is by looking at
- * the contents, which are initialized to zero.
- * If the protocol version fields aren't set, this is a connection attempt
- * from an older version. This could be 3.0 or 2.0 - we can't tell.
- * We really should have changed this for OFED 1.3 :-( */
- if (dp->dp_protocol_major == 0)
- return RDS_PROTOCOL_3_0;
-
- common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IW_SUPPORTED_PROTOCOLS;
- if (dp->dp_protocol_major == 3 && common) {
- version = RDS_PROTOCOL_3_0;
- while ((common >>= 1) != 0)
- version++;
- } else
- printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using "
- "incompatible protocol version %u.%u\n",
- &dp->dp_saddr,
- dp->dp_protocol_major,
- dp->dp_protocol_minor);
- return version;
-}
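The bit scan above encodes negotiation compactly: bit 0 of the shared minor
mask stands for version 3.0 and every further common bit bumps the minor
number by one. The same computation as a standalone sketch
(pick_minor_version() is illustrative, not a function from this file):

	static u32 pick_minor_version(u16 common)
	{
		u32 version = RDS_PROTOCOL_3_0;	/* bit 0 => version 3.0 */

		/* each additional shared bit advertises one more minor rev */
		while ((common >>= 1) != 0)
			version++;
		return version;
	}

So a peer advertising minors 0 and 1 (mask 0x0003) negotiates 3.1 when both
bits are also set in RDS_IW_SUPPORTED_PROTOCOLS.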
-
-int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
- struct rdma_cm_event *event)
-{
- const struct rds_iw_connect_private *dp = event->param.conn.private_data;
- struct rds_iw_connect_private dp_rep;
- struct rds_connection *conn = NULL;
- struct rds_iw_connection *ic = NULL;
- struct rdma_conn_param conn_param;
- struct rds_iw_device *rds_iwdev;
- u32 version;
- int err, destroy = 1;
-
- /* Check whether the remote protocol version matches ours. */
- version = rds_iw_protocol_compatible(dp);
- if (!version)
- goto out;
-
- rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u\n",
- &dp->dp_saddr, &dp->dp_daddr,
- RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version));
-
- /* RDS/IW is not currently netns aware, thus init_net */
- conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
- &rds_iw_transport, GFP_KERNEL);
- if (IS_ERR(conn)) {
- rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
- conn = NULL;
- goto out;
- }
-
- /*
- * The connection request may occur while the
- * previous connection still exists, e.g. in case of failover.
- * But as connections may be initiated simultaneously
- * by both hosts, we have a random backoff mechanism -
- * see the comment above rds_queue_reconnect()
- */
- mutex_lock(&conn->c_cm_lock);
- if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
- if (rds_conn_state(conn) == RDS_CONN_UP) {
- rdsdebug("incoming connect while connection is up\n");
- rds_conn_drop(conn);
- rds_iw_stats_inc(s_iw_listen_closed_stale);
- } else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
- /* Wait and see - our connect may still be succeeding */
- rds_iw_stats_inc(s_iw_connect_raced);
- }
- mutex_unlock(&conn->c_cm_lock);
- goto out;
- }
-
- ic = conn->c_transport_data;
-
- rds_iw_set_protocol(conn, version);
- rds_iw_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
-
- /* If the peer gave us the last packet it saw, process this as if
- * we had received a regular ACK. */
- if (dp->dp_ack_seq)
- rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);
-
- BUG_ON(cm_id->context);
- BUG_ON(ic->i_cm_id);
-
- ic->i_cm_id = cm_id;
- cm_id->context = conn;
-
- rds_iwdev = ib_get_client_data(cm_id->device, &rds_iw_client);
- ic->i_dma_local_lkey = rds_iwdev->dma_local_lkey;
-
- /* We got halfway through setting up the ib_connection, if we
- * fail now, we have to take the long route out of this mess. */
- destroy = 0;
-
- err = rds_iw_setup_qp(conn);
- if (err) {
- rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err);
- mutex_unlock(&conn->c_cm_lock);
- goto out;
- }
-
- rds_iw_cm_fill_conn_param(conn, &conn_param, &dp_rep, version);
-
- /* rdma_accept() calls rdma_reject() internally if it fails */
- err = rdma_accept(cm_id, &conn_param);
- mutex_unlock(&conn->c_cm_lock);
- if (err) {
- rds_iw_conn_error(conn, "rdma_accept failed (%d)\n", err);
- goto out;
- }
-
- return 0;
-
-out:
- rdma_reject(cm_id, NULL, 0);
- return destroy;
-}
-
-
-int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id)
-{
- struct rds_connection *conn = cm_id->context;
- struct rds_iw_connection *ic = conn->c_transport_data;
- struct rdma_conn_param conn_param;
- struct rds_iw_connect_private dp;
- int ret;
-
- /* If the peer doesn't do protocol negotiation, we must
- * default to RDSv3.0 */
- rds_iw_set_protocol(conn, RDS_PROTOCOL_3_0);
- ic->i_flowctl = rds_iw_sysctl_flow_control; /* advertise flow control */
-
- ret = rds_iw_setup_qp(conn);
- if (ret) {
- rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", ret);
- goto out;
- }
-
- rds_iw_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION);
-
- ret = rdma_connect(cm_id, &conn_param);
- if (ret)
- rds_iw_conn_error(conn, "rdma_connect failed (%d)\n", ret);
-
-out:
- /* Beware - returning non-zero tells the rdma_cm to destroy
- * the cm_id. We should certainly not do it as long as we still
- * "own" the cm_id. */
- if (ret) {
- struct rds_iw_connection *ic = conn->c_transport_data;
-
- if (ic->i_cm_id == cm_id)
- ret = 0;
- }
- return ret;
-}
-
-int rds_iw_conn_connect(struct rds_connection *conn)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
- struct rds_iw_device *rds_iwdev;
- struct sockaddr_in src, dest;
- int ret;
-
- /* XXX I wonder what effect the port space has */
- /* delegate cm event handler to rdma_transport */
- ic->i_cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, conn,
- RDMA_PS_TCP, IB_QPT_RC);
- if (IS_ERR(ic->i_cm_id)) {
- ret = PTR_ERR(ic->i_cm_id);
- ic->i_cm_id = NULL;
- rdsdebug("rdma_create_id() failed: %d\n", ret);
- goto out;
- }
-
- rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);
-
- src.sin_family = AF_INET;
- src.sin_addr.s_addr = (__force u32)conn->c_laddr;
- src.sin_port = (__force u16)htons(0);
-
- /* First, bind to the local address and device. */
- ret = rdma_bind_addr(ic->i_cm_id, (struct sockaddr *) &src);
- if (ret) {
- rdsdebug("rdma_bind_addr(%pI4) failed: %d\n",
- &conn->c_laddr, ret);
- rdma_destroy_id(ic->i_cm_id);
- ic->i_cm_id = NULL;
- goto out;
- }
-
- rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
- ic->i_dma_local_lkey = rds_iwdev->dma_local_lkey;
-
- dest.sin_family = AF_INET;
- dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
- dest.sin_port = (__force u16)htons(RDS_PORT);
-
- ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
- (struct sockaddr *)&dest,
- RDS_RDMA_RESOLVE_TIMEOUT_MS);
- if (ret) {
- rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
- ret);
- rdma_destroy_id(ic->i_cm_id);
- ic->i_cm_id = NULL;
- }
-
-out:
- return ret;
-}
-
-/*
- * This is so careful about only cleaning up resources that were built up
- * so that it can be called at any point during startup. In fact it
- * can be called multiple times for a given connection.
- */
-void rds_iw_conn_shutdown(struct rds_connection *conn)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
- int err = 0;
- struct ib_qp_attr qp_attr;
-
- rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
- ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
- ic->i_cm_id ? ic->i_cm_id->qp : NULL);
-
- if (ic->i_cm_id) {
- struct ib_device *dev = ic->i_cm_id->device;
-
- rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
- err = rdma_disconnect(ic->i_cm_id);
- if (err) {
- /* Actually this may happen quite frequently, when
- * an outgoing connect raced with an incoming connect.
- */
- rdsdebug("failed to disconnect, cm: %p err %d\n",
- ic->i_cm_id, err);
- }
-
- if (ic->i_cm_id->qp) {
- qp_attr.qp_state = IB_QPS_ERR;
- ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
- }
-
- wait_event(rds_iw_ring_empty_wait,
- rds_iw_ring_empty(&ic->i_send_ring) &&
- rds_iw_ring_empty(&ic->i_recv_ring));
-
- if (ic->i_send_hdrs)
- ib_dma_free_coherent(dev,
- ic->i_send_ring.w_nr *
- sizeof(struct rds_header),
- ic->i_send_hdrs,
- ic->i_send_hdrs_dma);
-
- if (ic->i_recv_hdrs)
- ib_dma_free_coherent(dev,
- ic->i_recv_ring.w_nr *
- sizeof(struct rds_header),
- ic->i_recv_hdrs,
- ic->i_recv_hdrs_dma);
-
- if (ic->i_ack)
- ib_dma_free_coherent(dev, sizeof(struct rds_header),
- ic->i_ack, ic->i_ack_dma);
-
- if (ic->i_sends)
- rds_iw_send_clear_ring(ic);
- if (ic->i_recvs)
- rds_iw_recv_clear_ring(ic);
-
- if (ic->i_cm_id->qp)
- rdma_destroy_qp(ic->i_cm_id);
- if (ic->i_send_cq)
- ib_destroy_cq(ic->i_send_cq);
- if (ic->i_recv_cq)
- ib_destroy_cq(ic->i_recv_cq);
-
- /*
- * If associated with an rds_iw_device:
- * Move connection back to the nodev list.
- * Remove cm_id from the device cm_id list.
- */
- if (ic->rds_iwdev)
- rds_iw_remove_conn(ic->rds_iwdev, conn);
-
- rdma_destroy_id(ic->i_cm_id);
-
- ic->i_cm_id = NULL;
- ic->i_pd = NULL;
- ic->i_mr = NULL;
- ic->i_send_cq = NULL;
- ic->i_recv_cq = NULL;
- ic->i_send_hdrs = NULL;
- ic->i_recv_hdrs = NULL;
- ic->i_ack = NULL;
- }
- BUG_ON(ic->rds_iwdev);
-
- /* Clear pending transmit */
- if (ic->i_rm) {
- rds_message_put(ic->i_rm);
- ic->i_rm = NULL;
- }
-
- /* Clear the ACK state */
- clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
-#ifdef KERNEL_HAS_ATOMIC64
- atomic64_set(&ic->i_ack_next, 0);
-#else
- ic->i_ack_next = 0;
-#endif
- ic->i_ack_recv = 0;
-
- /* Clear flow control state */
- ic->i_flowctl = 0;
- atomic_set(&ic->i_credits, 0);
-
- rds_iw_ring_init(&ic->i_send_ring, rds_iw_sysctl_max_send_wr);
- rds_iw_ring_init(&ic->i_recv_ring, rds_iw_sysctl_max_recv_wr);
-
- if (ic->i_iwinc) {
- rds_inc_put(&ic->i_iwinc->ii_inc);
- ic->i_iwinc = NULL;
- }
-
- vfree(ic->i_sends);
- ic->i_sends = NULL;
- vfree(ic->i_recvs);
- ic->i_recvs = NULL;
- rdsdebug("shutdown complete\n");
-}
-
-int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp)
-{
- struct rds_iw_connection *ic;
- unsigned long flags;
-
- /* XXX too lazy? */
- ic = kzalloc(sizeof(struct rds_iw_connection), gfp);
- if (!ic)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&ic->iw_node);
- tasklet_init(&ic->i_recv_tasklet, rds_iw_recv_tasklet_fn,
- (unsigned long) ic);
- mutex_init(&ic->i_recv_mutex);
-#ifndef KERNEL_HAS_ATOMIC64
- spin_lock_init(&ic->i_ack_lock);
-#endif
-
- /*
- * rds_iw_conn_shutdown() waits for these to be emptied so they
- * must be initialized before it can be called.
- */
- rds_iw_ring_init(&ic->i_send_ring, rds_iw_sysctl_max_send_wr);
- rds_iw_ring_init(&ic->i_recv_ring, rds_iw_sysctl_max_recv_wr);
-
- ic->conn = conn;
- conn->c_transport_data = ic;
-
- spin_lock_irqsave(&iw_nodev_conns_lock, flags);
- list_add_tail(&ic->iw_node, &iw_nodev_conns);
- spin_unlock_irqrestore(&iw_nodev_conns_lock, flags);
-
- rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
- return 0;
-}
-
-/*
- * Free a connection. Connection must be shut down and not set for reconnect.
- */
-void rds_iw_conn_free(void *arg)
-{
- struct rds_iw_connection *ic = arg;
- spinlock_t *lock_ptr;
-
- rdsdebug("ic %p\n", ic);
-
- /*
- * Conn is either on a dev's list or on the nodev list.
- * A race with shutdown() or connect() would cause problems
- * (since rds_iwdev would change) but that should never happen.
- */
- lock_ptr = ic->rds_iwdev ? &ic->rds_iwdev->spinlock : &iw_nodev_conns_lock;
-
- spin_lock_irq(lock_ptr);
- list_del(&ic->iw_node);
- spin_unlock_irq(lock_ptr);
-
- kfree(ic);
-}
-
-/*
- * An error occurred on the connection
- */
-void
-__rds_iw_conn_error(struct rds_connection *conn, const char *fmt, ...)
-{
- va_list ap;
-
- rds_conn_drop(conn);
-
- va_start(ap, fmt);
- vprintk(fmt, ap);
- va_end(ap);
-}
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
deleted file mode 100644
index b09a40c1adce..000000000000
--- a/net/rds/iw_rdma.c
+++ /dev/null
@@ -1,837 +0,0 @@
-/*
- * Copyright (c) 2006 Oracle. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/ratelimit.h>
-
-#include "rds.h"
-#include "iw.h"
-
-
-/*
- * This is stored as mr->r_trans_private.
- */
-struct rds_iw_mr {
- struct rds_iw_device *device;
- struct rds_iw_mr_pool *pool;
- struct rdma_cm_id *cm_id;
-
- struct ib_mr *mr;
-
- struct rds_iw_mapping mapping;
- unsigned char remap_count;
-};
-
-/*
- * Our own little MR pool
- */
-struct rds_iw_mr_pool {
- struct rds_iw_device *device; /* back ptr to the device that owns us */
-
- struct mutex flush_lock; /* serialize fmr invalidate */
- struct work_struct flush_worker; /* flush worker */
-
- spinlock_t list_lock; /* protect variables below */
- atomic_t item_count; /* total # of MRs */
- atomic_t dirty_count; /* # of dirty MRs */
- struct list_head dirty_list; /* dirty mappings */
- struct list_head clean_list; /* unused & unmapped MRs */
- atomic_t free_pinned; /* memory pinned by free MRs */
- unsigned long max_message_size; /* in pages */
- unsigned long max_items;
- unsigned long max_items_soft;
- unsigned long max_free_pinned;
- int max_pages;
-};
-
-static void rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
-static void rds_iw_mr_pool_flush_worker(struct work_struct *work);
-static int rds_iw_init_reg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
-static int rds_iw_map_reg(struct rds_iw_mr_pool *pool,
- struct rds_iw_mr *ibmr,
- struct scatterlist *sg, unsigned int nents);
-static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
-static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
- struct list_head *unmap_list,
- struct list_head *kill_list,
- int *unpinned);
-static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
-
-static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
- struct rds_iw_device **rds_iwdev,
- struct rdma_cm_id **cm_id)
-{
- struct rds_iw_device *iwdev;
- struct rds_iw_cm_id *i_cm_id;
-
- *rds_iwdev = NULL;
- *cm_id = NULL;
-
- list_for_each_entry(iwdev, &rds_iw_devices, list) {
- spin_lock_irq(&iwdev->spinlock);
- list_for_each_entry(i_cm_id, &iwdev->cm_id_list, list) {
- struct sockaddr_in *src_addr, *dst_addr;
-
- src_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.src_addr;
- dst_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.dst_addr;
-
- rdsdebug("local ipaddr = %x port %d, "
- "remote ipaddr = %x port %d"
- "..looking for %x port %d, "
- "remote ipaddr = %x port %d\n",
- src_addr->sin_addr.s_addr,
- src_addr->sin_port,
- dst_addr->sin_addr.s_addr,
- dst_addr->sin_port,
- src->sin_addr.s_addr,
- src->sin_port,
- dst->sin_addr.s_addr,
- dst->sin_port);
-#ifdef WORKING_TUPLE_DETECTION
- if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
- src_addr->sin_port == src->sin_port &&
- dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
- dst_addr->sin_port == dst->sin_port) {
-#else
- /* FIXME - needs to compare the local and remote
- * ipaddr/port tuple, but the ipaddr is the only
- * available information in the rds_sock (as the rest are
- * zeroed). It doesn't appear to be properly populated
- * during connection setup...
- */
- if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
-#endif
- spin_unlock_irq(&iwdev->spinlock);
- *rds_iwdev = iwdev;
- *cm_id = i_cm_id->cm_id;
- return 0;
- }
- }
- spin_unlock_irq(&iwdev->spinlock);
- }
-
- return 1;
-}
-
-static int rds_iw_add_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
-{
- struct rds_iw_cm_id *i_cm_id;
-
- i_cm_id = kmalloc(sizeof *i_cm_id, GFP_KERNEL);
- if (!i_cm_id)
- return -ENOMEM;
-
- i_cm_id->cm_id = cm_id;
-
- spin_lock_irq(&rds_iwdev->spinlock);
- list_add_tail(&i_cm_id->list, &rds_iwdev->cm_id_list);
- spin_unlock_irq(&rds_iwdev->spinlock);
-
- return 0;
-}
-
-static void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev,
- struct rdma_cm_id *cm_id)
-{
- struct rds_iw_cm_id *i_cm_id;
-
- spin_lock_irq(&rds_iwdev->spinlock);
- list_for_each_entry(i_cm_id, &rds_iwdev->cm_id_list, list) {
- if (i_cm_id->cm_id == cm_id) {
- list_del(&i_cm_id->list);
- kfree(i_cm_id);
- break;
- }
- }
- spin_unlock_irq(&rds_iwdev->spinlock);
-}
-
-
-int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
-{
- struct sockaddr_in *src_addr, *dst_addr;
- struct rds_iw_device *rds_iwdev_old;
- struct rdma_cm_id *pcm_id;
- int rc;
-
- src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
- dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
-
- rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
- if (rc)
- rds_iw_remove_cm_id(rds_iwdev, cm_id);
-
- return rds_iw_add_cm_id(rds_iwdev, cm_id);
-}
-
-void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
-
- /* conn was previously on the nodev_conns_list */
- spin_lock_irq(&iw_nodev_conns_lock);
- BUG_ON(list_empty(&iw_nodev_conns));
- BUG_ON(list_empty(&ic->iw_node));
- list_del(&ic->iw_node);
-
- spin_lock(&rds_iwdev->spinlock);
- list_add_tail(&ic->iw_node, &rds_iwdev->conn_list);
- spin_unlock(&rds_iwdev->spinlock);
- spin_unlock_irq(&iw_nodev_conns_lock);
-
- ic->rds_iwdev = rds_iwdev;
-}
-
-void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
-
- /* place conn on nodev_conns_list */
- spin_lock(&iw_nodev_conns_lock);
-
- spin_lock_irq(&rds_iwdev->spinlock);
- BUG_ON(list_empty(&ic->iw_node));
- list_del(&ic->iw_node);
- spin_unlock_irq(&rds_iwdev->spinlock);
-
- list_add_tail(&ic->iw_node, &iw_nodev_conns);
-
- spin_unlock(&iw_nodev_conns_lock);
-
- rds_iw_remove_cm_id(ic->rds_iwdev, ic->i_cm_id);
- ic->rds_iwdev = NULL;
-}
-
-void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock)
-{
- struct rds_iw_connection *ic, *_ic;
- LIST_HEAD(tmp_list);
-
- /* avoid calling conn_destroy with irqs off */
- spin_lock_irq(list_lock);
- list_splice(list, &tmp_list);
- INIT_LIST_HEAD(list);
- spin_unlock_irq(list_lock);
-
- list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node)
- rds_conn_destroy(ic->conn);
-}
-
-static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
- struct scatterlist *list, unsigned int sg_len)
-{
- sg->list = list;
- sg->len = sg_len;
- sg->dma_len = 0;
- sg->dma_npages = 0;
- sg->bytes = 0;
-}
-
-static int rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
- struct rds_iw_scatterlist *sg)
-{
- struct ib_device *dev = rds_iwdev->dev;
- int i, ret;
-
- WARN_ON(sg->dma_len);
-
- sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
- if (unlikely(!sg->dma_len)) {
- printk(KERN_WARNING "RDS/IW: dma_map_sg failed!\n");
- return -EBUSY;
- }
-
- sg->bytes = 0;
- sg->dma_npages = 0;
-
- ret = -EINVAL;
- for (i = 0; i < sg->dma_len; ++i) {
- unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
- u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
- u64 end_addr;
-
- sg->bytes += dma_len;
-
- end_addr = dma_addr + dma_len;
- /* Only the first entry may start off a page boundary and only
- * the last may end off one; ~PAGE_MASK selects the in-page
- * offset bits. */
- if (dma_addr & ~PAGE_MASK) {
- if (i > 0)
- goto out_unmap;
- dma_addr &= PAGE_MASK;
- }
- if (end_addr & ~PAGE_MASK) {
- if (i < sg->dma_len - 1)
- goto out_unmap;
- end_addr = (end_addr + ~PAGE_MASK) & PAGE_MASK;
- }
-
- sg->dma_npages += (end_addr - dma_addr) >> PAGE_SHIFT;
- }
-
- /* Enforce the fastreg mapping size limit */
- if (sg->dma_npages > fastreg_message_size)
- goto out_unmap;
-
- return 0;
-
-out_unmap:
- ib_dma_unmap_sg(rds_iwdev->dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
- sg->dma_len = 0;
- return ret;
-}
-
-
-struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *rds_iwdev)
-{
- struct rds_iw_mr_pool *pool;
-
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- if (!pool) {
- printk(KERN_WARNING "RDS/IW: rds_iw_create_mr_pool alloc error\n");
- return ERR_PTR(-ENOMEM);
- }
-
- pool->device = rds_iwdev;
- INIT_LIST_HEAD(&pool->dirty_list);
- INIT_LIST_HEAD(&pool->clean_list);
- mutex_init(&pool->flush_lock);
- spin_lock_init(&pool->list_lock);
- INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker);
-
- pool->max_message_size = fastreg_message_size;
- pool->max_items = fastreg_pool_size;
- pool->max_free_pinned = pool->max_items * pool->max_message_size / 4;
- pool->max_pages = fastreg_message_size;
-
- /* We never allow more than max_items MRs to be allocated.
- * When we exceed more than max_items_soft, we start freeing
- * items more aggressively.
- * Make sure that max_items > max_items_soft > max_items / 2
- */
- pool->max_items_soft = pool->max_items * 3 / 4;
-
- return pool;
-}
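For a feel of the resulting limits, plug in a hypothetical fastreg_pool_size
of 2048 (the real default is defined elsewhere and may differ):

	/* max_items       = 2048
	 * max_items_soft  = 2048 * 3 / 4 = 1536  (2048 > 1536 > 1024 holds)
	 * max_free_pinned = 2048 * max_message_size / 4 pages
	 * The 3/4 factor satisfies max_items > max_items_soft > max_items/2
	 * for any max_items >= 4. */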
-
-void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo)
-{
- struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
-
- iinfo->rdma_mr_max = pool->max_items;
- iinfo->rdma_mr_size = pool->max_pages;
-}
-
-void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *pool)
-{
- flush_workqueue(rds_wq);
- rds_iw_flush_mr_pool(pool, 1);
- BUG_ON(atomic_read(&pool->item_count));
- BUG_ON(atomic_read(&pool->free_pinned));
- kfree(pool);
-}
-
-static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
-{
- struct rds_iw_mr *ibmr = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&pool->list_lock, flags);
- if (!list_empty(&pool->clean_list)) {
- ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list);
- list_del_init(&ibmr->mapping.m_list);
- }
- spin_unlock_irqrestore(&pool->list_lock, flags);
-
- return ibmr;
-}
-
-static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
-{
- struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
- struct rds_iw_mr *ibmr = NULL;
- int err = 0, iter = 0;
-
- while (1) {
- ibmr = rds_iw_reuse_fmr(pool);
- if (ibmr)
- return ibmr;
-
- /* No clean MRs - now we have the choice of either
- * allocating a fresh MR up to the limit imposed by the
- * driver, or flush any dirty unused MRs.
- * We try to avoid stalling in the send path if possible,
- * so we allocate as long as we're allowed to.
- *
- * We're fussy with enforcing the FMR limit, though. If the driver
- * tells us we can't use more than N fmrs, we shouldn't start
- * arguing with it */
- if (atomic_inc_return(&pool->item_count) <= pool->max_items)
- break;
-
- atomic_dec(&pool->item_count);
-
- if (++iter > 2) {
- rds_iw_stats_inc(s_iw_rdma_mr_pool_depleted);
- return ERR_PTR(-EAGAIN);
- }
-
- /* We do have some empty MRs. Flush them out. */
- rds_iw_stats_inc(s_iw_rdma_mr_pool_wait);
- rds_iw_flush_mr_pool(pool, 0);
- }
-
- ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
- if (!ibmr) {
- err = -ENOMEM;
- goto out_no_cigar;
- }
-
- spin_lock_init(&ibmr->mapping.m_lock);
- INIT_LIST_HEAD(&ibmr->mapping.m_list);
- ibmr->mapping.m_mr = ibmr;
-
- err = rds_iw_init_reg(pool, ibmr);
- if (err)
- goto out_no_cigar;
-
- rds_iw_stats_inc(s_iw_rdma_mr_alloc);
- return ibmr;
-
-out_no_cigar:
- if (ibmr) {
- rds_iw_destroy_fastreg(pool, ibmr);
- kfree(ibmr);
- }
- atomic_dec(&pool->item_count);
- return ERR_PTR(err);
-}
-
-void rds_iw_sync_mr(void *trans_private, int direction)
-{
- struct rds_iw_mr *ibmr = trans_private;
- struct rds_iw_device *rds_iwdev = ibmr->device;
-
- switch (direction) {
- case DMA_FROM_DEVICE:
- ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list,
- ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
- break;
- case DMA_TO_DEVICE:
- ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list,
- ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
- break;
- }
-}
-
-/*
- * Flush our pool of MRs.
- * At a minimum, all currently unused MRs are unmapped.
- * If the number of MRs allocated exceeds the limit, we also try
- * to free as many MRs as needed to get back to this limit.
- */
-static void rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
-{
- struct rds_iw_mr *ibmr, *next;
- LIST_HEAD(unmap_list);
- LIST_HEAD(kill_list);
- unsigned long flags;
- unsigned int nfreed = 0, ncleaned = 0, unpinned = 0;
-
- rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);
-
- mutex_lock(&pool->flush_lock);
-
- spin_lock_irqsave(&pool->list_lock, flags);
- /* Get the list of all mappings to be destroyed */
- list_splice_init(&pool->dirty_list, &unmap_list);
- if (free_all)
- list_splice_init(&pool->clean_list, &kill_list);
- spin_unlock_irqrestore(&pool->list_lock, flags);
-
- /* Batched invalidate of dirty MRs.
- * For FMR based MRs, the mappings on the unmap list are
- * actually members of an ibmr (ibmr->mapping). They either
- * migrate to the kill_list, or have been cleaned and should be
- * moved to the clean_list.
- * For fastregs, they will be dynamically allocated, and
- * will be destroyed by the unmap function.
- */
- if (!list_empty(&unmap_list)) {
- ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
- &kill_list, &unpinned);
- /* If we've been asked to destroy all MRs, move those
- * that were simply cleaned to the kill list */
- if (free_all)
- list_splice_init(&unmap_list, &kill_list);
- }
-
- /* Destroy any MRs that are past their best before date */
- list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
- rds_iw_stats_inc(s_iw_rdma_mr_free);
- list_del(&ibmr->mapping.m_list);
- rds_iw_destroy_fastreg(pool, ibmr);
- kfree(ibmr);
- nfreed++;
- }
-
- /* Anything that remains are laundered ibmrs, which we can add
- * back to the clean list. */
- if (!list_empty(&unmap_list)) {
- spin_lock_irqsave(&pool->list_lock, flags);
- list_splice(&unmap_list, &pool->clean_list);
- spin_unlock_irqrestore(&pool->list_lock, flags);
- }
-
- atomic_sub(unpinned, &pool->free_pinned);
- atomic_sub(ncleaned, &pool->dirty_count);
- atomic_sub(nfreed, &pool->item_count);
-
- mutex_unlock(&pool->flush_lock);
-}
-
-static void rds_iw_mr_pool_flush_worker(struct work_struct *work)
-{
- struct rds_iw_mr_pool *pool = container_of(work, struct rds_iw_mr_pool, flush_worker);
-
- rds_iw_flush_mr_pool(pool, 0);
-}
-
-void rds_iw_free_mr(void *trans_private, int invalidate)
-{
- struct rds_iw_mr *ibmr = trans_private;
- struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;
-
- rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len);
- if (!pool)
- return;
-
- /* Return it to the pool's free list */
- rds_iw_free_fastreg(pool, ibmr);
-
- /* If we've pinned too many pages, request a flush */
- if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
- atomic_read(&pool->dirty_count) >= pool->max_items / 10)
- queue_work(rds_wq, &pool->flush_worker);
-
- if (invalidate) {
- if (likely(!in_interrupt())) {
- rds_iw_flush_mr_pool(pool, 0);
- } else {
- /* We get here if the user created a MR marked
- * as use_once and invalidate at the same time. */
- queue_work(rds_wq, &pool->flush_worker);
- }
- }
-}
-
-void rds_iw_flush_mrs(void)
-{
- struct rds_iw_device *rds_iwdev;
-
- list_for_each_entry(rds_iwdev, &rds_iw_devices, list) {
- struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
-
- if (pool)
- rds_iw_flush_mr_pool(pool, 0);
- }
-}
-
-void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
- struct rds_sock *rs, u32 *key_ret)
-{
- struct rds_iw_device *rds_iwdev;
- struct rds_iw_mr *ibmr = NULL;
- struct rdma_cm_id *cm_id;
- struct sockaddr_in src = {
- .sin_addr.s_addr = rs->rs_bound_addr,
- .sin_port = rs->rs_bound_port,
- };
- struct sockaddr_in dst = {
- .sin_addr.s_addr = rs->rs_conn_addr,
- .sin_port = rs->rs_conn_port,
- };
- int ret;
-
- ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
- if (ret || !cm_id) {
- ret = -ENODEV;
- goto out;
- }
-
- if (!rds_iwdev->mr_pool) {
- ret = -ENODEV;
- goto out;
- }
-
- ibmr = rds_iw_alloc_mr(rds_iwdev);
- if (IS_ERR(ibmr))
- return ibmr;
-
- ibmr->cm_id = cm_id;
- ibmr->device = rds_iwdev;
-
- ret = rds_iw_map_reg(rds_iwdev->mr_pool, ibmr, sg, nents);
- if (ret == 0)
- *key_ret = ibmr->mr->rkey;
- else
- printk(KERN_WARNING "RDS/IW: failed to map mr (errno=%d)\n", ret);
-
-out:
- if (ret) {
- if (ibmr)
- rds_iw_free_mr(ibmr, 0);
- ibmr = ERR_PTR(ret);
- }
- return ibmr;
-}
-
-/*
- * iWARP reg handling
- *
- * The life cycle of a fastreg registration is a bit different from
- * FMRs.
- * The idea behind fastreg is to have one MR, to which we bind different
- * mappings over time. To avoid stalling on the expensive map and invalidate
- * operations, these operations are pipelined on the same send queue on
- * which we want to send the message containing the r_key.
- *
- * This creates a bit of a problem for us, as we do not have the destination
- * IP in GET_MR, so the connection must be setup prior to the GET_MR call for
- * RDMA to be correctly setup. If a fastreg request is present, rds_iw_xmit
- * will try to queue a LOCAL_INV (if needed) and a REG_MR work request
- * before queuing the SEND. When completions for these arrive, a bit is
- * set on the MR to show that RDMA can be performed.
- *
- * There is another interesting aspect that's related to invalidation.
- * The application can request that a mapping is invalidated in FREE_MR.
- * The expectation there is that this invalidation step includes ALL
- * PREVIOUSLY FREED MRs.
- */
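The pipelining described above needs nothing beyond posting the work
requests on one send queue in the right order. A minimal sketch of the idea,
chaining the WRs so a single post keeps them ordered; post_pipelined_reg()
is hypothetical and not part of this file:

	static int post_pipelined_reg(struct ib_qp *qp,
				      struct ib_send_wr *local_inv,
				      struct ib_reg_wr *reg,
				      struct ib_send_wr *send)
	{
		struct ib_send_wr *bad_wr;

		/* Wire order becomes LOCAL_INV -> REG_MR -> SEND, so the
		 * r_key carried by the SEND is valid by the time it goes
		 * out. */
		local_inv->next = &reg->wr;
		reg->wr.next = send;
		send->next = NULL;

		return ib_post_send(qp, local_inv, &bad_wr);
	}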
-static int rds_iw_init_reg(struct rds_iw_mr_pool *pool,
- struct rds_iw_mr *ibmr)
-{
- struct rds_iw_device *rds_iwdev = pool->device;
- struct ib_mr *mr;
- int err;
-
- mr = ib_alloc_mr(rds_iwdev->pd, IB_MR_TYPE_MEM_REG,
- pool->max_message_size);
- if (IS_ERR(mr)) {
- err = PTR_ERR(mr);
-
- printk(KERN_WARNING "RDS/IW: ib_alloc_mr failed (err=%d)\n", err);
- return err;
- }
-
- ibmr->mr = mr;
- return 0;
-}
-
-static int rds_iw_rdma_reg_mr(struct rds_iw_mapping *mapping)
-{
- struct rds_iw_mr *ibmr = mapping->m_mr;
- struct rds_iw_scatterlist *m_sg = &mapping->m_sg;
- struct ib_reg_wr reg_wr;
- struct ib_send_wr *failed_wr;
- int ret, n;
-
- n = ib_map_mr_sg_zbva(ibmr->mr, m_sg->list, m_sg->len, PAGE_SIZE);
- if (unlikely(n != m_sg->len))
- return n < 0 ? n : -EINVAL;
-
- /* Zero the WR so fields such as send_flags don't carry stack noise */
- memset(&reg_wr, 0, sizeof(reg_wr));
- reg_wr.wr.opcode = IB_WR_REG_MR;
- reg_wr.wr.wr_id = RDS_IW_REG_WR_ID;
- reg_wr.wr.num_sge = 0;
- reg_wr.mr = ibmr->mr;
- reg_wr.key = mapping->m_rkey;
- reg_wr.access = IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE;
-
- /*
- * Perform a WR for the reg_mr. Each individual page
- * in the sg list is added to the fast reg page list and placed
- * inside the reg_mr WR. The key used is a rolling 8-bit
- * counter, which should keep stale keys from matching.
- */
- ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
- mapping->m_rkey = ibmr->mr->rkey;
-
- failed_wr = &reg_wr.wr;
- ret = ib_post_send(ibmr->cm_id->qp, &reg_wr.wr, &failed_wr);
- BUG_ON(failed_wr != &reg_wr.wr);
- if (ret)
- printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
- __func__, __LINE__, ret);
- return ret;
-}
-
-static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)
-{
- struct ib_send_wr s_wr, *failed_wr;
- int ret = 0;
-
- if (!ibmr->cm_id->qp || !ibmr->mr)
- goto out;
-
- memset(&s_wr, 0, sizeof(s_wr));
- s_wr.wr_id = RDS_IW_LOCAL_INV_WR_ID;
- s_wr.opcode = IB_WR_LOCAL_INV;
- s_wr.ex.invalidate_rkey = ibmr->mr->rkey;
- s_wr.send_flags = IB_SEND_SIGNALED;
-
- failed_wr = &s_wr;
- ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
- if (ret) {
- printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
- __func__, __LINE__, ret);
- goto out;
- }
-out:
- return ret;
-}
-
-static int rds_iw_map_reg(struct rds_iw_mr_pool *pool,
- struct rds_iw_mr *ibmr,
- struct scatterlist *sg,
- unsigned int sg_len)
-{
- struct rds_iw_device *rds_iwdev = pool->device;
- struct rds_iw_mapping *mapping = &ibmr->mapping;
- int ret = 0;
-
- rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);
-
- ret = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
- if (ret)
- goto out;
-
- if (mapping->m_sg.dma_len > pool->max_message_size) {
- ret = -EMSGSIZE;
- goto out;
- }
-
- ret = rds_iw_rdma_reg_mr(mapping);
- if (ret)
- goto out;
-
- rds_iw_stats_inc(s_iw_rdma_mr_used);
-
-out:
- return ret;
-}
-
-/*
- * "Free" a fastreg MR.
- */
-static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
- struct rds_iw_mr *ibmr)
-{
- unsigned long flags;
- int ret;
-
- if (!ibmr->mapping.m_sg.dma_len)
- return;
-
- ret = rds_iw_rdma_fastreg_inv(ibmr);
- if (ret)
- return;
-
- /* Try to post the LOCAL_INV WR to the queue. */
- spin_lock_irqsave(&pool->list_lock, flags);
-
- list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
- atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
- atomic_inc(&pool->dirty_count);
-
- spin_unlock_irqrestore(&pool->list_lock, flags);
-}
-
-static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
- struct list_head *unmap_list,
- struct list_head *kill_list,
- int *unpinned)
-{
- struct rds_iw_mapping *mapping, *next;
- unsigned int ncleaned = 0;
- LIST_HEAD(laundered);
-
- /* Batched invalidation of fastreg MRs.
- * Why do we do it this way, even though we could pipeline unmap
- * and remap? The reason is the application semantics - when the
- * application requests an invalidation of MRs, it expects all
- * previously released R_Keys to become invalid.
- *
- * If we implement MR reuse naively, we risk memory corruption
- * (this has actually been observed). So the default behavior
- * requires that a MR goes through an explicit unmap operation before
- * we can reuse it again.
- *
- * We could probably improve on this a little, by allowing immediate
- * reuse of an MR on the same socket (e.g. you could add a small
- * cache of unused MRs to struct rds_socket - GET_MR could grab one
- * of these without requiring an explicit invalidate).
- */
- while (!list_empty(unmap_list)) {
- unsigned long flags;
-
- spin_lock_irqsave(&pool->list_lock, flags);
- list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
- *unpinned += mapping->m_sg.len;
- list_move(&mapping->m_list, &laundered);
- ncleaned++;
- }
- spin_unlock_irqrestore(&pool->list_lock, flags);
- }
-
- /* Move all laundered mappings back to the unmap list.
- * We do not kill any WRs right now - it doesn't seem the
- * fastreg API has a max_remap limit. */
- list_splice_init(&laundered, unmap_list);
-
- return ncleaned;
-}
-
-static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,
- struct rds_iw_mr *ibmr)
-{
- if (ibmr->mr)
- ib_dereg_mr(ibmr->mr);
-}
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
deleted file mode 100644
index a66d1794b2d0..000000000000
--- a/net/rds/iw_recv.c
+++ /dev/null
@@ -1,904 +0,0 @@
-/*
- * Copyright (c) 2006 Oracle. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <rdma/rdma_cm.h>
-
-#include "rds.h"
-#include "iw.h"
-
-static struct kmem_cache *rds_iw_incoming_slab;
-static struct kmem_cache *rds_iw_frag_slab;
-static atomic_t rds_iw_allocation = ATOMIC_INIT(0);
-
-static void rds_iw_frag_drop_page(struct rds_page_frag *frag)
-{
- rdsdebug("frag %p page %p\n", frag, frag->f_page);
- __free_page(frag->f_page);
- frag->f_page = NULL;
-}
-
-static void rds_iw_frag_free(struct rds_page_frag *frag)
-{
- rdsdebug("frag %p page %p\n", frag, frag->f_page);
- BUG_ON(frag->f_page);
- kmem_cache_free(rds_iw_frag_slab, frag);
-}
-
-/*
- * We map a page at a time. Its fragments are posted in order. This
- * is called in fragment order as the fragments' completion events arrive.
- * Only the last frag in the page performs the unmapping.
- *
- * It's OK for ring cleanup to call this in whatever order it likes because
- * DMA is not in flight and so we can unmap while other ring entries still
- * hold page references in their frags.
- */
-static void rds_iw_recv_unmap_page(struct rds_iw_connection *ic,
- struct rds_iw_recv_work *recv)
-{
- struct rds_page_frag *frag = recv->r_frag;
-
- rdsdebug("recv %p frag %p page %p\n", recv, frag, frag->f_page);
- if (frag->f_mapped)
- ib_dma_unmap_page(ic->i_cm_id->device,
- frag->f_mapped,
- RDS_FRAG_SIZE, DMA_FROM_DEVICE);
- frag->f_mapped = 0;
-}
-
-void rds_iw_recv_init_ring(struct rds_iw_connection *ic)
-{
- struct rds_iw_recv_work *recv;
- u32 i;
-
- for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
- struct ib_sge *sge;
-
- recv->r_iwinc = NULL;
- recv->r_frag = NULL;
-
- recv->r_wr.next = NULL;
- recv->r_wr.wr_id = i;
- recv->r_wr.sg_list = recv->r_sge;
- recv->r_wr.num_sge = RDS_IW_RECV_SGE;
-
- sge = rds_iw_data_sge(ic, recv->r_sge);
- sge->addr = 0;
- sge->length = RDS_FRAG_SIZE;
- sge->lkey = 0;
-
- sge = rds_iw_header_sge(ic, recv->r_sge);
- sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
- sge->length = sizeof(struct rds_header);
- sge->lkey = 0;
- }
-}
-
-static void rds_iw_recv_clear_one(struct rds_iw_connection *ic,
- struct rds_iw_recv_work *recv)
-{
- if (recv->r_iwinc) {
- rds_inc_put(&recv->r_iwinc->ii_inc);
- recv->r_iwinc = NULL;
- }
- if (recv->r_frag) {
- rds_iw_recv_unmap_page(ic, recv);
- if (recv->r_frag->f_page)
- rds_iw_frag_drop_page(recv->r_frag);
- rds_iw_frag_free(recv->r_frag);
- recv->r_frag = NULL;
- }
-}
-
-void rds_iw_recv_clear_ring(struct rds_iw_connection *ic)
-{
- u32 i;
-
- for (i = 0; i < ic->i_recv_ring.w_nr; i++)
- rds_iw_recv_clear_one(ic, &ic->i_recvs[i]);
-
- if (ic->i_frag.f_page)
- rds_iw_frag_drop_page(&ic->i_frag);
-}
-
-static int rds_iw_recv_refill_one(struct rds_connection *conn,
- struct rds_iw_recv_work *recv,
- gfp_t kptr_gfp, gfp_t page_gfp)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
- dma_addr_t dma_addr;
- struct ib_sge *sge;
- int ret = -ENOMEM;
-
- if (!recv->r_iwinc) {
- if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
- rds_iw_stats_inc(s_iw_rx_alloc_limit);
- goto out;
- }
- recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
- kptr_gfp);
- if (!recv->r_iwinc) {
- atomic_dec(&rds_iw_allocation);
- goto out;
- }
- INIT_LIST_HEAD(&recv->r_iwinc->ii_frags);
- rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
- }
-
- if (!recv->r_frag) {
- recv->r_frag = kmem_cache_alloc(rds_iw_frag_slab, kptr_gfp);
- if (!recv->r_frag)
- goto out;
- INIT_LIST_HEAD(&recv->r_frag->f_item);
- recv->r_frag->f_page = NULL;
- }
-
- if (!ic->i_frag.f_page) {
- ic->i_frag.f_page = alloc_page(page_gfp);
- if (!ic->i_frag.f_page)
- goto out;
- ic->i_frag.f_offset = 0;
- }
-
- dma_addr = ib_dma_map_page(ic->i_cm_id->device,
- ic->i_frag.f_page,
- ic->i_frag.f_offset,
- RDS_FRAG_SIZE,
- DMA_FROM_DEVICE);
- if (ib_dma_mapping_error(ic->i_cm_id->device, dma_addr))
- goto out;
-
- /*
- * Once we get the RDS_PAGE_LAST_OFF frag then rds_iw_recv_unmap_page()
- * must be called on this recv. This happens as completions hit
- * in order or on connection shutdown.
- */
- recv->r_frag->f_page = ic->i_frag.f_page;
- recv->r_frag->f_offset = ic->i_frag.f_offset;
- recv->r_frag->f_mapped = dma_addr;
-
- sge = rds_iw_data_sge(ic, recv->r_sge);
- sge->addr = dma_addr;
- sge->length = RDS_FRAG_SIZE;
-
- sge = rds_iw_header_sge(ic, recv->r_sge);
- sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
- sge->length = sizeof(struct rds_header);
-
- get_page(recv->r_frag->f_page);
-
- if (ic->i_frag.f_offset < RDS_PAGE_LAST_OFF) {
- ic->i_frag.f_offset += RDS_FRAG_SIZE;
- } else {
- put_page(ic->i_frag.f_page);
- ic->i_frag.f_page = NULL;
- ic->i_frag.f_offset = 0;
- }
-
- ret = 0;
-out:
- return ret;
-}
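The i_frag bookkeeping above carves each allocated page into RDS_FRAG_SIZE
slices. A worked example, assuming 64K pages and a 4K RDS_FRAG_SIZE (both
constants are defined elsewhere):

	/* RDS_PAGE_LAST_OFF = 64K - 4K = 60K, so f_offset walks
	 * 0, 4K, 8K, ..., 60K and sixteen recvs share one page. Each recv
	 * takes its own reference via get_page(); after the 60K slice is
	 * handed out, the connection drops its hold with put_page() and a
	 * fresh page is allocated on the next refill. */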
-
-/*
- * This tries to allocate and post unused work requests after making sure that
- * they have all the allocations they need to queue received fragments into
- * sockets. The i_recv_mutex is held here so that ring_alloc and _unalloc
- * pairs don't go unmatched.
- *
- * -1 is returned if posting fails due to temporary resource exhaustion.
- */
-int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
- gfp_t page_gfp, int prefill)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
- struct rds_iw_recv_work *recv;
- struct ib_recv_wr *failed_wr;
- unsigned int posted = 0;
- int ret = 0;
- u32 pos;
-
- while ((prefill || rds_conn_up(conn)) &&
- rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
- if (pos >= ic->i_recv_ring.w_nr) {
- printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
- pos);
- ret = -EINVAL;
- break;
- }
-
- recv = &ic->i_recvs[pos];
- ret = rds_iw_recv_refill_one(conn, recv, kptr_gfp, page_gfp);
- if (ret) {
- ret = -1;
- break;
- }
-
- /* XXX when can this fail? */
- ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
- rdsdebug("recv %p iwinc %p page %p addr %lu ret %d\n", recv,
- recv->r_iwinc, recv->r_frag->f_page,
- (long) recv->r_frag->f_mapped, ret);
- if (ret) {
- rds_iw_conn_error(conn, "recv post on "
- "%pI4 returned %d, disconnecting and "
- "reconnecting\n", &conn->c_faddr,
- ret);
- ret = -1;
- break;
- }
-
- posted++;
- }
-
- /* We're doing flow control - update the window. */
- if (ic->i_flowctl && posted)
- rds_iw_advertise_credits(conn, posted);
-
- if (ret)
- rds_iw_ring_unalloc(&ic->i_recv_ring, 1);
- return ret;
-}
-
-static void rds_iw_inc_purge(struct rds_incoming *inc)
-{
- struct rds_iw_incoming *iwinc;
- struct rds_page_frag *frag;
- struct rds_page_frag *pos;
-
- iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);
- rdsdebug("purging iwinc %p inc %p\n", iwinc, inc);
-
- list_for_each_entry_safe(frag, pos, &iwinc->ii_frags, f_item) {
- list_del_init(&frag->f_item);
- rds_iw_frag_drop_page(frag);
- rds_iw_frag_free(frag);
- }
-}
-
-void rds_iw_inc_free(struct rds_incoming *inc)
-{
- struct rds_iw_incoming *iwinc;
-
- iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);
-
- rds_iw_inc_purge(inc);
- rdsdebug("freeing iwinc %p inc %p\n", iwinc, inc);
- BUG_ON(!list_empty(&iwinc->ii_frags));
- kmem_cache_free(rds_iw_incoming_slab, iwinc);
- atomic_dec(&rds_iw_allocation);
- BUG_ON(atomic_read(&rds_iw_allocation) < 0);
-}
-
-int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
-{
- struct rds_iw_incoming *iwinc;
- struct rds_page_frag *frag;
- unsigned long to_copy;
- unsigned long frag_off = 0;
- int copied = 0;
- int ret;
- u32 len;
-
- iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);
- frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
- len = be32_to_cpu(inc->i_hdr.h_len);
-
- while (iov_iter_count(to) && copied < len) {
- if (frag_off == RDS_FRAG_SIZE) {
- frag = list_entry(frag->f_item.next,
- struct rds_page_frag, f_item);
- frag_off = 0;
- }
- to_copy = min_t(unsigned long, iov_iter_count(to),
- RDS_FRAG_SIZE - frag_off);
- to_copy = min_t(unsigned long, to_copy, len - copied);
-
- /* XXX needs + offset for multiple recvs per page */
- rds_stats_add(s_copy_to_user, to_copy);
- ret = copy_page_to_iter(frag->f_page,
- frag->f_offset + frag_off,
- to_copy,
- to);
- if (ret != to_copy)
- return -EFAULT;
-
- frag_off += to_copy;
- copied += to_copy;
- }
-
- return copied;
-}
-
-/* ic starts out kzalloc()ed */
-void rds_iw_recv_init_ack(struct rds_iw_connection *ic)
-{
- struct ib_send_wr *wr = &ic->i_ack_wr;
- struct ib_sge *sge = &ic->i_ack_sge;
-
- sge->addr = ic->i_ack_dma;
- sge->length = sizeof(struct rds_header);
- sge->lkey = rds_iw_local_dma_lkey(ic);
-
- wr->sg_list = sge;
- wr->num_sge = 1;
- wr->opcode = IB_WR_SEND;
- wr->wr_id = RDS_IW_ACK_WR_ID;
- wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
-}
-
-/*
- * You'd think that with reliable IB connections you wouldn't need to ack
- * messages that have been received. The problem is that IB hardware generates
- * an ack message before it has DMAed the message into memory. This creates a
- * potential message loss if the HCA is disabled for any reason between when it
- * sends the ack and before the message is DMAed and processed. This is only a
- * potential issue if another HCA is available for fail-over.
- *
- * When the remote host receives our ack they'll free the sent message from
- * their send queue. To decrease the latency of this we always send an ack
- * immediately after we've received messages.
- *
- * For simplicity, we only have one ack in flight at a time. This puts
- * pressure on senders to have deep enough send queues to absorb the latency of
- * a single ack frame being in flight. This might not be good enough.
- *
- * This is implemented by having a long-lived send_wr and sge which point to a
- * statically allocated ack frame. This ack wr does not fall under the ring
- * accounting that the tx and rx wrs do. The QP attribute specifically makes
- * room for it beyond the ring size. Send completion notices its special
- * wr_id and avoids working with the ring in that case.
- */
-#ifndef KERNEL_HAS_ATOMIC64
-static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
- int ack_required)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ic->i_ack_lock, flags);
- ic->i_ack_next = seq;
- if (ack_required)
- set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
- spin_unlock_irqrestore(&ic->i_ack_lock, flags);
-}
-
-static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
-{
- unsigned long flags;
- u64 seq;
-
- clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
-
- spin_lock_irqsave(&ic->i_ack_lock, flags);
- seq = ic->i_ack_next;
- spin_unlock_irqrestore(&ic->i_ack_lock, flags);
-
- return seq;
-}
-#else
-static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
- int ack_required)
-{
- atomic64_set(&ic->i_ack_next, seq);
- if (ack_required) {
- smp_mb__before_atomic();
- set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
- }
-}
-
-static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
-{
- clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
- smp_mb__after_atomic();
-
- return atomic64_read(&ic->i_ack_next);
-}
-#endif
-
-
-static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credits)
-{
- struct rds_header *hdr = ic->i_ack;
- struct ib_send_wr *failed_wr;
- u64 seq;
- int ret;
-
- seq = rds_iw_get_ack(ic);
-
- rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
- rds_message_populate_header(hdr, 0, 0, 0);
- hdr->h_ack = cpu_to_be64(seq);
- hdr->h_credit = adv_credits;
- rds_message_make_checksum(hdr);
- ic->i_ack_queued = jiffies;
-
- ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
- if (unlikely(ret)) {
- /* Failed to send. Release the WR, and
- * force another ACK.
- */
- clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
- set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
-
- rds_iw_stats_inc(s_iw_ack_send_failure);
-
- rds_iw_conn_error(ic->conn, "sending ack failed\n");
- } else
- rds_iw_stats_inc(s_iw_ack_sent);
-}
-
-/*
- * There are 3 ways of getting acknowledgements to the peer:
- * 1. We call rds_iw_attempt_ack from the recv completion handler
- * to send an ACK-only frame.
- * However, there can be only one such frame in the send queue
- * at any time, so we may have to postpone it.
- * 2. When another (data) packet is transmitted while there's
- * an ACK in the queue, we piggyback the ACK sequence number
- * on the data packet.
- * 3. If the ACK WR is done sending, we get called from the
- * send queue completion handler, and check whether there's
- * another ACK pending (postponed because the WR was on the
- * queue). If so, we transmit it.
- *
- * We maintain 2 variables:
- * - i_ack_flags, which keeps track of whether the ACK WR
- * is currently in the send queue or not (IB_ACK_IN_FLIGHT)
- * - i_ack_next, which is the last sequence number we received
- *
- * Potentially, send queue and receive queue handlers can run concurrently.
- * It would be nice to not have to use a spinlock to synchronize things,
- * but the one problem that rules this out is that 64bit updates are
- * not atomic on all platforms. Things would be a lot simpler if
- * we had atomic64 or maybe cmpxchg64 everywhere.
- *
- * Reconnecting complicates this picture just slightly. When we
- * reconnect, we may be seeing duplicate packets. The peer
- * is retransmitting them, because it hasn't seen an ACK for
- * them. It is important that we ACK these.
- *
- * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
- * this flag set *MUST* be acknowledged immediately.
- */
-
-/*
- * When we get here, we're called from the recv queue handler.
- * Check whether we ought to transmit an ACK.
- */
-void rds_iw_attempt_ack(struct rds_iw_connection *ic)
-{
- unsigned int adv_credits;
-
- if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
- return;
-
- if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
- rds_iw_stats_inc(s_iw_ack_send_delayed);
- return;
- }
-
- /* Can we get a send credit? */
- if (!rds_iw_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
- rds_iw_stats_inc(s_iw_tx_throttle);
- clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
- return;
- }
-
- clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
- rds_iw_send_ack(ic, adv_credits);
-}
-
-/*
- * We get here from the send completion handler, when the
- * adapter tells us the ACK frame was sent.
- */
-void rds_iw_ack_send_complete(struct rds_iw_connection *ic)
-{
- clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
- rds_iw_attempt_ack(ic);
-}
-
-/*
- * This is called by the regular xmit code when it wants to piggyback
- * an ACK on an outgoing frame.
- */
-u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic)
-{
- if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
- rds_iw_stats_inc(s_iw_ack_send_piggybacked);
- return rds_iw_get_ack(ic);
-}
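
The three functions above implement the protocol laid out in the comment
block before them. As a stand-alone illustration, the user-space sketch
below mirrors rds_iw_attempt_ack() and rds_iw_ack_send_complete(); C11
atomics stand in for the kernel bitops and the credit check is elided,
so treat it as a model rather than kernel code:

#include <stdatomic.h>
#include <stdbool.h>

#define ACK_REQUESTED	(1u << 0)	/* an ACK is owed to the peer */
#define ACK_IN_FLIGHT	(1u << 1)	/* an ACK WR is on the send queue */

static _Atomic unsigned int ack_flags;

static bool try_send_ack(void)		/* ~ rds_iw_attempt_ack() */
{
	if (!(atomic_load(&ack_flags) & ACK_REQUESTED))
		return false;		/* nothing to acknowledge */
	/* Only one ACK-only frame may be queued at a time. */
	if (atomic_fetch_or(&ack_flags, ACK_IN_FLIGHT) & ACK_IN_FLIGHT)
		return false;		/* postponed: one already in flight */
	atomic_fetch_and(&ack_flags, ~ACK_REQUESTED);
	return true;			/* caller now posts the ACK WR */
}

static void ack_send_complete(void)	/* ~ rds_iw_ack_send_complete() */
{
	atomic_fetch_and(&ack_flags, ~ACK_IN_FLIGHT);
	try_send_ack();			/* transmit any postponed ACK */
}
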
-
-/*
- * It's kind of lame that we're copying from the posted receive pages into
- * long-lived bitmaps. We could have posted the bitmaps and rdma written into
- * them. But receiving new congestion bitmaps should be a *rare* event, so
- * hopefully we won't need to invest that complexity in making it more
- * efficient. By copying we can share a simpler core with TCP which has to
- * copy.
- */
-static void rds_iw_cong_recv(struct rds_connection *conn,
- struct rds_iw_incoming *iwinc)
-{
- struct rds_cong_map *map;
- unsigned int map_off;
- unsigned int map_page;
- struct rds_page_frag *frag;
- unsigned long frag_off;
- unsigned long to_copy;
- unsigned long copied;
- uint64_t uncongested = 0;
- void *addr;
-
- /* catch completely corrupt packets */
- if (be32_to_cpu(iwinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
- return;
-
- map = conn->c_fcong;
- map_page = 0;
- map_off = 0;
-
- frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
- frag_off = 0;
-
- copied = 0;
-
- while (copied < RDS_CONG_MAP_BYTES) {
- uint64_t *src, *dst;
- unsigned int k;
-
- to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
- BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
-
- addr = kmap_atomic(frag->f_page);
-
- src = addr + frag_off;
- dst = (void *)map->m_page_addrs[map_page] + map_off;
- for (k = 0; k < to_copy; k += 8) {
- /* Record ports that became uncongested, i.e.
- * bits that changed from 1 to 0. */
- uncongested |= ~(*src) & *dst;
- *dst++ = *src++;
- }
- kunmap_atomic(addr);
-
- copied += to_copy;
-
- map_off += to_copy;
- if (map_off == PAGE_SIZE) {
- map_off = 0;
- map_page++;
- }
-
- frag_off += to_copy;
- if (frag_off == RDS_FRAG_SIZE) {
- frag = list_entry(frag->f_item.next,
- struct rds_page_frag, f_item);
- frag_off = 0;
- }
- }
-
- /* the congestion map is in little endian order */
- uncongested = le64_to_cpu(uncongested);
-
- rds_cong_map_updated(map, uncongested);
-}
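
The transition test in the copy loop above deserves a worked example.
Assuming a set bit means "congested" (as in the RDS congestion map),
old AND NOT(new) isolates exactly the bits that went from 1 to 0, i.e.
the ports that became uncongested; this demo is illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t old_map = 0xF0;	/* ports 4-7 congested */
	uint64_t new_map = 0x90;	/* ports 4 and 7 still congested */
	uint64_t uncongested = ~new_map & old_map;

	/* Prints 0x60: ports 5 and 6 became uncongested. */
	printf("uncongested mask: 0x%llx\n",
	       (unsigned long long)uncongested);
	return 0;
}
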
-
-/*
- * Rings are posted with all the allocations they'll need to queue the
- * incoming message to the receiving socket, so this can't fail.
- * All fragments start with a header, so we can make sure we're not receiving
- * garbage, and we can tell a small 8-byte fragment from an ACK frame.
- */
-struct rds_iw_ack_state {
- u64 ack_next;
- u64 ack_recv;
- unsigned int ack_required:1;
- unsigned int ack_next_valid:1;
- unsigned int ack_recv_valid:1;
-};
-
-static void rds_iw_process_recv(struct rds_connection *conn,
- struct rds_iw_recv_work *recv, u32 byte_len,
- struct rds_iw_ack_state *state)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
- struct rds_iw_incoming *iwinc = ic->i_iwinc;
- struct rds_header *ihdr, *hdr;
-
- /* XXX shut down the connection if port 0,0 are seen? */
-
- rdsdebug("ic %p iwinc %p recv %p byte len %u\n", ic, iwinc, recv,
- byte_len);
-
- if (byte_len < sizeof(struct rds_header)) {
- rds_iw_conn_error(conn, "incoming message "
- "from %pI4 didn't include a "
- "header, disconnecting and "
- "reconnecting\n",
- &conn->c_faddr);
- return;
- }
- byte_len -= sizeof(struct rds_header);
-
- ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
-
- /* Validate the checksum. */
- if (!rds_message_verify_checksum(ihdr)) {
- rds_iw_conn_error(conn, "incoming message "
- "from %pI4 has corrupted header - "
- "forcing a reconnect\n",
- &conn->c_faddr);
- rds_stats_inc(s_recv_drop_bad_checksum);
- return;
- }
-
- /* Process the ACK sequence which comes with every packet */
- state->ack_recv = be64_to_cpu(ihdr->h_ack);
- state->ack_recv_valid = 1;
-
- /* Process the credits update if there was one */
- if (ihdr->h_credit)
- rds_iw_send_add_credits(conn, ihdr->h_credit);
-
- if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && byte_len == 0) {
- /* This is an ACK-only packet. It gets special
- * treatment here because historically, ACKs were
- * rather special beasts.
- */
- rds_iw_stats_inc(s_iw_ack_received);
-
- /*
- * Usually the frags make their way on to incs and are then freed as
- * the inc is freed. We don't go that route, so we have to drop the
- * page ref ourselves. We can't just leave the page on the recv
- * because that confuses the dma mapping of pages and each recv's use
- * of a partial page. We can leave the frag, though; it will be
- * reused.
- *
- * FIXME: Fold this into the code path below.
- */
- rds_iw_frag_drop_page(recv->r_frag);
- return;
- }
-
- /*
- * If we don't already have an inc on the connection then this
- * fragment has a header and starts a message; copy its header
- * into the inc and save the inc so we can hang upcoming fragments
- * off its list.
- */
- if (!iwinc) {
- iwinc = recv->r_iwinc;
- recv->r_iwinc = NULL;
- ic->i_iwinc = iwinc;
-
- hdr = &iwinc->ii_inc.i_hdr;
- memcpy(hdr, ihdr, sizeof(*hdr));
- ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
-
- rdsdebug("ic %p iwinc %p rem %u flag 0x%x\n", ic, iwinc,
- ic->i_recv_data_rem, hdr->h_flags);
- } else {
- hdr = &iwinc->ii_inc.i_hdr;
- /* We can't just use memcmp here; fragments of a
- * single message may carry different ACKs */
- if (hdr->h_sequence != ihdr->h_sequence ||
- hdr->h_len != ihdr->h_len ||
- hdr->h_sport != ihdr->h_sport ||
- hdr->h_dport != ihdr->h_dport) {
- rds_iw_conn_error(conn,
- "fragment header mismatch; forcing reconnect\n");
- return;
- }
- }
-
- list_add_tail(&recv->r_frag->f_item, &iwinc->ii_frags);
- recv->r_frag = NULL;
-
- if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
- ic->i_recv_data_rem -= RDS_FRAG_SIZE;
- else {
- ic->i_recv_data_rem = 0;
- ic->i_iwinc = NULL;
-
- if (iwinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
- rds_iw_cong_recv(conn, iwinc);
- else {
- rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
- &iwinc->ii_inc, GFP_ATOMIC);
- state->ack_next = be64_to_cpu(hdr->h_sequence);
- state->ack_next_valid = 1;
- }
-
- /* Evaluate the ACK_REQUIRED flag *after* we received
- * the complete frame, and after bumping the next_rx
- * sequence. */
- if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
- rds_stats_inc(s_recv_ack_required);
- state->ack_required = 1;
- }
-
- rds_inc_put(&iwinc->ii_inc);
- }
-}
-
-/*
- * Plucking the oldest entry from the ring can be done concurrently with
- * the thread refilling the ring. Each ring operation is protected by
- * spinlocks and the transient state of refilling doesn't change the
- * recording of which entry is oldest.
- *
- * This relies on IB only calling one cq comp_handler for each cq so that
- * there will only be one caller of rds_recv_incoming() per RDS connection.
- */
-void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
-{
- struct rds_connection *conn = context;
- struct rds_iw_connection *ic = conn->c_transport_data;
-
- rdsdebug("conn %p cq %p\n", conn, cq);
-
- rds_iw_stats_inc(s_iw_rx_cq_call);
-
- tasklet_schedule(&ic->i_recv_tasklet);
-}
-
-static inline void rds_poll_cq(struct rds_iw_connection *ic,
- struct rds_iw_ack_state *state)
-{
- struct rds_connection *conn = ic->conn;
- struct ib_wc wc;
- struct rds_iw_recv_work *recv;
-
- while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
- rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
- (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
- be32_to_cpu(wc.ex.imm_data));
- rds_iw_stats_inc(s_iw_rx_cq_event);
-
- recv = &ic->i_recvs[rds_iw_ring_oldest(&ic->i_recv_ring)];
-
- rds_iw_recv_unmap_page(ic, recv);
-
- /*
- * Also process recvs in connecting state because it is possible
- * to get a recv completion _before_ the rdmacm ESTABLISHED
- * event is processed.
- */
- if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
- /* We expect errors as the qp is drained during shutdown */
- if (wc.status == IB_WC_SUCCESS) {
- rds_iw_process_recv(conn, recv, wc.byte_len, state);
- } else {
- rds_iw_conn_error(conn, "recv completion on "
- "%pI4 had status %u, disconnecting and "
- "reconnecting\n", &conn->c_faddr,
- wc.status);
- }
- }
-
- rds_iw_ring_free(&ic->i_recv_ring, 1);
- }
-}
-
-void rds_iw_recv_tasklet_fn(unsigned long data)
-{
- struct rds_iw_connection *ic = (struct rds_iw_connection *) data;
- struct rds_connection *conn = ic->conn;
- struct rds_iw_ack_state state = { 0, };
-
- rds_poll_cq(ic, &state);
- ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
- rds_poll_cq(ic, &state);
-
- if (state.ack_next_valid)
- rds_iw_set_ack(ic, state.ack_next, state.ack_required);
- if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
- rds_send_drop_acked(conn, state.ack_recv, NULL);
- ic->i_ack_recv = state.ack_recv;
- }
- if (rds_conn_up(conn))
- rds_iw_attempt_ack(ic);
-
- /* If we ever end up with a really empty receive ring, we're
- * in deep trouble, as the sender will definitely see RNR
- * timeouts. */
- if (rds_iw_ring_empty(&ic->i_recv_ring))
- rds_iw_stats_inc(s_iw_rx_ring_empty);
-
- /*
- * If the ring is running low, then schedule the thread to refill.
- */
- if (rds_iw_ring_low(&ic->i_recv_ring))
- queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
-}
-
-int rds_iw_recv(struct rds_connection *conn)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
- int ret = 0;
-
- rdsdebug("conn %p\n", conn);
-
- /*
- * If we get a temporary posting failure in this context, then
- * we're really low and we want the caller to back off for a bit.
- */
- mutex_lock(&ic->i_recv_mutex);
- if (rds_iw_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 0))
- ret = -ENOMEM;
- else
- rds_iw_stats_inc(s_iw_rx_refill_from_thread);
- mutex_unlock(&ic->i_recv_mutex);
-
- if (rds_conn_up(conn))
- rds_iw_attempt_ack(ic);
-
- return ret;
-}
-
-int rds_iw_recv_init(void)
-{
- struct sysinfo si;
- int ret = -ENOMEM;
-
- /* Default to roughly a third of all available RAM for recv memory */
- si_meminfo(&si);
- rds_iw_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;
-
- rds_iw_incoming_slab = kmem_cache_create("rds_iw_incoming",
- sizeof(struct rds_iw_incoming),
- 0, 0, NULL);
- if (!rds_iw_incoming_slab)
- goto out;
-
- rds_iw_frag_slab = kmem_cache_create("rds_iw_frag",
- sizeof(struct rds_page_frag),
- 0, 0, NULL);
- if (!rds_iw_frag_slab)
- kmem_cache_destroy(rds_iw_incoming_slab);
- else
- ret = 0;
-out:
- return ret;
-}
-
-void rds_iw_recv_exit(void)
-{
- kmem_cache_destroy(rds_iw_incoming_slab);
- kmem_cache_destroy(rds_iw_frag_slab);
-}
diff --git a/net/rds/iw_ring.c b/net/rds/iw_ring.c
deleted file mode 100644
index da8e3b63f663..000000000000
--- a/net/rds/iw_ring.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (c) 2006 Oracle. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/kernel.h>
-
-#include "rds.h"
-#include "iw.h"
-
-/*
- * Locking for IB rings.
- * We assume that allocation is always protected by a mutex
- * in the caller (this is a valid assumption for the current
- * implementation).
- *
- * Freeing always happens in an interrupt, and hence only
- * races with allocations, but not with other free()s.
- *
- * The interaction between allocation and freeing is that
- * the alloc code has to determine the number of free entries.
- * To this end, we maintain two counters; an allocation counter
- * and a free counter. Both are allowed to run freely, and wrap
- * around.
- * The number of used entries is always alloc_ctr - free_ctr (unsigned
- * arithmetic makes the wraparound harmless).
- *
- * The current implementation makes free_ctr atomic. When the
- * caller finds an allocation fails, it should set an "alloc fail"
- * bit and retry the allocation. The "alloc fail" bit essentially tells
- * the CQ completion handlers to wake it up after freeing some
- * more entries.
- */
-
-/*
- * This only happens on shutdown.
- */
-DECLARE_WAIT_QUEUE_HEAD(rds_iw_ring_empty_wait);
-
-void rds_iw_ring_init(struct rds_iw_work_ring *ring, u32 nr)
-{
- memset(ring, 0, sizeof(*ring));
- ring->w_nr = nr;
- rdsdebug("ring %p nr %u\n", ring, ring->w_nr);
-}
-
-static inline u32 __rds_iw_ring_used(struct rds_iw_work_ring *ring)
-{
- u32 diff;
-
- /* This assumes that atomic_t has at least as many bits as u32 */
- diff = ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr);
- BUG_ON(diff > ring->w_nr);
-
- return diff;
-}
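
A note on why that bare subtraction is safe: both counters run freely
and wrap at 2^32, and unsigned subtraction is already arithmetic mod
2^32, so the used count stays correct across wraparound without any
explicit modulo. A minimal demonstration, illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t alloc_ctr = 5;			/* wrapped past UINT32_MAX */
	uint32_t free_ctr  = 0xFFFFFFFEu;	/* not wrapped yet */

	printf("used = %u\n", alloc_ctr - free_ctr);	/* prints 7 */
	return 0;
}
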
-
-void rds_iw_ring_resize(struct rds_iw_work_ring *ring, u32 nr)
-{
- /* We only ever get called from the connection setup code,
- * prior to creating the QP. */
- BUG_ON(__rds_iw_ring_used(ring));
- ring->w_nr = nr;
-}
-
-static int __rds_iw_ring_empty(struct rds_iw_work_ring *ring)
-{
- return __rds_iw_ring_used(ring) == 0;
-}
-
-u32 rds_iw_ring_alloc(struct rds_iw_work_ring *ring, u32 val, u32 *pos)
-{
- u32 ret = 0, avail;
-
- avail = ring->w_nr - __rds_iw_ring_used(ring);
-
- rdsdebug("ring %p val %u next %u free %u\n", ring, val,
- ring->w_alloc_ptr, avail);
-
- if (val && avail) {
- ret = min(val, avail);
- *pos = ring->w_alloc_ptr;
-
- ring->w_alloc_ptr = (ring->w_alloc_ptr + ret) % ring->w_nr;
- ring->w_alloc_ctr += ret;
- }
-
- return ret;
-}
-
-void rds_iw_ring_free(struct rds_iw_work_ring *ring, u32 val)
-{
- ring->w_free_ptr = (ring->w_free_ptr + val) % ring->w_nr;
- atomic_add(val, &ring->w_free_ctr);
-
- if (__rds_iw_ring_empty(ring) &&
- waitqueue_active(&rds_iw_ring_empty_wait))
- wake_up(&rds_iw_ring_empty_wait);
-}
-
-void rds_iw_ring_unalloc(struct rds_iw_work_ring *ring, u32 val)
-{
- ring->w_alloc_ptr = (ring->w_alloc_ptr - val) % ring->w_nr;
- ring->w_alloc_ctr -= val;
-}
-
-int rds_iw_ring_empty(struct rds_iw_work_ring *ring)
-{
- return __rds_iw_ring_empty(ring);
-}
-
-int rds_iw_ring_low(struct rds_iw_work_ring *ring)
-{
- return __rds_iw_ring_used(ring) <= (ring->w_nr >> 1);
-}
-
-
-/*
- * returns the oldest alloced ring entry. This will be the next one
- * freed. This can't be called if there are none allocated.
- */
-u32 rds_iw_ring_oldest(struct rds_iw_work_ring *ring)
-{
- return ring->w_free_ptr;
-}
-
-/*
- * returns the number of completed work requests.
- */
-
-u32 rds_iw_ring_completed(struct rds_iw_work_ring *ring, u32 wr_id, u32 oldest)
-{
- u32 ret;
-
- if (oldest <= (unsigned long long)wr_id)
- ret = (unsigned long long)wr_id - oldest + 1;
- else
- ret = ring->w_nr - oldest + (unsigned long long)wr_id + 1;
-
- rdsdebug("ring %p ret %u wr_id %u oldest %u\n", ring, ret,
- wr_id, oldest);
- return ret;
-}
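
The two branches above handle the case where the completed wr_id has
wrapped past the end of the ring relative to oldest. A worked example,
extracted into plain C for illustration: with an 8-entry ring,
oldest = 6 and wr_id = 1 means entries 6, 7, 0 and 1 completed, i.e.
four in total:

#include <stdint.h>
#include <stdio.h>

static uint32_t completed(uint32_t w_nr, uint32_t wr_id, uint32_t oldest)
{
	if (oldest <= wr_id)
		return wr_id - oldest + 1;
	return w_nr - oldest + wr_id + 1;
}

int main(void)
{
	printf("completed = %u\n", completed(8, 1, 6));	/* prints 4 */
	return 0;
}
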
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
deleted file mode 100644
index e20bd503f4bd..000000000000
--- a/net/rds/iw_send.c
+++ /dev/null
@@ -1,981 +0,0 @@
-/*
- * Copyright (c) 2006 Oracle. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/kernel.h>
-#include <linux/in.h>
-#include <linux/device.h>
-#include <linux/dmapool.h>
-#include <linux/ratelimit.h>
-
-#include "rds.h"
-#include "iw.h"
-
-static void rds_iw_send_rdma_complete(struct rds_message *rm,
- int wc_status)
-{
- int notify_status;
-
- switch (wc_status) {
- case IB_WC_WR_FLUSH_ERR:
- return;
-
- case IB_WC_SUCCESS:
- notify_status = RDS_RDMA_SUCCESS;
- break;
-
- case IB_WC_REM_ACCESS_ERR:
- notify_status = RDS_RDMA_REMOTE_ERROR;
- break;
-
- default:
- notify_status = RDS_RDMA_OTHER_ERROR;
- break;
- }
- rds_rdma_send_complete(rm, notify_status);
-}
-
-static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic,
- struct rm_rdma_op *op)
-{
- if (op->op_mapped) {
- ib_dma_unmap_sg(ic->i_cm_id->device,
- op->op_sg, op->op_nents,
- op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- op->op_mapped = 0;
- }
-}
-
-static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
- struct rds_iw_send_work *send,
- int wc_status)
-{
- struct rds_message *rm = send->s_rm;
-
- rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
-
- ib_dma_unmap_sg(ic->i_cm_id->device,
- rm->data.op_sg, rm->data.op_nents,
- DMA_TO_DEVICE);
-
- if (rm->rdma.op_active) {
- rds_iw_send_unmap_rdma(ic, &rm->rdma);
-
- /* If the user asked for a completion notification on this
- * message, we can implement three different semantics:
- * 1. Notify when we received the ACK on the RDS message
- * that was queued with the RDMA. This provides reliable
- * notification of RDMA status at the expense of a one-way
- * packet delay.
- * 2. Notify when the IB stack gives us the completion event for
- * the RDMA operation.
- * 3. Notify when the IB stack gives us the completion event for
- * the accompanying RDS messages.
- * Here, we implement approach #3. To implement approach #2,
- * call rds_rdma_send_complete from the cq_handler. To implement #1,
- * don't call rds_rdma_send_complete at all, and fall back to the notify
- * handling in the ACK processing code.
- *
- * Note: There's no need to explicitly sync any RDMA buffers using
- * ib_dma_sync_sg_for_cpu - the completion for the RDMA
- * operation itself unmapped the RDMA buffers, which takes care
- * of synching.
- */
- rds_iw_send_rdma_complete(rm, wc_status);
-
- if (rm->rdma.op_write)
- rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
- else
- rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes);
- }
-
- /* If anyone waited for this message to get flushed out, wake
- * them up now */
- rds_message_unmapped(rm);
-
- rds_message_put(rm);
- send->s_rm = NULL;
-}
-
-void rds_iw_send_init_ring(struct rds_iw_connection *ic)
-{
- struct rds_iw_send_work *send;
- u32 i;
-
- for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
- struct ib_sge *sge;
-
- send->s_rm = NULL;
- send->s_op = NULL;
- send->s_mapping = NULL;
-
- send->s_send_wr.next = NULL;
- send->s_send_wr.wr_id = i;
- send->s_send_wr.sg_list = send->s_sge;
- send->s_send_wr.num_sge = 1;
- send->s_send_wr.opcode = IB_WR_SEND;
- send->s_send_wr.send_flags = 0;
- send->s_send_wr.ex.imm_data = 0;
-
- sge = rds_iw_data_sge(ic, send->s_sge);
- sge->lkey = 0;
-
- sge = rds_iw_header_sge(ic, send->s_sge);
- sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
- sge->length = sizeof(struct rds_header);
- sge->lkey = 0;
-
- send->s_mr = ib_alloc_mr(ic->i_pd, IB_MR_TYPE_MEM_REG,
- fastreg_message_size);
- if (IS_ERR(send->s_mr)) {
- printk(KERN_WARNING "RDS/IW: ib_alloc_mr failed\n");
- break;
- }
- }
-}
-
-void rds_iw_send_clear_ring(struct rds_iw_connection *ic)
-{
- struct rds_iw_send_work *send;
- u32 i;
-
- for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
- BUG_ON(!send->s_mr);
- ib_dereg_mr(send->s_mr);
- if (send->s_send_wr.opcode == 0xdead)
- continue;
- if (send->s_rm)
- rds_iw_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
- if (send->s_op)
- rds_iw_send_unmap_rdma(ic, send->s_op);
- }
-}
-
-/*
- * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
- * operations performed in the send path. As the sender allocs and potentially
- * unallocs the next free entry in the ring, it doesn't alter which entry is
- * the next to be freed, which is what this is concerned with.
- */
-void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
-{
- struct rds_connection *conn = context;
- struct rds_iw_connection *ic = conn->c_transport_data;
- struct ib_wc wc;
- struct rds_iw_send_work *send;
- u32 completed;
- u32 oldest;
- u32 i;
- int ret;
-
- rdsdebug("cq %p conn %p\n", cq, conn);
- rds_iw_stats_inc(s_iw_tx_cq_call);
- ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
- if (ret)
- rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
-
- while (ib_poll_cq(cq, 1, &wc) > 0) {
- rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
- (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
- be32_to_cpu(wc.ex.imm_data));
- rds_iw_stats_inc(s_iw_tx_cq_event);
-
- if (wc.status != IB_WC_SUCCESS) {
- printk(KERN_ERR "WC Error: status = %d opcode = %d\n", wc.status, wc.opcode);
- break;
- }
-
- if (wc.opcode == IB_WC_LOCAL_INV && wc.wr_id == RDS_IW_LOCAL_INV_WR_ID) {
- ic->i_fastreg_posted = 0;
- continue;
- }
-
- if (wc.opcode == IB_WC_REG_MR && wc.wr_id == RDS_IW_REG_WR_ID) {
- ic->i_fastreg_posted = 1;
- continue;
- }
-
- if (wc.wr_id == RDS_IW_ACK_WR_ID) {
- if (time_after(jiffies, ic->i_ack_queued + HZ/2))
- rds_iw_stats_inc(s_iw_tx_stalled);
- rds_iw_ack_send_complete(ic);
- continue;
- }
-
- oldest = rds_iw_ring_oldest(&ic->i_send_ring);
-
- completed = rds_iw_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);
-
- for (i = 0; i < completed; i++) {
- send = &ic->i_sends[oldest];
-
- /* In the error case, wc.opcode sometimes contains garbage */
- switch (send->s_send_wr.opcode) {
- case IB_WR_SEND:
- if (send->s_rm)
- rds_iw_send_unmap_rm(ic, send, wc.status);
- break;
- case IB_WR_REG_MR:
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_READ:
- case IB_WR_RDMA_READ_WITH_INV:
- /* Nothing to be done - the SG list will be unmapped
- * when the SEND completes. */
- break;
- default:
- printk_ratelimited(KERN_NOTICE
- "RDS/IW: %s: unexpected opcode 0x%x in WR!\n",
- __func__, send->s_send_wr.opcode);
- break;
- }
-
- send->s_send_wr.opcode = 0xdead;
- send->s_send_wr.num_sge = 1;
- if (time_after(jiffies, send->s_queued + HZ/2))
- rds_iw_stats_inc(s_iw_tx_stalled);
-
- /* If an RDMA operation produced an error, signal this right
- * away. If we don't, the subsequent SEND that goes with this
- * RDMA will be canceled with ERR_WFLUSH, and the application
- * will never learn that the RDMA failed. */
- if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
- struct rds_message *rm;
-
- rm = rds_send_get_message(conn, send->s_op);
- if (rm)
- rds_iw_send_rdma_complete(rm, wc.status);
- }
-
- oldest = (oldest + 1) % ic->i_send_ring.w_nr;
- }
-
- rds_iw_ring_free(&ic->i_send_ring, completed);
-
- if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
- test_bit(0, &conn->c_map_queued))
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
-
- /* We expect errors as the qp is drained during shutdown */
- if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
- rds_iw_conn_error(conn,
- "send completion on %pI4 "
- "had status %u, disconnecting and reconnecting\n",
- &conn->c_faddr, wc.status);
- }
- }
-}
-
-/*
- * This is the main function for allocating credits when sending
- * messages.
- *
- * Conceptually, we have two counters:
- * - send credits: this tells us how many WRs we're allowed
- * to submit without overrunning the receiver's queue. For
- * each SEND WR we post, we decrement this by one.
- *
- * - posted credits: this tells us how many WRs we recently
- * posted to the receive queue. This value is transferred
- * to the peer as a "credit update" in an RDS header field.
- * Every time we transmit credits to the peer, we subtract
- * the amount of transferred credits from this counter.
- *
- * It is essential that we avoid situations where both sides have
- * exhausted their send credits, and are unable to send new credits
- * to the peer. We achieve this by requiring that we send at least
- * one credit update to the peer before exhausting our credits.
- * When new credits arrive, we subtract one credit that is withheld
- * until we've posted new buffers and are ready to transmit these
- * credits (see rds_iw_send_add_credits below).
- *
- * The RDS send code is essentially single-threaded; rds_send_xmit
- * grabs c_send_lock to ensure exclusive access to the send ring.
- * However, the ACK sending code is independent and can race with
- * message SENDs.
- *
- * In the send path, we need to update the counters for send credits
- * and the counter of posted buffers atomically - when we use the
- * last available credit, we cannot allow another thread to race us
- * and grab the posted credits counter. Hence, we have to use a
- * spinlock to protect the credit counter, or use atomics.
- *
- * Spinlocks shared between the send and the receive path are bad,
- * because they create unnecessary delays. An early implementation
- * using a spinlock showed a 5% degradation in throughput at some
- * loads.
- *
- * This implementation avoids spinlocks completely, putting both
- * counters into a single atomic, and updating that atomic using
- * atomic_add (in the receive path, when receiving fresh credits),
- * and using atomic_cmpxchg when updating the two counters.
- */
-int rds_iw_send_grab_credits(struct rds_iw_connection *ic,
- u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
-{
- unsigned int avail, posted, got = 0, advertise;
- long oldval, newval;
-
- *adv_credits = 0;
- if (!ic->i_flowctl)
- return wanted;
-
-try_again:
- advertise = 0;
- oldval = newval = atomic_read(&ic->i_credits);
- posted = IB_GET_POST_CREDITS(oldval);
- avail = IB_GET_SEND_CREDITS(oldval);
-
- rdsdebug("wanted=%u credits=%u posted=%u\n",
- wanted, avail, posted);
-
- /* The last credit must be used to send a credit update. */
- if (avail && !posted)
- avail--;
-
- if (avail < wanted) {
- struct rds_connection *conn = ic->i_cm_id->context;
-
- /* Oops, there aren't that many credits left! */
- set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
- got = avail;
- } else {
- /* Sometimes you get what you want, lalala. */
- got = wanted;
- }
- newval -= IB_SET_SEND_CREDITS(got);
-
- /*
- * If need_posted is non-zero, then the caller wants the
- * posted credits advertised regardless of whether any send
- * credits are available.
- */
- if (posted && (got || need_posted)) {
- advertise = min_t(unsigned int, posted, max_posted);
- newval -= IB_SET_POST_CREDITS(advertise);
- }
-
- /* Finally bill everything */
- if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
- goto try_again;
-
- *adv_credits = advertise;
- return got;
-}
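
The comment block before rds_iw_send_grab_credits() describes packing
both counters into a single atomic and claiming them with cmpxchg. The
user-space model below shows the same shape; the 16/16 bit split is an
assumption made for the sketch (the driver's IB_GET_SEND_CREDITS() and
IB_GET_POST_CREDITS() macros define the real layout), and the withheld
last credit and flow-control paths are elided:

#include <stdatomic.h>
#include <stdint.h>

#define SEND(v)		((uint32_t)(v) & 0xFFFFu)	/* low 16 bits */
#define POSTED(v)	((uint32_t)(v) >> 16)		/* high 16 bits */
#define PACK(s, p)	(((uint32_t)(p) << 16) | SEND(s))

static _Atomic uint32_t credits;

/* Claim up to 'wanted' send credits and drain the posted credits to
 * advertise, all in one atomic update; returns the credits granted. */
static uint32_t grab_credits(uint32_t wanted, uint32_t *advertise)
{
	uint32_t oldval, newval, got;

	do {
		oldval = atomic_load(&credits);
		got = wanted < SEND(oldval) ? wanted : SEND(oldval);
		*advertise = POSTED(oldval);
		newval = PACK(SEND(oldval) - got, 0);
	} while (!atomic_compare_exchange_weak(&credits, &oldval, newval));

	return got;
}
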
-
-void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
-
- if (credits == 0)
- return;
-
- rdsdebug("credits=%u current=%u%s\n",
- credits,
- IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
- test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
-
- atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
- if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
-
- WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);
-
- rds_iw_stats_inc(s_iw_rx_credit_updates);
-}
-
-void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
-
- if (posted == 0)
- return;
-
- atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);
-
- /* Decide whether to send an update to the peer now.
- * If we would send a credit update for every single buffer we
- * post, we would end up with an ACK storm (ACK arrives,
- * consumes buffer, we refill the ring, send ACK to remote
- * advertising the newly posted buffer... ad inf)
- *
- * Performance pretty much depends on how often we send
- * credit updates - too frequent updates mean lots of ACKs.
- * Too infrequent updates, and the peer will run out of
- * credits and have to throttle.
- * For the time being, 16 seems to be a good compromise.
- */
- if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
- set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
-}
-
-static inline void
-rds_iw_xmit_populate_wr(struct rds_iw_connection *ic,
- struct rds_iw_send_work *send, unsigned int pos,
- unsigned long buffer, unsigned int length,
- int send_flags)
-{
- struct ib_sge *sge;
-
- WARN_ON(pos != send - ic->i_sends);
-
- send->s_send_wr.send_flags = send_flags;
- send->s_send_wr.opcode = IB_WR_SEND;
- send->s_send_wr.num_sge = 2;
- send->s_send_wr.next = NULL;
- send->s_queued = jiffies;
- send->s_op = NULL;
-
- if (length != 0) {
- sge = rds_iw_data_sge(ic, send->s_sge);
- sge->addr = buffer;
- sge->length = length;
- sge->lkey = rds_iw_local_dma_lkey(ic);
-
- sge = rds_iw_header_sge(ic, send->s_sge);
- } else {
- /* We're sending a packet with no payload. There is only
- * one SGE */
- send->s_send_wr.num_sge = 1;
- sge = &send->s_sge[0];
- }
-
- sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
- sge->length = sizeof(struct rds_header);
- sge->lkey = rds_iw_local_dma_lkey(ic);
-}
-
-/*
- * This can be called multiple times for a given message. The first time
- * we see a message we map its scatterlist into the IB device so that
- * we can provide that mapped address to the IB scatter gather entries
- * in the IB work requests. We translate the scatterlist into a series
- * of work requests that fragment the message. These work requests complete
- * in order so we pass ownership of the message to the completion handler
- * once we send the final fragment.
- *
- * The RDS core uses the c_send_lock to only enter this function once
- * per connection. This makes sure that the tx ring alloc/unalloc pairs
- * don't get out of sync and confuse the ring.
- */
-int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
- unsigned int hdr_off, unsigned int sg, unsigned int off)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
- struct ib_device *dev = ic->i_cm_id->device;
- struct rds_iw_send_work *send = NULL;
- struct rds_iw_send_work *first;
- struct rds_iw_send_work *prev;
- struct ib_send_wr *failed_wr;
- struct scatterlist *scat;
- u32 pos;
- u32 i;
- u32 work_alloc;
- u32 credit_alloc;
- u32 posted;
- u32 adv_credits = 0;
- int send_flags = 0;
- int sent;
- int ret;
- int flow_controlled = 0;
-
- BUG_ON(off % RDS_FRAG_SIZE);
- BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
-
- /* Fastreg support */
- if (rds_rdma_cookie_key(rm->m_rdma_cookie) && !ic->i_fastreg_posted) {
- ret = -EAGAIN;
- goto out;
- }
-
- /* FIXME we may overallocate here */
- if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
- i = 1;
- else
- i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
-
- work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
- if (work_alloc == 0) {
- set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
- rds_iw_stats_inc(s_iw_tx_ring_full);
- ret = -ENOMEM;
- goto out;
- }
-
- credit_alloc = work_alloc;
- if (ic->i_flowctl) {
- credit_alloc = rds_iw_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
- adv_credits += posted;
- if (credit_alloc < work_alloc) {
- rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
- work_alloc = credit_alloc;
- flow_controlled++;
- }
- if (work_alloc == 0) {
- set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
- rds_iw_stats_inc(s_iw_tx_throttle);
- ret = -ENOMEM;
- goto out;
- }
- }
-
- /* map the message the first time we see it */
- if (!ic->i_rm) {
- /*
- printk(KERN_NOTICE "rds_iw_xmit prep msg dport=%u flags=0x%x len=%d\n",
- be16_to_cpu(rm->m_inc.i_hdr.h_dport),
- rm->m_inc.i_hdr.h_flags,
- be32_to_cpu(rm->m_inc.i_hdr.h_len));
- */
- if (rm->data.op_nents) {
- rm->data.op_count = ib_dma_map_sg(dev,
- rm->data.op_sg,
- rm->data.op_nents,
- DMA_TO_DEVICE);
- rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
- if (rm->data.op_count == 0) {
- rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
- rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
- ret = -ENOMEM; /* XXX ? */
- goto out;
- }
- } else {
- rm->data.op_count = 0;
- }
-
- ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
- ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes;
- rds_message_addref(rm);
- rm->data.op_dmasg = 0;
- rm->data.op_dmaoff = 0;
- ic->i_rm = rm;
-
- /* Finalize the header */
- if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
- rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
- if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
- rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
-
- /* If it has a RDMA op, tell the peer we did it. This is
- * used by the peer to release use-once RDMA MRs. */
- if (rm->rdma.op_active) {
- struct rds_ext_header_rdma ext_hdr;
-
- ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
- rds_message_add_extension(&rm->m_inc.i_hdr,
- RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
- }
- if (rm->m_rdma_cookie) {
- rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
- rds_rdma_cookie_key(rm->m_rdma_cookie),
- rds_rdma_cookie_offset(rm->m_rdma_cookie));
- }
-
- /* Note - rds_iw_piggyb_ack clears the ACK_REQUIRED bit, so
- * we should not do this unless we have a chance of at least
- * sticking the header into the send ring, which is why we
- * call rds_iw_ring_alloc first. */
- rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_iw_piggyb_ack(ic));
- rds_message_make_checksum(&rm->m_inc.i_hdr);
-
- /*
- * Update adv_credits since we reset the ACK_REQUIRED bit.
- */
- rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
- adv_credits += posted;
- BUG_ON(adv_credits > 255);
- }
-
- send = &ic->i_sends[pos];
- first = send;
- prev = NULL;
- scat = &rm->data.op_sg[rm->data.op_dmasg];
- sent = 0;
- i = 0;
-
- /* Sometimes you want to put a fence between an RDMA
- * READ and the following SEND.
- * We could either do this all the time
- * or when requested by the user. Right now, we let
- * the application choose.
- */
- if (rm->rdma.op_active && rm->rdma.op_fence)
- send_flags = IB_SEND_FENCE;
-
- /*
- * We could be copying the header into the unused tail of the page.
- * That would need to be changed in the future when those pages might
- * be mapped userspace pages or page cache pages. So instead we always
- * use a second sge and our long-lived ring of mapped headers. We send
- * the header after the data so that the data payload can be aligned on
- * the receiver.
- */
-
- /* handle a 0-len message */
- if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) {
- rds_iw_xmit_populate_wr(ic, send, pos, 0, 0, send_flags);
- goto add_header;
- }
-
- /* if there's data reference it with a chain of work reqs */
- for (; i < work_alloc && scat != &rm->data.op_sg[rm->data.op_count]; i++) {
- unsigned int len;
-
- send = &ic->i_sends[pos];
-
- len = min(RDS_FRAG_SIZE,
- ib_sg_dma_len(dev, scat) - rm->data.op_dmaoff);
- rds_iw_xmit_populate_wr(ic, send, pos,
- ib_sg_dma_address(dev, scat) + rm->data.op_dmaoff, len,
- send_flags);
-
- /*
- * We want to delay signaling completions just enough to get
- * the batching benefits but not so much that we create dead time
- * on the wire.
- */
- if (ic->i_unsignaled_wrs-- == 0) {
- ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
- send->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
- }
-
- ic->i_unsignaled_bytes -= len;
- if (ic->i_unsignaled_bytes <= 0) {
- ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes;
- send->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
- }
-
- /*
- * Always signal the last one if we're stopping due to flow control.
- */
- if (flow_controlled && i == (work_alloc-1))
- send->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
-
- rdsdebug("send %p wr %p num_sge %u next %p\n", send,
- &send->s_send_wr, send->s_send_wr.num_sge, send->s_send_wr.next);
-
- sent += len;
- rm->data.op_dmaoff += len;
- if (rm->data.op_dmaoff == ib_sg_dma_len(dev, scat)) {
- scat++;
- rm->data.op_dmaoff = 0;
- rm->data.op_dmasg++;
- }
-
-add_header:
- /* Tack on the header after the data. The header SGE should already
- * have been set up to point to the right header buffer. */
- memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
-
- if (0) {
- struct rds_header *hdr = &ic->i_send_hdrs[pos];
-
- printk(KERN_NOTICE "send WR dport=%u flags=0x%x len=%d\n",
- be16_to_cpu(hdr->h_dport),
- hdr->h_flags,
- be32_to_cpu(hdr->h_len));
- }
- if (adv_credits) {
- struct rds_header *hdr = &ic->i_send_hdrs[pos];
-
- /* add credit and redo the header checksum */
- hdr->h_credit = adv_credits;
- rds_message_make_checksum(hdr);
- adv_credits = 0;
- rds_iw_stats_inc(s_iw_tx_credit_updates);
- }
-
- if (prev)
- prev->s_send_wr.next = &send->s_send_wr;
- prev = send;
-
- pos = (pos + 1) % ic->i_send_ring.w_nr;
- }
-
- /* Account the RDS header in the number of bytes we sent, but just once.
- * The caller has no concept of fragmentation. */
- if (hdr_off == 0)
- sent += sizeof(struct rds_header);
-
- /* if we finished the message then send completion owns it */
- if (scat == &rm->data.op_sg[rm->data.op_count]) {
- prev->s_rm = ic->i_rm;
- prev->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
- ic->i_rm = NULL;
- }
-
- if (i < work_alloc) {
- rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i);
- work_alloc = i;
- }
- if (ic->i_flowctl && i < credit_alloc)
- rds_iw_send_add_credits(conn, credit_alloc - i);
-
- /* XXX need to worry about failed_wr and partial sends. */
- failed_wr = &first->s_send_wr;
- ret = ib_post_send(ic->i_cm_id->qp, &first->s_send_wr, &failed_wr);
- rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
- first, &first->s_send_wr, ret, failed_wr);
- BUG_ON(failed_wr != &first->s_send_wr);
- if (ret) {
- printk(KERN_WARNING "RDS/IW: ib_post_send to %pI4 "
- "returned %d\n", &conn->c_faddr, ret);
- rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
- if (prev->s_rm) {
- ic->i_rm = prev->s_rm;
- prev->s_rm = NULL;
- }
- goto out;
- }
-
- ret = sent;
-out:
- BUG_ON(adv_credits);
- return ret;
-}
-
-static int rds_iw_build_send_reg(struct rds_iw_send_work *send,
- struct scatterlist *sg,
- int sg_nents)
-{
- int n;
-
- n = ib_map_mr_sg(send->s_mr, sg, sg_nents, PAGE_SIZE);
- if (unlikely(n != sg_nents))
- return n < 0 ? n : -EINVAL;
-
- send->s_reg_wr.wr.opcode = IB_WR_REG_MR;
- send->s_reg_wr.wr.wr_id = 0;
- send->s_reg_wr.wr.num_sge = 0;
- send->s_reg_wr.mr = send->s_mr;
- send->s_reg_wr.key = send->s_mr->rkey;
- send->s_reg_wr.access = IB_ACCESS_REMOTE_WRITE;
-
- ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
-
- return 0;
-}
-
-int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
- struct rds_iw_send_work *send = NULL;
- struct rds_iw_send_work *first;
- struct rds_iw_send_work *prev;
- struct ib_send_wr *failed_wr;
- struct rds_iw_device *rds_iwdev;
- struct scatterlist *scat;
- unsigned long len;
- u64 remote_addr = op->op_remote_addr;
- u32 pos, fr_pos;
- u32 work_alloc;
- u32 i;
- u32 j;
- int sent;
- int ret;
- int num_sge;
- int sg_nents;
-
- rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
-
- /* map the message the first time we see it */
- if (!op->op_mapped) {
- op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
- op->op_sg, op->op_nents, (op->op_write) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
- if (op->op_count == 0) {
- rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
- ret = -ENOMEM; /* XXX ? */
- goto out;
- }
-
- op->op_mapped = 1;
- }
-
- if (!op->op_write) {
- /* Alloc space on the send queue for the fastreg */
- work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos);
- if (work_alloc != 1) {
- rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
- rds_iw_stats_inc(s_iw_tx_ring_full);
- ret = -ENOMEM;
- goto out;
- }
- }
-
- /*
- * Instead of knowing how to return a partial rdma read/write we insist that there
- * be enough work requests to send the entire message.
- */
- i = ceil(op->op_count, rds_iwdev->max_sge);
-
- work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
- if (work_alloc != i) {
- rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
- rds_iw_stats_inc(s_iw_tx_ring_full);
- ret = -ENOMEM;
- goto out;
- }
-
- send = &ic->i_sends[pos];
- if (!op->op_write) {
- first = prev = &ic->i_sends[fr_pos];
- } else {
- first = send;
- prev = NULL;
- }
- scat = &op->op_sg[0];
- sent = 0;
- num_sge = op->op_count;
- sg_nents = 0;
-
- for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
- send->s_rdma_wr.wr.send_flags = 0;
- send->s_queued = jiffies;
-
- /*
- * We want to delay signaling completions just enough to get
- * the batching benefits but not so much that we create dead time on the wire.
- */
- if (ic->i_unsignaled_wrs-- == 0) {
- ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
- send->s_rdma_wr.wr.send_flags = IB_SEND_SIGNALED;
- }
-
- /* To avoid needing the plumbing to invalidate the fastreg_mr used
- * for local access after RDS is finished with it, we use
- * IB_WR_RDMA_READ_WITH_INV, which invalidates it after the read completes.
- */
- if (op->op_write)
- send->s_rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
- else
- send->s_rdma_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
-
- send->s_rdma_wr.remote_addr = remote_addr;
- send->s_rdma_wr.rkey = op->op_rkey;
- send->s_op = op;
-
- if (num_sge > rds_iwdev->max_sge) {
- send->s_rdma_wr.wr.num_sge = rds_iwdev->max_sge;
- num_sge -= rds_iwdev->max_sge;
- } else
- send->s_rdma_wr.wr.num_sge = num_sge;
-
- send->s_rdma_wr.wr.next = NULL;
-
- if (prev)
- prev->s_send_wr.next = &send->s_rdma_wr.wr;
-
- for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
- scat != &op->op_sg[op->op_count]; j++) {
- len = ib_sg_dma_len(ic->i_cm_id->device, scat);
-
- if (send->s_rdma_wr.wr.opcode == IB_WR_RDMA_READ_WITH_INV)
- sg_nents++;
- else {
- send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat);
- send->s_sge[j].length = len;
- send->s_sge[j].lkey = rds_iw_local_dma_lkey(ic);
- }
-
- sent += len;
- rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
- remote_addr += len;
-
- scat++;
- }
-
- if (send->s_rdma_wr.wr.opcode == IB_WR_RDMA_READ_WITH_INV) {
- send->s_rdma_wr.wr.num_sge = 1;
- send->s_sge[0].addr = conn->c_xmit_rm->m_rs->rs_user_addr;
- send->s_sge[0].length = conn->c_xmit_rm->m_rs->rs_user_bytes;
- send->s_sge[0].lkey = ic->i_sends[fr_pos].s_mr->lkey;
- }
-
- rdsdebug("send %p wr %p num_sge %u next %p\n", send,
- &send->s_rdma_wr,
- send->s_rdma_wr.wr.num_sge,
- send->s_rdma_wr.wr.next);
-
- prev = send;
- if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
- send = ic->i_sends;
- }
-
- /* if we finished the message then send completion owns it */
- if (scat == &op->op_sg[op->op_count])
- first->s_rdma_wr.wr.send_flags = IB_SEND_SIGNALED;
-
- if (i < work_alloc) {
- rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i);
- work_alloc = i;
- }
-
- /* On iWARP, local memory access by a remote system (i.e., RDMA Read) is not
- * recommended. Putting the lkey on the wire is a security hole, as it can
- * allow access to all memory on the remote system. Some adapters do not
- * allow using the lkey for this at all. To bypass this, use a fastreg_mr
- * (or possibly a dma_mr).
- */
- if (!op->op_write) {
- ret = rds_iw_build_send_reg(&ic->i_sends[fr_pos],
- &op->op_sg[0], sg_nents);
- if (ret) {
- printk(KERN_WARNING "RDS/IW: failed to reg send mem\n");
- goto out;
- }
- work_alloc++;
- }
-
- failed_wr = &first->s_rdma_wr.wr;
- ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
- rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
- first, &first->s_rdma_wr, ret, failed_wr);
- BUG_ON(failed_wr != &first->s_rdma_wr.wr);
- if (ret) {
- printk(KERN_WARNING "RDS/IW: rdma ib_post_send to %pI4 "
- "returned %d\n", &conn->c_faddr, ret);
- rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
- goto out;
- }
-
-out:
- return ret;
-}
-
-void rds_iw_xmit_complete(struct rds_connection *conn)
-{
- struct rds_iw_connection *ic = conn->c_transport_data;
-
- /* We may have a pending ACK or window update we were unable
- * to send previously (due to flow control). Try again. */
- rds_iw_attempt_ack(ic);
-}
diff --git a/net/rds/iw_stats.c b/net/rds/iw_stats.c
deleted file mode 100644
index 5fe67f6a1d80..000000000000
--- a/net/rds/iw_stats.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2006 Oracle. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/percpu.h>
-#include <linux/seq_file.h>
-#include <linux/proc_fs.h>
-
-#include "rds.h"
-#include "iw.h"
-
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_iw_statistics, rds_iw_stats);
-
-static const char *const rds_iw_stat_names[] = {
- "iw_connect_raced",
- "iw_listen_closed_stale",
- "iw_tx_cq_call",
- "iw_tx_cq_event",
- "iw_tx_ring_full",
- "iw_tx_throttle",
- "iw_tx_sg_mapping_failure",
- "iw_tx_stalled",
- "iw_tx_credit_updates",
- "iw_rx_cq_call",
- "iw_rx_cq_event",
- "iw_rx_ring_empty",
- "iw_rx_refill_from_cq",
- "iw_rx_refill_from_thread",
- "iw_rx_alloc_limit",
- "iw_rx_credit_updates",
- "iw_ack_sent",
- "iw_ack_send_failure",
- "iw_ack_send_delayed",
- "iw_ack_send_piggybacked",
- "iw_ack_received",
- "iw_rdma_mr_alloc",
- "iw_rdma_mr_free",
- "iw_rdma_mr_used",
- "iw_rdma_mr_pool_flush",
- "iw_rdma_mr_pool_wait",
- "iw_rdma_mr_pool_depleted",
-};
-
-unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter,
- unsigned int avail)
-{
- struct rds_iw_statistics stats = {0, };
- uint64_t *src;
- uint64_t *sum;
- size_t i;
- int cpu;
-
- if (avail < ARRAY_SIZE(rds_iw_stat_names))
- goto out;
-
- for_each_online_cpu(cpu) {
- src = (uint64_t *)&(per_cpu(rds_iw_stats, cpu));
- sum = (uint64_t *)&stats;
- for (i = 0; i < sizeof(stats) / sizeof(uint64_t); i++)
- *(sum++) += *(src++);
- }
-
- rds_stats_info_copy(iter, (uint64_t *)&stats, rds_iw_stat_names,
- ARRAY_SIZE(rds_iw_stat_names));
-out:
- return ARRAY_SIZE(rds_iw_stat_names);
-}
diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c
deleted file mode 100644
index 139239d2cb22..000000000000
--- a/net/rds/iw_sysctl.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2006 Oracle. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/kernel.h>
-#include <linux/sysctl.h>
-#include <linux/proc_fs.h>
-
-#include "iw.h"
-
-static struct ctl_table_header *rds_iw_sysctl_hdr;
-
-unsigned long rds_iw_sysctl_max_send_wr = RDS_IW_DEFAULT_SEND_WR;
-unsigned long rds_iw_sysctl_max_recv_wr = RDS_IW_DEFAULT_RECV_WR;
-unsigned long rds_iw_sysctl_max_recv_allocation = (128 * 1024 * 1024) / RDS_FRAG_SIZE;
-static unsigned long rds_iw_sysctl_max_wr_min = 1;
-/* hardware will fail CQ creation long before this */
-static unsigned long rds_iw_sysctl_max_wr_max = (u32)~0;
-
-unsigned long rds_iw_sysctl_max_unsig_wrs = 16;
-static unsigned long rds_iw_sysctl_max_unsig_wr_min = 1;
-static unsigned long rds_iw_sysctl_max_unsig_wr_max = 64;
-
-unsigned long rds_iw_sysctl_max_unsig_bytes = (16 << 20);
-static unsigned long rds_iw_sysctl_max_unsig_bytes_min = 1;
-static unsigned long rds_iw_sysctl_max_unsig_bytes_max = ~0UL;
-
-unsigned int rds_iw_sysctl_flow_control = 1;
-
-static struct ctl_table rds_iw_sysctl_table[] = {
- {
- .procname = "max_send_wr",
- .data = &rds_iw_sysctl_max_send_wr,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = proc_doulongvec_minmax,
- .extra1 = &rds_iw_sysctl_max_wr_min,
- .extra2 = &rds_iw_sysctl_max_wr_max,
- },
- {
- .procname = "max_recv_wr",
- .data = &rds_iw_sysctl_max_recv_wr,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = proc_doulongvec_minmax,
- .extra1 = &rds_iw_sysctl_max_wr_min,
- .extra2 = &rds_iw_sysctl_max_wr_max,
- },
- {
- .procname = "max_unsignaled_wr",
- .data = &rds_iw_sysctl_max_unsig_wrs,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = proc_doulongvec_minmax,
- .extra1 = &rds_iw_sysctl_max_unsig_wr_min,
- .extra2 = &rds_iw_sysctl_max_unsig_wr_max,
- },
- {
- .procname = "max_unsignaled_bytes",
- .data = &rds_iw_sysctl_max_unsig_bytes,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = proc_doulongvec_minmax,
- .extra1 = &rds_iw_sysctl_max_unsig_bytes_min,
- .extra2 = &rds_iw_sysctl_max_unsig_bytes_max,
- },
- {
- .procname = "max_recv_allocation",
- .data = &rds_iw_sysctl_max_recv_allocation,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = proc_doulongvec_minmax,
- },
- {
- .procname = "flow_control",
- .data = &rds_iw_sysctl_flow_control,
- .maxlen = sizeof(rds_iw_sysctl_flow_control),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- { }
-};
-
-void rds_iw_sysctl_exit(void)
-{
- unregister_net_sysctl_table(rds_iw_sysctl_hdr);
-}
-
-int rds_iw_sysctl_init(void)
-{
- rds_iw_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/iw", rds_iw_sysctl_table);
- if (!rds_iw_sysctl_hdr)
- return -ENOMEM;
- return 0;
-}
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 9c1fed81bf0f..7220bebcf558 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -49,9 +49,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id,
event->event, rdma_event_msg(event->event));
- if (cm_id->device->node_type == RDMA_NODE_RNIC)
- trans = &rds_iw_transport;
- else
+ if (cm_id->device->node_type == RDMA_NODE_IB_CA)
trans = &rds_ib_transport;
/* Prevent shutdown from tearing down the connection
@@ -119,6 +117,14 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
rds_conn_drop(conn);
break;
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ if (conn) {
+ pr_info("RDS: RDMA_CM_EVENT_TIMEWAIT_EXIT event: dropping connection %pI4->%pI4\n",
+ &conn->c_laddr, &conn->c_faddr);
+ rds_conn_drop(conn);
+ }
+ break;
+
default:
/* things like device disconnect? */
printk(KERN_ERR "RDS: unknown event %u (%s)!\n",
@@ -200,10 +206,6 @@ static int rds_rdma_init(void)
if (ret)
goto out;
- ret = rds_iw_init();
- if (ret)
- goto err_iw_init;
-
ret = rds_ib_init();
if (ret)
goto err_ib_init;
@@ -211,8 +213,6 @@ static int rds_rdma_init(void)
goto out;
err_ib_init:
- rds_iw_exit();
-err_iw_init:
rds_rdma_listen_stop();
out:
return ret;
@@ -224,11 +224,10 @@ static void rds_rdma_exit(void)
/* stop listening first to ensure no new connections are attempted */
rds_rdma_listen_stop();
rds_ib_exit();
- rds_iw_exit();
}
module_exit(rds_rdma_exit);
MODULE_AUTHOR("Oracle Corporation <[email protected]>");
-MODULE_DESCRIPTION("RDS: IB/iWARP transport");
+MODULE_DESCRIPTION("RDS: IB transport");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/net/rds/rdma_transport.h b/net/rds/rdma_transport.h
index faba4e382695..ff2010e9d20c 100644
--- a/net/rds/rdma_transport.h
+++ b/net/rds/rdma_transport.h
@@ -16,9 +16,4 @@ extern struct rds_transport rds_ib_transport;
int rds_ib_init(void);
void rds_ib_exit(void);
-/* from iw.c */
-extern struct rds_transport rds_iw_transport;
-int rds_iw_init(void);
-void rds_iw_exit(void);
-
#endif
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 0e2797bdc316..80256b08eac0 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -222,6 +222,7 @@ struct rds_incoming {
__be32 i_saddr;
rds_rdma_cookie_t i_rdma_cookie;
+ struct timeval i_rx_tstamp;
};
struct rds_mr {
diff --git a/net/rds/recv.c b/net/rds/recv.c
index a00462b0d01d..c0be1ecd11c9 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -35,6 +35,8 @@
#include <net/sock.h>
#include <linux/in.h>
#include <linux/export.h>
+#include <linux/time.h>
+#include <linux/rds.h>
#include "rds.h"
@@ -46,6 +48,8 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
inc->i_conn = conn;
inc->i_saddr = saddr;
inc->i_rdma_cookie = 0;
+ inc->i_rx_tstamp.tv_sec = 0;
+ inc->i_rx_tstamp.tv_usec = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_init);
@@ -228,6 +232,8 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
be32_to_cpu(inc->i_hdr.h_len),
inc->i_hdr.h_dport);
+ if (sock_flag(sk, SOCK_RCVTSTAMP))
+ do_gettimeofday(&inc->i_rx_tstamp);
rds_inc_addref(inc);
list_add_tail(&inc->i_item, &rs->rs_recv_queue);
__rds_wake_sk_sleep(sk);
@@ -381,7 +387,8 @@ static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
/*
* Receive any control messages.
*/
-static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg)
+static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
+ struct rds_sock *rs)
{
int ret = 0;
@@ -392,6 +399,15 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg)
return ret;
}
+ if ((inc->i_rx_tstamp.tv_sec != 0) &&
+ sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
+ ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
+ sizeof(struct timeval),
+ &inc->i_rx_tstamp);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -474,7 +490,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
msg->msg_flags |= MSG_TRUNC;
}
- if (rds_cmsg_recv(inc, msg)) {
+ if (rds_cmsg_recv(inc, msg, rs)) {
ret = -EFAULT;
goto out;
}
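
The recv.c changes above stamp each incoming message via do_gettimeofday() when SOCK_RCVTSTAMP is set on the socket, and hand the stamp to userspace as an SCM_TIMESTAMP control message. For illustration, a receiver could consume it as in the following minimal sketch (the RDS socket is assumed to be created and bound already; error handling is abbreviated):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/uio.h>

static void recv_with_tstamp(int sock_fd)
{
	char data[1024];
	char cbuf[CMSG_SPACE(sizeof(struct timeval))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int on = 1;

	/* SO_TIMESTAMP sets SOCK_RCVTSTAMP, which the new code tests */
	setsockopt(sock_fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));

	if (recvmsg(sock_fd, &msg, 0) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMP) {
			struct timeval tv;

			memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
			printf("rx at %ld.%06ld\n",
			       (long)tv.tv_sec, (long)tv.tv_usec);
		}
	}
}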
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 598d374f6a35..868f1ad0415a 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -41,5 +41,4 @@ config RFKILL_GPIO
default n
help
If you say yes here you get support of a generic gpio RFKILL
- driver. The platform should fill in the appropriate fields in the
- rfkill_gpio_platform_data structure and pass that to the driver.
+ driver.
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index cf5b69ab1829..03f26e3a6f48 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -57,6 +57,8 @@ struct rfkill {
bool registered;
bool persistent;
+ bool polling_paused;
+ bool suspended;
const struct rfkill_ops *ops;
void *data;
@@ -233,29 +235,6 @@ static void rfkill_event(struct rfkill *rfkill)
rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}
-static bool __rfkill_set_hw_state(struct rfkill *rfkill,
- bool blocked, bool *change)
-{
- unsigned long flags;
- bool prev, any;
-
- BUG_ON(!rfkill);
-
- spin_lock_irqsave(&rfkill->lock, flags);
- prev = !!(rfkill->state & RFKILL_BLOCK_HW);
- if (blocked)
- rfkill->state |= RFKILL_BLOCK_HW;
- else
- rfkill->state &= ~RFKILL_BLOCK_HW;
- *change = prev != blocked;
- any = !!(rfkill->state & RFKILL_BLOCK_ANY);
- spin_unlock_irqrestore(&rfkill->lock, flags);
-
- rfkill_led_trigger_event(rfkill);
-
- return any;
-}
-
/**
* rfkill_set_block - wrapper for set_block method
*
@@ -285,7 +264,7 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
spin_lock_irqsave(&rfkill->lock, flags);
prev = rfkill->state & RFKILL_BLOCK_SW;
- if (rfkill->state & RFKILL_BLOCK_SW)
+ if (prev)
rfkill->state |= RFKILL_BLOCK_SW_PREV;
else
rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
@@ -303,8 +282,8 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
spin_lock_irqsave(&rfkill->lock, flags);
if (err) {
/*
- * Failed -- reset status to _prev, this may be different
- * from what set set _PREV to earlier in this function
+ * Failed -- reset status to _PREV, which may be different
+ * from what we have set _PREV to earlier in this function
* if rfkill_set_sw_state was invoked.
*/
if (rfkill->state & RFKILL_BLOCK_SW_PREV)
@@ -323,6 +302,19 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
rfkill_event(rfkill);
}
+static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
+{
+ int i;
+
+ if (type != RFKILL_TYPE_ALL) {
+ rfkill_global_states[type].cur = blocked;
+ return;
+ }
+
+ for (i = 0; i < NUM_RFKILL_TYPES; i++)
+ rfkill_global_states[i].cur = blocked;
+}
+
#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
@@ -332,8 +324,7 @@ static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
* @blocked: the new state
*
* This function sets the state of all switches of given type,
- * unless a specific switch is claimed by userspace (in which case,
- * that switch is left alone) or suspended.
+ * unless a specific switch is suspended.
*
* Caller must have acquired rfkill_global_mutex.
*/
@@ -341,15 +332,7 @@ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
struct rfkill *rfkill;
- if (type == RFKILL_TYPE_ALL) {
- int i;
-
- for (i = 0; i < NUM_RFKILL_TYPES; i++)
- rfkill_global_states[i].cur = blocked;
- } else {
- rfkill_global_states[type].cur = blocked;
- }
-
+ rfkill_update_global_state(type, blocked);
list_for_each_entry(rfkill, &rfkill_list, node) {
if (rfkill->type != type && type != RFKILL_TYPE_ALL)
continue;
@@ -477,17 +460,28 @@ bool rfkill_get_global_sw_state(const enum rfkill_type type)
}
#endif
-
bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
- bool ret, change;
+ unsigned long flags;
+ bool ret, prev;
+
+ BUG_ON(!rfkill);
- ret = __rfkill_set_hw_state(rfkill, blocked, &change);
+ spin_lock_irqsave(&rfkill->lock, flags);
+ prev = !!(rfkill->state & RFKILL_BLOCK_HW);
+ if (blocked)
+ rfkill->state |= RFKILL_BLOCK_HW;
+ else
+ rfkill->state &= ~RFKILL_BLOCK_HW;
+ ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+
+ rfkill_led_trigger_event(rfkill);
if (!rfkill->registered)
return ret;
- if (change)
+ if (prev != blocked)
schedule_work(&rfkill->uevent_work);
return ret;
@@ -582,6 +576,34 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
}
EXPORT_SYMBOL(rfkill_set_states);
+static const char * const rfkill_types[] = {
+ NULL, /* RFKILL_TYPE_ALL */
+ "wlan",
+ "bluetooth",
+ "ultrawideband",
+ "wimax",
+ "wwan",
+ "gps",
+ "fm",
+ "nfc",
+};
+
+enum rfkill_type rfkill_find_type(const char *name)
+{
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(rfkill_types) != NUM_RFKILL_TYPES);
+
+ if (!name)
+ return RFKILL_TYPE_ALL;
+
+ for (i = 1; i < NUM_RFKILL_TYPES; i++)
+ if (!strcmp(name, rfkill_types[i]))
+ return i;
+ return RFKILL_TYPE_ALL;
+}
+EXPORT_SYMBOL(rfkill_find_type);
+
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -591,38 +613,12 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(name);
-static const char *rfkill_get_type_str(enum rfkill_type type)
-{
- BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_NFC + 1);
-
- switch (type) {
- case RFKILL_TYPE_WLAN:
- return "wlan";
- case RFKILL_TYPE_BLUETOOTH:
- return "bluetooth";
- case RFKILL_TYPE_UWB:
- return "ultrawideband";
- case RFKILL_TYPE_WIMAX:
- return "wimax";
- case RFKILL_TYPE_WWAN:
- return "wwan";
- case RFKILL_TYPE_GPS:
- return "gps";
- case RFKILL_TYPE_FM:
- return "fm";
- case RFKILL_TYPE_NFC:
- return "nfc";
- default:
- BUG();
- }
-}
-
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
+ return sprintf(buf, "%s\n", rfkill_types[rfkill->type]);
}
static DEVICE_ATTR_RO(type);
@@ -730,20 +726,12 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(state);
-static ssize_t claim_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d\n", 0);
-}
-static DEVICE_ATTR_RO(claim);
-
static struct attribute *rfkill_dev_attrs[] = {
&dev_attr_name.attr,
&dev_attr_type.attr,
&dev_attr_index.attr,
&dev_attr_persistent.attr,
&dev_attr_state.attr,
- &dev_attr_claim.attr,
&dev_attr_soft.attr,
&dev_attr_hard.attr,
NULL,
@@ -768,7 +756,7 @@ static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
if (error)
return error;
error = add_uevent_var(env, "RFKILL_TYPE=%s",
- rfkill_get_type_str(rfkill->type));
+ rfkill_types[rfkill->type]);
if (error)
return error;
spin_lock_irqsave(&rfkill->lock, flags);
@@ -786,6 +774,7 @@ void rfkill_pause_polling(struct rfkill *rfkill)
if (!rfkill->ops->poll)
return;
+ rfkill->polling_paused = true;
cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);
@@ -797,6 +786,11 @@ void rfkill_resume_polling(struct rfkill *rfkill)
if (!rfkill->ops->poll)
return;
+ rfkill->polling_paused = false;
+
+ if (rfkill->suspended)
+ return;
+
queue_delayed_work(system_power_efficient_wq,
&rfkill->poll_work, 0);
}
@@ -807,7 +801,8 @@ static int rfkill_suspend(struct device *dev)
{
struct rfkill *rfkill = to_rfkill(dev);
- rfkill_pause_polling(rfkill);
+ rfkill->suspended = true;
+ cancel_delayed_work_sync(&rfkill->poll_work);
return 0;
}
@@ -817,12 +812,16 @@ static int rfkill_resume(struct device *dev)
struct rfkill *rfkill = to_rfkill(dev);
bool cur;
+ rfkill->suspended = false;
+
if (!rfkill->persistent) {
cur = !!(rfkill->state & RFKILL_BLOCK_SW);
rfkill_set_block(rfkill, cur);
}
- rfkill_resume_polling(rfkill);
+ if (rfkill->ops->poll && !rfkill->polling_paused)
+ queue_delayed_work(system_power_efficient_wq,
+ &rfkill->poll_work, 0);
return 0;
}
@@ -1164,15 +1163,8 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
mutex_lock(&rfkill_global_mutex);
- if (ev.op == RFKILL_OP_CHANGE_ALL) {
- if (ev.type == RFKILL_TYPE_ALL) {
- enum rfkill_type i;
- for (i = 0; i < NUM_RFKILL_TYPES; i++)
- rfkill_global_states[i].cur = ev.soft;
- } else {
- rfkill_global_states[ev.type].cur = ev.soft;
- }
- }
+ if (ev.op == RFKILL_OP_CHANGE_ALL)
+ rfkill_update_global_state(ev.type, ev.soft);
list_for_each_entry(rfkill, &rfkill_list, node) {
if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
@@ -1261,10 +1253,8 @@ static struct miscdevice rfkill_miscdev = {
static int __init rfkill_init(void)
{
int error;
- int i;
- for (i = 0; i < NUM_RFKILL_TYPES; i++)
- rfkill_global_states[i].cur = !rfkill_default_state;
+ rfkill_update_global_state(RFKILL_TYPE_ALL, !rfkill_default_state);
error = class_register(&rfkill_class);
if (error)
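
The table introduced above replaces the old rfkill_get_type_str() switch with one array that serves both directions: type-to-name for the sysfs "type" attribute and uevents, and name-to-type through the new rfkill_find_type(). A standalone sketch of the pattern, with stand-in enum values rather than the kernel's definitions:

#include <string.h>

enum rf_type { RF_ALL, RF_WLAN, RF_BLUETOOTH, NUM_RF_TYPES };

static const char *const rf_names[NUM_RF_TYPES] = {
	[RF_ALL]       = NULL,		/* the "match anything" type has no name */
	[RF_WLAN]      = "wlan",
	[RF_BLUETOOTH] = "bluetooth",
};

static enum rf_type rf_find_type(const char *name)
{
	int i;

	if (!name)
		return RF_ALL;
	for (i = 1; i < NUM_RF_TYPES; i++)
		if (!strcmp(name, rf_names[i]))
			return (enum rf_type)i;
	return RF_ALL;			/* unknown names fall back to "all" */
}

Keeping one array as the single source of truth is also what lets the BUILD_BUG_ON() in rfkill_find_type() catch the array and the enum falling out of step.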
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 4b1e3f35f06c..76c01cbd56e3 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -27,8 +27,6 @@
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
-#include <linux/rfkill-gpio.h>
-
struct rfkill_gpio_data {
const char *name;
enum rfkill_type type;
@@ -81,7 +79,6 @@ static int rfkill_gpio_acpi_probe(struct device *dev,
if (!id)
return -ENODEV;
- rfkill->name = dev_name(dev);
rfkill->type = (unsigned)id->driver_data;
return acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
@@ -90,24 +87,27 @@ static int rfkill_gpio_acpi_probe(struct device *dev,
static int rfkill_gpio_probe(struct platform_device *pdev)
{
- struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
struct rfkill_gpio_data *rfkill;
struct gpio_desc *gpio;
+ const char *type_name = NULL; /* "type" property may be absent */
int ret;
rfkill = devm_kzalloc(&pdev->dev, sizeof(*rfkill), GFP_KERNEL);
if (!rfkill)
return -ENOMEM;
+ device_property_read_string(&pdev->dev, "name", &rfkill->name);
+ device_property_read_string(&pdev->dev, "type", &type_name);
+
+ if (!rfkill->name)
+ rfkill->name = dev_name(&pdev->dev);
+
+ rfkill->type = rfkill_find_type(type_name);
+
if (ACPI_HANDLE(&pdev->dev)) {
ret = rfkill_gpio_acpi_probe(&pdev->dev, rfkill);
if (ret)
return ret;
- } else if (pdata) {
- rfkill->name = pdata->name;
- rfkill->type = pdata->type;
- } else {
- return -ENODEV;
}
rfkill->clk = devm_clk_get(&pdev->dev, NULL);
@@ -124,10 +124,8 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
rfkill->shutdown_gpio = gpio;
- /* Make sure at-least one of the GPIO is defined and that
- * a name is specified for this instance
- */
- if ((!rfkill->reset_gpio && !rfkill->shutdown_gpio) || !rfkill->name) {
+ /* Make sure at least one GPIO is defined for this instance */
+ if (!rfkill->reset_gpio && !rfkill->shutdown_gpio) {
dev_err(&pdev->dev, "invalid platform data\n");
return -EINVAL;
}
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 7e2d1057d8bc..a76501757b59 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -37,7 +37,7 @@ static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;
/* local epoch for detecting local-end reset */
-__be32 rxrpc_epoch;
+u32 rxrpc_epoch;
/* current debugging ID */
atomic_t rxrpc_debug_id;
@@ -81,6 +81,8 @@ static int rxrpc_validate_address(struct rxrpc_sock *rx,
struct sockaddr_rxrpc *srx,
int len)
{
+ unsigned int tail;
+
if (len < sizeof(struct sockaddr_rxrpc))
return -EINVAL;
@@ -103,9 +105,7 @@ static int rxrpc_validate_address(struct rxrpc_sock *rx,
_debug("INET: %x @ %pI4",
ntohs(srx->transport.sin.sin_port),
&srx->transport.sin.sin_addr);
- if (srx->transport_len > 8)
- memset((void *)&srx->transport + 8, 0,
- srx->transport_len - 8);
+ tail = offsetof(struct sockaddr_rxrpc, transport.sin.__pad);
break;
case AF_INET6:
@@ -113,6 +113,8 @@ static int rxrpc_validate_address(struct rxrpc_sock *rx,
return -EAFNOSUPPORT;
}
+ if (tail < len)
+ memset((void *)srx + tail, 0, len - tail);
return 0;
}
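
The rewritten rxrpc_validate_address() computes, per address family, the offset of the first padding byte and clears everything from there up to the supplied length, so that two sockaddr_rxrpc structures can later be compared with a single memcmp(). A minimal sketch of the idea over an illustrative structure (the field names are not rxrpc's):

#include <stddef.h>
#include <string.h>

struct example_addr {
	unsigned short	family;
	unsigned short	port;
	unsigned int	addr;
	unsigned char	__pad[8];	/* may arrive uninitialized from userspace */
};

static void zero_address_tail(struct example_addr *a, size_t len)
{
	size_t tail = offsetof(struct example_addr, __pad);

	if (tail < len)
		memset((unsigned char *)a + tail, 0, len - tail);
}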
@@ -121,11 +123,10 @@ static int rxrpc_validate_address(struct rxrpc_sock *rx,
*/
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
- struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) saddr;
+ struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
struct sock *sk = sock->sk;
struct rxrpc_local *local;
struct rxrpc_sock *rx = rxrpc_sk(sk), *prx;
- __be16 service_id;
int ret;
_enter("%p,%p,%d", rx, saddr, len);
@@ -143,7 +144,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
memcpy(&rx->srx, srx, sizeof(rx->srx));
- /* find a local transport endpoint if we don't have one already */
+ /* Find or create a local transport endpoint to use */
local = rxrpc_lookup_local(&rx->srx);
if (IS_ERR(local)) {
ret = PTR_ERR(local);
@@ -152,14 +153,12 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
rx->local = local;
if (srx->srx_service) {
- service_id = htons(srx->srx_service);
write_lock_bh(&local->services_lock);
list_for_each_entry(prx, &local->services, listen_link) {
- if (prx->service_id == service_id)
+ if (prx->srx.srx_service == srx->srx_service)
goto service_in_use;
}
- rx->service_id = service_id;
list_add_tail(&rx->listen_link, &local->services);
write_unlock_bh(&local->services_lock);
@@ -276,7 +275,6 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
struct rxrpc_transport *trans;
struct rxrpc_call *call;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
- __be16 service_id;
_enter(",,%x,%lx", key_serial(key), user_call_ID);
@@ -299,16 +297,14 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
atomic_inc(&trans->usage);
}
- service_id = rx->service_id;
- if (srx)
- service_id = htons(srx->srx_service);
-
+ if (!srx)
+ srx = &rx->srx;
if (!key)
key = rx->key;
if (key && !key->payload.data[0])
key = NULL; /* a no-security key */
- bundle = rxrpc_get_bundle(rx, trans, key, service_id, gfp);
+ bundle = rxrpc_get_bundle(rx, trans, key, srx->srx_service, gfp);
if (IS_ERR(bundle)) {
call = ERR_CAST(bundle);
goto out;
@@ -324,7 +320,6 @@ out_notrans:
_leave(" = %p", call);
return call;
}
-
EXPORT_SYMBOL(rxrpc_kernel_begin_call);
/**
@@ -340,7 +335,6 @@ void rxrpc_kernel_end_call(struct rxrpc_call *call)
rxrpc_remove_user_ID(call->socket, call);
rxrpc_put_call(call);
}
-
EXPORT_SYMBOL(rxrpc_kernel_end_call);
/**
@@ -425,7 +419,6 @@ static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
}
rx->trans = trans;
- rx->service_id = htons(srx->srx_service);
rx->sk.sk_state = RXRPC_CLIENT_CONNECTED;
release_sock(&rx->sk);
@@ -622,7 +615,7 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
- /* we support transport protocol UDP only */
+ /* we support transport protocol UDP/UDP6 only */
if (protocol != PF_INET)
return -EPROTONOSUPPORT;
@@ -754,7 +747,7 @@ static int rxrpc_release(struct socket *sock)
* RxRPC network protocol
*/
static const struct proto_ops rxrpc_rpc_ops = {
- .family = PF_UNIX,
+ .family = PF_RXRPC,
.owner = THIS_MODULE,
.release = rxrpc_release,
.bind = rxrpc_bind,
@@ -778,7 +771,7 @@ static struct proto rxrpc_proto = {
.name = "RXRPC",
.owner = THIS_MODULE,
.obj_size = sizeof(struct rxrpc_sock),
- .max_header = sizeof(struct rxrpc_header),
+ .max_header = sizeof(struct rxrpc_wire_header),
};
static const struct net_proto_family rxrpc_family_ops = {
@@ -796,7 +789,7 @@ static int __init af_rxrpc_init(void)
BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
- rxrpc_epoch = htonl(get_seconds());
+ rxrpc_epoch = get_seconds();
ret = -ENOMEM;
rxrpc_call_jar = kmem_cache_create(
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
index 6d79310fcaae..277731a5e67a 100644
--- a/net/rxrpc/ar-accept.c
+++ b/net/rxrpc/ar-accept.c
@@ -27,7 +27,7 @@
* generate a connection-level abort
*/
static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx,
- struct rxrpc_header *hdr)
+ struct rxrpc_wire_header *whdr)
{
struct msghdr msg;
struct kvec iov[1];
@@ -36,25 +36,21 @@ static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx,
_enter("%d,,", local->debug_id);
+ whdr->type = RXRPC_PACKET_TYPE_BUSY;
+ whdr->serial = htonl(1);
+
msg.msg_name = &srx->transport.sin;
msg.msg_namelen = sizeof(srx->transport.sin);
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
- hdr->seq = 0;
- hdr->type = RXRPC_PACKET_TYPE_BUSY;
- hdr->flags = 0;
- hdr->userStatus = 0;
- hdr->_rsvd = 0;
-
- iov[0].iov_base = hdr;
- iov[0].iov_len = sizeof(*hdr);
+ iov[0].iov_base = whdr;
+ iov[0].iov_len = sizeof(*whdr);
len = iov[0].iov_len;
- hdr->serial = htonl(1);
- _proto("Tx BUSY %%%u", ntohl(hdr->serial));
+ _proto("Tx BUSY %%1");
ret = kernel_sendmsg(local->socket, &msg, iov, 1, len);
if (ret < 0) {
@@ -185,8 +181,8 @@ invalid_service:
read_unlock_bh(&local->services_lock);
read_lock_bh(&call->state_lock);
- if (!test_bit(RXRPC_CALL_RELEASE, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) {
+ if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
+ !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
rxrpc_get_call(call);
rxrpc_queue_call(call);
}
@@ -211,8 +207,8 @@ void rxrpc_accept_incoming_calls(struct work_struct *work)
struct rxrpc_skb_priv *sp;
struct sockaddr_rxrpc srx;
struct rxrpc_sock *rx;
+ struct rxrpc_wire_header whdr;
struct sk_buff *skb;
- __be16 service_id;
int ret;
_enter("%d", local->debug_id);
@@ -240,6 +236,19 @@ process_next_packet:
sp = rxrpc_skb(skb);
+ /* Set up a response packet header in case we need it */
+ whdr.epoch = htonl(sp->hdr.epoch);
+ whdr.cid = htonl(sp->hdr.cid);
+ whdr.callNumber = htonl(sp->hdr.callNumber);
+ whdr.seq = htonl(sp->hdr.seq);
+ whdr.serial = 0;
+ whdr.flags = 0;
+ whdr.type = 0;
+ whdr.userStatus = 0;
+ whdr.securityIndex = sp->hdr.securityIndex;
+ whdr._rsvd = 0;
+ whdr.serviceId = htons(sp->hdr.serviceId);
+
/* determine the remote address */
memset(&srx, 0, sizeof(srx));
srx.srx_family = AF_RXRPC;
@@ -256,10 +265,9 @@ process_next_packet:
}
/* get the socket providing the service */
- service_id = sp->hdr.serviceId;
read_lock_bh(&local->services_lock);
list_for_each_entry(rx, &local->services, listen_link) {
- if (rx->service_id == service_id &&
+ if (rx->srx.srx_service == sp->hdr.serviceId &&
rx->sk.sk_state != RXRPC_CLOSE)
goto found_service;
}
@@ -267,7 +275,7 @@ process_next_packet:
goto invalid_service;
found_service:
- _debug("found service %hd", ntohs(rx->service_id));
+ _debug("found service %hd", rx->srx.srx_service);
if (sk_acceptq_is_full(&rx->sk))
goto backlog_full;
sk_acceptq_added(&rx->sk);
@@ -296,7 +304,7 @@ found_service:
backlog_full:
read_unlock_bh(&local->services_lock);
busy:
- rxrpc_busy(local, &srx, &sp->hdr);
+ rxrpc_busy(local, &srx, &whdr);
rxrpc_free_skb(skb);
goto process_next_packet;
@@ -379,7 +387,7 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
rb_insert_color(&call->sock_node, &rx->calls);
if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
BUG();
- if (test_and_set_bit(RXRPC_CALL_ACCEPTED, &call->events))
+ if (test_and_set_bit(RXRPC_CALL_EV_ACCEPTED, &call->events))
BUG();
rxrpc_queue_call(call);
@@ -395,7 +403,7 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
out_release:
_debug("release %p", call);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
rxrpc_queue_call(call);
out_discard:
write_unlock_bh(&call->state_lock);
@@ -407,7 +415,7 @@ out:
}
/*
- * handle rejectance of a call by userspace
+ * Handle rejection of a call by userspace
* - reject the call at the front of the queue
*/
int rxrpc_reject_call(struct rxrpc_sock *rx)
@@ -434,7 +442,7 @@ int rxrpc_reject_call(struct rxrpc_sock *rx)
switch (call->state) {
case RXRPC_CALL_SERVER_ACCEPTING:
call->state = RXRPC_CALL_SERVER_BUSY;
- if (test_and_set_bit(RXRPC_CALL_REJECT_BUSY, &call->events))
+ if (test_and_set_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events))
rxrpc_queue_call(call);
ret = 0;
goto out_release;
@@ -458,7 +466,7 @@ int rxrpc_reject_call(struct rxrpc_sock *rx)
out_release:
_debug("release %p", call);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
rxrpc_queue_call(call);
out_discard:
write_unlock_bh(&call->state_lock);
@@ -487,7 +495,6 @@ struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *sock,
_leave(" = %p", call);
return call;
}
-
EXPORT_SYMBOL(rxrpc_kernel_accept_call);
/**
@@ -506,5 +513,4 @@ int rxrpc_kernel_reject_call(struct socket *sock)
_leave(" = %d", ret);
return ret;
}
-
EXPORT_SYMBOL(rxrpc_kernel_reject_call);
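
The ar-accept.c hunks follow the convention used throughout this series: sp->hdr now carries the header in host byte order (struct rxrpc_host_header), and a struct rxrpc_wire_header is marshalled with htonl()/htons() only at the point where a packet is actually transmitted. A sketch of that split, using abbreviated stand-ins for the real structures:

#include <arpa/inet.h>
#include <stdint.h>

/* Stand-ins: the wire form is big-endian, the host form is native. */
struct example_host_hdr {
	uint32_t epoch, cid, call_number, seq, serial;
	uint16_t service_id;
};
struct example_wire_hdr {
	uint32_t epoch, cid, call_number, seq, serial;
	uint16_t service_id;
};

static void example_marshal(struct example_wire_hdr *w,
			    const struct example_host_hdr *h)
{
	w->epoch       = htonl(h->epoch);
	w->cid         = htonl(h->cid);
	w->call_number = htonl(h->call_number);
	w->seq         = htonl(h->seq);
	w->serial      = htonl(h->serial);
	w->service_id  = htons(h->service_id);
}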
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index adc555e0323d..20f3f001694e 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -91,7 +91,7 @@ static const s8 rxrpc_ack_priority[] = {
* propose an ACK be sent
*/
void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
- __be32 serial, bool immediate)
+ u32 serial, bool immediate)
{
unsigned long expiry;
s8 prior = rxrpc_ack_priority[ack_reason];
@@ -99,8 +99,7 @@ void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
ASSERTCMP(prior, >, 0);
_enter("{%d},%s,%%%x,%u",
- call->debug_id, rxrpc_acks(ack_reason), ntohl(serial),
- immediate);
+ call->debug_id, rxrpc_acks(ack_reason), serial, immediate);
if (prior < rxrpc_ack_priority[call->ackr_reason]) {
if (immediate)
@@ -139,7 +138,7 @@ void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
expiry = rxrpc_requested_ack_delay;
if (!expiry)
goto cancel_timer;
- if (!immediate || serial == cpu_to_be32(1)) {
+ if (!immediate || serial == 1) {
_debug("run defer timer");
goto run_timer;
}
@@ -157,11 +156,11 @@ run_timer:
return;
cancel_timer:
- _debug("cancel timer %%%u", ntohl(serial));
+ _debug("cancel timer %%%u", serial);
try_to_del_timer_sync(&call->ack_timer);
read_lock_bh(&call->state_lock);
if (call->state <= RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
rxrpc_queue_call(call);
read_unlock_bh(&call->state_lock);
}
@@ -170,7 +169,7 @@ cancel_timer:
* propose an ACK be sent, locking the call structure
*/
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
- __be32 serial, bool immediate)
+ u32 serial, bool immediate)
{
s8 prior = rxrpc_ack_priority[ack_reason];
@@ -193,7 +192,7 @@ static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
if (resend & 1) {
_debug("SET RESEND");
- set_bit(RXRPC_CALL_RESEND, &call->events);
+ set_bit(RXRPC_CALL_EV_RESEND, &call->events);
}
if (resend & 2) {
@@ -203,7 +202,7 @@ static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
} else {
_debug("KILL RESEND TIMER");
del_timer_sync(&call->resend_timer);
- clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
+ clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
}
read_unlock_bh(&call->state_lock);
@@ -214,8 +213,8 @@ static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
*/
static void rxrpc_resend(struct rxrpc_call *call)
{
+ struct rxrpc_wire_header *whdr;
struct rxrpc_skb_priv *sp;
- struct rxrpc_header *hdr;
struct sk_buff *txb;
unsigned long *p_txb, resend_at;
bool stop;
@@ -247,14 +246,13 @@ static void rxrpc_resend(struct rxrpc_call *call)
sp->need_resend = false;
/* each Tx packet has a new serial number */
- sp->hdr.serial =
- htonl(atomic_inc_return(&call->conn->serial));
+ sp->hdr.serial = atomic_inc_return(&call->conn->serial);
- hdr = (struct rxrpc_header *) txb->head;
- hdr->serial = sp->hdr.serial;
+ whdr = (struct rxrpc_wire_header *)txb->head;
+ whdr->serial = htonl(sp->hdr.serial);
_proto("Tx DATA %%%u { #%d }",
- ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
+ sp->hdr.serial, sp->hdr.seq);
if (rxrpc_send_packet(call->conn->trans, txb) < 0) {
stop = true;
sp->resend_at = jiffies + 3;
@@ -428,7 +426,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
int tail = call->acks_tail, old_tail;
int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);
- _enter("{%u,%u},%u", call->acks_hard, win, hard);
+ _enter("{%u,%u},%u", call->acks_hard, win, hard);
ASSERTCMP(hard - call->acks_hard, <=, win);
@@ -478,11 +476,11 @@ static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
sp = rxrpc_skb(skb);
_debug("drain OOS packet %d [%d]",
- ntohl(sp->hdr.seq), call->rx_first_oos);
+ sp->hdr.seq, call->rx_first_oos);
- if (ntohl(sp->hdr.seq) != call->rx_first_oos) {
+ if (sp->hdr.seq != call->rx_first_oos) {
skb_queue_head(&call->rx_oos_queue, skb);
- call->rx_first_oos = ntohl(rxrpc_skb(skb)->hdr.seq);
+ call->rx_first_oos = rxrpc_skb(skb)->hdr.seq;
_debug("requeue %p {%u}", skb, call->rx_first_oos);
} else {
skb->mark = RXRPC_SKB_MARK_DATA;
@@ -496,8 +494,7 @@ static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
/* find out what the next packet is */
skb = skb_peek(&call->rx_oos_queue);
if (skb)
- call->rx_first_oos =
- ntohl(rxrpc_skb(skb)->hdr.seq);
+ call->rx_first_oos = rxrpc_skb(skb)->hdr.seq;
else
call->rx_first_oos = 0;
_debug("peek %p {%u}", skb, call->rx_first_oos);
@@ -522,7 +519,7 @@ static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
u32 seq;
sp = rxrpc_skb(skb);
- seq = ntohl(sp->hdr.seq);
+ seq = sp->hdr.seq;
_enter(",,{%u}", seq);
skb->destructor = rxrpc_packet_destructor;
@@ -535,9 +532,8 @@ static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
skb_queue_walk(&call->rx_oos_queue, p) {
psp = rxrpc_skb(p);
- if (ntohl(psp->hdr.seq) > seq) {
- _debug("insert oos #%u before #%u",
- seq, ntohl(psp->hdr.seq));
+ if (psp->hdr.seq > seq) {
+ _debug("insert oos #%u before #%u", seq, psp->hdr.seq);
skb_insert(p, skb, &call->rx_oos_queue);
goto inserted;
}
@@ -555,7 +551,7 @@ inserted:
if (call->state < RXRPC_CALL_COMPLETE &&
call->rx_data_post == call->rx_first_oos) {
_debug("drain rx oos now");
- set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
+ set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
}
read_unlock(&call->state_lock);
@@ -586,7 +582,7 @@ static void rxrpc_zap_tx_window(struct rxrpc_call *call)
skb = (struct sk_buff *) _skb;
sp = rxrpc_skb(skb);
- _debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
+ _debug("+++ clear Tx %u", sp->hdr.seq);
rxrpc_free_skb(skb);
}
@@ -657,8 +653,7 @@ process_further:
/* data packets that wind up here have been received out of
* order, need security processing or are jumbo packets */
case RXRPC_PACKET_TYPE_DATA:
- _proto("OOSQ DATA %%%u { #%u }",
- ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
+ _proto("OOSQ DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);
/* secured packets must be verified and possibly decrypted */
if (rxrpc_verify_packet(call, skb, _abort_code) < 0)
@@ -676,7 +671,7 @@ process_further:
if (!skb_pull(skb, sizeof(ack)))
BUG();
- latest = ntohl(sp->hdr.serial);
+ latest = sp->hdr.serial;
hard = ntohl(ack.firstPacket);
tx = atomic_read(&call->sequence);
@@ -793,7 +788,7 @@ all_acked:
del_timer_sync(&call->resend_timer);
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
+ clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
if (call->acks_window)
rxrpc_zap_tx_window(call);
@@ -881,16 +876,17 @@ void rxrpc_process_call(struct work_struct *work)
{
struct rxrpc_call *call =
container_of(work, struct rxrpc_call, processor);
+ struct rxrpc_wire_header whdr;
struct rxrpc_ackpacket ack;
struct rxrpc_ackinfo ackinfo;
- struct rxrpc_header hdr;
struct msghdr msg;
struct kvec iov[5];
+ enum rxrpc_call_event genbit;
unsigned long bits;
__be32 data, pad;
size_t len;
- int genbit, loop, nbit, ioc, ret, mtu;
- u32 abort_code = RX_PROTOCOL_ERROR;
+ int loop, nbit, ioc, ret, mtu;
+ u32 serial, abort_code = RX_PROTOCOL_ERROR;
u8 *acks = NULL;
//printk("\n--------------------\n");
@@ -911,33 +907,33 @@ void rxrpc_process_call(struct work_struct *work)
msg.msg_controllen = 0;
msg.msg_flags = 0;
- hdr.epoch = call->conn->epoch;
- hdr.cid = call->cid;
- hdr.callNumber = call->call_id;
- hdr.seq = 0;
- hdr.type = RXRPC_PACKET_TYPE_ACK;
- hdr.flags = call->conn->out_clientflag;
- hdr.userStatus = 0;
- hdr.securityIndex = call->conn->security_ix;
- hdr._rsvd = 0;
- hdr.serviceId = call->conn->service_id;
+ whdr.epoch = htonl(call->conn->epoch);
+ whdr.cid = htonl(call->cid);
+ whdr.callNumber = htonl(call->call_id);
+ whdr.seq = 0;
+ whdr.type = RXRPC_PACKET_TYPE_ACK;
+ whdr.flags = call->conn->out_clientflag;
+ whdr.userStatus = 0;
+ whdr.securityIndex = call->conn->security_ix;
+ whdr._rsvd = 0;
+ whdr.serviceId = htons(call->service_id);
memset(iov, 0, sizeof(iov));
- iov[0].iov_base = &hdr;
- iov[0].iov_len = sizeof(hdr);
+ iov[0].iov_base = &whdr;
+ iov[0].iov_len = sizeof(whdr);
/* deal with events of a final nature */
- if (test_bit(RXRPC_CALL_RELEASE, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
rxrpc_release_call(call);
- clear_bit(RXRPC_CALL_RELEASE, &call->events);
+ clear_bit(RXRPC_CALL_EV_RELEASE, &call->events);
}
- if (test_bit(RXRPC_CALL_RCVD_ERROR, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
int error;
- clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
- clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
- clear_bit(RXRPC_CALL_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
+ clear_bit(RXRPC_CALL_EV_ABORT, &call->events);
error = call->conn->trans->peer->net_error;
_debug("post net error %d", error);
@@ -945,47 +941,47 @@ void rxrpc_process_call(struct work_struct *work)
if (rxrpc_post_message(call, RXRPC_SKB_MARK_NET_ERROR,
error, true) < 0)
goto no_mem;
- clear_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
+ clear_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
goto kill_ACKs;
}
- if (test_bit(RXRPC_CALL_CONN_ABORT, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events)) {
ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
- clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
- clear_bit(RXRPC_CALL_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
+ clear_bit(RXRPC_CALL_EV_ABORT, &call->events);
_debug("post conn abort");
if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
call->conn->error, true) < 0)
goto no_mem;
- clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
goto kill_ACKs;
}
- if (test_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) {
- hdr.type = RXRPC_PACKET_TYPE_BUSY;
- genbit = RXRPC_CALL_REJECT_BUSY;
+ if (test_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events)) {
+ whdr.type = RXRPC_PACKET_TYPE_BUSY;
+ genbit = RXRPC_CALL_EV_REJECT_BUSY;
goto send_message;
}
- if (test_bit(RXRPC_CALL_ABORT, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
ECONNABORTED, true) < 0)
goto no_mem;
- hdr.type = RXRPC_PACKET_TYPE_ABORT;
+ whdr.type = RXRPC_PACKET_TYPE_ABORT;
data = htonl(call->abort_code);
iov[1].iov_base = &data;
iov[1].iov_len = sizeof(data);
- genbit = RXRPC_CALL_ABORT;
+ genbit = RXRPC_CALL_EV_ABORT;
goto send_message;
}
- if (test_bit(RXRPC_CALL_ACK_FINAL, &call->events)) {
- genbit = RXRPC_CALL_ACK_FINAL;
+ if (test_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events)) {
+ genbit = RXRPC_CALL_EV_ACK_FINAL;
ack.bufferSpace = htons(8);
ack.maxSkew = 0;
@@ -995,9 +991,9 @@ void rxrpc_process_call(struct work_struct *work)
call->ackr_reason = 0;
spin_lock_bh(&call->lock);
- ack.serial = call->ackr_serial;
- ack.previousPacket = call->ackr_prev_seq;
- ack.firstPacket = htonl(call->rx_data_eaten + 1);
+ ack.serial = htonl(call->ackr_serial);
+ ack.previousPacket = htonl(call->ackr_prev_seq);
+ ack.firstPacket = htonl(call->rx_data_eaten + 1);
spin_unlock_bh(&call->lock);
pad = 0;
@@ -1011,12 +1007,12 @@ void rxrpc_process_call(struct work_struct *work)
goto send_ACK;
}
- if (call->events & ((1 << RXRPC_CALL_RCVD_BUSY) |
- (1 << RXRPC_CALL_RCVD_ABORT))
+ if (call->events & ((1 << RXRPC_CALL_EV_RCVD_BUSY) |
+ (1 << RXRPC_CALL_EV_RCVD_ABORT))
) {
u32 mark;
- if (test_bit(RXRPC_CALL_RCVD_ABORT, &call->events))
+ if (test_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events))
mark = RXRPC_SKB_MARK_REMOTE_ABORT;
else
mark = RXRPC_SKB_MARK_BUSY;
@@ -1026,22 +1022,22 @@ void rxrpc_process_call(struct work_struct *work)
if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
goto no_mem;
- clear_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
- clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
+ clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
goto kill_ACKs;
}
- if (test_and_clear_bit(RXRPC_CALL_RCVD_ACKALL, &call->events)) {
+ if (test_and_clear_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events)) {
_debug("do implicit ackall");
rxrpc_clear_tx_window(call);
}
- if (test_bit(RXRPC_CALL_LIFE_TIMER, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events)) {
write_lock_bh(&call->state_lock);
if (call->state <= RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_LOCALLY_ABORTED;
call->abort_code = RX_CALL_TIMEOUT;
- set_bit(RXRPC_CALL_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
}
write_unlock_bh(&call->state_lock);
@@ -1050,7 +1046,7 @@ void rxrpc_process_call(struct work_struct *work)
ETIME, true) < 0)
goto no_mem;
- clear_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
+ clear_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
goto kill_ACKs;
}
@@ -1071,13 +1067,13 @@ void rxrpc_process_call(struct work_struct *work)
}
/* handle resending */
- if (test_and_clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
+ if (test_and_clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
rxrpc_resend_timer(call);
- if (test_and_clear_bit(RXRPC_CALL_RESEND, &call->events))
+ if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events))
rxrpc_resend(call);
/* consider sending an ordinary ACK */
- if (test_bit(RXRPC_CALL_ACK, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
_debug("send ACK: window: %d - %d { %lx }",
call->rx_data_eaten, call->ackr_win_top,
call->ackr_window[0]);
@@ -1085,11 +1081,11 @@ void rxrpc_process_call(struct work_struct *work)
if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
/* ACK by sending reply DATA packet in this state */
- clear_bit(RXRPC_CALL_ACK, &call->events);
+ clear_bit(RXRPC_CALL_EV_ACK, &call->events);
goto maybe_reschedule;
}
- genbit = RXRPC_CALL_ACK;
+ genbit = RXRPC_CALL_EV_ACK;
acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
GFP_NOFS);
@@ -1099,13 +1095,11 @@ void rxrpc_process_call(struct work_struct *work)
//hdr.flags = RXRPC_SLOW_START_OK;
ack.bufferSpace = htons(8);
ack.maxSkew = 0;
- ack.serial = 0;
- ack.reason = 0;
spin_lock_bh(&call->lock);
- ack.reason = call->ackr_reason;
- ack.serial = call->ackr_serial;
- ack.previousPacket = call->ackr_prev_seq;
+ ack.reason = call->ackr_reason;
+ ack.serial = htonl(call->ackr_serial);
+ ack.previousPacket = htonl(call->ackr_prev_seq);
ack.firstPacket = htonl(call->rx_data_eaten + 1);
ack.nAcks = 0;
@@ -1152,7 +1146,7 @@ void rxrpc_process_call(struct work_struct *work)
/* handle completion of security negotiations on an incoming
* connection */
- if (test_and_clear_bit(RXRPC_CALL_SECURED, &call->events)) {
+ if (test_and_clear_bit(RXRPC_CALL_EV_SECURED, &call->events)) {
_debug("secured");
spin_lock_bh(&call->lock);
@@ -1160,7 +1154,7 @@ void rxrpc_process_call(struct work_struct *work)
_debug("securing");
write_lock(&call->conn->lock);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_bit(RXRPC_CALL_RELEASE, &call->events)) {
+ !test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
_debug("not released");
call->state = RXRPC_CALL_SERVER_ACCEPTING;
list_move_tail(&call->accept_link,
@@ -1169,39 +1163,39 @@ void rxrpc_process_call(struct work_struct *work)
write_unlock(&call->conn->lock);
read_lock(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE)
- set_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
+ set_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
read_unlock(&call->state_lock);
}
spin_unlock_bh(&call->lock);
- if (!test_bit(RXRPC_CALL_POST_ACCEPT, &call->events))
+ if (!test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events))
goto maybe_reschedule;
}
/* post a notification of an acceptable connection to the app */
- if (test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events)) {
_debug("post accept");
if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
0, false) < 0)
goto no_mem;
- clear_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
+ clear_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
goto maybe_reschedule;
}
/* handle incoming call acceptance */
- if (test_and_clear_bit(RXRPC_CALL_ACCEPTED, &call->events)) {
+ if (test_and_clear_bit(RXRPC_CALL_EV_ACCEPTED, &call->events)) {
_debug("accepted");
ASSERTCMP(call->rx_data_post, ==, 0);
call->rx_data_post = 1;
read_lock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE)
- set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
+ set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
read_unlock_bh(&call->state_lock);
}
/* drain the out of sequence received packet queue into the packet Rx
* queue */
- if (test_and_clear_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) {
+ if (test_and_clear_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events)) {
while (call->rx_data_post == call->rx_first_oos)
if (rxrpc_drain_rx_oos_queue(call) < 0)
break;
@@ -1224,9 +1218,10 @@ send_ACK:
ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
ackinfo.jumbo_max = htonl(rxrpc_rx_jumbo_max);
- hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
+ serial = atomic_inc_return(&call->conn->serial);
+ whdr.serial = htonl(serial);
_proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
- ntohl(hdr.serial),
+ serial,
ntohs(ack.maxSkew),
ntohl(ack.firstPacket),
ntohl(ack.previousPacket),
@@ -1242,8 +1237,9 @@ send_ACK:
send_message:
_debug("send message");
- hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
- _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
+ serial = atomic_inc_return(&call->conn->serial);
+ whdr.serial = htonl(serial);
+ _proto("Tx %s %%%u", rxrpc_pkts[whdr.type], serial);
send_message_2:
len = iov[0].iov_len;
@@ -1280,12 +1276,12 @@ send_message_2:
}
switch (genbit) {
- case RXRPC_CALL_ABORT:
+ case RXRPC_CALL_EV_ABORT:
clear_bit(genbit, &call->events);
- clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
goto kill_ACKs;
- case RXRPC_CALL_ACK_FINAL:
+ case RXRPC_CALL_EV_ACK_FINAL:
write_lock_bh(&call->state_lock);
if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK)
call->state = RXRPC_CALL_COMPLETE;
@@ -1310,9 +1306,9 @@ send_message_2:
kill_ACKs:
del_timer_sync(&call->ack_timer);
- if (test_and_clear_bit(RXRPC_CALL_ACK_FINAL, &call->events))
+ if (test_and_clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events))
rxrpc_put_call(call);
- clear_bit(RXRPC_CALL_ACK, &call->events);
+ clear_bit(RXRPC_CALL_EV_ACK, &call->events);
maybe_reschedule:
if (call->events || !skb_queue_empty(&call->rx_queue)) {
@@ -1326,12 +1322,11 @@ maybe_reschedule:
if (call->state >= RXRPC_CALL_COMPLETE &&
!list_empty(&call->accept_link)) {
_debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
- call, call->events, call->flags,
- ntohl(call->conn->cid));
+ call, call->events, call->flags, call->conn->cid);
read_lock_bh(&call->state_lock);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
rxrpc_queue_call(call);
read_unlock_bh(&call->state_lock);
}
@@ -1345,7 +1340,7 @@ error:
* this means there's a race between clearing the flag and setting the
* work pending bit and the work item being processed again */
if (call->events && !work_pending(&call->processor)) {
- _debug("jumpstart %x", ntohl(call->conn->cid));
+ _debug("jumpstart %x", call->conn->cid);
rxrpc_queue_call(call);
}
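
The RXRPC_CALL_* to RXRPC_CALL_EV_* renaming running through ar-ack.c separates the bit numbers used with call->events from those used with call->flags; previously both sets shared one RXRPC_CALL_* namespace and a constant could silently be tested against the wrong word. A sketch of the resulting shape (names illustrative, not the kernel's):

enum example_call_flag  { CALL_RELEASED, CALL_HAS_USERID };
enum example_call_event { CALL_EV_RELEASE, CALL_EV_ACK, CALL_EV_ABORT };

struct example_call {
	unsigned long flags;	/* test_bit() etc. with example_call_flag */
	unsigned long events;	/* test_bit() etc. with example_call_event */
};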
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index a9e05db0f5d5..4a499e0100f1 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -28,7 +28,7 @@ unsigned rxrpc_max_call_lifetime = 60 * HZ;
*/
unsigned rxrpc_dead_call_expiry = 2 * HZ;
-const char *const rxrpc_call_states[] = {
+const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
[RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq",
[RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl",
[RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl",
@@ -64,11 +64,11 @@ static DEFINE_HASHTABLE(rxrpc_call_hash, 10);
* Hash function for rxrpc_call_hash
*/
static unsigned long rxrpc_call_hashfunc(
- u8 clientflag,
- __be32 cid,
- __be32 call_id,
- __be32 epoch,
- __be16 service_id,
+ u8 in_clientflag,
+ u32 cid,
+ u32 call_id,
+ u32 epoch,
+ u16 service_id,
sa_family_t proto,
void *localptr,
unsigned int addr_size,
@@ -77,7 +77,6 @@ static unsigned long rxrpc_call_hashfunc(
const u16 *p;
unsigned int i;
unsigned long key;
- u32 hcid = ntohl(cid);
_enter("");
@@ -85,12 +84,12 @@ static unsigned long rxrpc_call_hashfunc(
/* We just want to add up the __be32 values, so forcing the
* cast should be okay.
*/
- key += (__force u32)epoch;
- key += (__force u16)service_id;
- key += (__force u32)call_id;
- key += (hcid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
- key += hcid & RXRPC_CHANNELMASK;
- key += clientflag;
+ key += epoch;
+ key += service_id;
+ key += call_id;
+ key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
+ key += cid & RXRPC_CHANNELMASK;
+ key += in_clientflag;
key += proto;
/* Step through the peer address in 16-bit portions for speed */
for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
@@ -148,19 +147,16 @@ static void rxrpc_call_hash_del(struct rxrpc_call *call)
* isn't there.
*/
struct rxrpc_call *rxrpc_find_call_hash(
- u8 clientflag,
- __be32 cid,
- __be32 call_id,
- __be32 epoch,
- __be16 service_id,
+ struct rxrpc_host_header *hdr,
void *localptr,
sa_family_t proto,
- const u8 *peer_addr)
+ const void *peer_addr)
{
unsigned long key;
unsigned int addr_size = 0;
struct rxrpc_call *call = NULL;
struct rxrpc_call *ret = NULL;
+ u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED;
_enter("");
switch (proto) {
@@ -174,20 +170,21 @@ struct rxrpc_call *rxrpc_find_call_hash(
break;
}
- key = rxrpc_call_hashfunc(clientflag, cid, call_id, epoch,
- service_id, proto, localptr, addr_size,
+ key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber,
+ hdr->epoch, hdr->serviceId,
+ proto, localptr, addr_size,
peer_addr);
hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
if (call->hash_key == key &&
- call->call_id == call_id &&
- call->cid == cid &&
- call->in_clientflag == clientflag &&
- call->service_id == service_id &&
+ call->call_id == hdr->callNumber &&
+ call->cid == hdr->cid &&
+ call->in_clientflag == in_clientflag &&
+ call->service_id == hdr->serviceId &&
call->proto == proto &&
call->local == localptr &&
memcmp(call->peer_ip.ipv6_addr, peer_addr,
- addr_size) == 0 &&
- call->epoch == epoch) {
+ addr_size) == 0 &&
+ call->epoch == hdr->epoch) {
ret = call;
break;
}
@@ -414,12 +411,12 @@ found_extant_second:
*/
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
struct rxrpc_connection *conn,
- struct rxrpc_header *hdr,
+ struct rxrpc_host_header *hdr,
gfp_t gfp)
{
struct rxrpc_call *call, *candidate;
struct rb_node **p, *parent;
- __be32 call_id;
+ u32 call_id;
_enter(",%d,,%x", conn->debug_id, gfp);
@@ -433,7 +430,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
candidate->conn = conn;
candidate->cid = hdr->cid;
candidate->call_id = hdr->callNumber;
- candidate->channel = ntohl(hdr->cid) & RXRPC_CHANNELMASK;
+ candidate->channel = hdr->cid & RXRPC_CHANNELMASK;
candidate->rx_data_post = 0;
candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
if (conn->security_ix > 0)
@@ -452,7 +449,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
read_lock(&call->state_lock);
switch (call->state) {
case RXRPC_CALL_LOCALLY_ABORTED:
- if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
+ if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
rxrpc_queue_call(call);
case RXRPC_CALL_REMOTELY_ABORTED:
read_unlock(&call->state_lock);
@@ -492,9 +489,9 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
/* The tree is sorted in order of the __be32 value without
* turning it into host order.
*/
- if ((__force u32)call_id < (__force u32)call->call_id)
+ if (call_id < call->call_id)
p = &(*p)->rb_left;
- else if ((__force u32)call_id > (__force u32)call->call_id)
+ else if (call_id > call->call_id)
p = &(*p)->rb_right;
else
goto old_call;
@@ -686,7 +683,7 @@ void rxrpc_release_call(struct rxrpc_call *call)
_debug("+++ ABORTING STATE %d +++\n", call->state);
call->state = RXRPC_CALL_LOCALLY_ABORTED;
call->abort_code = RX_CALL_DEAD;
- set_bit(RXRPC_CALL_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
rxrpc_queue_call(call);
}
write_unlock(&call->state_lock);
@@ -714,8 +711,7 @@ void rxrpc_release_call(struct rxrpc_call *call)
_debug("- zap %s %%%u #%u",
rxrpc_pkts[sp->hdr.type],
- ntohl(sp->hdr.serial),
- ntohl(sp->hdr.seq));
+ sp->hdr.serial, sp->hdr.seq);
rxrpc_free_skb(skb);
spin_lock_bh(&call->lock);
}
@@ -763,10 +759,10 @@ static void rxrpc_mark_call_released(struct rxrpc_call *call)
_debug("abort call %p", call);
call->state = RXRPC_CALL_LOCALLY_ABORTED;
call->abort_code = RX_CALL_DEAD;
- if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
+ if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
sched = true;
}
- if (!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+ if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
sched = true;
if (sched)
rxrpc_queue_call(call);
@@ -873,9 +869,9 @@ static void rxrpc_cleanup_call(struct rxrpc_call *call)
unsigned long _skb;
_skb = call->acks_window[call->acks_tail] & ~1;
- sp = rxrpc_skb((struct sk_buff *) _skb);
- _debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
- rxrpc_free_skb((struct sk_buff *) _skb);
+ sp = rxrpc_skb((struct sk_buff *)_skb);
+ _debug("+++ clear Tx %u", sp->hdr.seq);
+ rxrpc_free_skb((struct sk_buff *)_skb);
call->acks_tail =
(call->acks_tail + 1) & (call->acks_winsz - 1);
}
@@ -975,7 +971,7 @@ static void rxrpc_call_life_expired(unsigned long _call)
_enter("{%d}", call->debug_id);
read_lock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE) {
- set_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
+ set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
rxrpc_queue_call(call);
}
read_unlock_bh(&call->state_lock);
@@ -995,7 +991,7 @@ static void rxrpc_resend_time_expired(unsigned long _call)
return;
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- if (!test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
+ if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
rxrpc_queue_call(call);
}
@@ -1013,7 +1009,7 @@ static void rxrpc_ack_time_expired(unsigned long _call)
read_lock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
rxrpc_queue_call(call);
read_unlock_bh(&call->state_lock);
}
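
With the header kept in host order, rxrpc_call_hashfunc() above can sum plain integers without __force casts and fold the peer address in 16-bit chunks. An illustrative reduction of the keying (not the kernel's exact function):

#include <stddef.h>
#include <stdint.h>

static unsigned long example_call_hash(uint32_t epoch, uint16_t service_id,
				       uint32_t call_id, uint32_t cid,
				       const void *peer_addr, size_t addr_size)
{
	const uint16_t *p = peer_addr;
	unsigned long key = epoch + service_id + call_id + cid;
	size_t i;

	for (i = 0; i < addr_size / 2; i++)
		key += p[i];
	return key;
}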
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 6c71ed1caf16..53df14cb8d25 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -57,10 +57,10 @@ static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp)
*/
static inline
int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
- struct key *key, __be16 service_id)
+ struct key *key, u16 service_id)
{
return (bundle->service_id - service_id) ?:
- ((unsigned long) bundle->key - (unsigned long) key);
+ ((unsigned long)bundle->key - (unsigned long)key);
}
/*
@@ -69,14 +69,14 @@ int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
struct rxrpc_transport *trans,
struct key *key,
- __be16 service_id,
+ u16 service_id,
gfp_t gfp)
{
struct rxrpc_conn_bundle *bundle, *candidate;
struct rb_node *p, *parent, **pp;
_enter("%p{%x},%x,%hx,",
- rx, key_serial(key), trans->debug_id, ntohs(service_id));
+ rx, key_serial(key), trans->debug_id, service_id);
if (rx->trans == trans && rx->bundle) {
atomic_inc(&rx->bundle->usage);
@@ -213,7 +213,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
conn->avail_calls = RXRPC_MAXCALLS;
conn->size_align = 4;
- conn->header_size = sizeof(struct rxrpc_header);
+ conn->header_size = sizeof(struct rxrpc_wire_header);
}
_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
@@ -230,7 +230,7 @@ static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
struct rxrpc_connection *xconn;
struct rb_node *parent, **p;
__be32 epoch;
- u32 real_conn_id;
+ u32 cid;
_enter("");
@@ -241,7 +241,7 @@ static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
conn->trans->conn_idcounter += RXRPC_CID_INC;
if (conn->trans->conn_idcounter < RXRPC_CID_INC)
conn->trans->conn_idcounter = RXRPC_CID_INC;
- real_conn_id = conn->trans->conn_idcounter;
+ cid = conn->trans->conn_idcounter;
attempt_insertion:
parent = NULL;
@@ -255,9 +255,9 @@ attempt_insertion:
p = &(*p)->rb_left;
else if (epoch > xconn->epoch)
p = &(*p)->rb_right;
- else if (real_conn_id < xconn->real_conn_id)
+ else if (cid < xconn->cid)
p = &(*p)->rb_left;
- else if (real_conn_id > xconn->real_conn_id)
+ else if (cid > xconn->cid)
p = &(*p)->rb_right;
else
goto id_exists;
@@ -268,20 +268,19 @@ attempt_insertion:
rb_link_node(&conn->node, parent, p);
rb_insert_color(&conn->node, &conn->trans->client_conns);
- conn->real_conn_id = real_conn_id;
- conn->cid = htonl(real_conn_id);
+ conn->cid = cid;
write_unlock_bh(&conn->trans->conn_lock);
- _leave(" [CONNID %x CID %x]", real_conn_id, ntohl(conn->cid));
+ _leave(" [CID %x]", cid);
return;
/* we found a connection with the proposed ID - walk the tree from that
* point looking for the next unused ID */
id_exists:
for (;;) {
- real_conn_id += RXRPC_CID_INC;
- if (real_conn_id < RXRPC_CID_INC) {
- real_conn_id = RXRPC_CID_INC;
- conn->trans->conn_idcounter = real_conn_id;
+ cid += RXRPC_CID_INC;
+ if (cid < RXRPC_CID_INC) {
+ cid = RXRPC_CID_INC;
+ conn->trans->conn_idcounter = cid;
goto attempt_insertion;
}
@@ -291,7 +290,7 @@ id_exists:
xconn = rb_entry(parent, struct rxrpc_connection, node);
if (epoch < xconn->epoch ||
- real_conn_id < xconn->real_conn_id)
+ cid < xconn->cid)
goto attempt_insertion;
}
}
@@ -334,7 +333,7 @@ static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
*/
static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
struct rxrpc_transport *trans,
- __be16 service_id,
+ u16 service_id,
struct rxrpc_call *call,
gfp_t gfp)
{
@@ -404,11 +403,11 @@ found_channel:
conn->channels[chan] = call;
call->conn = conn;
call->channel = chan;
- call->cid = conn->cid | htonl(chan);
- call->call_id = htonl(++conn->call_counter);
+ call->cid = conn->cid | chan;
+ call->call_id = ++conn->call_counter;
_net("CONNECT client on conn %d chan %d as call %x",
- conn->debug_id, chan, ntohl(call->call_id));
+ conn->debug_id, chan, call->call_id);
spin_unlock(&trans->client_lock);
@@ -593,11 +592,11 @@ found_channel:
conn->channels[chan] = call;
call->conn = conn;
call->channel = chan;
- call->cid = conn->cid | htonl(chan);
- call->call_id = htonl(++conn->call_counter);
+ call->cid = conn->cid | chan;
+ call->call_id = ++conn->call_counter;
_net("CONNECT client on conn %d chan %d as call %x",
- conn->debug_id, chan, ntohl(call->call_id));
+ conn->debug_id, chan, call->call_id);
ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
spin_unlock(&trans->client_lock);
@@ -620,21 +619,21 @@ interrupted:
*/
struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *trans,
- struct rxrpc_header *hdr,
+ struct rxrpc_host_header *hdr,
gfp_t gfp)
{
struct rxrpc_connection *conn, *candidate = NULL;
struct rb_node *p, **pp;
const char *new = "old";
__be32 epoch;
- u32 conn_id;
+ u32 cid;
_enter("");
ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED);
epoch = hdr->epoch;
- conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
+ cid = hdr->cid & RXRPC_CIDMASK;
/* search the connection list first */
read_lock_bh(&trans->conn_lock);
@@ -643,15 +642,15 @@ rxrpc_incoming_connection(struct rxrpc_transport *trans,
while (p) {
conn = rb_entry(p, struct rxrpc_connection, node);
- _debug("maybe %x", conn->real_conn_id);
+ _debug("maybe %x", conn->cid);
if (epoch < conn->epoch)
p = p->rb_left;
else if (epoch > conn->epoch)
p = p->rb_right;
- else if (conn_id < conn->real_conn_id)
+ else if (cid < conn->cid)
p = p->rb_left;
- else if (conn_id > conn->real_conn_id)
+ else if (cid > conn->cid)
p = p->rb_right;
else
goto found_extant_connection;
@@ -668,12 +667,11 @@ rxrpc_incoming_connection(struct rxrpc_transport *trans,
candidate->trans = trans;
candidate->epoch = hdr->epoch;
- candidate->cid = hdr->cid & cpu_to_be32(RXRPC_CIDMASK);
+ candidate->cid = hdr->cid & RXRPC_CIDMASK;
candidate->service_id = hdr->serviceId;
candidate->security_ix = hdr->securityIndex;
candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
candidate->out_clientflag = 0;
- candidate->real_conn_id = conn_id;
candidate->state = RXRPC_CONN_SERVER;
if (candidate->service_id)
candidate->state = RXRPC_CONN_SERVER_UNSECURED;
@@ -690,9 +688,9 @@ rxrpc_incoming_connection(struct rxrpc_transport *trans,
pp = &(*pp)->rb_left;
else if (epoch > conn->epoch)
pp = &(*pp)->rb_right;
- else if (conn_id < conn->real_conn_id)
+ else if (cid < conn->cid)
pp = &(*pp)->rb_left;
- else if (conn_id > conn->real_conn_id)
+ else if (cid > conn->cid)
pp = &(*pp)->rb_right;
else
goto found_extant_second;
@@ -714,7 +712,7 @@ rxrpc_incoming_connection(struct rxrpc_transport *trans,
new = "new";
success:
- _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->real_conn_id);
+ _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->cid);
_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
return conn;
@@ -751,18 +749,17 @@ security_mismatch:
* packet
*/
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
- struct rxrpc_header *hdr)
+ struct rxrpc_host_header *hdr)
{
struct rxrpc_connection *conn;
struct rb_node *p;
- __be32 epoch;
- u32 conn_id;
+ u32 epoch, cid;
- _enter(",{%x,%x}", ntohl(hdr->cid), hdr->flags);
+ _enter(",{%x,%x}", hdr->cid, hdr->flags);
read_lock_bh(&trans->conn_lock);
- conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
+ cid = hdr->cid & RXRPC_CIDMASK;
epoch = hdr->epoch;
if (hdr->flags & RXRPC_CLIENT_INITIATED)
@@ -773,15 +770,15 @@ struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
while (p) {
conn = rb_entry(p, struct rxrpc_connection, node);
- _debug("maybe %x", conn->real_conn_id);
+ _debug("maybe %x", conn->cid);
if (epoch < conn->epoch)
p = p->rb_left;
else if (epoch > conn->epoch)
p = p->rb_right;
- else if (conn_id < conn->real_conn_id)
+ else if (cid < conn->cid)
p = p->rb_left;
- else if (conn_id > conn->real_conn_id)
+ else if (cid > conn->cid)
p = p->rb_right;
else
goto found;
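
Dropping real_conn_id works because the connection ID and the channel number pack into one host-order cid word that plain masks can split again. A sketch of the packing, with illustrative mask values (rxrpc allows four calls per connection):

#include <stdint.h>

#define EX_CHANNELMASK	0x3u			/* four calls per connection */
#define EX_CIDMASK	(~EX_CHANNELMASK)

static uint32_t ex_connection_of(uint32_t cid) { return cid & EX_CIDMASK; }
static uint32_t ex_channel_of(uint32_t cid)    { return cid & EX_CHANNELMASK; }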
diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
index e7ed43a54c41..1bdaaed8cdc4 100644
--- a/net/rxrpc/ar-connevent.c
+++ b/net/rxrpc/ar-connevent.c
@@ -42,9 +42,9 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state,
call->state = state;
call->abort_code = abort_code;
if (state == RXRPC_CALL_LOCALLY_ABORTED)
- set_bit(RXRPC_CALL_CONN_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
else
- set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
rxrpc_queue_call(call);
}
write_unlock(&call->state_lock);
@@ -60,11 +60,12 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state,
static int rxrpc_abort_connection(struct rxrpc_connection *conn,
u32 error, u32 abort_code)
{
- struct rxrpc_header hdr;
+ struct rxrpc_wire_header whdr;
struct msghdr msg;
struct kvec iov[2];
__be32 word;
size_t len;
+ u32 serial;
int ret;
_enter("%d,,%u,%u", conn->debug_id, error, abort_code);
@@ -89,28 +90,29 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
msg.msg_controllen = 0;
msg.msg_flags = 0;
- hdr.epoch = conn->epoch;
- hdr.cid = conn->cid;
- hdr.callNumber = 0;
- hdr.seq = 0;
- hdr.type = RXRPC_PACKET_TYPE_ABORT;
- hdr.flags = conn->out_clientflag;
- hdr.userStatus = 0;
- hdr.securityIndex = conn->security_ix;
- hdr._rsvd = 0;
- hdr.serviceId = conn->service_id;
+ whdr.epoch = htonl(conn->epoch);
+ whdr.cid = htonl(conn->cid);
+ whdr.callNumber = 0;
+ whdr.seq = 0;
+ whdr.type = RXRPC_PACKET_TYPE_ABORT;
+ whdr.flags = conn->out_clientflag;
+ whdr.userStatus = 0;
+ whdr.securityIndex = conn->security_ix;
+ whdr._rsvd = 0;
+ whdr.serviceId = htons(conn->service_id);
word = htonl(abort_code);
- iov[0].iov_base = &hdr;
- iov[0].iov_len = sizeof(hdr);
+ iov[0].iov_base = &whdr;
+ iov[0].iov_len = sizeof(whdr);
iov[1].iov_base = &word;
iov[1].iov_len = sizeof(word);
len = iov[0].iov_len + iov[1].iov_len;
- hdr.serial = htonl(atomic_inc_return(&conn->serial));
- _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
+ serial = atomic_inc_return(&conn->serial);
+ whdr.serial = htonl(serial);
+ _proto("Tx CONN ABORT %%%u { %d }", serial, abort_code);
ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
if (ret < 0) {
@@ -132,7 +134,7 @@ static void rxrpc_call_is_secure(struct rxrpc_call *call)
if (call) {
read_lock(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_SECURED, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_SECURED, &call->events))
rxrpc_queue_call(call);
read_unlock(&call->state_lock);
}
@@ -146,8 +148,8 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
u32 *_abort_code)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- __be32 tmp;
- u32 serial;
+ __be32 wtmp;
+ u32 abort_code;
int loop, ret;
if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
@@ -155,19 +157,18 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
return -ECONNABORTED;
}
- serial = ntohl(sp->hdr.serial);
-
- _enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, serial);
+ _enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_ABORT:
- if (skb_copy_bits(skb, 0, &tmp, sizeof(tmp)) < 0)
+ if (skb_copy_bits(skb, 0, &wtmp, sizeof(wtmp)) < 0)
return -EPROTO;
- _proto("Rx ABORT %%%u { ac=%d }", serial, ntohl(tmp));
+ abort_code = ntohl(wtmp);
+ _proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
conn->state = RXRPC_CONN_REMOTELY_ABORTED;
rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
- ntohl(tmp));
+ abort_code);
return -ECONNABORTED;
case RXRPC_PACKET_TYPE_CHALLENGE:
@@ -335,7 +336,7 @@ void rxrpc_reject_packets(struct work_struct *work)
struct sockaddr_in sin;
} sa;
struct rxrpc_skb_priv *sp;
- struct rxrpc_header hdr;
+ struct rxrpc_wire_header whdr;
struct rxrpc_local *local;
struct sk_buff *skb;
struct msghdr msg;
@@ -348,11 +349,11 @@ void rxrpc_reject_packets(struct work_struct *work)
_enter("%d", local->debug_id);
- iov[0].iov_base = &hdr;
- iov[0].iov_len = sizeof(hdr);
+ iov[0].iov_base = &whdr;
+ iov[0].iov_len = sizeof(whdr);
iov[1].iov_base = &code;
iov[1].iov_len = sizeof(code);
- size = sizeof(hdr) + sizeof(code);
+ size = sizeof(whdr) + sizeof(code);
msg.msg_name = &sa;
msg.msg_control = NULL;
@@ -370,8 +371,8 @@ void rxrpc_reject_packets(struct work_struct *work)
break;
}
- memset(&hdr, 0, sizeof(hdr));
- hdr.type = RXRPC_PACKET_TYPE_ABORT;
+ memset(&whdr, 0, sizeof(whdr));
+ whdr.type = RXRPC_PACKET_TYPE_ABORT;
while ((skb = skb_dequeue(&local->reject_queue))) {
sp = rxrpc_skb(skb);
@@ -381,13 +382,13 @@ void rxrpc_reject_packets(struct work_struct *work)
sa.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
code = htonl(skb->priority);
- hdr.epoch = sp->hdr.epoch;
- hdr.cid = sp->hdr.cid;
- hdr.callNumber = sp->hdr.callNumber;
- hdr.serviceId = sp->hdr.serviceId;
- hdr.flags = sp->hdr.flags;
- hdr.flags ^= RXRPC_CLIENT_INITIATED;
- hdr.flags &= RXRPC_CLIENT_INITIATED;
+ whdr.epoch = htonl(sp->hdr.epoch);
+ whdr.cid = htonl(sp->hdr.cid);
+ whdr.callNumber = htonl(sp->hdr.callNumber);
+ whdr.serviceId = htons(sp->hdr.serviceId);
+ whdr.flags = sp->hdr.flags;
+ whdr.flags ^= RXRPC_CLIENT_INITIATED;
+ whdr.flags &= RXRPC_CLIENT_INITIATED;
kernel_sendmsg(local->socket, &msg, iov, 2, size);
break;
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
index 0610efa83d72..3e82d6f0313c 100644
--- a/net/rxrpc/ar-error.c
+++ b/net/rxrpc/ar-error.c
@@ -115,7 +115,6 @@ void rxrpc_UDP_error_report(struct sock *sk)
/* pass the transport ref to error_handler to release */
skb_queue_tail(&trans->error_queue, skb);
rxrpc_queue_work(&trans->error_handler);
-
_leave("");
}
@@ -152,28 +151,18 @@ void rxrpc_UDP_error_handler(struct work_struct *work)
switch (ee->ee_code) {
case ICMP_NET_UNREACH:
_net("Rx Received ICMP Network Unreachable");
- err = ENETUNREACH;
break;
case ICMP_HOST_UNREACH:
_net("Rx Received ICMP Host Unreachable");
- err = EHOSTUNREACH;
break;
case ICMP_PORT_UNREACH:
_net("Rx Received ICMP Port Unreachable");
- err = ECONNREFUSED;
- break;
- case ICMP_FRAG_NEEDED:
- _net("Rx Received ICMP Fragmentation Needed (%d)",
- ee->ee_info);
- err = 0; /* dealt with elsewhere */
break;
case ICMP_NET_UNKNOWN:
_net("Rx Received ICMP Unknown Network");
- err = ENETUNREACH;
break;
case ICMP_HOST_UNKNOWN:
_net("Rx Received ICMP Unknown Host");
- err = EHOSTUNREACH;
break;
default:
_net("Rx Received ICMP DestUnreach code=%u",
@@ -222,7 +211,7 @@ void rxrpc_UDP_error_handler(struct work_struct *work)
if (call->state != RXRPC_CALL_COMPLETE &&
call->state < RXRPC_CALL_NETWORK_ERROR) {
call->state = RXRPC_CALL_NETWORK_ERROR;
- set_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
+ set_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
rxrpc_queue_call(call);
}
write_unlock(&call->state_lock);
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 4505a691d88c..63ed75c40e29 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -231,7 +231,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
_debug("drain rx oos now");
read_lock(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events))
rxrpc_queue_call(call);
read_unlock(&call->state_lock);
}
@@ -287,12 +287,12 @@ static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial)
call->acks_latest = serial;
_debug("implicit ACKALL %%%u", call->acks_latest);
- set_bit(RXRPC_CALL_RCVD_ACKALL, &call->events);
+ set_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events);
write_unlock_bh(&call->state_lock);
if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
- clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
- clear_bit(RXRPC_CALL_RESEND, &call->events);
+ clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
+ clear_bit(RXRPC_CALL_EV_RESEND, &call->events);
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
}
break;
@@ -310,8 +310,8 @@ static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial)
void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- __be32 _abort_code;
- u32 serial, hi_serial, seq, abort_code;
+ __be32 wtmp;
+ u32 hi_serial, abort_code;
_enter("%p,%p", call, skb);
@@ -330,16 +330,15 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
/* track the latest serial number on this connection for ACK packet
* information */
- serial = ntohl(sp->hdr.serial);
hi_serial = atomic_read(&call->conn->hi_serial);
- while (serial > hi_serial)
+ while (sp->hdr.serial > hi_serial)
hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
- serial);
+ sp->hdr.serial);
/* request ACK generation for any ACK or DATA packet that requests
* it */
if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
- _proto("ACK Requested on %%%u", serial);
+ _proto("ACK Requested on %%%u", sp->hdr.serial);
rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial, false);
}
@@ -347,24 +346,23 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
case RXRPC_PACKET_TYPE_ABORT:
_debug("abort");
- if (skb_copy_bits(skb, 0, &_abort_code,
- sizeof(_abort_code)) < 0)
+ if (skb_copy_bits(skb, 0, &wtmp, sizeof(wtmp)) < 0)
goto protocol_error;
- abort_code = ntohl(_abort_code);
- _proto("Rx ABORT %%%u { %x }", serial, abort_code);
+ abort_code = ntohl(wtmp);
+ _proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);
write_lock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_REMOTELY_ABORTED;
call->abort_code = abort_code;
- set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
rxrpc_queue_call(call);
}
goto free_packet_unlock;
case RXRPC_PACKET_TYPE_BUSY:
- _proto("Rx BUSY %%%u", serial);
+ _proto("Rx BUSY %%%u", sp->hdr.serial);
if (call->conn->out_clientflag)
goto protocol_error;
@@ -373,7 +371,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
switch (call->state) {
case RXRPC_CALL_CLIENT_SEND_REQUEST:
call->state = RXRPC_CALL_SERVER_BUSY;
- set_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
+ set_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
rxrpc_queue_call(call);
case RXRPC_CALL_SERVER_BUSY:
goto free_packet_unlock;
@@ -382,15 +380,13 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
}
default:
- _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], serial);
+ _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], sp->hdr.serial);
goto protocol_error;
case RXRPC_PACKET_TYPE_DATA:
- seq = ntohl(sp->hdr.seq);
+ _proto("Rx DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);
- _proto("Rx DATA %%%u { #%u }", serial, seq);
-
- if (seq == 0)
+ if (sp->hdr.seq == 0)
goto protocol_error;
call->ackr_prev_seq = sp->hdr.seq;
@@ -398,9 +394,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
/* received data implicitly ACKs all of the request packets we
* sent when we're acting as a client */
if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
- rxrpc_assume_implicit_ackall(call, serial);
+ rxrpc_assume_implicit_ackall(call, sp->hdr.serial);
- switch (rxrpc_fast_process_data(call, skb, seq)) {
+ switch (rxrpc_fast_process_data(call, skb, sp->hdr.seq)) {
case 0:
skb = NULL;
goto done;
@@ -433,7 +429,7 @@ protocol_error_locked:
if (call->state <= RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_LOCALLY_ABORTED;
call->abort_code = RX_PROTOCOL_ERROR;
- set_bit(RXRPC_CALL_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
rxrpc_queue_call(call);
}
free_packet_unlock:
@@ -481,12 +477,12 @@ static void rxrpc_process_jumbo_packet(struct rxrpc_call *call,
if (!pskb_pull(jumbo, sizeof(jhdr)))
BUG();
- sp->hdr.seq = htonl(ntohl(sp->hdr.seq) + 1);
- sp->hdr.serial = htonl(ntohl(sp->hdr.serial) + 1);
+ sp->hdr.seq += 1;
+ sp->hdr.serial += 1;
sp->hdr.flags = jhdr.flags;
sp->hdr._rsvd = jhdr._rsvd;
- _proto("Rx DATA Jumbo %%%u", ntohl(sp->hdr.serial) - 1);
+ _proto("Rx DATA Jumbo %%%u", sp->hdr.serial - 1);
rxrpc_fast_process_packet(call, part);
part = NULL;
@@ -505,7 +501,7 @@ protocol_error:
if (call->state <= RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_LOCALLY_ABORTED;
call->abort_code = RX_PROTOCOL_ERROR;
- set_bit(RXRPC_CALL_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
rxrpc_queue_call(call);
}
write_unlock_bh(&call->state_lock);
@@ -530,7 +526,7 @@ static void rxrpc_post_packet_to_call(struct rxrpc_call *call,
read_lock(&call->state_lock);
switch (call->state) {
case RXRPC_CALL_LOCALLY_ABORTED:
- if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) {
+ if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
rxrpc_queue_call(call);
goto free_unlock;
}
@@ -546,7 +542,7 @@ static void rxrpc_post_packet_to_call(struct rxrpc_call *call,
/* resend last packet of a completed call */
_debug("final ack again");
rxrpc_get_call(call);
- set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
+ set_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
rxrpc_queue_call(call);
goto free_unlock;
default:
@@ -607,6 +603,35 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
rxrpc_queue_work(&local->event_processor);
}
+/*
+ * Extract the wire header from a packet and translate the byte order.
+ */
+static noinline
+int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
+{
+ struct rxrpc_wire_header whdr;
+
+ /* dig out the RxRPC connection details */
+ if (skb_copy_bits(skb, sizeof(struct udphdr), &whdr, sizeof(whdr)) < 0)
+ return -EBADMSG;
+ if (!pskb_pull(skb, sizeof(struct udphdr) + sizeof(whdr)))
+ BUG();
+
+ memset(sp, 0, sizeof(*sp));
+ sp->hdr.epoch = ntohl(whdr.epoch);
+ sp->hdr.cid = ntohl(whdr.cid);
+ sp->hdr.callNumber = ntohl(whdr.callNumber);
+ sp->hdr.seq = ntohl(whdr.seq);
+ sp->hdr.serial = ntohl(whdr.serial);
+ sp->hdr.flags = whdr.flags;
+ sp->hdr.type = whdr.type;
+ sp->hdr.userStatus = whdr.userStatus;
+ sp->hdr.securityIndex = whdr.securityIndex;
+ sp->hdr._rsvd = ntohs(whdr._rsvd);
+ sp->hdr.serviceId = ntohs(whdr.serviceId);
+ return 0;
+}
+
static struct rxrpc_connection *rxrpc_conn_from_local(struct rxrpc_local *local,
struct sk_buff *skb,
struct rxrpc_skb_priv *sp)
@@ -686,29 +711,25 @@ void rxrpc_data_ready(struct sock *sk)
UDP_INC_STATS_BH(&init_net, UDP_MIB_INDATAGRAMS, 0);
- /* the socket buffer we have is owned by UDP, with UDP's data all over
- * it, but we really want our own */
+ /* The socket buffer we have is owned by UDP, with UDP's data all over
+ * it, but we really want our own data there.
+ */
skb_orphan(skb);
sp = rxrpc_skb(skb);
- memset(sp, 0, sizeof(*sp));
_net("Rx UDP packet from %08x:%04hu",
ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source));
/* dig out the RxRPC connection details */
- if (skb_copy_bits(skb, sizeof(struct udphdr), &sp->hdr,
- sizeof(sp->hdr)) < 0)
+ if (rxrpc_extract_header(sp, skb) < 0)
goto bad_message;
- if (!pskb_pull(skb, sizeof(struct udphdr) + sizeof(sp->hdr)))
- BUG();
_net("Rx RxRPC %s ep=%x call=%x:%x",
sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
- ntohl(sp->hdr.epoch),
- ntohl(sp->hdr.cid),
- ntohl(sp->hdr.callNumber));
+ sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);
- if (sp->hdr.type == 0 || sp->hdr.type >= RXRPC_N_PACKET_TYPES) {
+ if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
+ !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
_proto("Rx Bad Packet Type %u", sp->hdr.type);
goto bad_message;
}
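
The rewritten validity test treats RXRPC_SUPPORTED_PACKET_TYPES as a bitmask indexed by packet type. That constant is defined elsewhere in this series, so the sketch below only illustrates the shape such a mask takes; the particular types included here are an assumption, not the patch's actual definition.

	/* Illustrative only: one bit per supported type turns validity
	 * into a shift-and-mask instead of a chain of comparisons.
	 */
	#define SUPPORTED_PACKET_TYPES			\
		((1 << RXRPC_PACKET_TYPE_DATA) |	\
		 (1 << RXRPC_PACKET_TYPE_ACK) |		\
		 (1 << RXRPC_PACKET_TYPE_ABORT))

	static inline bool rxrpc_type_supported(u8 type)
	{
		return type < RXRPC_N_PACKET_TYPES &&
		       ((SUPPORTED_PACKET_TYPES >> type) & 1);
	}
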
@@ -737,14 +758,9 @@ void rxrpc_data_ready(struct sock *sk)
rxrpc_put_connection(conn);
} else {
struct rxrpc_call *call;
- u8 in_clientflag = 0;
-
- if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
- in_clientflag = RXRPC_CLIENT_INITIATED;
- call = rxrpc_find_call_hash(in_clientflag, sp->hdr.cid,
- sp->hdr.callNumber, sp->hdr.epoch,
- sp->hdr.serviceId, local, AF_INET,
- (u8 *)&ip_hdr(skb)->saddr);
+
+ call = rxrpc_find_call_hash(&sp->hdr, local,
+ AF_INET, &ip_hdr(skb)->saddr);
if (call)
rxrpc_post_packet_to_call(call, skb);
else
@@ -759,7 +775,7 @@ cant_route_call:
_debug("can't route call");
if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
- if (sp->hdr.seq == cpu_to_be32(1)) {
+ if (sp->hdr.seq == 1) {
_debug("first packet");
skb_queue_tail(&local->accept_queue, skb);
rxrpc_queue_work(&local->acceptor);
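
Taken together with rxrpc_insert_header() in ar-output.c further down, rxrpc_extract_header() above means the wire header is converted to CPU byte order exactly once on receive and back to network order exactly once on transmit; everything in between now works on plain integers. A standalone sketch of the invariant this relies on (userspace C, with hypothetical one-field headers standing in for the rxrpc structures):

	#include <arpa/inet.h>
	#include <assert.h>
	#include <stdint.h>

	struct wire_hdr { uint32_t cid; };	/* network byte order */
	struct host_hdr { uint32_t cid; };	/* CPU byte order */

	int main(void)
	{
		struct wire_hdr in  = { .cid = htonl(0x12345678) };
		struct host_hdr h   = { .cid = ntohl(in.cid) };	/* receive  */
		struct wire_hdr out = { .cid = htonl(h.cid) };	/* transmit */

		assert(in.cid == out.cid);	/* round trip is lossless */
		return 0;
	}
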
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 2934a73a5981..8b495aed517d 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -16,7 +16,7 @@
BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
(POISON_FREE << 8 | POISON_FREE))
#else
-#define CHECK_SLAB_OKAY(X) do {} while(0)
+#define CHECK_SLAB_OKAY(X) do {} while (0)
#endif
#define FCRYPT_BSIZE 8
@@ -70,12 +70,31 @@ struct rxrpc_sock {
#define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT
struct sockaddr_rxrpc srx; /* local address */
sa_family_t proto; /* protocol created with */
- __be16 service_id; /* service ID of local/remote service */
};
#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
/*
+ * CPU-byteorder normalised Rx packet header.
+ */
+struct rxrpc_host_header {
+ u32 epoch; /* client boot timestamp */
+ u32 cid; /* connection and channel ID */
+ u32 callNumber; /* call ID (0 for connection-level packets) */
+ u32 seq; /* sequence number of pkt in call stream */
+ u32 serial; /* serial number of pkt sent to network */
+ u8 type; /* packet type */
+ u8 flags; /* packet flags */
+ u8 userStatus; /* app-layer defined status */
+ u8 securityIndex; /* security protocol ID */
+ union {
+ u16 _rsvd; /* reserved */
+ u16 cksum; /* kerberos security checksum */
+ };
+ u16 serviceId; /* service ID */
+} __packed;
+
+/*
* RxRPC socket buffer private variables
* - max 48 bytes (struct sk_buff::cb)
*/
@@ -89,7 +108,7 @@ struct rxrpc_skb_priv {
bool need_resend; /* T if needs resending */
};
- struct rxrpc_header hdr; /* RxRPC packet header from this packet */
+ struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
};
#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
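
The normalised header is carried in sk_buff::cb, which the comment above caps at 48 bytes; struct rxrpc_host_header packs to 28 bytes, leaving room for the other private fields. The patch adds no guard for this, but a build-time check in the usual kernel style (an assumption shown for illustration, placed inside any function such as the module init) would be:

	/* Not part of the patch -- makes the 48-byte cb limit explicit. */
	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) >
		     FIELD_SIZEOF(struct sk_buff, cb));
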
@@ -230,7 +249,7 @@ struct rxrpc_conn_bundle {
atomic_t usage;
int debug_id; /* debug ID for printks */
unsigned short num_conns; /* number of connections in this bundle */
- __be16 service_id; /* service ID */
+ u16 service_id; /* Service ID for this bundle */
u8 security_ix; /* security type */
};
@@ -260,7 +279,6 @@ struct rxrpc_connection {
rwlock_t lock; /* access lock */
spinlock_t state_lock; /* state-change lock */
atomic_t usage;
- u32 real_conn_id; /* connection ID (host-endian) */
enum { /* current state of connection */
RXRPC_CONN_UNUSED, /* - connection not yet attempted */
RXRPC_CONN_CLIENT, /* - client connection */
@@ -282,17 +300,76 @@ struct rxrpc_connection {
u8 security_size; /* security header size */
u32 security_level; /* security level negotiated */
u32 security_nonce; /* response re-use preventer */
-
- /* the following are all in net order */
- __be32 epoch; /* epoch of this connection */
- __be32 cid; /* connection ID */
- __be16 service_id; /* service ID */
+ u32 epoch; /* epoch of this connection */
+ u32 cid; /* connection ID */
+ u16 service_id; /* service ID for this connection */
u8 security_ix; /* security type */
u8 in_clientflag; /* RXRPC_CLIENT_INITIATED if we are server */
u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
};
/*
+ * Flags in call->flags.
+ */
+enum rxrpc_call_flag {
+ RXRPC_CALL_RELEASED, /* call has been released - no more messages to userspace */
+ RXRPC_CALL_TERMINAL_MSG, /* call has given the socket its final message */
+ RXRPC_CALL_RCVD_LAST, /* all packets received */
+ RXRPC_CALL_RUN_RTIMER, /* Tx resend timer started */
+ RXRPC_CALL_TX_SOFT_ACK, /* sent some soft ACKs */
+ RXRPC_CALL_PROC_BUSY, /* the processor is busy */
+ RXRPC_CALL_INIT_ACCEPT, /* acceptance was initiated */
+ RXRPC_CALL_HAS_USERID, /* has a user ID attached */
+ RXRPC_CALL_EXPECT_OOS, /* expect out of sequence packets */
+};
+
+/*
+ * Events that can be raised on a call.
+ */
+enum rxrpc_call_event {
+ RXRPC_CALL_EV_RCVD_ACKALL, /* ACKALL or reply received */
+ RXRPC_CALL_EV_RCVD_BUSY, /* busy packet received */
+ RXRPC_CALL_EV_RCVD_ABORT, /* abort packet received */
+ RXRPC_CALL_EV_RCVD_ERROR, /* network error received */
+ RXRPC_CALL_EV_ACK_FINAL, /* need to generate final ACK (and release call) */
+ RXRPC_CALL_EV_ACK, /* need to generate ACK */
+ RXRPC_CALL_EV_REJECT_BUSY, /* need to generate busy message */
+ RXRPC_CALL_EV_ABORT, /* need to generate abort */
+ RXRPC_CALL_EV_CONN_ABORT, /* local connection abort generated */
+ RXRPC_CALL_EV_RESEND_TIMER, /* Tx resend timer expired */
+ RXRPC_CALL_EV_RESEND, /* Tx resend required */
+ RXRPC_CALL_EV_DRAIN_RX_OOS, /* drain the Rx out of sequence queue */
+ RXRPC_CALL_EV_LIFE_TIMER, /* call's lifetimer ran out */
+ RXRPC_CALL_EV_ACCEPTED, /* incoming call accepted by userspace app */
+ RXRPC_CALL_EV_SECURED, /* incoming call's connection is now secure */
+ RXRPC_CALL_EV_POST_ACCEPT, /* need to post an "accept?" message to the app */
+ RXRPC_CALL_EV_RELEASE, /* need to release the call's resources */
+};
+
+/*
+ * The states that a call can be in.
+ */
+enum rxrpc_call_state {
+ RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
+ RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */
+ RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */
+ RXRPC_CALL_CLIENT_FINAL_ACK, /* - client sending final ACK phase */
+ RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */
+ RXRPC_CALL_SERVER_ACCEPTING, /* - server accepting request */
+ RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
+ RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */
+ RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */
+ RXRPC_CALL_SERVER_AWAIT_ACK, /* - server awaiting final ACK */
+ RXRPC_CALL_COMPLETE, /* - call completed */
+ RXRPC_CALL_SERVER_BUSY, /* - call rejected by busy server */
+ RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
+ RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
+ RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
+ RXRPC_CALL_DEAD, /* - call is dead */
+ NR__RXRPC_CALL_STATES
+};
+
+/*
* RxRPC call definition
* - matched by { connection, call_id }
*/
@@ -317,57 +394,13 @@ struct rxrpc_call {
unsigned long user_call_ID; /* user-defined call ID */
unsigned long creation_jif; /* time of call creation */
unsigned long flags;
-#define RXRPC_CALL_RELEASED 0 /* call has been released - no more message to userspace */
-#define RXRPC_CALL_TERMINAL_MSG 1 /* call has given the socket its final message */
-#define RXRPC_CALL_RCVD_LAST 2 /* all packets received */
-#define RXRPC_CALL_RUN_RTIMER 3 /* Tx resend timer started */
-#define RXRPC_CALL_TX_SOFT_ACK 4 /* sent some soft ACKs */
-#define RXRPC_CALL_PROC_BUSY 5 /* the processor is busy */
-#define RXRPC_CALL_INIT_ACCEPT 6 /* acceptance was initiated */
-#define RXRPC_CALL_HAS_USERID 7 /* has a user ID attached */
-#define RXRPC_CALL_EXPECT_OOS 8 /* expect out of sequence packets */
unsigned long events;
-#define RXRPC_CALL_RCVD_ACKALL 0 /* ACKALL or reply received */
-#define RXRPC_CALL_RCVD_BUSY 1 /* busy packet received */
-#define RXRPC_CALL_RCVD_ABORT 2 /* abort packet received */
-#define RXRPC_CALL_RCVD_ERROR 3 /* network error received */
-#define RXRPC_CALL_ACK_FINAL 4 /* need to generate final ACK (and release call) */
-#define RXRPC_CALL_ACK 5 /* need to generate ACK */
-#define RXRPC_CALL_REJECT_BUSY 6 /* need to generate busy message */
-#define RXRPC_CALL_ABORT 7 /* need to generate abort */
-#define RXRPC_CALL_CONN_ABORT 8 /* local connection abort generated */
-#define RXRPC_CALL_RESEND_TIMER 9 /* Tx resend timer expired */
-#define RXRPC_CALL_RESEND 10 /* Tx resend required */
-#define RXRPC_CALL_DRAIN_RX_OOS 11 /* drain the Rx out of sequence queue */
-#define RXRPC_CALL_LIFE_TIMER 12 /* call's lifetimer ran out */
-#define RXRPC_CALL_ACCEPTED 13 /* incoming call accepted by userspace app */
-#define RXRPC_CALL_SECURED 14 /* incoming call's connection is now secure */
-#define RXRPC_CALL_POST_ACCEPT 15 /* need to post an "accept?" message to the app */
-#define RXRPC_CALL_RELEASE 16 /* need to release the call's resources */
-
spinlock_t lock;
rwlock_t state_lock; /* lock for state transition */
atomic_t usage;
atomic_t sequence; /* Tx data packet sequence counter */
u32 abort_code; /* local/remote abort code */
- enum { /* current state of call */
- RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
- RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */
- RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */
- RXRPC_CALL_CLIENT_FINAL_ACK, /* - client sending final ACK phase */
- RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */
- RXRPC_CALL_SERVER_ACCEPTING, /* - server accepting request */
- RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
- RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */
- RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */
- RXRPC_CALL_SERVER_AWAIT_ACK, /* - server awaiting final ACK */
- RXRPC_CALL_COMPLETE, /* - call completed */
- RXRPC_CALL_SERVER_BUSY, /* - call rejected by busy server */
- RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
- RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
- RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
- RXRPC_CALL_DEAD, /* - call is dead */
- } state;
+ enum rxrpc_call_state state : 8; /* current state of call */
int debug_id; /* debug ID for printks */
u8 channel; /* connection channel occupied by this call */
@@ -389,9 +422,9 @@ struct rxrpc_call {
rxrpc_seq_t rx_data_eaten; /* last data seq ID consumed by recvmsg */
rxrpc_seq_t rx_first_oos; /* first packet in rx_oos_queue (or 0) */
rxrpc_seq_t ackr_win_top; /* top of ACK window (rx_data_eaten is bottom) */
- rxrpc_seq_net_t ackr_prev_seq; /* previous sequence number received */
+ rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
u8 ackr_reason; /* reason to ACK */
- __be32 ackr_serial; /* serial of packet being ACK'd */
+ rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
atomic_t ackr_not_idle; /* number of packets in Rx queue */
/* received packet records, 1 bit per record */
@@ -403,11 +436,10 @@ struct rxrpc_call {
u8 in_clientflag; /* Copy of conn->in_clientflag for hashing */
struct rxrpc_local *local; /* Local endpoint. Used for hashing. */
sa_family_t proto; /* Frame protocol */
- /* the following should all be in net order */
- __be32 cid; /* connection ID + channel index */
- __be32 call_id; /* call ID on connection */
- __be32 epoch; /* epoch of this connection */
- __be16 service_id; /* service ID */
+ u32 call_id; /* call ID on connection */
+ u32 cid; /* connection ID plus channel index */
+ u32 epoch; /* epoch of this connection */
+ u16 service_id; /* service ID */
union { /* Peer IP address for hashing */
__be32 ipv4_addr;
__u8 ipv6_addr[16]; /* Anticipates eventual IPv6 support */
@@ -423,7 +455,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
if (call->state < RXRPC_CALL_COMPLETE) {
call->abort_code = abort_code;
call->state = RXRPC_CALL_LOCALLY_ABORTED;
- set_bit(RXRPC_CALL_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
}
write_unlock_bh(&call->state_lock);
}
@@ -432,7 +464,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
* af_rxrpc.c
*/
extern atomic_t rxrpc_n_skbs;
-extern __be32 rxrpc_epoch;
+extern u32 rxrpc_epoch;
extern atomic_t rxrpc_debug_id;
extern struct workqueue_struct *rxrpc_workqueue;
@@ -453,8 +485,8 @@ extern unsigned rxrpc_rx_window_size;
extern unsigned rxrpc_rx_mtu;
extern unsigned rxrpc_rx_jumbo_max;
-void __rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
-void rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
+void __rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
+void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
void rxrpc_process_call(struct work_struct *);
/*
@@ -466,15 +498,15 @@ extern struct kmem_cache *rxrpc_call_jar;
extern struct list_head rxrpc_calls;
extern rwlock_t rxrpc_call_lock;
-struct rxrpc_call *rxrpc_find_call_hash(u8, __be32, __be32, __be32,
- __be16, void *, sa_family_t, const u8 *);
+struct rxrpc_call *rxrpc_find_call_hash(struct rxrpc_host_header *,
+ void *, sa_family_t, const void *);
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
struct rxrpc_transport *,
struct rxrpc_conn_bundle *,
unsigned long, int, gfp_t);
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
struct rxrpc_connection *,
- struct rxrpc_header *, gfp_t);
+ struct rxrpc_host_header *, gfp_t);
struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *, unsigned long);
void rxrpc_release_call(struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
@@ -490,16 +522,16 @@ extern rwlock_t rxrpc_connection_lock;
struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
struct rxrpc_transport *,
- struct key *, __be16, gfp_t);
+ struct key *, u16, gfp_t);
void rxrpc_put_bundle(struct rxrpc_transport *, struct rxrpc_conn_bundle *);
int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
struct rxrpc_conn_bundle *, struct rxrpc_call *, gfp_t);
void rxrpc_put_connection(struct rxrpc_connection *);
void __exit rxrpc_destroy_all_connections(void);
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
- struct rxrpc_header *);
+ struct rxrpc_host_header *);
extern struct rxrpc_connection *
-rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_header *,
+rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_host_header *,
gfp_t);
/*
@@ -694,7 +726,7 @@ do { \
printk(KERN_ERR "RxRPC: Assertion failed\n"); \
BUG(); \
} \
-} while(0)
+} while (0)
#define ASSERTCMP(X, OP, Y) \
do { \
@@ -707,7 +739,7 @@ do { \
(unsigned long)(X), (unsigned long)(Y)); \
BUG(); \
} \
-} while(0)
+} while (0)
#define ASSERTIF(C, X) \
do { \
@@ -716,7 +748,7 @@ do { \
printk(KERN_ERR "RxRPC: Assertion failed\n"); \
BUG(); \
} \
-} while(0)
+} while (0)
#define ASSERTIFCMP(C, X, OP, Y) \
do { \
@@ -729,25 +761,25 @@ do { \
(unsigned long)(X), (unsigned long)(Y)); \
BUG(); \
} \
-} while(0)
+} while (0)
#else
#define ASSERT(X) \
do { \
-} while(0)
+} while (0)
#define ASSERTCMP(X, OP, Y) \
do { \
-} while(0)
+} while (0)
#define ASSERTIF(C, X) \
do { \
-} while(0)
+} while (0)
#define ASSERTIFCMP(C, X, OP, Y) \
do { \
-} while(0)
+} while (0)
#endif /* __KDEBUGALL */
@@ -804,9 +836,9 @@ do { \
CHECK_SLAB_OKAY(&(CALL)->usage); \
if (atomic_inc_return(&(CALL)->usage) == 1) \
BUG(); \
-} while(0)
+} while (0)
#define rxrpc_put_call(CALL) \
do { \
__rxrpc_put_call(CALL); \
-} while(0)
+} while (0)
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
index 78483b4602bf..4e1e6db0050b 100644
--- a/net/rxrpc/ar-local.c
+++ b/net/rxrpc/ar-local.c
@@ -323,9 +323,11 @@ void __exit rxrpc_destroy_all_locals(void)
* Reply to a version request
*/
static void rxrpc_send_version_request(struct rxrpc_local *local,
- struct rxrpc_header *hdr,
+ struct rxrpc_host_header *hdr,
struct sk_buff *skb)
{
+ struct rxrpc_wire_header whdr;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct sockaddr_in sin;
struct msghdr msg;
struct kvec iov[2];
@@ -344,15 +346,20 @@ static void rxrpc_send_version_request(struct rxrpc_local *local,
msg.msg_controllen = 0;
msg.msg_flags = 0;
- hdr->seq = 0;
- hdr->serial = 0;
- hdr->type = RXRPC_PACKET_TYPE_VERSION;
- hdr->flags = RXRPC_LAST_PACKET | (~hdr->flags & RXRPC_CLIENT_INITIATED);
- hdr->userStatus = 0;
- hdr->_rsvd = 0;
-
- iov[0].iov_base = hdr;
- iov[0].iov_len = sizeof(*hdr);
+ whdr.epoch = htonl(sp->hdr.epoch);
+ whdr.cid = htonl(sp->hdr.cid);
+ whdr.callNumber = htonl(sp->hdr.callNumber);
+ whdr.seq = 0;
+ whdr.serial = 0;
+ whdr.type = RXRPC_PACKET_TYPE_VERSION;
+ whdr.flags = RXRPC_LAST_PACKET | (~hdr->flags & RXRPC_CLIENT_INITIATED);
+ whdr.userStatus = 0;
+ whdr.securityIndex = 0;
+ whdr._rsvd = 0;
+ whdr.serviceId = htons(sp->hdr.serviceId);
+
+ iov[0].iov_base = &whdr;
+ iov[0].iov_len = sizeof(whdr);
iov[1].iov_base = (char *)rxrpc_version_string;
iov[1].iov_len = sizeof(rxrpc_version_string);
@@ -383,7 +390,7 @@ static void rxrpc_process_local_events(struct work_struct *work)
while ((skb = skb_dequeue(&local->event_queue))) {
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- kdebug("{%d},{%u}", local->debug_id, sp->hdr.type);
+ _debug("{%d},{%u}", local->debug_id, sp->hdr.type);
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_VERSION:
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 14c4e12c47b0..14c8df6b7f41 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -111,11 +111,11 @@ static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
if (call->state <= RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_LOCALLY_ABORTED;
call->abort_code = abort_code;
- set_bit(RXRPC_CALL_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
del_timer_sync(&call->resend_timer);
del_timer_sync(&call->ack_timer);
- clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
- clear_bit(RXRPC_CALL_ACK, &call->events);
+ clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
+ clear_bit(RXRPC_CALL_EV_ACK, &call->events);
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
rxrpc_queue_call(call);
}
@@ -136,7 +136,7 @@ int rxrpc_client_sendmsg(struct rxrpc_sock *rx, struct rxrpc_transport *trans,
struct rxrpc_call *call;
unsigned long user_call_ID = 0;
struct key *key;
- __be16 service_id;
+ u16 service_id;
u32 abort_code = 0;
int ret;
@@ -151,11 +151,11 @@ int rxrpc_client_sendmsg(struct rxrpc_sock *rx, struct rxrpc_transport *trans,
bundle = NULL;
if (trans) {
- service_id = rx->service_id;
+ service_id = rx->srx.srx_service;
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx,
msg->msg_name);
- service_id = htons(srx->srx_service);
+ service_id = srx->srx_service;
}
key = rx->key;
if (key && !rx->key->payload.data[0])
@@ -348,7 +348,7 @@ int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb)
/* send the packet with the don't fragment bit set if we currently
* think it's small enough */
- if (skb->len - sizeof(struct rxrpc_header) < trans->peer->maxdata) {
+ if (skb->len - sizeof(struct rxrpc_wire_header) < trans->peer->maxdata) {
down_read(&trans->local->defrag_sem);
/* send the packet by UDP
* - returns -EMSGSIZE if UDP would have to fragment the packet
@@ -401,7 +401,8 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
int ret;
_enter(",{%d},%ld",
- CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz),
+ CIRC_SPACE(call->acks_head, ACCESS_ONCE(call->acks_tail),
+ call->acks_winsz),
*timeo);
add_wait_queue(&call->tx_waitq, &myself);
@@ -409,7 +410,7 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
ret = 0;
- if (CIRC_SPACE(call->acks_head, call->acks_tail,
+ if (CIRC_SPACE(call->acks_head, ACCESS_ONCE(call->acks_tail),
call->acks_winsz) > 0)
break;
if (signal_pending(current)) {
@@ -437,7 +438,7 @@ static inline void rxrpc_instant_resend(struct rxrpc_call *call)
if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
rxrpc_queue_call(call);
}
read_unlock_bh(&call->state_lock);
@@ -480,8 +481,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
write_unlock_bh(&call->state_lock);
}
- _proto("Tx DATA %%%u { #%u }",
- ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
+ _proto("Tx DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);
sp->need_resend = false;
sp->resend_at = jiffies + rxrpc_resend_timeout;
@@ -513,6 +513,29 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
}
/*
+ * Convert a host-endian header into a network-endian header.
+ */
+static void rxrpc_insert_header(struct sk_buff *skb)
+{
+ struct rxrpc_wire_header whdr;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+ whdr.epoch = htonl(sp->hdr.epoch);
+ whdr.cid = htonl(sp->hdr.cid);
+ whdr.callNumber = htonl(sp->hdr.callNumber);
+ whdr.seq = htonl(sp->hdr.seq);
+ whdr.serial = htonl(sp->hdr.serial);
+ whdr.type = sp->hdr.type;
+ whdr.flags = sp->hdr.flags;
+ whdr.userStatus = sp->hdr.userStatus;
+ whdr.securityIndex = sp->hdr.securityIndex;
+ whdr._rsvd = htons(sp->hdr._rsvd);
+ whdr.serviceId = htons(sp->hdr.serviceId);
+
+ memcpy(skb->head, &whdr, sizeof(whdr));
+}
+
+/*
* send data through a socket
* - must be called in process context
* - caller holds the socket locked
@@ -548,7 +571,8 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
_debug("alloc");
- if (CIRC_SPACE(call->acks_head, call->acks_tail,
+ if (CIRC_SPACE(call->acks_head,
+ ACCESS_ONCE(call->acks_tail),
call->acks_winsz) <= 0) {
ret = -EAGAIN;
if (msg->msg_flags & MSG_DONTWAIT)
@@ -650,22 +674,22 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
seq = atomic_inc_return(&call->sequence);
- sp->hdr.epoch = conn->epoch;
- sp->hdr.cid = call->cid;
+ sp->hdr.epoch = conn->epoch;
+ sp->hdr.cid = call->cid;
sp->hdr.callNumber = call->call_id;
- sp->hdr.seq = htonl(seq);
- sp->hdr.serial =
- htonl(atomic_inc_return(&conn->serial));
- sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
+ sp->hdr.seq = seq;
+ sp->hdr.serial = atomic_inc_return(&conn->serial);
+ sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
sp->hdr.userStatus = 0;
sp->hdr.securityIndex = conn->security_ix;
- sp->hdr._rsvd = 0;
- sp->hdr.serviceId = conn->service_id;
+ sp->hdr._rsvd = 0;
+ sp->hdr.serviceId = call->service_id;
sp->hdr.flags = conn->out_clientflag;
if (msg_data_left(msg) == 0 && !more)
sp->hdr.flags |= RXRPC_LAST_PACKET;
- else if (CIRC_SPACE(call->acks_head, call->acks_tail,
+ else if (CIRC_SPACE(call->acks_head,
+ ACCESS_ONCE(call->acks_tail),
call->acks_winsz) > 1)
sp->hdr.flags |= RXRPC_MORE_PACKETS;
if (more && seq & 1)
@@ -673,12 +697,11 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
ret = rxrpc_secure_packet(
call, skb, skb->mark,
- skb->head + sizeof(struct rxrpc_header));
+ skb->head + sizeof(struct rxrpc_wire_header));
if (ret < 0)
goto out;
- memcpy(skb->head, &sp->hdr,
- sizeof(struct rxrpc_header));
+ rxrpc_insert_header(skb);
rxrpc_queue_packet(call, skb, !msg_data_left(msg) && !more);
skb = NULL;
}
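
The CIRC_SPACE() call sites above gain ACCESS_ONCE() on call->acks_tail because the consumer advances that index concurrently; without it the compiler may legally cache the load across the wait loop and never observe progress. A minimal sketch of the pattern, assuming the power-of-two ring macros from linux/circ_buf.h:

	#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))

	/* from linux/circ_buf.h: free space in a power-of-two ring */
	#define CIRC_SPACE(head, tail, size) \
		(((tail) - ((head) + 1)) & ((size) - 1))

	/* producer side: re-read the consumer index on every check */
	static bool tx_window_has_space(unsigned int head,
					unsigned int *tail,
					unsigned int size)
	{
		return CIRC_SPACE(head, ACCESS_ONCE(*tail), size) > 0;
	}
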
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index bebaa43484bc..dc089b1976aa 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -92,7 +92,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
BUG();
}
- peer->hdrsize += sizeof(struct rxrpc_header);
+ peer->hdrsize += sizeof(struct rxrpc_wire_header);
peer->maxdata = peer->mtu - peer->hdrsize;
}
diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
index 38047f713f2c..525b2ba5a8f4 100644
--- a/net/rxrpc/ar-proc.c
+++ b/net/rxrpc/ar-proc.c
@@ -74,9 +74,9 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
" %-8.8s %08x %lx\n",
lbuff,
rbuff,
- ntohs(call->conn->service_id),
- ntohl(call->conn->cid),
- ntohl(call->call_id),
+ call->conn->service_id,
+ call->cid,
+ call->call_id,
call->conn->in_clientflag ? "Svc" : "Clt",
atomic_read(&call->usage),
rxrpc_call_states[call->state],
@@ -157,8 +157,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
" %s %08x %08x %08x\n",
lbuff,
rbuff,
- ntohs(conn->service_id),
- ntohl(conn->cid),
+ conn->service_id,
+ conn->cid,
conn->call_counter,
conn->in_clientflag ? "Svc" : "Clt",
atomic_read(&conn->usage),
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index b92beded7459..64facba24a45 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -33,7 +33,7 @@ void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call)
read_lock_bh(&call->state_lock);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
rxrpc_queue_call(call);
read_unlock_bh(&call->state_lock);
}
@@ -158,7 +158,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
goto receive_non_data_message;
_debug("recvmsg DATA #%u { %d, %d }",
- ntohl(sp->hdr.seq), skb->len, sp->offset);
+ sp->hdr.seq, skb->len, sp->offset);
if (!continue_call) {
/* only set the control data once per recvmsg() */
@@ -169,11 +169,11 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
}
- ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
- ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
- call->rx_data_recv = ntohl(sp->hdr.seq);
+ ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
+ ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
+ call->rx_data_recv = sp->hdr.seq;
- ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);
+ ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
offset = sp->offset;
copy = skb->len - offset;
@@ -364,11 +364,11 @@ void rxrpc_kernel_data_delivered(struct sk_buff *skb)
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_call *call = sp->call;
- ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
- ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
- call->rx_data_recv = ntohl(sp->hdr.seq);
+ ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
+ ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
+ call->rx_data_recv = sp->hdr.seq;
- ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);
+ ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
rxrpc_free_skb(skb);
}
diff --git a/net/rxrpc/ar-security.c b/net/rxrpc/ar-security.c
index 8334474eb26c..ceff6394a65f 100644
--- a/net/rxrpc/ar-security.c
+++ b/net/rxrpc/ar-security.c
@@ -167,11 +167,11 @@ int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
struct rxrpc_sock *rx;
struct key *key;
key_ref_t kref;
- char kdesc[5+1+3+1];
+ char kdesc[5 + 1 + 3 + 1];
_enter("");
- sprintf(kdesc, "%u:%u", ntohs(conn->service_id), conn->security_ix);
+ sprintf(kdesc, "%u:%u", conn->service_id, conn->security_ix);
sec = rxrpc_security_lookup(conn->security_ix);
if (!sec) {
@@ -182,7 +182,7 @@ int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
/* find the service */
read_lock_bh(&local->services_lock);
list_for_each_entry(rx, &local->services, listen_link) {
- if (rx->service_id == conn->service_id)
+ if (rx->srx.srx_service == conn->service_id)
goto found_service;
}
diff --git a/net/rxrpc/ar-skbuff.c b/net/rxrpc/ar-skbuff.c
index 4cfab49e329d..62a267472fce 100644
--- a/net/rxrpc/ar-skbuff.c
+++ b/net/rxrpc/ar-skbuff.c
@@ -34,7 +34,7 @@ static void rxrpc_request_final_ACK(struct rxrpc_call *call)
/* get an extra ref on the call for the final-ACK generator to
* release */
rxrpc_get_call(call);
- set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
+ set_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
if (try_to_del_timer_sync(&call->ack_timer) >= 0)
rxrpc_queue_call(call);
break;
@@ -59,7 +59,7 @@ static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
spin_lock_bh(&call->lock);
- _debug("hard ACK #%u", ntohl(sp->hdr.seq));
+ _debug("hard ACK #%u", sp->hdr.seq);
for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
call->ackr_window[loop] >>= 1;
@@ -67,7 +67,7 @@ static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
call->ackr_window[loop + 1] << (BITS_PER_LONG - 1);
}
- seq = ntohl(sp->hdr.seq);
+ seq = sp->hdr.seq;
ASSERTCMP(seq, ==, call->rx_data_eaten + 1);
call->rx_data_eaten = seq;
@@ -133,5 +133,4 @@ void rxrpc_kernel_free_skb(struct sk_buff *skb)
{
rxrpc_free_skb(skb);
}
-
EXPORT_SYMBOL(rxrpc_kernel_free_skb);
diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
index 9946467f16b4..5f9b9d462f53 100644
--- a/net/rxrpc/ar-transport.c
+++ b/net/rxrpc/ar-transport.c
@@ -51,6 +51,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
spin_lock_init(&trans->client_lock);
rwlock_init(&trans->conn_lock);
atomic_set(&trans->usage, 1);
+ trans->conn_idcounter = peer->srx.srx_service << 16;
trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
if (peer->srx.transport.family == AF_INET) {
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index d7a9ab5a9d9c..3106a0c4960b 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -132,8 +132,8 @@ static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
desc.info = iv.x;
desc.flags = 0;
- tmpbuf.x[0] = conn->epoch;
- tmpbuf.x[1] = conn->cid;
+ tmpbuf.x[0] = htonl(conn->epoch);
+ tmpbuf.x[1] = htonl(conn->cid);
tmpbuf.x[2] = 0;
tmpbuf.x[3] = htonl(conn->security_ix);
@@ -142,7 +142,7 @@ static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv));
- ASSERTCMP(conn->csum_iv.n[0], ==, tmpbuf.x[2]);
+ ASSERTCMP((u32 __force)conn->csum_iv.n[0], ==, (u32 __force)tmpbuf.x[2]);
_leave("");
}
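
The (u32 __force) casts are for sparse: csum_iv.n[0] holds raw wire bits while tmpbuf.x[2] is __be32, and the assertion deliberately compares bit patterns rather than converting. __force tells sparse that the byte-order-crossing cast is intentional; under gcc it expands to nothing, so there is no runtime effect. Paraphrasing linux/compiler.h:

	#ifdef __CHECKER__		/* defined when sparse runs */
	# define __force	__attribute__((force))
	#else
	# define __force
	#endif

	static inline u32 be32_raw_bits(__be32 v)
	{
		return (u32 __force)v;	/* reinterpret bits, no conversion */
	}
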
@@ -169,8 +169,8 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
_enter("");
- check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
- data_size |= (u32) check << 16;
+ check = sp->hdr.seq ^ sp->hdr.callNumber;
+ data_size |= (u32)check << 16;
tmpbuf.hdr.data_size = htonl(data_size);
memcpy(&tmpbuf.first, sechdr + 4, sizeof(tmpbuf.first));
@@ -195,9 +195,9 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
* wholly encrypt a packet (level 2 security)
*/
static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
- struct sk_buff *skb,
- u32 data_size,
- void *sechdr)
+ struct sk_buff *skb,
+ u32 data_size,
+ void *sechdr)
{
const struct rxrpc_key_token *token;
struct rxkad_level2_hdr rxkhdr
@@ -215,9 +215,9 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
_enter("");
- check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
+ check = sp->hdr.seq ^ sp->hdr.callNumber;
- rxkhdr.data_size = htonl(data_size | (u32) check << 16);
+ rxkhdr.data_size = htonl(data_size | (u32)check << 16);
rxkhdr.checksum = 0;
/* encrypt from the session key */
@@ -251,9 +251,9 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
* checksum an RxRPC packet header
*/
static int rxkad_secure_packet(const struct rxrpc_call *call,
- struct sk_buff *skb,
- size_t data_size,
- void *sechdr)
+ struct sk_buff *skb,
+ size_t data_size,
+ void *sechdr)
{
struct rxrpc_skb_priv *sp;
struct blkcipher_desc desc;
@@ -262,14 +262,13 @@ static int rxkad_secure_packet(const struct rxrpc_call *call,
struct {
__be32 x[2];
} tmpbuf __attribute__((aligned(8))); /* must all be in same page */
- __be32 x;
- u32 y;
+ u32 x, y;
int ret;
sp = rxrpc_skb(skb);
_enter("{%d{%x}},{#%u},%zu,",
- call->debug_id, key_serial(call->conn->key), ntohl(sp->hdr.seq),
+ call->debug_id, key_serial(call->conn->key), sp->hdr.seq,
data_size);
if (!call->conn->cipher)
@@ -286,10 +285,10 @@ static int rxkad_secure_packet(const struct rxrpc_call *call,
desc.flags = 0;
/* calculate the security checksum */
- x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
- x |= sp->hdr.seq & cpu_to_be32(0x3fffffff);
- tmpbuf.x[0] = sp->hdr.callNumber;
- tmpbuf.x[1] = x;
+ x = call->channel << (32 - RXRPC_CIDSHIFT);
+ x |= sp->hdr.seq & 0x3fffffff;
+ tmpbuf.x[0] = htonl(sp->hdr.callNumber);
+ tmpbuf.x[1] = htonl(x);
sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
@@ -299,7 +298,7 @@ static int rxkad_secure_packet(const struct rxrpc_call *call,
y = (y >> 16) & 0xffff;
if (y == 0)
y = 1; /* zero checksums are not permitted */
- sp->hdr.cksum = htons(y);
+ sp->hdr.cksum = y;
switch (call->conn->security_level) {
case RXRPC_SECURITY_PLAIN:
@@ -368,7 +367,7 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
data_size = buf & 0xffff;
check = buf >> 16;
- check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
+ check ^= sp->hdr.seq ^ sp->hdr.callNumber;
check &= 0xffff;
if (check != 0) {
*_abort_code = RXKADSEALEDINCON;
@@ -453,7 +452,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
data_size = buf & 0xffff;
check = buf >> 16;
- check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
+ check ^= sp->hdr.seq ^ sp->hdr.callNumber;
check &= 0xffff;
if (check != 0) {
*_abort_code = RXKADSEALEDINCON;
@@ -494,16 +493,14 @@ static int rxkad_verify_packet(const struct rxrpc_call *call,
struct {
__be32 x[2];
} tmpbuf __attribute__((aligned(8))); /* must all be in same page */
- __be32 x;
- __be16 cksum;
- u32 y;
+ u16 cksum;
+ u32 x, y;
int ret;
sp = rxrpc_skb(skb);
_enter("{%d{%x}},{#%u}",
- call->debug_id, key_serial(call->conn->key),
- ntohl(sp->hdr.seq));
+ call->debug_id, key_serial(call->conn->key), sp->hdr.seq);
if (!call->conn->cipher)
return 0;
@@ -521,21 +518,20 @@ static int rxkad_verify_packet(const struct rxrpc_call *call,
desc.flags = 0;
/* validate the security checksum */
- x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
- x |= sp->hdr.seq & cpu_to_be32(0x3fffffff);
- tmpbuf.x[0] = call->call_id;
- tmpbuf.x[1] = x;
+ x = call->channel << (32 - RXRPC_CIDSHIFT);
+ x |= sp->hdr.seq & 0x3fffffff;
+ tmpbuf.x[0] = htonl(call->call_id);
+ tmpbuf.x[1] = htonl(x);
sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
y = ntohl(tmpbuf.x[1]);
- y = (y >> 16) & 0xffff;
- if (y == 0)
- y = 1; /* zero checksums are not permitted */
+ cksum = (y >> 16) & 0xffff;
+ if (cksum == 0)
+ cksum = 1; /* zero checksums are not permitted */
- cksum = htons(y);
if (sp->hdr.cksum != cksum) {
*_abort_code = RXKADSEALEDINCON;
_leave(" = -EPROTO [csum failed]");
@@ -567,10 +563,11 @@ static int rxkad_verify_packet(const struct rxrpc_call *call,
static int rxkad_issue_challenge(struct rxrpc_connection *conn)
{
struct rxkad_challenge challenge;
- struct rxrpc_header hdr;
+ struct rxrpc_wire_header whdr;
struct msghdr msg;
struct kvec iov[2];
size_t len;
+ u32 serial;
int ret;
_enter("{%d,%x}", conn->debug_id, key_serial(conn->key));
@@ -592,26 +589,27 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
msg.msg_controllen = 0;
msg.msg_flags = 0;
- hdr.epoch = conn->epoch;
- hdr.cid = conn->cid;
- hdr.callNumber = 0;
- hdr.seq = 0;
- hdr.type = RXRPC_PACKET_TYPE_CHALLENGE;
- hdr.flags = conn->out_clientflag;
- hdr.userStatus = 0;
- hdr.securityIndex = conn->security_ix;
- hdr._rsvd = 0;
- hdr.serviceId = conn->service_id;
-
- iov[0].iov_base = &hdr;
- iov[0].iov_len = sizeof(hdr);
+ whdr.epoch = htonl(conn->epoch);
+ whdr.cid = htonl(conn->cid);
+ whdr.callNumber = 0;
+ whdr.seq = 0;
+ whdr.type = RXRPC_PACKET_TYPE_CHALLENGE;
+ whdr.flags = conn->out_clientflag;
+ whdr.userStatus = 0;
+ whdr.securityIndex = conn->security_ix;
+ whdr._rsvd = 0;
+ whdr.serviceId = htons(conn->service_id);
+
+ iov[0].iov_base = &whdr;
+ iov[0].iov_len = sizeof(whdr);
iov[1].iov_base = &challenge;
iov[1].iov_len = sizeof(challenge);
len = iov[0].iov_len + iov[1].iov_len;
- hdr.serial = htonl(atomic_inc_return(&conn->serial));
- _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
+ serial = atomic_inc_return(&conn->serial);
+ whdr.serial = htonl(serial);
+ _proto("Tx CHALLENGE %%%u", serial);
ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
if (ret < 0) {
@@ -627,13 +625,15 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
* send a Kerberos security response
*/
static int rxkad_send_response(struct rxrpc_connection *conn,
- struct rxrpc_header *hdr,
+ struct rxrpc_host_header *hdr,
struct rxkad_response *resp,
const struct rxkad_key *s2)
{
+ struct rxrpc_wire_header whdr;
struct msghdr msg;
struct kvec iov[3];
size_t len;
+ u32 serial;
int ret;
_enter("");
@@ -644,24 +644,26 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
msg.msg_controllen = 0;
msg.msg_flags = 0;
- hdr->epoch = conn->epoch;
- hdr->seq = 0;
- hdr->type = RXRPC_PACKET_TYPE_RESPONSE;
- hdr->flags = conn->out_clientflag;
- hdr->userStatus = 0;
- hdr->_rsvd = 0;
+ memset(&whdr, 0, sizeof(whdr));
+ whdr.epoch = htonl(hdr->epoch);
+ whdr.cid = htonl(hdr->cid);
+ whdr.type = RXRPC_PACKET_TYPE_RESPONSE;
+ whdr.flags = conn->out_clientflag;
+ whdr.securityIndex = hdr->securityIndex;
+ whdr.serviceId = htons(hdr->serviceId);
- iov[0].iov_base = hdr;
- iov[0].iov_len = sizeof(*hdr);
+ iov[0].iov_base = &whdr;
+ iov[0].iov_len = sizeof(whdr);
iov[1].iov_base = resp;
iov[1].iov_len = sizeof(*resp);
- iov[2].iov_base = (void *) s2->ticket;
+ iov[2].iov_base = (void *)s2->ticket;
iov[2].iov_len = s2->ticket_len;
len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
- hdr->serial = htonl(atomic_inc_return(&conn->serial));
- _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
+ serial = atomic_inc_return(&conn->serial);
+ whdr.serial = htonl(serial);
+ _proto("Tx RESPONSE %%%u", serial);
ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
if (ret < 0) {
@@ -770,7 +772,7 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
min_level = ntohl(challenge.min_level);
_proto("Rx CHALLENGE %%%u { v=%u n=%u ml=%u }",
- ntohl(sp->hdr.serial), version, nonce, min_level);
+ sp->hdr.serial, version, nonce, min_level);
abort_code = RXKADINCONSISTENCY;
if (version != RXKAD_VERSION)
@@ -785,22 +787,23 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
/* build the response packet */
memset(&resp, 0, sizeof(resp));
- resp.version = RXKAD_VERSION;
- resp.encrypted.epoch = conn->epoch;
- resp.encrypted.cid = conn->cid;
- resp.encrypted.securityIndex = htonl(conn->security_ix);
+ resp.version = htonl(RXKAD_VERSION);
+ resp.encrypted.epoch = htonl(conn->epoch);
+ resp.encrypted.cid = htonl(conn->cid);
+ resp.encrypted.securityIndex = htonl(conn->security_ix);
+ resp.encrypted.inc_nonce = htonl(nonce + 1);
+ resp.encrypted.level = htonl(conn->security_level);
+ resp.kvno = htonl(token->kad->kvno);
+ resp.ticket_len = htonl(token->kad->ticket_len);
+
resp.encrypted.call_id[0] =
- (conn->channels[0] ? conn->channels[0]->call_id : 0);
+ htonl(conn->channels[0] ? conn->channels[0]->call_id : 0);
resp.encrypted.call_id[1] =
- (conn->channels[1] ? conn->channels[1]->call_id : 0);
+ htonl(conn->channels[1] ? conn->channels[1]->call_id : 0);
resp.encrypted.call_id[2] =
- (conn->channels[2] ? conn->channels[2]->call_id : 0);
+ htonl(conn->channels[2] ? conn->channels[2]->call_id : 0);
resp.encrypted.call_id[3] =
- (conn->channels[3] ? conn->channels[3]->call_id : 0);
- resp.encrypted.inc_nonce = htonl(nonce + 1);
- resp.encrypted.level = htonl(conn->security_level);
- resp.kvno = htonl(token->kad->kvno);
- resp.ticket_len = htonl(token->kad->ticket_len);
+ htonl(conn->channels[3] ? conn->channels[3]->call_id : 0);
/* calculate the response checksum and then do the encryption */
rxkad_calc_response_checksum(&resp);
@@ -1022,7 +1025,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
kvno = ntohl(response.kvno);
sp = rxrpc_skb(skb);
_proto("Rx RESPONSE %%%u { v=%u kv=%u tl=%u }",
- ntohl(sp->hdr.serial), version, kvno, ticket_len);
+ sp->hdr.serial, version, kvno, ticket_len);
abort_code = RXKADINCONSISTENCY;
if (version != RXKAD_VERSION)
@@ -1058,9 +1061,9 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
rxkad_decrypt_response(conn, &response, &session_key);
abort_code = RXKADSEALEDINCON;
- if (response.encrypted.epoch != conn->epoch)
+ if (ntohl(response.encrypted.epoch) != conn->epoch)
goto protocol_error_free;
- if (response.encrypted.cid != conn->cid)
+ if (ntohl(response.encrypted.cid) != conn->cid)
goto protocol_error_free;
if (ntohl(response.encrypted.securityIndex) != conn->security_ix)
goto protocol_error_free;
@@ -1077,7 +1080,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
goto protocol_error_free;
abort_code = RXKADOUTOFSEQUENCE;
- if (response.encrypted.inc_nonce != htonl(conn->security_nonce + 1))
+ if (ntohl(response.encrypted.inc_nonce) != conn->security_nonce + 1)
goto protocol_error_free;
abort_code = RXKADLEVELFAIL;
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index 50a98a910eb1..093547ac2bcd 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -115,7 +115,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (void *)&one,
- .extra1 = (void *)&n_65535,
+ .extra2 = (void *)&n_65535,
},
{
.procname = "rx_jumbo_max",
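
This one-character hunk fixes a duplicated designated initializer: with .extra1 assigned twice, the second assignment silently won, so proc_dointvec_minmax() saw n_65535 as the minimum and NULL (no bound) as the maximum. The corrected entry, reconstructed for clarity (the entry name is inferred from the surrounding context, presumably rx_mtu):

	{
		.procname	= "rx_mtu",		/* inferred */
		.data		= &rxrpc_rx_mtu,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= (void *)&one,		/* minimum: 1 */
		.extra2		= (void *)&n_65535,	/* maximum: 65535 */
	},
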
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 82830824fb1f..b148302bbaf2 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -739,6 +739,28 @@ config NET_ACT_CONNMARK
To compile this code as a module, choose M here: the
module will be called act_connmark.
+config NET_ACT_IFE
+ tristate "Inter-FE action based on IETF ForCES InterFE LFB"
+ depends on NET_CLS_ACT
+ ---help---
+ Say Y here to allow for sourcing and terminating metadata.
+ For details, refer to the netdev01 paper:
+ "Distributing Linux Traffic Control Classifier-Action Subsystem"
+ Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
+
+ To compile this code as a module, choose M here: the
+ module will be called act_ife.
+
+config NET_IFE_SKBMARK
+ tristate "Support to encoding decoding skb mark on IFE action"
+ depends on NET_ACT_IFE
+	---help---
+	  Say Y here to allow encoding and decoding of the skb mark
+	  as IFE metadata.
+
+config NET_IFE_SKBPRIO
+ tristate "Support to encoding decoding skb prio on IFE action"
+ depends on NET_ACT_IFE
+	---help---
+	  Say Y here to allow encoding and decoding of the skb priority
+	  as IFE metadata.
+
config NET_CLS_IND
bool "Incoming device classification"
depends on NET_CLS_U32 || NET_CLS_FW
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 690c1689e090..84bddb373517 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -19,6 +19,9 @@ obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o
obj-$(CONFIG_NET_ACT_VLAN) += act_vlan.o
obj-$(CONFIG_NET_ACT_BPF) += act_bpf.o
obj-$(CONFIG_NET_ACT_CONNMARK) += act_connmark.o
+obj-$(CONFIG_NET_ACT_IFE) += act_ife.o
+obj-$(CONFIG_NET_IFE_SKBMARK) += act_meta_mark.o
+obj-$(CONFIG_NET_IFE_SKBPRIO) += act_meta_skbprio.o
obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
new file mode 100644
index 000000000000..c589a9ba506a
--- /dev/null
+++ b/net/sched/act_ife.c
@@ -0,0 +1,870 @@
+/*
+ * net/sched/act_ife.c	Inter-FE action based on ForCES WG InterFE LFB
+ *
+ * Refer to:
+ * draft-ietf-forces-interfelfb-03
+ * and
+ * netdev01 paper:
+ * "Distributing Linux Traffic Control Classifier-Action
+ * Subsystem"
+ * Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * copyright Jamal Hadi Salim (2015)
+ *
+ */
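+
+/* Example usage (illustrative sketch only; assumes matching iproute2
+ * support for the "ife" action and a locally chosen ethertype such as
+ * the hypothetical 0xDEAD):
+ *
+ *   tc filter add dev eth0 parent 1: protocol ip u32 \
+ *      match ip dst 192.168.0.2/32 flowid 1:2 \
+ *      action ife encode type 0xDEAD allow mark dst 02:15:15:15:15:15
+ */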
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <net/net_namespace.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <uapi/linux/tc_act/tc_ife.h>
+#include <net/tc_act/tc_ife.h>
+#include <linux/etherdevice.h>
+
+#define IFE_TAB_MASK 15
+
+static int ife_net_id;
+static int max_metacnt = IFE_META_MAX + 1;
+
+static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
+ [TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
+ [TCA_IFE_DMAC] = { .len = ETH_ALEN},
+ [TCA_IFE_SMAC] = { .len = ETH_ALEN},
+ [TCA_IFE_TYPE] = { .type = NLA_U16},
+};
+
+/* Caller takes care of presenting data in network order */
+int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
+{
+ u32 *tlv = (u32 *)(skbdata);
+	u16 totlen = nla_total_size(dlen);	/* alignment + hdr */
+ char *dptr = (char *)tlv + NLA_HDRLEN;
+ u32 htlv = attrtype << 16 | totlen;
+
+ *tlv = htonl(htlv);
+ memset(dptr, 0, totlen - NLA_HDRLEN);
+ memcpy(dptr, dval, dlen);
+
+ return totlen;
+}
+EXPORT_SYMBOL_GPL(ife_tlv_meta_encode);
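+
+/* Illustrative layout for a u32 metadatum (e.g. skbmark): the encoder
+ * above emits nla_total_size(4) == 8 bytes, all in network order:
+ *
+ *   0       2       4               8
+ *   +-------+-------+---------------+
+ *   | type  | len=8 | 32-bit value  |
+ *   +-------+-------+---------------+
+ */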
+
+int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
+{
+ if (mi->metaval)
+ return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
+ else
+ return nla_put(skb, mi->metaid, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(ife_get_meta_u32);
+
+int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
+{
+ if (metaval || mi->metaval)
+ return 8; /* T+L+V == 2+2+4 */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ife_check_meta_u32);
+
+int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
+{
+ u32 edata = metaval;
+
+ if (mi->metaval)
+ edata = *(u32 *)mi->metaval;
+ else if (metaval)
+ edata = metaval;
+
+ if (!edata) /* will not encode */
+ return 0;
+
+ edata = htonl(edata);
+ return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
+}
+EXPORT_SYMBOL_GPL(ife_encode_meta_u32);
+
+int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
+{
+ if (mi->metaval)
+ return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
+ else
+ return nla_put(skb, mi->metaid, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(ife_get_meta_u16);
+
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval)
+{
+ mi->metaval = kmemdup(metaval, sizeof(u32), GFP_KERNEL);
+ if (!mi->metaval)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
+
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval)
+{
+ mi->metaval = kmemdup(metaval, sizeof(u16), GFP_KERNEL);
+ if (!mi->metaval)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);
+
+void ife_release_meta_gen(struct tcf_meta_info *mi)
+{
+ kfree(mi->metaval);
+}
+EXPORT_SYMBOL_GPL(ife_release_meta_gen);
+
+int ife_validate_meta_u32(void *val, int len)
+{
+ if (len == 4)
+ return 0;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(ife_validate_meta_u32);
+
+int ife_validate_meta_u16(void *val, int len)
+{
+ /* length will include padding */
+ if (len == NLA_ALIGN(2))
+ return 0;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(ife_validate_meta_u16);
+
+static LIST_HEAD(ifeoplist);
+static DEFINE_RWLOCK(ife_mod_lock);
+
+static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
+{
+ struct tcf_meta_ops *o;
+
+ read_lock(&ife_mod_lock);
+ list_for_each_entry(o, &ifeoplist, list) {
+ if (o->metaid == metaid) {
+ if (!try_module_get(o->owner))
+ o = NULL;
+ read_unlock(&ife_mod_lock);
+ return o;
+ }
+ }
+ read_unlock(&ife_mod_lock);
+
+ return NULL;
+}
+
+int register_ife_op(struct tcf_meta_ops *mops)
+{
+ struct tcf_meta_ops *m;
+
+ if (!mops->metaid || !mops->metatype || !mops->name ||
+ !mops->check_presence || !mops->encode || !mops->decode ||
+ !mops->get || !mops->alloc)
+ return -EINVAL;
+
+ write_lock(&ife_mod_lock);
+
+ list_for_each_entry(m, &ifeoplist, list) {
+ if (m->metaid == mops->metaid ||
+ (strcmp(mops->name, m->name) == 0)) {
+ write_unlock(&ife_mod_lock);
+ return -EEXIST;
+ }
+ }
+
+ if (!mops->release)
+ mops->release = ife_release_meta_gen;
+
+ list_add_tail(&mops->list, &ifeoplist);
+ write_unlock(&ife_mod_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(register_ife_op);
+
+int unregister_ife_op(struct tcf_meta_ops *mops)
+{
+ struct tcf_meta_ops *m;
+ int err = -ENOENT;
+
+ write_lock(&ife_mod_lock);
+ list_for_each_entry(m, &ifeoplist, list) {
+ if (m->metaid == mops->metaid) {
+ list_del(&mops->list);
+ err = 0;
+ break;
+ }
+ }
+ write_unlock(&ife_mod_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(unregister_ife_op);
+
+static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
+{
+ int ret = 0;
+	/* XXX: unfortunately we can't use nla_policy at this point
+	 * because a length of 0 is valid in the case of "allow".
+	 * "use" semantics do enforce a proper length; nla_policy could
+	 * have covered that, but it is hard to use it just for this.
+	 */
+ if (ops->validate)
+ return ops->validate(val, len);
+
+ if (ops->metatype == NLA_U32)
+ ret = ife_validate_meta_u32(val, len);
+ else if (ops->metatype == NLA_U16)
+ ret = ife_validate_meta_u16(val, len);
+
+ return ret;
+}
+
+/* called when adding new meta information
+ * under ife->tcf_lock
+ */
+static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
+ void *val, int len)
+{
+ struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+ int ret = 0;
+
+ if (!ops) {
+ ret = -ENOENT;
+#ifdef CONFIG_MODULES
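+		/* request_module() may sleep, so drop the action spinlock
+		 * (and rtnl, which a freshly loaded module's init may need)
+		 * around it.
+		 */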
+ spin_unlock_bh(&ife->tcf_lock);
+ rtnl_unlock();
+ request_module("ifemeta%u", metaid);
+ rtnl_lock();
+ spin_lock_bh(&ife->tcf_lock);
+ ops = find_ife_oplist(metaid);
+#endif
+ }
+
+ if (ops) {
+ ret = 0;
+ if (len)
+ ret = ife_validate_metatype(ops, val, len);
+
+ module_put(ops->owner);
+ }
+
+ return ret;
+}
+
+/* called when adding new meta information
+ * under ife->tcf_lock
+ */
+static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+ int len)
+{
+ struct tcf_meta_info *mi = NULL;
+ struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+ int ret = 0;
+
+ if (!ops)
+ return -ENOENT;
+
+ mi = kzalloc(sizeof(*mi), GFP_KERNEL);
+ if (!mi) {
+		/* put back what find_ife_oplist took */
+ module_put(ops->owner);
+ return -ENOMEM;
+ }
+
+ mi->metaid = metaid;
+ mi->ops = ops;
+ if (len > 0) {
+ ret = ops->alloc(mi, metaval);
+ if (ret != 0) {
+ kfree(mi);
+ module_put(ops->owner);
+ return ret;
+ }
+ }
+
+ list_add_tail(&mi->metalist, &ife->metalist);
+
+ return ret;
+}
+
+static int use_all_metadata(struct tcf_ife_info *ife)
+{
+ struct tcf_meta_ops *o;
+ int rc = 0;
+ int installed = 0;
+
+ list_for_each_entry(o, &ifeoplist, list) {
+ rc = add_metainfo(ife, o->metaid, NULL, 0);
+ if (rc == 0)
+ installed += 1;
+ }
+
+ if (installed)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
+{
+ struct tcf_meta_info *e;
+ struct nlattr *nest;
+ unsigned char *b = skb_tail_pointer(skb);
+ int total_encoded = 0;
+
+	/* can only happen on decode */
+ if (list_empty(&ife->metalist))
+ return 0;
+
+ nest = nla_nest_start(skb, TCA_IFE_METALST);
+ if (!nest)
+ goto out_nlmsg_trim;
+
+ list_for_each_entry(e, &ife->metalist, metalist) {
+ if (!e->ops->get(skb, e))
+ total_encoded += 1;
+ }
+
+ if (!total_encoded)
+ goto out_nlmsg_trim;
+
+ nla_nest_end(skb, nest);
+
+ return 0;
+
+out_nlmsg_trim:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+/* under ife->tcf_lock */
+static void _tcf_ife_cleanup(struct tc_action *a, int bind)
+{
+ struct tcf_ife_info *ife = a->priv;
+ struct tcf_meta_info *e, *n;
+
+ list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
+ module_put(e->ops->owner);
+ list_del(&e->metalist);
+ if (e->metaval) {
+ if (e->ops->release)
+ e->ops->release(e);
+ else
+ kfree(e->metaval);
+ }
+ kfree(e);
+ }
+}
+
+static void tcf_ife_cleanup(struct tc_action *a, int bind)
+{
+ struct tcf_ife_info *ife = a->priv;
+
+ spin_lock_bh(&ife->tcf_lock);
+ _tcf_ife_cleanup(a, bind);
+ spin_unlock_bh(&ife->tcf_lock);
+}
+
+/* under ife->tcf_lock */
+static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb)
+{
+ int len = 0;
+ int rc = 0;
+ int i = 0;
+ void *val;
+
+ for (i = 1; i < max_metacnt; i++) {
+ if (tb[i]) {
+ val = nla_data(tb[i]);
+ len = nla_len(tb[i]);
+
+ rc = load_metaops_and_vet(ife, i, val, len);
+ if (rc != 0)
+ return rc;
+
+ rc = add_metainfo(ife, i, val, len);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action *a,
+ int ovr, int bind)
+{
+ struct tc_action_net *tn = net_generic(net, ife_net_id);
+ struct nlattr *tb[TCA_IFE_MAX + 1];
+ struct nlattr *tb2[IFE_META_MAX + 1];
+ struct tcf_ife_info *ife;
+ struct tc_ife *parm;
+ u16 ife_type = 0;
+ u8 *daddr = NULL;
+ u8 *saddr = NULL;
+ int ret = 0;
+ int err;
+
+ err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy);
+ if (err < 0)
+ return err;
+
+ if (!tb[TCA_IFE_PARMS])
+ return -EINVAL;
+
+ parm = nla_data(tb[TCA_IFE_PARMS]);
+
+ if (parm->flags & IFE_ENCODE) {
+		/* Until we get issued the ethertype, we can't have
+		 * a default.
+		 */
+ if (!tb[TCA_IFE_TYPE]) {
+ pr_info("You MUST pass etherype for encoding\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!tcf_hash_check(tn, parm->index, a, bind)) {
+ ret = tcf_hash_create(tn, parm->index, est, a, sizeof(*ife),
+ bind, false);
+ if (ret)
+ return ret;
+ ret = ACT_P_CREATED;
+ } else {
+		if (bind)	/* don't override defaults */
+ return 0;
+ tcf_hash_release(a, bind);
+ if (!ovr)
+ return -EEXIST;
+ }
+
+ ife = to_ife(a);
+ ife->flags = parm->flags;
+
+ if (parm->flags & IFE_ENCODE) {
+ ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
+ if (tb[TCA_IFE_DMAC])
+ daddr = nla_data(tb[TCA_IFE_DMAC]);
+ if (tb[TCA_IFE_SMAC])
+ saddr = nla_data(tb[TCA_IFE_SMAC]);
+ }
+
+ spin_lock_bh(&ife->tcf_lock);
+ ife->tcf_action = parm->action;
+
+ if (parm->flags & IFE_ENCODE) {
+ if (daddr)
+ ether_addr_copy(ife->eth_dst, daddr);
+ else
+ eth_zero_addr(ife->eth_dst);
+
+ if (saddr)
+ ether_addr_copy(ife->eth_src, saddr);
+ else
+ eth_zero_addr(ife->eth_src);
+
+ ife->eth_type = ife_type;
+ }
+
+ if (ret == ACT_P_CREATED)
+ INIT_LIST_HEAD(&ife->metalist);
+
+ if (tb[TCA_IFE_METALST]) {
+ err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST],
+ NULL);
+ if (err) {
+metadata_parse_err:
+ if (ret == ACT_P_CREATED)
+ _tcf_ife_cleanup(a, bind);
+
+ spin_unlock_bh(&ife->tcf_lock);
+ return err;
+ }
+
+ err = populate_metalist(ife, tb2);
+ if (err)
+ goto metadata_parse_err;
+
+ } else {
+		/* If no metadata allow list was passed, or allow-all was
+		 * passed, then add as many supported metadata as we can.
+		 * At least one must be added, else we bail out.
+		 */
+ err = use_all_metadata(ife);
+ if (err) {
+ if (ret == ACT_P_CREATED)
+ _tcf_ife_cleanup(a, bind);
+
+ spin_unlock_bh(&ife->tcf_lock);
+ return err;
+ }
+ }
+
+ spin_unlock_bh(&ife->tcf_lock);
+
+ if (ret == ACT_P_CREATED)
+ tcf_hash_insert(tn, a);
+
+ return ret;
+}
+
+static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ int ref)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tcf_ife_info *ife = a->priv;
+ struct tc_ife opt = {
+ .index = ife->tcf_index,
+ .refcnt = ife->tcf_refcnt - ref,
+ .bindcnt = ife->tcf_bindcnt - bind,
+ .action = ife->tcf_action,
+ .flags = ife->flags,
+ };
+ struct tcf_t t;
+
+ if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
+
+ t.install = jiffies_to_clock_t(jiffies - ife->tcf_tm.install);
+ t.lastuse = jiffies_to_clock_t(jiffies - ife->tcf_tm.lastuse);
+ t.expires = jiffies_to_clock_t(ife->tcf_tm.expires);
+ if (nla_put(skb, TCA_IFE_TM, sizeof(t), &t))
+ goto nla_put_failure;
+
+ if (!is_zero_ether_addr(ife->eth_dst)) {
+ if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, ife->eth_dst))
+ goto nla_put_failure;
+ }
+
+ if (!is_zero_ether_addr(ife->eth_src)) {
+ if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, ife->eth_src))
+ goto nla_put_failure;
+ }
+
+ if (nla_put(skb, TCA_IFE_TYPE, 2, &ife->eth_type))
+ goto nla_put_failure;
+
+ if (dump_metalist(skb, ife)) {
+		/* ignore failure to dump metalist */
+ pr_info("Failed to dump metalist\n");
+ }
+
+ return skb->len;
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
+ u16 metaid, u16 mlen, void *mdata)
+{
+ struct tcf_meta_info *e;
+
+ /* XXX: use hash to speed up */
+ list_for_each_entry(e, &ife->metalist, metalist) {
+ if (metaid == e->metaid) {
+ if (e->ops) {
+ /* We check for decode presence already */
+ return e->ops->decode(skb, mdata, mlen);
+ }
+ }
+ }
+
+ return 0;
+}
+
+struct ifeheadr {
+ __be16 metalen;
+ u8 tlv_data[];
+};
+
+struct meta_tlvhdr {
+ __be16 type;
+ __be16 len;
+};
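+
+/* Decode-side view of an IFE frame (illustrative sketch):
+ *
+ *   | metalen (IFE_METAHDRLEN bytes) | TLV | TLV | ... | original packet |
+ *
+ * metalen covers the length field itself plus all TLVs; each TLV is
+ * 4-byte aligned.
+ */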
+
+static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ struct tcf_ife_info *ife = a->priv;
+ int action = ife->tcf_action;
+ struct ifeheadr *ifehdr = (struct ifeheadr *)skb->data;
+ u16 ifehdrln = ifehdr->metalen;
+ struct meta_tlvhdr *tlv = (struct meta_tlvhdr *)(ifehdr->tlv_data);
+
+ spin_lock(&ife->tcf_lock);
+ bstats_update(&ife->tcf_bstats, skb);
+ ife->tcf_tm.lastuse = jiffies;
+ spin_unlock(&ife->tcf_lock);
+
+ ifehdrln = ntohs(ifehdrln);
+ if (unlikely(!pskb_may_pull(skb, ifehdrln))) {
+ spin_lock(&ife->tcf_lock);
+ ife->tcf_qstats.drops++;
+ spin_unlock(&ife->tcf_lock);
+ return TC_ACT_SHOT;
+ }
+
+ skb_set_mac_header(skb, ifehdrln);
+ __skb_pull(skb, ifehdrln);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ ifehdrln -= IFE_METAHDRLEN;
+
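+	/* Each TLV's len field is the padded nla_total_size()-style length,
+	 * so stepping by mlen lands on the next 4-byte-aligned TLV.
+	 */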
+ while (ifehdrln > 0) {
+ u8 *tlvdata = (u8 *)tlv;
+ u16 mtype = tlv->type;
+ u16 mlen = tlv->len;
+
+ mtype = ntohs(mtype);
+ mlen = ntohs(mlen);
+
+ if (find_decode_metaid(skb, ife, mtype, (mlen - 4),
+ (void *)(tlvdata + 4))) {
+			/* abuse overlimits to count when we receive metadata
+			 * but don't have an ops for it
+			 */
+ pr_info_ratelimited("Unknown metaid %d alnlen %d\n",
+ mtype, mlen);
+ ife->tcf_qstats.overlimits++;
+ }
+
+ tlvdata += mlen;
+ ifehdrln -= mlen;
+ tlv = (struct meta_tlvhdr *)tlvdata;
+ }
+
+ skb_reset_network_header(skb);
+ return action;
+}
+
+/* XXX: check if we can do this at install time instead of in the
+ * current send data path
+ */
+static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
+{
+ struct tcf_meta_info *e, *n;
+ int tot_run_sz = 0, run_sz = 0;
+
+ list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
+ if (e->ops->check_presence) {
+ run_sz = e->ops->check_presence(skb, e);
+ tot_run_sz += run_sz;
+ }
+ }
+
+ return tot_run_sz;
+}
+
+static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ struct tcf_ife_info *ife = a->priv;
+ int action = ife->tcf_action;
+ struct ethhdr *oethh; /* outer ether header */
+ struct ethhdr *iethh; /* inner eth header */
+ struct tcf_meta_info *e;
+	/* OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
+	 * where ORIGDATA = original ethernet header ...
+	 */
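+	/* Sketch: with only a u32 skbmark present, ife_get_sz() returns 8
+	 * and, assuming the 2-byte IFE_METAHDRLEN, a total metadata length
+	 * of 10 is written on the wire.
+	 */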
+ u16 metalen = ife_get_sz(skb, ife);
+ int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
+ unsigned int skboff = skb->dev->hard_header_len;
+ u32 at = G_TC_AT(skb->tc_verd);
+ int new_len = skb->len + hdrm;
+ bool exceed_mtu = false;
+ int err;
+
+ if (at & AT_EGRESS) {
+ if (new_len > skb->dev->mtu)
+ exceed_mtu = true;
+ }
+
+ spin_lock(&ife->tcf_lock);
+ bstats_update(&ife->tcf_bstats, skb);
+ ife->tcf_tm.lastuse = jiffies;
+
+ if (!metalen) { /* no metadata to send */
+		/* abuse overlimits to count when we allow a packet
+		 * with no metadata
+		 */
+ ife->tcf_qstats.overlimits++;
+ spin_unlock(&ife->tcf_lock);
+ return action;
+ }
+	/* Could be a bad policy setup or MTU config,
+	 * so let's be conservative.
+	 */
+ if ((action == TC_ACT_SHOT) || exceed_mtu) {
+ ife->tcf_qstats.drops++;
+ spin_unlock(&ife->tcf_lock);
+ return TC_ACT_SHOT;
+ }
+
+ iethh = eth_hdr(skb);
+
+ err = skb_cow_head(skb, hdrm);
+ if (unlikely(err)) {
+ ife->tcf_qstats.drops++;
+ spin_unlock(&ife->tcf_lock);
+ return TC_ACT_SHOT;
+ }
+
+ if (!(at & AT_EGRESS))
+ skb_push(skb, skb->dev->hard_header_len);
+
+ __skb_push(skb, hdrm);
+ memcpy(skb->data, iethh, skb->mac_len);
+ skb_reset_mac_header(skb);
+ oethh = eth_hdr(skb);
+
+	/* total metadata length */
+ metalen += IFE_METAHDRLEN;
+ metalen = htons(metalen);
+ memcpy((skb->data + skboff), &metalen, IFE_METAHDRLEN);
+ skboff += IFE_METAHDRLEN;
+
+	/* XXX: we don't have a clever way of telling encode to
+	 * not repeat some of the computations that are done by
+	 * ops->check_presence...
+	 */
+ list_for_each_entry(e, &ife->metalist, metalist) {
+ if (e->ops->encode) {
+ err = e->ops->encode(skb, (void *)(skb->data + skboff),
+ e);
+ }
+ if (err < 0) {
+ /* too corrupt to keep around if overwritten */
+ ife->tcf_qstats.drops++;
+ spin_unlock(&ife->tcf_lock);
+ return TC_ACT_SHOT;
+ }
+ skboff += err;
+ }
+
+ if (!is_zero_ether_addr(ife->eth_src))
+ ether_addr_copy(oethh->h_source, ife->eth_src);
+ else
+ ether_addr_copy(oethh->h_source, iethh->h_source);
+ if (!is_zero_ether_addr(ife->eth_dst))
+ ether_addr_copy(oethh->h_dest, ife->eth_dst);
+ else
+ ether_addr_copy(oethh->h_dest, iethh->h_dest);
+ oethh->h_proto = htons(ife->eth_type);
+
+ if (!(at & AT_EGRESS))
+ skb_pull(skb, skb->dev->hard_header_len);
+
+ spin_unlock(&ife->tcf_lock);
+
+ return action;
+}
+
+static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ struct tcf_ife_info *ife = a->priv;
+
+ if (ife->flags & IFE_ENCODE)
+ return tcf_ife_encode(skb, a, res);
+
+ if (!(ife->flags & IFE_ENCODE))
+ return tcf_ife_decode(skb, a, res);
+
+ pr_info_ratelimited("unknown failure(policy neither de/encode\n");
+ spin_lock(&ife->tcf_lock);
+ bstats_update(&ife->tcf_bstats, skb);
+ ife->tcf_tm.lastuse = jiffies;
+ ife->tcf_qstats.drops++;
+ spin_unlock(&ife->tcf_lock);
+
+ return TC_ACT_SHOT;
+}
+
+static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
+ struct netlink_callback *cb, int type,
+ struct tc_action *a)
+{
+ struct tc_action_net *tn = net_generic(net, ife_net_id);
+
+ return tcf_generic_walker(tn, skb, cb, type, a);
+}
+
+static int tcf_ife_search(struct net *net, struct tc_action *a, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, ife_net_id);
+
+ return tcf_hash_search(tn, a, index);
+}
+
+static struct tc_action_ops act_ife_ops = {
+ .kind = "ife",
+ .type = TCA_ACT_IFE,
+ .owner = THIS_MODULE,
+ .act = tcf_ife_act,
+ .dump = tcf_ife_dump,
+ .cleanup = tcf_ife_cleanup,
+ .init = tcf_ife_init,
+ .walk = tcf_ife_walker,
+ .lookup = tcf_ife_search,
+};
+
+static __net_init int ife_init_net(struct net *net)
+{
+ struct tc_action_net *tn = net_generic(net, ife_net_id);
+
+ return tc_action_net_init(tn, &act_ife_ops, IFE_TAB_MASK);
+}
+
+static void __net_exit ife_exit_net(struct net *net)
+{
+ struct tc_action_net *tn = net_generic(net, ife_net_id);
+
+ tc_action_net_exit(tn);
+}
+
+static struct pernet_operations ife_net_ops = {
+ .init = ife_init_net,
+ .exit = ife_exit_net,
+ .id = &ife_net_id,
+ .size = sizeof(struct tc_action_net),
+};
+
+static int __init ife_init_module(void)
+{
+ return tcf_register_action(&act_ife_ops, &ife_net_ops);
+}
+
+static void __exit ife_cleanup_module(void)
+{
+ tcf_unregister_action(&act_ife_ops, &ife_net_ops);
+}
+
+module_init(ife_init_module);
+module_exit(ife_cleanup_module);
+
+MODULE_AUTHOR("Jamal Hadi Salim(2015)");
+MODULE_DESCRIPTION("Inter-FE LFB action");
+MODULE_LICENSE("GPL");
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 89c41a1f3589..350e134cffb3 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -66,6 +66,7 @@ static void ipt_destroy_target(struct xt_entry_target *t)
struct xt_tgdtor_param par = {
.target = t->u.kernel.target,
.targinfo = t->data,
+ .family = NFPROTO_IPV4,
};
if (par.target->destroy != NULL)
par.target->destroy(&par);
@@ -219,6 +220,7 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
par.hooknum = ipt->tcfi_hook;
par.target = ipt->tcfi_t->u.kernel.target;
par.targinfo = ipt->tcfi_t->data;
+ par.family = NFPROTO_IPV4;
ret = par.target->target(skb, &par);
switch (ret) {
diff --git a/net/sched/act_meta_mark.c b/net/sched/act_meta_mark.c
new file mode 100644
index 000000000000..82892170ce4f
--- /dev/null
+++ b/net/sched/act_meta_mark.c
@@ -0,0 +1,79 @@
+/*
+ * net/sched/act_meta_mark.c IFE skb->mark metadata module
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * copyright Jamal Hadi Salim (2015)
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <uapi/linux/tc_act/tc_ife.h>
+#include <net/tc_act/tc_ife.h>
+#include <linux/rtnetlink.h>
+
+static int skbmark_encode(struct sk_buff *skb, void *skbdata,
+ struct tcf_meta_info *e)
+{
+ u32 ifemark = skb->mark;
+
+ return ife_encode_meta_u32(ifemark, skbdata, e);
+}
+
+static int skbmark_decode(struct sk_buff *skb, void *data, u16 len)
+{
+ u32 ifemark = *(u32 *)data;
+
+ skb->mark = ntohl(ifemark);
+ return 0;
+}
+
+static int skbmark_check(struct sk_buff *skb, struct tcf_meta_info *e)
+{
+ return ife_check_meta_u32(skb->mark, e);
+}
+
+static struct tcf_meta_ops ife_skbmark_ops = {
+ .metaid = IFE_META_SKBMARK,
+ .metatype = NLA_U32,
+ .name = "skbmark",
+ .synopsis = "skb mark 32 bit metadata",
+ .check_presence = skbmark_check,
+ .encode = skbmark_encode,
+ .decode = skbmark_decode,
+ .get = ife_get_meta_u32,
+ .alloc = ife_alloc_meta_u32,
+ .release = ife_release_meta_gen,
+ .validate = ife_validate_meta_u32,
+ .owner = THIS_MODULE,
+};
+
+static int __init ifemark_init_module(void)
+{
+ return register_ife_op(&ife_skbmark_ops);
+}
+
+static void __exit ifemark_cleanup_module(void)
+{
+ unregister_ife_op(&ife_skbmark_ops);
+}
+
+module_init(ifemark_init_module);
+module_exit(ifemark_cleanup_module);
+
+MODULE_AUTHOR("Jamal Hadi Salim(2015)");
+MODULE_DESCRIPTION("Inter-FE skb mark metadata module");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_IFE_META(IFE_META_SKBMARK);
diff --git a/net/sched/act_meta_skbprio.c b/net/sched/act_meta_skbprio.c
new file mode 100644
index 000000000000..26bf4d86030b
--- /dev/null
+++ b/net/sched/act_meta_skbprio.c
@@ -0,0 +1,76 @@
+/*
+ * net/sched/act_meta_skbprio.c IFE skb->priority metadata module
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * copyright Jamal Hadi Salim (2015)
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <uapi/linux/tc_act/tc_ife.h>
+#include <net/tc_act/tc_ife.h>
+
+static int skbprio_check(struct sk_buff *skb, struct tcf_meta_info *e)
+{
+ return ife_check_meta_u32(skb->priority, e);
+}
+
+static int skbprio_encode(struct sk_buff *skb, void *skbdata,
+ struct tcf_meta_info *e)
+{
+	u32 ifeprio = skb->priority; /* avoid having to cast skb->priority */
+
+ return ife_encode_meta_u32(ifeprio, skbdata, e);
+}
+
+static int skbprio_decode(struct sk_buff *skb, void *data, u16 len)
+{
+ u32 ifeprio = *(u32 *)data;
+
+ skb->priority = ntohl(ifeprio);
+ return 0;
+}
+
+static struct tcf_meta_ops ife_prio_ops = {
+ .metaid = IFE_META_PRIO,
+ .metatype = NLA_U32,
+ .name = "skbprio",
+ .synopsis = "skb prio metadata",
+ .check_presence = skbprio_check,
+ .encode = skbprio_encode,
+ .decode = skbprio_decode,
+ .get = ife_get_meta_u32,
+ .alloc = ife_alloc_meta_u32,
+ .owner = THIS_MODULE,
+};
+
+static int __init ifeprio_init_module(void)
+{
+ return register_ife_op(&ife_prio_ops);
+}
+
+static void __exit ifeprio_cleanup_module(void)
+{
+ unregister_ife_op(&ife_prio_ops);
+}
+
+module_init(ifeprio_init_module);
+module_exit(ifeprio_cleanup_module);
+
+MODULE_AUTHOR("Jamal Hadi Salim(2015)");
+MODULE_DESCRIPTION("Inter-FE skb prio metadata action");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_IFE_META(IFE_META_PRIO);
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 6b284d991e0b..e8a760cf7775 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -182,7 +182,6 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
skb2->skb_iif = skb->dev->ifindex;
skb2->dev = dev;
- skb_sender_cpu_clear(skb2);
err = dev_queue_xmit(skb2);
if (err) {
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index d54bc942ea87..563cdad76448 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -59,6 +59,7 @@ struct tc_u_knode {
#ifdef CONFIG_CLS_U32_PERF
struct tc_u32_pcnt __percpu *pf;
#endif
+ u32 flags;
#ifdef CONFIG_CLS_U32_MARK
u32 val;
u32 mask;
@@ -434,7 +435,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
- if (dev->netdev_ops->ndo_setup_tc) {
+ if (tc_should_offload(dev, 0)) {
offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
offload.cls_u32->knode.handle = handle;
dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
@@ -442,7 +443,9 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
}
}
-static void u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
+static void u32_replace_hw_hnode(struct tcf_proto *tp,
+ struct tc_u_hnode *h,
+ u32 flags)
{
struct net_device *dev = tp->q->dev_queue->dev;
struct tc_cls_u32_offload u32_offload = {0};
@@ -451,7 +454,7 @@ static void u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
- if (dev->netdev_ops->ndo_setup_tc) {
+ if (tc_should_offload(dev, flags)) {
offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
offload.cls_u32->hnode.divisor = h->divisor;
offload.cls_u32->hnode.handle = h->handle;
@@ -471,7 +474,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
- if (dev->netdev_ops->ndo_setup_tc) {
+ if (tc_should_offload(dev, 0)) {
offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
offload.cls_u32->hnode.divisor = h->divisor;
offload.cls_u32->hnode.handle = h->handle;
@@ -482,7 +485,9 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
}
}
-static void u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n)
+static void u32_replace_hw_knode(struct tcf_proto *tp,
+ struct tc_u_knode *n,
+ u32 flags)
{
struct net_device *dev = tp->q->dev_queue->dev;
struct tc_cls_u32_offload u32_offload = {0};
@@ -491,7 +496,7 @@ static void u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n)
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
- if (dev->netdev_ops->ndo_setup_tc) {
+ if (tc_should_offload(dev, flags)) {
offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
offload.cls_u32->knode.handle = n->handle;
offload.cls_u32->knode.fshift = n->fshift;
@@ -679,6 +684,7 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
[TCA_U32_SEL] = { .len = sizeof(struct tc_u32_sel) },
[TCA_U32_INDEV] = { .type = NLA_STRING, .len = IFNAMSIZ },
[TCA_U32_MARK] = { .len = sizeof(struct tc_u32_mark) },
+ [TCA_U32_FLAGS] = { .type = NLA_U32 },
};
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
@@ -786,6 +792,7 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
#endif
new->fshift = n->fshift;
new->res = n->res;
+ new->flags = n->flags;
RCU_INIT_POINTER(new->ht_down, n->ht_down);
/* bump reference count as long as we hold pointer to structure */
@@ -825,7 +832,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
struct tc_u32_sel *s;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_U32_MAX + 1];
- u32 htid;
+ u32 htid, flags = 0;
int err;
#ifdef CONFIG_CLS_U32_PERF
size_t size;
@@ -838,6 +845,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
return err;
+ if (tb[TCA_U32_FLAGS])
+ flags = nla_get_u32(tb[TCA_U32_FLAGS]);
+
n = (struct tc_u_knode *)*arg;
if (n) {
struct tc_u_knode *new;
@@ -845,6 +855,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (TC_U32_KEY(n->handle) == 0)
return -EINVAL;
+ if (n->flags != flags)
+ return -EINVAL;
+
new = u32_init_knode(tp, n);
if (!new)
return -ENOMEM;
@@ -861,7 +874,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
u32_replace_knode(tp, tp_c, new);
tcf_unbind_filter(tp, &n->res);
call_rcu(&n->rcu, u32_delete_key_rcu);
- u32_replace_hw_knode(tp, new);
+ u32_replace_hw_knode(tp, new, flags);
return 0;
}
@@ -889,7 +902,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
rcu_assign_pointer(tp_c->hlist, ht);
*arg = (unsigned long)ht;
- u32_replace_hw_hnode(tp, ht);
+ u32_replace_hw_hnode(tp, ht, flags);
return 0;
}
@@ -940,6 +953,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
RCU_INIT_POINTER(n->ht_up, ht);
n->handle = handle;
n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
+ n->flags = flags;
tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
n->tp = tp;
@@ -972,7 +986,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
RCU_INIT_POINTER(n->next, pins);
rcu_assign_pointer(*ins, n);
- u32_replace_hw_knode(tp, n);
+ u32_replace_hw_knode(tp, n, flags);
*arg = (unsigned long)n;
return 0;
}
@@ -1077,6 +1091,9 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
goto nla_put_failure;
+ if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
+ goto nla_put_failure;
+
#ifdef CONFIG_CLS_U32_MARK
if ((n->val || n->mask)) {
struct tc_u32_mark mark = {.val = n->val,
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index de1e176e35cc..3b180ff72f79 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -744,14 +744,15 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
return 0;
}
-void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
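+/* qdisc_tree_reduce_backlog - deduct dropped packets/bytes from ancestors
+ * @sch: qdisc the packets were removed from
+ * @n:   number of packets removed
+ * @len: total byte length (qdisc_pkt_len) of the removed packets
+ */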
+void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
+ unsigned int len)
{
const struct Qdisc_class_ops *cops;
unsigned long cl;
u32 parentid;
int drops;
- if (n == 0)
+ if (n == 0 && len == 0)
return;
drops = max_t(int, n, 0);
rcu_read_lock();
@@ -774,11 +775,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
cops->put(sch, cl);
}
sch->q.qlen -= n;
+ sch->qstats.backlog -= len;
__qdisc_qstats_drop(sch, drops);
}
rcu_read_unlock();
}
-EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
+EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
static void notify_and_destroy(struct net *net, struct sk_buff *skb,
struct nlmsghdr *n, u32 clid,
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index c538d9e4a8f6..baafddf229ce 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1624,13 +1624,8 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
new->reshape_fail = cbq_reshape_fail;
#endif
}
- sch_tree_lock(sch);
- *old = cl->q;
- cl->q = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- sch_tree_unlock(sch);
+ *old = qdisc_replace(sch, new, &cl->q);
return 0;
}
@@ -1914,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
- unsigned int qlen;
+ unsigned int qlen, backlog;
if (cl->filters || cl->children || cl == &q->link)
return -EBUSY;
@@ -1922,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
sch_tree_lock(sch);
qlen = cl->q->q.qlen;
+ backlog = cl->q->qstats.backlog;
qdisc_reset(cl->q);
- qdisc_tree_decrease_qlen(cl->q, qlen);
+ qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
if (cl->next_alive)
cbq_deactivate_class(cl);
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 5ffb8b8337c7..0a08c860eee4 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
choke_zap_tail_holes(q);
qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_drop(skb, sch);
- qdisc_tree_decrease_qlen(sch, 1);
--sch->q.qlen;
}
@@ -456,6 +456,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
old = q->tab;
if (old) {
unsigned int oqlen = sch->q.qlen, tail = 0;
+ unsigned dropped = 0;
while (q->head != q->tail) {
struct sk_buff *skb = q->tab[q->head];
@@ -467,11 +468,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
ntab[tail++] = skb;
continue;
}
+ dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
--sch->q.qlen;
qdisc_drop(skb, sch);
}
- qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
+ qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
q->head = 0;
q->tail = tail;
}
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 535007d5f0b5..9b7e2980ee5c 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
- /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
* or HTB crashes. Defer it for next round.
*/
if (q->stats.drop_count && sch->q.qlen) {
- qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
+ qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
q->stats.drop_count = 0;
+ q->stats.drop_len = 0;
}
if (skb)
qdisc_bstats_update(sch, skb);
@@ -116,7 +117,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
{
struct codel_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_CODEL_MAX + 1];
- unsigned int qlen;
+ unsigned int qlen, dropped = 0;
int err;
if (!opt)
@@ -156,10 +157,11 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = __skb_dequeue(&sch->q);
+ dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch);
}
- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index a1cd778240cd..a63e879e8975 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
static void drr_purge_queue(struct drr_class *cl)
{
unsigned int len = cl->qdisc->q.qlen;
+ unsigned int backlog = cl->qdisc->qstats.backlog;
qdisc_reset(cl->qdisc);
- qdisc_tree_decrease_qlen(cl->qdisc, len);
+ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}
static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
@@ -226,11 +227,7 @@ static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
new = &noop_qdisc;
}
- sch_tree_lock(sch);
- drr_purge_queue(cl);
- *old = cl->qdisc;
- cl->qdisc = new;
- sch_tree_unlock(sch);
+ *old = qdisc_replace(sch, new, &cl->qdisc);
return 0;
}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index f357f34d02d2..d0dff0cd8186 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -73,13 +73,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
new = &noop_qdisc;
}
- sch_tree_lock(sch);
- *old = p->q;
- p->q = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- sch_tree_unlock(sch);
-
+ *old = qdisc_replace(sch, new, &p->q);
return 0;
}
@@ -264,6 +258,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return err;
}
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -286,6 +281,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
return NULL;
qdisc_bstats_update(sch, skb);
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
index = skb->tc_index & (p->indices - 1);
@@ -401,6 +397,7 @@ static void dsmark_reset(struct Qdisc *sch)
pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
qdisc_reset(p->q);
+ sch->qstats.backlog = 0;
sch->q.qlen = 0;
}
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 109b2322778f..3c6a47d66a04 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -662,6 +662,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
struct fq_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_FQ_MAX + 1];
int err, drop_count = 0;
+ unsigned drop_len = 0;
u32 fq_log;
if (!opt)
@@ -736,10 +737,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
if (!skb)
break;
+ drop_len += qdisc_pkt_len(skb);
kfree_skb(skb);
drop_count++;
}
- qdisc_tree_decrease_qlen(sch, drop_count);
+ qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
sch_tree_unlock(sch);
return err;
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 4c834e93dafb..d3fc8f9dd3d4 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
- unsigned int idx;
+ unsigned int idx, prev_backlog;
struct fq_codel_flow *flow;
int uninitialized_var(ret);
@@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (++sch->q.qlen <= sch->limit)
return NET_XMIT_SUCCESS;
+ prev_backlog = sch->qstats.backlog;
q->drop_overlimit++;
/* Return Congestion Notification only if we dropped a packet
* from this flow.
@@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_CN;
/* As we dropped a packet, better let upper stack know this */
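+	/* The packet dropped above may have come from a different (fat)
+	 * flow, so report the observed backlog delta rather than this
+	 * skb's length.
+	 */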
- qdisc_tree_decrease_qlen(sch, 1);
+ qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
return NET_XMIT_SUCCESS;
}
@@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
struct fq_codel_flow *flow;
struct list_head *head;
u32 prev_drop_count, prev_ecn_mark;
+ unsigned int prev_backlog;
begin:
head = &q->new_flows;
@@ -259,6 +261,7 @@ begin:
prev_drop_count = q->cstats.drop_count;
prev_ecn_mark = q->cstats.ecn_mark;
+ prev_backlog = sch->qstats.backlog;
skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
dequeue);
@@ -276,12 +279,14 @@ begin:
}
qdisc_bstats_update(sch, skb);
flow->deficit -= qdisc_pkt_len(skb);
- /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
* or HTB crashes. Defer it for next round.
*/
if (q->cstats.drop_count && sch->q.qlen) {
- qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
+ q->cstats.drop_len);
q->cstats.drop_count = 0;
+ q->cstats.drop_len = 0;
}
return skb;
}
@@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = fq_codel_dequeue(sch);
+ q->cstats.drop_len += qdisc_pkt_len(skb);
kfree_skb(skb);
q->cstats.drop_count++;
}
- qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
q->cstats.drop_count = 0;
+ q->cstats.drop_len = 0;
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 16bc83b2842a..f18c35024207 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -567,6 +567,7 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
.dump = pfifo_fast_dump,
.owner = THIS_MODULE,
};
+EXPORT_SYMBOL(pfifo_fast_ops);
static struct lock_class_key qdisc_tx_busylock;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index b7ebe2c87586..d783d7cc3348 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -895,9 +895,10 @@ static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
unsigned int len = cl->qdisc->q.qlen;
+ unsigned int backlog = cl->qdisc->qstats.backlog;
qdisc_reset(cl->qdisc);
- qdisc_tree_decrease_qlen(cl->qdisc, len);
+ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}
static void
@@ -1215,11 +1216,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
new = &noop_qdisc;
}
- sch_tree_lock(sch);
- hfsc_purge_queue(sch, cl);
- *old = cl->qdisc;
- cl->qdisc = new;
- sch_tree_unlock(sch);
+ *old = qdisc_replace(sch, new, &cl->qdisc);
return 0;
}
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 86b04e31e60b..13d6f83ec491 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -382,6 +382,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
struct hhf_sched_data *q = qdisc_priv(sch);
enum wdrr_bucket_idx idx;
struct wdrr_bucket *bucket;
+ unsigned int prev_backlog;
idx = hhf_classify(skb, sch);
@@ -409,6 +410,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (++sch->q.qlen <= sch->limit)
return NET_XMIT_SUCCESS;
+ prev_backlog = sch->qstats.backlog;
q->drop_overlimit++;
/* Return Congestion Notification only if we dropped a packet from this
* bucket.
@@ -417,7 +419,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_CN;
/* As we dropped a packet, better let upper stack know this. */
- qdisc_tree_decrease_qlen(sch, 1);
+ qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
return NET_XMIT_SUCCESS;
}
@@ -527,7 +529,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
{
struct hhf_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_HHF_MAX + 1];
- unsigned int qlen;
+ unsigned int qlen, prev_backlog;
int err;
u64 non_hh_quantum;
u32 new_quantum = q->quantum;
@@ -577,12 +579,14 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
}
qlen = sch->q.qlen;
+ prev_backlog = sch->qstats.backlog;
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = hhf_dequeue(sch);
kfree_skb(skb);
}
- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
+ prev_backlog - sch->qstats.backlog);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 15ccd7f8fb2a..87b02ed3d5f2 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -600,6 +600,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
htb_activate(q, cl);
}
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@@ -889,6 +890,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
ok:
qdisc_bstats_update(sch, skb);
qdisc_unthrottled(sch);
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
return skb;
}
@@ -955,6 +957,7 @@ static unsigned int htb_drop(struct Qdisc *sch)
unsigned int len;
if (cl->un.leaf.q->ops->drop &&
(len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
+ sch->qstats.backlog -= len;
sch->q.qlen--;
if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl);
@@ -984,12 +987,12 @@ static void htb_reset(struct Qdisc *sch)
}
cl->prio_activity = 0;
cl->cmode = HTB_CAN_SEND;
-
}
}
qdisc_watchdog_cancel(&q->watchdog);
__skb_queue_purge(&q->direct_queue);
sch->q.qlen = 0;
+ sch->qstats.backlog = 0;
memset(q->hlevel, 0, sizeof(q->hlevel));
memset(q->row_mask, 0, sizeof(q->row_mask));
for (i = 0; i < TC_HTB_NUMPRIO; i++)
@@ -1163,14 +1166,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
cl->common.classid)) == NULL)
return -ENOBUFS;
- sch_tree_lock(sch);
- *old = cl->un.leaf.q;
- cl->un.leaf.q = new;
- if (*old != NULL) {
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- }
- sch_tree_unlock(sch);
+ *old = qdisc_replace(sch, new, &cl->un.leaf.q);
return 0;
}
@@ -1272,7 +1268,6 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
- unsigned int qlen;
struct Qdisc *new_q = NULL;
int last_child = 0;
@@ -1292,9 +1287,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
sch_tree_lock(sch);
if (!cl->level) {
- qlen = cl->un.leaf.q->q.qlen;
+ unsigned int qlen = cl->un.leaf.q->q.qlen;
+ unsigned int backlog = cl->un.leaf.q->qstats.backlog;
+
qdisc_reset(cl->un.leaf.q);
- qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
+ qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
}
/* delete from hash and active; remainder in destroy_class */
@@ -1428,10 +1425,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
sch_tree_lock(sch);
if (parent && !parent->level) {
unsigned int qlen = parent->un.leaf.q->q.qlen;
+ unsigned int backlog = parent->un.leaf.q->qstats.backlog;
/* turn parent into inner node */
qdisc_reset(parent->un.leaf.q);
- qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
+ qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
qdisc_destroy(parent->un.leaf.q);
if (parent->prio_activity)
htb_deactivate(q, parent);
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 3e82f047caaf..56a77b878eb3 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -57,7 +57,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
dev_queue = netdev_get_tx_queue(dev, ntx);
- qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
+ qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(ntx + 1)));
if (qdisc == NULL)
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index f9947d1f4952..b8002ce3d010 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -125,7 +125,8 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
for (i = 0; i < dev->num_tx_queues; i++) {
dev_queue = netdev_get_tx_queue(dev, i);
- qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
+ qdisc = qdisc_create_dflt(dev_queue,
+ get_default_qdisc_ops(dev, i),
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(i + 1)));
if (qdisc == NULL) {
@@ -142,7 +143,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
*/
if (qopt->hw) {
struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO,
- .tc = qopt->num_tc};
+ { .tc = qopt->num_tc }};
priv->hw_owned = 1;
err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 4e904ca0af9d..bcdd54bb101c 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
if (q->queues[i] != &noop_qdisc) {
struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc;
- qdisc_tree_decrease_qlen(child, child->q.qlen);
+ qdisc_tree_reduce_backlog(child, child->q.qlen,
+ child->qstats.backlog);
qdisc_destroy(child);
}
}
@@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
q->queues[i] = child;
if (old != &noop_qdisc) {
- qdisc_tree_decrease_qlen(old,
- old->q.qlen);
+ qdisc_tree_reduce_backlog(old,
+ old->q.qlen,
+ old->qstats.backlog);
qdisc_destroy(old);
}
sch_tree_unlock(sch);
@@ -303,13 +305,7 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- sch_tree_lock(sch);
- *old = q->queues[band];
- q->queues[band] = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- sch_tree_unlock(sch);
-
+ *old = qdisc_replace(sch, new, &q->queues[band]);
return 0;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5abd1d9de989..9640bb39a5d2 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -598,7 +598,8 @@ deliver:
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
qdisc_qstats_drop(sch);
- qdisc_tree_decrease_qlen(sch, 1);
+ qdisc_tree_reduce_backlog(sch, 1,
+ qdisc_pkt_len(skb));
}
}
goto tfifo_dequeue;
@@ -1037,15 +1038,7 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
{
struct netem_sched_data *q = qdisc_priv(sch);
- sch_tree_lock(sch);
- *old = q->qdisc;
- q->qdisc = new;
- if (*old) {
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- }
- sch_tree_unlock(sch);
-
+ *old = qdisc_replace(sch, new, &q->qdisc);
return 0;
}
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index b783a446d884..71ae3b9629f9 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
{
struct pie_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_PIE_MAX + 1];
- unsigned int qlen;
+ unsigned int qlen, dropped = 0;
int err;
if (!opt)
@@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = __skb_dequeue(&sch->q);
+ dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch);
}
- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index ba6487f2741f..fee1b15506b2 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc;
if (child != &noop_qdisc) {
- qdisc_tree_decrease_qlen(child, child->q.qlen);
+ qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
qdisc_destroy(child);
}
}
@@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
q->queues[i] = child;
if (old != &noop_qdisc) {
- qdisc_tree_decrease_qlen(old,
- old->q.qlen);
+ qdisc_tree_reduce_backlog(old,
+ old->q.qlen,
+ old->qstats.backlog);
qdisc_destroy(old);
}
sch_tree_unlock(sch);
@@ -268,13 +269,7 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- sch_tree_lock(sch);
- *old = q->queues[band];
- q->queues[band] = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- sch_tree_unlock(sch);
-
+ *old = qdisc_replace(sch, new, &q->queues[band]);
return 0;
}
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 3dc3a6e56052..8d2d8d953432 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -220,9 +220,10 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
static void qfq_purge_queue(struct qfq_class *cl)
{
unsigned int len = cl->qdisc->q.qlen;
+ unsigned int backlog = cl->qdisc->qstats.backlog;
qdisc_reset(cl->qdisc);
- qdisc_tree_decrease_qlen(cl->qdisc, len);
+ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}
static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
@@ -617,11 +618,7 @@ static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
new = &noop_qdisc;
}
- sch_tree_lock(sch);
- qfq_purge_queue(cl);
- *old = cl->qdisc;
- cl->qdisc = new;
- sch_tree_unlock(sch);
+ *old = qdisc_replace(sch, new, &cl->qdisc);
return 0;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 6c0534cc7758..8c0508c0e287 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
q->flags = ctl->flags;
q->limit = ctl->limit;
if (child) {
- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+ q->qdisc->qstats.backlog);
qdisc_destroy(q->qdisc);
q->qdisc = child;
}
@@ -313,12 +314,7 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- sch_tree_lock(sch);
- *old = q->qdisc;
- q->qdisc = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- sch_tree_unlock(sch);
+ *old = qdisc_replace(sch, new, &q->qdisc);
return 0;
}
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 5bbb6332ec57..c69611640fa5 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -510,7 +510,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
sch_tree_lock(sch);
- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+ q->qdisc->qstats.backlog);
qdisc_destroy(q->qdisc);
q->qdisc = child;
@@ -606,12 +607,7 @@ static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- sch_tree_lock(sch);
- *old = q->qdisc;
- q->qdisc = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- sch_tree_unlock(sch);
+ *old = qdisc_replace(sch, new, &q->qdisc);
return 0;
}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 3abab534eb5c..498f0a2cb47f 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -346,7 +346,7 @@ static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct sfq_sched_data *q = qdisc_priv(sch);
- unsigned int hash;
+ unsigned int hash, dropped;
sfq_index x, qlen;
struct sfq_slot *slot;
int uninitialized_var(ret);
@@ -461,7 +461,7 @@ enqueue:
return NET_XMIT_SUCCESS;
qlen = slot->qlen;
- sfq_drop(sch);
+ dropped = sfq_drop(sch);
/* Return Congestion Notification only if we dropped a packet
* from this flow.
*/
@@ -469,7 +469,7 @@ enqueue:
return NET_XMIT_CN;
/* As we dropped a packet, better let upper stack know this */
- qdisc_tree_decrease_qlen(sch, 1);
+ qdisc_tree_reduce_backlog(sch, 1, dropped);
return NET_XMIT_SUCCESS;
}
@@ -537,6 +537,7 @@ static void sfq_rehash(struct Qdisc *sch)
struct sfq_slot *slot;
struct sk_buff_head list;
int dropped = 0;
+ unsigned int drop_len = 0;
__skb_queue_head_init(&list);
@@ -565,6 +566,7 @@ static void sfq_rehash(struct Qdisc *sch)
if (x >= SFQ_MAX_FLOWS) {
drop:
qdisc_qstats_backlog_dec(sch, skb);
+ drop_len += qdisc_pkt_len(skb);
kfree_skb(skb);
dropped++;
continue;
@@ -594,7 +596,7 @@ drop:
}
}
sch->q.qlen -= dropped;
- qdisc_tree_decrease_qlen(sch, dropped);
+ qdisc_tree_reduce_backlog(sch, dropped, drop_len);
}
static void sfq_perturbation(unsigned long arg)
@@ -618,7 +620,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
struct sfq_sched_data *q = qdisc_priv(sch);
struct tc_sfq_qopt *ctl = nla_data(opt);
struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
- unsigned int qlen;
+ unsigned int qlen, dropped = 0;
struct red_parms *p = NULL;
if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
@@ -667,8 +669,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
qlen = sch->q.qlen;
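+	/* sfq_drop() returns the byte length of the packet it drops;
+	 * accumulate it for the backlog adjustment below.
+	 */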
while (sch->q.qlen > q->limit)
- sfq_drop(sch);
- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+ dropped += sfq_drop(sch);
+ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
del_timer(&q->perturb_timer);
if (q->perturb_period) {
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index a4afde14e865..c2fbde742f37 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
+ unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
int ret, nb;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
@@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
nskb = segs->next;
segs->next = NULL;
qdisc_skb_cb(segs)->pkt_len = segs->len;
+ len += segs->len;
ret = qdisc_enqueue(segs, q->qdisc);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
@@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
}
sch->q.qlen += nb;
if (nb > 1)
- qdisc_tree_decrease_qlen(sch, 1 - nb);
+ qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
consume_skb(skb);
return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
@@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
sch_tree_lock(sch);
if (child) {
- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+ q->qdisc->qstats.backlog);
qdisc_destroy(q->qdisc);
q->qdisc = child;
}
@@ -502,13 +505,7 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
if (new == NULL)
new = &noop_qdisc;
- sch_tree_lock(sch);
- *old = q->qdisc;
- q->qdisc = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- sch_tree_unlock(sch);
-
+ *old = qdisc_replace(sch, new, &q->qdisc);
return 0;
}
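
For reference, every *_graft() conversion above leans on the qdisc_replace() helper added alongside this series in include/net/sch_generic.h. A minimal sketch of that helper (shape inferred from the call sites, not quoted from this diff):

    static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
                                              struct Qdisc **pold)
    {
            struct Qdisc *old;

            sch_tree_lock(sch);
            old = *pold;
            *pold = new;
            if (old != NULL) {
                    /* drop both the packet count and the byte backlog the
                     * replaced child still accounts for in its parents */
                    qdisc_tree_reduce_backlog(old, old->q.qlen,
                                              old->qstats.backlog);
                    qdisc_reset(old);
            }
            sch_tree_unlock(sch);

            return old;
    }

Centralizing the lock/swap/reduce/reset sequence is what lets each graft path collapse to a single qdisc_replace() line.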
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index ec529121f38a..ce46f1c7f133 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -526,6 +526,8 @@ static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
}
return 0;
}
+ if (addr1->v6.sin6_port != addr2->v6.sin6_port)
+ return 0;
if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
return 0;
/* If this is a linklocal address, compare the scope_id. */
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index 5e68b94ee640..6cc2152e0740 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -65,7 +65,7 @@ static struct {
struct kfifo fifo;
spinlock_t lock;
wait_queue_head_t wait;
- struct timespec tstart;
+ struct timespec64 tstart;
} sctpw;
static __printf(1, 2) void printl(const char *fmt, ...)
@@ -85,7 +85,7 @@ static __printf(1, 2) void printl(const char *fmt, ...)
static int sctpprobe_open(struct inode *inode, struct file *file)
{
kfifo_reset(&sctpw.fifo);
- getnstimeofday(&sctpw.tstart);
+ ktime_get_ts64(&sctpw.tstart);
return 0;
}
@@ -138,7 +138,7 @@ static sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
struct sk_buff *skb = chunk->skb;
struct sctp_transport *sp;
static __u32 lcwnd = 0;
- struct timespec now;
+ struct timespec64 now;
sp = asoc->peer.primary_path;
@@ -149,8 +149,8 @@ static sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
(full || sp->cwnd != lcwnd)) {
lcwnd = sp->cwnd;
- getnstimeofday(&now);
- now = timespec_sub(now, sctpw.tstart);
+ ktime_get_ts64(&now);
+ now = timespec64_sub(now, sctpw.tstart);
printl("%lu.%06lu ", (unsigned long) now.tv_sec,
(unsigned long) now.tv_nsec / NSEC_PER_USEC);
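
The probe.c hunks are a routine y2038 conversion: getnstimeofday()/struct timespec become ktime_get_ts64()/struct timespec64. A self-contained sketch of the same interval measurement (function names here are illustrative):

    #include <linux/timekeeping.h>
    #include <linux/time64.h>

    static struct timespec64 tstart;

    static void probe_open(void)
    {
            ktime_get_ts64(&tstart);        /* 64-bit tv_sec, safe past 2038 */
    }

    static void probe_print_elapsed(void)
    {
            struct timespec64 now;

            ktime_get_ts64(&now);
            now = timespec64_sub(now, tstart);      /* component-wise delta */
            pr_info("%lld.%06ld\n", (long long)now.tv_sec,
                    now.tv_nsec / NSEC_PER_USEC);
    }

Note that ktime_get_ts64() also moves the probe from wall-clock to monotonic time, so the printed deltas can no longer jump across settimeofday() calls.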
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index cfc3c7101a38..5cfac8d5d3b3 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -480,7 +480,7 @@ static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v)
static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
{
struct sctp_association *assoc;
- struct sctp_transport *tsp;
+ struct sctp_transport *transport, *tsp;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX "
@@ -488,10 +488,10 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
return 0;
}
- tsp = (struct sctp_transport *)v;
- if (!sctp_transport_hold(tsp))
+ transport = (struct sctp_transport *)v;
+ if (!sctp_transport_hold(transport))
return 0;
- assoc = tsp->asoc;
+ assoc = transport->asoc;
list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
transports) {
@@ -544,7 +544,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "\n");
}
- sctp_transport_put(tsp);
+ sctp_transport_put(transport);
return 0;
}
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 799e65b944b9..cabf586f47d7 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -740,7 +740,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
default:
printk(KERN_CRIT "%s: bad return from "
"gss_fill_context: %zd\n", __func__, err);
- BUG();
+ gss_msg->msg.errno = -EIO;
}
goto err_release_msg;
}
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2b32fd602669..273bc3a35425 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1225,7 +1225,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
if (bp[0] == '\\' && bp[1] == 'x') {
/* HEX STRING */
bp += 2;
- while (len < bufsize) {
+ while (len < bufsize - 1) {
int h, l;
h = hex_to_bin(bp[0]);
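
The qword_get() change is a one-byte bound fix: the hex-decode loop could previously fill all of bufsize, leaving no room for the terminating NUL written after the loop. The corrected loop shape, simplified:

    while (len < bufsize - 1) {             /* reserve one byte for '\0' */
            int h = hex_to_bin(bp[0]);
            int l = hex_to_bin(bp[1]);

            if (h < 0 || l < 0)
                    break;                  /* end of the hex run */
            dest[len++] = (h << 4) | l;
            bp += 2;
    }
    dest[len] = '\0';                       /* now always in bounds */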
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index cc1251d07297..2dcd7640eeb5 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -341,6 +341,8 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
rqst->rq_reply_bytes_recvd = 0;
rqst->rq_bytes_sent = 0;
rqst->rq_xid = headerp->rm_xid;
+
+ rqst->rq_private_buf.len = size;
set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
buf = &rqst->rq_rcv_buf;
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 47f7da58a7f0..8b5833c1ff2e 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -1093,8 +1093,11 @@ int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
.cb = cb,
.idx = idx,
};
+ int err;
- switchdev_port_obj_dump(dev, &dump.fdb.obj, switchdev_port_fdb_dump_cb);
+ err = switchdev_port_obj_dump(dev, &dump.fdb.obj,
+ switchdev_port_fdb_dump_cb);
+ cb->args[1] = err;
return dump.idx;
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index e401108360a2..ae469b37d852 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -412,11 +412,6 @@ enomem:
return -ENOMEM;
}
-void tipc_bcast_reinit(struct net *net)
-{
- tipc_link_reinit(tipc_bc_sndlink(net), tipc_own_addr(net));
-}
-
void tipc_bcast_stop(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 1944c6c00bb9..d5e79b3767fd 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -46,7 +46,6 @@ struct tipc_node_map;
extern const char tipc_bclink_name[];
int tipc_bcast_init(struct net *net);
-void tipc_bcast_reinit(struct net *net);
void tipc_bcast_stop(struct net *net);
void tipc_bcast_add_peer(struct net *net, struct tipc_link *l,
struct sk_buff_head *xmitq);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 802ffad3200d..27a5406213c6 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -40,6 +40,7 @@
#include "link.h"
#include "discover.h"
#include "bcast.h"
+#include "netlink.h"
#define MAX_ADDR_STR 60
@@ -54,23 +55,6 @@ static struct tipc_media * const media_info_array[] = {
NULL
};
-static const struct nla_policy
-tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = {
- [TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_BEARER_NAME] = {
- .type = NLA_STRING,
- .len = TIPC_MAX_BEARER_NAME
- },
- [TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED },
- [TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 }
-};
-
-static const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = {
- [TIPC_NLA_MEDIA_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_MEDIA_NAME] = { .type = NLA_STRING },
- [TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED }
-};
-
static void bearer_disable(struct net *net, struct tipc_bearer *b);
/**
diff --git a/net/tipc/link.c b/net/tipc/link.c
index e31d92f80572..7d2bb3e70baa 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1,7 +1,7 @@
/*
* net/tipc/link.c: TIPC link code
*
- * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
+ * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
* Copyright (c) 2004-2007, 2010-2013, Wind River Systems
* All rights reserved.
*
@@ -127,6 +127,7 @@ struct tipc_link {
/* Management and link supervision data */
u32 peer_session;
+ u32 session;
u32 peer_bearer_id;
u32 bearer_id;
u32 tolerance;
@@ -136,11 +137,7 @@ struct tipc_link {
u16 peer_caps;
bool active;
u32 silent_intv_cnt;
- struct {
- unchar hdr[INT_H_SIZE];
- unchar body[TIPC_MAX_IF_NAME];
- } proto_msg;
- struct tipc_msg *pmsg;
+ char if_name[TIPC_MAX_IF_NAME];
u32 priority;
char net_plane;
@@ -195,14 +192,6 @@ struct tipc_link {
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";
-/* Properties valid for media, bearar and link */
-static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
- [TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 },
- [TIPC_NLA_PROP_TOL] = { .type = NLA_U32 },
- [TIPC_NLA_PROP_WIN] = { .type = NLA_U32 }
-};
-
/* Send states for broadcast NACKs
*/
enum {
@@ -215,10 +204,11 @@ enum {
* Interval between NACKs when packets arrive out of order
*/
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
-/*
- * Out-of-range value for link session numbers
+
+/* Wildcard value for link session numbers. When it is known that
+ * the peer endpoint is down, any session number must be accepted.
*/
-#define WILDCARD_SESSION 0x10000
+#define ANY_SESSION 0x10000
/* Link FSM states:
*/
@@ -398,16 +388,6 @@ char *tipc_link_name(struct tipc_link *l)
return l->name;
}
-static u32 link_own_addr(struct tipc_link *l)
-{
- return msg_prevnode(l->pmsg);
-}
-
-void tipc_link_reinit(struct tipc_link *l, u32 addr)
-{
- msg_set_prevnode(l->pmsg, addr);
-}
-
/**
* tipc_link_create - create a new link
* @n: pointer to associated node
@@ -441,29 +421,22 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
struct tipc_link **link)
{
struct tipc_link *l;
- struct tipc_msg *hdr;
l = kzalloc(sizeof(*l), GFP_ATOMIC);
if (!l)
return false;
*link = l;
- l->pmsg = (struct tipc_msg *)&l->proto_msg;
- hdr = l->pmsg;
- tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
- msg_set_size(hdr, sizeof(l->proto_msg));
- msg_set_session(hdr, session);
- msg_set_bearer_id(hdr, l->bearer_id);
+ l->session = session;
/* Note: peer i/f name is completed by reset/activate message */
sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
- strcpy((char *)msg_data(hdr), if_name);
-
+ strcpy(l->if_name, if_name);
l->addr = peer;
l->peer_caps = peer_caps;
l->net = net;
- l->peer_session = WILDCARD_SESSION;
+ l->peer_session = ANY_SESSION;
l->bearer_id = bearer_id;
l->tolerance = tolerance;
l->net_plane = net_plane;
@@ -790,7 +763,7 @@ static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
struct tipc_msg *msg = buf_msg(skb_peek(list));
int imp = msg_importance(msg);
u32 oport = msg_origport(msg);
- u32 addr = link_own_addr(link);
+ u32 addr = tipc_own_addr(link->net);
struct sk_buff *skb;
/* This really cannot happen... */
@@ -839,16 +812,9 @@ void link_prepare_wakeup(struct tipc_link *l)
void tipc_link_reset(struct tipc_link *l)
{
- /* Link is down, accept any session */
- l->peer_session = WILDCARD_SESSION;
-
- /* If peer is up, it only accepts an incremented session number */
- msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);
-
- /* Prepare for renewed mtu size negotiation */
+ l->peer_session = ANY_SESSION;
+ l->session++;
l->mtu = l->advertised_mtu;
-
- /* Clean up all queues and counters: */
__skb_queue_purge(&l->transmq);
__skb_queue_purge(&l->deferdq);
skb_queue_splice_init(&l->wakeupq, l->inputq);
@@ -1156,7 +1122,7 @@ int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
/* Broadcast ACK must be sent via a unicast link => defer to caller */
if (link_is_bc_rcvlink(l)) {
- if (((l->rcv_nxt ^ link_own_addr(l)) & 0xf) != 0xf)
+ if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
return 0;
l->rcv_unacked = 0;
return TIPC_LINK_SND_BC_ACK;
@@ -1268,15 +1234,30 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
u16 rcvgap, int tolerance, int priority,
struct sk_buff_head *xmitq)
{
- struct sk_buff *skb = NULL;
- struct tipc_msg *hdr = l->pmsg;
+ struct sk_buff *skb;
+ struct tipc_msg *hdr;
+ struct sk_buff_head *dfq = &l->deferdq;
bool node_up = link_is_up(l->bc_rcvlink);
/* Don't send protocol message during reset or link failover */
if (tipc_link_is_blocked(l))
return;
- msg_set_type(hdr, mtyp);
+ if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
+ return;
+
+ if (!skb_queue_empty(dfq))
+ rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
+
+ skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
+ TIPC_MAX_IF_NAME, l->addr,
+ tipc_own_addr(l->net), 0, 0, 0);
+ if (!skb)
+ return;
+
+ hdr = buf_msg(skb);
+ msg_set_session(hdr, l->session);
+ msg_set_bearer_id(hdr, l->bearer_id);
msg_set_net_plane(hdr, l->net_plane);
msg_set_next_sent(hdr, l->snd_nxt);
msg_set_ack(hdr, l->rcv_nxt - 1);
@@ -1286,36 +1267,23 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
msg_set_linkprio(hdr, priority);
msg_set_redundant_link(hdr, node_up);
msg_set_seq_gap(hdr, 0);
-
- /* Compatibility: created msg must not be in sequence with pkt flow */
msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
if (mtyp == STATE_MSG) {
- if (!tipc_link_is_up(l))
- return;
-
- /* Override rcvgap if there are packets in deferred queue */
- if (!skb_queue_empty(&l->deferdq))
- rcvgap = buf_seqno(skb_peek(&l->deferdq)) - l->rcv_nxt;
- if (rcvgap) {
- msg_set_seq_gap(hdr, rcvgap);
- l->stats.sent_nacks++;
- }
+ msg_set_seq_gap(hdr, rcvgap);
+ msg_set_size(hdr, INT_H_SIZE);
msg_set_probe(hdr, probe);
- if (probe)
- l->stats.sent_probes++;
l->stats.sent_states++;
l->rcv_unacked = 0;
} else {
/* RESET_MSG or ACTIVATE_MSG */
msg_set_max_pkt(hdr, l->advertised_mtu);
- msg_set_ack(hdr, l->rcv_nxt - 1);
- msg_set_next_sent(hdr, 1);
+ strcpy(msg_data(hdr), l->if_name);
}
- skb = tipc_buf_acquire(msg_size(hdr));
- if (!skb)
- return;
- skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
+ if (probe)
+ l->stats.sent_probes++;
+ if (rcvgap)
+ l->stats.sent_nacks++;
skb->priority = TC_PRIO_CONTROL;
__skb_queue_tail(xmitq, skb);
}
@@ -1340,7 +1308,7 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
/* At least one packet required for safe algorithm => add dummy */
skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
- BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
+ BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
0, 0, TIPC_ERR_NO_PORT);
if (!skb) {
pr_warn("%sunable to create tunnel packet\n", link_co_err);
@@ -1351,7 +1319,7 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
__skb_queue_purge(&tmpxq);
/* Initialize reusable tunnel packet header */
- tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
+ tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
mtyp, INT_H_SIZE, l->addr);
pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
msg_set_msgcnt(&tnlhdr, pktcnt);
@@ -1410,7 +1378,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
if (tipc_link_is_blocked(l) || !xmitq)
goto exit;
- if (link_own_addr(l) > msg_prevnode(hdr))
+ if (tipc_own_addr(l->net) > msg_prevnode(hdr))
l->net_plane = msg_net_plane(hdr);
switch (mtyp) {
@@ -1418,7 +1386,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
/* Ignore duplicate RESET with old session number */
if ((less_eq(msg_session(hdr), l->peer_session)) &&
- (l->peer_session != WILDCARD_SESSION))
+ (l->peer_session != ANY_SESSION))
break;
/* fall thru' */
@@ -1515,7 +1483,7 @@ static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
u16 gap_to = peers_snd_nxt - 1;
skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
- 0, l->addr, link_own_addr(l), 0, 0, 0);
+ 0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
if (!skb)
return false;
hdr = buf_msg(skb);
@@ -1670,7 +1638,7 @@ int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
if (mtyp != STATE_MSG)
return 0;
- if (dnode == link_own_addr(l)) {
+ if (dnode == tipc_own_addr(l->net)) {
tipc_link_bc_ack_rcv(l, acked, xmitq);
rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
l->stats.recv_nacks++;
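
The ANY_SESSION rename works because real session numbers are 16 bits wide, so the 0x10000 wildcard can never collide with a live peer session. A hypothetical helper capturing the RESET_MSG acceptance rule from the hunk above (not a function in this patch):

    static bool session_acceptable(struct tipc_link *l, u16 session)
    {
            if (l->peer_session == ANY_SESSION)     /* link reset: accept all */
                    return true;
            /* otherwise drop duplicate RESETs carrying an old session */
            return !less_eq(session, l->peer_session);
    }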
diff --git a/net/tipc/link.h b/net/tipc/link.h
index b4ee9d6e181d..6a94175ee20a 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -86,7 +86,6 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
struct sk_buff_head *namedq,
struct tipc_link *bc_sndlink,
struct tipc_link **link);
-void tipc_link_reinit(struct tipc_link *l, u32 addr);
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
int mtyp, struct sk_buff_head *xmitq);
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 777b979b8463..e190460fe0d3 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -47,12 +47,6 @@
#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */
-static const struct nla_policy
-tipc_nl_name_table_policy[TIPC_NLA_NAME_TABLE_MAX + 1] = {
- [TIPC_NLA_NAME_TABLE_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_NAME_TABLE_PUBL] = { .type = NLA_NESTED }
-};
-
/**
* struct name_info - name sequence publication info
* @node_list: circular list of publications made by own node
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 77bf9113c7a7..28bf4feeb81c 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -41,11 +41,7 @@
#include "socket.h"
#include "node.h"
#include "bcast.h"
-
-static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
- [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_NET_ID] = { .type = NLA_U32 }
-};
+#include "netlink.h"
/*
* The TIPC locking policy is designed to ensure a very fine locking
@@ -116,7 +112,6 @@ int tipc_net_start(struct net *net, u32 addr)
tn->own_addr = addr;
tipc_named_reinit(net);
tipc_sk_reinit(net);
- tipc_bcast_reinit(net);
tipc_nametbl_publish(net, TIPC_CFG_SRV, tn->own_addr, tn->own_addr,
TIPC_ZONE_SCOPE, 0, tn->own_addr);
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 8975b0135b76..56935df2167a 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -55,6 +55,75 @@ static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = {
[TIPC_NLA_NAME_TABLE] = { .type = NLA_NESTED, }
};
+const struct nla_policy
+tipc_nl_name_table_policy[TIPC_NLA_NAME_TABLE_MAX + 1] = {
+ [TIPC_NLA_NAME_TABLE_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_NAME_TABLE_PUBL] = { .type = NLA_NESTED }
+};
+
+const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
+ [TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_SOCK_ADDR] = { .type = NLA_U32 },
+ [TIPC_NLA_SOCK_REF] = { .type = NLA_U32 },
+ [TIPC_NLA_SOCK_CON] = { .type = NLA_NESTED },
+ [TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG }
+};
+
+const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
+ [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_NET_ID] = { .type = NLA_U32 }
+};
+
+const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
+ [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_LINK_NAME] = { .type = NLA_STRING,
+ .len = TIPC_MAX_LINK_NAME },
+ [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
+ [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
+ [TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
+ [TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
+ [TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
+ [TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
+ [TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
+ [TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
+};
+
+const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
+ [TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 },
+ [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG }
+};
+
+/* Properties valid for media, bearer and link */
+const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
+ [TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 },
+ [TIPC_NLA_PROP_TOL] = { .type = NLA_U32 },
+ [TIPC_NLA_PROP_WIN] = { .type = NLA_U32 }
+};
+
+const struct nla_policy tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = {
+ [TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_BEARER_NAME] = { .type = NLA_STRING,
+ .len = TIPC_MAX_BEARER_NAME },
+ [TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED },
+ [TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 }
+};
+
+const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = {
+ [TIPC_NLA_MEDIA_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_MEDIA_NAME] = { .type = NLA_STRING },
+ [TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED }
+};
+
+const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
+ [TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC},
+ [TIPC_NLA_UDP_LOCAL] = {.type = NLA_BINARY,
+ .len = sizeof(struct sockaddr_storage)},
+ [TIPC_NLA_UDP_REMOTE] = {.type = NLA_BINARY,
+ .len = sizeof(struct sockaddr_storage)},
+};
+
/* Users of the legacy API (tipc-config) can't handle that we add operations,
* so we have a separate genl handling for the new API.
*/
diff --git a/net/tipc/netlink.h b/net/tipc/netlink.h
index 08a1db67b927..ed1dbcb4afbd 100644
--- a/net/tipc/netlink.h
+++ b/net/tipc/netlink.h
@@ -35,6 +35,7 @@
#ifndef _TIPC_NETLINK_H
#define _TIPC_NETLINK_H
+#include <net/netlink.h>
extern struct genl_family tipc_genl_family;
int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***buf);
@@ -45,6 +46,16 @@ struct tipc_nl_msg {
u32 seq;
};
+extern const struct nla_policy tipc_nl_name_table_policy[];
+extern const struct nla_policy tipc_nl_sock_policy[];
+extern const struct nla_policy tipc_nl_net_policy[];
+extern const struct nla_policy tipc_nl_link_policy[];
+extern const struct nla_policy tipc_nl_node_policy[];
+extern const struct nla_policy tipc_nl_prop_policy[];
+extern const struct nla_policy tipc_nl_bearer_policy[];
+extern const struct nla_policy tipc_nl_media_policy[];
+extern const struct nla_policy tipc_nl_udp_policy[];
+
int tipc_netlink_start(void);
int tipc_netlink_compat_start(void);
void tipc_netlink_stop(void);
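
With the nla_policy tables now defined once in netlink.c and exported here, each subsystem file parses nested attributes against the same definitions. A minimal consumer sketch, assuming the four-argument nla_parse_nested() of this kernel generation (the function name is illustrative):

    #include <net/netlink.h>
    #include "netlink.h"            /* shared tipc_nl_*_policy tables */

    static int tipc_parse_prop_prio(struct nlattr *attr, u32 *prio)
    {
            struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
            int err;

            err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, attr,
                                   tipc_nl_prop_policy);
            if (err)
                    return err;

            if (props[TIPC_NLA_PROP_PRIO])
                    *prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
            return 0;
    }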
diff --git a/net/tipc/node.c b/net/tipc/node.c
index cdb79503d890..ace178fd3850 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -41,6 +41,7 @@
#include "socket.h"
#include "bcast.h"
#include "discover.h"
+#include "netlink.h"
#define INVALID_NODE_SIG 0x10000
@@ -164,28 +165,6 @@ struct tipc_sock_conn {
struct list_head list;
};
-static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
- [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_LINK_NAME] = {
- .type = NLA_STRING,
- .len = TIPC_MAX_LINK_NAME
- },
- [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
- [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
- [TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
- [TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
- [TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
- [TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
- [TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
- [TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
-};
-
-static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
- [TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 },
- [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG }
-};
-
static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
int bearer_id = n->active_links[sel & 1];
@@ -843,7 +822,7 @@ void tipc_node_check_dest(struct net *net, u32 onode,
memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
tipc_node_write_unlock(n);
- if (reset && !tipc_link_is_reset(l))
+ if (reset && l && !tipc_link_is_reset(l))
tipc_node_link_down(n, b->identity, false);
tipc_node_put(n);
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 69c29050f14a..3eeb50a27b89 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -42,6 +42,7 @@
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
+#include "netlink.h"
#define SS_LISTENING -1 /* socket is listening */
#define SS_READY -2 /* socket is connectionless */
@@ -126,14 +127,6 @@ static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
-static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
- [TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_SOCK_ADDR] = { .type = NLA_U32 },
- [TIPC_NLA_SOCK_REF] = { .type = NLA_U32 },
- [TIPC_NLA_SOCK_CON] = { .type = NLA_NESTED },
- [TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG }
-};
-
static const struct rhashtable_params tsk_rht_params;
/*
@@ -673,7 +666,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
struct tipc_sock *tsk = tipc_sk(sk);
struct net *net = sock_net(sk);
struct tipc_msg *mhdr = &tsk->phdr;
- struct sk_buff_head *pktchain = &sk->sk_write_queue;
+ struct sk_buff_head pktchain;
struct iov_iter save = msg->msg_iter;
uint mtu;
int rc;
@@ -687,14 +680,16 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
msg_set_nameupper(mhdr, seq->upper);
msg_set_hdr_sz(mhdr, MCAST_H_SIZE);
+ skb_queue_head_init(&pktchain);
+
new_mtu:
mtu = tipc_bcast_get_mtu(net);
- rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain);
+ rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain);
if (unlikely(rc < 0))
return rc;
do {
- rc = tipc_bcast_xmit(net, pktchain);
+ rc = tipc_bcast_xmit(net, &pktchain);
if (likely(!rc))
return dsz;
@@ -704,7 +699,7 @@ new_mtu:
if (!rc)
continue;
}
- __skb_queue_purge(pktchain);
+ __skb_queue_purge(&pktchain);
if (rc == -EMSGSIZE) {
msg->msg_iter = save;
goto new_mtu;
@@ -863,7 +858,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
struct net *net = sock_net(sk);
struct tipc_msg *mhdr = &tsk->phdr;
u32 dnode, dport;
- struct sk_buff_head *pktchain = &sk->sk_write_queue;
+ struct sk_buff_head pktchain;
struct sk_buff *skb;
struct tipc_name_seq *seq;
struct iov_iter save;
@@ -924,17 +919,18 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
}
+ skb_queue_head_init(&pktchain);
save = m->msg_iter;
new_mtu:
mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
- rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain);
+ rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain);
if (rc < 0)
return rc;
do {
- skb = skb_peek(pktchain);
+ skb = skb_peek(&pktchain);
TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
- rc = tipc_node_xmit(net, pktchain, dnode, tsk->portid);
+ rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid);
if (likely(!rc)) {
if (sock->state != SS_READY)
sock->state = SS_CONNECTING;
@@ -946,7 +942,7 @@ new_mtu:
if (!rc)
continue;
}
- __skb_queue_purge(pktchain);
+ __skb_queue_purge(&pktchain);
if (rc == -EMSGSIZE) {
m->msg_iter = save;
goto new_mtu;
@@ -1016,7 +1012,7 @@ static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
struct net *net = sock_net(sk);
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_msg *mhdr = &tsk->phdr;
- struct sk_buff_head *pktchain = &sk->sk_write_queue;
+ struct sk_buff_head pktchain;
DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
u32 portid = tsk->portid;
int rc = -EINVAL;
@@ -1044,17 +1040,19 @@ static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
dnode = tsk_peer_node(tsk);
+ skb_queue_head_init(&pktchain);
next:
save = m->msg_iter;
mtu = tsk->max_pkt;
send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
- rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain);
+ rc = tipc_msg_build(mhdr, m, sent, send, mtu, &pktchain);
if (unlikely(rc < 0))
return rc;
+
do {
if (likely(!tsk_conn_cong(tsk))) {
- rc = tipc_node_xmit(net, pktchain, dnode, portid);
+ rc = tipc_node_xmit(net, &pktchain, dnode, portid);
if (likely(!rc)) {
tsk->sent_unacked++;
sent += send;
@@ -1063,7 +1061,7 @@ next:
goto next;
}
if (rc == -EMSGSIZE) {
- __skb_queue_purge(pktchain);
+ __skb_queue_purge(&pktchain);
tsk->max_pkt = tipc_node_get_mtu(net, dnode,
portid);
m->msg_iter = save;
@@ -1077,7 +1075,7 @@ next:
rc = tipc_wait_for_sndpkt(sock, &timeo);
} while (!rc);
- __skb_queue_purge(pktchain);
+ __skb_queue_purge(&pktchain);
return sent ? sent : rc;
}
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 22963cafd5ed..e6cb386fbf34 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -326,7 +326,8 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
return tipc_subscrp_cancel(s, subscriber);
}
- tipc_subscrp_subscribe(net, s, subscriber, swap);
+ if (s)
+ tipc_subscrp_subscribe(net, s, subscriber, swap);
}
/* Handle one request to establish a new subscriber */
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index d63a911e7fe2..49b3c2ede7ab 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -48,20 +48,13 @@
#include <linux/tipc_netlink.h>
#include "core.h"
#include "bearer.h"
+#include "netlink.h"
/* IANA assigned UDP port */
#define UDP_PORT_DEFAULT 6118
#define UDP_MIN_HEADROOM 28
-static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
- [TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC},
- [TIPC_NLA_UDP_LOCAL] = {.type = NLA_BINARY,
- .len = sizeof(struct sockaddr_storage)},
- [TIPC_NLA_UDP_REMOTE] = {.type = NLA_BINARY,
- .len = sizeof(struct sockaddr_storage)},
-};
-
/**
* struct udp_media_addr - IP/UDP addressing information
*
@@ -181,6 +174,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
err = PTR_ERR(rt);
goto tx_error;
}
+
+ skb->dev = rt->dst.dev;
ttl = ip4_dst_hoplimit(&rt->dst);
udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
dst->ipv4.s_addr, 0, ttl, 0, src->udp_port,
@@ -274,7 +269,7 @@ static int parse_options(struct nlattr *attrs[], struct udp_bearer *ub,
struct udp_media_addr *remote)
{
struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
- struct sockaddr_storage *sa_local, *sa_remote;
+ struct sockaddr_storage sa_local, sa_remote;
if (!attrs[TIPC_NLA_BEARER_UDP_OPTS])
goto err;
@@ -283,41 +278,48 @@ static int parse_options(struct nlattr *attrs[], struct udp_bearer *ub,
tipc_nl_udp_policy))
goto err;
if (opts[TIPC_NLA_UDP_LOCAL] && opts[TIPC_NLA_UDP_REMOTE]) {
- sa_local = nla_data(opts[TIPC_NLA_UDP_LOCAL]);
- sa_remote = nla_data(opts[TIPC_NLA_UDP_REMOTE]);
+ nla_memcpy(&sa_local, opts[TIPC_NLA_UDP_LOCAL],
+ sizeof(sa_local));
+ nla_memcpy(&sa_remote, opts[TIPC_NLA_UDP_REMOTE],
+ sizeof(sa_remote));
} else {
err:
pr_err("Invalid UDP bearer configuration");
return -EINVAL;
}
- if ((sa_local->ss_family & sa_remote->ss_family) == AF_INET) {
+ if ((sa_local.ss_family & sa_remote.ss_family) == AF_INET) {
struct sockaddr_in *ip4;
- ip4 = (struct sockaddr_in *)sa_local;
+ ip4 = (struct sockaddr_in *)&sa_local;
local->proto = htons(ETH_P_IP);
local->udp_port = ip4->sin_port;
local->ipv4.s_addr = ip4->sin_addr.s_addr;
- ip4 = (struct sockaddr_in *)sa_remote;
+ ip4 = (struct sockaddr_in *)&sa_remote;
remote->proto = htons(ETH_P_IP);
remote->udp_port = ip4->sin_port;
remote->ipv4.s_addr = ip4->sin_addr.s_addr;
return 0;
#if IS_ENABLED(CONFIG_IPV6)
- } else if ((sa_local->ss_family & sa_remote->ss_family) == AF_INET6) {
+ } else if ((sa_local.ss_family & sa_remote.ss_family) == AF_INET6) {
+ int atype;
struct sockaddr_in6 *ip6;
- ip6 = (struct sockaddr_in6 *)sa_local;
+ ip6 = (struct sockaddr_in6 *)&sa_local;
+ atype = ipv6_addr_type(&ip6->sin6_addr);
+ if (__ipv6_addr_needs_scope_id(atype) && !ip6->sin6_scope_id)
+ return -EINVAL;
+
local->proto = htons(ETH_P_IPV6);
local->udp_port = ip6->sin6_port;
- local->ipv6 = ip6->sin6_addr;
+ memcpy(&local->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr));
ub->ifindex = ip6->sin6_scope_id;
- ip6 = (struct sockaddr_in6 *)sa_remote;
+ ip6 = (struct sockaddr_in6 *)&sa_remote;
remote->proto = htons(ETH_P_IPV6);
remote->udp_port = ip6->sin6_port;
- remote->ipv6 = ip6->sin6_addr;
+ memcpy(&remote->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr));
return 0;
#endif
}
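
The parse_options() rework replaces aliased nla_data() pointers with stack copies. nla_memcpy() bounds the copy by the attribute's actual length, so a short TIPC_NLA_UDP_LOCAL payload can no longer be read past its end or cast to a misaligned sockaddr. The pattern in isolation (the extra memset is defensive and not in the patch itself):

    struct sockaddr_storage sa_local;

    memset(&sa_local, 0, sizeof(sa_local));        /* nla_memcpy() of this era
                                                     * does not zero the tail */
    nla_memcpy(&sa_local, opts[TIPC_NLA_UDP_LOCAL], sizeof(sa_local));

    if (sa_local.ss_family == AF_INET) {
            struct sockaddr_in *ip4 = (struct sockaddr_in *)&sa_local;

            pr_debug("local port %u\n", ntohs(ip4->sin_port));
    }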
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index da72ed32f143..6c606120abfe 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -50,8 +50,8 @@ config CFG80211_DEVELOPER_WARNINGS
default n
help
This option enables some additional warnings that help
- cfg80211 developers and driver developers, but that can
- trigger due to races with userspace.
+ cfg80211 developers and driver developers, but beware that
+ they can also trigger due to races with userspace.
For example, when a driver reports that it was disconnected
from the AP, but the user disconnects manually at the same
@@ -61,19 +61,6 @@ config CFG80211_DEVELOPER_WARNINGS
on it (or mac80211).
-config CFG80211_REG_DEBUG
- bool "cfg80211 regulatory debugging"
- depends on CFG80211
- default n
- ---help---
- You can enable this if you want to debug regulatory changes.
- For more information on cfg80211 regulatory refer to the wireless
- wiki:
-
- http://wireless.kernel.org/en/developers/Regulatory
-
- If unsure, say N.
-
config CFG80211_CERTIFICATION_ONUS
bool "cfg80211 certification onus"
depends on CFG80211 && EXPERT
@@ -123,7 +110,7 @@ config CFG80211_REG_RELAX_NO_IR
interface which associated to an AP which userspace assumes or confirms
to be an authorized master, i.e., with radar detection support and DFS
capabilities. However, note that in order to not create daisy chain
- scenarios, this relaxation is not allowed in cases that the BSS client
+ scenarios, this relaxation is not allowed in cases where the BSS client
is associated to P2P GO and in addition the P2P GO instantiated on
a channel due to this relaxation should not allow connection from
non P2P clients.
@@ -148,7 +135,7 @@ config CFG80211_DEBUGFS
depends on CFG80211
depends on DEBUG_FS
---help---
- You can enable this if you want to debugfs entries for cfg80211.
+ You can enable this if you want debugfs entries for cfg80211.
If unsure, say N.
@@ -159,7 +146,7 @@ config CFG80211_INTERNAL_REGDB
---help---
This option generates an internal data structure representing
the wireless regulatory rules described in net/wireless/db.txt
- and includes code to query that database. This is an alternative
+ and includes code to query that database. This is an alternative
to using CRDA for defining regulatory rules for the kernel.
Using this option requires some parsing of the db.txt at build time,
@@ -172,7 +159,7 @@ config CFG80211_INTERNAL_REGDB
http://wireless.kernel.org/en/developers/Regulatory
- Most distributions have a CRDA package. So if unsure, say N.
+ Most distributions have a CRDA package. So if unsure, say N.
config CFG80211_CRDA_SUPPORT
bool "support CRDA" if CFG80211_INTERNAL_REGDB
diff --git a/net/wireless/core.c b/net/wireless/core.c
index b0915515640e..9f1c4aa851ef 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -352,6 +352,16 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
WARN_ON(ops->add_station && !ops->del_station);
WARN_ON(ops->add_mpath && !ops->del_mpath);
WARN_ON(ops->join_mesh && !ops->leave_mesh);
+ WARN_ON(ops->start_p2p_device && !ops->stop_p2p_device);
+ WARN_ON(ops->start_ap && !ops->stop_ap);
+ WARN_ON(ops->join_ocb && !ops->leave_ocb);
+ WARN_ON(ops->suspend && !ops->resume);
+ WARN_ON(ops->sched_scan_start && !ops->sched_scan_stop);
+ WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
+ WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
+ WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
+ WARN_ON(ops->set_tx_power && !ops->get_tx_power);
+ WARN_ON(ops->set_antenna && !ops->get_antenna);
alloc_size = sizeof(*rdev) + sizeof_priv;
@@ -1147,6 +1157,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
return NOTIFY_DONE;
}
+ wireless_nlevent_flush();
+
return NOTIFY_OK;
}
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index fb44fa3bf4ef..ff328250bc44 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -711,7 +711,7 @@ EXPORT_SYMBOL(cfg80211_rx_mgmt);
void cfg80211_dfs_channels_update_work(struct work_struct *work)
{
- struct delayed_work *delayed_work;
+ struct delayed_work *delayed_work = to_delayed_work(work);
struct cfg80211_registered_device *rdev;
struct cfg80211_chan_def chandef;
struct ieee80211_supported_band *sband;
@@ -721,7 +721,6 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work)
unsigned long timeout, next_time = 0;
int bandid, i;
- delayed_work = container_of(work, struct delayed_work, work);
rdev = container_of(delayed_work, struct cfg80211_registered_device,
dfs_update_channels_wk);
wiphy = &rdev->wiphy;
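
to_delayed_work() is just the type-safe container_of() wrapper from <linux/workqueue.h>; for reference it is essentially:

    static inline struct delayed_work *to_delayed_work(struct work_struct *work)
    {
            return container_of(work, struct delayed_work, work);
    }

Using it directly in the initializer, as the hunk above does, is a small cleanup that drops the open-coded container_of() further down.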
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d4786f2802aa..98c924260b3d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3,7 +3,7 @@
*
* Copyright 2006-2010 Johannes Berg <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright 2015 Intel Deutschland GmbH
+ * Copyright 2015-2016 Intel Deutschland GmbH
*/
#include <linux/if.h>
@@ -401,6 +401,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
[NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
[NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG },
+ [NL80211_ATTR_PBSS] = { .type = NLA_FLAG },
};
/* policy for the key attributes */
@@ -3461,6 +3462,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(params.acl);
}
+ params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+ if (params.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ])
+ return -EOPNOTSUPP;
+
wdev_lock(wdev);
err = rdev_start_ap(rdev, dev, &params);
if (!err) {
@@ -7281,9 +7286,11 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
}
if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) {
- if (!(rdev->wiphy.features &
- NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) ||
- !(rdev->wiphy.features & NL80211_FEATURE_QUIET))
+ if (!((rdev->wiphy.features &
+ NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) &&
+ (rdev->wiphy.features & NL80211_FEATURE_QUIET)) &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_RRM))
return -EINVAL;
req.flags |= ASSOC_REQ_USE_RRM;
}
@@ -7547,7 +7554,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
if ((ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) &&
no_ht) {
- kfree(connkeys);
+ kzfree(connkeys);
return -EINVAL;
}
}
@@ -7971,15 +7978,23 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
}
if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) {
- if (!(rdev->wiphy.features &
- NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) ||
- !(rdev->wiphy.features & NL80211_FEATURE_QUIET)) {
+ if (!((rdev->wiphy.features &
+ NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) &&
+ (rdev->wiphy.features & NL80211_FEATURE_QUIET)) &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_RRM)) {
kzfree(connkeys);
return -EINVAL;
}
connect.flags |= ASSOC_REQ_USE_RRM;
}
+ connect.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+ if (connect.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ]) {
+ kzfree(connkeys);
+ return -EOPNOTSUPP;
+ }
+
wdev_lock(dev->ieee80211_ptr);
err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL);
wdev_unlock(dev->ieee80211_ptr);
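
The relaxed RRM checks now accept either the legacy DS-params-in-probes plus quiet feature pair or the new NL80211_EXT_FEATURE_RRM bit. For reference, wiphy_ext_feature_isset() tests one bit of the wiphy's extended-feature byte array, roughly:

    static inline bool
    wiphy_ext_feature_isset(struct wiphy *wiphy,
                            enum nl80211_ext_feature_index ftidx)
    {
            u8 ft_byte = wiphy->ext_features[ftidx / 8];

            return (ft_byte & BIT(ftidx % 8)) != 0;
    }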
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index 722da616438c..6582d155e2fc 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -43,6 +43,7 @@ static const struct radiotap_align_size rtap_namespace_sizes[] = {
[IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, },
[IEEE80211_RADIOTAP_MCS] = { .align = 1, .size = 3, },
[IEEE80211_RADIOTAP_AMPDU_STATUS] = { .align = 4, .size = 8, },
+ [IEEE80211_RADIOTAP_VHT] = { .align = 2, .size = 12, },
/*
* add more here as they are defined in radiotap.h
*/
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 547ceecc0523..c5fb317eee68 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -60,13 +60,6 @@
#include "regdb.h"
#include "nl80211.h"
-#ifdef CONFIG_CFG80211_REG_DEBUG
-#define REG_DBG_PRINT(format, args...) \
- printk(KERN_DEBUG pr_fmt(format), ##args)
-#else
-#define REG_DBG_PRINT(args...)
-#endif
-
/*
* Grace period we give before making sure all current interfaces reside on
* channels allowed by the current regulatory domain.
@@ -178,12 +171,10 @@ enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy)
if (wiphy_regd->dfs_region == regd->dfs_region)
goto out;
- REG_DBG_PRINT("%s: device specific dfs_region "
- "(%s) disagrees with cfg80211's "
- "central dfs_region (%s)\n",
- dev_name(&wiphy->dev),
- reg_dfs_region_str(wiphy_regd->dfs_region),
- reg_dfs_region_str(regd->dfs_region));
+ pr_debug("%s: device specific dfs_region (%s) disagrees with cfg80211's central dfs_region (%s)\n",
+ dev_name(&wiphy->dev),
+ reg_dfs_region_str(wiphy_regd->dfs_region),
+ reg_dfs_region_str(regd->dfs_region));
out:
return regd->dfs_region;
@@ -543,7 +534,7 @@ static DECLARE_DELAYED_WORK(crda_timeout, crda_timeout_work);
static void crda_timeout_work(struct work_struct *work)
{
- REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
+ pr_debug("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
rtnl_lock();
reg_crda_timeouts++;
restore_regulatory_settings(true);
@@ -585,7 +576,7 @@ static int call_crda(const char *alpha2)
if (!is_world_regdom((char *) alpha2))
pr_debug("Calling CRDA for country: %c%c\n",
- alpha2[0], alpha2[1]);
+ alpha2[0], alpha2[1]);
else
pr_debug("Calling CRDA to update world regulatory domain\n");
@@ -1132,42 +1123,6 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
}
EXPORT_SYMBOL(reg_initiator_name);
-static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
- struct ieee80211_channel *chan,
- const struct ieee80211_reg_rule *reg_rule)
-{
-#ifdef CONFIG_CFG80211_REG_DEBUG
- const struct ieee80211_power_rule *power_rule;
- const struct ieee80211_freq_range *freq_range;
- char max_antenna_gain[32], bw[32];
-
- power_rule = &reg_rule->power_rule;
- freq_range = &reg_rule->freq_range;
-
- if (!power_rule->max_antenna_gain)
- snprintf(max_antenna_gain, sizeof(max_antenna_gain), "N/A");
- else
- snprintf(max_antenna_gain, sizeof(max_antenna_gain), "%d mBi",
- power_rule->max_antenna_gain);
-
- if (reg_rule->flags & NL80211_RRF_AUTO_BW)
- snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO",
- freq_range->max_bandwidth_khz,
- reg_get_max_bandwidth(regd, reg_rule));
- else
- snprintf(bw, sizeof(bw), "%d KHz",
- freq_range->max_bandwidth_khz);
-
- REG_DBG_PRINT("Updating information on frequency %d MHz with regulatory rule:\n",
- chan->center_freq);
-
- REG_DBG_PRINT("(%d KHz - %d KHz @ %s), (%s, %d mBm)\n",
- freq_range->start_freq_khz, freq_range->end_freq_khz,
- bw, max_antenna_gain,
- power_rule->max_eirp);
-#endif
-}
-
static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd,
const struct ieee80211_reg_rule *reg_rule,
const struct ieee80211_channel *chan)
@@ -1242,20 +1197,19 @@ static void handle_channel(struct wiphy *wiphy,
if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
request_wiphy && request_wiphy == wiphy &&
request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
- REG_DBG_PRINT("Disabling freq %d MHz for good\n",
- chan->center_freq);
+ pr_debug("Disabling freq %d MHz for good\n",
+ chan->center_freq);
chan->orig_flags |= IEEE80211_CHAN_DISABLED;
chan->flags = chan->orig_flags;
} else {
- REG_DBG_PRINT("Disabling freq %d MHz\n",
- chan->center_freq);
+ pr_debug("Disabling freq %d MHz\n",
+ chan->center_freq);
chan->flags |= IEEE80211_CHAN_DISABLED;
}
return;
}
regd = reg_get_regdomain(wiphy);
- chan_reg_rule_print_dbg(regd, chan, reg_rule);
power_rule = &reg_rule->power_rule;
bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan);
@@ -1393,18 +1347,15 @@ static bool ignore_reg_update(struct wiphy *wiphy,
return true;
if (!lr) {
- REG_DBG_PRINT("Ignoring regulatory request set by %s "
- "since last_request is not set\n",
- reg_initiator_name(initiator));
+ pr_debug("Ignoring regulatory request set by %s since last_request is not set\n",
+ reg_initiator_name(initiator));
return true;
}
if (initiator == NL80211_REGDOM_SET_BY_CORE &&
wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) {
- REG_DBG_PRINT("Ignoring regulatory request set by %s "
- "since the driver uses its own custom "
- "regulatory domain\n",
- reg_initiator_name(initiator));
+ pr_debug("Ignoring regulatory request set by %s since the driver uses its own custom regulatory domain\n",
+ reg_initiator_name(initiator));
return true;
}
@@ -1415,10 +1366,8 @@ static bool ignore_reg_update(struct wiphy *wiphy,
if (wiphy_strict_alpha2_regd(wiphy) && !wiphy->regd &&
initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
!is_world_regdom(lr->alpha2)) {
- REG_DBG_PRINT("Ignoring regulatory request set by %s "
- "since the driver requires its own regulatory "
- "domain to be set first\n",
- reg_initiator_name(initiator));
+ pr_debug("Ignoring regulatory request set by %s since the driver requires its own regulatory domain to be set first\n",
+ reg_initiator_name(initiator));
return true;
}
@@ -1699,7 +1648,7 @@ static void reg_check_chans_work(struct work_struct *work)
{
struct cfg80211_registered_device *rdev;
- REG_DBG_PRINT("Verifying active interfaces after reg change\n");
+ pr_debug("Verifying active interfaces after reg change\n");
rtnl_lock();
list_for_each_entry(rdev, &cfg80211_rdev_list, list)
@@ -1781,8 +1730,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
}
if (IS_ERR(reg_rule)) {
- REG_DBG_PRINT("Disabling freq %d MHz as custom regd has no rule that fits it\n",
- chan->center_freq);
+ pr_debug("Disabling freq %d MHz as custom regd has no rule that fits it\n",
+ chan->center_freq);
if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {
chan->flags |= IEEE80211_CHAN_DISABLED;
} else {
@@ -1792,8 +1741,6 @@ static void handle_channel_custom(struct wiphy *wiphy,
return;
}
- chan_reg_rule_print_dbg(regd, chan, reg_rule);
-
power_rule = &reg_rule->power_rule;
bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan);
@@ -2524,7 +2471,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
if (is_user_regdom_saved()) {
/* Unless we're asked to ignore it and reset it */
if (reset_user) {
- REG_DBG_PRINT("Restoring regulatory settings including user preference\n");
+ pr_debug("Restoring regulatory settings including user preference\n");
user_alpha2[0] = '9';
user_alpha2[1] = '7';
@@ -2534,24 +2481,24 @@ static void restore_alpha2(char *alpha2, bool reset_user)
* back as they were for a full restore.
*/
if (!is_world_regdom(ieee80211_regdom)) {
- REG_DBG_PRINT("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
- ieee80211_regdom[0], ieee80211_regdom[1]);
+ pr_debug("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
+ ieee80211_regdom[0], ieee80211_regdom[1]);
alpha2[0] = ieee80211_regdom[0];
alpha2[1] = ieee80211_regdom[1];
}
} else {
- REG_DBG_PRINT("Restoring regulatory settings while preserving user preference for: %c%c\n",
- user_alpha2[0], user_alpha2[1]);
+ pr_debug("Restoring regulatory settings while preserving user preference for: %c%c\n",
+ user_alpha2[0], user_alpha2[1]);
alpha2[0] = user_alpha2[0];
alpha2[1] = user_alpha2[1];
}
} else if (!is_world_regdom(ieee80211_regdom)) {
- REG_DBG_PRINT("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
- ieee80211_regdom[0], ieee80211_regdom[1]);
+ pr_debug("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
+ ieee80211_regdom[0], ieee80211_regdom[1]);
alpha2[0] = ieee80211_regdom[0];
alpha2[1] = ieee80211_regdom[1];
} else
- REG_DBG_PRINT("Restoring regulatory settings\n");
+ pr_debug("Restoring regulatory settings\n");
}
static void restore_custom_reg_settings(struct wiphy *wiphy)
@@ -2663,14 +2610,14 @@ static void restore_regulatory_settings(bool reset_user)
list_splice_tail_init(&tmp_reg_req_list, &reg_requests_list);
spin_unlock(&reg_requests_lock);
- REG_DBG_PRINT("Kicking the queue\n");
+ pr_debug("Kicking the queue\n");
schedule_work(&reg_work);
}
void regulatory_hint_disconnect(void)
{
- REG_DBG_PRINT("All devices are disconnected, going to restore regulatory settings\n");
+ pr_debug("All devices are disconnected, going to restore regulatory settings\n");
restore_regulatory_settings(false);
}
@@ -2718,10 +2665,10 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
if (!reg_beacon)
return -ENOMEM;
- REG_DBG_PRINT("Found new beacon on frequency: %d MHz (Ch %d) on %s\n",
- beacon_chan->center_freq,
- ieee80211_frequency_to_channel(beacon_chan->center_freq),
- wiphy_name(wiphy));
+ pr_debug("Found new beacon on frequency: %d MHz (Ch %d) on %s\n",
+ beacon_chan->center_freq,
+ ieee80211_frequency_to_channel(beacon_chan->center_freq),
+ wiphy_name(wiphy));
memcpy(&reg_beacon->chan, beacon_chan,
sizeof(struct ieee80211_channel));
@@ -2800,8 +2747,7 @@ bool reg_supported_dfs_region(enum nl80211_dfs_regions dfs_region)
case NL80211_DFS_JP:
return true;
default:
- REG_DBG_PRINT("Ignoring uknown DFS master region: %d\n",
- dfs_region);
+ pr_debug("Ignoring uknown DFS master region: %d\n", dfs_region);
return false;
}
}
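
Removing CONFIG_CFG80211_REG_DEBUG does not lose the messages: pr_debug() compiles to nothing unless DEBUG is defined, but with CONFIG_DYNAMIC_DEBUG the old verbosity can be restored at runtime, for example:

    /* enable every pr_debug() site in reg.c at runtime:
     *
     *   echo 'file net/wireless/reg.c +p' \
     *        > /sys/kernel/debug/dynamic_debug/control
     */
    pr_debug("Disabling freq %d MHz\n", chan->center_freq);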
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 8020b5b094d4..544558171787 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -264,7 +264,7 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
wdev->conn->params.bssid,
wdev->conn->params.ssid,
wdev->conn->params.ssid_len,
- IEEE80211_BSS_TYPE_ESS,
+ wdev->conn_bss_type,
IEEE80211_PRIVACY(wdev->conn->params.privacy));
if (!bss)
return NULL;
@@ -687,7 +687,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
wdev->ssid, wdev->ssid_len,
- IEEE80211_BSS_TYPE_ESS,
+ wdev->conn_bss_type,
IEEE80211_PRIVACY_ANY);
if (bss)
cfg80211_hold_bss(bss_from_pub(bss));
@@ -846,7 +846,7 @@ void cfg80211_roamed(struct net_device *dev,
bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid,
wdev->ssid_len,
- IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
+ wdev->conn_bss_type, IEEE80211_PRIVACY_ANY);
if (WARN_ON(!bss))
return;
@@ -917,6 +917,12 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
+ /* stop critical protocol if supported */
+ if (rdev->ops->crit_proto_stop && rdev->crit_proto_nlportid) {
+ rdev->crit_proto_nlportid = 0;
+ rdev_crit_proto_stop(rdev, wdev);
+ }
+
/*
* Delete all the keys ... pairwise keys can't really
* exist any more anyway, but default keys might.
@@ -1017,6 +1023,9 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
memcpy(wdev->ssid, connect->ssid, connect->ssid_len);
wdev->ssid_len = connect->ssid_len;
+ wdev->conn_bss_type = connect->pbss ? IEEE80211_BSS_TYPE_PBSS :
+ IEEE80211_BSS_TYPE_ESS;
+
if (!rdev->ops->connect)
err = cfg80211_sme_connect(wdev, connect, prev_bssid);
else
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 92770427b211..9f440a9de63b 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -393,9 +393,9 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
}
EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb);
-unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
+static unsigned int __ieee80211_get_mesh_hdrlen(u8 flags)
{
- int ae = meshhdr->flags & MESH_FLAGS_AE;
+ int ae = flags & MESH_FLAGS_AE;
/* 802.11-2012, 8.2.4.7.3 */
switch (ae) {
default:
@@ -407,21 +407,31 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
return 18;
}
}
+
+unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
+{
+ return __ieee80211_get_mesh_hdrlen(meshhdr->flags);
+}
EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
-int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
- enum nl80211_iftype iftype)
+static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
+ const u8 *addr, enum nl80211_iftype iftype)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- u16 hdrlen, ethertype;
- u8 *payload;
- u8 dst[ETH_ALEN];
- u8 src[ETH_ALEN] __aligned(2);
+ struct {
+ u8 hdr[ETH_ALEN] __aligned(2);
+ __be16 proto;
+ } payload;
+ struct ethhdr tmp;
+ u16 hdrlen;
+ u8 mesh_flags = 0;
if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
return -1;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ if (skb->len < hdrlen + 8)
+ return -1;
/* convert IEEE 802.11 header + possible LLC headers into Ethernet
* header
@@ -432,8 +442,11 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
* 1 0 BSSID SA DA n/a
* 1 1 RA TA DA SA
*/
- memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
- memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
+ memcpy(tmp.h_dest, ieee80211_get_DA(hdr), ETH_ALEN);
+ memcpy(tmp.h_source, ieee80211_get_SA(hdr), ETH_ALEN);
+
+ if (iftype == NL80211_IFTYPE_MESH_POINT)
+ skb_copy_bits(skb, hdrlen, &mesh_flags, 1);
switch (hdr->frame_control &
cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
@@ -450,44 +463,31 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
iftype != NL80211_IFTYPE_STATION))
return -1;
if (iftype == NL80211_IFTYPE_MESH_POINT) {
- struct ieee80211s_hdr *meshdr =
- (struct ieee80211s_hdr *) (skb->data + hdrlen);
- /* make sure meshdr->flags is on the linear part */
- if (!pskb_may_pull(skb, hdrlen + 1))
- return -1;
- if (meshdr->flags & MESH_FLAGS_AE_A4)
+ if (mesh_flags & MESH_FLAGS_AE_A4)
return -1;
- if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
+ if (mesh_flags & MESH_FLAGS_AE_A5_A6) {
skb_copy_bits(skb, hdrlen +
offsetof(struct ieee80211s_hdr, eaddr1),
- dst, ETH_ALEN);
- skb_copy_bits(skb, hdrlen +
- offsetof(struct ieee80211s_hdr, eaddr2),
- src, ETH_ALEN);
+ tmp.h_dest, 2 * ETH_ALEN);
}
- hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
+ hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags);
}
break;
case cpu_to_le16(IEEE80211_FCTL_FROMDS):
if ((iftype != NL80211_IFTYPE_STATION &&
iftype != NL80211_IFTYPE_P2P_CLIENT &&
iftype != NL80211_IFTYPE_MESH_POINT) ||
- (is_multicast_ether_addr(dst) &&
- ether_addr_equal(src, addr)))
+ (is_multicast_ether_addr(tmp.h_dest) &&
+ ether_addr_equal(tmp.h_source, addr)))
return -1;
if (iftype == NL80211_IFTYPE_MESH_POINT) {
- struct ieee80211s_hdr *meshdr =
- (struct ieee80211s_hdr *) (skb->data + hdrlen);
- /* make sure meshdr->flags is on the linear part */
- if (!pskb_may_pull(skb, hdrlen + 1))
- return -1;
- if (meshdr->flags & MESH_FLAGS_AE_A5_A6)
+ if (mesh_flags & MESH_FLAGS_AE_A5_A6)
return -1;
- if (meshdr->flags & MESH_FLAGS_AE_A4)
+ if (mesh_flags & MESH_FLAGS_AE_A4)
skb_copy_bits(skb, hdrlen +
offsetof(struct ieee80211s_hdr, eaddr1),
- src, ETH_ALEN);
- hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
+ tmp.h_source, ETH_ALEN);
+ hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags);
}
break;
case cpu_to_le16(0):
@@ -498,33 +498,33 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
break;
}
- if (!pskb_may_pull(skb, hdrlen + 8))
- return -1;
-
- payload = skb->data + hdrlen;
- ethertype = (payload[6] << 8) | payload[7];
+ skb_copy_bits(skb, hdrlen, &payload, sizeof(payload));
+ tmp.h_proto = payload.proto;
- if (likely((ether_addr_equal(payload, rfc1042_header) &&
- ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
- ether_addr_equal(payload, bridge_tunnel_header))) {
+ if (likely((ether_addr_equal(payload.hdr, rfc1042_header) &&
+ tmp.h_proto != htons(ETH_P_AARP) &&
+ tmp.h_proto != htons(ETH_P_IPX)) ||
+ ether_addr_equal(payload.hdr, bridge_tunnel_header)))
/* remove RFC1042 or Bridge-Tunnel encapsulation and
* replace EtherType */
- skb_pull(skb, hdrlen + 6);
- memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
- memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
- } else {
- struct ethhdr *ehdr;
- __be16 len;
+ hdrlen += ETH_ALEN + 2;
+ else
+ tmp.h_proto = htons(skb->len);
- skb_pull(skb, hdrlen);
- len = htons(skb->len);
+ pskb_pull(skb, hdrlen);
+
+ if (!ehdr)
ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
- memcpy(ehdr->h_dest, dst, ETH_ALEN);
- memcpy(ehdr->h_source, src, ETH_ALEN);
- ehdr->h_proto = len;
- }
+ memcpy(ehdr, &tmp, sizeof(tmp));
+
return 0;
}
+
+int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+ enum nl80211_iftype iftype)
+{
+ return __ieee80211_data_to_8023(skb, NULL, addr, iftype);
+}
EXPORT_SYMBOL(ieee80211_data_to_8023);
int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
@@ -636,7 +636,7 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
/* Update skb pointers to various headers since this modified frame
* is going to go through Linux networking code that may potentially
* need things like pointer to IP header. */
- skb_set_mac_header(skb, 0);
+ skb_reset_mac_header(skb);
skb_set_network_header(skb, nh_pos);
skb_set_transport_header(skb, h_pos);
@@ -644,70 +644,147 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
}
EXPORT_SYMBOL(ieee80211_data_from_8023);
+static void
+__frame_add_frag(struct sk_buff *skb, struct page *page,
+ void *ptr, int len, int size)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ int page_offset;
+
+ atomic_inc(&page->_count);
+ page_offset = ptr - page_address(page);
+ skb_add_rx_frag(skb, sh->nr_frags, page, page_offset, len, size);
+}
+
+static void
+__ieee80211_amsdu_copy_frag(struct sk_buff *skb, struct sk_buff *frame,
+ int offset, int len)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ const skb_frag_t *frag = &sh->frags[-1];
+ struct page *frag_page;
+ void *frag_ptr;
+ int frag_len, frag_size;
+ int head_size = skb->len - skb->data_len;
+ int cur_len;
+
+ frag_page = virt_to_head_page(skb->head);
+ frag_ptr = skb->data;
+ frag_size = head_size;
+
+ while (offset >= frag_size) {
+ offset -= frag_size;
+ frag++;
+ frag_page = skb_frag_page(frag);
+ frag_ptr = skb_frag_address(frag);
+ frag_size = skb_frag_size(frag);
+ }
+
+ frag_ptr += offset;
+ frag_len = frag_size - offset;
+
+ cur_len = min(len, frag_len);
+
+ __frame_add_frag(frame, frag_page, frag_ptr, cur_len, frag_size);
+ len -= cur_len;
+
+ while (len > 0) {
+ frag++;
+ frag_len = skb_frag_size(frag);
+ cur_len = min(len, frag_len);
+ __frame_add_frag(frame, skb_frag_page(frag),
+ skb_frag_address(frag), cur_len, frag_len);
+ len -= cur_len;
+ }
+}
+
+static struct sk_buff *
+__ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
+ int offset, int len, bool reuse_frag)
+{
+ struct sk_buff *frame;
+ int cur_len = len;
+
+ if (skb->len - offset < len)
+ return NULL;
+
+ /*
+ * When reusing fragments, copy some data to the head to simplify
+ * ethernet header handling and speed up protocol header processing
+ * in the stack later.
+ */
+ if (reuse_frag)
+ cur_len = min_t(int, len, 32);
+
+ /*
+ * Allocate and reserve two bytes more for payload
+ * alignment since sizeof(struct ethhdr) is 14.
+ */
+ frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + cur_len);
+
+ skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
+ skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len);
+
+ len -= cur_len;
+ if (!len)
+ return frame;
+
+ offset += cur_len;
+ __ieee80211_amsdu_copy_frag(skb, frame, offset, len);
+
+ return frame;
+}
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
bool has_80211_header)
{
+ unsigned int hlen = ALIGN(extra_headroom, 4);
struct sk_buff *frame = NULL;
u16 ethertype;
u8 *payload;
- const struct ethhdr *eth;
- int remaining, err;
- u8 dst[ETH_ALEN], src[ETH_ALEN];
+ int offset = 0, remaining, err;
+ struct ethhdr eth;
+ bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb);
+ bool reuse_skb = false;
+ bool last = false;
if (has_80211_header) {
- err = ieee80211_data_to_8023(skb, addr, iftype);
+ err = __ieee80211_data_to_8023(skb, &eth, addr, iftype);
if (err)
goto out;
-
- /* skip the wrapping header */
- eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
- if (!eth)
- goto out;
- } else {
- eth = (struct ethhdr *) skb->data;
}
- while (skb != frame) {
+ while (!last) {
+ unsigned int subframe_len;
+ int len;
u8 padding;
- __be16 len = eth->h_proto;
- unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
-
- remaining = skb->len;
- memcpy(dst, eth->h_dest, ETH_ALEN);
- memcpy(src, eth->h_source, ETH_ALEN);
+ skb_copy_bits(skb, offset, &eth, sizeof(eth));
+ len = ntohs(eth.h_proto);
+ subframe_len = sizeof(struct ethhdr) + len;
padding = (4 - subframe_len) & 0x3;
+
/* the last MSDU has no padding */
+ remaining = skb->len - offset;
if (subframe_len > remaining)
goto purge;
- skb_pull(skb, sizeof(struct ethhdr));
+ offset += sizeof(struct ethhdr);
/* reuse skb for the last subframe */
- if (remaining <= subframe_len + padding)
+ last = remaining <= subframe_len + padding;
+ if (!skb_is_nonlinear(skb) && !reuse_frag && last) {
+ skb_pull(skb, offset);
frame = skb;
- else {
- unsigned int hlen = ALIGN(extra_headroom, 4);
- /*
- * Allocate and reserve two bytes more for payload
- * alignment since sizeof(struct ethhdr) is 14.
- */
- frame = dev_alloc_skb(hlen + subframe_len + 2);
+ reuse_skb = true;
+ } else {
+ frame = __ieee80211_amsdu_copy(skb, hlen, offset, len,
+ reuse_frag);
if (!frame)
goto purge;
- skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
- memcpy(skb_put(frame, ntohs(len)), skb->data,
- ntohs(len));
-
- eth = (struct ethhdr *)skb_pull(skb, ntohs(len) +
- padding);
- if (!eth) {
- dev_kfree_skb(frame);
- goto purge;
- }
+ offset += len + padding;
}
skb_reset_network_header(frame);
@@ -716,24 +793,20 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
payload = frame->data;
ethertype = (payload[6] << 8) | payload[7];
-
if (likely((ether_addr_equal(payload, rfc1042_header) &&
ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
ether_addr_equal(payload, bridge_tunnel_header))) {
- /* remove RFC1042 or Bridge-Tunnel
- * encapsulation and replace EtherType */
- skb_pull(frame, 6);
- memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
- memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
- } else {
- memcpy(skb_push(frame, sizeof(__be16)), &len,
- sizeof(__be16));
- memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
- memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
+ eth.h_proto = htons(ethertype);
+ skb_pull(frame, ETH_ALEN + 2);
}
+
+ memcpy(skb_push(frame, sizeof(eth)), &eth, sizeof(eth));
__skb_queue_tail(list, frame);
}
+ if (!reuse_skb)
+ dev_kfree_skb(skb);
+
return;
purge:
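
The fragment walk above (__ieee80211_amsdu_copy_frag) finds which buffer a byte offset lands in by consuming the skb linear head and then whole page fragments. A minimal userspace sketch of that walk, with invented buffer names and sizes standing in for the skb head and its fragments:

#include <stdio.h>

struct buf { const char *data; int len; };

int main(void)
{
	/* stand-ins for the skb linear head plus two page fragments */
	struct buf bufs[] = { { "HEAD", 4 }, { "FRAG0", 5 }, { "FRAG1", 5 } };
	int offset = 7, i = 0;

	/* same shape as the kernel loop: skip whole buffers until the
	 * remaining offset falls inside the current one */
	while (offset >= bufs[i].len) {
		offset -= bufs[i].len;
		i++;
	}
	printf("offset lands in buffer %d at byte %d ('%c')\n",
	       i, offset, bufs[i].data[offset]);
	return 0;
}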
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index c8717c1d082e..b50ee5d622e1 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -342,6 +342,40 @@ static const int compat_event_type_size[] = {
/* IW event code */
+void wireless_nlevent_flush(void)
+{
+ struct sk_buff *skb;
+ struct net *net;
+
+ ASSERT_RTNL();
+
+ for_each_net(net) {
+ while ((skb = skb_dequeue(&net->wext_nlevents)))
+ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
+ GFP_KERNEL);
+ }
+}
+EXPORT_SYMBOL_GPL(wireless_nlevent_flush);
+
+static int wext_netdev_notifier_call(struct notifier_block *nb,
+ unsigned long state, void *ptr)
+{
+ /*
+ * When a netdev changes state in any way, flush all pending messages
+ * to avoid them going out in a strange order, e.g. RTM_NEWLINK after
+ * RTM_DELLINK, or a message with IFF_UP set after one without it
+ * during dev_close() or similar - all of which could otherwise
+ * happen due to delays from schedule_work().
+ */
+ wireless_nlevent_flush();
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block wext_netdev_notifier = {
+ .notifier_call = wext_netdev_notifier_call,
+};
+
static int __net_init wext_pernet_init(struct net *net)
{
skb_queue_head_init(&net->wext_nlevents);
@@ -360,7 +394,12 @@ static struct pernet_operations wext_pernet_ops = {
static int __init wireless_nlevent_init(void)
{
- return register_pernet_subsys(&wext_pernet_ops);
+ int err = register_pernet_subsys(&wext_pernet_ops);
+
+ if (err)
+ return err;
+
+ return register_netdevice_notifier(&wext_netdev_notifier);
}
subsys_initcall(wireless_nlevent_init);
@@ -368,17 +407,8 @@ subsys_initcall(wireless_nlevent_init);
/* Process events generated by the wireless layer or the driver. */
static void wireless_nlevent_process(struct work_struct *work)
{
- struct sk_buff *skb;
- struct net *net;
-
rtnl_lock();
-
- for_each_net(net) {
- while ((skb = skb_dequeue(&net->wext_nlevents)))
- rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
- GFP_KERNEL);
- }
-
+ wireless_nlevent_flush();
rtnl_unlock();
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index f8110cfd80ff..f1ab71504e1d 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3249,7 +3249,7 @@ static int selinux_inode_listsecurity(struct inode *inode, char *buffer, size_t
static void selinux_inode_getsecid(struct inode *inode, u32 *secid)
{
- struct inode_security_struct *isec = inode_security(inode);
+ struct inode_security_struct *isec = inode_security_novalidate(inode);
*secid = isec->sid;
}
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index b9c0910fb8c4..0608f216f359 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 {
unsigned char reserved[128];
};
+#ifdef CONFIG_X86_X32
+/* x32 has a different alignment for 64bit values from ia32 */
+struct snd_ctl_elem_value_x32 {
+ struct snd_ctl_elem_id id;
+ unsigned int indirect; /* bit-field causes misalignment */
+ union {
+ s32 integer[128];
+ unsigned char data[512];
+ s64 integer64[64];
+ } value;
+ unsigned char reserved[128];
+};
+#endif /* CONFIG_X86_X32 */
/* get the value type and count of the control */
static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id,
@@ -219,9 +232,11 @@ static int get_elem_size(int type, int count)
static int copy_ctl_value_from_user(struct snd_card *card,
struct snd_ctl_elem_value *data,
- struct snd_ctl_elem_value32 __user *data32,
+ void __user *userdata,
+ void __user *valuep,
int *typep, int *countp)
{
+ struct snd_ctl_elem_value32 __user *data32 = userdata;
int i, type, size;
int uninitialized_var(count);
unsigned int indirect;
@@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card,
if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
for (i = 0; i < count; i++) {
+ s32 __user *intp = valuep;
int val;
- if (get_user(val, &data32->value.integer[i]))
+ if (get_user(val, &intp[i]))
return -EFAULT;
data->value.integer.value[i] = val;
}
@@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card,
dev_err(card->dev, "snd_ioctl32_ctl_elem_value: unknown type %d\n", type);
return -EINVAL;
}
- if (copy_from_user(data->value.bytes.data,
- data32->value.data, size))
+ if (copy_from_user(data->value.bytes.data, valuep, size))
return -EFAULT;
}
@@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card,
}
/* restore the value to 32bit */
-static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
+static int copy_ctl_value_to_user(void __user *userdata,
+ void __user *valuep,
struct snd_ctl_elem_value *data,
int type, int count)
{
@@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
for (i = 0; i < count; i++) {
+ s32 __user *intp = valuep;
int val;
val = data->value.integer.value[i];
- if (put_user(val, &data32->value.integer[i]))
+ if (put_user(val, &intp[i]))
return -EFAULT;
}
} else {
size = get_elem_size(type, count);
- if (copy_to_user(data32->value.data,
- data->value.bytes.data, size))
+ if (copy_to_user(valuep, data->value.bytes.data, size))
return -EFAULT;
}
return 0;
}
-static int snd_ctl_elem_read_user_compat(struct snd_card *card,
- struct snd_ctl_elem_value32 __user *data32)
+static int ctl_elem_read_user(struct snd_card *card,
+ void __user *userdata, void __user *valuep)
{
struct snd_ctl_elem_value *data;
int err, type, count;
@@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
if (data == NULL)
return -ENOMEM;
- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
+ err = copy_ctl_value_from_user(card, data, userdata, valuep,
+ &type, &count);
+ if (err < 0)
goto error;
snd_power_lock(card);
@@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
err = snd_ctl_elem_read(card, data);
snd_power_unlock(card);
if (err >= 0)
- err = copy_ctl_value_to_user(data32, data, type, count);
+ err = copy_ctl_value_to_user(userdata, valuep, data,
+ type, count);
error:
kfree(data);
return err;
}
-static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
- struct snd_ctl_elem_value32 __user *data32)
+static int ctl_elem_write_user(struct snd_ctl_file *file,
+ void __user *userdata, void __user *valuep)
{
struct snd_ctl_elem_value *data;
struct snd_card *card = file->card;
@@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
if (data == NULL)
return -ENOMEM;
- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
+ err = copy_ctl_value_from_user(card, data, userdata, valuep,
+ &type, &count);
+ if (err < 0)
goto error;
snd_power_lock(card);
@@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
err = snd_ctl_elem_write(card, file, data);
snd_power_unlock(card);
if (err >= 0)
- err = copy_ctl_value_to_user(data32, data, type, count);
+ err = copy_ctl_value_to_user(userdata, valuep, data,
+ type, count);
error:
kfree(data);
return err;
}
+static int snd_ctl_elem_read_user_compat(struct snd_card *card,
+ struct snd_ctl_elem_value32 __user *data32)
+{
+ return ctl_elem_read_user(card, data32, &data32->value);
+}
+
+static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
+ struct snd_ctl_elem_value32 __user *data32)
+{
+ return ctl_elem_write_user(file, data32, &data32->value);
+}
+
+#ifdef CONFIG_X86_X32
+static int snd_ctl_elem_read_user_x32(struct snd_card *card,
+ struct snd_ctl_elem_value_x32 __user *data32)
+{
+ return ctl_elem_read_user(card, data32, &data32->value);
+}
+
+static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file,
+ struct snd_ctl_elem_value_x32 __user *data32)
+{
+ return ctl_elem_write_user(file, data32, &data32->value);
+}
+#endif /* CONFIG_X86_X32 */
+
/* add or replace a user control */
static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
struct snd_ctl_elem_info32 __user *data32,
@@ -393,6 +441,10 @@ enum {
SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32),
SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32),
SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32),
+#ifdef CONFIG_X86_X32
+ SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32),
+ SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32),
+#endif /* CONFIG_X86_X32 */
};
static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns
return snd_ctl_elem_add_compat(ctl, argp, 0);
case SNDRV_CTL_IOCTL_ELEM_REPLACE32:
return snd_ctl_elem_add_compat(ctl, argp, 1);
+#ifdef CONFIG_X86_X32
+ case SNDRV_CTL_IOCTL_ELEM_READ_X32:
+ return snd_ctl_elem_read_user_x32(ctl->card, argp);
+ case SNDRV_CTL_IOCTL_ELEM_WRITE_X32:
+ return snd_ctl_elem_write_user_x32(ctl, argp);
+#endif /* CONFIG_X86_X32 */
}
down_read(&snd_ioctl_rwsem);
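
The snd_ctl_elem_value_x32 struct above exists because ia32 aligns 64-bit members to 4 bytes while x32, like x86-64, aligns them to 8, so the value union sits at a different offset and the ioctl argument has a different size. A hedged userspace sketch, with the structs trimmed to the one member that matters; the attribute emulates the i386 ABI on a 64-bit host:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct elem_ia32 {
	uint32_t indirect;
	/* packed+aligned(4) mimics ia32's 4-byte alignment of s64 */
	union { int64_t integer64[1]; } __attribute__((packed, aligned(4))) value;
};

struct elem_x32 {
	uint32_t indirect;
	union { int64_t integer64[1]; } value;	/* natural 8-byte alignment */
};

int main(void)
{
	printf("value offset: ia32-style %zu, x32-style %zu\n",
	       offsetof(struct elem_ia32, value),
	       offsetof(struct elem_x32, value));
	return 0;
}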
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 9630e9f72b7b..1f64ab0c2a95 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -183,6 +183,14 @@ static int snd_pcm_ioctl_channel_info_compat(struct snd_pcm_substream *substream
return err;
}
+#ifdef CONFIG_X86_X32
+/* X32 ABI has the same struct as x86-64 for snd_pcm_channel_info */
+static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
+ struct snd_pcm_channel_info __user *src);
+#define snd_pcm_ioctl_channel_info_x32(s, p) \
+ snd_pcm_channel_info_user(s, p)
+#endif /* CONFIG_X86_X32 */
+
struct snd_pcm_status32 {
s32 state;
struct compat_timespec trigger_tstamp;
@@ -243,6 +251,71 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
return err;
}
+#ifdef CONFIG_X86_X32
+/* X32 ABI has 64bit timespec and 64bit alignment */
+struct snd_pcm_status_x32 {
+ s32 state;
+ u32 rsvd; /* alignment */
+ struct timespec trigger_tstamp;
+ struct timespec tstamp;
+ u32 appl_ptr;
+ u32 hw_ptr;
+ s32 delay;
+ u32 avail;
+ u32 avail_max;
+ u32 overrange;
+ s32 suspended_state;
+ u32 audio_tstamp_data;
+ struct timespec audio_tstamp;
+ struct timespec driver_tstamp;
+ u32 audio_tstamp_accuracy;
+ unsigned char reserved[52-2*sizeof(struct timespec)];
+} __packed;
+
+#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
+
+static int snd_pcm_status_user_x32(struct snd_pcm_substream *substream,
+ struct snd_pcm_status_x32 __user *src,
+ bool ext)
+{
+ struct snd_pcm_status status;
+ int err;
+
+ memset(&status, 0, sizeof(status));
+ /*
+ * with extension, parameters are read/write,
+ * get audio_tstamp_data from user,
+ * ignore rest of status structure
+ */
+ if (ext && get_user(status.audio_tstamp_data,
+ (u32 __user *)(&src->audio_tstamp_data)))
+ return -EFAULT;
+ err = snd_pcm_status(substream, &status);
+ if (err < 0)
+ return err;
+
+ if (clear_user(src, sizeof(*src)))
+ return -EFAULT;
+ if (put_user(status.state, &src->state) ||
+ put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
+ put_timespec(&status.tstamp, &src->tstamp) ||
+ put_user(status.appl_ptr, &src->appl_ptr) ||
+ put_user(status.hw_ptr, &src->hw_ptr) ||
+ put_user(status.delay, &src->delay) ||
+ put_user(status.avail, &src->avail) ||
+ put_user(status.avail_max, &src->avail_max) ||
+ put_user(status.overrange, &src->overrange) ||
+ put_user(status.suspended_state, &src->suspended_state) ||
+ put_user(status.audio_tstamp_data, &src->audio_tstamp_data) ||
+ put_timespec(&status.audio_tstamp, &src->audio_tstamp) ||
+ put_timespec(&status.driver_tstamp, &src->driver_tstamp) ||
+ put_user(status.audio_tstamp_accuracy, &src->audio_tstamp_accuracy))
+ return -EFAULT;
+
+ return err;
+}
+#endif /* CONFIG_X86_X32 */
+
/* both for HW_PARAMS and HW_REFINE */
static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
int refine,
@@ -469,6 +542,93 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
return 0;
}
+#ifdef CONFIG_X86_X32
+/* X32 ABI has 64bit timespec and 64bit alignment */
+struct snd_pcm_mmap_status_x32 {
+ s32 state;
+ s32 pad1;
+ u32 hw_ptr;
+ u32 pad2; /* alignment */
+ struct timespec tstamp;
+ s32 suspended_state;
+ struct timespec audio_tstamp;
+} __packed;
+
+struct snd_pcm_mmap_control_x32 {
+ u32 appl_ptr;
+ u32 avail_min;
+};
+
+struct snd_pcm_sync_ptr_x32 {
+ u32 flags;
+ u32 rsvd; /* alignment */
+ union {
+ struct snd_pcm_mmap_status_x32 status;
+ unsigned char reserved[64];
+ } s;
+ union {
+ struct snd_pcm_mmap_control_x32 control;
+ unsigned char reserved[64];
+ } c;
+} __packed;
+
+static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream,
+ struct snd_pcm_sync_ptr_x32 __user *src)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ volatile struct snd_pcm_mmap_status *status;
+ volatile struct snd_pcm_mmap_control *control;
+ u32 sflags;
+ struct snd_pcm_mmap_control scontrol;
+ struct snd_pcm_mmap_status sstatus;
+ snd_pcm_uframes_t boundary;
+ int err;
+
+ if (snd_BUG_ON(!runtime))
+ return -EINVAL;
+
+ if (get_user(sflags, &src->flags) ||
+ get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
+ get_user(scontrol.avail_min, &src->c.control.avail_min))
+ return -EFAULT;
+ if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
+ err = snd_pcm_hwsync(substream);
+ if (err < 0)
+ return err;
+ }
+ status = runtime->status;
+ control = runtime->control;
+ boundary = recalculate_boundary(runtime);
+ if (!boundary)
+ boundary = 0x7fffffff;
+ snd_pcm_stream_lock_irq(substream);
+ /* FIXME: we should consider the boundary for the sync from app */
+ if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
+ control->appl_ptr = scontrol.appl_ptr;
+ else
+ scontrol.appl_ptr = control->appl_ptr % boundary;
+ if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
+ control->avail_min = scontrol.avail_min;
+ else
+ scontrol.avail_min = control->avail_min;
+ sstatus.state = status->state;
+ sstatus.hw_ptr = status->hw_ptr % boundary;
+ sstatus.tstamp = status->tstamp;
+ sstatus.suspended_state = status->suspended_state;
+ sstatus.audio_tstamp = status->audio_tstamp;
+ snd_pcm_stream_unlock_irq(substream);
+ if (put_user(sstatus.state, &src->s.status.state) ||
+ put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
+ put_timespec(&sstatus.tstamp, &src->s.status.tstamp) ||
+ put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
+ put_timespec(&sstatus.audio_tstamp, &src->s.status.audio_tstamp) ||
+ put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
+ put_user(scontrol.avail_min, &src->c.control.avail_min))
+ return -EFAULT;
+
+ return 0;
+}
+#endif /* CONFIG_X86_X32 */
/*
*/
@@ -487,7 +647,12 @@ enum {
SNDRV_PCM_IOCTL_WRITEN_FRAMES32 = _IOW('A', 0x52, struct snd_xfern32),
SNDRV_PCM_IOCTL_READN_FRAMES32 = _IOR('A', 0x53, struct snd_xfern32),
SNDRV_PCM_IOCTL_SYNC_PTR32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr32),
-
+#ifdef CONFIG_X86_X32
+ SNDRV_PCM_IOCTL_CHANNEL_INFO_X32 = _IOR('A', 0x32, struct snd_pcm_channel_info),
+ SNDRV_PCM_IOCTL_STATUS_X32 = _IOR('A', 0x20, struct snd_pcm_status_x32),
+ SNDRV_PCM_IOCTL_STATUS_EXT_X32 = _IOWR('A', 0x24, struct snd_pcm_status_x32),
+ SNDRV_PCM_IOCTL_SYNC_PTR_X32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr_x32),
+#endif /* CONFIG_X86_X32 */
};
static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -559,6 +724,16 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
return snd_pcm_ioctl_rewind_compat(substream, argp);
case SNDRV_PCM_IOCTL_FORWARD32:
return snd_pcm_ioctl_forward_compat(substream, argp);
+#ifdef CONFIG_X86_X32
+ case SNDRV_PCM_IOCTL_STATUS_X32:
+ return snd_pcm_status_user_x32(substream, argp, false);
+ case SNDRV_PCM_IOCTL_STATUS_EXT_X32:
+ return snd_pcm_status_user_x32(substream, argp, true);
+ case SNDRV_PCM_IOCTL_SYNC_PTR_X32:
+ return snd_pcm_ioctl_sync_ptr_x32(substream, argp);
+ case SNDRV_PCM_IOCTL_CHANNEL_INFO_X32:
+ return snd_pcm_ioctl_channel_info_x32(substream, argp);
+#endif /* CONFIG_X86_X32 */
}
return -ENOIOCTLCMD;
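
SNDRV_PCM_IOCTL_SYNC_PTR_X32 can reuse nr 0x23 because _IOWR() folds the argument size into the command word, so the larger x32 struct yields a distinct ioctl number. A small sketch with made-up stand-in structs whose sizes differ the same way (132 vs 136 bytes):

#include <stdio.h>
#include <linux/ioctl.h>

/* invented stand-ins; only their differing sizes matter here */
struct sync_ptr_ia32 { unsigned int flags; unsigned char body[128]; };
struct sync_ptr_x32  { unsigned int flags, rsvd; unsigned char body[128]; };

int main(void)
{
	/* same magic 'A' and nr 0x23, different size field, different cmd */
	printf("ia32 cmd 0x%lx, x32 cmd 0x%lx\n",
	       (unsigned long)_IOWR('A', 0x23, struct sync_ptr_ia32),
	       (unsigned long)_IOWR('A', 0x23, struct sync_ptr_x32));
	return 0;
}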
diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
index 5268c1f58c25..f69764d7cdd7 100644
--- a/sound/core/rawmidi_compat.c
+++ b/sound/core/rawmidi_compat.c
@@ -85,8 +85,7 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
if (err < 0)
return err;
- if (put_user(status.tstamp.tv_sec, &src->tstamp.tv_sec) ||
- put_user(status.tstamp.tv_nsec, &src->tstamp.tv_nsec) ||
+ if (compat_put_timespec(&status.tstamp, &src->tstamp) ||
put_user(status.avail, &src->avail) ||
put_user(status.xruns, &src->xruns))
return -EFAULT;
@@ -94,9 +93,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
return 0;
}
+#ifdef CONFIG_X86_X32
+/* X32 ABI has 64bit timespec and 64bit alignment */
+struct snd_rawmidi_status_x32 {
+ s32 stream;
+ u32 rsvd; /* alignment */
+ struct timespec tstamp;
+ u32 avail;
+ u32 xruns;
+ unsigned char reserved[16];
+} __attribute__((packed));
+
+#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
+
+static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
+ struct snd_rawmidi_status_x32 __user *src)
+{
+ int err;
+ struct snd_rawmidi_status status;
+
+ if (rfile->output == NULL)
+ return -EINVAL;
+ if (get_user(status.stream, &src->stream))
+ return -EFAULT;
+
+ switch (status.stream) {
+ case SNDRV_RAWMIDI_STREAM_OUTPUT:
+ err = snd_rawmidi_output_status(rfile->output, &status);
+ break;
+ case SNDRV_RAWMIDI_STREAM_INPUT:
+ err = snd_rawmidi_input_status(rfile->input, &status);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ if (put_timespec(&status.tstamp, &src->tstamp) ||
+ put_user(status.avail, &src->avail) ||
+ put_user(status.xruns, &src->xruns))
+ return -EFAULT;
+
+ return 0;
+}
+#endif /* CONFIG_X86_X32 */
+
enum {
SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32),
SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32),
+#ifdef CONFIG_X86_X32
+ SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32),
+#endif /* CONFIG_X86_X32 */
};
static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -115,6 +163,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign
return snd_rawmidi_ioctl_params_compat(rfile, argp);
case SNDRV_RAWMIDI_IOCTL_STATUS32:
return snd_rawmidi_ioctl_status_compat(rfile, argp);
+#ifdef CONFIG_X86_X32
+ case SNDRV_RAWMIDI_IOCTL_STATUS_X32:
+ return snd_rawmidi_ioctl_status_x32(rfile, argp);
+#endif /* CONFIG_X86_X32 */
}
return -ENOIOCTLCMD;
}
diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
index 8db156b207f1..8cdf489df80e 100644
--- a/sound/core/seq/oss/seq_oss.c
+++ b/sound/core/seq/oss/seq_oss.c
@@ -149,8 +149,6 @@ odev_release(struct inode *inode, struct file *file)
if ((dp = file->private_data) == NULL)
return 0;
- snd_seq_oss_drain_write(dp);
-
mutex_lock(&register_mutex);
snd_seq_oss_release(dp);
mutex_unlock(&register_mutex);
diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h
index b43924325249..d7b4d016b547 100644
--- a/sound/core/seq/oss/seq_oss_device.h
+++ b/sound/core/seq/oss/seq_oss_device.h
@@ -127,7 +127,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co
unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait);
void snd_seq_oss_reset(struct seq_oss_devinfo *dp);
-void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp);
/* */
void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time);
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
index 6779e82b46dd..92c96a95a903 100644
--- a/sound/core/seq/oss/seq_oss_init.c
+++ b/sound/core/seq/oss/seq_oss_init.c
@@ -436,22 +436,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp)
/*
- * Wait until the queue is empty (if we don't have nonblock)
- */
-void
-snd_seq_oss_drain_write(struct seq_oss_devinfo *dp)
-{
- if (! dp->timer->running)
- return;
- if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) &&
- dp->writeq) {
- while (snd_seq_oss_writeq_sync(dp->writeq))
- ;
- }
-}
-
-
-/*
* reset sequencer devices
*/
void
diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
index e05802ae6e1b..2e908225d754 100644
--- a/sound/core/timer_compat.c
+++ b/sound/core/timer_compat.c
@@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file,
struct snd_timer_status32 __user *_status)
{
struct snd_timer_user *tu;
- struct snd_timer_status status;
+ struct snd_timer_status32 status;
tu = file->private_data;
if (snd_BUG_ON(!tu->timeri))
return -ENXIO;
memset(&status, 0, sizeof(status));
- status.tstamp = tu->tstamp;
+ status.tstamp.tv_sec = tu->tstamp.tv_sec;
+ status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
status.resolution = snd_timer_resolution(tu->timeri);
status.lost = tu->timeri->lost;
status.overrun = tu->overrun;
@@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file,
return 0;
}
+#ifdef CONFIG_X86_X32
+/* X32 ABI has the same struct as x86-64 */
+#define snd_timer_user_status_x32(file, s) \
+ snd_timer_user_status(file, s)
+#endif /* CONFIG_X86_X32 */
+
/*
*/
enum {
SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32),
SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32),
+#ifdef CONFIG_X86_X32
+ SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status),
+#endif /* CONFIG_X86_X32 */
};
static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
return snd_timer_user_info_compat(file, argp);
case SNDRV_TIMER_IOCTL_STATUS32:
return snd_timer_user_status_compat(file, argp);
+#ifdef CONFIG_X86_X32
+ case SNDRV_TIMER_IOCTL_STATUS_X32:
+ return snd_timer_user_status_x32(file, argp);
+#endif /* CONFIG_X86_X32 */
}
return -ENOIOCTLCMD;
}
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index b5a17cb510a0..8c486235c905 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -426,18 +426,22 @@ EXPORT_SYMBOL_GPL(snd_hdac_bus_stop_chip);
* @bus: HD-audio core bus
* @status: INTSTS register value
* @ack: callback to be called for woken streams
+ *
+ * Returns a bitmask of the handled streams, or zero if no stream was handled.
*/
-void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
+int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
void (*ack)(struct hdac_bus *,
struct hdac_stream *))
{
struct hdac_stream *azx_dev;
u8 sd_status;
+ int handled = 0;
list_for_each_entry(azx_dev, &bus->stream_list, list) {
if (status & azx_dev->sd_int_sta_mask) {
sd_status = snd_hdac_stream_readb(azx_dev, SD_STS);
snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK);
+ handled |= 1 << azx_dev->index;
if (!azx_dev->substream || !azx_dev->running ||
!(sd_status & SD_INT_COMPLETE))
continue;
@@ -445,6 +449,7 @@ void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
ack(bus, azx_dev);
}
}
+ return handled;
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_handle_stream_irq);
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 37cf9cee9835..27de8015717d 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -930,6 +930,8 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
struct azx *chip = dev_id;
struct hdac_bus *bus = azx_bus(chip);
u32 status;
+ bool active, handled = false;
+ int repeat = 0; /* count for avoiding endless loop */
#ifdef CONFIG_PM
if (azx_has_pm_runtime(chip))
@@ -939,33 +941,36 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
spin_lock(&bus->reg_lock);
- if (chip->disabled) {
- spin_unlock(&bus->reg_lock);
- return IRQ_NONE;
- }
-
- status = azx_readl(chip, INTSTS);
- if (status == 0 || status == 0xffffffff) {
- spin_unlock(&bus->reg_lock);
- return IRQ_NONE;
- }
+ if (chip->disabled)
+ goto unlock;
- snd_hdac_bus_handle_stream_irq(bus, status, stream_update);
+ do {
+ status = azx_readl(chip, INTSTS);
+ if (status == 0 || status == 0xffffffff)
+ break;
- /* clear rirb int */
- status = azx_readb(chip, RIRBSTS);
- if (status & RIRB_INT_MASK) {
- if (status & RIRB_INT_RESPONSE) {
- if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
- udelay(80);
- snd_hdac_bus_update_rirb(bus);
+ handled = true;
+ active = false;
+ if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
+ active = true;
+
+ /* clear rirb int */
+ status = azx_readb(chip, RIRBSTS);
+ if (status & RIRB_INT_MASK) {
+ active = true;
+ if (status & RIRB_INT_RESPONSE) {
+ if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
+ udelay(80);
+ snd_hdac_bus_update_rirb(bus);
+ }
+ azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
}
- azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
- }
+ } while (active && ++repeat < 10);
+ unlock:
spin_unlock(&bus->reg_lock);
- return IRQ_HANDLED;
+ return IRQ_RETVAL(handled);
}
EXPORT_SYMBOL_GPL(azx_interrupt);
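
The reworked azx_interrupt() re-reads INTSTS so interrupts raised while earlier ones are being acknowledged are not lost, and bounds the loop so a stuck device cannot spin the handler forever. The generic shape of that pattern, sketched in userspace with a fake status source in place of the hardware register:

#include <stdio.h>

static int fake_status[] = { 3, 1, 0 };	/* pretend INTSTS reads */
static int reads;

static int read_status(void) { return fake_status[reads++]; }

int main(void)
{
	int handled = 0, repeat = 0, status, active;

	do {
		status = read_status();
		if (status == 0)		/* nothing pending (or device gone) */
			break;
		handled = 1;
		active = 1;			/* stand-in for per-source ack work */
		printf("acked status 0x%x\n", status);
	} while (active && ++repeat < 10);	/* bounded, like azx_interrupt */

	return handled ? 0 : 1;			/* IRQ_HANDLED vs IRQ_NONE */
}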
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index ce6b97f31390..e5240cb3749f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -363,7 +363,10 @@ enum {
((pci)->device == 0x0d0c) || \
((pci)->device == 0x160c))
-#define IS_BROXTON(pci) ((pci)->device == 0x5a98)
+#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
+#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
static char *driver_short_names[] = {
[AZX_DRIVER_ICH] = "HDA Intel",
@@ -540,13 +543,13 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
snd_hdac_set_codec_wakeup(bus, true);
- if (IS_BROXTON(pci)) {
+ if (IS_SKL_PLUS(pci)) {
pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
}
azx_init_chip(chip, full_reset);
- if (IS_BROXTON(pci)) {
+ if (IS_SKL_PLUS(pci)) {
pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
val = val | INTEL_HDA_CGCTL_MISCBDCGE;
pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
@@ -555,7 +558,7 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
snd_hdac_set_codec_wakeup(bus, false);
/* reduce dma latency to avoid noise */
- if (IS_BROXTON(pci))
+ if (IS_BXT(pci))
bxt_reduce_dma_latency(chip);
}
@@ -977,11 +980,6 @@ static int azx_resume(struct device *dev)
/* put codec down to D3 at hibernation for Intel SKL+;
* otherwise BIOS may still access the codec and screw up the driver
*/
-#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
-#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
-#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
-
static int azx_freeze_noirq(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 8ee78dbd4c60..bcbc4ee10130 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2477,13 +2477,6 @@ static int patch_generic_hdmi(struct hda_codec *codec)
is_broxton(codec))
codec->core.link_power_control = 1;
- if (codec_has_acomp(codec)) {
- codec->depop_delay = 0;
- spec->i915_audio_ops.audio_ptr = codec;
- spec->i915_audio_ops.pin_eld_notify = intel_pin_eld_notify;
- snd_hdac_i915_register_notifier(&spec->i915_audio_ops);
- }
-
if (hdmi_parse_codec(codec) < 0) {
if (spec->i915_bound)
snd_hdac_i915_exit(&codec->bus->core);
@@ -2505,6 +2498,18 @@ static int patch_generic_hdmi(struct hda_codec *codec)
init_channel_allocations();
+ if (codec_has_acomp(codec)) {
+ codec->depop_delay = 0;
+ spec->i915_audio_ops.audio_ptr = codec;
+ /* intel_audio_codec_enable() or intel_audio_codec_disable()
+ * will call pin_eld_notify using the audio_ptr pointer;
+ * we need to make sure audio_ptr is really set up
+ */
+ wmb();
+ spec->i915_audio_ops.pin_eld_notify = intel_pin_eld_notify;
+ snd_hdac_i915_register_notifier(&spec->i915_audio_ops);
+ }
+
return 0;
}
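
The wmb() added above is the classic publication pattern: fully initialize the data (audio_ptr), order the stores, then expose the hook the other side polls (pin_eld_notify). In C11 terms the same guarantee is a release store paired with an acquire load; a single-threaded sketch of the shape, with invented names:

#include <stdatomic.h>
#include <stdio.h>

static int payload;			/* plays the role of audio_ptr */
static _Atomic int published;		/* plays the role of pin_eld_notify */

static void producer(void)
{
	payload = 42;			/* set up the data first */
	/* the release store orders the payload write before the publish,
	 * as wmb() does before the pin_eld_notify assignment */
	atomic_store_explicit(&published, 1, memory_order_release);
}

static void consumer(void)
{
	if (atomic_load_explicit(&published, memory_order_acquire))
		printf("payload = %d\n", payload);	/* sees 42 */
}

int main(void)
{
	producer();
	consumer();
	return 0;
}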
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index efd4980cffb8..93d2156b6241 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3801,6 +3801,10 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
static void alc_headset_mode_default(struct hda_codec *codec)
{
+ static struct coef_fw coef0225[] = {
+ UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
+ {}
+ };
static struct coef_fw coef0255[] = {
WRITE_COEF(0x45, 0xc089),
WRITE_COEF(0x45, 0xc489),
@@ -3842,6 +3846,9 @@ static void alc_headset_mode_default(struct hda_codec *codec)
};
switch (codec->core.vendor_id) {
+ case 0x10ec0225:
+ alc_process_coef_fw(codec, coef0225);
+ break;
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -4749,6 +4756,9 @@ enum {
ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
ALC293_FIXUP_LENOVO_SPK_NOISE,
ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
+ ALC255_FIXUP_DELL_SPK_NOISE,
+ ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC280_FIXUP_HP_HEADSET_MIC,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -5368,6 +5378,29 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc233_fixup_lenovo_line2_mic_hotkey,
},
+ [ALC255_FIXUP_DELL_SPK_NOISE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_disable_aamix,
+ .chained = true,
+ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+ },
+ [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* Disable pass-through path for FRONT 14h */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+ },
+ [ALC280_FIXUP_HP_HEADSET_MIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_disable_aamix,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MIC,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5379,6 +5412,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
+ SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
@@ -5410,6 +5444,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5470,6 +5505,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5638,10 +5674,10 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{0x21, 0x03211020}
static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
- SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC225_STANDARD_PINS,
{0x14, 0x901701a0}),
- SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC225_STANDARD_PINS,
{0x14, 0x901701b0}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 2875b4f6d8c9..7c8941b8b2de 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -2879,7 +2879,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
- ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp);
+ ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp);
return 0;
}
@@ -2891,7 +2891,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
- val = ucontrol->value.enumerated.item[0];
+ val = ucontrol->value.integer.value[0];
spin_lock_irq(&hdsp->lock);
if (val != hdsp_dds_offset(hdsp))
change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0;
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 8bc8016c173d..a4a999a0317e 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -1601,6 +1601,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate)
{
u64 n;
+ if (snd_BUG_ON(rate <= 0))
+ return;
+
if (rate >= 112000)
rate /= 4;
else if (rate >= 56000)
@@ -2215,6 +2218,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
} else {
/* slave mode, return external sample rate */
rate = hdspm_external_sample_rate(hdspm);
+ if (!rate)
+ rate = hdspm->system_sample_rate;
}
}
@@ -2260,8 +2265,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol,
ucontrol)
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ int rate = ucontrol->value.integer.value[0];
- hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]);
+ if (rate < 27000 || rate > 207000)
+ return -EINVAL;
+ hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]);
return 0;
}
@@ -4449,7 +4457,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol,
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- ucontrol->value.enumerated.item[0] = hdspm->tco->term;
+ ucontrol->value.integer.value[0] = hdspm->tco->term;
return 0;
}
@@ -4460,8 +4468,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol,
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) {
- hdspm->tco->term = ucontrol->value.enumerated.item[0];
+ if (hdspm->tco->term != ucontrol->value.integer.value[0]) {
+ hdspm->tco->term = ucontrol->value.integer.value[0];
hdspm_tco_write(hdspm);
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 4f6ce1cac8e2..c458d60d5030 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1124,6 +1124,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+ case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 90bd2ea41032..b3281dcd4a5d 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -217,13 +217,16 @@ static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
return rc;
}
+#define NFIT_TEST_ARS_RECORDS 4
+
static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
unsigned int buf_len)
{
if (buf_len < sizeof(*nd_cmd))
return -EINVAL;
- nd_cmd->max_ars_out = 256;
+ nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
+ + NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record);
nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
return 0;
@@ -246,7 +249,8 @@ static int nfit_test_cmd_ars_status(struct nd_cmd_ars_status *nd_cmd,
if (buf_len < sizeof(*nd_cmd))
return -EINVAL;
- nd_cmd->out_length = 256;
+ nd_cmd->out_length = sizeof(struct nd_cmd_ars_status);
+ /* TODO: emit error records */
nd_cmd->num_records = 0;
nd_cmd->address = 0;
nd_cmd->length = -1ULL;
diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance.tc b/tools/testing/selftests/ftrace/test.d/instances/instance.tc
index 773e276ff90b..1e1abe0ad354 100644
--- a/tools/testing/selftests/ftrace/test.d/instances/instance.tc
+++ b/tools/testing/selftests/ftrace/test.d/instances/instance.tc
@@ -39,28 +39,23 @@ instance_slam() {
}
instance_slam &
-x=`jobs -l`
-p1=`echo $x | cut -d' ' -f2`
+p1=$!
echo $p1
instance_slam &
-x=`jobs -l | tail -1`
-p2=`echo $x | cut -d' ' -f2`
+p2=$!
echo $p2
instance_slam &
-x=`jobs -l | tail -1`
-p3=`echo $x | cut -d' ' -f2`
+p3=$!
echo $p3
instance_slam &
-x=`jobs -l | tail -1`
-p4=`echo $x | cut -d' ' -f2`
+p4=$!
echo $p4
instance_slam &
-x=`jobs -l | tail -1`
-p5=`echo $x | cut -d' ' -f2`
+p5=$!
echo $p5
ls -lR >/dev/null
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 043032c6a5a4..00429b392c61 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1875,8 +1875,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-
- int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
+ int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
+ int sz = nr_longs * sizeof(unsigned long);
vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
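
The vgic fix matters because the shared pending/active bitmaps are accessed a long at a time, so the allocation must be rounded up to whole longs; (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8 under-allocates whenever the shared IRQ count is not a multiple of BITS_PER_LONG. A quick check of the arithmetic:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	int nr = 36;	/* example shared-IRQ count, not a multiple of 64 */

	/* old formula vs the rounded allocation the patch switches to */
	printf("naive: %d bytes, rounded: %zu bytes\n",
	       nr / 8, BITS_TO_LONGS(nr) * sizeof(long));
	return 0;
}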
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 353159922456..db2dd3335c6a 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -172,7 +172,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
* do alloc nowait since if we are going to sleep anyway we
* may as well sleep faulting in page
*/
- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
+ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
if (!work)
return 0;