-rw-r--r--  .get_maintainer.ignore | 1
-rw-r--r--  .mailmap | 1
-rw-r--r--  Documentation/Intel-IOMMU.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/cpus.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/phy/ti-phy.txt | 16
-rw-r--r--  Documentation/input/alps.txt | 6
-rw-r--r--  MAINTAINERS | 9
-rw-r--r--  Makefile | 11
-rw-r--r--  arch/arc/Kconfig | 13
-rw-r--r--  arch/arc/Makefile | 10
-rw-r--r--  arch/arc/include/asm/arcregs.h | 7
-rw-r--r--  arch/arc/include/asm/atomic.h | 78
-rw-r--r--  arch/arc/include/asm/ptrace.h | 54
-rw-r--r--  arch/arc/include/asm/spinlock.h | 538
-rw-r--r--  arch/arc/include/asm/spinlock_types.h | 2
-rw-r--r--  arch/arc/include/uapi/asm/ptrace.h | 20
-rw-r--r--  arch/arc/kernel/setup.c | 12
-rw-r--r--  arch/arc/kernel/time.c | 40
-rw-r--r--  arch/arc/lib/memcpy-archs.S | 2
-rw-r--r--  arch/arc/lib/memset-archs.S | 43
-rw-r--r--  arch/arc/plat-axs10x/axs10x.c | 15
-rw-r--r--  arch/arm/Makefile | 3
-rw-r--r--  arch/arm/boot/dts/dra7.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/imx6qdl.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/k2e.dtsi | 15
-rw-r--r--  arch/arm/boot/dts/k2hk.dtsi | 11
-rw-r--r--  arch/arm/boot/dts/k2l.dtsi | 16
-rw-r--r--  arch/arm/boot/dts/keystone.dtsi | 11
-rw-r--r--  arch/arm/boot/dts/omap2430.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/omap4.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/omap5.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/ste-dbx5x0.dtsi | 53
-rw-r--r--  arch/arm/kernel/entry-common.S | 1
-rw-r--r--  arch/arm/kernel/head.S | 3
-rw-r--r--  arch/arm/kernel/vdso.c | 7
-rw-r--r--  arch/arm/lib/uaccess_with_memcpy.c | 2
-rw-r--r--  arch/arm/mach-exynos/pm_domains.c | 3
-rw-r--r--  arch/arm/mach-omap2/omap-wakeupgen.c | 1
-rw-r--r--  arch/arm/vdso/Makefile | 2
-rw-r--r--  arch/arm64/kernel/signal32.c | 5
-rw-r--r--  arch/arm64/kernel/vdso.c | 7
-rw-r--r--  arch/mips/Kconfig | 1
-rw-r--r--  arch/mips/ath79/setup.c | 1
-rw-r--r--  arch/mips/cavium-octeon/smp.c | 2
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/dma-coherence.h | 10
-rw-r--r--  arch/mips/include/asm/pgtable.h | 31
-rw-r--r--  arch/mips/include/asm/smp.h | 2
-rw-r--r--  arch/mips/include/asm/stackframe.h | 25
-rw-r--r--  arch/mips/kernel/genex.S | 2
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c | 5
-rw-r--r--  arch/mips/kernel/prom.c | 2
-rw-r--r--  arch/mips/kernel/relocate_kernel.S | 8
-rw-r--r--  arch/mips/kernel/scall64-64.S | 2
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 2
-rw-r--r--  arch/mips/kernel/signal32.c | 2
-rw-r--r--  arch/mips/kernel/smp-bmips.c | 4
-rw-r--r--  arch/mips/kernel/smp.c | 10
-rw-r--r--  arch/mips/kernel/traps.c | 13
-rw-r--r--  arch/mips/kernel/unaligned.c | 2
-rw-r--r--  arch/mips/lantiq/irq.c | 3
-rw-r--r--  arch/mips/loongson64/loongson-3/smp.c | 7
-rw-r--r--  arch/mips/mm/cache.c | 8
-rw-r--r--  arch/mips/mm/fault.c | 3
-rw-r--r--  arch/mips/mti-malta/malta-int.c | 2
-rw-r--r--  arch/mips/mti-malta/malta-time.c | 16
-rw-r--r--  arch/mips/mti-sead3/sead3-time.c | 1
-rw-r--r--  arch/mips/netlogic/common/smp.c | 2
-rw-r--r--  arch/mips/paravirt/paravirt-smp.c | 2
-rw-r--r--  arch/mips/pistachio/time.c | 1
-rw-r--r--  arch/mips/pmcs-msp71xx/msp_smp.c | 2
-rw-r--r--  arch/mips/ralink/irq.c | 1
-rw-r--r--  arch/mips/sgi-ip27/ip27-irq.c | 8
-rw-r--r--  arch/mips/sibyte/bcm1480/smp.c | 9
-rw-r--r--  arch/mips/sibyte/sb1250/smp.c | 7
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 2
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 4
-rw-r--r--  arch/sparc/include/asm/visasm.h | 16
-rw-r--r--  arch/sparc/lib/NG4memcpy.S | 5
-rw-r--r--  arch/sparc/lib/VISsave.S | 67
-rw-r--r--  arch/sparc/lib/ksyms.c | 4
-rw-r--r--  arch/tile/kernel/compat_signal.c | 2
-rw-r--r--  arch/x86/entry/entry_64_compat.S | 3
-rw-r--r--  arch/x86/include/asm/sigcontext.h | 6
-rw-r--r--  arch/x86/include/asm/switch_to.h | 12
-rw-r--r--  arch/x86/include/uapi/asm/sigcontext.h | 21
-rw-r--r--  arch/x86/kernel/apic/vector.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 23
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_cqm.c | 8
-rw-r--r--  arch/x86/kernel/fpu/core.c | 2
-rw-r--r--  arch/x86/kernel/fpu/init.c | 7
-rw-r--r--  arch/x86/kernel/process.c | 2
-rw-r--r--  arch/x86/kernel/signal.c | 26
-rw-r--r--  arch/x86/kernel/step.c | 4
-rw-r--r--  arch/x86/kvm/mtrr.c | 8
-rw-r--r--  arch/x86/kvm/x86.c | 7
-rw-r--r--  arch/x86/math-emu/fpu_entry.c | 3
-rw-r--r--  arch/x86/math-emu/fpu_system.h | 21
-rw-r--r--  arch/x86/math-emu/get_address.c | 3
-rw-r--r--  arch/x86/xen/Kconfig | 4
-rw-r--r--  arch/x86/xen/Makefile | 4
-rw-r--r--  arch/x86/xen/xen-ops.h | 6
-rw-r--r--  block/blk-settings.c | 4
-rw-r--r--  crypto/authencesn.c | 44
-rw-r--r--  drivers/acpi/video_detect.c | 16
-rw-r--r--  drivers/ata/ahci_brcmstb.c | 6
-rw-r--r--  drivers/ata/libata-core.c | 24
-rw-r--r--  drivers/ata/libata-eh.c | 105
-rw-r--r--  drivers/ata/libata-scsi.c | 21
-rw-r--r--  drivers/ata/libata.h | 6
-rw-r--r--  drivers/ata/sata_sx4.c | 16
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c | 19
-rw-r--r--  drivers/block/rbd.c | 22
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 4
-rw-r--r--  drivers/block/xen-blkfront.c | 128
-rw-r--r--  drivers/block/zram/zram_drv.c | 6
-rw-r--r--  drivers/char/hw_random/core.c | 2
-rw-r--r--  drivers/clk/pxa/clk-pxa3xx.c | 2
-rw-r--r--  drivers/clocksource/sh_cmt.c | 6
-rw-r--r--  drivers/clocksource/timer-imx-gpt.c | 1
-rw-r--r--  drivers/cpufreq/exynos-cpufreq.c | 6
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 2
-rw-r--r--  drivers/crypto/caam/caamhash.c | 7
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 1
-rw-r--r--  drivers/crypto/nx/nx-sha256.c | 27
-rw-r--r--  drivers/crypto/nx/nx-sha512.c | 28
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 24
-rw-r--r--  drivers/dma/dmaengine.c | 4
-rw-r--r--  drivers/edac/ppc4xx_edac.c | 2
-rw-r--r--  drivers/extcon/extcon-palmas.c | 13
-rw-r--r--  drivers/extcon/extcon.c | 61
-rw-r--r--  drivers/firmware/broadcom/bcm47xx_nvram.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 6
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 2
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 18
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 20
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 21
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 59
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 29
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4
-rw-r--r--  drivers/hid/hid-input.c | 7
-rw-r--r--  drivers/hid/hid-uclogic.c | 2
-rw-r--r--  drivers/hid/wacom_sys.c | 70
-rw-r--r--  drivers/hwmon/dell-smm-hwmon.c | 18
-rw-r--r--  drivers/hwmon/g762.c | 1
-rw-r--r--  drivers/hwmon/nct7904.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-bfin-twi.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 11
-rw-r--r--  drivers/i2c/i2c-core.c | 24
-rw-r--r--  drivers/i2c/i2c-slave-eeprom.c | 6
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 2
-rw-r--r--  drivers/input/joystick/turbografx.c | 2
-rw-r--r--  drivers/input/keyboard/gpio_keys_polled.c | 2
-rw-r--r--  drivers/input/misc/axp20x-pek.c | 1
-rw-r--r--  drivers/input/misc/twl4030-vibra.c | 3
-rw-r--r--  drivers/input/mouse/alps.c | 8
-rw-r--r--  drivers/input/mouse/elantech.c | 22
-rw-r--r--  drivers/input/mouse/elantech.h | 1
-rw-r--r--  drivers/iommu/amd_iommu.c | 21
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 2
-rw-r--r--  drivers/iommu/amd_iommu_v2.c | 4
-rw-r--r--  drivers/iommu/dmar.c | 2
-rw-r--r--  drivers/iommu/fsl_pamu.c | 26
-rw-r--r--  drivers/iommu/intel-iommu.c | 716
-rw-r--r--  drivers/iommu/intel_irq_remapping.c | 6
-rw-r--r--  drivers/iommu/irq_remapping.c | 2
-rw-r--r--  drivers/iommu/msm_iommu.c | 4
-rw-r--r--  drivers/iommu/of_iommu.c | 8
-rw-r--r--  drivers/iommu/omap-iommu-debug.c | 131
-rw-r--r--  drivers/iommu/omap-iommu.c | 198
-rw-r--r--  drivers/iommu/omap-iommu.h | 79
-rw-r--r--  drivers/iommu/omap-iopgtable.h | 27
-rw-r--r--  drivers/iommu/tegra-smmu.c | 306
-rw-r--r--  drivers/irqchip/irq-crossbar.c | 4
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 2
-rw-r--r--  drivers/md/dm-cache-policy-mq.c | 2
-rw-r--r--  drivers/md/dm-cache-policy-smq.c | 2
-rw-r--r--  drivers/md/dm-thin-metadata.c | 4
-rw-r--r--  drivers/md/dm.c | 27
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/md/persistent-data/dm-btree-internal.h | 6
-rw-r--r--  drivers/md/persistent-data/dm-btree-remove.c | 17
-rw-r--r--  drivers/md/persistent-data/dm-btree-spine.c | 37
-rw-r--r--  drivers/md/persistent-data/dm-btree.c | 7
-rw-r--r--  drivers/md/raid1.c | 10
-rw-r--r--  drivers/md/raid5.c | 5
-rw-r--r--  drivers/media/dvb-frontends/Kconfig | 2
-rw-r--r--  drivers/media/pci/cobalt/Kconfig | 1
-rw-r--r--  drivers/media/pci/cobalt/cobalt-irq.c | 2
-rw-r--r--  drivers/media/pci/mantis/mantis_dma.c | 5
-rw-r--r--  drivers/media/rc/ir-rc5-decoder.c | 116
-rw-r--r--  drivers/media/rc/ir-rc6-decoder.c | 122
-rw-r--r--  drivers/media/rc/nuvoton-cir.c | 127
-rw-r--r--  drivers/media/rc/nuvoton-cir.h | 1
-rw-r--r--  drivers/media/rc/rc-core-priv.h | 36
-rw-r--r--  drivers/media/rc/rc-ir-raw.c | 139
-rw-r--r--  drivers/media/rc/rc-loopback.c | 36
-rw-r--r--  drivers/media/rc/rc-main.c | 7
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c | 40
-rw-r--r--  drivers/memory/omap-gpmc.c | 6
-rw-r--r--  drivers/memory/tegra/tegra114.c | 18
-rw-r--r--  drivers/memory/tegra/tegra124.c | 31
-rw-r--r--  drivers/memory/tegra/tegra30.c | 18
-rw-r--r--  drivers/mfd/Kconfig | 2
-rw-r--r--  drivers/mfd/arizona-core.c | 16
-rw-r--r--  drivers/misc/eeprom/at24.c | 3
-rw-r--r--  drivers/net/bonding/bond_main.c | 1
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 17
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 5
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 191
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 345
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 244
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 2
-rw-r--r--  drivers/net/ethernet/micrel/ks8842.c | 5
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 4
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 4
-rw-r--r--  drivers/net/ethernet/ti/netcp.h | 1
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 35
-rw-r--r--  drivers/net/hamradio/mkiss.c | 7
-rw-r--r--  drivers/net/ntb_netdev.c | 9
-rw-r--r--  drivers/net/phy/phy.c | 16
-rw-r--r--  drivers/net/phy/smsc.c | 31
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 78
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/virtio_net.c | 4
-rw-r--r--  drivers/net/wan/cosa.c | 3
-rw-r--r--  drivers/net/wireless/b43/tables_nphy.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 22
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 15
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_sdio_ops.c | 8
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_usb_ops.c | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c | 7
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723be/sw.c | 1
-rw-r--r--  drivers/net/xen-netback/interface.c | 6
-rw-r--r--  drivers/net/xen-netback/netback.c | 62
-rw-r--r--  drivers/ntb/ntb.c | 2
-rw-r--r--  drivers/ntb/ntb_transport.c | 201
-rw-r--r--  drivers/pci/Kconfig | 2
-rw-r--r--  drivers/pci/probe.c | 7
-rw-r--r--  drivers/phy/phy-sun4i-usb.c | 1
-rw-r--r--  drivers/phy/phy-ti-pipe3.c | 61
-rw-r--r--  drivers/platform/chrome/Kconfig | 1
-rw-r--r--  drivers/scsi/fnic/fnic.h | 2
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 4
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 8
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 19
-rw-r--r--  drivers/scsi/libiscsi.c | 25
-rw-r--r--  drivers/scsi/scsi_error.c | 31
-rw-r--r--  drivers/scsi/scsi_pm.c | 22
-rw-r--r--  drivers/scsi/sd.c | 6
-rw-r--r--  drivers/staging/comedi/drivers/das1800.c | 1
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/debug.c | 2
-rw-r--r--  drivers/staging/vt6655/device_main.c | 5
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 4
-rw-r--r--  drivers/target/target_core_configfs.c | 9
-rw-r--r--  drivers/target/target_core_hba.c | 10
-rw-r--r--  drivers/target/target_core_spc.c | 44
-rw-r--r--  drivers/thermal/cpu_cooling.c | 73
-rw-r--r--  drivers/thermal/hisi_thermal.c | 1
-rw-r--r--  drivers/thermal/power_allocator.c | 34
-rw-r--r--  drivers/thermal/samsung/Kconfig | 2
-rw-r--r--  drivers/thermal/samsung/exynos_tmu.c | 5
-rw-r--r--  drivers/thermal/thermal_core.c | 1
-rw-r--r--  drivers/usb/chipidea/core.c | 13
-rw-r--r--  drivers/usb/chipidea/host.c | 7
-rw-r--r--  drivers/usb/chipidea/host.h | 6
-rw-r--r--  drivers/usb/gadget/function/f_hid.c | 4
-rw-r--r--  drivers/usb/gadget/function/f_printer.c | 10
-rw-r--r--  drivers/usb/gadget/function/f_uac2.c | 4
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_ep.c | 2
-rw-r--r--  drivers/usb/gadget/udc/udc-core.c | 1
-rw-r--r--  drivers/usb/host/xhci-mem.c | 3
-rw-r--r--  drivers/usb/host/xhci-ring.c | 2
-rw-r--r--  drivers/usb/serial/option.c | 2
-rw-r--r--  drivers/usb/serial/qcserial.c | 2
-rw-r--r--  drivers/usb/serial/sierra.c | 1
-rw-r--r--  drivers/video/console/fbcon.c | 3
-rw-r--r--  drivers/video/fbdev/Kconfig | 2
-rw-r--r--  drivers/video/fbdev/omap2/dss/dss-of.c | 4
-rw-r--r--  drivers/video/fbdev/pxa3xx-gcu.c | 4
-rw-r--r--  drivers/video/of_videomode.c | 4
-rw-r--r--  drivers/virtio/virtio_input.c | 4
-rw-r--r--  drivers/xen/balloon.c | 15
-rw-r--r--  drivers/xen/gntdev.c | 2
-rw-r--r--  drivers/xen/xenbus/xenbus_client.c | 4
-rw-r--r--  fs/btrfs/qgroup.c | 5
-rw-r--r--  fs/ceph/caps.c | 22
-rw-r--r--  fs/ceph/locks.c | 2
-rw-r--r--  fs/ceph/super.h | 1
-rw-r--r--  fs/dcache.c | 13
-rw-r--r--  fs/file_table.c | 24
-rw-r--r--  fs/fuse/dev.c | 10
-rw-r--r--  fs/hugetlbfs/inode.c | 2
-rw-r--r--  fs/namei.c | 2
-rw-r--r--  fs/nfsd/nfs4layouts.c | 1
-rw-r--r--  fs/nfsd/nfs4state.c | 12
-rw-r--r--  fs/nfsd/nfs4xdr.c | 11
-rw-r--r--  fs/notify/mark.c | 30
-rw-r--r--  fs/ocfs2/aops.c | 4
-rw-r--r--  fs/ocfs2/dlmglue.c | 10
-rw-r--r--  fs/signalfd.c | 5
-rw-r--r--  include/drm/drmP.h | 2
-rw-r--r--  include/drm/drm_crtc.h | 2
-rw-r--r--  include/drm/drm_crtc_helper.h | 3
-rw-r--r--  include/drm/drm_edid.h | 19
-rw-r--r--  include/drm/drm_pciids.h | 1
-rw-r--r--  include/linux/ata.h | 18
-rw-r--r--  include/linux/fs.h | 5
-rw-r--r--  include/linux/intel-iommu.h | 2
-rw-r--r--  include/linux/irq.h | 1
-rw-r--r--  include/linux/mm.h | 28
-rw-r--r--  include/linux/mm_types.h | 9
-rw-r--r--  include/linux/page-flags.h | 10
-rw-r--r--  include/linux/skbuff.h | 20
-rw-r--r--  include/media/rc-core.h | 7
-rw-r--r--  include/media/videobuf2-core.h | 2
-rw-r--r--  include/scsi/scsi_eh.h | 1
-rw-r--r--  include/soc/tegra/mc.h | 8
-rw-r--r--  include/sound/soc-topology.h | 12
-rw-r--r--  include/uapi/linux/pci_regs.h | 1
-rw-r--r--  include/uapi/sound/asoc.h | 41
-rw-r--r--  init/main.c | 2
-rw-r--r--  ipc/mqueue.c | 5
-rw-r--r--  ipc/sem.c | 47
-rw-r--r--  ipc/shm.c | 2
-rw-r--r--  kernel/cpuset.c | 2
-rw-r--r--  kernel/events/core.c | 91
-rw-r--r--  kernel/events/ring_buffer.c | 10
-rw-r--r--  kernel/irq/chip.c | 19
-rw-r--r--  kernel/kthread.c | 4
-rw-r--r--  kernel/locking/qspinlock_paravirt.h | 11
-rw-r--r--  kernel/module.c | 8
-rw-r--r--  kernel/signal.c | 13
-rw-r--r--  kernel/time/timer.c | 4
-rw-r--r--  lib/iommu-common.c | 2
-rw-r--r--  mm/cma.h | 2
-rw-r--r--  mm/huge_memory.c | 7
-rw-r--r--  mm/kasan/kasan.c | 2
-rw-r--r--  mm/kasan/report.c | 2
-rw-r--r--  mm/memory-failure.c | 54
-rw-r--r--  mm/memory_hotplug.c | 13
-rw-r--r--  mm/migrate.c | 8
-rw-r--r--  mm/page-writeback.c | 4
-rw-r--r--  mm/page_alloc.c | 62
-rw-r--r--  mm/shmem.c | 4
-rw-r--r--  mm/slab.c | 4
-rw-r--r--  mm/slab_common.c | 3
-rw-r--r--  mm/slub.c | 2
-rw-r--r--  mm/vmscan.c | 16
-rw-r--r--  net/9p/client.c | 2
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 18
-rw-r--r--  net/batman-adv/gateway_client.c | 2
-rw-r--r--  net/batman-adv/soft-interface.c | 3
-rw-r--r--  net/batman-adv/translation-table.c | 32
-rw-r--r--  net/bluetooth/mgmt.c | 2
-rw-r--r--  net/bridge/br_multicast.c | 4
-rw-r--r--  net/bridge/br_netlink.c | 4
-rw-r--r--  net/core/datagram.c | 13
-rw-r--r--  net/core/pktgen.c | 3
-rw-r--r--  net/core/request_sock.c | 8
-rw-r--r--  net/core/skbuff.c | 39
-rw-r--r--  net/dsa/slave.c | 3
-rw-r--r--  net/ipv4/fib_trie.c | 2
-rw-r--r--  net/ipv4/igmp.c | 33
-rw-r--r--  net/ipv4/inet_connection_sock.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_SYNPROXY.c | 3
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 10
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv4/udp.c | 13
-rw-r--r--  net/ipv6/ip6_fib.c | 2
-rw-r--r--  net/ipv6/mcast_snoop.c | 33
-rw-r--r--  net/ipv6/netfilter/ip6t_SYNPROXY.c | 19
-rw-r--r--  net/ipv6/route.c | 85
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/mac80211/rc80211_minstrel.c | 11
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 8
-rw-r--r--  net/netfilter/nf_synproxy_core.c | 4
-rw-r--r--  net/netfilter/xt_CT.c | 5
-rw-r--r--  net/netlink/af_netlink.c | 5
-rw-r--r--  net/openvswitch/actions.c | 16
-rw-r--r--  net/rds/info.c | 2
-rw-r--r--  net/sched/act_mirred.c | 2
-rw-r--r--  net/sched/sch_fq_codel.c | 22
-rwxr-xr-x  scripts/kconfig/streamline_config.pl | 2
-rw-r--r--  security/yama/yama_lsm.c | 1
-rw-r--r--  sound/firewire/amdtp.c | 5
-rw-r--r--  sound/firewire/amdtp.h | 2
-rw-r--r--  sound/firewire/fireworks/fireworks.c | 10
-rw-r--r--  sound/firewire/fireworks/fireworks.h | 2
-rw-r--r--  sound/firewire/fireworks/fireworks_stream.c | 12
-rw-r--r--  sound/hda/ext/hdac_ext_controller.c | 6
-rw-r--r--  sound/hda/ext/hdac_ext_stream.c | 2
-rw-r--r--  sound/pci/hda/patch_realtek.c | 3
-rw-r--r--  sound/pci/oxygen/oxygen_mixer.c | 2
-rw-r--r--  sound/soc/Kconfig | 3
-rw-r--r--  sound/soc/Makefile | 3
-rw-r--r--  sound/soc/codecs/cs4265.c | 10
-rw-r--r--  sound/soc/codecs/rt5645.c | 2
-rw-r--r--  sound/soc/codecs/rt5645.h | 4
-rw-r--r--  sound/soc/intel/baytrail/sst-baytrail-ipc.c | 2
-rw-r--r--  sound/soc/intel/haswell/sst-haswell-ipc.c | 2
-rw-r--r--  sound/soc/soc-topology.c | 61
-rw-r--r--  sound/usb/card.c | 2
-rw-r--r--  tools/perf/builtin-record.c | 11
-rw-r--r--  tools/perf/builtin-top.c | 4
-rw-r--r--  tools/perf/config/Makefile | 2
-rw-r--r--  tools/perf/util/machine.c | 20
-rw-r--r--  tools/perf/util/stat-shadow.c | 8
-rw-r--r--  tools/perf/util/thread.c | 6
441 files changed, 4405 insertions, 3863 deletions
diff --git a/.get_maintainer.ignore b/.get_maintainer.ignore
new file mode 100644
index 000000000000..cca6d870f7a5
--- /dev/null
+++ b/.get_maintainer.ignore
@@ -0,0 +1 @@
+Christoph Hellwig <[email protected]>
diff --git a/.mailmap b/.mailmap
index b4091b7a78fe..4b31af54ccd5 100644
--- a/.mailmap
+++ b/.mailmap
@@ -17,6 +17,7 @@ Aleksey Gorelov <[email protected]>
Andreas Herrmann <[email protected]>
Andrew Morton <[email protected]>
Andrew Vasquez <[email protected]>
Andy Adamson <[email protected]>
diff --git a/Documentation/Intel-IOMMU.txt b/Documentation/Intel-IOMMU.txt
index cf9431db8731..7b57fc087088 100644
--- a/Documentation/Intel-IOMMU.txt
+++ b/Documentation/Intel-IOMMU.txt
@@ -10,7 +10,7 @@ This guide gives a quick cheat sheet for some basic understanding.
Some Keywords
DMAR - DMA remapping
-DRHD - DMA Engine Reporting Structure
+DRHD - DMA Remapping Hardware Unit Definition
RMRR - Reserved memory Region Reporting Structure
ZLR - Zero length reads from PCI devices
IOVA - IO Virtual address.
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index d6b794cef0b8..91e6e5c478d0 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -199,6 +199,7 @@ nodes to be present and contain the properties described below.
"qcom,kpss-acc-v1"
"qcom,kpss-acc-v2"
"rockchip,rk3066-smp"
+ "ste,dbx500-smp"
- cpu-release-addr
Usage: required for systems that have an "enable-method"
diff --git a/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt b/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
index 42531dc387aa..869699925fd5 100644
--- a/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
@@ -8,6 +8,11 @@ Required properties:
- ti,hwmods : Name of the hwmod associated with the IOMMU instance
- reg : Address space for the configuration registers
- interrupts : Interrupt specifier for the IOMMU instance
+- #iommu-cells : Should be 0. OMAP IOMMUs are all "single-master" devices,
+ and need no additional data in the pargs specifier. Please
+ also refer to the generic bindings document for more info
+ on this property,
+ Documentation/devicetree/bindings/iommu/iommu.txt
Optional properties:
- ti,#tlb-entries : Number of entries in the translation look-aside buffer.
@@ -18,6 +23,7 @@ Optional properties:
Example:
/* OMAP3 ISP MMU */
mmu_isp: mmu@480bd400 {
+ #iommu-cells = <0>;
compatible = "ti,omap2-iommu";
reg = <0x480bd400 0x80>;
interrupts = <24>;
diff --git a/Documentation/devicetree/bindings/phy/ti-phy.txt b/Documentation/devicetree/bindings/phy/ti-phy.txt
index 305e3df3d9b1..9cf9446eaf2e 100644
--- a/Documentation/devicetree/bindings/phy/ti-phy.txt
+++ b/Documentation/devicetree/bindings/phy/ti-phy.txt
@@ -82,6 +82,9 @@ Optional properties:
- id: If there are multiple instances of the same type, in order to
differentiate between each instance "id" can be used (e.g., multi-lane PCIe
PHY). If "id" is not provided, it is set to default value of '1'.
+ - syscon-pllreset: Handle to system control region that contains the
+ CTRL_CORE_SMA_SW_0 register and register offset to the CTRL_CORE_SMA_SW_0
+ register that contains the SATA_PLL_SOFT_RESET bit. Only valid for sata_phy.
This is usually a subnode of ocp2scp to which it is connected.
@@ -100,3 +103,16 @@ usb3phy@4a084400 {
"sysclk",
"refclk";
};
+
+sata_phy: phy@4A096000 {
+ compatible = "ti,phy-pipe3-sata";
+ reg = <0x4A096000 0x80>, /* phy_rx */
+ <0x4A096400 0x64>, /* phy_tx */
+ <0x4A096800 0x40>; /* pll_ctrl */
+ reg-names = "phy_rx", "phy_tx", "pll_ctrl";
+ ctrl-module = <&omap_control_sata>;
+ clocks = <&sys_clkin1>, <&sata_ref_clk>;
+ clock-names = "sysclk", "refclk";
+ syscon-pllreset = <&scm_conf 0x3fc>;
+ #phy-cells = <0>;
+};
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index c86f2f1ae4f6..1fec1135791d 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
byte 5: 0 z6 z5 z4 z3 z2 z1 z0
Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
-the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
-buttons get reported separately in the PSM, PSR and PSL bits.
+the DualPoint Stick. The M, R and L bits signal the combined status of both
+the pointingstick and touchpad buttons, except for Dell dualpoint devices
+where the pointingstick buttons get reported separately in the PSM, PSR
+and PSL bits.
Dualpoint device -- interleaved packet format
---------------------------------------------
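For readers mapping the v2 button reporting described above onto code, here is a minimal C sketch of the combined-versus-split distinction. The L/R/M bit positions in byte 0 follow the standard PS/2 packet; the separate psl/psr/psm inputs for the Dell case are illustrative placeholders, not the actual ALPS packet layout.

/* Sketch only: models the button combining described above. */
#include <stdbool.h>
#include <stdint.h>

#define PS2_BTN_LEFT	0x01	/* standard PS/2 packet, byte 0, bit 0 */
#define PS2_BTN_RIGHT	0x02	/* bit 1 */
#define PS2_BTN_MIDDLE	0x04	/* bit 2 */

struct buttons { bool l, r, m; };

static struct buttons decode_buttons(uint8_t byte0, bool dell_split,
				     bool psl, bool psr, bool psm)
{
	struct buttons b = {
		.l = byte0 & PS2_BTN_LEFT,
		.r = byte0 & PS2_BTN_RIGHT,
		.m = byte0 & PS2_BTN_MIDDLE,
	};

	/* On Dell dualpoint devices the pointingstick buttons arrive
	 * separately (PSL/PSR/PSM), so OR them into the combined state;
	 * on other v2 devices byte 0 already carries the combined bits. */
	if (dell_split) {
		b.l |= psl;
		b.r |= psr;
		b.m |= psm;
	}
	return b;
}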
diff --git a/MAINTAINERS b/MAINTAINERS
index a9ae6c105520..569568f6644f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3587,6 +3587,15 @@ S: Maintained
F: drivers/gpu/drm/rockchip/
F: Documentation/devicetree/bindings/video/rockchip*
+DRM DRIVERS FOR STI
+M: Benjamin Gaignard <[email protected]>
+M: Vincent Abriou <[email protected]>
+T: git http://git.linaro.org/people/benjamin.gaignard/kernel.git
+S: Maintained
+F: drivers/gpu/drm/sti
+F: Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <[email protected]>
diff --git a/Makefile b/Makefile
index e79448d90f19..246053f04fb5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc8
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*
@@ -597,6 +597,11 @@ endif # $(dot-config)
# Defaults to vmlinux, but the arch makefile usually adds further targets
all: vmlinux
+# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
+# values of the respective KBUILD_* variables
+ARCH_CPPFLAGS :=
+ARCH_AFLAGS :=
+ARCH_CFLAGS :=
include arch/$(SRCARCH)/Makefile
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
@@ -848,10 +853,10 @@ export mod_strip_cmd
mod_compress_cmd = true
ifdef CONFIG_MODULE_COMPRESS
ifdef CONFIG_MODULE_COMPRESS_GZIP
- mod_compress_cmd = gzip -n
+ mod_compress_cmd = gzip -n -f
endif # CONFIG_MODULE_COMPRESS_GZIP
ifdef CONFIG_MODULE_COMPRESS_XZ
- mod_compress_cmd = xz
+ mod_compress_cmd = xz -f
endif # CONFIG_MODULE_COMPRESS_XZ
endif # CONFIG_MODULE_COMPRESS
export mod_compress_cmd
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 91cf4055acab..bd4670d1b89b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -313,11 +313,11 @@ config ARC_PAGE_SIZE_8K
config ARC_PAGE_SIZE_16K
bool "16KB"
- depends on ARC_MMU_V3
+ depends on ARC_MMU_V3 || ARC_MMU_V4
config ARC_PAGE_SIZE_4K
bool "4KB"
- depends on ARC_MMU_V3
+ depends on ARC_MMU_V3 || ARC_MMU_V4
endchoice
@@ -365,6 +365,11 @@ config ARC_HAS_LLSC
default y
depends on !ARC_CANT_LLSC
+config ARC_STAR_9000923308
+ bool "Workaround for llock/scond livelock"
+ default y
+ depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
+
config ARC_HAS_SWAPE
bool "Insn: SWAPE (endian-swap)"
default y
@@ -379,6 +384,10 @@ config ARC_HAS_LL64
dest operands with 2 possible source operands.
default y
+config ARC_HAS_DIV_REM
+ bool "Insn: div, divu, rem, remu"
+ default y
+
config ARC_HAS_RTC
bool "Local 64-bit r/o cycle counter"
default n
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 46d87310220d..8a27a48304a4 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -36,8 +36,16 @@ cflags-$(atleast_gcc44) += -fsection-anchors
cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
+ifdef CONFIG_ISA_ARCV2
+
ifndef CONFIG_ARC_HAS_LL64
-cflags-$(CONFIG_ISA_ARCV2) += -mno-ll64
+cflags-y += -mno-ll64
+endif
+
+ifndef CONFIG_ARC_HAS_DIV_REM
+cflags-y += -mno-div-rem
+endif
+
endif
cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 070f58827a5c..c8f57b8449dc 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -89,11 +89,10 @@
#define ECR_C_BIT_DTLB_LD_MISS 8
#define ECR_C_BIT_DTLB_ST_MISS 9
-
/* Auxiliary registers */
#define AUX_IDENTITY 4
#define AUX_INTR_VEC_BASE 0x25
-
+#define AUX_NON_VOL 0x5e
/*
* Floating Pt Registers
@@ -240,9 +239,9 @@ struct bcr_extn_xymem {
struct bcr_perip {
#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int start:8, pad2:8, sz:8, pad:8;
+ unsigned int start:8, pad2:8, sz:8, ver:8;
#else
- unsigned int pad:8, sz:8, pad2:8, start:8;
+ unsigned int ver:8, sz:8, pad2:8, start:8;
#endif
};
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 03484cb4d16d..87d18ae53115 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -23,33 +23,60 @@
#define atomic_set(v, i) (((v)->counter) = (i))
-#ifdef CONFIG_ISA_ARCV2
-#define PREFETCHW " prefetchw [%1] \n"
-#else
-#define PREFETCHW
+#ifdef CONFIG_ARC_STAR_9000923308
+
+#define SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int delay = 1, tmp; \
+
+#define SCOND_FAIL_RETRY_ASM \
+ " bz 4f \n" \
+ " ; --- scond fail delay --- \n" \
+ " mov %[tmp], %[delay] \n" /* tmp = delay */ \
+ "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
+ " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
+ " rol %[delay], %[delay] \n" /* delay *= 2 */ \
+ " b 1b \n" /* start over */ \
+ "4: ; --- success --- \n" \
+
+#define SCOND_FAIL_RETRY_VARS \
+ ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
+
+#else /* !CONFIG_ARC_STAR_9000923308 */
+
+#define SCOND_FAIL_RETRY_VAR_DEF
+
+#define SCOND_FAIL_RETRY_ASM \
+ " bnz 1b \n" \
+
+#define SCOND_FAIL_RETRY_VARS
+
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
- unsigned int temp; \
+ unsigned int val; \
+ SCOND_FAIL_RETRY_VAR_DEF \
\
__asm__ __volatile__( \
- "1: \n" \
- PREFETCHW \
- " llock %0, [%1] \n" \
- " " #asm_op " %0, %0, %2 \n" \
- " scond %0, [%1] \n" \
- " bnz 1b \n" \
- : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \
- : "r"(&v->counter), "ir"(i) \
+ "1: llock %[val], [%[ctr]] \n" \
+ " " #asm_op " %[val], %[val], %[i] \n" \
+ " scond %[val], [%[ctr]] \n" \
+ " \n" \
+ SCOND_FAIL_RETRY_ASM \
+ \
+ : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
+ SCOND_FAIL_RETRY_VARS \
+ : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
+ [i] "ir" (i) \
: "cc"); \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
- unsigned int temp; \
+ unsigned int val; \
+ SCOND_FAIL_RETRY_VAR_DEF \
\
/* \
* Explicit full memory barrier needed before/after as \
@@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
smp_mb(); \
\
__asm__ __volatile__( \
- "1: \n" \
- PREFETCHW \
- " llock %0, [%1] \n" \
- " " #asm_op " %0, %0, %2 \n" \
- " scond %0, [%1] \n" \
- " bnz 1b \n" \
- : "=&r"(temp) \
- : "r"(&v->counter), "ir"(i) \
+ "1: llock %[val], [%[ctr]] \n" \
+ " " #asm_op " %[val], %[val], %[i] \n" \
+ " scond %[val], [%[ctr]] \n" \
+ " \n" \
+ SCOND_FAIL_RETRY_ASM \
+ \
+ : [val] "=&r" (val) \
+ SCOND_FAIL_RETRY_VARS \
+ : [ctr] "r" (&v->counter), \
+ [i] "ir" (i) \
: "cc"); \
\
smp_mb(); \
\
- return temp; \
+ return val; \
}
#else /* !CONFIG_ARC_HAS_LLSC */
@@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
/**
* __atomic_add_unless - add unless the number is a given value
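For readers not fluent in ARC assembly, the reworked ATOMIC_OP() above generates a classic load-locked/store-conditional retry loop. Below is a C-level sketch of its semantics; __llock() and __scond() are hypothetical stand-ins for the ARC llock/scond instructions, not real kernel helpers.

/* Sketch only: models what ATOMIC_OP(add, ...) expands to. */
static inline void sketch_atomic_add(int i, volatile int *ctr)
{
	int val;

	do {
		val = __llock(ctr);	/* load-locked: begin exclusive access */
		val += i;		/* the per-op "asm_op" step */
	} while (!__scond(ctr, val));	/* store-conditional fails if another
					 * core wrote *ctr in between; retry */
}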
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 91694ec1ce95..69095da1fcfd 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -20,20 +20,20 @@
struct pt_regs {
/* Real registers */
- long bta; /* bta_l1, bta_l2, erbta */
+ unsigned long bta; /* bta_l1, bta_l2, erbta */
- long lp_start, lp_end, lp_count;
+ unsigned long lp_start, lp_end, lp_count;
- long status32; /* status32_l1, status32_l2, erstatus */
- long ret; /* ilink1, ilink2 or eret */
- long blink;
- long fp;
- long r26; /* gp */
+ unsigned long status32; /* status32_l1, status32_l2, erstatus */
+ unsigned long ret; /* ilink1, ilink2 or eret */
+ unsigned long blink;
+ unsigned long fp;
+ unsigned long r26; /* gp */
- long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+ unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
- long sp; /* user/kernel sp depending on where we came from */
- long orig_r0;
+ unsigned long sp; /* User/Kernel depending on where we came from */
+ unsigned long orig_r0;
/*
* To distinguish bet excp, syscall, irq
@@ -55,13 +55,13 @@ struct pt_regs {
unsigned long event;
};
- long user_r25;
+ unsigned long user_r25;
};
#else
struct pt_regs {
- long orig_r0;
+ unsigned long orig_r0;
union {
struct {
@@ -76,26 +76,26 @@ struct pt_regs {
unsigned long event;
};
- long bta; /* bta_l1, bta_l2, erbta */
+ unsigned long bta; /* bta_l1, bta_l2, erbta */
- long user_r25;
+ unsigned long user_r25;
- long r26; /* gp */
- long fp;
- long sp; /* user/kernel sp depending on where we came from */
+ unsigned long r26; /* gp */
+ unsigned long fp;
+ unsigned long sp; /* user/kernel sp depending on where we came from */
- long r12;
+ unsigned long r12;
/*------- Below list auto saved by h/w -----------*/
- long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
+ unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
- long blink;
- long lp_end, lp_start, lp_count;
+ unsigned long blink;
+ unsigned long lp_end, lp_start, lp_count;
- long ei, ldi, jli;
+ unsigned long ei, ldi, jli;
- long ret;
- long status32;
+ unsigned long ret;
+ unsigned long status32;
};
#endif
@@ -103,10 +103,10 @@ struct pt_regs {
/* Callee saved registers - need to be saved only when you are scheduled out */
struct callee_regs {
- long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+ unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};
-#define instruction_pointer(regs) (unsigned long)((regs)->ret)
+#define instruction_pointer(regs) ((regs)->ret)
#define profile_pc(regs) instruction_pointer(regs)
/* return 1 if user mode or 0 if kernel mode */
@@ -142,7 +142,7 @@ struct callee_regs {
static inline long regs_return_value(struct pt_regs *regs)
{
- return regs->r0;
+ return (long)regs->r0;
}
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index e1651df6a93d..db8c59d1eaeb 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -18,9 +18,518 @@
#define arch_spin_unlock_wait(x) \
do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+#ifdef CONFIG_ARC_HAS_LLSC
+
+/*
+ * A normal LLOCK/SCOND based system, w/o need for livelock workaround
+ */
+#ifndef CONFIG_ARC_STAR_9000923308
+
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
- unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+ unsigned int val;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned int val, got_it = 0;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bnz 1b \n"
+ " mov %[got_it], 1 \n"
+ "4: \n"
+ " \n"
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_mb();
+
+ lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+ smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * zero means writer holds the lock exclusively, deny Reader.
+ * Otherwise grant lock to first/subseq reader
+ *
+ * if (rw->counter > 0) {
+ * rw->counter--;
+ * ret = 1;
+ * }
+ */
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */
+ " sub %[val], %[val], 1 \n" /* reader lock */
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
+ " sub %[val], %[val], 1 \n" /* counter-- */
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n" /* retry if collided with someone */
+ " mov %[got_it], 1 \n"
+ " \n"
+ "4: ; --- done --- \n"
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+ * deny writer. Otherwise if unlocked grant to writer
+ * Hence the claim that Linux rwlocks are unfair to writers.
+ * (can be starved for an indefinite time by readers).
+ *
+ * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+ * rw->counter = 0;
+ * ret = 1;
+ * }
+ */
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n" /* retry if collided with someone */
+ " mov %[got_it], 1 \n"
+ " \n"
+ "4: ; --- done --- \n"
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * rw->counter++;
+ */
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " add %[val], %[val], 1 \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter))
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ smp_mb();
+
+ rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+
+ smp_mb();
+}
+
+#else /* CONFIG_ARC_STAR_9000923308 */
+
+/*
+ * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
+ * coherency transactions in the SCU. The exclusive line state keeps rotating
+ * among contending cores, leading to a never-ending cycle. So break the cycle
+ * by deferring the retry of a failed exclusive access (SCOND). The actual delay
+ * needed is a function of the number of contending cores as well as the unrelated
+ * coherency traffic from other cores. To keep the code simple, start off with a
+ * small delay of 1, which suffices in most cases, and in case of contention
+ * double the delay. Eventually the delay is sufficient such that the coherency
+ * pipeline is drained, thus a subsequent exclusive access would succeed.
+ */
+
+#define SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int delay, tmp; \
+
+#define SCOND_FAIL_RETRY_ASM \
+ " ; --- scond fail delay --- \n" \
+ " mov %[tmp], %[delay] \n" /* tmp = delay */ \
+ "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
+ " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
+ " rol %[delay], %[delay] \n" /* delay *= 2 */ \
+ " b 1b \n" /* start over */ \
+ " \n" \
+ "4: ; --- done --- \n" \
+
+#define SCOND_FAIL_RETRY_VARS \
+ ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int val;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bz 4f \n" /* done */
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val)
+ SCOND_FAIL_RETRY_VARS
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned int val, got_it = 0;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bz.d 4f \n"
+ " mov.z %[got_it], 1 \n" /* got it */
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ SCOND_FAIL_RETRY_VARS
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_mb();
+
+ lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+ smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ /*
+ * zero means writer holds the lock exclusively, deny Reader.
+ * Otherwise grant lock to first/subseq reader
+ *
+ * if (rw->counter > 0) {
+ * rw->counter--;
+ * ret = 1;
+ * }
+ */
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
+ " sub %[val], %[val], 1 \n" /* reader lock */
+ " scond %[val], [%[rwlock]] \n"
+ " bz 4f \n" /* done */
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val)
+ SCOND_FAIL_RETRY_VARS
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
+ " sub %[val], %[val], 1 \n" /* counter-- */
+ " scond %[val], [%[rwlock]] \n"
+ " bz.d 4f \n"
+ " mov.z %[got_it], 1 \n" /* got it */
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ SCOND_FAIL_RETRY_VARS
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ /*
+ * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+ * deny writer. Otherwise if unlocked grant to writer
+ * Hence the claim that Linux rwlocks are unfair to writers.
+ * (can be starved for an indefinite time by readers).
+ *
+ * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+ * rw->counter = 0;
+ * ret = 1;
+ * }
+ */
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bz 4f \n"
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val)
+ SCOND_FAIL_RETRY_VARS
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bz.d 4f \n"
+ " mov.z %[got_it], 1 \n" /* got it */
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ SCOND_FAIL_RETRY_VARS
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * rw->counter++;
+ */
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " add %[val], %[val], 1 \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter))
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+ */
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " scond %[UNLOCKED], [%[rwlock]]\n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
+
+#endif /* CONFIG_ARC_STAR_9000923308 */
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
/*
* This smp_mb() is technically superfluous, we only need the one
@@ -33,7 +542,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
__asm__ __volatile__(
"1: ex %0, [%1] \n"
" breq %0, %2, 1b \n"
- : "+&r" (tmp)
+ : "+&r" (val)
: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
: "memory");
@@ -48,26 +557,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
smp_mb();
}
+/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
- unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+ unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
smp_mb();
__asm__ __volatile__(
"1: ex %0, [%1] \n"
- : "+r" (tmp)
+ : "+r" (val)
: "r"(&(lock->slock))
: "memory");
smp_mb();
- return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
+ return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+ unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
/*
* RELEASE barrier: given the instructions avail on ARCv2, full barrier
@@ -77,7 +587,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
__asm__ __volatile__(
" ex %0, [%1] \n"
- : "+r" (tmp)
+ : "+r" (val)
: "r"(&(lock->slock))
: "memory");
@@ -90,19 +600,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
/*
* Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
*
* The spinlock itself is contained in @counter and access to it is
* serialized with @lock_mutex.
- *
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
*/
-/* Would read_trylock() succeed? */
-#define arch_read_can_lock(x) ((x)->counter > 0)
-
-/* Would write_trylock() succeed? */
-#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
-
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
@@ -173,6 +676,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
arch_spin_unlock(&(rw->lock_mutex));
}
+#endif
+
+#define arch_read_can_lock(x) ((x)->counter > 0)
+#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
+
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
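To make the control flow of the SCOND_FAIL_RETRY_* backoff above easier to follow, here is a C-level sketch of arch_spin_lock() under the workaround. __llock()/__scond() again stand in for the ARC instructions and LOCKED_VAL for __ARCH_SPIN_LOCK_LOCKED__; all of these names are hypothetical.

/* Sketch only: the exponential-backoff acquire loop of the workaround. */
static inline void sketch_spin_lock(volatile unsigned int *slock)
{
	unsigned int delay = 1, i;

	for (;;) {
		if (__llock(slock) == LOCKED_VAL) {
			delay = 1;	/* lock held: keep spinning, reset backoff */
			continue;
		}
		if (__scond(slock, LOCKED_VAL))
			break;		/* exclusive store succeeded: acquired */
		for (i = delay; i; i--)
			;		/* scond failed: wait out the backoff */
		delay *= 2;		/* double it so contenders de-synchronize */
	}
}

Note that the delay only grows across consecutive scond failures; observing the lock held resets it to 1, matching the branch back to label 0 in the assembly.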
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
index 662627ced4f2..4e1ef5f650c6 100644
--- a/arch/arc/include/asm/spinlock_types.h
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -26,7 +26,9 @@ typedef struct {
*/
typedef struct {
volatile unsigned int counter;
+#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t lock_mutex;
+#endif
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 76a7739aab1c..0b3ef63d4a03 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -32,20 +32,20 @@
*/
struct user_regs_struct {
- long pad;
+ unsigned long pad;
struct {
- long bta, lp_start, lp_end, lp_count;
- long status32, ret, blink, fp, gp;
- long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
- long sp;
+ unsigned long bta, lp_start, lp_end, lp_count;
+ unsigned long status32, ret, blink, fp, gp;
+ unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+ unsigned long sp;
} scratch;
- long pad2;
+ unsigned long pad2;
struct {
- long r25, r24, r23, r22, r21, r20;
- long r19, r18, r17, r16, r15, r14, r13;
+ unsigned long r25, r24, r23, r22, r21, r20;
+ unsigned long r19, r18, r17, r16, r15, r14, r13;
} callee;
- long efa; /* break pt addr, for break points in delay slots */
- long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
+ unsigned long efa; /* break pt addr, for break points in delay slots */
+ unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
};
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 18cc01591c96..cabde9dc0696 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void)
struct bcr_perip uncached_space;
struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+ unsigned long perip_space;
FIX_PTR(cpu);
READ_BCR(AUX_IDENTITY, cpu->core);
@@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void)
cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
- BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE);
+ if (uncached_space.ver < 3)
+ perip_space = uncached_space.start << 24;
+ else
+ perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000;
+
+ BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE);
READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
@@ -330,6 +336,10 @@ static void arc_chk_core_config(void)
pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
else if (!cpu->extn.fpu_dp && fpu_enabled)
panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
+
+ if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
+ !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
+ panic("llock/scond livelock workaround missing\n");
}
/*
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 3364d2bbc515..4294761a2b3e 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta,
return 0;
}
-static void arc_clkevent_set_mode(enum clock_event_mode mode,
- struct clock_event_device *dev)
+static int arc_clkevent_set_periodic(struct clock_event_device *dev)
{
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- /*
- * At X Hz, 1 sec = 1000ms -> X cycles;
- * 10ms -> X / 100 cycles
- */
- arc_timer_event_setup(arc_get_core_freq() / HZ);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- break;
- default:
- break;
- }
-
- return;
+ /*
+ * At X Hz, 1 sec = 1000ms -> X cycles;
+ * 10ms -> X / 100 cycles
+ */
+ arc_timer_event_setup(arc_get_core_freq() / HZ);
+ return 0;
}
static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
- .name = "ARC Timer0",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .mode = CLOCK_EVT_MODE_UNUSED,
- .rating = 300,
- .irq = TIMER0_IRQ, /* hardwired, no need for resources */
- .set_next_event = arc_clkevent_set_next_event,
- .set_mode = arc_clkevent_set_mode,
+ .name = "ARC Timer0",
+ .features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC,
+ .rating = 300,
+ .irq = TIMER0_IRQ, /* hardwired, no need for resources */
+ .set_next_event = arc_clkevent_set_next_event,
+ .set_state_periodic = arc_clkevent_set_periodic,
};
static irqreturn_t timer_irq_handler(int irq, void *dev_id)
@@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
* irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
*/
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
- int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC;
+ int irq_reenable = clockevent_state_periodic(evt);
/*
* Any write to CTRL reg ACks the interrupt, we rewrite the
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index 1b2b3acfed52..0cab0b8a57c5 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -206,7 +206,7 @@ unalignedOffby3:
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
- prefetch [r3, 32] ;Prefetch the next write location
+ prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 8)
or r7, r7, r5
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index 92d573c734b5..365b18364815 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -10,12 +10,6 @@
#undef PREALLOC_NOT_AVAIL
-#ifdef PREALLOC_NOT_AVAIL
-#define PREWRITE(A,B) prefetchw [(A),(B)]
-#else
-#define PREWRITE(A,B) prealloc [(A),(B)]
-#endif
-
ENTRY(memset)
prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
@@ -51,9 +45,15 @@ ENTRY(memset)
;;; Convert len to Dwords, unfold x8
lsr.f lp_count, lp_count, 6
+
lpnz @.Lset64bytes
;; LOOP START
- PREWRITE(r3, 64) ;Prefetch the next write location
+#ifdef PREALLOC_NOT_AVAIL
+ prefetchw [r3, 64] ;Prefetch the next write location
+#else
+ prealloc [r3, 64]
+#endif
+#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
@@ -62,16 +62,45 @@ ENTRY(memset)
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
+#else
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+#endif
.Lset64bytes:
lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
lpnz .Lset32bytes
;; LOOP START
prefetchw [r3, 32] ;Prefetch the next write location
+#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
+#else
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+#endif
.Lset32bytes:
and.f lp_count, r2, 0x1F ;Last remaining 31 bytes
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index 99f7da513a48..e7769c3ab5f2 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od)
static void __init axs103_early_init(void)
{
+ /*
+	 * The AXS103 SMP and QUAD configurations share a device tree which
+	 * defaults to 90 MHz. However, recent failures of the Quad config
+	 * revealed P&R timing violations, so clamp it down to a safe 50 MHz.
+	 * Instead of duplicating the defconfig/DT for SMP/QUAD, add a small hack.
+	 *
+	 * This hack is really hacky as of now. Fix it properly by getting the
+	 * number of cores as the return value of the platform's early SMP callback.
+ */
+#ifdef CONFIG_ARC_MCIP
+ unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
+ if (num_cores > 2)
+ arc_set_core_freq(50 * 1000000);
+#endif
+
switch (arc_get_core_freq()/1000000) {
case 33:
axs103_set_freq(1, 1, 1);
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 07ab3d203916..7451b447cc2d 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -312,6 +312,9 @@ INSTALL_TARGETS = zinstall uinstall install
PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+bootpImage uImage: zImage
+zImage: Image
+
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 8f1e25bcecbd..1e29ccf77ea2 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -116,7 +116,7 @@
ranges = <0 0x2000 0x2000>;
scm_conf: scm_conf@0 {
- compatible = "syscon";
+ compatible = "syscon", "simple-bus";
reg = <0x0 0x1400>;
#address-cells = <1>;
#size-cells = <1>;
@@ -1140,6 +1140,7 @@
ctrl-module = <&omap_control_sata>;
clocks = <&sys_clkin1>, <&sata_ref_clk>;
clock-names = "sysclk", "refclk";
+ syscon-pllreset = <&scm_conf 0x3fc>;
#phy-cells = <0>;
};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index e6d13592080d..b57033e8c633 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -181,10 +181,10 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
<&clks IMX6QDL_CLK_LVDS1_GATE>,
<&clks IMX6QDL_CLK_PCIE_REF_125M>;
diff --git a/arch/arm/boot/dts/k2e.dtsi b/arch/arm/boot/dts/k2e.dtsi
index 1b6494fbdb91..675fb8e492c6 100644
--- a/arch/arm/boot/dts/k2e.dtsi
+++ b/arch/arm/boot/dts/k2e.dtsi
@@ -131,10 +131,17 @@
<GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
};
};
+
+ mdio: mdio@24200f00 {
+ compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x24200f00 0x100>;
+ status = "disabled";
+ clocks = <&clkcpgmac>;
+ clock-names = "fck";
+ bus_freq = <2500000>;
+ };
/include/ "k2e-netcp.dtsi"
};
};
-
-&mdio {
- reg = <0x24200f00 0x100>;
-};
diff --git a/arch/arm/boot/dts/k2hk.dtsi b/arch/arm/boot/dts/k2hk.dtsi
index ae6472407b22..d0810a5f2968 100644
--- a/arch/arm/boot/dts/k2hk.dtsi
+++ b/arch/arm/boot/dts/k2hk.dtsi
@@ -98,6 +98,17 @@
#gpio-cells = <2>;
gpio,syscon-dev = <&devctrl 0x25c>;
};
+
+ mdio: mdio@02090300 {
+ compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x02090300 0x100>;
+ status = "disabled";
+ clocks = <&clkcpgmac>;
+ clock-names = "fck";
+ bus_freq = <2500000>;
+ };
/include/ "k2hk-netcp.dtsi"
};
};
diff --git a/arch/arm/boot/dts/k2l.dtsi b/arch/arm/boot/dts/k2l.dtsi
index 0e007483615e..49fd414f680c 100644
--- a/arch/arm/boot/dts/k2l.dtsi
+++ b/arch/arm/boot/dts/k2l.dtsi
@@ -29,7 +29,6 @@
};
soc {
-
/include/ "k2l-clocks.dtsi"
uart2: serial@02348400 {
@@ -79,6 +78,17 @@
#gpio-cells = <2>;
gpio,syscon-dev = <&devctrl 0x24c>;
};
+
+ mdio: mdio@26200f00 {
+ compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x26200f00 0x100>;
+ status = "disabled";
+ clocks = <&clkcpgmac>;
+ clock-names = "fck";
+ bus_freq = <2500000>;
+ };
/include/ "k2l-netcp.dtsi"
};
};
@@ -96,7 +106,3 @@
/* Pin muxed. Enabled and configured by Bootloader */
status = "disabled";
};
-
-&mdio {
- reg = <0x26200f00 0x100>;
-};
diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi
index e7a6f6deabb6..72816d65f7ec 100644
--- a/arch/arm/boot/dts/keystone.dtsi
+++ b/arch/arm/boot/dts/keystone.dtsi
@@ -267,17 +267,6 @@
1 0 0x21000A00 0x00000100>;
};
- mdio: mdio@02090300 {
- compatible = "ti,keystone_mdio", "ti,davinci_mdio";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x02090300 0x100>;
- status = "disabled";
- clocks = <&clkpa>;
- clock-names = "fck";
- bus_freq = <2500000>;
- };
-
kirq0: keystone_irq@26202a0 {
compatible = "ti,keystone-irq";
interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 11a7963be003..2390f387c271 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -51,7 +51,8 @@
};
scm_conf: scm_conf@270 {
- compatible = "syscon";
+ compatible = "syscon",
+ "simple-bus";
reg = <0x270 0x240>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 7d31c6ff246f..abc4473e6f8a 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -191,7 +191,8 @@
};
omap4_padconf_global: omap4_padconf_global@5a0 {
- compatible = "syscon";
+ compatible = "syscon",
+ "simple-bus";
reg = <0x5a0 0x170>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index c8fd648a7108..b1a1263e6001 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -180,7 +180,8 @@
};
omap5_padconf_global: omap5_padconf_global@5a0 {
- compatible = "syscon";
+ compatible = "syscon",
+ "simple-bus";
reg = <0x5a0 0xec>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index a75f3289e653..b8f81fb418ce 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -15,6 +15,33 @@
#include "skeleton.dtsi"
/ {
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ enable-method = "ste,dbx500-smp";
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+ core1 {
+ cpu = <&CPU1>;
+ };
+ };
+ };
+ CPU0: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0x300>;
+ };
+ CPU1: cpu@301 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0x301>;
+ };
+ };
+
soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -22,32 +49,6 @@
interrupt-parent = <&intc>;
ranges;
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu-map {
- cluster0 {
- core0 {
- cpu = <&CPU0>;
- };
- core1 {
- cpu = <&CPU1>;
- };
- };
- };
- CPU0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a9";
- reg = <0>;
- };
- CPU1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a9";
- reg = <1>;
- };
- };
-
ptm@801ae000 {
compatible = "arm,coresight-etm3x", "arm,primecell";
reg = <0x801ae000 0x1000>;
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 92828a1dec80..b48dd4f37f80 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -61,6 +61,7 @@ work_pending:
movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
ldmia sp, {r0 - r6} @ have to reload r0 - r6
b local_restart @ ... and off we go
+ENDPROC(ret_fast_syscall)
/*
* "slow" syscall return path. "why" tells us if this was a real syscall.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index bd755d97e459..29e2991465cb 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
sub lr, r4, r5 @ mmu has been enabled
add r3, r7, lr
ldrd r4, [r3, #0] @ get secondary_data.pgdir
+ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE:
+ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps
+ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir
badr lr, __enable_mmu @ return address
mov r13, r12 @ __secondary_switched address
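The three ARM_BE8() lines added above swap r4 and r5 with the classic three-XOR trick, avoiding a scratch register while the pair loaded by ldrd is reordered for big-endian. A minimal C sketch of the same identity (names are illustrative):

    /* Swap two words without a temporary: a^=b; b^=a; a^=b. */
    static void xor_swap(unsigned long *a, unsigned long *b)
    {
        unsigned long x = *a, y = *b;

        x ^= y;    /* x == a ^ b            */
        y ^= x;    /* y == b ^ (a ^ b) == a */
        x ^= y;    /* x == (a ^ b) ^ a == b */

        *a = x;
        *b = y;
    }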
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index efe17dd9b921..54a5aeab988d 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
*/
void update_vsyscall(struct timekeeper *tk)
{
- struct timespec xtime_coarse;
struct timespec64 *wtm = &tk->wall_to_monotonic;
if (!cntvct_ok) {
@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
vdso_write_begin(vdso_data);
- xtime_coarse = __current_kernel_time();
vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
- vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
- vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->xtime_coarse_sec = tk->xtime_sec;
+ vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
+ tk->tkr_mono.shift);
vdso_data->wtm_clock_sec = wtm->tv_sec;
vdso_data->wtm_clock_nsec = wtm->tv_nsec;
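This vDSO fix (and the matching arm64 one below) reads the coarse time straight from the timekeeper instead of __current_kernel_time(). The timekeeper stores nanoseconds as a shifted fixed-point value, so the coarse nanoseconds fall out by discarding the fractional bits; a hedged sketch of that conversion (field names follow the hunk):

    #include <stdint.h>

    /* tkr_mono.xtime_nsec holds nanoseconds << tkr_mono.shift;
     * dropping the fraction yields the coarse nanosecond value. */
    static inline uint32_t coarse_nsec(uint64_t xtime_nsec, uint32_t shift)
    {
        return (uint32_t)(xtime_nsec >> shift);
    }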
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 3e58d710013c..4b39af2dfda9 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
}
/* the mmap semaphore is taken only if not in an atomic context */
- atomic = in_atomic();
+ atomic = faulthandler_disabled();
if (!atomic)
down_read(&current->mm->mmap_sem);
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 6001f1c9d136..4a87e86dec45 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -146,9 +146,8 @@ static __init int exynos4_pm_init_power_domain(void)
pd->base = of_iomap(np, 0);
if (!pd->base) {
pr_warn("%s: failed to map memory\n", __func__);
- kfree(pd->pd.name);
+ kfree_const(pd->pd.name);
kfree(pd);
- of_node_put(np);
continue;
}
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 8e52621b5a6b..e1d2e991d17a 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
.irq_mask = wakeupgen_mask,
.irq_unmask = wakeupgen_unmask,
.irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 9d259d94e429..1160434eece0 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
VDSO_LDFLAGS += -nostdlib -shared
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
obj-$(CONFIG_VDSO) += vdso.o
extra-$(CONFIG_VDSO) += vdso.lds
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 1670f15ef69e..948f0ad2de23 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
* Other callers might not initialize the si_lsb field,
* so check explicitly for the right codes here.
*/
- if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+ if (from->si_signo == SIGBUS &&
+ (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
break;
@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
- memset(to, 0, sizeof *to);
-
if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE))
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index ec37ab3f524f..97bc68f4c689 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -199,16 +199,15 @@ up_fail:
*/
void update_vsyscall(struct timekeeper *tk)
{
- struct timespec xtime_coarse;
u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
++vdso_data->tb_seq_count;
smp_wmb();
- xtime_coarse = __current_kernel_time();
vdso_data->use_syscall = use_syscall;
- vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
- vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->xtime_coarse_sec = tk->xtime_sec;
+ vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
+ tk->tkr_mono.shift;
vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index cee5f93e5712..199a8357838c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -151,7 +151,6 @@ config BMIPS_GENERIC
select BCM7120_L2_IRQ
select BRCMSTB_L2_IRQ
select IRQ_MIPS_CPU
- select RAW_IRQ_ACCESSORS
select DMA_NONCOHERENT
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 01a644f174dd..1ba21204ebe0 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -190,6 +190,7 @@ int get_c0_perfcount_int(void)
{
return ATH79_MISC_IRQ(5);
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 56f5d080ef9d..b7fa9ae28c36 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
deleted file mode 100644
index 11d3b572b1b3..000000000000
--- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-
-#define plat_post_dma_flush bmips_post_dma_flush
-
-#include <asm/mach-generic/dma-coherence.h>
-
-#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 9d8106758142..ae8569475264 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
+#ifdef CONFIG_SMP
+ /*
+ * For SMP, multiple CPUs can race, so we need to do
+ * this atomically.
+ */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+ unsigned long page_global = _PAGE_GLOBAL;
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ " .set push\n"
+ " .set noreorder\n"
+ "1: " LL_INSN " %[tmp], %[buddy]\n"
+ " bnez %[tmp], 2f\n"
+ " or %[tmp], %[tmp], %[global]\n"
+ " " SC_INSN " %[tmp], %[buddy]\n"
+ " beqz %[tmp], 1b\n"
+ " nop\n"
+ "2:\n"
+ " .set pop"
+ : [buddy] "+m" (buddy->pte),
+ [tmp] "=&r" (tmp)
+ : [global] "r" (page_global));
+#else /* !CONFIG_SMP */
if (pte_none(*buddy))
pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
}
#endif
}
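The LL/SC loop above is the atomic form of the !SMP fallback: set _PAGE_GLOBAL in the buddy PTE only while it is still none, retrying if another CPU raced in between. A rough user-space analogue using GCC/Clang atomic builtins rather than the kernel's primitives:

    #include <stdint.h>

    /* Atomically OR 'global' into *buddy only while *buddy is zero,
     * mirroring the ll/bnez/sc retry loop in the inline asm above. */
    static void set_global_if_none(uint64_t *buddy, uint64_t global)
    {
        uint64_t old = __atomic_load_n(buddy, __ATOMIC_RELAXED);

        while (old == 0 &&
               !__atomic_compare_exchange_n(buddy, &old, old | global,
                                            0, __ATOMIC_RELAXED,
                                            __ATOMIC_RELAXED))
            ;  /* 'old' is refreshed on failure; retry */
    }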
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 16f1ea9ab191..03722d4326a1 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu)
extern void play_dead(void);
#endif
-extern asmlinkage void smp_call_function_interrupt(void);
-
static inline void arch_send_call_function_single_ipi(int cpu)
{
extern struct plat_smp_ops *mp_ops; /* private */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 28d6d9364bd1..a71da576883c 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -152,6 +152,31 @@
.set noreorder
bltz k0, 8f
move k1, sp
+#ifdef CONFIG_EVA
+ /*
+ * Flush interAptiv's Return Prediction Stack (RPS) by writing
+ * EntryHi. Toggling Config7.RPS is slower and less portable.
+ *
+ * The RPS isn't automatically flushed when exceptions are
+ * taken, which can result in kernel mode speculative accesses
+ * to user addresses if the RPS mispredicts. That's harmless
+ * when user and kernel share the same address space, but with
+ * EVA the same user segments may be unmapped to kernel mode,
+ * even containing sensitive MMIO regions or invalid memory.
+ *
+ * This can happen when the kernel sets the return address to
+ * ret_from_* and jr's to the exception handler, which looks
+ * more like a tail call than a function call. If nested calls
+ * don't evict the last user address in the RPS, it will
+ * mispredict the return and fetch from a user controlled
+ * address into the icache.
+ *
+ * More recent EVA-capable cores with MAAR to restrict
+ * speculative accesses aren't affected.
+ */
+ MFC0 k0, CP0_ENTRYHI
+ MTC0 k0, CP0_ENTRYHI
+#endif
.set reorder
/* Called from user mode, new stack. */
get_saved_sp
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index af42e7003f12..baa7b6fc0a60 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set noat
SAVE_ALL
FEXPORT(handle_\exception\ext)
- __BUILD_clear_\clear
+ __build_clear_\clear
.set at
__BUILD_\verbose \exception
move a0, sp
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 3e4491aa6d6b..789d7bf4fef3 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
unsigned int real_len;
- cpumask_t mask;
+ cpumask_t allowed, mask;
int retval;
struct task_struct *p;
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
if (retval)
goto out_unlock;
- cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
+ cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+ cpumask_and(&mask, &allowed, cpu_active_mask);
out_unlock:
read_unlock(&tasklist_lock);
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index b130033838ba..5fcec3032f38 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -38,7 +38,7 @@ char *mips_get_machine_name(void)
return mips_machine_name;
}
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
return add_memory_region(base, size, BOOT_MEM_RAM);
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 74bab9ddd0e1..c6bbf2165051 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
process_entry:
PTR_L s2, (s0)
- PTR_ADD s0, s0, SZREG
+ PTR_ADDIU s0, s0, SZREG
/*
* In case of a kdump/crash kernel, the indirection page is not
@@ -61,9 +61,9 @@ copy_word:
/* copy page word by word */
REG_L s5, (s2)
REG_S s5, (s4)
- PTR_ADD s4, s4, SZREG
- PTR_ADD s2, s2, SZREG
- LONG_SUB s6, s6, 1
+ PTR_ADDIU s4, s4, SZREG
+ PTR_ADDIU s2, s2, SZREG
+ LONG_ADDIU s6, s6, -1
beq s6, zero, process_entry
b copy_word
b process_entry
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index ad4d44635c76..a6f6b762c47a 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -80,7 +80,7 @@ syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
- daddiu a1, v0, __NR_64_Linux
+ move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 446cc654da56..4b2010654c46 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
- daddiu a1, v0, __NR_N32_Linux
+ move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 19a7705f2a01..5d7f2634996f 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
- memset(to, 0, sizeof *to);
-
if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 336708ae5c5b..78cf8c2f1de0 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
if (action == 0)
scheduler_ipi();
else
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
@@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index d0744cc77ea7..a31896c33716 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -192,16 +192,6 @@ asmlinkage void start_secondary(void)
cpu_startup_entry(CPUHP_ONLINE);
}
-/*
- * Call into both interrupt handlers, as we share the IPI for them
- */
-void __irq_entry smp_call_function_interrupt(void)
-{
- irq_enter();
- generic_smp_call_function_interrupt();
- irq_exit();
-}
-
static void stop_this_cpu(void *dummy)
{
/*
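With the smp_call_function_interrupt() wrapper removed, handlers that run under the generic IRQ core (the request_irq-style handlers changed elsewhere in this series) call generic_smp_call_function_interrupt() directly, while raw low-level dispatchers must account for interrupt context themselves, as the ip27, sibyte and loongson hunks below do. A hedged sketch of the raw-dispatcher case (the mask parameter is illustrative):

    #include <linux/hardirq.h>
    #include <linux/smp.h>

    /* Runs outside the generic IRQ core, so enter/exit interrupt
     * context explicitly around the SMP function-call queue. */
    static void mailbox_dispatch(unsigned int action, unsigned int call_mask)
    {
        if (action & call_mask) {
            irq_enter();
            generic_smp_call_function_interrupt();
            irq_exit();
        }
    }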
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e207a43b5f8f..8ea28e6ab37d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
void show_stack(struct task_struct *task, unsigned long *sp)
{
struct pt_regs regs;
+ mm_segment_t old_fs = get_fs();
if (sp) {
regs.regs[29] = (unsigned long)sp;
regs.regs[31] = 0;
@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
prepare_frametrace(&regs);
}
}
+ /*
+ * show_stack() deals exclusively with kernel mode, so be sure to access
+ * the stack in the kernel (not user) address space.
+ */
+ set_fs(KERNEL_DS);
show_stacktrace(task, &regs);
+ set_fs(old_fs);
}
static void show_code(unsigned int __user *pc)
@@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
enum ctx_state prev_state;
+ mm_segment_t old_fs = get_fs();
prev_state = exception_enter();
show_regs(regs);
@@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
dump_tlb_all();
}
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+
show_code((unsigned int __user *) regs->cp0_epc);
+ set_fs(old_fs);
+
/*
* Some chips may have other causes of machine check (e.g. SB1
* graduation timer)
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index af84bef0c90d..eb3efd137fd1 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -438,7 +438,7 @@ do { \
: "memory"); \
} while(0)
-#define StoreDW(addr, value, res) \
+#define _StoreDW(addr, value, res) \
do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 6ab10573490d..2c218c3bbca5 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
{
return ltq_perfcount_irq;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 509877c6e9d9..1a4738a8f2d3 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
- if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ if (action & SMP_CALL_FUNCTION) {
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
+ }
if (action & SMP_ASK_C0COUNT) {
BUG_ON(cpu != 0);
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 77d96db8253c..aab218c36e0d 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -160,18 +160,18 @@ static inline void setup_protection_map(void)
protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+ protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+ protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
- protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+ protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
+ protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
} else {
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 36c0f26fac6b..852a41c6da45 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -133,7 +133,8 @@ good_area:
#endif
goto bad_area;
}
- if (!(vma->vm_flags & VM_READ)) {
+ if (!(vma->vm_flags & VM_READ) &&
+ exception_epc(regs) != address) {
#if 0
pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
raw_smp_processor_id(),
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index d1392f8f5811..fa8f591f3713 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 5625b190edc0..b7bf721eabf5 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -154,6 +154,7 @@ int get_c0_perfcount_int(void)
return mips_cpu_perf_irq;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
@@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void)
static void __init init_rtc(void)
{
- /* stop the clock whilst setting it up */
- CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
+ unsigned char freq, ctrl;
- /* 32KHz time base */
- CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
+ /* Set 32KHz time base if not already set */
+ freq = CMOS_READ(RTC_FREQ_SELECT);
+ if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
+ CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
- /* start the clock */
- CMOS_WRITE(RTC_24H, RTC_CONTROL);
+ /* Ensure SET bit is clear so RTC can run */
+ ctrl = CMOS_READ(RTC_CONTROL);
+ if (ctrl & RTC_SET)
+ CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
}
void __init plat_time_init(void)
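The rewritten init_rtc() is a read-modify-write pattern: read each register first and write only when its current value is wrong, so a correctly running RTC is never stopped or glitched. The same idiom in a self-contained sketch (the accessors are hypothetical stand-ins for CMOS_READ/CMOS_WRITE):

    /* Hypothetical 8-bit register accessors for illustration. */
    extern unsigned char reg_read(int reg);
    extern void reg_write(unsigned char val, int reg);

    static void rmw_clear_bits(int reg, unsigned char mask)
    {
        unsigned char v = reg_read(reg);

        /* Touch the register only if a masked bit is actually set. */
        if (v & mask)
            reg_write(v & ~mask, reg);
    }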
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index e1d69895fb1d..a120b7a5a8fe 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
return -1;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index dc3e327fbbac..f5fff228b347 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
{
clear_c0_eimr(irq);
ack_c0_eirr(irq);
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
set_c0_eimr(irq);
}
diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c
index 42181c7105df..f8d3e081b2eb 100644
--- a/arch/mips/paravirt/paravirt-smp.c
+++ b/arch/mips/paravirt/paravirt-smp.c
@@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)
static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
{
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
index 7c73fcb92a10..8a377346f0ca 100644
--- a/arch/mips/pistachio/time.c
+++ b/arch/mips/pistachio/time.c
@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
{
return gic_get_c0_perfcount_int();
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
int get_c0_fdc_int(void)
{
diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c
index 10170580a2de..ffa0f7101a97 100644
--- a/arch/mips/pmcs-msp71xx/msp_smp.c
+++ b/arch/mips/pmcs-msp71xx/msp_smp.c
@@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 53707aacc0f8..8c624a8b9ea2 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
{
return rt_perfcount_irq;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 3fbaef97a1b8..16ec4e12daa3 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void)
scheduler_ipi();
} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
- smp_call_function_interrupt();
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
- smp_call_function_interrupt();
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
} else
#endif
{
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index af7d44edd9a8..4c71aea25663 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -29,8 +29,6 @@
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
-extern void smp_call_function_interrupt(void);
-
/*
* These are routines for dealing with the bcm1480 smp capabilities
* independent of board/firmware
@@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
- if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ if (action & SMP_CALL_FUNCTION) {
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
+ }
}
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index c0c4b3f88a08..1cf66f5ff23d 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
- if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ if (action & SMP_CALL_FUNCTION) {
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
+ }
}
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d3a831ac0f92..da50e0c9c57e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
- memset(to, 0, sizeof *to);
-
if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2078f92d15ac..f32f843a3631 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1742,10 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
- if (!vcpu->requests)
- return 0;
retry:
kvm_s390_vcpu_request_handled(vcpu);
+ if (!vcpu->requests)
+ return 0;
/*
* We use MMU_RELOAD just to re-arm the ipte notifier for the
* guest prefix page. gmap_ipte_notify will wait on the ptl lock.
diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
index 1f0aa2024e94..6424249d5f78 100644
--- a/arch/sparc/include/asm/visasm.h
+++ b/arch/sparc/include/asm/visasm.h
@@ -28,16 +28,10 @@
* Must preserve %o5 between VISEntryHalf and VISExitHalf */
#define VISEntryHalf \
- rd %fprs, %o5; \
- andcc %o5, FPRS_FEF, %g0; \
- be,pt %icc, 297f; \
- sethi %hi(298f), %g7; \
- sethi %hi(VISenterhalf), %g1; \
- jmpl %g1 + %lo(VISenterhalf), %g0; \
- or %g7, %lo(298f), %g7; \
- clr %o5; \
-297: wr %o5, FPRS_FEF, %fprs; \
-298:
+ VISEntry
+
+#define VISExitHalf \
+ VISExit
#define VISEntryHalfFast(fail_label) \
rd %fprs, %o5; \
@@ -47,7 +41,7 @@
ba,a,pt %xcc, fail_label; \
297: wr %o5, FPRS_FEF, %fprs;
-#define VISExitHalf \
+#define VISExitHalfFast \
wr %o5, 0, %fprs;
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 140527a20e7d..83aeeb1dffdb 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
add %o0, 0x40, %o0
bne,pt %icc, 1b
LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
+#ifdef NON_USER_COPY
+ VISExitHalfFast
+#else
VISExitHalf
-
+#endif
brz,pn %o2, .Lexit
cmp %o2, 19
ble,pn %icc, .Lsmall_unaligned
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
index b320ae9e2e2e..a063d84336d6 100644
--- a/arch/sparc/lib/VISsave.S
+++ b/arch/sparc/lib/VISsave.S
@@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
stx %g3, [%g6 + TI_GSR]
2: add %g6, %g1, %g3
- cmp %o5, FPRS_DU
- be,pn %icc, 6f
- sll %g1, 3, %g1
+ mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5
+ sll %g1, 3, %g1
stb %o5, [%g3 + TI_FPSAVED]
rd %gsr, %g2
add %g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
.align 32
80: jmpl %g7 + %g0, %g0
nop
-
-6: ldub [%g3 + TI_FPSAVED], %o5
- or %o5, FPRS_DU, %o5
- add %g6, TI_FPREGS+0x80, %g2
- stb %o5, [%g3 + TI_FPSAVED]
-
- sll %g1, 5, %g1
- add %g6, TI_FPREGS+0xc0, %g3
- wr %g0, FPRS_FEF, %fprs
- membar #Sync
- stda %f32, [%g2 + %g1] ASI_BLK_P
- stda %f48, [%g3 + %g1] ASI_BLK_P
- membar #Sync
- ba,pt %xcc, 80f
- nop
-
- .align 32
-80: jmpl %g7 + %g0, %g0
- nop
-
- .align 32
-VISenterhalf:
- ldub [%g6 + TI_FPDEPTH], %g1
- brnz,a,pn %g1, 1f
- cmp %g1, 1
- stb %g0, [%g6 + TI_FPSAVED]
- stx %fsr, [%g6 + TI_XFSR]
- clr %o5
- jmpl %g7 + %g0, %g0
- wr %g0, FPRS_FEF, %fprs
-
-1: bne,pn %icc, 2f
- srl %g1, 1, %g1
- ba,pt %xcc, vis1
- sub %g7, 8, %g7
-2: addcc %g6, %g1, %g3
- sll %g1, 3, %g1
- andn %o5, FPRS_DU, %g2
- stb %g2, [%g3 + TI_FPSAVED]
-
- rd %gsr, %g2
- add %g6, %g1, %g3
- stx %g2, [%g3 + TI_GSR]
- add %g6, %g1, %g2
- stx %fsr, [%g2 + TI_XFSR]
- sll %g1, 5, %g1
-3: andcc %o5, FPRS_DL, %g0
- be,pn %icc, 4f
- add %g6, TI_FPREGS, %g2
-
- add %g6, TI_FPREGS+0x40, %g3
- membar #Sync
- stda %f0, [%g2 + %g1] ASI_BLK_P
- stda %f16, [%g3 + %g1] ASI_BLK_P
- membar #Sync
- ba,pt %xcc, 4f
- nop
-
- .align 32
-4: and %o5, FPRS_DU, %o5
- jmpl %g7 + %g0, %g0
- wr %o5, FPRS_FEF, %fprs
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 1d649a95660c..8069ce12f20b 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
void VISenter(void);
EXPORT_SYMBOL(VISenter);
-/* CRYPTO code needs this */
-void VISenterhalf(void);
-EXPORT_SYMBOL(VISenterhalf);
-
extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
unsigned long *);
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index e8c2c04143cd..c667e104a0c2 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
return -EFAULT;
- memset(to, 0, sizeof(*to));
-
err = __get_user(to->si_signo, &from->si_signo);
err |= __get_user(to->si_errno, &from->si_errno);
err |= __get_user(to->si_code, &from->si_code);
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 5a1844765a7a..a7e257d9cb90 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -140,6 +140,7 @@ sysexit_from_sys_call:
*/
andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
movl RIP(%rsp), %ecx /* User %eip */
+ movq RAX(%rsp), %rax
RESTORE_RSI_RDI
xorl %edx, %edx /* Do not leak kernel information */
xorq %r8, %r8
@@ -219,7 +220,6 @@ sysexit_from_sys_call:
1: setbe %al /* 1 if error, 0 if not */
movzbl %al, %edi /* zero-extend that into %edi */
call __audit_syscall_exit
- movq RAX(%rsp), %rax /* reload syscall return value */
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@@ -368,6 +368,7 @@ sysretl_from_sys_call:
RESTORE_RSI_RDI_RDX
movl RIP(%rsp), %ecx
movl EFLAGS(%rsp), %r11d
+ movq RAX(%rsp), %rax
xorq %r10, %r10
xorq %r9, %r9
xorq %r8, %r8
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 6fe6b182c998..9dfce4e0417d 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -57,9 +57,9 @@ struct sigcontext {
unsigned long ip;
unsigned long flags;
unsigned short cs;
- unsigned short __pad2; /* Was called gs, but was always zero. */
- unsigned short __pad1; /* Was called fs, but was always zero. */
- unsigned short ss;
+ unsigned short gs;
+ unsigned short fs;
+ unsigned short __pad0;
unsigned long err;
unsigned long trapno;
unsigned long oldmask;
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 751bf4b7bf11..d7f3b3b78ac3 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -79,12 +79,12 @@ do { \
#else /* CONFIG_X86_32 */
/* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
+#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
#define __EXTRA_CLOBBER \
, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
- "r12", "r13", "r14", "r15", "flags"
+ "r12", "r13", "r14", "r15"
#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary \
@@ -100,11 +100,7 @@ do { \
#define __switch_canary_iparam
#endif /* CC_STACKPROTECTOR */
-/*
- * There is no need to save or restore flags, because flags are always
- * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
- * has no effect.
- */
+/* Save and restore flags to handle a leaking NT flag across task switches */
#define switch_to(prev, next, last) \
asm volatile(SAVE_CONTEXT \
"movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
index 0e8a973de9ee..40836a9a7250 100644
--- a/arch/x86/include/uapi/asm/sigcontext.h
+++ b/arch/x86/include/uapi/asm/sigcontext.h
@@ -177,24 +177,9 @@ struct sigcontext {
__u64 rip;
__u64 eflags; /* RFLAGS */
__u16 cs;
-
- /*
- * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
- * Linux saved and restored fs and gs in these slots. This
- * was counterproductive, as fsbase and gsbase were never
- * saved, so arch_prctl was presumably unreliable.
- *
- * If these slots are ever needed for any other purpose, there
- * is some risk that very old 64-bit binaries could get
- * confused. I doubt that many such binaries still work,
- * though, since the same patch in 2.5.64 also removed the
- * 64-bit set_thread_area syscall, so it appears that there is
- * no TLS API that works in both pre- and post-2.5.64 kernels.
- */
- __u16 __pad2; /* Was gs. */
- __u16 __pad1; /* Was fs. */
-
- __u16 ss;
+ __u16 gs;
+ __u16 fs;
+ __u16 __pad0;
__u64 err;
__u64 trapno;
__u64 oldmask;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index f813261d9740..2683f36e4e0a 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -322,7 +322,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
irq_data->chip = &lapic_controller;
irq_data->chip_data = data;
irq_data->hwirq = virq + i;
- err = assign_irq_vector_policy(virq, irq_data->node, data,
+ err = assign_irq_vector_policy(virq + i, irq_data->node, data,
info);
if (err)
goto error;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index b9826a981fb2..6326ae24e4d5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2534,7 +2534,7 @@ static int intel_pmu_cpu_prepare(int cpu)
if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
cpuc->shared_regs = allocate_shared_regs(cpu);
if (!cpuc->shared_regs)
- return NOTIFY_BAD;
+ goto err;
}
if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2542,18 +2542,27 @@ static int intel_pmu_cpu_prepare(int cpu)
cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
if (!cpuc->constraint_list)
- return NOTIFY_BAD;
+ goto err_shared_regs;
cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
- if (!cpuc->excl_cntrs) {
- kfree(cpuc->constraint_list);
- kfree(cpuc->shared_regs);
- return NOTIFY_BAD;
- }
+ if (!cpuc->excl_cntrs)
+ goto err_constraint_list;
+
cpuc->excl_thread_id = 0;
}
return NOTIFY_OK;
+
+err_constraint_list:
+ kfree(cpuc->constraint_list);
+ cpuc->constraint_list = NULL;
+
+err_shared_regs:
+ kfree(cpuc->shared_regs);
+ cpuc->shared_regs = NULL;
+
+err:
+ return NOTIFY_BAD;
}
static void intel_pmu_cpu_starting(int cpu)
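The reworked error path is the standard kernel goto-unwind idiom: each allocation gets a label that frees everything allocated before it, so a failure jumps to exactly the right amount of cleanup. A minimal self-contained sketch of the pattern:

    #include <stdlib.h>

    struct ctx { void *a; void *b; };

    static int ctx_init(struct ctx *c)
    {
        c->a = malloc(64);
        if (!c->a)
            goto err;

        c->b = malloc(64);
        if (!c->b)
            goto err_free_a;

        return 0;

    err_free_a:
        free(c->a);
        c->a = NULL;  /* avoid a dangling pointer, as the patch does */
    err:
        return -1;
    }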
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 63eb68b73589..377e8f8ed391 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -1255,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
cpumask_set_cpu(cpu, &cqm_cpumask);
}
-static void intel_cqm_cpu_prepare(unsigned int cpu)
+static void intel_cqm_cpu_starting(unsigned int cpu)
{
struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1296,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
unsigned int cpu = (unsigned long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- intel_cqm_cpu_prepare(cpu);
- break;
case CPU_DOWN_PREPARE:
intel_cqm_cpu_exit(cpu);
break;
case CPU_STARTING:
+ intel_cqm_cpu_starting(cpu);
cqm_pick_event_reader(cpu);
break;
}
@@ -1373,7 +1371,7 @@ static int __init intel_cqm_init(void)
goto out;
for_each_online_cpu(i) {
- intel_cqm_cpu_prepare(i);
+ intel_cqm_cpu_starting(i);
cqm_pick_event_reader(i);
}
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 79de954626fd..d25097c3fc1d 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -270,7 +270,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
- if (src_fpu->fpstate_active)
+ if (src_fpu->fpstate_active && cpu_has_fpu)
fpu_copy(dst_fpu, src_fpu);
return 0;
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 1e173f6285c7..d14e9ac3235a 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -40,7 +40,12 @@ static void fpu__init_cpu_generic(void)
write_cr0(cr0);
/* Flush out any pending x87 state: */
- asm volatile ("fninit");
+#ifdef CONFIG_MATH_EMULATION
+ if (!cpu_has_fpu)
+ fpstate_init_soft(&current->thread.fpu.state.soft);
+ else
+#endif
+ asm volatile ("fninit");
}
/*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 397688beed4b..c27cad726765 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -408,6 +408,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
static void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
+ trace_cpu_idle_rcuidle(1, smp_processor_id());
if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
smp_mb(); /* quirk */
clflush((void *)&current_thread_info()->flags);
@@ -419,6 +420,7 @@ static void mwait_idle(void)
__sti_mwait(0, 0);
else
local_irq_enable();
+ trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
} else {
local_irq_enable();
}
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 206996c1669d..71820c42b6ce 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
COPY(r15);
#endif /* CONFIG_X86_64 */
+#ifdef CONFIG_X86_32
COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);
+#else /* !CONFIG_X86_32 */
+ /* The kernel saves and restores only the CS segment register on
+ * signals, which is the bare minimum needed to allow mixed 32/64-bit
+ * code. The app's signal handler can save/restore other segments if
+ * needed. */
+ COPY_SEG_CPL3(cs);
+#endif /* CONFIG_X86_32 */
get_user_ex(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
#else /* !CONFIG_X86_32 */
put_user_ex(regs->flags, &sc->flags);
put_user_ex(regs->cs, &sc->cs);
- put_user_ex(0, &sc->__pad2);
- put_user_ex(0, &sc->__pad1);
- put_user_ex(regs->ss, &sc->ss);
+ put_user_ex(0, &sc->gs);
+ put_user_ex(0, &sc->fs);
#endif /* CONFIG_X86_32 */
put_user_ex(fpstate, &sc->fpstate);
@@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
regs->sp = (unsigned long)frame;
- /*
- * Set up the CS and SS registers to run signal handlers in
- * 64-bit mode, even if the handler happens to be interrupting
- * 32-bit or 16-bit code.
- *
- * SS is subtle. In 64-bit mode, we don't need any particular
- * SS descriptor, but we do need SS to be valid. It's possible
- * that the old SS is entirely bogus -- this can happen if the
- * signal we're trying to deliver is #GP or #SS caused by a bad
- * SS value.
- */
+ /* Set up the CS register to run signal handlers in 64-bit mode,
+ even if the handler happens to be interrupting 32-bit code. */
regs->cs = __USER_CS;
- regs->ss = __USER_DS;
return 0;
}
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 6273324186ac..0ccb53a9fcd9 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -28,11 +28,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
struct desc_struct *desc;
unsigned long base;
- seg &= ~7UL;
+ seg >>= 3;
mutex_lock(&child->mm->context.lock);
if (unlikely(!child->mm->context.ldt ||
- (seg >> 3) >= child->mm->context.ldt->size))
+ seg >= child->mm->context.ldt->size))
addr = -1L; /* bogus selector, access would fault */
else {
desc = &child->mm->context.ldt->entries[seg];
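An x86 segment selector packs the descriptor index in bits 15:3, with bit 2 choosing LDT vs GDT and bits 1:0 the RPL; ldt->size counts entries, so the fix compares the shifted index, not a byte offset, against it. A small decoding sketch:

    #include <stdbool.h>
    #include <stdint.h>

    #define SEL_RPL(sel)    ((sel) & 0x3)          /* requested privilege */
    #define SEL_TI(sel)     (((sel) >> 2) & 0x1)   /* 1 = LDT, 0 = GDT */
    #define SEL_INDEX(sel)  ((sel) >> 3)           /* descriptor index */

    static bool ldt_index_valid(uint16_t sel, unsigned int ldt_entries)
    {
        /* Bound the entry index, not the byte offset. */
        return SEL_TI(sel) && SEL_INDEX(sel) < ldt_entries;
    }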
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index dc0a84a6f309..9e8bf13572e6 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -672,16 +672,16 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
if (iter.mtrr_disabled)
return mtrr_disabled_type();
+ /* not contained in any MTRRs. */
+ if (type == -1)
+ return mtrr_default_type(mtrr_state);
+
/*
* We check only one page, so a page partially covered by MTRRs is
* impossible.
*/
WARN_ON(iter.partial_map);
- /* not contained in any MTRRs. */
- if (type == -1)
- return mtrr_default_type(mtrr_state);
-
return type;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5ef2560075bf..8f0f6eca69da 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2105,7 +2105,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
- kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+ adjust_tsc_offset_guest(vcpu, adj);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
@@ -6327,6 +6327,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
static void process_smi(struct kvm_vcpu *vcpu)
{
struct kvm_segment cs, ds;
+ struct desc_ptr dt;
char buf[512];
u32 cr0;
@@ -6359,6 +6360,10 @@ static void process_smi(struct kvm_vcpu *vcpu)
kvm_x86_ops->set_cr4(vcpu, 0);
+ /* Undocumented: IDT limit is set to zero on entry to SMM. */
+ dt.address = dt.size = 0;
+ kvm_x86_ops->set_idt(vcpu, &dt);
+
__kvm_set_dr(vcpu, 7, DR7_FIXED_1);
cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index f37e84ab49f3..3d8f2e421466 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -29,7 +29,6 @@
#include <asm/uaccess.h>
#include <asm/traps.h>
-#include <asm/desc.h>
#include <asm/user.h>
#include <asm/fpu/internal.h>
@@ -181,7 +180,7 @@ void math_emulate(struct math_emu_info *info)
math_abort(FPU_info, SIGILL);
}
- code_descriptor = LDT_DESCRIPTOR(FPU_CS);
+ code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
if (SEG_D_SIZE(code_descriptor)) {
/* The above test may be wrong, the book is not clear */
/* Segmented 32 bit protected mode */
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 9ccecb61a4fa..5e044d506b7a 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -16,9 +16,24 @@
#include <linux/kernel.h>
#include <linux/mm.h>
-/* s is always from a cpu register, and the cpu does bounds checking
- * during register load --> no further bounds checks needed */
-#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
+#include <asm/desc.h>
+#include <asm/mmu_context.h>
+
+static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+{
+ static struct desc_struct zero_desc;
+ struct desc_struct ret = zero_desc;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ seg >>= 3;
+ mutex_lock(&current->mm->context.lock);
+ if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+ ret = current->mm->context.ldt->entries[seg];
+ mutex_unlock(&current->mm->context.lock);
+#endif
+ return ret;
+}
+
#define SEG_D_SIZE(x) ((x).b & (3 << 21))
#define SEG_G_BIT(x) ((x).b & (1 << 23))
#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1)
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
index 6ef5e99380f9..8300db71c2a6 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -20,7 +20,6 @@
#include <linux/stddef.h>
#include <asm/uaccess.h>
-#include <asm/desc.h>
#include "fpu_system.h"
#include "exception.h"
@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
addr->selector = PM_REG_(segment);
}
- descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
+ descriptor = FPU_get_ldt_descriptor(addr->selector);
base_address = SEG_BASE_ADDR(descriptor);
address = base_address + offset;
limit = base_address
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index e88fda867a33..484145368a24 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -8,7 +8,7 @@ config XEN
select PARAVIRT_CLOCK
select XEN_HAVE_PVMMU
depends on X86_64 || (X86_32 && X86_PAE)
- depends on X86_TSC
+ depends on X86_LOCAL_APIC && X86_TSC
help
This is the Linux Xen port. Enabling this will allow the
kernel to boot in a paravirtualized environment under the
@@ -17,7 +17,7 @@ config XEN
config XEN_DOM0
def_bool y
depends on XEN && PCI_XEN && SWIOTLB_XEN
- depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+ depends on X86_IO_APIC && ACPI && PCI
config XEN_PVHVM
def_bool y
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 7322755f337a..4b6e29ac0968 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp)
obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
time.o xen-asm.o xen-asm_$(BITS).o \
grant-table.o suspend.o platform-pci-unplug.o \
- p2m.o
+ p2m.o apic.o
obj-$(CONFIG_EVENT_TRACING) += trace.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
-obj-$(CONFIG_XEN_DOM0) += apic.o vga.o
+obj-$(CONFIG_XEN_DOM0) += vga.o
obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
obj-$(CONFIG_XEN_EFI) += efi.o
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index c20fe29e65f4..2292721b1d10 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -101,17 +101,15 @@ struct dom0_vga_console_info;
#ifdef CONFIG_XEN_DOM0
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
-void __init xen_init_apic(void);
#else
static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
size_t size)
{
}
-static inline void __init xen_init_apic(void)
-{
-}
#endif
+void __init xen_init_apic(void);
+
#ifdef CONFIG_XEN_EFI
extern void xen_efi_init(void);
#else
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 12600bfffca9..e0057d035200 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
* Description:
* Enables a low level driver to set a hard upper limit,
* max_hw_sectors, on the size of requests. max_hw_sectors is set by
- * the device driver based upon the combined capabilities of I/O
- * controller and storage device.
+ * the device driver based upon the capabilities of the I/O
+ * controller.
*
* max_sectors is a soft limit imposed by the block layer for
* filesystem type requests. This value can be overridden on a
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index a3da6770bc9e..b8efe36ce114 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -393,8 +393,6 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
struct scatterlist *cipher = areq_ctx->cipher;
struct scatterlist *hsg = areq_ctx->hsg;
struct scatterlist *tsg = areq_ctx->tsg;
- struct scatterlist *assoc1;
- struct scatterlist *assoc2;
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
unsigned int cryptlen = req->cryptlen;
struct page *dstp;
@@ -412,27 +410,19 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
cryptlen += ivsize;
}
- if (sg_is_last(assoc))
- return -EINVAL;
-
- assoc1 = assoc + 1;
- if (sg_is_last(assoc1))
- return -EINVAL;
-
- assoc2 = assoc + 2;
- if (!sg_is_last(assoc2))
+ if (assoc->length < 12)
return -EINVAL;
sg_init_table(hsg, 2);
- sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
- sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+ sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+ sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
sg_init_table(tsg, 1);
- sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+ sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
areq_ctx->cryptlen = cryptlen;
- areq_ctx->headlen = assoc->length + assoc2->length;
- areq_ctx->trailen = assoc1->length;
+ areq_ctx->headlen = 8;
+ areq_ctx->trailen = 4;
areq_ctx->sg = dst;
areq_ctx->complete = authenc_esn_geniv_ahash_done;
@@ -563,8 +553,6 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
struct scatterlist *cipher = areq_ctx->cipher;
struct scatterlist *hsg = areq_ctx->hsg;
struct scatterlist *tsg = areq_ctx->tsg;
- struct scatterlist *assoc1;
- struct scatterlist *assoc2;
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
struct page *srcp;
u8 *vsrc;
@@ -580,27 +568,19 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
cryptlen += ivsize;
}
- if (sg_is_last(assoc))
- return -EINVAL;
-
- assoc1 = assoc + 1;
- if (sg_is_last(assoc1))
- return -EINVAL;
-
- assoc2 = assoc + 2;
- if (!sg_is_last(assoc2))
+ if (assoc->length < 12)
return -EINVAL;
sg_init_table(hsg, 2);
- sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
- sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+ sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+ sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
sg_init_table(tsg, 1);
- sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+ sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
areq_ctx->cryptlen = cryptlen;
- areq_ctx->headlen = assoc->length + assoc2->length;
- areq_ctx->trailen = assoc1->length;
+ areq_ctx->headlen = 8;
+ areq_ctx->trailen = 4;
areq_ctx->sg = src;
areq_ctx->complete = authenc_esn_verify_ahash_done;
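Both converted paths now assume the associated data is one contiguous 12-byte buffer and carve it by offset: the hashed head covers bytes 0-3 and 8-11 and the trailer bytes 4-7, matching the sg_set_page() offsets above. A plain-C sketch of that split (the struct is illustrative, not a crypto API):

    #include <string.h>

    struct esn_split {
        unsigned char head[8];  /* bytes 0-3 and 8-11 */
        unsigned char trail[4]; /* bytes 4-7          */
    };

    static void split_assoc(struct esn_split *out,
                            const unsigned char assoc[12])
    {
        memcpy(&out->head[0], &assoc[0], 4);
        memcpy(&out->head[4], &assoc[8], 4);
        memcpy(out->trail,    &assoc[4], 4);
    }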
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 815f75ef2411..2922f1f252d5 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include <acpi/video.h>
ACPI_MODULE_NAME("video");
@@ -41,6 +42,7 @@ void acpi_video_unregister_backlight(void);
static bool backlight_notifier_registered;
static struct notifier_block backlight_nb;
+static struct work_struct backlight_notify_work;
static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@@ -262,6 +264,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
{ },
};
+/* Use a workqueue to avoid various lock ordering issues */
+static void acpi_video_backlight_notify_work(struct work_struct *work)
+{
+ if (acpi_video_get_backlight_type() != acpi_backlight_video)
+ acpi_video_unregister_backlight();
+}
+
static int acpi_video_backlight_notify(struct notifier_block *nb,
unsigned long val, void *bd)
{
@@ -269,9 +278,8 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
/* A raw bl registering may change video -> native */
if (backlight->props.type == BACKLIGHT_RAW &&
- val == BACKLIGHT_REGISTERED &&
- acpi_video_get_backlight_type() != acpi_backlight_video)
- acpi_video_unregister_backlight();
+ val == BACKLIGHT_REGISTERED)
+ schedule_work(&backlight_notify_work);
return NOTIFY_OK;
}
@@ -304,6 +312,8 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_video, NULL,
&video_caps, NULL);
+ INIT_WORK(&backlight_notify_work,
+ acpi_video_backlight_notify_work);
backlight_nb.notifier_call = acpi_video_backlight_notify;
backlight_nb.priority = 0;
if (backlight_register_notifier(&backlight_nb) == 0)
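
The video_detect change is the classic defer-to-workqueue pattern: the backlight notifier runs in a context where taking further locks could invert lock order, so it only schedules work, and the work item does the actual unregister. A minimal sketch of the pattern as a self-contained module, with hypothetical names:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct work_struct demo_work;

/* Runs in process context: safe to take the locks the notifier
 * context could not. */
static void demo_work_fn(struct work_struct *work)
{
	pr_info("deferred work ran\n");
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_work, demo_work_fn);
	schedule_work(&demo_work);	/* what the notifier would do */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");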
diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c
index ce1e3a885981..14b7305d2ba0 100644
--- a/drivers/ata/ahci_brcmstb.c
+++ b/drivers/ata/ahci_brcmstb.c
@@ -92,7 +92,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
* Other architectures (e.g., ARM) either do not support big endian, or
* else leave I/O in little endian mode.
*/
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(addr);
else
return readl_relaxed(addr);
@@ -101,7 +101,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
{
/* See brcm_sata_readreg() comments */
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(val, addr);
else
writel_relaxed(val, addr);
@@ -209,6 +209,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
}
+#ifdef CONFIG_PM_SLEEP
static int brcm_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
@@ -231,6 +232,7 @@ static int brcm_ahci_resume(struct device *dev)
brcm_sata_phys_enable(priv);
return ahci_platform_resume(dev);
}
+#endif
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
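
The ahci_brcmstb fix works because IS_ENABLED() expands to 1 only for macros defined to 1 (or to the _MODULE variant), which is the convention Kconfig symbols follow; __BIG_ENDIAN is defined to 4321 by the byteorder headers, so the old test could never be true. A sketch of the macro's behaviour, assuming kernel context:

#include <linux/kconfig.h>

#define DEMO_LIKE_KCONFIG	1	/* how CONFIG_FOO=y appears */
#define DEMO_LIKE_BYTEORDER	4321	/* how __BIG_ENDIAN appears */

static inline int demo_is_enabled(void)
{
	int a = IS_ENABLED(DEMO_LIKE_KCONFIG);	 /* 1 */
	int b = IS_ENABLED(DEMO_LIKE_BYTEORDER); /* 0: not defined to 1 */

	return a + b;	/* 1 */
}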
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index db5d9f79a247..19bcb80b2031 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
* RETURNS:
* Block address read from @tf.
*/
-u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
u64 block = 0;
- if (!dev || tf->flags & ATA_TFLAG_LBA) {
+ if (tf->flags & ATA_TFLAG_LBA) {
if (tf->flags & ATA_TFLAG_LBA48) {
block |= (u64)tf->hob_lbah << 40;
block |= (u64)tf->hob_lbam << 32;
@@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
return 0;
}
-static void ata_dev_config_sense_reporting(struct ata_device *dev)
-{
- unsigned int err_mask;
-
- if (!ata_id_has_sense_reporting(dev->id))
- return;
-
- if (ata_id_sense_reporting_enabled(dev->id))
- return;
-
- err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
- if (err_mask) {
- ata_dev_dbg(dev,
- "failed to enable Sense Data Reporting, Emask 0x%x\n",
- err_mask);
- }
-}
-
/**
* ata_dev_configure - Configure the specified ATA/ATAPI device
* @dev: Target device to configure
@@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
dev->devslp_timing[i] = sata_setting[j];
}
}
- ata_dev_config_sense_reporting(dev);
+
dev->cdb_len = 16;
}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7465031a893c..cb0508af1459 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
tf->hob_lbah = buf[10];
tf->nsect = buf[12];
tf->hob_nsect = buf[13];
- if (ata_id_has_ncq_autosense(dev->id))
- tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
return 0;
}
@@ -1630,70 +1628,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
}
/**
- * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
- * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
- * @dfl_sense_key: default sense key to use
- *
- * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
- * SENSE. This function is EH helper.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- *
- * RETURNS:
- * encoded sense data on success, 0 on failure or if sense data
- * is not available.
- */
-static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
- struct scsi_cmnd *cmd)
-{
- struct ata_device *dev = qc->dev;
- struct ata_taskfile tf;
- unsigned int err_mask;
-
- if (!cmd)
- return 0;
-
- DPRINTK("ATA request sense\n");
- ata_dev_warn(dev, "request sense\n");
- if (!ata_id_sense_reporting_enabled(dev->id)) {
- ata_dev_warn(qc->dev, "sense data reporting disabled\n");
- return 0;
- }
- ata_tf_init(dev, &tf);
-
- tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
- tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
- tf.command = ATA_CMD_REQ_SENSE_DATA;
- tf.protocol = ATA_PROT_NODATA;
-
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
- /*
- * ACS-4 states:
- * The device may set the SENSE DATA AVAILABLE bit to one in the
- * STATUS field and clear the ERROR bit to zero in the STATUS field
- * to indicate that the command returned completion without an error
- * and the sense data described in table 306 is available.
- *
- * IOW the 'ATA_SENSE' bit might not be set even though valid
- * sense data is available.
- * So check for both.
- */
- if ((tf.command & ATA_SENSE) ||
- tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
- ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
- qc->flags |= ATA_QCFLAG_SENSE_VALID;
- ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
- tf.lbah, tf.lbam, tf.lbal);
- } else {
- ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
- tf.command, err_mask);
- }
- return err_mask;
-}
-
-/**
* atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
* @dev: device to perform REQUEST_SENSE to
* @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
@@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
memcpy(&qc->result_tf, &tf, sizeof(tf));
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
- if (qc->result_tf.auxiliary) {
- char sense_key, asc, ascq;
-
- sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
- asc = (qc->result_tf.auxiliary >> 8) & 0xff;
- ascq = qc->result_tf.auxiliary & 0xff;
- ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
- sense_key, asc, ascq);
- ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
- ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
- qc->flags |= ATA_QCFLAG_SENSE_VALID;
- }
-
ehc->i.err_mask &= ~AC_ERR_DEV;
}
@@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
return ATA_EH_RESET;
}
- /*
- * Sense data reporting does not work if the
- * device fault bit is set.
- */
- if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
- !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
- if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
- tmp = ata_eh_request_sense(qc, qc->scsicmd);
- if (tmp)
- qc->err_mask |= tmp;
- else
- ata_scsi_set_sense_information(qc->scsicmd, tf);
- } else {
- ata_dev_warn(qc->dev, "sense data available but port frozen\n");
- }
- }
-
- /* Set by NCQ autosense or request sense above */
- if (qc->flags & ATA_QCFLAG_SENSE_VALID)
- return 0;
-
if (stat & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
else
@@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
#ifdef CONFIG_ATA_VERBOSE_ERROR
if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
- ATA_SENSE | ATA_ERR)) {
+ ATA_ERR)) {
if (res->command & ATA_BUSY)
ata_dev_err(qc->dev, "status: { Busy }\n");
else
- ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
+ ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
res->command & ATA_DRDY ? "DRDY " : "",
res->command & ATA_DF ? "DF " : "",
res->command & ATA_DRQ ? "DRQ " : "",
- res->command & ATA_SENSE ? "SENSE " : "",
res->command & ATA_ERR ? "ERR " : "");
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 641a61a59e89..0d7f0da3a269 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
-void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
{
- if (!cmd)
- return;
-
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
}
-void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf)
-{
- u64 information;
-
- if (!cmd)
- return;
-
- information = ata_tf_read_block(tf, NULL);
- scsi_set_sense_information(cmd->sense_buffer, information);
-}
-
static ssize_t
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
((cdb[2] & 0x20) || need_sense)) {
ata_gen_passthru_sense(qc);
} else {
- if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
- cmd->result = SAM_STAT_CHECK_CONDITION;
- } else if (!need_sense) {
+ if (!need_sense) {
cmd->result = SAM_STAT_GOOD;
} else {
/* TODO: decide which descriptor format to use
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index a998a175f9f1..f840ca18a7c0 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
unsigned int tag);
-extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
- struct ata_device *dev);
+extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen,
@@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
struct scsi_host_template *sht);
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 3a18a8a719b4..fab504fd9cfd 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1238,8 +1238,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
readl(mmio + PDC_SDRAM_CONTROL);
/* Turn on for ECC */
- pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
- PDC_DIMM_SPD_TYPE, &spd0);
+ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ PDC_DIMM_SPD_TYPE, &spd0)) {
+ pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+ PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+ return 1;
+ }
if (spd0 == 0x02) {
data |= (0x01 << 16);
writel(data, mmio + PDC_SDRAM_CONTROL);
@@ -1380,8 +1384,12 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
	/* ECC initialization. */
- pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
- PDC_DIMM_SPD_TYPE, &spd0);
+ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ PDC_DIMM_SPD_TYPE, &spd0)) {
+ pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+ PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+ return 1;
+ }
if (spd0 == 0x02) {
void *buf;
VPRINTK("Start ECC initialization\n");
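
Both sata_sx4 hunks apply the same rule: never consume an out-parameter from a reader that signalled failure (pdc20621_i2c_read() returns nonzero on success). A compilable user-space sketch of the pattern, with demo_read() standing in for the i2c helper:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for pdc20621_i2c_read(): nonzero on success, fills *val. */
static bool demo_read(unsigned addr, unsigned sub, unsigned *val)
{
	(void)addr;
	(void)sub;
	*val = 0x02;
	return true;
}

static int demo_configure(void)
{
	unsigned spd0;

	if (!demo_read(0x50, 2, &spd0)) {
		fprintf(stderr, "i2c read failed\n");
		return 1;	/* propagate; never use spd0 here */
	}
	return spd0 == 0x02 ? 0 : 1;
}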
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 81751a49d8bf..56486d92c4e7 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
if (!blk)
return -ENOMEM;
- present = krealloc(rbnode->cache_present,
- BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
- if (!present) {
- kfree(blk);
- return -ENOMEM;
+ if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+ present = krealloc(rbnode->cache_present,
+ BITS_TO_LONGS(blklen) * sizeof(*present),
+ GFP_KERNEL);
+ if (!present) {
+ kfree(blk);
+ return -ENOMEM;
+ }
+
+ memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+ (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+ * sizeof(*present));
+ } else {
+ present = rbnode->cache_present;
}
/* insert the register value in the correct place in the rbnode block */
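
The regcache-rbtree fix reallocates the present-bits array only when the block actually grows, and zeroes the newly added longs, since krealloc() leaves the grown tail uninitialized. A minimal kernel-style sketch of that grow-only idiom, with illustrative names:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/errno.h>

static int demo_grow_present(unsigned long **present,
			     unsigned int old_bits, unsigned int new_bits)
{
	size_t old_longs = BITS_TO_LONGS(old_bits);
	size_t new_longs = BITS_TO_LONGS(new_bits);
	unsigned long *p;

	if (new_longs <= old_longs)
		return 0;	/* existing allocation still fits */

	p = krealloc(*present, new_longs * sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	/* krealloc() does not zero the grown tail */
	memset(p + old_longs, 0, (new_longs - old_longs) * sizeof(*p));
	*present = p;
	return 0;
}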
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d94529d5c8e9..bc67a93aa4f4 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -523,6 +523,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
# define rbd_assert(expr) ((void) 0)
#endif /* !RBD_DEBUG */
+static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1818,6 +1819,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
obj_request_done_set(obj_request);
}
+static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
+{
+ dout("%s: obj %p\n", __func__, obj_request);
+
+ if (obj_request_img_data_test(obj_request))
+ rbd_osd_copyup_callback(obj_request);
+ else
+ obj_request_done_set(obj_request);
+}
+
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
struct ceph_msg *msg)
{
@@ -1866,6 +1877,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
rbd_osd_discard_callback(obj_request);
break;
case CEPH_OSD_OP_CALL:
+ rbd_osd_call_callback(obj_request);
+ break;
case CEPH_OSD_OP_NOTIFY_ACK:
case CEPH_OSD_OP_WATCH:
rbd_osd_trivial_callback(obj_request);
@@ -2530,13 +2543,15 @@ out_unwind:
}
static void
-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
+rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request;
struct rbd_device *rbd_dev;
struct page **pages;
u32 page_count;
+ dout("%s: obj %p\n", __func__, obj_request);
+
rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
obj_request->type == OBJ_REQUEST_NODATA);
rbd_assert(obj_request_img_data_test(obj_request));
@@ -2563,9 +2578,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
if (!obj_request->result)
obj_request->xferred = obj_request->length;
- /* Finish up with the normal image object callback */
-
- rbd_img_obj_callback(obj_request);
+ obj_request_done_set(obj_request);
}
static void
@@ -2650,7 +2663,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
/* All set, send it off. */
- orig_request->callback = rbd_img_obj_copyup_callback;
osdc = &rbd_dev->rbd_client->client->osdc;
img_result = rbd_obj_request_submit(osdc, orig_request);
if (!img_result)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index ced96777b677..954c0029fb3b 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -369,8 +369,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
return;
}
- if (work_pending(&blkif->persistent_purge_work)) {
- pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
+ if (work_busy(&blkif->persistent_purge_work)) {
+ pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
return;
}
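
work_pending() is true only while the item sits queued; once the handler starts running it returns false, so the old test could let a new purge race with one still executing. work_busy() also reports WORK_BUSY_RUNNING. A one-function sketch of the guard:

#include <linux/workqueue.h>

/* True only when the item is neither queued nor currently running
 * on any CPU. */
static bool demo_can_rearm(struct work_struct *w)
{
	return !work_busy(w);
}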
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6d89ed35d80c..7a8a73f1fc04 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -179,6 +179,7 @@ static DEFINE_SPINLOCK(minor_lock);
((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
static int blkfront_setup_indirect(struct blkfront_info *info);
+static int blkfront_gather_backend_features(struct blkfront_info *info);
static int get_id_from_freelist(struct blkfront_info *info)
{
@@ -1128,8 +1129,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
* Add the used indirect page back to the list of
* available pages for indirect grefs.
*/
- indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
- list_add(&indirect_page->lru, &info->indirect_pages);
+ if (!info->feature_persistent) {
+ indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+ list_add(&indirect_page->lru, &info->indirect_pages);
+ }
s->indirect_grants[i]->gref = GRANT_INVALID_REF;
list_add_tail(&s->indirect_grants[i]->node, &info->grants);
}
@@ -1519,7 +1522,7 @@ static int blkif_recover(struct blkfront_info *info)
info->shadow_free = info->ring.req_prod_pvt;
info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
- rc = blkfront_setup_indirect(info);
+ rc = blkfront_gather_backend_features(info);
if (rc) {
kfree(copy);
return rc;
@@ -1720,20 +1723,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
static int blkfront_setup_indirect(struct blkfront_info *info)
{
- unsigned int indirect_segments, segs;
+ unsigned int segs;
int err, i;
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-max-indirect-segments", "%u", &indirect_segments,
- NULL);
- if (err) {
- info->max_indirect_segments = 0;
+ if (info->max_indirect_segments == 0)
segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
- } else {
- info->max_indirect_segments = min(indirect_segments,
- xen_blkif_max_segments);
+ else
segs = info->max_indirect_segments;
- }
err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
if (err)
@@ -1797,6 +1793,68 @@ out_of_memory:
}
/*
+ * Gather all backend feature-*
+ */
+static int blkfront_gather_backend_features(struct blkfront_info *info)
+{
+ int err;
+ int barrier, flush, discard, persistent;
+ unsigned int indirect_segments;
+
+ info->feature_flush = 0;
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-barrier", "%d", &barrier,
+ NULL);
+
+ /*
+ * If there's no "feature-barrier" defined, then it means
+ * we're dealing with a very old backend which writes
+ * synchronously; nothing to do.
+ *
+ * If there are barriers, then we use flush.
+ */
+ if (!err && barrier)
+ info->feature_flush = REQ_FLUSH | REQ_FUA;
+ /*
+ * And if there is "feature-flush-cache" use that above
+	 * And if there is "feature-flush-cache", prefer it over
+	 * barriers.
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-flush-cache", "%d", &flush,
+ NULL);
+
+ if (!err && flush)
+ info->feature_flush = REQ_FLUSH;
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-discard", "%d", &discard,
+ NULL);
+
+ if (!err && discard)
+ blkfront_setup_discard(info);
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-persistent", "%u", &persistent,
+ NULL);
+ if (err)
+ info->feature_persistent = 0;
+ else
+ info->feature_persistent = persistent;
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-max-indirect-segments", "%u", &indirect_segments,
+ NULL);
+ if (err)
+ info->max_indirect_segments = 0;
+ else
+ info->max_indirect_segments = min(indirect_segments,
+ xen_blkif_max_segments);
+
+ return blkfront_setup_indirect(info);
+}
+
+/*
 * Invoked when the backend is finally 'ready' (and has produced the
 * details about the physical device - #sectors, size, etc).
*/
@@ -1807,7 +1865,6 @@ static void blkfront_connect(struct blkfront_info *info)
unsigned int physical_sector_size;
unsigned int binfo;
int err;
- int barrier, flush, discard, persistent;
switch (info->connected) {
case BLKIF_STATE_CONNECTED:
@@ -1864,48 +1921,7 @@ static void blkfront_connect(struct blkfront_info *info)
if (err != 1)
physical_sector_size = sector_size;
- info->feature_flush = 0;
-
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-barrier", "%d", &barrier,
- NULL);
-
- /*
- * If there's no "feature-barrier" defined, then it means
- * we're dealing with a very old backend which writes
- * synchronously; nothing to do.
- *
- * If there are barriers, then we use flush.
- */
- if (!err && barrier)
- info->feature_flush = REQ_FLUSH | REQ_FUA;
- /*
- * And if there is "feature-flush-cache" use that above
- * barriers.
- */
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-flush-cache", "%d", &flush,
- NULL);
-
- if (!err && flush)
- info->feature_flush = REQ_FLUSH;
-
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-discard", "%d", &discard,
- NULL);
-
- if (!err && discard)
- blkfront_setup_discard(info);
-
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-persistent", "%u", &persistent,
- NULL);
- if (err)
- info->feature_persistent = 0;
- else
- info->feature_persistent = persistent;
-
- err = blkfront_setup_indirect(info);
+ err = blkfront_gather_backend_features(info);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
info->xbdev->otherend);
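
blkfront_gather_backend_features() consolidates the optional xenstore probes so the connect and recovery paths read the same keys the same way; each xenbus_gather() error simply means "key not advertised" and selects a safe default. A sketch of that probe idiom, with a hypothetical key name:

#include <xen/xenbus.h>

static unsigned int demo_read_feature(struct xenbus_device *xbdev)
{
	unsigned int val;
	int err;

	err = xenbus_gather(XBT_NIL, xbdev->otherend,
			    "feature-demo", "%u", &val, NULL);
	if (err)
		return 0;	/* key absent: feature off */
	return val;
}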
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fb655e8d1e3b..763301c7828c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -496,10 +496,9 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
kfree(meta);
}
-static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
+static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
{
size_t num_pages;
- char pool_name[8];
struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
if (!meta)
@@ -512,7 +511,6 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
goto out_error;
}
- snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
if (!meta->mem_pool) {
pr_err("Error creating memory pool\n");
@@ -1031,7 +1029,7 @@ static ssize_t disksize_store(struct device *dev,
return -EINVAL;
disksize = PAGE_ALIGN(disksize);
- meta = zram_meta_alloc(zram->disk->first_minor, disksize);
+ meta = zram_meta_alloc(zram->disk->disk_name, disksize);
if (!meta)
return -ENOMEM;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index da8faf78536a..5643b65cee20 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
static void start_khwrngd(void)
{
hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
- if (hwrng_fill == ERR_PTR(-ENOMEM)) {
+ if (IS_ERR(hwrng_fill)) {
pr_err("hwrng_fill thread creation failed");
hwrng_fill = NULL;
}
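
kthread_run() reports failure through ERR_PTR(), and the errno is not guaranteed to be -ENOMEM, so IS_ERR() is the only safe test; comparing against one specific error pointer, as the old code did, misses every other failure. A minimal sketch:

#include <linux/kthread.h>
#include <linux/err.h>

static struct task_struct *demo_start(int (*fn)(void *))
{
	struct task_struct *t = kthread_run(fn, NULL, "demo");

	if (IS_ERR(t))
		return NULL;	/* any errno, not just -ENOMEM */
	return t;
}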
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 4b93a1efb36d..ac03ba49e9d1 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
div_hp, bit, is_lp, flags) \
PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \
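
The one-line clk-pxa3xx fix restores the intended register split: clock-enable bits 0-31 live in CKENA and bits 32 and up in CKENB, and the old macro returned them swapped. A user-space sketch of the selection logic, with plain variables standing in for the registers:

#include <stdint.h>

static volatile uint32_t CKENA, CKENB;	/* stand-ins for the registers */

static volatile uint32_t *cken_reg(unsigned int bit)
{
	return (bit > 31) ? &CKENB : &CKENA;	/* corrected arm order */
}

static void cken_set(unsigned int bit)
{
	*cken_reg(bit) |= 1u << (bit & 31);
}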
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index b8ff3c64cc45..c96de14036a0 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -661,6 +661,9 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
+ if (!ch->cs_enabled)
+ return;
+
sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
}
@@ -669,6 +672,9 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
+ if (!ch->cs_enabled)
+ return;
+
pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 2d59038dec43..86c7eb66bdfb 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -462,6 +462,7 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
BUG_ON(!imxtm->base);
imxtm->type = type;
+ imxtm->irq = irq;
_mxc_timer_init(imxtm);
}
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index ae5b2bd3a978..fa3dd840a837 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -180,7 +180,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
ret = exynos5250_cpufreq_init(exynos_info);
} else {
pr_err("%s: Unknown SoC type\n", __func__);
- return -ENODEV;
+ ret = -ENODEV;
}
if (ret)
@@ -188,12 +188,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
if (exynos_info->set_freq == NULL) {
dev_err(&pdev->dev, "No set_freq function (ERR)\n");
+ ret = -EINVAL;
goto err_vdd_arm;
}
arm_regulator = regulator_get(NULL, "vdd_arm");
if (IS_ERR(arm_regulator)) {
dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
+ ret = -EINVAL;
goto err_vdd_arm;
}
@@ -225,7 +227,7 @@ err_cpufreq_reg:
regulator_put(arm_regulator);
err_vdd_arm:
kfree(exynos_info);
- return -EINVAL;
+ return ret;
}
static struct platform_driver exynos_cpufreq_platdrv = {
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index e362860c2b50..cd593c1f66dc 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -20,7 +20,7 @@
#include <asm/clock.h>
#include <asm/idle.h>
-#include <asm/mach-loongson/loongson.h>
+#include <asm/mach-loongson64/loongson.h>
static uint nowait;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index dae1e8099969..f9c78751989e 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buflen_1;
u32 *sh_desc = ctx->sh_desc_fin, *desc;
dma_addr_t ptr = ctx->sh_desc_fin_dma;
- int sec4_sg_bytes;
+ int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
- sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+ sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+ sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
- (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
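
The caamhash bug was indexing with a byte count: the SEC4_SG_LEN_FIN flag belongs on the last table entry, so the offset added to the entry pointer must be an entry count, not the table size in bytes. A sketch with an illustrative entry layout:

#include <stdint.h>
#include <stddef.h>

struct demo_sg_entry {	/* illustrative layout */
	uint64_t addr;
	uint32_t len;
	uint32_t rsvd;
};
#define DEMO_LEN_FIN 0x40000000u

static void demo_mark_final(struct demo_sg_entry *tbl, size_t nentries)
{
	/* old bug: tbl + (nentries * sizeof(*tbl)) - 1, bytes as index */
	(tbl + nentries - 1)->len |= DEMO_LEN_FIN;
}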
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 7ba495f75370..402631a19a11 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -905,7 +905,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
crypt->mode |= NPE_OP_NOT_IN_PLACE;
/* This was never tested by Intel
* for more than one dst buffer, I think. */
- BUG_ON(req->dst->length < nbytes);
req_ctx->dst = NULL;
if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
flags, DMA_FROM_DEVICE))
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 08f8d5cd6334..becb738c897b 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -71,7 +71,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
struct nx_sg *out_sg;
u64 to_process = 0, leftover, total;
unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
@@ -114,17 +112,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
}
do {
- /*
- * to_process: the SHA256_BLOCK_SIZE data chunk to process in
- * this update. This value is also restricted by the sg list
- * limits.
- */
- to_process = total - to_process;
- to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ in_sg = nx_build_sg_list(in_sg,
(u8 *) sctx->buf,
&data_len,
max_sg_len);
@@ -133,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
rc = -EINVAL;
goto out;
}
+ used_sgs = in_sg - nx_ctx->in_sg;
}
+	/* to_process: the SHA256_BLOCK_SIZE-aligned chunk to be
+	 * processed in this iteration. This value is restricted by
+	 * the sg list limits and by the number of sgs already used
+	 * for leftover data (see above). Ideally we could allow
+	 * NX_PAGE_SIZE * max_sg_len, but the data may not be
+	 * aligned, so we must account for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
data_len = to_process - buf_len;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- to_process = (data_len + buf_len);
+ to_process = data_len + buf_len;
leftover = total - to_process;
/*
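
The reworked nx-sha loops bound each chunk by the scatter-gather entries still free: the leftover buffer already consumed used_sgs entries and one more is held back for alignment slack, so to_process is capped before being rounded down to a whole block. The same arithmetic as a standalone function, with constants assumed for SHA-256:

#include <stdint.h>

#define DEMO_PAGE_SIZE	4096u	/* assumed NX_PAGE_SIZE */
#define DEMO_BLOCK_SIZE	64u	/* SHA-256 block size */

static uint64_t demo_to_process(uint64_t total, unsigned int max_sg_len,
				unsigned int used_sgs)
{
	/* one entry is reserved for alignment slack */
	uint64_t cap = (uint64_t)(max_sg_len - 1 - used_sgs) * DEMO_PAGE_SIZE;
	uint64_t n = total < cap ? total : cap;

	return n & ~(uint64_t)(DEMO_BLOCK_SIZE - 1);
}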
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index aff0fe58eac0..b6e183d58d73 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -71,7 +71,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
struct nx_sg *out_sg;
u64 to_process, leftover = 0, total;
unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
@@ -114,18 +112,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
}
do {
- /*
- * to_process: the SHA512_BLOCK_SIZE data chunk to process in
- * this update. This value is also restricted by the sg list
- * limits.
- */
- to_process = total - leftover;
- to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
- leftover = total - to_process;
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ in_sg = nx_build_sg_list(in_sg,
(u8 *) sctx->buf,
&data_len, max_sg_len);
@@ -133,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
rc = -EINVAL;
goto out;
}
+ used_sgs = in_sg - nx_ctx->in_sg;
}
+	/* to_process: the SHA512_BLOCK_SIZE-aligned chunk to be
+	 * processed in this iteration. This value is restricted by
+	 * the sg list limits and by the number of sgs already used
+	 * for leftover data (see above). Ideally we could allow
+	 * NX_PAGE_SIZE * max_sg_len, but the data may not be
+	 * aligned, so we must account for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
data_len = to_process - buf_len;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
@@ -146,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
goto out;
}
- to_process = (data_len + buf_len);
+ to_process = data_len + buf_len;
leftover = total - to_process;
/*
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 067402c7c2a9..df427c0e9e7b 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -73,7 +73,8 @@
ICP_QAT_HW_CIPHER_KEY_CONVERT, \
ICP_QAT_HW_CIPHER_DECRYPT)
-static atomic_t active_dev;
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
struct qat_alg_buf {
uint32_t len;
@@ -1280,7 +1281,10 @@ static struct crypto_alg qat_algs[] = { {
int qat_algs_register(void)
{
- if (atomic_add_return(1, &active_dev) == 1) {
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs == 1) {
int i;
for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
@@ -1289,21 +1293,25 @@ int qat_algs_register(void)
CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
- return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
}
- return 0;
+ mutex_unlock(&algs_lock);
+ return ret;
}
int qat_algs_unregister(void)
{
- if (atomic_sub_return(1, &active_dev) == 0)
- return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
- return 0;
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (--active_devs == 0)
+ ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ mutex_unlock(&algs_lock);
+ return ret;
}
int qat_algs_init(void)
{
- atomic_set(&active_dev, 0);
crypto_get_default_rng();
return 0;
}
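
The qat change replaces an atomic counter with a mutex because increment-and-register must be one unit: with atomics, a second device could observe the count already raised while registration had not finished, and racing register/unregister calls could interleave. A sketch of the mutex-serialized register-once pattern (callback name hypothetical; the rollback on failure is our addition, not in the driver):

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static unsigned int demo_users;

static int demo_get(int (*register_cb)(void))
{
	int ret = 0;

	mutex_lock(&demo_lock);
	if (++demo_users == 1)
		ret = register_cb();	/* first user registers */
	if (ret)
		--demo_users;		/* rollback: our addition */
	mutex_unlock(&demo_lock);
	return ret;
}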
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 4a4cce15f25d..3ff284c8e3d5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -689,6 +689,10 @@ struct dma_chan *dma_request_slave_channel(struct device *dev,
struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
if (IS_ERR(ch))
return NULL;
+
+ dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
+ ch->device->privatecnt++;
+
return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 3515b381c131..711d8ad74f11 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
*/
for (row = 0; row < mci->nr_csrows; row++) {
- struct csrow_info *csi = &mci->csrows[row];
+ struct csrow_info *csi = mci->csrows[row];
/*
* Get the configuration settings for this
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 080d5cc27055..eebdf2a33bfe 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -200,7 +200,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
if (status) {
dev_err(&pdev->dev, "failed to register extcon device\n");
- kfree(palmas_usb->edev->name);
return status;
}
@@ -214,7 +213,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
palmas_usb->id_irq, status);
- kfree(palmas_usb->edev->name);
return status;
}
}
@@ -229,7 +227,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
palmas_usb->vbus_irq, status);
- kfree(palmas_usb->edev->name);
return status;
}
}
@@ -239,15 +236,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
return 0;
}
-static int palmas_usb_remove(struct platform_device *pdev)
-{
- struct palmas_usb *palmas_usb = platform_get_drvdata(pdev);
-
- kfree(palmas_usb->edev->name);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int palmas_usb_suspend(struct device *dev)
{
@@ -288,7 +276,6 @@ static const struct of_device_id of_palmas_match_tbl[] = {
static struct platform_driver palmas_usb_driver = {
.probe = palmas_usb_probe,
- .remove = palmas_usb_remove,
.driver = {
.name = "palmas-usb",
.of_match_table = of_palmas_match_tbl,
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 76157ab9faf3..43b57b02d050 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -124,25 +124,35 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
return -EINVAL;
}
-static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
+static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
{
- unsigned int id = EXTCON_NONE;
+	int id = -EINVAL;
int i = 0;
- if (edev->max_supported == 0)
- return -EINVAL;
-
- /* Find the the number of extcon cable */
+ /* Find the id of extcon cable */
while (extcon_name[i]) {
if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
id = i;
break;
}
+ i++;
}
- if (id == EXTCON_NONE)
+ return id;
+}
+
+static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
+{
+	int id;
+
+ if (edev->max_supported == 0)
return -EINVAL;
+	/* Find the index of the extcon cable */
+ id = find_cable_id_by_name(edev, name);
+ if (id < 0)
+ return id;
+
return find_cable_index_by_id(edev, id);
}
@@ -228,9 +238,11 @@ static ssize_t cable_state_show(struct device *dev,
struct extcon_cable *cable = container_of(attr, struct extcon_cable,
attr_state);
+ int i = cable->cable_index;
+
return sprintf(buf, "%d\n",
extcon_get_cable_state_(cable->edev,
- cable->cable_index));
+ cable->edev->supported_cable[i]));
}
/**
@@ -263,20 +275,25 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
spin_lock_irqsave(&edev->lock, flags);
if (edev->state != ((edev->state & ~mask) | (state & mask))) {
+ u32 old_state;
+
if (check_mutually_exclusive(edev, (edev->state & ~mask) |
(state & mask))) {
spin_unlock_irqrestore(&edev->lock, flags);
return -EPERM;
}
- for (index = 0; index < edev->max_supported; index++) {
- if (is_extcon_changed(edev->state, state, index, &attached))
- raw_notifier_call_chain(&edev->nh[index], attached, edev);
- }
-
+ old_state = edev->state;
edev->state &= ~mask;
edev->state |= state & mask;
+ for (index = 0; index < edev->max_supported; index++) {
+ if (is_extcon_changed(old_state, edev->state, index,
+ &attached))
+ raw_notifier_call_chain(&edev->nh[index],
+ attached, edev);
+ }
+
/* This could be in interrupt handler */
prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
if (prop_buf) {
@@ -361,8 +378,13 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
*/
int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
{
- return extcon_get_cable_state_(edev, find_cable_index_by_name
- (edev, cable_name));
+	int id;
+
+ id = find_cable_id_by_name(edev, cable_name);
+ if (id < 0)
+ return id;
+
+ return extcon_get_cable_state_(edev, id);
}
EXPORT_SYMBOL_GPL(extcon_get_cable_state);
@@ -404,8 +426,13 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
int extcon_set_cable_state(struct extcon_dev *edev,
const char *cable_name, bool cable_state)
{
- return extcon_set_cable_state_(edev, find_cable_index_by_name
- (edev, cable_name), cable_state);
+	int id;
+
+ id = find_cable_id_by_name(edev, cable_name);
+ if (id < 0)
+ return id;
+
+ return extcon_set_cable_state_(edev, id, cable_state);
}
EXPORT_SYMBOL_GPL(extcon_set_cable_state);
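
Note the id type fixes above: find_cable_id_by_name() returns a negative errno, so callers must keep the result in a signed int or the `id < 0` checks can never fire. A tiny compilable illustration of the pitfall:

#include <stdio.h>

static int find_id(const char *name)
{
	return name ? 3 : -22;	/* errno-style negative on failure */
}

int main(void)
{
	int id = find_id(NULL);	/* signed, so the check below works */

	if (id < 0) {
		fprintf(stderr, "lookup failed: %d\n", id);
		return 1;
	}
	printf("id=%d\n", id);
	return 0;
}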
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index 87add3fdce52..e41594510b97 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -245,4 +245,4 @@ char *bcm47xx_nvram_get_contents(size_t *nvram_size)
}
EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 31b00f91cfcd..f7b49d5ce4b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1130,6 +1130,9 @@ struct amdgpu_gfx {
uint32_t me_feature_version;
uint32_t ce_feature_version;
uint32_t pfp_feature_version;
+ uint32_t rlc_feature_version;
+ uint32_t mec_feature_version;
+ uint32_t mec2_feature_version;
struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
unsigned num_gfx_rings;
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
@@ -1639,6 +1642,7 @@ struct amdgpu_sdma {
/* SDMA firmware */
const struct firmware *fw;
uint32_t fw_version;
+ uint32_t feature_version;
struct amdgpu_ring ring;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 9736892bcdf9..3bfe67de8349 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -317,16 +317,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
break;
case AMDGPU_INFO_FW_GFX_RLC:
fw_info.ver = adev->gfx.rlc_fw_version;
- fw_info.feature = 0;
+ fw_info.feature = adev->gfx.rlc_feature_version;
break;
case AMDGPU_INFO_FW_GFX_MEC:
- if (info->query_fw.index == 0)
+ if (info->query_fw.index == 0) {
fw_info.ver = adev->gfx.mec_fw_version;
- else if (info->query_fw.index == 1)
+ fw_info.feature = adev->gfx.mec_feature_version;
+ } else if (info->query_fw.index == 1) {
fw_info.ver = adev->gfx.mec2_fw_version;
- else
+ fw_info.feature = adev->gfx.mec2_feature_version;
+ } else
return -EINVAL;
- fw_info.feature = 0;
break;
case AMDGPU_INFO_FW_SMC:
fw_info.ver = adev->pm.fw_version;
@@ -336,7 +337,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (info->query_fw.index >= 2)
return -EINVAL;
fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
- fw_info.feature = 0;
+ fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2f7a5efa21c2..f5c22556ec2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -374,7 +374,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
unsigned height_in_mb = ALIGN(height / 16, 2);
unsigned fs_in_mb = width_in_mb * height_in_mb;
- unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
+ unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size;
image_size = width * height;
image_size += image_size / 2;
@@ -466,6 +466,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
min_dpb_size = image_size * num_dpb_buffer;
+ min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
+ * 16 * num_dpb_buffer + 52 * 1024;
break;
default:
@@ -486,6 +488,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
buf_sizes[0x1] = dpb_size;
buf_sizes[0x2] = image_size;
+ buf_sizes[0x4] = min_ctx_size;
return 0;
}
@@ -628,6 +631,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
return -EINVAL;
}
+ } else if (cmd == 0x206) {
+ if ((end - start) < ctx->buf_sizes[4]) {
+			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
+ (unsigned)(end - start),
+ ctx->buf_sizes[4]);
+ return -EINVAL;
+ }
} else if ((cmd != 0x100) && (cmd != 0x204)) {
DRM_ERROR("invalid UVD command %X!\n", cmd);
return -EINVAL;
@@ -755,9 +765,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
struct amdgpu_uvd_cs_ctx ctx = {};
unsigned buf_sizes[] = {
[0x00000000] = 2048,
- [0x00000001] = 32 * 1024 * 1024,
- [0x00000002] = 2048 * 1152 * 3,
+ [0x00000001] = 0xFFFFFFFF,
+ [0x00000002] = 0xFFFFFFFF,
[0x00000003] = 2048,
+ [0x00000004] = 0xFFFFFFFF,
};
struct amdgpu_ib *ib = &parser->ibs[ib_idx];
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ab83cc1ca4cc..15df46c93f0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -500,6 +500,7 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
fw_data = (const __le32 *)
(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 2db6ab0a543d..0d8bf2cb1956 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3080,6 +3080,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
+ adev->gfx.mec_feature_version = le32_to_cpu(
+ mec_hdr->ucode_feature_version);
gfx_v7_0_cp_compute_enable(adev, false);
@@ -3102,6 +3104,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version = le32_to_cpu(
+ mec2_hdr->ucode_feature_version);
/* MEC2 */
fw_data = (const __le32 *)
@@ -4066,6 +4070,8 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
amdgpu_ucode_print_rlc_hdr(&hdr->header);
adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(
+ hdr->ucode_feature_version);
gfx_v7_0_rlc_stop(adev);
@@ -5122,7 +5128,7 @@ static void gfx_v7_0_print_status(void *handle)
dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n",
RREG32(mmCP_HPD_EOP_CONTROL));
- for (queue = 0; queue < 8; i++) {
+ for (queue = 0; queue < 8; queue++) {
cik_srbm_select(adev, me, pipe, queue, 0);
dev_info(adev->dev, " queue: %d\n", queue);
dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 9e1d4ddbf475..20e2cfd521d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -587,6 +587,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
int err;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
DRM_DEBUG("\n");
@@ -611,6 +612,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -619,6 +623,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.me_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -627,12 +634,18 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
@@ -641,6 +654,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
@@ -648,6 +664,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw_version = le32_to_cpu(
+ cp_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version = le32_to_cpu(
+ cp_hdr->ucode_feature_version);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
@@ -1983,6 +2005,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 2;
adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_backends_per_se = 2;
switch (adev->pdev->revision) {
case 0xc4:
@@ -1991,7 +2014,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
case 0xcc:
/* B10 */
adev->gfx.config.max_cu_per_sh = 8;
- adev->gfx.config.max_backends_per_se = 2;
break;
case 0xc5:
case 0x81:
@@ -2000,14 +2022,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
case 0xcd:
/* B8 */
adev->gfx.config.max_cu_per_sh = 6;
- adev->gfx.config.max_backends_per_se = 2;
break;
case 0xc6:
case 0xca:
case 0xce:
/* B6 */
adev->gfx.config.max_cu_per_sh = 6;
- adev->gfx.config.max_backends_per_se = 2;
break;
case 0xc7:
case 0x87:
@@ -2015,7 +2035,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
default:
/* B4 */
adev->gfx.config.max_cu_per_sh = 4;
- adev->gfx.config.max_backends_per_se = 1;
break;
}
@@ -2275,7 +2294,6 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
amdgpu_ucode_print_rlc_hdr(&hdr->header);
- adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
@@ -2361,12 +2379,6 @@ static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
- adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
- adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
- adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
- adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
gfx_v8_0_cp_gfx_enable(adev, false);
@@ -2622,7 +2634,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
- adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
fw_data = (const __le32 *)
(adev->gfx.mec_fw->data +
@@ -2641,7 +2652,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
- adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
fw_data = (const __le32 *)
(adev->gfx.mec2_fw->data +
@@ -3125,7 +3135,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
AMDGPU_DOORBELL_KIQ << 2);
WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
- 0x7FFFF << 2);
+ AMDGPU_DOORBELL_MEC_RING7 << 2);
}
tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index d7895885fe0c..a988dfb1d394 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -121,6 +121,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
int err, i;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ const struct sdma_firmware_header_v1_0 *hdr;
DRM_DEBUG("\n");
@@ -142,6 +143,9 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->sdma[i].fw);
if (err)
goto out;
+ hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+ adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
if (adev->firmware.smu_load) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -541,8 +545,6 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-
fw_data = (const __le32 *)
(adev->sdma[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 7bb37b93993f..2b86569b18d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -159,6 +159,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
int err, i;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ const struct sdma_firmware_header_v1_0 *hdr;
DRM_DEBUG("\n");
@@ -183,6 +184,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->sdma[i].fw);
if (err)
goto out;
+ hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+ adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
if (adev->firmware.smu_load) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -630,8 +634,6 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-
fw_data = (const __le32 *)
(adev->sdma[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 6fad1f9648f3..ef6182bc8e5e 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -559,7 +559,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index aac212297b49..9dcc7280e572 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -196,7 +196,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
}
funcs = connector->helper_private;
- new_encoder = funcs->best_encoder(connector);
+
+ if (funcs->atomic_best_encoder)
+ new_encoder = funcs->atomic_best_encoder(connector,
+ connector_state);
+ else
+ new_encoder = funcs->best_encoder(connector);
if (!new_encoder) {
DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -229,13 +234,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
}
}
+ if (WARN_ON(!connector_state->crtc))
+ return -EINVAL;
+
connector_state->best_encoder = new_encoder;
- if (connector_state->crtc) {
- idx = drm_crtc_index(connector_state->crtc);
+ idx = drm_crtc_index(connector_state->crtc);
- crtc_state = state->crtc_states[idx];
- crtc_state->mode_changed = true;
- }
+ crtc_state = state->crtc_states[idx];
+ crtc_state->mode_changed = true;
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
connector->base.id,
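
A minimal userspace sketch of the dispatch the hunk above introduces, with the helper vtable reduced to the two hooks in question (real drivers supply these through struct drm_connector_helper_funcs): prefer the atomic-aware hook, which also sees the new connector state, and fall back to the legacy one otherwise.

    struct connector;
    struct connector_state;
    struct encoder { const char *name; };

    /* Simplified stand-in for drm_connector_helper_funcs: a driver may
     * implement either hook; the atomic variant also gets the state. */
    struct helper_funcs {
            struct encoder *(*atomic_best_encoder)(struct connector *c,
                                                   struct connector_state *s);
            struct encoder *(*best_encoder)(struct connector *c);
    };

    static struct encoder *pick_encoder(const struct helper_funcs *funcs,
                                        struct connector *c,
                                        struct connector_state *s)
    {
            /* Same precedence as update_connector_routing() above. */
            if (funcs->atomic_best_encoder)
                    return funcs->atomic_best_encoder(c, s);
            return funcs->best_encoder(c);
    }
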
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 778bbb6425b8..eb603f1defc2 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -873,9 +873,10 @@ static void drm_dp_destroy_port(struct kref *kref)
from an EDID retrieval */
if (port->connector) {
mutex_lock(&mgr->destroy_connector_lock);
- list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
+ list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
+ return;
}
drm_dp_port_teardown_pdt(port, port->pdt);
@@ -1294,7 +1295,6 @@ retry:
goto retry;
}
DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
- WARN(1, "fail\n");
return -EIO;
}
@@ -2660,7 +2660,7 @@ static void drm_dp_tx_work(struct work_struct *work)
static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
- struct drm_connector *connector;
+ struct drm_dp_mst_port *port;
/*
* Not a regular list traverse as we have to drop the destroy
@@ -2669,15 +2669,21 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
*/
for (;;) {
mutex_lock(&mgr->destroy_connector_lock);
- connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
- if (!connector) {
+ port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
+ if (!port) {
mutex_unlock(&mgr->destroy_connector_lock);
break;
}
- list_del(&connector->destroy_list);
+ list_del(&port->next);
mutex_unlock(&mgr->destroy_connector_lock);
- mgr->cbs->destroy_connector(mgr, connector);
+ mgr->cbs->destroy_connector(mgr, port->connector);
+
+ drm_dp_port_teardown_pdt(port, port->pdt);
+
+ if (!port->input && port->vcpi.vcpi > 0)
+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ kfree(port);
}
}
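
The destroy-connector worker above follows a common deferred-teardown shape: pop one element at a time so the list lock is never held across teardown work that may sleep. A minimal userspace sketch of that loop, with pthreads standing in for the kernel mutex and a hand-rolled singly linked list in place of list_head:

    #include <pthread.h>
    #include <stddef.h>

    struct node { struct node *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *pending;

    /* Detach exactly one entry under the lock, then release the lock
     * before any teardown runs -- the shape of
     * drm_dp_destroy_connector_work(). */
    static struct node *pop_pending(void)
    {
            struct node *n;

            pthread_mutex_lock(&lock);
            n = pending;
            if (n)
                    pending = n->next;
            pthread_mutex_unlock(&lock);
            return n;
    }

    static void destroy_all(void (*teardown)(struct node *))
    {
            struct node *n;

            while ((n = pop_pending()) != NULL)
                    teardown(n);    /* runs with the lock dropped */
    }
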
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f9cc68fbd2a3..b50fa0afd907 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -75,7 +75,7 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600)
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
static void store_vblank(struct drm_device *dev, int crtc,
- unsigned vblank_count_inc,
+ u32 vblank_count_inc,
struct timeval *t_vblank)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 842d6b8dc3c4..2a652359af64 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev)
spin_lock_init(&ctx->lock);
platform_set_drvdata(pdev, ctx);
- pm_runtime_set_active(dev);
pm_runtime_enable(dev);
ret = exynos_drm_ippdrv_register(ippdrv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 8040ed2a831f..f1c6b76c127f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -593,8 +593,7 @@ static int gsc_src_set_transf(struct device *dev,
gsc_write(cfg, GSC_IN_CON);
- ctx->rotation = cfg &
- (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
*swap = ctx->rotation;
return 0;
@@ -857,8 +856,7 @@ static int gsc_dst_set_transf(struct device *dev,
gsc_write(cfg, GSC_IN_CON);
- ctx->rotation = cfg &
- (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
*swap = ctx->rotation;
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 99e286489031..4a00990e4ae4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1064,6 +1064,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
{
struct hdmi_context *hdata = ctx_from_connector(connector);
struct edid *edid;
+ int ret;
if (!hdata->ddc_adpt)
return -ENODEV;
@@ -1079,7 +1080,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
- return drm_add_edid_modes(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+
+ return ret;
}
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index cae98db33062..4706b56902b4 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -718,6 +718,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
/* handling VSYNC */
if (val & MXR_INT_STATUS_VSYNC) {
+ /* the vsync interrupt uses different bits for read and clear */
+ val |= MXR_INT_CLEAR_VSYNC;
+ val &= ~MXR_INT_STATUS_VSYNC;
+
/* interlace scan need to check shadow register */
if (ctx->interlace) {
base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
@@ -743,11 +747,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
out:
/* clear interrupts */
- if (~val & MXR_INT_EN_VSYNC) {
- /* vsync interrupt use different bit for read and clear */
- val &= ~MXR_INT_EN_VSYNC;
- val |= MXR_INT_CLEAR_VSYNC;
- }
mixer_reg_write(res, MXR_INT_STATUS, val);
spin_unlock(&res->reg_slock);
@@ -907,8 +906,8 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
}
/* enable vsync interrupt */
- mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
- MXR_INT_EN_VSYNC);
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+ mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
return 0;
}
@@ -918,7 +917,13 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
+ if (!mixer_ctx->powered) {
+ mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
+ return;
+ }
+
/* disable vsync interrupt */
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
@@ -1047,6 +1052,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
+ if (ctx->int_en & MXR_INT_EN_VSYNC)
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
mixer_win_reset(ctx);
}
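
The mixer fix hinges on the interrupt status register being write-1-to-clear through a different bit than the one that reports the event. A hedged sketch with illustrative bit positions (not the real MXR register layout):

    #include <stdint.h>

    #define INT_STATUS_VSYNC  (1u << 0)   /* read side: event pending   */
    #define INT_CLEAR_VSYNC   (1u << 16)  /* write side: acknowledge it */

    /* Acknowledge vsync by writing the clear bit back, never by echoing
     * the status bit itself, as mixer_irq_handler() now does above. */
    static uint32_t ack_vsync(uint32_t val)
    {
            val &= ~INT_STATUS_VSYNC;  /* don't write the read-only bit */
            val |= INT_CLEAR_VSYNC;    /* write-1-to-clear acknowledge  */
            return val;                /* caller writes MXR_INT_STATUS  */
    }
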
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index fe1599d75f14..424228be79ae 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -606,8 +606,6 @@ static void
tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr,
uint8_t *buf, size_t size)
{
- buf[PB(0)] = tda998x_cksum(buf, size);
-
reg_clear(priv, REG_DIP_IF_FLAGS, bit);
reg_write_range(priv, addr, buf, size);
reg_set(priv, REG_DIP_IF_FLAGS, bit);
@@ -627,6 +625,8 @@ tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
buf[PB(4)] = p->audio_frame[4];
buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
+ buf[PB(0)] = tda998x_cksum(buf, sizeof(buf));
+
tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
sizeof(buf));
}
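
The tda998x change moves the checksum out of the generic write path so it is computed only after every payload byte is in place. A standalone sketch of that ordering, with a toy byte-sum checksum standing in for tda998x_cksum() (the exact math is an assumption here):

    #include <stdint.h>
    #include <string.h>

    /* Toy checksum: negate the byte sum so the whole frame sums to 0. */
    static uint8_t cksum(const uint8_t *buf, size_t size)
    {
            unsigned int sum = 0;
            size_t i;

            for (i = 0; i < size; i++)
                    sum += buf[i];
            return (uint8_t)(0x100 - (sum & 0xff));
    }

    /* len must be <= size - 1; byte 0 is reserved for the checksum. */
    static void build_infoframe(uint8_t *buf, size_t size,
                                const uint8_t *payload, size_t len)
    {
            memset(buf, 0, size);
            memcpy(buf + 1, payload, len);
            /* Compute last: checksumming before the payload was filled
             * in was exactly the bug fixed above. */
            buf[0] = cksum(buf, size);
    }
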
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 7ed8033aae60..8e35e0d013df 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -129,8 +129,9 @@ int intel_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool async)
{
- int ret;
- int i;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int ret, i;
if (async) {
DRM_DEBUG_KMS("i915 does not yet support async commit\n");
@@ -142,48 +143,18 @@ int intel_atomic_commit(struct drm_device *dev,
return ret;
/* Point of no return */
-
- /*
- * FIXME: The proper sequence here will eventually be:
- *
- * drm_atomic_helper_swap_state(dev, state)
- * drm_atomic_helper_commit_modeset_disables(dev, state);
- * drm_atomic_helper_commit_planes(dev, state);
- * drm_atomic_helper_commit_modeset_enables(dev, state);
- * drm_atomic_helper_wait_for_vblanks(dev, state);
- * drm_atomic_helper_cleanup_planes(dev, state);
- * drm_atomic_state_free(state);
- *
- * once we have full atomic modeset. For now, just manually update
- * plane states to avoid clobbering good states with dummy states
- * while nuclear pageflipping.
- */
- for (i = 0; i < dev->mode_config.num_total_plane; i++) {
- struct drm_plane *plane = state->planes[i];
-
- if (!plane)
- continue;
-
- plane->state->state = state;
- swap(state->plane_states[i], plane->state);
- plane->state->state = NULL;
- }
+ drm_atomic_helper_swap_state(dev, state);
/* swap crtc_scaler_state */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- if (!crtc) {
- continue;
- }
-
- to_intel_crtc(crtc)->config->scaler_state =
- to_intel_crtc_state(state->crtc_states[i])->scaler_state;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
if (INTEL_INFO(dev)->gen >= 9)
skl_detach_scalers(to_intel_crtc(crtc));
+
+ drm_atomic_helper_commit_planes_on_crtc(crtc_state);
}
- drm_atomic_helper_commit_planes(dev, state);
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_state_free(state);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 30e0f54ba19d..87476ff181dd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11826,7 +11826,9 @@ encoder_retry:
goto encoder_retry;
}
- pipe_config->dither = pipe_config->pipe_bpp != base_bpp;
+ /* Dithering seems not to pass bits through correctly when it should, so
+ * only enable it on 6bpc panels. */
+ pipe_config->dither = pipe_config->pipe_bpp == 6*3;
DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
@@ -12624,17 +12626,17 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
modeset_update_crtc_power_domains(state);
- drm_atomic_helper_commit_planes(dev, state);
-
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!needs_modeset(crtc->state) || !crtc->state->enable)
+ if (!needs_modeset(crtc->state) || !crtc->state->enable) {
+ drm_atomic_helper_commit_planes_on_crtc(crtc_state);
continue;
+ }
update_scanline_offset(to_intel_crtc(crtc));
dev_priv->display.crtc_enable(crtc);
- intel_crtc_enable_planes(crtc);
+ drm_atomic_helper_commit_planes_on_crtc(crtc_state);
}
/* FIXME: add subpixel order */
@@ -12891,20 +12893,11 @@ intel_modeset_stage_output_state(struct drm_device *dev,
return 0;
}
-static bool primary_plane_visible(struct drm_crtc *crtc)
-{
- struct intel_plane_state *plane_state =
- to_intel_plane_state(crtc->primary->state);
-
- return plane_state->visible;
-}
-
static int intel_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
struct drm_atomic_state *state = NULL;
struct intel_crtc_state *pipe_config;
- bool primary_plane_was_visible;
int ret;
BUG_ON(!set);
@@ -12943,38 +12936,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
intel_update_pipe_size(to_intel_crtc(set->crtc));
- primary_plane_was_visible = primary_plane_visible(set->crtc);
-
ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
- if (ret == 0 &&
- pipe_config->base.enable &&
- pipe_config->base.planes_changed &&
- !needs_modeset(&pipe_config->base)) {
- struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
-
- /*
- * We need to make sure the primary plane is re-enabled if it
- * has previously been turned off.
- */
- if (ret == 0 && !primary_plane_was_visible &&
- primary_plane_visible(set->crtc)) {
- WARN_ON(!intel_crtc->active);
- intel_post_enable_primary(set->crtc);
- }
-
- /*
- * In the fastboot case this may be our only check of the
- * state after boot. It would be better to only do it on
- * the first update, but we don't have a nice way of doing that
- * (and really, set_config isn't used much for high freq page
- * flipping, so increasing its cost here shouldn't be a big
- * deal).
- */
- if (i915.fastboot && ret == 0)
- intel_modeset_check_state(set->crtc->dev);
- }
-
if (ret) {
DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
set->crtc->base.id, ret);
@@ -13305,6 +13268,9 @@ intel_check_primary_plane(struct drm_plane *plane,
*/
if (IS_BROADWELL(dev))
intel_crtc->atomic.wait_vblank = true;
+
+ if (crtc_state)
+ intel_crtc->atomic.post_enable_primary = true;
}
/*
@@ -13317,6 +13283,10 @@ intel_check_primary_plane(struct drm_plane *plane,
if (!state->visible || !fb)
intel_crtc->atomic.disable_ips = true;
+ if (!state->visible && old_state->visible &&
+ crtc_state && !needs_modeset(&crtc_state->base))
+ intel_crtc->atomic.pre_disable_primary = true;
+
intel_crtc->atomic.fb_bits |=
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
@@ -15034,6 +15004,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_plane_state *plane_state;
memset(crtc->config, 0, sizeof(*crtc->config));
+ crtc->config->base.crtc = &crtc->base;
crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6e8faa253792..1df0e1fe235f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -93,9 +93,6 @@ static const struct dp_link_dpll chv_dpll[] = {
static const int skl_rates[] = { 162000, 216000, 270000,
324000, 432000, 540000 };
-static const int chv_rates[] = { 162000, 202500, 210000, 216000,
- 243000, 270000, 324000, 405000,
- 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
/**
@@ -1169,24 +1166,31 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
+static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
+{
+ /* WaDisableHBR2:skl */
+ if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+ return false;
+
+ if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
+ (INTEL_INFO(dev)->gen >= 9))
+ return true;
+ else
+ return false;
+}
+
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
if (IS_SKYLAKE(dev)) {
*source_rates = skl_rates;
return ARRAY_SIZE(skl_rates);
- } else if (IS_CHERRYVIEW(dev)) {
- *source_rates = chv_rates;
- return ARRAY_SIZE(chv_rates);
}
*source_rates = default_rates;
- if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
- /* WaDisableHBR2:skl */
- return (DP_LINK_BW_2_7 >> 3) + 1;
- else if (INTEL_INFO(dev)->gen >= 8 ||
- (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+ /* This depends on the fact that 5.4 is the last value in the array */
+ if (intel_dp_source_supports_hbr2(dev))
return (DP_LINK_BW_5_4 >> 3) + 1;
else
return (DP_LINK_BW_2_7 >> 3) + 1;
@@ -3941,10 +3945,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
}
}
- /* Training Pattern 3 support, both source and sink */
+ /* Training Pattern 3 support: only Intel platforms that support HBR2
+ * also support TP3, hence that check is used along with the DPCD check
+ * to ensure TP3 can be enabled.
+ * SKL < B0: due to its WaDisableHBR2 workaround, it is the only
+ * exception where TP3 is supported but still not enabled.
+ */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
- (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
+ intel_dp_source_supports_hbr2(dev)) {
intel_dp->use_tps3 = true;
DRM_DEBUG_KMS("Displayport TPS3 supported\n");
} else
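
The return values above work because the DPCD link-bandwidth codes, shifted right by three, become consecutive indices into default_rates[]. A small sketch of that arithmetic (the DP_LINK_BW_* values are the standard DPCD codes):

    /* DPCD link-bandwidth codes: 0x06 = 1.62 Gbps/lane, 0x0a = 2.7,
     * 0x14 = 5.4.  (code >> 3) yields indices 0, 1, 2. */
    #define DP_LINK_BW_1_62 0x06
    #define DP_LINK_BW_2_7  0x0a
    #define DP_LINK_BW_5_4  0x14

    static const int default_rates[] = { 162000, 270000, 540000 };

    /* Usable entries: index of the top supported rate, plus one.  This
     * is why the comment notes that 5.4 must stay last in the array. */
    static int num_source_rates(int hbr2_supported)
    {
            int top = hbr2_supported ? DP_LINK_BW_5_4 : DP_LINK_BW_2_7;

            return (top >> 3) + 1;
    }
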
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 6e4cc5334f47..600afdbef8c9 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -357,6 +357,16 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct intel_crtc *crtc = to_intel_crtc(state->crtc);
+
+ return &intel_dp->mst_encoders[crtc->pipe]->base.base;
+}
+
static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -367,6 +377,7 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid,
+ .atomic_best_encoder = intel_mst_atomic_best_encoder,
.best_encoder = intel_mst_best_encoder,
};
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9b74ffae5f5a..7f2161a1ff5d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1012,6 +1012,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
goto unpin_ctx_obj;
+
+ ctx_obj->dirty = true;
}
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 52c22b026005..e10f9644140f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -166,30 +166,14 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
}
static int
-gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
-{
- struct nvkm_object *obj = (void *)chan;
- struct gk104_fifo_priv *priv = (void *)obj->engine;
-
- nv_wr32(priv, 0x002634, chan->base.chid);
- if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
- nv_error(priv, "channel %d [%s] kick timeout\n",
- chan->base.chid, nvkm_client_name(chan));
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int
gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
struct nvkm_object *object)
{
struct nvkm_bar *bar = nvkm_bar(parent);
+ struct gk104_fifo_priv *priv = (void *)parent->engine;
struct gk104_fifo_base *base = (void *)parent->parent;
struct gk104_fifo_chan *chan = (void *)parent;
u32 addr;
- int ret;
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_SW : return 0;
@@ -204,9 +188,13 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
return -EINVAL;
}
- ret = gk104_fifo_chan_kick(chan);
- if (ret && suspend)
- return ret;
+ nv_wr32(priv, 0x002634, chan->base.chid);
+ if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+ nv_error(priv, "channel %d [%s] kick timeout\n",
+ chan->base.chid, nvkm_client_name(chan));
+ if (suspend)
+ return -EBUSY;
+ }
if (addr) {
nv_wo32(base, addr + 0x00, 0x00000000);
@@ -331,7 +319,6 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
gk104_fifo_runlist_update(priv, chan->engine);
}
- gk104_fifo_chan_kick(chan);
nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
return nvkm_fifo_channel_fini(&chan->base, suspend);
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 1162bfa464f3..171d3e43c30c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ /* we can race here at startup; some boards seem to trigger
+ * hotplug irqs when they shouldn't. */
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
mutex_lock(&mode_config->mutex);
if (mode_config->num_connector) {
list_for_each_entry(connector, &mode_config->connector_list, head)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 654c8daeb5ab..97ad3bcb99a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
true, NULL);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = vmw_validate_buffers(dev_priv, sw_context);
if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_fifo_commit(dev_priv, command_size);
+ mutex_unlock(&dev_priv->binding_mutex);
vmw_query_bo_switch_commit(dev_priv, sw_context);
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
DRM_ERROR("Fence submission error. Syncing.\n");
vmw_resource_list_unreserve(&sw_context->resource_list, false);
- mutex_unlock(&dev_priv->binding_mutex);
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 3511bbaba505..e3c63640df73 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -462,12 +462,15 @@ out:
static void hidinput_cleanup_battery(struct hid_device *dev)
{
+ const struct power_supply_desc *psy_desc;
+
if (!dev->battery)
return;
+ psy_desc = dev->battery->desc;
power_supply_unregister(dev->battery);
- kfree(dev->battery->desc->name);
- kfree(dev->battery->desc);
+ kfree(psy_desc->name);
+ kfree(psy_desc);
dev->battery = NULL;
}
#else /* !CONFIG_HID_BATTERY_STRENGTH */
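
The hid-input hunk fixes a use-after-free: unregistering drops the last reference and frees the battery object, so any fields still needed afterwards must be saved first. A minimal userspace model of the pattern (free() stands in for the reference drop inside power_supply_unregister()):

    #include <stdlib.h>

    struct desc { char *name; };
    struct battery { struct desc *desc; };

    /* Models power_supply_unregister(): the battery object is gone
     * once this returns. */
    static void unregister_battery(struct battery *b)
    {
            free(b);
    }

    static void cleanup_battery(struct battery *b)
    {
            struct desc *d = b->desc;       /* save before b is freed */

            unregister_battery(b);          /* b is now dangling      */
            free(d->name);                  /* safe: d was saved      */
            free(d);
    }
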
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 94167310e15a..b905d501e752 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
for (p = drvdata->rdesc;
p <= drvdata->rdesc + drvdata->rsize - 4;) {
if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
- p[3] < sizeof(params)) {
+ p[3] < ARRAY_SIZE(params)) {
v = params[p[3]];
put_unaligned(cpu_to_le32(v), (s32 *)p);
p += 4;
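
The uclogic fix swaps sizeof() for ARRAY_SIZE() in a bounds check: sizeof() counts bytes, so on an array of 32-bit params it over-reports the element count by a factor of four and lets out-of-range indices through. A runnable illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            static const int32_t params[] = { 0, 1, 2, 3, 4, 5 };

            /* 24 bytes vs. the 6 entries a bounds check needs. */
            printf("sizeof: %zu, ARRAY_SIZE: %zu\n",
                   sizeof(params), ARRAY_SIZE(params));
            return 0;
    }
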
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 44958d79d598..01b937e63cf3 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1284,6 +1284,39 @@ fail_register_pen_input:
return error;
}
+/*
+ * Not all devices report physical dimensions from HID.
+ * Compute the default from the hardcoded logical dimensions
+ * and resolution before the driver overwrites them.
+ */
+static void wacom_set_default_phy(struct wacom_features *features)
+{
+ if (features->x_resolution) {
+ features->x_phy = (features->x_max * 100) /
+ features->x_resolution;
+ features->y_phy = (features->y_max * 100) /
+ features->y_resolution;
+ }
+}
+
+static void wacom_calculate_res(struct wacom_features *features)
+{
+ /* set unit to "100th of a mm" for devices not reported by HID */
+ if (!features->unit) {
+ features->unit = 0x11;
+ features->unitExpo = -3;
+ }
+
+ features->x_resolution = wacom_calc_hid_res(features->x_max,
+ features->x_phy,
+ features->unit,
+ features->unitExpo);
+ features->y_resolution = wacom_calc_hid_res(features->y_max,
+ features->y_phy,
+ features->unit,
+ features->unitExpo);
+}
+
static void wacom_wireless_work(struct work_struct *work)
{
struct wacom *wacom = container_of(work, struct wacom, work);
@@ -1341,6 +1374,8 @@ static void wacom_wireless_work(struct work_struct *work)
if (wacom_wac1->features.type != INTUOSHT &&
wacom_wac1->features.type != BAMBOO_PT)
wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD;
+ wacom_set_default_phy(&wacom_wac1->features);
+ wacom_calculate_res(&wacom_wac1->features);
snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen",
wacom_wac1->features.name);
snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad",
@@ -1359,7 +1394,9 @@ static void wacom_wireless_work(struct work_struct *work)
wacom_wac2->features =
*((struct wacom_features *)id->driver_data);
wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
+ wacom_set_default_phy(&wacom_wac2->features);
wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
+ wacom_calculate_res(&wacom_wac2->features);
snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX,
"%s (WL) Finger",wacom_wac2->features.name);
snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
@@ -1407,39 +1444,6 @@ void wacom_battery_work(struct work_struct *work)
}
}
-/*
- * Not all devices report physical dimensions from HID.
- * Compute the default from hardcoded logical dimension
- * and resolution before driver overwrites them.
- */
-static void wacom_set_default_phy(struct wacom_features *features)
-{
- if (features->x_resolution) {
- features->x_phy = (features->x_max * 100) /
- features->x_resolution;
- features->y_phy = (features->y_max * 100) /
- features->y_resolution;
- }
-}
-
-static void wacom_calculate_res(struct wacom_features *features)
-{
- /* set unit to "100th of a mm" for devices not reported by HID */
- if (!features->unit) {
- features->unit = 0x11;
- features->unitExpo = -3;
- }
-
- features->x_resolution = wacom_calc_hid_res(features->x_max,
- features->x_phy,
- features->unit,
- features->unitExpo);
- features->y_resolution = wacom_calc_hid_res(features->y_max,
- features->y_phy,
- features->unit,
- features->unitExpo);
-}
-
static size_t wacom_compute_pktlen(struct hid_device *hdev)
{
struct hid_report_enum *report_enum;
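
wacom_set_default_phy() and wacom_calculate_res() are near-inverses. Assuming resolution in counts/mm and physical size in hundredths of a mm (consistent with unit 0x11 and exponent -3 above), the round trip looks like this; the real wacom_calc_hid_res() additionally interprets arbitrary HID units:

    /* Physical size from logical extent and resolution, as in
     * wacom_set_default_phy(). */
    static int phy_from_res(int logical_max, int res_per_mm)
    {
            return (logical_max * 100) / res_per_mm;
    }

    /* Resolution back from physical size: the simplified core of
     * wacom_calc_hid_res() for this unit/exponent pair. */
    static int res_from_phy(int logical_max, int phy_hundredth_mm)
    {
            return (logical_max * 100) / phy_hundredth_mm;
    }
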
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 37c16afe007a..c8487894b312 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -929,6 +929,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
+static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+ {
+ /*
+ * CPU fan speed going up and down on Dell Studio XPS 8100
+ * for unknown reasons.
+ */
+ .ident = "Dell Studio XPS 8100",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
+ },
+ },
+ { }
+};
+
/*
* Probe for the presence of a supported laptop.
*/
@@ -940,7 +955,8 @@ static int __init i8k_probe(void)
/*
* Get DMI information
*/
- if (!dmi_check_system(i8k_dmi_table)) {
+ if (!dmi_check_system(i8k_dmi_table) ||
+ dmi_check_system(i8k_blacklist_dmi_table)) {
if (!ignore_dmi && !force)
return -ENODEV;
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index 9b55e673b67c..85d106fe3ce8 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -582,6 +582,7 @@ static const struct of_device_id g762_dt_match[] = {
{ .compatible = "gmt,g763" },
{ },
};
+MODULE_DEVICE_TABLE(of, g762_dt_match);
/*
* Grab clock (a required property), enable it, get (fixed) clock frequency
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 6153df735e82..08ff89d222e5 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -575,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = {
{"nct7904", 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, nct7904_id);
static struct i2c_driver nct7904_driver = {
.class = I2C_CLASS_HWMON,
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index af162b4c7a6d..025686d41640 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -692,7 +692,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iface);
- dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, "
+ dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Controller, "
"regs_base@%p\n", iface->regs_base);
return 0;
@@ -735,6 +735,6 @@ subsys_initcall(i2c_bfin_twi_init);
module_exit(i2c_bfin_twi_exit);
MODULE_AUTHOR("Bryan Wu, Sonic Zhang");
-MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver");
+MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:i2c-bfin-twi");
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index d1c22e3fdd14..fc9bf7f30e35 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1247,7 +1247,14 @@ static void omap_i2c_prepare_recovery(struct i2c_adapter *adap)
u32 reg;
reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
+ /* enable test mode */
reg |= OMAP_I2C_SYSTEST_ST_EN;
+ /* select SDA/SCL IO mode */
+ reg |= 3 << OMAP_I2C_SYSTEST_TMODE_SHIFT;
+ /* set SCL to high-impedance state (reset value is 0) */
+ reg |= OMAP_I2C_SYSTEST_SCL_O;
+ /* set SDA to high-impedance state (reset value is 0) */
+ reg |= OMAP_I2C_SYSTEST_SDA_O;
omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
}
@@ -1257,7 +1264,11 @@ static void omap_i2c_unprepare_recovery(struct i2c_adapter *adap)
u32 reg;
reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
+ /* restore reset values */
reg &= ~OMAP_I2C_SYSTEST_ST_EN;
+ reg &= ~OMAP_I2C_SYSTEST_TMODE_MASK;
+ reg &= ~OMAP_I2C_SYSTEST_SCL_O;
+ reg &= ~OMAP_I2C_SYSTEST_SDA_O;
omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
}
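
The omap hunks set up and tear down the SYSTEST register symmetrically: every bit or field touched on entry is restored to its reset value on exit. A sketch with illustrative bit positions (the real OMAP_I2C_SYSTEST_* layout lives in the driver header):

    #include <stdint.h>

    #define ST_EN           (1u << 15)      /* enable test mode        */
    #define TMODE_MASK      (3u << 12)      /* test-mode select field  */
    #define TMODE_SDA_SCL   (3u << 12)      /* drive SDA/SCL directly  */
    #define SCL_O           (1u << 1)       /* SCL out: 1 = released   */
    #define SDA_O           (1u << 0)       /* SDA out: 1 = released   */

    /* Enter recovery: test mode on, both lines released so the generic
     * recovery code can pulse SCL. */
    static uint32_t systest_prepare(uint32_t reg)
    {
            return reg | ST_EN | TMODE_SDA_SCL | SCL_O | SDA_O;
    }

    /* Leave recovery: put every touched field back to its reset value. */
    static uint32_t systest_unprepare(uint32_t reg)
    {
            return reg & ~(ST_EN | TMODE_MASK | SCL_O | SDA_O);
    }
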
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index e6d4935161e4..c83e4d13cfc5 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -567,6 +567,9 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
if (bri->prepare_recovery)
bri->prepare_recovery(adap);
+ bri->set_scl(adap, val);
+ ndelay(RECOVERY_NDELAY);
+
/*
* By this time SCL is high, as we need to give 9 falling-rising edges
*/
@@ -597,7 +600,6 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
int i2c_generic_scl_recovery(struct i2c_adapter *adap)
{
- adap->bus_recovery_info->set_scl(adap, 1);
return i2c_generic_recovery(adap);
}
EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
@@ -1338,13 +1340,17 @@ static int of_dev_node_match(struct device *dev, void *data)
struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
{
struct device *dev;
+ struct i2c_client *client;
- dev = bus_find_device(&i2c_bus_type, NULL, node,
- of_dev_node_match);
+ dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
if (!dev)
return NULL;
- return i2c_verify_client(dev);
+ client = i2c_verify_client(dev);
+ if (!client)
+ put_device(dev);
+
+ return client;
}
EXPORT_SYMBOL(of_find_i2c_device_by_node);
@@ -1352,13 +1358,17 @@ EXPORT_SYMBOL(of_find_i2c_device_by_node);
struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
{
struct device *dev;
+ struct i2c_adapter *adapter;
- dev = bus_find_device(&i2c_bus_type, NULL, node,
- of_dev_node_match);
+ dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
if (!dev)
return NULL;
- return i2c_verify_adapter(dev);
+ adapter = i2c_verify_adapter(dev);
+ if (!adapter)
+ put_device(dev);
+
+ return adapter;
}
EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
#else
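
Both of_find_* hunks plug the same reference leak: bus_find_device() hands back its match with a reference held, so a failed type check must drop that reference. A userspace model of the get/put discipline, with a counter standing in for the kobject reference:

    struct device { int refcount; int is_client; };

    static void get_device(struct device *d) { d->refcount++; }
    static void put_device(struct device *d) { d->refcount--; }

    /* Models bus_find_device(): the match comes back referenced. */
    static struct device *find_device(struct device *candidate)
    {
            get_device(candidate);
            return candidate;
    }

    /* Models i2c_verify_client(): NULL when the type is wrong. */
    static struct device *verify_client(struct device *d)
    {
            return d->is_client ? d : NULL;
    }

    static struct device *find_client(struct device *candidate)
    {
            struct device *dev = find_device(candidate);
            struct device *client = verify_client(dev);

            if (!client)
                    put_device(dev);  /* wrong type: balance the get */

            return client;            /* caller owns the reference   */
    }
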
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 822374654609..1da449614779 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -80,9 +80,6 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count > attr->size)
- return -EFBIG;
-
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
spin_lock_irqsave(&eeprom->buffer_lock, flags);
@@ -98,9 +95,6 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count > attr->size)
- return -EFBIG;
-
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
spin_lock_irqsave(&eeprom->buffer_lock, flags);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index c7aab48f07cd..92d518382a9f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -814,7 +814,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
printk(KERN_ERR MOD
"Unexpected cqe_status 0x%x for QPID=0x%0x\n",
CQE_STATUS(&cqe), CQE_QPID(&cqe));
- ret = -EINVAL;
+ wc->status = IB_WC_FATAL_ERR;
}
}
out:
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index 27b6a3ce18ca..891797ad76bc 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -196,7 +196,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs)
if (n_buttons[i] < 1)
continue;
- if (n_buttons[i] > 6) {
+ if (n_buttons[i] > ARRAY_SIZE(tgfx_buttons)) {
printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]);
err = -EINVAL;
goto err_unreg_devs;
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 097d7216d98e..c6dc644aa580 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -246,7 +246,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
* convert it to descriptor.
*/
if (!button->gpiod && gpio_is_valid(button->gpio)) {
- unsigned flags = 0;
+ unsigned flags = GPIOF_IN;
if (button->active_low)
flags |= GPIOF_ACTIVE_LOW;
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 10e140af5aac..1ac898db303a 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -292,3 +292,4 @@ module_platform_driver(axp20x_pek_driver);
MODULE_DESCRIPTION("axp20x Power Button");
MODULE_AUTHOR("Carlo Caione <[email protected]>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:axp20x-pek");
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index fc17b9592f54..10c4e3d462f1 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -183,7 +183,8 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
if (pdata && pdata->coexist)
return true;
- if (of_find_node_by_name(node, "codec")) {
+ node = of_find_node_by_name(node, "codec");
+ if (node) {
of_node_put(node);
return true;
}
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 113d6f1516a5..4d246861d692 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -20,6 +20,7 @@
#include <linux/input/mt.h>
#include <linux/serio.h>
#include <linux/libps2.h>
+#include <linux/dmi.h>
#include "psmouse.h"
#include "alps.h"
@@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
6-byte ALPS packet */
+#define ALPS_DELL 0x100 /* device is a Dell laptop */
#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
static const struct alps_model_info alps_model_data[] = {
@@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
return;
}
- /* Non interleaved V2 dualpoint has separate stick button bits */
+ /* Dell non interleaved V2 dualpoint has separate stick button bits */
if (priv->proto_version == ALPS_PROTO_V2 &&
- priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) {
+ priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
left |= packet[0] & 1;
right |= packet[0] & 2;
middle |= packet[0] & 4;
@@ -2550,6 +2552,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
priv->byte0 = protocol->byte0;
priv->mask0 = protocol->mask0;
priv->flags = protocol->flags;
+ if (dmi_name_in_vendors("Dell"))
+ priv->flags |= ALPS_DELL;
priv->x_max = 2000;
priv->y_max = 1400;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 22b9ca901f4e..2955f1d0ca6c 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -783,19 +783,26 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
struct elantech_data *etd = psmouse->private;
unsigned char *packet = psmouse->packet;
unsigned char packet_type = packet[3] & 0x03;
+ unsigned int ic_version;
bool sanity_check;
if (etd->tp_dev && (packet[3] & 0x0f) == 0x06)
return PACKET_TRACKPOINT;
+ /* This represents the version of the IC body. */
+ ic_version = (etd->fw_version & 0x0f0000) >> 16;
+
/*
* Sanity check based on the constant bits of a packet.
* The constant bits change depending on the value of
- * the hardware flag 'crc_enabled' but are the same for
- * every packet, regardless of the type.
+ * the hardware flag 'crc_enabled' and the version of
+ * the IC body, but are the same for every packet,
+ * regardless of the type.
*/
if (etd->crc_enabled)
sanity_check = ((packet[3] & 0x08) == 0x00);
+ else if (ic_version == 7 && etd->samples[1] == 0x2A)
+ sanity_check = ((packet[3] & 0x1c) == 0x10);
else
sanity_check = ((packet[0] & 0x0c) == 0x04 &&
(packet[3] & 0x1c) == 0x10);
@@ -1116,6 +1123,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Avatar AVIU-145A2 0x361f00 ? clickpad
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
+ * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
* Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
* Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
@@ -1651,6 +1659,16 @@ int elantech_init(struct psmouse *psmouse)
etd->capabilities[0], etd->capabilities[1],
etd->capabilities[2]);
+ if (etd->hw_version != 1) {
+ if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, etd->samples)) {
+ psmouse_err(psmouse, "failed to query sample data\n");
+ goto init_fail;
+ }
+ psmouse_info(psmouse,
+ "Elan sample query result %02x, %02x, %02x\n",
+ etd->samples[0], etd->samples[1], etd->samples[2]);
+ }
+
if (elantech_set_absolute_mode(psmouse)) {
psmouse_err(psmouse,
"failed to put touchpad into absolute mode.\n");
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index f965d1569cc3..e1cbf409d9c8 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -129,6 +129,7 @@ struct elantech_data {
unsigned char reg_26;
unsigned char debug;
unsigned char capabilities[3];
+ unsigned char samples[3];
bool paritycheck;
bool jumpy_cursor;
bool reports_pressure;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 658ee39e6569..f82060e778a2 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1835,8 +1835,8 @@ static void free_gcr3_table(struct protection_domain *domain)
free_gcr3_tbl_level2(domain->gcr3_tbl);
else if (domain->glx == 1)
free_gcr3_tbl_level1(domain->gcr3_tbl);
- else if (domain->glx != 0)
- BUG();
+ else
+ BUG_ON(domain->glx != 0);
free_page((unsigned long)domain->gcr3_tbl);
}
@@ -3947,11 +3947,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (ret < 0)
return ret;
- ret = -ENOMEM;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- goto out_free_parent;
-
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
if (get_irq_table(devid, true))
index = info->ioapic_pin;
@@ -3962,7 +3957,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
}
if (index < 0) {
pr_warn("Failed to allocate IRTE\n");
- kfree(data);
goto out_free_parent;
}
@@ -3974,17 +3968,18 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
goto out_free_data;
}
- if (i > 0) {
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- goto out_free_data;
- }
+ ret = -ENOMEM;
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto out_free_data;
+
irq_data->hwirq = (devid << 16) + i;
irq_data->chip_data = data;
irq_data->chip = &amd_ir_chip;
irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
}
+
return 0;
out_free_data:
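
The amd_iommu hunks move the kzalloc() inside the per-interrupt loop so each descriptor gets its own chip data and the unwind path frees exactly what was set up. A userspace sketch of that allocate-in-loop-with-unwind shape:

    #include <stdlib.h>

    struct chip_data { int hwirq; };

    static int alloc_irqs(struct chip_data **slots, int nr_irqs)
    {
            int i;

            for (i = 0; i < nr_irqs; i++) {
                    slots[i] = calloc(1, sizeof(*slots[i]));
                    if (!slots[i])
                            goto out_free;
                    slots[i]->hwirq = i;
            }
            return 0;

    out_free:
            /* Unwind only the iterations that succeeded. */
            while (--i >= 0) {
                    free(slots[i]);
                    slots[i] = NULL;
            }
            return -1;      /* -ENOMEM in the real code */
    }
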
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index a24495eb4e26..5ef347a13cb5 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -154,7 +154,7 @@ bool amd_iommu_iotlb_sup __read_mostly = true;
u32 amd_iommu_max_pasid __read_mostly = ~0;
bool amd_iommu_v2_present __read_mostly;
-bool amd_iommu_pc_present __read_mostly;
+static bool amd_iommu_pc_present __read_mostly;
bool amd_iommu_force_isolation __read_mostly;
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index f7b875bb70d4..1131664b918b 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -356,8 +356,8 @@ static void free_pasid_states(struct device_state *dev_state)
free_pasid_states_level2(dev_state->states);
else if (dev_state->pasid_levels == 1)
free_pasid_states_level1(dev_state->states);
- else if (dev_state->pasid_levels != 0)
- BUG();
+ else
+ BUG_ON(dev_state->pasid_levels != 0);
free_page((unsigned long)dev_state->states);
}
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index c9db04d4ef39..8757f8dfc4e5 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1068,7 +1068,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
if (intel_iommu_enabled)
iommu->iommu_dev = iommu_device_create(NULL, iommu,
intel_iommu_groups,
- iommu->name);
+ "%s", iommu->name);
return 0;
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index abeedc9a78c2..2570f2a25dc4 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -41,7 +41,6 @@ struct pamu_isr_data {
static struct paace *ppaact;
static struct paace *spaact;
-static struct ome *omt __initdata;
/*
* Table for matching compatible strings, for device tree
@@ -50,7 +49,7 @@ static struct ome *omt __initdata;
* SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
* string would be used.
*/
-static const struct of_device_id guts_device_ids[] __initconst = {
+static const struct of_device_id guts_device_ids[] = {
{ .compatible = "fsl,qoriq-device-config-1.0", },
{ .compatible = "fsl,qoriq-device-config-2.0", },
{}
@@ -599,7 +598,7 @@ found_cpu_node:
* Memory accesses to QMAN and BMAN private memory need not be coherent, so
* clear the PAACE entry coherency attribute for them.
*/
-static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
+static void setup_qbman_paace(struct paace *ppaace, int paace_type)
{
switch (paace_type) {
case QMAN_PAACE:
@@ -629,7 +628,7 @@ static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
* this table to translate device transaction to appropriate corenet
* transaction.
*/
-static void __init setup_omt(struct ome *omt)
+static void setup_omt(struct ome *omt)
{
struct ome *ome;
@@ -666,7 +665,7 @@ static void __init setup_omt(struct ome *omt)
* Get the maximum number of PAACT table entries
* and subwindows supported by PAMU
*/
-static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
+static void get_pamu_cap_values(unsigned long pamu_reg_base)
{
u32 pc_val;
@@ -676,9 +675,9 @@ static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
}
/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
-static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
- phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
- phys_addr_t omt_phys)
+static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+ phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+ phys_addr_t omt_phys)
{
u32 *pc;
struct pamu_mmap_regs *pamu_regs;
@@ -720,7 +719,7 @@ static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu
}
/* Enable all device LIODNS */
-static void __init setup_liodns(void)
+static void setup_liodns(void)
{
int i, len;
struct paace *ppaace;
@@ -846,7 +845,7 @@ struct ccsr_law {
/*
* Create a coherence subdomain for a given memory block.
*/
-static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
+static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
{
struct device_node *np;
const __be32 *iprop;
@@ -988,7 +987,7 @@ error:
static const struct {
u32 svr;
u32 port_id;
-} port_id_map[] __initconst = {
+} port_id_map[] = {
{(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */
{(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */
{(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */
@@ -1006,7 +1005,7 @@ static const struct {
#define SVR_SECURITY 0x80000 /* The Security (E) bit */
-static int __init fsl_pamu_probe(struct platform_device *pdev)
+static int fsl_pamu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
void __iomem *pamu_regs = NULL;
@@ -1022,6 +1021,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
int irq;
phys_addr_t ppaact_phys;
phys_addr_t spaact_phys;
+ struct ome *omt;
phys_addr_t omt_phys;
size_t mem_size = 0;
unsigned int order = 0;
@@ -1200,7 +1200,7 @@ error:
return ret;
}
-static struct platform_driver fsl_of_pamu_driver __initdata = {
+static struct platform_driver fsl_of_pamu_driver = {
.driver = {
.name = "fsl-of-pamu",
},
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0649b94f5958..63daf1ba04b7 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -364,7 +364,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;
-/* domain represents a virtual machine, more than one devices
+/*
+ * Domain represents a virtual machine; more than one device
* across iommus may be owned in one domain, e.g. kvm guest.
*/
#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
@@ -372,11 +373,21 @@ static int hw_pass_through = 1;
/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
+#define for_each_domain_iommu(idx, domain) \
+ for (idx = 0; idx < g_num_of_iommus; idx++) \
+ if (domain->iommu_refcnt[idx])
+
struct dmar_domain {
- int id; /* domain id */
int nid; /* node id */
- DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
- /* bitmap of iommus this domain uses*/
+
+ unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
+ /* Refcount of devices per iommu */
+
+
+ u16 iommu_did[DMAR_UNITS_SUPPORTED];
+ /* Domain ids per IOMMU. Use u16 since
+ * domain ids are 16 bit wide according
+ * to VT-d spec, section 9.3 */
struct list_head devices; /* all devices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
@@ -395,7 +406,6 @@ struct dmar_domain {
int iommu_superpage;/* Level of superpages supported:
0 == 4KiB (no superpages), 1 == 2MiB,
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
- spinlock_t iommu_lock; /* protect iommu set in domain */
u64 max_addr; /* maximum mapped address */
struct iommu_domain domain; /* generic domain data structure for
@@ -461,10 +471,11 @@ static long list_size;
static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
- struct device *dev);
-static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
- struct device *dev);
+static void dmar_remove_one_dev_info(struct dmar_domain *domain,
+ struct device *dev);
+static void __dmar_remove_one_dev_info(struct device_domain_info *info);
+static void domain_context_clear(struct intel_iommu *iommu,
+ struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu);
@@ -564,6 +575,36 @@ __setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
+static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
+{
+ struct dmar_domain **domains;
+ int idx = did >> 8;
+
+ domains = iommu->domains[idx];
+ if (!domains)
+ return NULL;
+
+ return domains[did & 0xff];
+}
+
+static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
+ struct dmar_domain *domain)
+{
+ struct dmar_domain **domains;
+ int idx = did >> 8;
+
+ if (!iommu->domains[idx]) {
+ size_t size = 256 * sizeof(struct dmar_domain *);
+ iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
+ }
+
+ domains = iommu->domains[idx];
+ if (WARN_ON(!domains))
+ return;
+ else
+ domains[did & 0xff] = domain;
+}
+
static inline void *alloc_pgtable_page(int node)
{
struct page *page;
@@ -605,6 +646,11 @@ static inline int domain_type_is_vm(struct dmar_domain *domain)
return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}
+static inline int domain_type_is_si(struct dmar_domain *domain)
+{
+ return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
+}
+
static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
@@ -659,7 +705,9 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
/* si_domain and vm domain should not get here. */
BUG_ON(domain_type_is_vm_or_si(domain));
- iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
+ for_each_domain_iommu(iommu_id, domain)
+ break;
+
if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
return NULL;
@@ -675,7 +723,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
domain->iommu_coherency = 1;
- for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
+ for_each_domain_iommu(i, domain) {
found = true;
if (!ecap_coherent(g_iommus[i]->ecap)) {
domain->iommu_coherency = 0;
@@ -755,6 +803,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
struct context_entry *context;
u64 *entry;
+ entry = &root->lo;
if (ecs_enabled(iommu)) {
if (devfn >= 0x80) {
devfn -= 0x80;
@@ -762,7 +811,6 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
}
devfn *= 2;
}
- entry = &root->lo;
if (*entry & 1)
context = phys_to_virt(*entry & VTD_PAGE_MASK);
else {
@@ -1162,9 +1210,9 @@ next:
/* We can't just free the pages because the IOMMU may still be walking
the page tables, and may have cached the intermediate levels. The
pages can only be freed after the IOTLB flush has been done. */
-struct page *domain_unmap(struct dmar_domain *domain,
- unsigned long start_pfn,
- unsigned long last_pfn)
+static struct page *domain_unmap(struct dmar_domain *domain,
+ unsigned long start_pfn,
+ unsigned long last_pfn)
{
struct page *freelist = NULL;
@@ -1188,7 +1236,7 @@ struct page *domain_unmap(struct dmar_domain *domain,
return freelist;
}
-void dma_free_pagelist(struct page *freelist)
+static void dma_free_pagelist(struct page *freelist)
{
struct page *pg;
@@ -1356,24 +1404,23 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
u8 bus, u8 devfn)
{
bool found = false;
- unsigned long flags;
struct device_domain_info *info;
struct pci_dev *pdev;
+ assert_spin_locked(&device_domain_lock);
+
if (!ecap_dev_iotlb_support(iommu->ecap))
return NULL;
if (!iommu->qi)
return NULL;
- spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(info, &domain->devices, link)
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
found = true;
break;
}
- spin_unlock_irqrestore(&device_domain_lock, flags);
if (!found || !info->dev || !dev_is_pci(info->dev))
return NULL;
@@ -1430,11 +1477,14 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
spin_unlock_irqrestore(&device_domain_lock, flags);
}
-static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
- unsigned long pfn, unsigned int pages, int ih, int map)
+static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
+ struct dmar_domain *domain,
+ unsigned long pfn, unsigned int pages,
+ int ih, int map)
{
unsigned int mask = ilog2(__roundup_pow_of_two(pages));
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
+ u16 did = domain->iommu_did[iommu->seq_id];
BUG_ON(pages == 0);
@@ -1458,7 +1508,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
* flush. However, device IOTLB doesn't need to be flushed in this case.
*/
if (!cap_caching_mode(iommu->cap) || !map)
- iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
+ iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
+ addr, mask);
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1513,65 +1564,80 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
static int iommu_init_domains(struct intel_iommu *iommu)
{
- unsigned long ndomains;
- unsigned long nlongs;
+ u32 ndomains, nlongs;
+ size_t size;
ndomains = cap_ndoms(iommu->cap);
- pr_debug("%s: Number of Domains supported <%ld>\n",
+ pr_debug("%s: Number of Domains supported <%d>\n",
iommu->name, ndomains);
nlongs = BITS_TO_LONGS(ndomains);
spin_lock_init(&iommu->lock);
- /* TBD: there might be 64K domains,
- * consider other allocation for future chip
- */
iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
if (!iommu->domain_ids) {
pr_err("%s: Allocating domain id array failed\n",
iommu->name);
return -ENOMEM;
}
- iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
- GFP_KERNEL);
- if (!iommu->domains) {
+
+ size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
+ iommu->domains = kzalloc(size, GFP_KERNEL);
+
+ if (iommu->domains) {
+ size = 256 * sizeof(struct dmar_domain *);
+ iommu->domains[0] = kzalloc(size, GFP_KERNEL);
+ }
+
+ if (!iommu->domains || !iommu->domains[0]) {
pr_err("%s: Allocating domain array failed\n",
iommu->name);
kfree(iommu->domain_ids);
+ kfree(iommu->domains);
iommu->domain_ids = NULL;
+ iommu->domains = NULL;
return -ENOMEM;
}
+
+
/*
- * if Caching mode is set, then invalid translations are tagged
- * with domainid 0. Hence we need to pre-allocate it.
+ * If Caching mode is set, then invalid translations are tagged
+ * with domain-id 0, hence we need to pre-allocate it. We also
+ * use domain-id 0 as a marker for non-allocated domain-id, so
+ * make sure it is not used for a real domain.
*/
- if (cap_caching_mode(iommu->cap))
- set_bit(0, iommu->domain_ids);
+ set_bit(0, iommu->domain_ids);
+
return 0;
}
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
- struct dmar_domain *domain;
- int i;
+ struct device_domain_info *info, *tmp;
+ unsigned long flags;
- if ((iommu->domains) && (iommu->domain_ids)) {
- for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
- /*
- * Domain id 0 is reserved for invalid translation
- * if hardware supports caching mode.
- */
- if (cap_caching_mode(iommu->cap) && i == 0)
- continue;
+ if (!iommu->domains || !iommu->domain_ids)
+ return;
- domain = iommu->domains[i];
- clear_bit(i, iommu->domain_ids);
- if (domain_detach_iommu(domain, iommu) == 0 &&
- !domain_type_is_vm(domain))
- domain_exit(domain);
- }
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
+ struct dmar_domain *domain;
+
+ if (info->iommu != iommu)
+ continue;
+
+ if (!info->dev || !info->domain)
+ continue;
+
+ domain = info->domain;
+
+ dmar_remove_one_dev_info(domain, info->dev);
+
+ if (!domain_type_is_vm_or_si(domain))
+ domain_exit(domain);
}
+ spin_unlock_irqrestore(&device_domain_lock, flags);
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
@@ -1580,6 +1646,11 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
static void free_dmar_iommu(struct intel_iommu *iommu)
{
if ((iommu->domains) && (iommu->domain_ids)) {
+ int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
+ int i;
+
+ for (i = 0; i < elems; i++)
+ kfree(iommu->domains[i]);
kfree(iommu->domains);
kfree(iommu->domain_ids);
iommu->domains = NULL;
@@ -1594,8 +1665,6 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
static struct dmar_domain *alloc_domain(int flags)
{
- /* domain id for virtual machine, it won't be set in context */
- static atomic_t vm_domid = ATOMIC_INIT(0);
struct dmar_domain *domain;
domain = alloc_domain_mem();
@@ -1605,111 +1674,64 @@ static struct dmar_domain *alloc_domain(int flags)
memset(domain, 0, sizeof(*domain));
domain->nid = -1;
domain->flags = flags;
- spin_lock_init(&domain->iommu_lock);
INIT_LIST_HEAD(&domain->devices);
- if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
- domain->id = atomic_inc_return(&vm_domid);
return domain;
}
-static int __iommu_attach_domain(struct dmar_domain *domain,
- struct intel_iommu *iommu)
-{
- int num;
- unsigned long ndomains;
-
- ndomains = cap_ndoms(iommu->cap);
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
- if (num < ndomains) {
- set_bit(num, iommu->domain_ids);
- iommu->domains[num] = domain;
- } else {
- num = -ENOSPC;
- }
-
- return num;
-}
-
-static int iommu_attach_domain(struct dmar_domain *domain,
+/* Must be called with iommu->lock */
+static int domain_attach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
- int num;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu->lock, flags);
- num = __iommu_attach_domain(domain, iommu);
- spin_unlock_irqrestore(&iommu->lock, flags);
- if (num < 0)
- pr_err("%s: No free domain ids\n", iommu->name);
-
- return num;
-}
-
-static int iommu_attach_vm_domain(struct dmar_domain *domain,
- struct intel_iommu *iommu)
-{
- int num;
unsigned long ndomains;
+ int num;
- ndomains = cap_ndoms(iommu->cap);
- for_each_set_bit(num, iommu->domain_ids, ndomains)
- if (iommu->domains[num] == domain)
- return num;
-
- return __iommu_attach_domain(domain, iommu);
-}
-
-static void iommu_detach_domain(struct dmar_domain *domain,
- struct intel_iommu *iommu)
-{
- unsigned long flags;
- int num, ndomains;
+ assert_spin_locked(&device_domain_lock);
+ assert_spin_locked(&iommu->lock);
- spin_lock_irqsave(&iommu->lock, flags);
- if (domain_type_is_vm_or_si(domain)) {
+ domain->iommu_refcnt[iommu->seq_id] += 1;
+ domain->iommu_count += 1;
+ if (domain->iommu_refcnt[iommu->seq_id] == 1) {
ndomains = cap_ndoms(iommu->cap);
- for_each_set_bit(num, iommu->domain_ids, ndomains) {
- if (iommu->domains[num] == domain) {
- clear_bit(num, iommu->domain_ids);
- iommu->domains[num] = NULL;
- break;
- }
+ num = find_first_zero_bit(iommu->domain_ids, ndomains);
+
+ if (num >= ndomains) {
+ pr_err("%s: No free domain ids\n", iommu->name);
+ domain->iommu_refcnt[iommu->seq_id] -= 1;
+ domain->iommu_count -= 1;
+ return -ENOSPC;
}
- } else {
- clear_bit(domain->id, iommu->domain_ids);
- iommu->domains[domain->id] = NULL;
- }
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-static void domain_attach_iommu(struct dmar_domain *domain,
- struct intel_iommu *iommu)
-{
- unsigned long flags;
+ set_bit(num, iommu->domain_ids);
+ set_iommu_domain(iommu, num, domain);
+
+ domain->iommu_did[iommu->seq_id] = num;
+ domain->nid = iommu->node;
- spin_lock_irqsave(&domain->iommu_lock, flags);
- if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
- domain->iommu_count++;
- if (domain->iommu_count == 1)
- domain->nid = iommu->node;
domain_update_iommu_cap(domain);
}
- spin_unlock_irqrestore(&domain->iommu_lock, flags);
+
+ return 0;
}
static int domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
- unsigned long flags;
- int count = INT_MAX;
+ int num, count = INT_MAX;
+
+ assert_spin_locked(&device_domain_lock);
+ assert_spin_locked(&iommu->lock);
+
+ domain->iommu_refcnt[iommu->seq_id] -= 1;
+ count = --domain->iommu_count;
+ if (domain->iommu_refcnt[iommu->seq_id] == 0) {
+ num = domain->iommu_did[iommu->seq_id];
+ clear_bit(num, iommu->domain_ids);
+ set_iommu_domain(iommu, num, NULL);
- spin_lock_irqsave(&domain->iommu_lock, flags);
- if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
- count = --domain->iommu_count;
domain_update_iommu_cap(domain);
+ domain->iommu_did[iommu->seq_id] = 0;
}
- spin_unlock_irqrestore(&domain->iommu_lock, flags);
return count;
}
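domain_detach_iommu() drops the per-IOMMU reference taken in domain_attach_iommu(); the iommu_refcnt[] and iommu_did[] arrays replace the old iommu_bmp bitmap. The for_each_domain_iommu() iterator used later in this patch is defined outside these hunks; a plausible sketch, assuming g_num_of_iommus and the new refcount array:

/* hypothetical reconstruction, not the verbatim macro */
#define for_each_domain_iommu(idx, domain)			\
	for (idx = 0; idx < g_num_of_iommus; idx++)		\
		if (domain->iommu_refcnt[idx])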
@@ -1776,9 +1798,9 @@ static inline int guestwidth_to_adjustwidth(int gaw)
return agaw;
}
-static int domain_init(struct dmar_domain *domain, int guest_width)
+static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
+ int guest_width)
{
- struct intel_iommu *iommu;
int adjust_width, agaw;
unsigned long sagaw;
@@ -1787,7 +1809,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
domain_reserve_special_ranges(domain);
/* calculate AGAW */
- iommu = domain_get_iommu(domain);
if (guest_width > cap_mgaw(iommu->cap))
guest_width = cap_mgaw(iommu->cap);
domain->gaw = guest_width;
@@ -1830,8 +1851,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
static void domain_exit(struct dmar_domain *domain)
{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
struct page *freelist = NULL;
/* Domain 0 is reserved, so dont process it */
@@ -1842,22 +1861,16 @@ static void domain_exit(struct dmar_domain *domain)
if (!intel_iommu_strict)
flush_unmaps_timeout(0);
- /* remove associated devices */
+ /* Remove associated devices and clear attached or cached domains */
+ rcu_read_lock();
domain_remove_dev_info(domain);
+ rcu_read_unlock();
/* destroy iovas */
put_iova_domain(&domain->iovad);
freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
- /* clear attached or cached domains */
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd)
- if (domain_type_is_vm(domain) ||
- test_bit(iommu->seq_id, domain->iommu_bmp))
- iommu_detach_domain(domain, iommu);
- rcu_read_unlock();
-
dma_free_pagelist(freelist);
free_domain_mem(domain);
@@ -1865,79 +1878,68 @@ static void domain_exit(struct dmar_domain *domain)
static int domain_context_mapping_one(struct dmar_domain *domain,
struct intel_iommu *iommu,
- u8 bus, u8 devfn, int translation)
+ u8 bus, u8 devfn)
{
+ u16 did = domain->iommu_did[iommu->seq_id];
+ int translation = CONTEXT_TT_MULTI_LEVEL;
+ struct device_domain_info *info = NULL;
struct context_entry *context;
unsigned long flags;
struct dma_pte *pgd;
- int id;
- int agaw;
- struct device_domain_info *info = NULL;
+ int ret, agaw;
+
+ WARN_ON(did == 0);
+
+ if (hw_pass_through && domain_type_is_si(domain))
+ translation = CONTEXT_TT_PASS_THROUGH;
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
BUG_ON(!domain->pgd);
- BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
- translation != CONTEXT_TT_MULTI_LEVEL);
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock_irqsave(&device_domain_lock, flags);
+ spin_lock(&iommu->lock);
+
+ ret = -ENOMEM;
context = iommu_context_addr(iommu, bus, devfn, 1);
- spin_unlock_irqrestore(&iommu->lock, flags);
if (!context)
- return -ENOMEM;
- spin_lock_irqsave(&iommu->lock, flags);
- if (context_present(context)) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- return 0;
- }
+ goto out_unlock;
- context_clear_entry(context);
+ ret = 0;
+ if (context_present(context))
+ goto out_unlock;
- id = domain->id;
pgd = domain->pgd;
- if (domain_type_is_vm_or_si(domain)) {
- if (domain_type_is_vm(domain)) {
- id = iommu_attach_vm_domain(domain, iommu);
- if (id < 0) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- pr_err("%s: No free domain ids\n", iommu->name);
- return -EFAULT;
- }
- }
+ context_clear_entry(context);
+ context_set_domain_id(context, did);
- /* Skip top levels of page tables for
- * iommu which has less agaw than default.
- * Unnecessary for PT mode.
- */
- if (translation != CONTEXT_TT_PASS_THROUGH) {
- for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
- pgd = phys_to_virt(dma_pte_addr(pgd));
- if (!dma_pte_present(pgd)) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- return -ENOMEM;
- }
- }
+ /*
+ * Skip top levels of page tables for iommu which has less agaw
+ * than default. Unnecessary for PT mode.
+ */
+ if (translation != CONTEXT_TT_PASS_THROUGH) {
+ for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+ ret = -ENOMEM;
+ pgd = phys_to_virt(dma_pte_addr(pgd));
+ if (!dma_pte_present(pgd))
+ goto out_unlock;
}
- }
-
- context_set_domain_id(context, id);
- if (translation != CONTEXT_TT_PASS_THROUGH) {
info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
translation = info ? CONTEXT_TT_DEV_IOTLB :
CONTEXT_TT_MULTI_LEVEL;
- }
- /*
- * In pass through mode, AW must be programmed to indicate the largest
- * AGAW value supported by hardware. And ASR is ignored by hardware.
- */
- if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
- context_set_address_width(context, iommu->msagaw);
- else {
+
context_set_address_root(context, virt_to_phys(pgd));
context_set_address_width(context, iommu->agaw);
+ } else {
+ /*
+ * In pass through mode, AW must be programmed to
+ * indicate the largest AGAW value supported by
+ * hardware. And ASR is ignored by hardware.
+ */
+ context_set_address_width(context, iommu->msagaw);
}
context_set_translation_type(context, translation);
@@ -1956,14 +1958,17 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
- iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
+ iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
} else {
iommu_flush_write_buffer(iommu);
}
iommu_enable_dev_iotlb(info);
- spin_unlock_irqrestore(&iommu->lock, flags);
- domain_attach_iommu(domain, iommu);
+ ret = 0;
+
+out_unlock:
+ spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
-	return 0;
+	return ret;
}
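With domain->iommu_lock gone, correctness here rests on a fixed lock nesting: device_domain_lock (irqsave) is always taken outside iommu->lock. Condensed to its skeleton, the pattern the rewritten function follows is:

unsigned long flags;

spin_lock_irqsave(&device_domain_lock, flags);
spin_lock(&iommu->lock);

/* ... allocate/inspect the context entry, program the domain id ... */

spin_unlock(&iommu->lock);
spin_unlock_irqrestore(&device_domain_lock, flags);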
@@ -1971,7 +1976,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
struct domain_context_mapping_data {
struct dmar_domain *domain;
struct intel_iommu *iommu;
- int translation;
};
static int domain_context_mapping_cb(struct pci_dev *pdev,
@@ -1980,13 +1984,11 @@ static int domain_context_mapping_cb(struct pci_dev *pdev,
struct domain_context_mapping_data *data = opaque;
return domain_context_mapping_one(data->domain, data->iommu,
- PCI_BUS_NUM(alias), alias & 0xff,
- data->translation);
+ PCI_BUS_NUM(alias), alias & 0xff);
}
static int
-domain_context_mapping(struct dmar_domain *domain, struct device *dev,
- int translation)
+domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
struct intel_iommu *iommu;
u8 bus, devfn;
@@ -1997,12 +1999,10 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev,
return -ENODEV;
if (!dev_is_pci(dev))
- return domain_context_mapping_one(domain, iommu, bus, devfn,
- translation);
+ return domain_context_mapping_one(domain, iommu, bus, devfn);
data.domain = domain;
data.iommu = iommu;
- data.translation = translation;
return pci_for_each_dma_alias(to_pci_dev(dev),
&domain_context_mapping_cb, &data);
@@ -2188,7 +2188,7 @@ static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long i
return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
-static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
+static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
if (!iommu)
return;
@@ -2214,21 +2214,8 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
unsigned long flags;
spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &domain->devices, link) {
- unlink_domain_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- iommu_disable_dev_iotlb(info);
- iommu_detach_dev(info->iommu, info->bus, info->devfn);
-
- if (domain_type_is_vm(domain)) {
- iommu_detach_dependent_devices(info->iommu, info->dev);
- domain_detach_iommu(domain, info->iommu);
- }
-
- free_devinfo_mem(info);
- spin_lock_irqsave(&device_domain_lock, flags);
- }
+ list_for_each_entry_safe(info, tmp, &domain->devices, link)
+ __dmar_remove_one_dev_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
@@ -2260,14 +2247,15 @@ dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
return NULL;
}
-static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
- int bus, int devfn,
- struct device *dev,
- struct dmar_domain *domain)
+static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
+ int bus, int devfn,
+ struct device *dev,
+ struct dmar_domain *domain)
{
struct dmar_domain *found = NULL;
struct device_domain_info *info;
unsigned long flags;
+ int ret;
info = alloc_devinfo_mem();
if (!info)
@@ -2282,12 +2270,16 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
spin_lock_irqsave(&device_domain_lock, flags);
if (dev)
found = find_domain(dev);
- else {
+
+ if (!found) {
struct device_domain_info *info2;
info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
- if (info2)
- found = info2->domain;
+ if (info2) {
+ found = info2->domain;
+ info2->dev = dev;
+ }
}
+
if (found) {
spin_unlock_irqrestore(&device_domain_lock, flags);
free_devinfo_mem(info);
@@ -2295,12 +2287,27 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
return found;
}
+ spin_lock(&iommu->lock);
+ ret = domain_attach_iommu(domain, iommu);
+ spin_unlock(&iommu->lock);
+
+ if (ret) {
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ return NULL;
+ }
+
list_add(&info->link, &domain->devices);
list_add(&info->global, &device_domain_list);
if (dev)
dev->archdata.iommu = info;
spin_unlock_irqrestore(&device_domain_lock, flags);
+ if (dev && domain_context_mapping(domain, dev)) {
+ pr_err("Domain context map for %s failed\n", dev_name(dev));
+ dmar_remove_one_dev_info(domain, dev);
+ return NULL;
+ }
+
return domain;
}
@@ -2313,10 +2320,10 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
+ struct device_domain_info *info = NULL;
struct dmar_domain *domain, *tmp;
struct intel_iommu *iommu;
- struct device_domain_info *info;
- u16 dma_alias;
+ u16 req_id, dma_alias;
unsigned long flags;
u8 bus, devfn;
@@ -2328,6 +2335,8 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
if (!iommu)
return NULL;
+ req_id = ((u16)bus << 8) | devfn;
+
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2352,21 +2361,15 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
domain = alloc_domain(0);
if (!domain)
return NULL;
- domain->id = iommu_attach_domain(domain, iommu);
- if (domain->id < 0) {
- free_domain_mem(domain);
- return NULL;
- }
- domain_attach_iommu(domain, iommu);
- if (domain_init(domain, gaw)) {
+ if (domain_init(domain, iommu, gaw)) {
domain_exit(domain);
return NULL;
}
/* register PCI DMA alias device */
- if (dev_is_pci(dev)) {
- tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
- dma_alias & 0xff, NULL, domain);
+ if (req_id != dma_alias && dev_is_pci(dev)) {
+ tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
+ dma_alias & 0xff, NULL, domain);
if (!tmp || tmp != domain) {
domain_exit(domain);
@@ -2378,7 +2381,7 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
}
found_domain:
- tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
+ tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
if (!tmp || tmp != domain) {
domain_exit(domain);
@@ -2406,8 +2409,7 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
return -ENOMEM;
}
- pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
- start, end, domain->id);
+ pr_debug("Mapping reserved region %llx-%llx\n", start, end);
/*
* RMRR range might have overlap with physical memory range,
* clear it first
@@ -2468,11 +2470,6 @@ static int iommu_prepare_identity_map(struct device *dev,
if (ret)
goto error;
- /* context entry init */
- ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
- if (ret)
- goto error;
-
return 0;
error:
@@ -2518,37 +2515,18 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width);
static int __init si_domain_init(int hw)
{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
int nid, ret = 0;
- bool first = true;
si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
if (!si_domain)
return -EFAULT;
- for_each_active_iommu(iommu, drhd) {
- ret = iommu_attach_domain(si_domain, iommu);
- if (ret < 0) {
- domain_exit(si_domain);
- return -EFAULT;
- } else if (first) {
- si_domain->id = ret;
- first = false;
- } else if (si_domain->id != ret) {
- domain_exit(si_domain);
- return -EFAULT;
- }
- domain_attach_iommu(si_domain, iommu);
- }
-
if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
domain_exit(si_domain);
return -EFAULT;
}
- pr_debug("Identity mapping domain is domain %d\n",
- si_domain->id);
+ pr_debug("Identity mapping domain allocated\n");
if (hw)
return 0;
@@ -2582,28 +2560,20 @@ static int identity_mapping(struct device *dev)
return 0;
}
-static int domain_add_dev_info(struct dmar_domain *domain,
- struct device *dev, int translation)
+static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct dmar_domain *ndomain;
struct intel_iommu *iommu;
u8 bus, devfn;
- int ret;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
- ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
+ ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
if (ndomain != domain)
return -EBUSY;
- ret = domain_context_mapping(domain, dev, translation);
- if (ret) {
- domain_remove_one_dev_info(domain, dev);
- return ret;
- }
-
return 0;
}
@@ -2743,9 +2713,7 @@ static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw
if (!iommu_should_identity_map(dev, 1))
return 0;
- ret = domain_add_dev_info(si_domain, dev,
- hw ? CONTEXT_TT_PASS_THROUGH :
- CONTEXT_TT_MULTI_LEVEL);
+ ret = domain_add_dev_info(si_domain, dev);
if (!ret)
pr_info("%s identity mapping for device %s\n",
hw ? "Hardware" : "Software", dev_name(dev));
@@ -2831,15 +2799,18 @@ static void intel_iommu_init_qi(struct intel_iommu *iommu)
}
static int copy_context_table(struct intel_iommu *iommu,
- struct root_entry *old_re,
+ struct root_entry __iomem *old_re,
struct context_entry **tbl,
int bus, bool ext)
{
- struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
+ struct context_entry __iomem *old_ce = NULL;
+ struct context_entry *new_ce = NULL, ce;
+ struct root_entry re;
phys_addr_t old_ce_phys;
tbl_idx = ext ? bus * 2 : bus;
+ memcpy_fromio(&re, old_re, sizeof(re));
for (devfn = 0; devfn < 256; devfn++) {
/* First calculate the correct index */
@@ -2859,9 +2830,9 @@ static int copy_context_table(struct intel_iommu *iommu,
ret = 0;
if (devfn < 0x80)
- old_ce_phys = root_entry_lctp(old_re);
+ old_ce_phys = root_entry_lctp(&re);
else
- old_ce_phys = root_entry_uctp(old_re);
+ old_ce_phys = root_entry_uctp(&re);
if (!old_ce_phys) {
if (ext && devfn == 0) {
@@ -2886,7 +2857,7 @@ static int copy_context_table(struct intel_iommu *iommu,
}
/* Now copy the context entry */
- ce = old_ce[idx];
+ memcpy_fromio(&ce, old_ce + idx, sizeof(ce));
if (!__context_present(&ce))
continue;
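The tables handed over from the old kernel are mapped with ioremap() and therefore __iomem; dereferencing them directly is not portable, so memcpy_fromio() is used for every read. Reduced to the essential pattern (old_ce_phys comes from the root entry as above; 256 entries per half-bus is assumed from the surrounding loop):

struct context_entry ce;
struct context_entry __iomem *old_ce;

old_ce = ioremap(old_ce_phys, 256 * sizeof(*old_ce));
if (!old_ce)
	return -ENOMEM;

memcpy_fromio(&ce, old_ce + idx, sizeof(ce));	/* not: ce = old_ce[idx] */
iounmap(old_ce);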
@@ -2930,8 +2901,8 @@ out:
static int copy_translation_tables(struct intel_iommu *iommu)
{
+ struct root_entry __iomem *old_rt;
struct context_entry **ctxt_tbls;
- struct root_entry *old_rt;
phys_addr_t old_rt_phys;
int ctxt_table_entries;
unsigned long flags;
@@ -3261,7 +3232,6 @@ static struct iova *intel_alloc_iova(struct device *dev,
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
{
struct dmar_domain *domain;
- int ret;
domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain) {
@@ -3270,16 +3240,6 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
return NULL;
}
- /* make sure context mapping is ok */
- if (unlikely(!domain_context_mapped(dev))) {
- ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
- if (ret) {
- pr_err("Domain context map for %s failed\n",
- dev_name(dev));
- return NULL;
- }
- }
-
return domain;
}
@@ -3315,7 +3275,7 @@ static int iommu_no_mapping(struct device *dev)
* 32 bit DMA is removed from si_domain and fall back
* to non-identity mapping.
*/
- domain_remove_one_dev_info(si_domain, dev);
+ dmar_remove_one_dev_info(si_domain, dev);
pr_info("32bit %s uses non-identity mapping\n",
dev_name(dev));
return 0;
@@ -3327,10 +3287,7 @@ static int iommu_no_mapping(struct device *dev)
*/
if (iommu_should_identity_map(dev, 0)) {
int ret;
- ret = domain_add_dev_info(si_domain, dev,
- hw_pass_through ?
- CONTEXT_TT_PASS_THROUGH :
- CONTEXT_TT_MULTI_LEVEL);
+ ret = domain_add_dev_info(si_domain, dev);
if (!ret) {
pr_info("64bit %s uses identity mapping\n",
dev_name(dev));
@@ -3391,7 +3348,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
+ iommu_flush_iotlb_psi(iommu, domain,
+ mm_to_dma_pfn(iova->pfn_lo),
+ size, 0, 1);
else
iommu_flush_write_buffer(iommu);
@@ -3442,7 +3401,7 @@ static void flush_unmaps(void)
/* On real hardware multiple invalidations are expensive */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain->id,
+ iommu_flush_iotlb_psi(iommu, domain,
iova->pfn_lo, iova_size(iova),
!deferred_flush[i].freelist[j], 0);
else {
@@ -3526,7 +3485,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
freelist = domain_unmap(domain, start_pfn, last_pfn);
if (intel_iommu_strict) {
- iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+ iommu_flush_iotlb_psi(iommu, domain, start_pfn,
last_pfn - start_pfn + 1, !freelist, 0);
/* free iova */
__free_iova(&domain->iovad, iova);
@@ -3684,7 +3643,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
+ iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
else
iommu_flush_write_buffer(iommu);
@@ -4161,13 +4120,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
iommu_enable_translation(iommu);
- if (si_domain) {
- ret = iommu_attach_domain(si_domain, iommu);
- if (ret < 0 || si_domain->id != ret)
- goto disable_iommu;
- domain_attach_iommu(si_domain, iommu);
- }
-
iommu_disable_protect_mem_regions(iommu);
return 0;
@@ -4329,11 +4281,9 @@ static int device_notifier(struct notifier_block *nb,
if (!domain)
return 0;
- down_read(&dmar_global_lock);
- domain_remove_one_dev_info(domain, dev);
+ dmar_remove_one_dev_info(domain, dev);
if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
domain_exit(domain);
- up_read(&dmar_global_lock);
return 0;
}
@@ -4390,7 +4340,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
rcu_read_lock();
for_each_active_iommu(iommu, drhd)
- iommu_flush_iotlb_psi(iommu, si_domain->id,
+ iommu_flush_iotlb_psi(iommu, si_domain,
iova->pfn_lo, iova_size(iova),
!freelist, 0);
rcu_read_unlock();
@@ -4449,11 +4399,32 @@ static ssize_t intel_iommu_show_ecap(struct device *dev,
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
+static ssize_t intel_iommu_show_ndoms(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct intel_iommu *iommu = dev_get_drvdata(dev);
+ return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
+}
+static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
+
+static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct intel_iommu *iommu = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
+ cap_ndoms(iommu->cap)));
+}
+static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
+
static struct attribute *intel_iommu_attrs[] = {
&dev_attr_version.attr,
&dev_attr_address.attr,
&dev_attr_cap.attr,
&dev_attr_ecap.attr,
+ &dev_attr_domains_supported.attr,
+ &dev_attr_domains_used.attr,
NULL,
};
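The two new attributes expose the domain-id capacity and current usage per DRHD unit. A hedged userspace check (the dmar0 name and exact path are assumptions; they depend on how the unit registered itself):

#include <stdio.h>

int main(void)
{
	/* path assumed: /sys/class/iommu/<unit>/intel-iommu/<attr> */
	FILE *f = fopen("/sys/class/iommu/dmar0/intel-iommu/domains_used", "r");
	char buf[32];

	if (f && fgets(buf, sizeof(buf), f))
		printf("domain ids in use: %s", buf);
	if (f)
		fclose(f);
	return 0;
}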
@@ -4533,7 +4504,7 @@ int __init intel_iommu_init(void)
for_each_active_iommu(iommu, drhd)
iommu->iommu_dev = iommu_device_create(NULL, iommu,
intel_iommu_groups,
- iommu->name);
+ "%s", iommu->name);
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
bus_register_notifier(&pci_bus_type, &device_nb);
@@ -4553,11 +4524,11 @@ out_free_dmar:
return ret;
}
-static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
struct intel_iommu *iommu = opaque;
- iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+ domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
return 0;
}
@@ -4567,63 +4538,50 @@ static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
* devices, unbinding the driver from any one of them will possibly leave
* the others unable to operate.
*/
-static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
- struct device *dev)
+static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
{
if (!iommu || !dev || !dev_is_pci(dev))
return;
- pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
+ pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
}
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
- struct device *dev)
+static void __dmar_remove_one_dev_info(struct device_domain_info *info)
{
- struct device_domain_info *info, *tmp;
struct intel_iommu *iommu;
unsigned long flags;
- bool found = false;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
+ assert_spin_locked(&device_domain_lock);
+
+ if (WARN_ON(!info))
return;
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &domain->devices, link) {
- if (info->iommu == iommu && info->bus == bus &&
- info->devfn == devfn) {
- unlink_domain_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ iommu = info->iommu;
- iommu_disable_dev_iotlb(info);
- iommu_detach_dev(iommu, info->bus, info->devfn);
- iommu_detach_dependent_devices(iommu, dev);
- free_devinfo_mem(info);
+ if (info->dev) {
+ iommu_disable_dev_iotlb(info);
+ domain_context_clear(iommu, info->dev);
+ }
- spin_lock_irqsave(&device_domain_lock, flags);
+ unlink_domain_info(info);
- if (found)
- break;
- else
- continue;
- }
+ spin_lock_irqsave(&iommu->lock, flags);
+ domain_detach_iommu(info->domain, iommu);
+ spin_unlock_irqrestore(&iommu->lock, flags);
- /* if there is no other devices under the same iommu
- * owned by this domain, clear this iommu in iommu_bmp
- * update iommu count and coherency
- */
- if (info->iommu == iommu)
- found = true;
- }
+ free_devinfo_mem(info);
+}
- spin_unlock_irqrestore(&device_domain_lock, flags);
+static void dmar_remove_one_dev_info(struct dmar_domain *domain,
+ struct device *dev)
+{
+ struct device_domain_info *info;
+ unsigned long flags;
- if (found == 0) {
- domain_detach_iommu(domain, iommu);
- if (!domain_type_is_vm_or_si(domain))
- iommu_detach_domain(domain, iommu);
- }
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+ __dmar_remove_one_dev_info(info);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
@@ -4704,10 +4662,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
old_domain = find_domain(dev);
if (old_domain) {
- if (domain_type_is_vm_or_si(dmar_domain))
- domain_remove_one_dev_info(old_domain, dev);
- else
- domain_remove_dev_info(old_domain);
+ rcu_read_lock();
+ dmar_remove_one_dev_info(old_domain, dev);
+ rcu_read_unlock();
if (!domain_type_is_vm_or_si(old_domain) &&
list_empty(&old_domain->devices))
@@ -4747,13 +4704,13 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
dmar_domain->agaw--;
}
- return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
+ return domain_add_dev_info(dmar_domain, dev);
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- domain_remove_one_dev_info(to_dmar_domain(domain), dev);
+ dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
}
static int intel_iommu_map(struct iommu_domain *domain,
@@ -4802,12 +4759,11 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
struct intel_iommu *iommu;
unsigned long start_pfn, last_pfn;
unsigned int npages;
- int iommu_id, num, ndomains, level = 0;
+ int iommu_id, level = 0;
/* Cope with horrid API which requires us to unmap more than the
size argument if it happens to be a large-page mapping. */
- if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
- BUG();
+ BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
size = VTD_PAGE_SIZE << level_to_offset_bits(level);
@@ -4819,19 +4775,11 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
npages = last_pfn - start_pfn + 1;
- for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
- iommu = g_iommus[iommu_id];
-
- /*
- * find bit position of dmar_domain
- */
- ndomains = cap_ndoms(iommu->cap);
- for_each_set_bit(num, iommu->domain_ids, ndomains) {
- if (iommu->domains[num] == dmar_domain)
- iommu_flush_iotlb_psi(iommu, num, start_pfn,
- npages, !freelist, 0);
- }
+ for_each_domain_iommu(iommu_id, dmar_domain) {
+ iommu = g_iommus[iommu_id];
+		iommu_flush_iotlb_psi(iommu, dmar_domain,
+				      start_pfn, npages, !freelist, 0);
}
dma_free_pagelist(freelist);
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index f15692a410c7..9ec4e0d94ffd 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -384,7 +384,7 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
static int iommu_load_old_irte(struct intel_iommu *iommu)
{
- struct irte *old_ir_table;
+ struct irte __iomem *old_ir_table;
phys_addr_t irt_phys;
unsigned int i;
size_t size;
@@ -413,7 +413,7 @@ static int iommu_load_old_irte(struct intel_iommu *iommu)
return -ENOMEM;
/* Copy data over */
- memcpy(iommu->ir_table->base, old_ir_table, size);
+ memcpy_fromio(iommu->ir_table->base, old_ir_table, size);
__iommu_flush_cache(iommu, iommu->ir_table->base, size);
@@ -426,6 +426,8 @@ static int iommu_load_old_irte(struct intel_iommu *iommu)
bitmap_set(iommu->ir_table->bitmap, i, 1);
}
+ iounmap(old_ir_table);
+
return 0;
}
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 2d9993062ded..913455a5fd40 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -84,7 +84,7 @@ void set_irq_remapping_broken(void)
bool irq_remapping_cap(enum irq_remap_cap cap)
{
if (!remap_ops || disable_irq_post)
- return 0;
+ return false;
return (remap_ops->capability & (1 << cap));
}
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 15a2063812fa..e321fa517a45 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -106,8 +106,8 @@ static int __flush_iotlb(struct iommu_domain *domain)
#endif
list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
- if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
- BUG();
+
+ BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
BUG_ON(!iommu_drvdata);
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 43429ab62228..60ba238090d9 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -141,10 +141,12 @@ struct iommu_ops *of_iommu_configure(struct device *dev,
struct iommu_ops *ops = NULL;
int idx = 0;
- if (dev_is_pci(dev)) {
- dev_err(dev, "IOMMU is currently not supported for PCI\n");
+ /*
+ * We can't do much for PCI devices without knowing how
+ * device IDs are wired up from the PCI bus to the IOMMU.
+ */
+ if (dev_is_pci(dev))
return NULL;
- }
/*
* We don't currently walk up the tree looking for a parent IOMMU.
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index f3d20a2039d2..0717aa96ce39 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
#include <linux/debugfs.h>
#include <linux/platform_data/iommu-omap.h>
@@ -29,6 +30,59 @@ static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
return !obj->domain;
}
+#define pr_reg(name) \
+ do { \
+ ssize_t bytes; \
+ const char *str = "%20s: %08x\n"; \
+ const int maxcol = 32; \
+ bytes = snprintf(p, maxcol, str, __stringify(name), \
+ iommu_read_reg(obj, MMU_##name)); \
+ p += bytes; \
+ len -= bytes; \
+ if (len < maxcol) \
+ goto out; \
+ } while (0)
+
+static ssize_t
+omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
+{
+ char *p = buf;
+
+ pr_reg(REVISION);
+ pr_reg(IRQSTATUS);
+ pr_reg(IRQENABLE);
+ pr_reg(WALKING_ST);
+ pr_reg(CNTL);
+ pr_reg(FAULT_AD);
+ pr_reg(TTB);
+ pr_reg(LOCK);
+ pr_reg(LD_TLB);
+ pr_reg(CAM);
+ pr_reg(RAM);
+ pr_reg(GFLUSH);
+ pr_reg(FLUSH_ENTRY);
+ pr_reg(READ_CAM);
+ pr_reg(READ_RAM);
+ pr_reg(EMU_FAULT_AD);
+out:
+ return p - buf;
+}
+
+static ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf,
+ ssize_t bytes)
+{
+ if (!obj || !buf)
+ return -EINVAL;
+
+ pm_runtime_get_sync(obj->dev);
+
+ bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
+
+ pm_runtime_put_sync(obj->dev);
+
+ return bytes;
+}
+
static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
@@ -55,34 +109,71 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
return bytes;
}
-static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+static int
+__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{
- struct omap_iommu *obj = file->private_data;
- char *p, *buf;
- ssize_t bytes, rest;
+ int i;
+ struct iotlb_lock saved;
+ struct cr_regs tmp;
+ struct cr_regs *p = crs;
+
+ pm_runtime_get_sync(obj->dev);
+ iotlb_lock_get(obj, &saved);
+
+ for_each_iotlb_cr(obj, num, i, tmp) {
+ if (!iotlb_cr_valid(&tmp))
+ continue;
+ *p++ = tmp;
+ }
+
+ iotlb_lock_set(obj, &saved);
+ pm_runtime_put_sync(obj->dev);
+
+ return p - crs;
+}
+
+static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
+ struct seq_file *s)
+{
+	seq_printf(s, "%08x %08x %01x\n", cr->cam, cr->ram,
+		   (cr->cam & MMU_CAM_P) ? 1 : 0);
+
+	return 0;
+}
+
+static size_t omap_dump_tlb_entries(struct omap_iommu *obj, struct seq_file *s)
+{
+ int i, num;
+ struct cr_regs *cr;
+
+ num = obj->nr_tlb_entries;
+
+ cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
+ if (!cr)
+ return 0;
+
+ num = __dump_tlb_entries(obj, cr, num);
+ for (i = 0; i < num; i++)
+ iotlb_dump_cr(obj, cr + i, s);
+ kfree(cr);
+
+ return 0;
+}
+
+static int debug_read_tlb(struct seq_file *s, void *data)
+{
+ struct omap_iommu *obj = s->private;
if (is_omap_iommu_detached(obj))
return -EPERM;
- buf = kmalloc(count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- p = buf;
-
mutex_lock(&iommu_debug_lock);
- p += sprintf(p, "%8s %8s\n", "cam:", "ram:");
- p += sprintf(p, "-----------------------------------------\n");
- rest = count - (p - buf);
- p += omap_dump_tlb_entries(obj, p, rest);
-
- bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+ seq_printf(s, "%8s %8s\n", "cam:", "ram:");
+ seq_puts(s, "-----------------------------------------\n");
+ omap_dump_tlb_entries(obj, s);
mutex_unlock(&iommu_debug_lock);
- kfree(buf);
- return bytes;
+ return 0;
}
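debug_read_tlb() is now a seq_file show routine, so the manual buffer, count and ppos handling disappears. The DEBUG_SEQ_FOPS_RO() helper it is wired up with below is defined elsewhere in this file; a sketch of the single_open() boilerplate such a macro conventionally generates:

/* assumed shape, not the verbatim macro from this driver */
#define DEBUG_SEQ_FOPS_RO(name)						\
	static int debug_open_##name(struct inode *inode, struct file *f) \
	{								\
		return single_open(f, debug_read_##name, inode->i_private); \
	}								\
									\
	static const struct file_operations debug_##name##_fops = {	\
		.open = debug_open_##name,				\
		.read = seq_read,					\
		.llseek = seq_lseek,					\
		.release = single_release,				\
	}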
static void dump_ioptable(struct seq_file *s)
@@ -154,10 +245,10 @@ static int debug_read_pagetable(struct seq_file *s, void *data)
.open = simple_open, \
.read = debug_read_##name, \
.llseek = generic_file_llseek, \
- };
+ }
DEBUG_FOPS_RO(regs);
-DEBUG_FOPS_RO(tlb);
+DEBUG_SEQ_FOPS_RO(tlb);
DEBUG_SEQ_FOPS_RO(pagetable);
#define __DEBUG_ADD_FILE(attr, mode) \
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index a22c33d6a486..36d0033c2ccb 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -12,7 +12,6 @@
*/
#include <linux/err.h>
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
@@ -38,11 +37,6 @@
#define to_iommu(dev) \
((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
-#define for_each_iotlb_cr(obj, n, __i, cr) \
- for (__i = 0; \
- (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
- __i++)
-
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
@@ -72,11 +66,6 @@ struct omap_iommu_domain {
#define MMU_LOCK_VICT(x) \
((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
-struct iotlb_lock {
- short base;
- short vict;
-};
-
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
@@ -213,14 +202,6 @@ static void iommu_disable(struct omap_iommu *obj)
/*
* TLB operations
*/
-static inline int iotlb_cr_valid(struct cr_regs *cr)
-{
- if (!cr)
- return -EINVAL;
-
- return cr->cam & MMU_CAM_V;
-}
-
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
@@ -260,7 +241,7 @@ static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
return status;
}
-static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
+void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
u32 val;
@@ -268,10 +249,9 @@ static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
l->base = MMU_LOCK_BASE(val);
l->vict = MMU_LOCK_VICT(val);
-
}
-static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
+void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
u32 val;
@@ -297,7 +277,7 @@ static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
}
/* only used in iotlb iteration for-loop */
-static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
+struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
struct cr_regs cr;
struct iotlb_lock l;
@@ -468,129 +448,6 @@ static void flush_iotlb_all(struct omap_iommu *obj)
pm_runtime_put_sync(obj->dev);
}
-#ifdef CONFIG_OMAP_IOMMU_DEBUG
-
-#define pr_reg(name) \
- do { \
- ssize_t bytes; \
- const char *str = "%20s: %08x\n"; \
- const int maxcol = 32; \
- bytes = snprintf(p, maxcol, str, __stringify(name), \
- iommu_read_reg(obj, MMU_##name)); \
- p += bytes; \
- len -= bytes; \
- if (len < maxcol) \
- goto out; \
- } while (0)
-
-static ssize_t
-omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
-{
- char *p = buf;
-
- pr_reg(REVISION);
- pr_reg(IRQSTATUS);
- pr_reg(IRQENABLE);
- pr_reg(WALKING_ST);
- pr_reg(CNTL);
- pr_reg(FAULT_AD);
- pr_reg(TTB);
- pr_reg(LOCK);
- pr_reg(LD_TLB);
- pr_reg(CAM);
- pr_reg(RAM);
- pr_reg(GFLUSH);
- pr_reg(FLUSH_ENTRY);
- pr_reg(READ_CAM);
- pr_reg(READ_RAM);
- pr_reg(EMU_FAULT_AD);
-out:
- return p - buf;
-}
-
-ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
-{
- if (!obj || !buf)
- return -EINVAL;
-
- pm_runtime_get_sync(obj->dev);
-
- bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
-
- pm_runtime_put_sync(obj->dev);
-
- return bytes;
-}
-
-static int
-__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
-{
- int i;
- struct iotlb_lock saved;
- struct cr_regs tmp;
- struct cr_regs *p = crs;
-
- pm_runtime_get_sync(obj->dev);
- iotlb_lock_get(obj, &saved);
-
- for_each_iotlb_cr(obj, num, i, tmp) {
- if (!iotlb_cr_valid(&tmp))
- continue;
- *p++ = tmp;
- }
-
- iotlb_lock_set(obj, &saved);
- pm_runtime_put_sync(obj->dev);
-
- return p - crs;
-}
-
-/**
- * iotlb_dump_cr - Dump an iommu tlb entry into buf
- * @obj: target iommu
- * @cr: contents of cam and ram register
- * @buf: output buffer
- **/
-static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
- char *buf)
-{
- char *p = buf;
-
- /* FIXME: Need more detail analysis of cam/ram */
- p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
- (cr->cam & MMU_CAM_P) ? 1 : 0);
-
- return p - buf;
-}
-
-/**
- * omap_dump_tlb_entries - dump cr arrays to given buffer
- * @obj: target iommu
- * @buf: output buffer
- **/
-size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
-{
- int i, num;
- struct cr_regs *cr;
- char *p = buf;
-
- num = bytes / sizeof(*cr);
- num = min(obj->nr_tlb_entries, num);
-
- cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
- if (!cr)
- return 0;
-
- num = __dump_tlb_entries(obj, cr, num);
- for (i = 0; i < num; i++)
- p += iotlb_dump_cr(obj, cr + i, p);
- kfree(cr);
-
- return p - buf;
-}
-
-#endif /* CONFIG_OMAP_IOMMU_DEBUG */
-
/*
* H/W pagetable operations
*/
@@ -930,14 +787,14 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
if (!iopgd_is_table(*iopgd)) {
dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n",
- obj->name, errs, da, iopgd, *iopgd);
+ obj->name, errs, da, iopgd, *iopgd);
return IRQ_NONE;
}
iopte = iopte_offset(iopgd, da);
dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
- obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
+ obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
return IRQ_NONE;
}
@@ -963,9 +820,8 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
struct device *dev;
struct omap_iommu *obj;
- dev = driver_find_device(&omap_iommu_driver.driver, NULL,
- (void *)name,
- device_match_by_alias);
+ dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
+ device_match_by_alias);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -1089,7 +945,6 @@ static const struct of_device_id omap_iommu_of_match[] = {
{ .compatible = "ti,dra7-iommu" },
{},
};
-MODULE_DEVICE_TABLE(of, omap_iommu_of_match);
static struct platform_driver omap_iommu_driver = {
.probe = omap_iommu_probe,
@@ -1121,7 +976,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
- phys_addr_t pa, size_t bytes, int prot)
+ phys_addr_t pa, size_t bytes, int prot)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1148,7 +1003,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
}
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
- size_t size)
+ size_t size)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1199,7 +1054,7 @@ out:
}
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
- struct device *dev)
+ struct device *dev)
{
struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
@@ -1220,7 +1075,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
@@ -1237,16 +1092,12 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
return NULL;
omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
- if (!omap_domain) {
- pr_err("kzalloc failed\n");
+ if (!omap_domain)
goto out;
- }
omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
- if (!omap_domain->pgtable) {
- pr_err("kzalloc failed\n");
+ if (!omap_domain->pgtable)
goto fail_nomem;
- }
/*
* should never fail, but please keep this around to ensure
@@ -1285,7 +1136,7 @@ static void omap_iommu_domain_free(struct iommu_domain *domain)
}
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t da)
+ dma_addr_t da)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1302,7 +1153,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
else
dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
- (unsigned long long)da);
+ (unsigned long long)da);
} else {
if (iopgd_is_section(*pgd))
ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
@@ -1310,7 +1161,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
else
dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
- (unsigned long long)da);
+ (unsigned long long)da);
}
return ret;
@@ -1405,20 +1256,5 @@ static int __init omap_iommu_init(void)
return platform_driver_register(&omap_iommu_driver);
}
-/* must be ready before omap3isp is probed */
subsys_initcall(omap_iommu_init);
-
-static void __exit omap_iommu_exit(void)
-{
- kmem_cache_destroy(iopte_cachep);
-
- platform_driver_unregister(&omap_iommu_driver);
-
- omap_iommu_debugfs_exit();
-}
-module_exit(omap_iommu_exit);
-
-MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
-MODULE_ALIAS("platform:omap-iommu");
-MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
-MODULE_LICENSE("GPL v2");
+/* must be ready before omap3isp is probed */
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index d736630df3c8..a656df2f9e03 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -13,16 +13,18 @@
#ifndef _OMAP_IOMMU_H
#define _OMAP_IOMMU_H
+#include <linux/bitops.h>
+
+#define for_each_iotlb_cr(obj, n, __i, cr) \
+ for (__i = 0; \
+ (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
+ __i++)
+
struct iotlb_entry {
u32 da;
u32 pa;
u32 pgsz, prsvd, valid;
- union {
- u16 ap;
- struct {
- u32 endian, elsz, mixed;
- };
- };
+ u32 endian, elsz, mixed;
};
struct omap_iommu {
@@ -49,20 +51,13 @@ struct omap_iommu {
};
struct cr_regs {
- union {
- struct {
- u16 cam_l;
- u16 cam_h;
- };
- u32 cam;
- };
- union {
- struct {
- u16 ram_l;
- u16 ram_h;
- };
- u32 ram;
- };
+ u32 cam;
+ u32 ram;
+};
+
+struct iotlb_lock {
+ short base;
+ short vict;
};
/**
@@ -103,11 +98,11 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
* MMU Register bit definitions
*/
/* IRQSTATUS & IRQENABLE */
-#define MMU_IRQ_MULTIHITFAULT (1 << 4)
-#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
-#define MMU_IRQ_EMUMISS (1 << 2)
-#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
-#define MMU_IRQ_TLBMISS (1 << 0)
+#define MMU_IRQ_MULTIHITFAULT BIT(4)
+#define MMU_IRQ_TABLEWALKFAULT BIT(3)
+#define MMU_IRQ_EMUMISS BIT(2)
+#define MMU_IRQ_TRANSLATIONFAULT BIT(1)
+#define MMU_IRQ_TLBMISS BIT(0)
#define __MMU_IRQ_FAULT \
(MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
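BIT(n) from <linux/bitops.h> expands to (1UL << (n)), so these conversions keep the same mask values; the one subtle difference from the open-coded forms is that the result is unsigned long rather than int. Illustrative hypothetical defines:

#include <linux/bitops.h>

/* equivalent values, now with unsigned long type */
#define EXAMPLE_TLBMISS		BIT(0)	/* == (1UL << 0) == 0x01 */
#define EXAMPLE_MULTIHIT	BIT(4)	/* == (1UL << 4) == 0x10 */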
@@ -119,16 +114,16 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/* MMU_CNTL */
#define MMU_CNTL_SHIFT 1
#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
-#define MMU_CNTL_EML_TLB (1 << 3)
-#define MMU_CNTL_TWL_EN (1 << 2)
-#define MMU_CNTL_MMU_EN (1 << 1)
+#define MMU_CNTL_EML_TLB BIT(3)
+#define MMU_CNTL_TWL_EN BIT(2)
+#define MMU_CNTL_MMU_EN BIT(1)
/* CAM */
#define MMU_CAM_VATAG_SHIFT 12
#define MMU_CAM_VATAG_MASK \
((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
-#define MMU_CAM_P (1 << 3)
-#define MMU_CAM_V (1 << 2)
+#define MMU_CAM_P BIT(3)
+#define MMU_CAM_V BIT(2)
#define MMU_CAM_PGSZ_MASK 3
#define MMU_CAM_PGSZ_1M (0 << 0)
#define MMU_CAM_PGSZ_64K (1 << 0)
@@ -141,9 +136,9 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
#define MMU_RAM_ENDIAN_SHIFT 9
-#define MMU_RAM_ENDIAN_MASK (1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_MASK BIT(MMU_RAM_ENDIAN_SHIFT)
#define MMU_RAM_ENDIAN_LITTLE (0 << MMU_RAM_ENDIAN_SHIFT)
-#define MMU_RAM_ENDIAN_BIG (1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_BIG BIT(MMU_RAM_ENDIAN_SHIFT)
#define MMU_RAM_ELSZ_SHIFT 7
#define MMU_RAM_ELSZ_MASK (3 << MMU_RAM_ELSZ_SHIFT)
@@ -152,7 +147,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_RAM_ELSZ_32 (2 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_ELSZ_NONE (3 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_MIXED_SHIFT 6
-#define MMU_RAM_MIXED_MASK (1 << MMU_RAM_MIXED_SHIFT)
+#define MMU_RAM_MIXED_MASK BIT(MMU_RAM_MIXED_SHIFT)
#define MMU_RAM_MIXED MMU_RAM_MIXED_MASK
#define MMU_GP_REG_BUS_ERR_BACK_EN 0x1
@@ -190,12 +185,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/*
* global functions
*/
-#ifdef CONFIG_OMAP_IOMMU_DEBUG
-extern ssize_t
-omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
-extern size_t
-omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
+struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n);
+void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l);
+void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l);
+
+#ifdef CONFIG_OMAP_IOMMU_DEBUG
void omap_iommu_debugfs_init(void);
void omap_iommu_debugfs_exit(void);
@@ -222,4 +217,12 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
__raw_writel(val, obj->regbase + offs);
}
+static inline int iotlb_cr_valid(struct cr_regs *cr)
+{
+ if (!cr)
+ return -EINVAL;
+
+ return cr->cam & MMU_CAM_V;
+}
+
#endif /* _OMAP_IOMMU_H */
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h
index f891683e3f05..01a315227bf0 100644
--- a/drivers/iommu/omap-iopgtable.h
+++ b/drivers/iommu/omap-iopgtable.h
@@ -10,25 +10,30 @@
* published by the Free Software Foundation.
*/
+#ifndef _OMAP_IOPGTABLE_H
+#define _OMAP_IOPGTABLE_H
+
+#include <linux/bitops.h>
+
/*
* "L2 table" address mask and size definitions.
*/
#define IOPGD_SHIFT 20
-#define IOPGD_SIZE (1UL << IOPGD_SHIFT)
+#define IOPGD_SIZE BIT(IOPGD_SHIFT)
#define IOPGD_MASK (~(IOPGD_SIZE - 1))
/*
* "section" address mask and size definitions.
*/
#define IOSECTION_SHIFT 20
-#define IOSECTION_SIZE (1UL << IOSECTION_SHIFT)
+#define IOSECTION_SIZE BIT(IOSECTION_SHIFT)
#define IOSECTION_MASK (~(IOSECTION_SIZE - 1))
/*
* "supersection" address mask and size definitions.
*/
#define IOSUPER_SHIFT 24
-#define IOSUPER_SIZE (1UL << IOSUPER_SHIFT)
+#define IOSUPER_SIZE BIT(IOSUPER_SHIFT)
#define IOSUPER_MASK (~(IOSUPER_SIZE - 1))
#define PTRS_PER_IOPGD (1UL << (32 - IOPGD_SHIFT))
@@ -38,14 +43,14 @@
* "small page" address mask and size definitions.
*/
#define IOPTE_SHIFT 12
-#define IOPTE_SIZE (1UL << IOPTE_SHIFT)
+#define IOPTE_SIZE BIT(IOPTE_SHIFT)
#define IOPTE_MASK (~(IOPTE_SIZE - 1))
/*
* "large page" address mask and size definitions.
*/
#define IOLARGE_SHIFT 16
-#define IOLARGE_SIZE (1UL << IOLARGE_SHIFT)
+#define IOLARGE_SIZE BIT(IOLARGE_SHIFT)
#define IOLARGE_MASK (~(IOLARGE_SIZE - 1))
#define PTRS_PER_IOPTE (1UL << (IOPGD_SHIFT - IOPTE_SHIFT))
@@ -69,16 +74,16 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
/*
* some descriptor attributes.
*/
-#define IOPGD_TABLE (1 << 0)
-#define IOPGD_SECTION (2 << 0)
-#define IOPGD_SUPER (1 << 18 | 2 << 0)
+#define IOPGD_TABLE (1)
+#define IOPGD_SECTION (2)
+#define IOPGD_SUPER (BIT(18) | IOPGD_SECTION)
#define iopgd_is_table(x) (((x) & 3) == IOPGD_TABLE)
#define iopgd_is_section(x) (((x) & (1 << 18 | 3)) == IOPGD_SECTION)
#define iopgd_is_super(x) (((x) & (1 << 18 | 3)) == IOPGD_SUPER)
-#define IOPTE_SMALL (2 << 0)
-#define IOPTE_LARGE (1 << 0)
+#define IOPTE_SMALL (2)
+#define IOPTE_LARGE (1)
#define iopte_is_small(x) (((x) & 2) == IOPTE_SMALL)
#define iopte_is_large(x) (((x) & 3) == IOPTE_LARGE)
@@ -93,3 +98,5 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
/* to find an entry in the second-level page table. */
#define iopte_index(da) (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
#define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da))
+
+#endif /* _OMAP_IOPGTABLE_H */
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index c1f2e521dc52..9305964250ac 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -27,6 +27,7 @@ struct tegra_smmu {
const struct tegra_smmu_soc *soc;
unsigned long pfn_mask;
+ unsigned long tlb_mask;
unsigned long *asids;
struct mutex lock;
@@ -40,8 +41,10 @@ struct tegra_smmu_as {
struct iommu_domain domain;
struct tegra_smmu *smmu;
unsigned int use_count;
- struct page *count;
+ u32 *count;
+ struct page **pts;
struct page *pd;
+ dma_addr_t pd_dma;
unsigned id;
u32 attr;
};
@@ -68,7 +71,8 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
-#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)
+#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
+ ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
@@ -79,9 +83,9 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)
#define SMMU_PTB_DATA 0x020
-#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))
+#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))
-#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))
+#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))
#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
@@ -134,29 +138,49 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
SMMU_PTE_NONSECURE)
-static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
+static unsigned int iova_pd_index(unsigned long iova)
+{
+ return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
+}
+
+static unsigned int iova_pt_index(unsigned long iova)
+{
+ return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
+}
+
+static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
+{
+ addr >>= 12;
+ return (addr & smmu->pfn_mask) == addr;
+}
+
+static dma_addr_t smmu_pde_to_dma(u32 pde)
+{
+	/* widen before shifting so addresses above 4 GiB are not truncated */
+	return (dma_addr_t)pde << 12;
+}
+
+static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
+{
+ smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
+}
+
+static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
unsigned long offset)
{
- phys_addr_t phys = page ? page_to_phys(page) : 0;
u32 value;
- if (page) {
- offset &= ~(smmu->mc->soc->atom_size - 1);
+ offset &= ~(smmu->mc->soc->atom_size - 1);
- if (smmu->mc->soc->num_address_bits > 32) {
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
- value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
+ if (smmu->mc->soc->num_address_bits > 32) {
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
- value = 0;
+ value = 0;
#endif
- smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
- }
-
- value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
- } else {
- value = SMMU_PTC_FLUSH_TYPE_ALL;
+ smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
}
+ value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}
@@ -236,8 +260,6 @@ static bool tegra_smmu_capable(enum iommu_cap cap)
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
struct tegra_smmu_as *as;
- unsigned int i;
- uint32_t *pd;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
@@ -248,32 +270,26 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
- as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
+ as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
if (!as->pd) {
kfree(as);
return NULL;
}
- as->count = alloc_page(GFP_KERNEL);
+ as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
if (!as->count) {
__free_page(as->pd);
kfree(as);
return NULL;
}
- /* clear PDEs */
- pd = page_address(as->pd);
- SetPageReserved(as->pd);
-
- for (i = 0; i < SMMU_NUM_PDE; i++)
- pd[i] = 0;
-
- /* clear PDE usage counters */
- pd = page_address(as->count);
- SetPageReserved(as->count);
-
- for (i = 0; i < SMMU_NUM_PDE; i++)
- pd[i] = 0;
+ as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
+ if (!as->pts) {
+ kfree(as->count);
+ __free_page(as->pd);
+ kfree(as);
+ return NULL;
+ }
/* setup aperture */
as->domain.geometry.aperture_start = 0;
@@ -288,7 +304,6 @@ static void tegra_smmu_domain_free(struct iommu_domain *domain)
struct tegra_smmu_as *as = to_smmu_as(domain);
/* TODO: free page directory and page tables */
- ClearPageReserved(as->pd);
kfree(as);
}
@@ -376,16 +391,26 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
return 0;
}
+ as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(smmu->dev, as->pd_dma))
+ return -ENOMEM;
+
+ /* We can't handle 64-bit DMA addresses */
+ if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
+ err = -ENOMEM;
+ goto err_unmap;
+ }
+
err = tegra_smmu_alloc_asid(smmu, &as->id);
if (err < 0)
- return err;
+ goto err_unmap;
- smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
- smmu_flush_ptc(smmu, as->pd, 0);
+ smmu_flush_ptc(smmu, as->pd_dma, 0);
smmu_flush_tlb_asid(smmu, as->id);
smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
- value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
+ value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
smmu_writel(smmu, value, SMMU_PTB_DATA);
smmu_flush(smmu);
@@ -393,6 +418,10 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
as->use_count++;
return 0;
+
+err_unmap:
+ dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+ return err;
}
static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
@@ -402,6 +431,9 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
return;
tegra_smmu_free_asid(smmu, as->id);
+
+ dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+
as->smmu = NULL;
}
@@ -465,96 +497,155 @@ static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *de
}
}
+static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
+ u32 value)
+{
+ unsigned int pd_index = iova_pd_index(iova);
+ struct tegra_smmu *smmu = as->smmu;
+ u32 *pd = page_address(as->pd);
+ unsigned long offset = pd_index * sizeof(*pd);
+
+ /* Set the page directory entry first */
+ pd[pd_index] = value;
+
+ /* Then flush the page directory entry from caches */
+ dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
+ sizeof(*pd), DMA_TO_DEVICE);
+
+ /* And flush the iommu */
+ smmu_flush_ptc(smmu, as->pd_dma, offset);
+ smmu_flush_tlb_section(smmu, as->id, iova);
+ smmu_flush(smmu);
+}
+
+static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
+{
+ u32 *pt = page_address(pt_page);
+
+ return pt + iova_pt_index(iova);
+}
+
+static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
+ dma_addr_t *dmap)
+{
+ unsigned int pd_index = iova_pd_index(iova);
+ struct page *pt_page;
+ u32 *pd;
+
+ pt_page = as->pts[pd_index];
+ if (!pt_page)
+ return NULL;
+
+ pd = page_address(as->pd);
+ *dmap = smmu_pde_to_dma(pd[pd_index]);
+
+ return tegra_smmu_pte_offset(pt_page, iova);
+}
+
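
The iova_pd_index()/iova_pt_index() helpers used here perform the same split as the removed open-coded shifts. A quick sketch of that decomposition, assuming 4KiB pages and the 0x3ff masks visible below (10 PDE bits, 10 PTE bits, 12 offset bits):

#include <stdint.h>
#include <stdio.h>

#define SMMU_PDE_SHIFT 22        /* 10-bit page-directory index */
#define SMMU_PTE_SHIFT 12        /* 10-bit page-table index */

static unsigned int pd_index(uint32_t iova)
{
        return (iova >> SMMU_PDE_SHIFT) & 0x3ff;
}

static unsigned int pt_index(uint32_t iova)
{
        return (iova >> SMMU_PTE_SHIFT) & 0x3ff;
}

int main(void)
{
        uint32_t iova = 0x80402003;

        printf("pde=%u pte=%u offset=%u\n",
               pd_index(iova), pt_index(iova), iova & 0xfff);
        return 0;
}
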
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
- struct page **pagep)
+ dma_addr_t *dmap)
{
- u32 *pd = page_address(as->pd), *pt, *count;
- u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
- u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
+ unsigned int pde = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
- struct page *page;
- unsigned int i;
- if (pd[pde] == 0) {
- page = alloc_page(GFP_KERNEL | __GFP_DMA);
+ if (!as->pts[pde]) {
+ struct page *page;
+ dma_addr_t dma;
+
+ page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
if (!page)
return NULL;
- pt = page_address(page);
- SetPageReserved(page);
+ dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(smmu->dev, dma)) {
+ __free_page(page);
+ return NULL;
+ }
- for (i = 0; i < SMMU_NUM_PTE; i++)
- pt[i] = 0;
+ if (!smmu_dma_addr_valid(smmu, dma)) {
+ dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
+ __free_page(page);
+ return NULL;
+ }
- smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);
+ as->pts[pde] = page;
- pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);
+ tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
+ SMMU_PDE_NEXT));
- smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
- smmu_flush_ptc(smmu, as->pd, pde << 2);
- smmu_flush_tlb_section(smmu, as->id, iova);
- smmu_flush(smmu);
+ *dmap = dma;
} else {
- page = pfn_to_page(pd[pde] & smmu->pfn_mask);
- pt = page_address(page);
+ u32 *pd = page_address(as->pd);
+
+ *dmap = smmu_pde_to_dma(pd[pde]);
}
- *pagep = page;
+ return tegra_smmu_pte_offset(as->pts[pde], iova);
+}
- /* Keep track of entries in this page table. */
- count = page_address(as->count);
- if (pt[pte] == 0)
- count[pde]++;
+static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
+{
+ unsigned int pd_index = iova_pd_index(iova);
- return &pt[pte];
+ as->count[pd_index]++;
}
-static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
+static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
- u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
- u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
- u32 *count = page_address(as->count);
- u32 *pd = page_address(as->pd), *pt;
- struct page *page;
-
- page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
- pt = page_address(page);
+ unsigned int pde = iova_pd_index(iova);
+ struct page *page = as->pts[pde];
/*
* When no entries in this page table are used anymore, return the
* memory page to the system.
*/
- if (pt[pte] != 0) {
- if (--count[pde] == 0) {
- ClearPageReserved(page);
- __free_page(page);
- pd[pde] = 0;
- }
+ if (--as->count[pde] == 0) {
+ struct tegra_smmu *smmu = as->smmu;
+ u32 *pd = page_address(as->pd);
+ dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
+
+ tegra_smmu_set_pde(as, iova, 0);
- pt[pte] = 0;
+ dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
+ __free_page(page);
+ as->pts[pde] = NULL;
}
}
+static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
+ u32 *pte, dma_addr_t pte_dma, u32 val)
+{
+ struct tegra_smmu *smmu = as->smmu;
+ unsigned long offset = offset_in_page(pte);
+
+ *pte = val;
+
+ dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
+ 4, DMA_TO_DEVICE);
+ smmu_flush_ptc(smmu, pte_dma, offset);
+ smmu_flush_tlb_group(smmu, as->id, iova);
+ smmu_flush(smmu);
+}
+
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
- struct tegra_smmu *smmu = as->smmu;
- unsigned long offset;
- struct page *page;
+ dma_addr_t pte_dma;
u32 *pte;
- pte = as_get_pte(as, iova, &page);
+ pte = as_get_pte(as, iova, &pte_dma);
if (!pte)
return -ENOMEM;
- *pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR;
- offset = offset_in_page(pte);
+ /* If we aren't overwriting a pre-existing entry, increment use */
+ if (*pte == 0)
+ tegra_smmu_pte_get_use(as, iova);
- smmu->soc->ops->flush_dcache(page, offset, 4);
- smmu_flush_ptc(smmu, page, offset);
- smmu_flush_tlb_group(smmu, as->id, iova);
- smmu_flush(smmu);
+ tegra_smmu_set_pte(as, iova, pte, pte_dma,
+ __phys_to_pfn(paddr) | SMMU_PTE_ATTR);
return 0;
}
@@ -563,22 +654,15 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
- struct tegra_smmu *smmu = as->smmu;
- unsigned long offset;
- struct page *page;
+ dma_addr_t pte_dma;
u32 *pte;
- pte = as_get_pte(as, iova, &page);
- if (!pte)
+ pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
+ if (!pte || !*pte)
return 0;
- offset = offset_in_page(pte);
- as_put_pte(as, iova);
-
- smmu->soc->ops->flush_dcache(page, offset, 4);
- smmu_flush_ptc(smmu, page, offset);
- smmu_flush_tlb_group(smmu, as->id, iova);
- smmu_flush(smmu);
+ tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
+ tegra_smmu_pte_put_use(as, iova);
return size;
}
@@ -587,11 +671,14 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
- struct page *page;
unsigned long pfn;
+ dma_addr_t pte_dma;
u32 *pte;
- pte = as_get_pte(as, iova, &page);
+ pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
+ if (!pte || !*pte)
+ return 0;
+
pfn = *pte & as->smmu->pfn_mask;
return PFN_PHYS(pfn);
@@ -816,6 +903,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
mc->soc->num_address_bits, smmu->pfn_mask);
+ smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
+ dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
+ smmu->tlb_mask);
value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
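
The tlb_mask computed above is what SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) programs below, replacing the old hard-coded 0x20. A quick check of the arithmetic (values illustrative):

#include <stdio.h>

/* (lines << 1) - 1 produces an all-ones mask covering the active-lines
 * field, e.g. 16 lines -> 0x1f and 32 lines -> 0x3f.
 */
int main(void)
{
        unsigned int lines[] = { 16, 32 };
        unsigned int i;

        for (i = 0; i < 2; i++)
                printf("num_tlb_lines=%u -> tlb_mask=%#x\n",
                       lines[i], (lines[i] << 1) - 1);
        return 0;
}
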
@@ -825,14 +915,14 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
smmu_writel(smmu, value, SMMU_PTC_CONFIG);
value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
- SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
+ SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
if (soc->supports_round_robin_arbitration)
value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
smmu_writel(smmu, value, SMMU_TLB_CONFIG);
- smmu_flush_ptc(smmu, NULL, 0);
+ smmu_flush_ptc_all(smmu);
smmu_flush_tlb(smmu);
smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
smmu_flush(smmu);
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 692fe2bc8197..c12bb93334ff 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -68,7 +68,9 @@ static struct irq_chip crossbar_chip = {
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy,
- .irq_set_wake = irq_chip_set_wake_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
#endif
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index b7d54d428b5e..ff4be0515a0d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -538,7 +538,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 32814371b8d3..aa1b41ca40f7 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -1471,5 +1471,3 @@ module_exit(mq_exit);
MODULE_AUTHOR("Joe Thornber <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");
-
-MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 48a4a826ae07..200366c62231 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1789,3 +1789,5 @@ module_exit(smq_exit);
MODULE_AUTHOR("Joe Thornber <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("smq cache policy");
+
+MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 48dfe3c4d6aa..6ba47cfb1443 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1293,8 +1293,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
return r;
disk_super = dm_block_data(copy);
- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+ dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
+ dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
dm_sm_dec_block(pmd->metadata_sm, held_root);
return dm_tm_unlock(pmd->tm, copy);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ab37ae114e94..0d7ab20c58df 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1729,7 +1729,8 @@ static int dm_merge_bvec(struct request_queue *q,
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_live_table_fast(md);
struct dm_target *ti;
- sector_t max_sectors, max_size = 0;
+ sector_t max_sectors;
+ int max_size = 0;
if (unlikely(!map))
goto out;
@@ -1742,18 +1743,10 @@ static int dm_merge_bvec(struct request_queue *q,
* Find maximum amount of I/O that won't need splitting
*/
max_sectors = min(max_io_len(bvm->bi_sector, ti),
- (sector_t) queue_max_sectors(q));
+ (sector_t) BIO_MAX_SECTORS);
max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-
- /*
- * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
- * to the targets' merge function since it holds sectors not bytes).
- * Just doing this as an interim fix for stable@ because the more
- * comprehensive cleanup of switching to sector_t will impact every
- * DM target that implements a ->merge hook.
- */
- if (max_size > INT_MAX)
- max_size = INT_MAX;
+ if (max_size < 0)
+ max_size = 0;
/*
* merge_bvec_fn() returns number of bytes
@@ -1761,13 +1754,13 @@ static int dm_merge_bvec(struct request_queue *q,
* max is precomputed maximal io size
*/
if (max_size && ti->type->merge)
- max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
+ max_size = ti->type->merge(ti, bvm, biovec, max_size);
/*
* If the target doesn't support merge method and some of the devices
- * provided their merge_bvec method (we know this by looking for the
- * max_hw_sectors that dm_set_device_limits may set), then we can't
- * allow bios with multiple vector entries. So always set max_size
- * to 0, and the code below allows just one page.
+ * provided their merge_bvec method (we know this by looking at
+ * queue_max_hw_sectors), then we can't allow bios with multiple vector
+ * entries. So always set max_size to 0, and the code below allows
+ * just one page.
*/
else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
max_size = 0;
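
Capping max_sectors at BIO_MAX_SECTORS keeps the byte budget within int range, so the old INT_MAX stop-gap can go; a negative intermediate result simply clamps to zero. A toy model of the new arithmetic (the BIO_MAX_SECTORS value is assumed for the demo):

#include <stdio.h>

#define SECTOR_SHIFT    9
#define BIO_MAX_SECTORS 2048u   /* assumed: 256 pages * 8 sectors/page */

static int merge_budget(unsigned int max_sectors, unsigned int bi_size)
{
        int max_size = (int)(max_sectors << SECTOR_SHIFT) - (int)bi_size;

        return max_size < 0 ? 0 : max_size;     /* clamp, no overflow risk */
}

int main(void)
{
        printf("%d\n", merge_budget(BIO_MAX_SECTORS, 4096)); /* 1MiB - 4KiB */
        printf("%d\n", merge_budget(8, 1 << 20));            /* clamped to 0 */
        return 0;
}
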
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0c2a4e8b873c..e25f00f0138a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5759,7 +5759,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
char *ptr;
int err;
- file = kmalloc(sizeof(*file), GFP_NOIO);
+ file = kzalloc(sizeof(*file), GFP_NOIO);
if (!file)
return -ENOMEM;
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index bf2b80d5c470..8731b6ea026b 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
extern struct dm_block_validator btree_node_validator;
+/*
+ * Value type for upper levels of multi-level btrees.
+ */
+extern void init_le64_type(struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt);
+
#endif /* DM_BTREE_INTERNAL_H */
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 9836c0ae897c..4222f774cf36 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
return r;
}
-static struct dm_btree_value_type le64_type = {
- .context = NULL,
- .size = sizeof(__le64),
- .inc = NULL,
- .dec = NULL,
- .equal = NULL
-};
-
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, dm_block_t *new_root)
{
@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
int index = 0, r = 0;
struct shadow_spine spine;
struct btree_node *n;
+ struct dm_btree_value_type le64_vt;
+ init_le64_type(info->tm, &le64_vt);
init_shadow_spine(&spine, info);
for (level = 0; level < info->levels; level++) {
r = remove_raw(&spine, info,
(level == last_level ?
- &info->value_type : &le64_type),
+ &info->value_type : &le64_vt),
root, keys[level], (unsigned *)&index);
if (r < 0)
break;
@@ -654,11 +648,13 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
int index = 0, r = 0;
struct shadow_spine spine;
struct btree_node *n;
+ struct dm_btree_value_type le64_vt;
uint64_t k;
+ init_le64_type(info->tm, &le64_vt);
init_shadow_spine(&spine, info);
for (level = 0; level < last_level; level++) {
- r = remove_raw(&spine, info, &le64_type,
+ r = remove_raw(&spine, info, &le64_vt,
root, keys[level], (unsigned *) &index);
if (r < 0)
goto out;
@@ -689,6 +685,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
value_ptr(n, index));
delete_at(n, index);
+ keys[last_level] = k + 1ull;
} else
r = -ENODATA;
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index 1b5e13ec7f96..0dee514ba4c5 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
{
return s->root;
}
+
+static void le64_inc(void *context, const void *value_le)
+{
+ struct dm_transaction_manager *tm = context;
+ __le64 v_le;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ dm_tm_inc(tm, le64_to_cpu(v_le));
+}
+
+static void le64_dec(void *context, const void *value_le)
+{
+ struct dm_transaction_manager *tm = context;
+ __le64 v_le;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ dm_tm_dec(tm, le64_to_cpu(v_le));
+}
+
+static int le64_equal(void *context, const void *value1_le, const void *value2_le)
+{
+ __le64 v1_le, v2_le;
+
+ memcpy(&v1_le, value1_le, sizeof(v1_le));
+ memcpy(&v2_le, value2_le, sizeof(v2_le));
+ return v1_le == v2_le;
+}
+
+void init_le64_type(struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt)
+{
+ vt->context = tm;
+ vt->size = sizeof(__le64);
+ vt->inc = le64_inc;
+ vt->dec = le64_dec;
+ vt->equal = le64_equal;
+}
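
le64_inc/le64_dec/le64_equal copy the value into a local instead of dereferencing it because btree values need not be naturally aligned. The same access pattern in a standalone sketch (little-endian host assumed for the printout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t read_le64(const void *value_le)
{
        uint64_t v;

        memcpy(&v, value_le, sizeof(v));        /* unaligned-safe read */
        return v;
}

int main(void)
{
        unsigned char buf[9] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };

        /* buf + 1 is deliberately misaligned */
        printf("%#llx\n", (unsigned long long)read_le64(buf + 1));
        return 0;
}
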
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index fdd3793e22f9..c7726cebc495 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
struct btree_node *n;
struct dm_btree_value_type le64_type;
- le64_type.context = NULL;
- le64_type.size = sizeof(__le64);
- le64_type.inc = NULL;
- le64_type.dec = NULL;
- le64_type.equal = NULL;
-
+ init_le64_type(info->tm, &le64_type);
init_shadow_spine(&spine, info);
for (level = 0; level < (info->levels - 1); level++) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 94f5b55069e0..967a4ed73929 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1476,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
struct r1conf *conf = mddev->private;
+ unsigned long flags;
/*
* If it is not operational, then we have already marked it as dead
@@ -1495,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
return;
}
set_bit(Blocked, &rdev->flags);
+ spin_lock_irqsave(&conf->device_lock, flags);
if (test_and_clear_bit(In_sync, &rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded++;
set_bit(Faulty, &rdev->flags);
- spin_unlock_irqrestore(&conf->device_lock, flags);
} else
set_bit(Faulty, &rdev->flags);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
/*
* if recovery is running, make sure it aborts.
*/
@@ -1568,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev)
* Find all failed disks within the RAID1 configuration
* and mark them readable.
* Called under mddev lock, so rcu protection not needed.
+ * device_lock is used to avoid races with raid1_end_read_request,
+ * which expects the 'In_sync' flags and ->degraded to be consistent.
*/
+ spin_lock_irqsave(&conf->device_lock, flags);
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = conf->mirrors[i].rdev;
struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1599,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev)
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
}
- spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded -= count;
spin_unlock_irqrestore(&conf->device_lock, flags);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 643d217bfa13..f757023fc458 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2256,7 +2256,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
static int drop_one_stripe(struct r5conf *conf)
{
struct stripe_head *sh;
- int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
+ int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
spin_lock_irq(conf->hash_locks + hash);
sh = get_free_stripe(conf, hash);
@@ -6388,7 +6388,8 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
if (mutex_trylock(&conf->cache_size_mutex)) {
ret= 0;
- while (ret < sc->nr_to_scan) {
+ while (ret < sc->nr_to_scan &&
+ conf->max_nr_stripes > conf->min_nr_stripes) {
if (drop_one_stripe(conf) == 0) {
ret = SHRINK_STOP;
break;
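
The drop_one_stripe() hunk above swaps '%' for '&'. For a power-of-two lock count the two agree on non-negative values, and the mask additionally stays in range if max_nr_stripes - 1 ever evaluates to -1, where C's '%' would not. A quick demonstration (NR_STRIPE_HASH_LOCKS = 8 assumed):

#include <stdio.h>

#define NR_STRIPE_HASH_LOCKS   8   /* assumed power of two */
#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)

int main(void)
{
        int values[] = { 255, 256, 0 };
        int i;

        for (i = 0; i < 3; i++) {
                int n = values[i] - 1;

                printf("n=%d  mod=%d  mask=%d\n", n,
                       n % NR_STRIPE_HASH_LOCKS, n & STRIPE_HASH_LOCKS_MASK);
        }
        return 0;
}
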
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 0d35f5850ff1..5ab90f36a6a6 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -240,7 +240,7 @@ config DVB_SI21XX
config DVB_TS2020
tristate "Montage Tehnology TS2020 based tuners"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index 3be1b2c3c386..6a1c0089bb62 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -2,6 +2,7 @@ config VIDEO_COBALT
tristate "Cisco Cobalt support"
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB
+ depends on SND
select I2C_ALGOBIT
select VIDEO_ADV7604
select VIDEO_ADV7511
diff --git a/drivers/media/pci/cobalt/cobalt-irq.c b/drivers/media/pci/cobalt/cobalt-irq.c
index dd4bff9cf339..d1f5898d11ba 100644
--- a/drivers/media/pci/cobalt/cobalt-irq.c
+++ b/drivers/media/pci/cobalt/cobalt-irq.c
@@ -139,7 +139,7 @@ done:
also know about dropped frames. */
cb->vb.v4l2_buf.sequence = s->sequence++;
vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
- VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE);
+ VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
}
irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
diff --git a/drivers/media/pci/mantis/mantis_dma.c b/drivers/media/pci/mantis/mantis_dma.c
index 1d59c7e039f7..87990ece5848 100644
--- a/drivers/media/pci/mantis/mantis_dma.c
+++ b/drivers/media/pci/mantis/mantis_dma.c
@@ -130,10 +130,11 @@ err:
int mantis_dma_init(struct mantis_pci *mantis)
{
- int err = 0;
+ int err;
dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
- if (mantis_alloc_buffers(mantis) < 0) {
+ err = mantis_alloc_buffers(mantis);
+ if (err < 0) {
dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
/* Stop RISC Engine */
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 8939ebd74391..84fa6e9b59a1 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -184,125 +184,9 @@ out:
return -EINVAL;
}
-static struct ir_raw_timings_manchester ir_rc5_timings = {
- .leader = RC5_UNIT,
- .pulse_space_start = 0,
- .clock = RC5_UNIT,
- .trailer_space = RC5_UNIT * 10,
-};
-
-static struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
- {
- .leader = RC5_UNIT,
- .pulse_space_start = 0,
- .clock = RC5_UNIT,
- .trailer_space = RC5X_SPACE,
- },
- {
- .clock = RC5_UNIT,
- .trailer_space = RC5_UNIT * 10,
- },
-};
-
-static struct ir_raw_timings_manchester ir_rc5_sz_timings = {
- .leader = RC5_UNIT,
- .pulse_space_start = 0,
- .clock = RC5_UNIT,
- .trailer_space = RC5_UNIT * 10,
-};
-
-static int ir_rc5_validate_filter(const struct rc_scancode_filter *scancode,
- unsigned int important_bits)
-{
- /* all important bits of scancode should be set in mask */
- if (~scancode->mask & important_bits)
- return -EINVAL;
- /* extra bits in mask should be zero in data */
- if (scancode->mask & scancode->data & ~important_bits)
- return -EINVAL;
- return 0;
-}
-
-/**
- * ir_rc5_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode: scancode filter describing scancode (helps distinguish between
- * protocol subtypes when scancode is ambiguous)
- * @events: array of raw ir events to write into
- * @max: maximum size of @events
- *
- * Returns: The number of events written.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * encoding. In this case all @max events will have been written.
- * -EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc5_encode(u64 protocols,
- const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max)
-{
- int ret;
- struct ir_raw_event *e = events;
- unsigned int data, xdata, command, commandx, system;
-
- /* Detect protocol and convert scancode to raw data */
- if (protocols & RC_BIT_RC5 &&
- !ir_rc5_validate_filter(scancode, 0x1f7f)) {
- /* decode scancode */
- command = (scancode->data & 0x003f) >> 0;
- commandx = (scancode->data & 0x0040) >> 6;
- system = (scancode->data & 0x1f00) >> 8;
- /* encode data */
- data = !commandx << 12 | system << 6 | command;
-
- /* Modulate the data */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings, RC5_NBITS,
- data);
- if (ret < 0)
- return ret;
- } else if (protocols & RC_BIT_RC5X &&
- !ir_rc5_validate_filter(scancode, 0x1f7f3f)) {
- /* decode scancode */
- xdata = (scancode->data & 0x00003f) >> 0;
- command = (scancode->data & 0x003f00) >> 8;
- commandx = (scancode->data & 0x004000) >> 14;
- system = (scancode->data & 0x1f0000) >> 16;
- /* commandx and system overlap, bits must match when encoded */
- if (commandx == (system & 0x1))
- return -EINVAL;
- /* encode data */
- data = 1 << 18 | system << 12 | command << 6 | xdata;
-
- /* Modulate the data */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
- CHECK_RC5X_NBITS,
- data >> (RC5X_NBITS-CHECK_RC5X_NBITS));
- if (ret < 0)
- return ret;
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc5x_timings[1],
- RC5X_NBITS - CHECK_RC5X_NBITS,
- data);
- if (ret < 0)
- return ret;
- } else if (protocols & RC_BIT_RC5_SZ &&
- !ir_rc5_validate_filter(scancode, 0x2fff)) {
- /* RC5-SZ scancode is raw enough for Manchester as it is */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
- RC5_SZ_NBITS, scancode->data & 0x2fff);
- if (ret < 0)
- return ret;
- } else {
- return -EINVAL;
- }
-
- return e - events;
-}
-
static struct ir_raw_handler rc5_handler = {
.protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ,
.decode = ir_rc5_decode,
- .encode = ir_rc5_encode,
};
static int __init ir_rc5_decode_init(void)
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index f9c70baf6e0c..d16bc67af732 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -291,133 +291,11 @@ out:
return -EINVAL;
}
-static struct ir_raw_timings_manchester ir_rc6_timings[4] = {
- {
- .leader = RC6_PREFIX_PULSE,
- .pulse_space_start = 0,
- .clock = RC6_UNIT,
- .invert = 1,
- .trailer_space = RC6_PREFIX_SPACE,
- },
- {
- .clock = RC6_UNIT,
- .invert = 1,
- },
- {
- .clock = RC6_UNIT * 2,
- .invert = 1,
- },
- {
- .clock = RC6_UNIT,
- .invert = 1,
- .trailer_space = RC6_SUFFIX_SPACE,
- },
-};
-
-static int ir_rc6_validate_filter(const struct rc_scancode_filter *scancode,
- unsigned int important_bits)
-{
- /* all important bits of scancode should be set in mask */
- if (~scancode->mask & important_bits)
- return -EINVAL;
- /* extra bits in mask should be zero in data */
- if (scancode->mask & scancode->data & ~important_bits)
- return -EINVAL;
- return 0;
-}
-
-/**
- * ir_rc6_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode: scancode filter describing scancode (helps distinguish between
- * protocol subtypes when scancode is ambiguous)
- * @events: array of raw ir events to write into
- * @max: maximum size of @events
- *
- * Returns: The number of events written.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * encoding. In this case all @max events will have been written.
- * -EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc6_encode(u64 protocols,
- const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max)
-{
- int ret;
- struct ir_raw_event *e = events;
-
- if (protocols & RC_BIT_RC6_0 &&
- !ir_rc6_validate_filter(scancode, 0xffff)) {
-
- /* Modulate the preamble */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate the header (Start Bit & Mode-0) */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[1],
- RC6_HEADER_NBITS, (1 << 3));
- if (ret < 0)
- return ret;
-
- /* Modulate Trailer Bit */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[2], 1, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate rest of the data */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[3], RC6_0_NBITS,
- scancode->data);
- if (ret < 0)
- return ret;
-
- } else if (protocols & (RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
- RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE) &&
- !ir_rc6_validate_filter(scancode, 0x8fffffff)) {
-
- /* Modulate the preamble */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate the header (Start Bit & Header-version 6 */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[1],
- RC6_HEADER_NBITS, (1 << 3 | 6));
- if (ret < 0)
- return ret;
-
- /* Modulate Trailer Bit */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[2], 1, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate rest of the data */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[3],
- fls(scancode->mask),
- scancode->data);
- if (ret < 0)
- return ret;
-
- } else {
- return -EINVAL;
- }
-
- return e - events;
-}
-
static struct ir_raw_handler rc6_handler = {
.protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
RC_BIT_RC6_MCE,
.decode = ir_rc6_decode,
- .encode = ir_rc6_encode,
};
static int __init ir_rc6_decode_init(void)
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index baeb5971fd52..85af7a869167 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -526,130 +526,6 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
return 0;
}
-static int nvt_write_wakeup_codes(struct rc_dev *dev,
- const u8 *wakeup_sample_buf, int count)
-{
- int i = 0;
- u8 reg, reg_learn_mode;
- unsigned long flags;
- struct nvt_dev *nvt = dev->priv;
-
- nvt_dbg_wake("writing wakeup samples");
-
- reg = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
- reg_learn_mode = reg & ~CIR_WAKE_IRCON_MODE0;
- reg_learn_mode |= CIR_WAKE_IRCON_MODE1;
-
- /* Lock the learn area to prevent racing with wake-isr */
- spin_lock_irqsave(&nvt->nvt_lock, flags);
-
- /* Enable fifo writes */
- nvt_cir_wake_reg_write(nvt, reg_learn_mode, CIR_WAKE_IRCON);
-
- /* Clear cir wake rx fifo */
- nvt_clear_cir_wake_fifo(nvt);
-
- if (count > WAKE_FIFO_LEN) {
- nvt_dbg_wake("HW FIFO too small for all wake samples");
- count = WAKE_FIFO_LEN;
- }
-
- if (count)
- pr_info("Wake samples (%d) =", count);
- else
- pr_info("Wake sample fifo cleared");
-
- /* Write wake samples to fifo */
- for (i = 0; i < count; i++) {
- pr_cont(" %02x", wakeup_sample_buf[i]);
- nvt_cir_wake_reg_write(nvt, wakeup_sample_buf[i],
- CIR_WAKE_WR_FIFO_DATA);
- }
- pr_cont("\n");
-
- /* Switch cir to wakeup mode and disable fifo writing */
- nvt_cir_wake_reg_write(nvt, reg, CIR_WAKE_IRCON);
-
- /* Set number of bytes needed for wake */
- nvt_cir_wake_reg_write(nvt, count ? count :
- CIR_WAKE_FIFO_CMP_BYTES,
- CIR_WAKE_FIFO_CMP_DEEP);
-
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
- return 0;
-}
-
-static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
- struct rc_scancode_filter *sc_filter)
-{
- u8 *reg_buf;
- u8 buf_val;
- int i, ret, count;
- unsigned int val;
- struct ir_raw_event *raw;
- bool complete;
-
- /* Require both mask and data to be set before actually committing */
- if (!sc_filter->mask || !sc_filter->data)
- return 0;
-
- raw = kmalloc_array(WAKE_FIFO_LEN, sizeof(*raw), GFP_KERNEL);
- if (!raw)
- return -ENOMEM;
-
- ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
- raw, WAKE_FIFO_LEN);
- complete = (ret != -ENOBUFS);
- if (!complete)
- ret = WAKE_FIFO_LEN;
- else if (ret < 0)
- goto out_raw;
-
- reg_buf = kmalloc_array(WAKE_FIFO_LEN, sizeof(*reg_buf), GFP_KERNEL);
- if (!reg_buf) {
- ret = -ENOMEM;
- goto out_raw;
- }
-
- /* Inspect the ir samples */
- for (i = 0, count = 0; i < ret && count < WAKE_FIFO_LEN; ++i) {
- val = NS_TO_US((raw[i]).duration) / SAMPLE_PERIOD;
-
- /* Split too large values into several smaller ones */
- while (val > 0 && count < WAKE_FIFO_LEN) {
-
- /* Skip last value for better comparison tolerance */
- if (complete && i == ret - 1 && val < BUF_LEN_MASK)
- break;
-
- /* Clamp values to BUF_LEN_MASK at most */
- buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
-
- reg_buf[count] = buf_val;
- val -= buf_val;
- if ((raw[i]).pulse)
- reg_buf[count] |= BUF_PULSE_BIT;
- count++;
- }
- }
-
- ret = nvt_write_wakeup_codes(dev, reg_buf, count);
-
- kfree(reg_buf);
-out_raw:
- kfree(raw);
-
- return ret;
-}
-
-/* Dummy implementation. nuvoton is agnostic to the protocol used */
-static int nvt_ir_raw_change_wakeup_protocol(struct rc_dev *dev,
- u64 *rc_type)
-{
- return 0;
-}
-
/*
* nvt_tx_ir
*
@@ -1167,14 +1043,11 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
/* Set up the rc device */
rdev->priv = nvt;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->encode_wakeup = true;
rdev->allowed_protocols = RC_BIT_ALL;
rdev->open = nvt_open;
rdev->close = nvt_close;
rdev->tx_ir = nvt_tx_ir;
rdev->s_tx_carrier = nvt_set_tx_carrier;
- rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
- rdev->change_wakeup_protocol = nvt_ir_raw_change_wakeup_protocol;
rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
rdev->input_phys = "nuvoton/cir0";
rdev->input_id.bustype = BUS_HOST;
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 9d0e161c2a88..e1cf23c3875b 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -63,7 +63,6 @@ static int debug;
*/
#define TX_BUF_LEN 256
#define RX_BUF_LEN 32
-#define WAKE_FIFO_LEN 67
struct nvt_dev {
struct pnp_dev *pdev;
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 4b994aa2f2a7..b68d4f762734 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -25,8 +25,6 @@ struct ir_raw_handler {
u64 protocols; /* which are handled by this handler */
int (*decode)(struct rc_dev *dev, struct ir_raw_event event);
- int (*encode)(u64 protocols, const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max);
/* These two should only be used by the lirc decoder */
int (*raw_register)(struct rc_dev *dev);
@@ -152,44 +150,10 @@ static inline bool is_timing_event(struct ir_raw_event ev)
#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000)
#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
-/* functions for IR encoders */
-
-static inline void init_ir_raw_event_duration(struct ir_raw_event *ev,
- unsigned int pulse,
- u32 duration)
-{
- init_ir_raw_event(ev);
- ev->duration = duration;
- ev->pulse = pulse;
-}
-
-/**
- * struct ir_raw_timings_manchester - Manchester coding timings
- * @leader: duration of leader pulse (if any) 0 if continuing
- * existing signal (see @pulse_space_start)
- * @pulse_space_start: 1 for starting with pulse (0 for starting with space)
- * @clock: duration of each pulse/space in ns
- * @invert: if set clock logic is inverted
- * (0 = space + pulse, 1 = pulse + space)
- * @trailer_space: duration of trailer space in ns
- */
-struct ir_raw_timings_manchester {
- unsigned int leader;
- unsigned int pulse_space_start:1;
- unsigned int clock;
- unsigned int invert:1;
- unsigned int trailer_space;
-};
-
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
- const struct ir_raw_timings_manchester *timings,
- unsigned int n, unsigned int data);
-
/*
* Routines from rc-raw.c to be used internally and by decoders
*/
u64 ir_raw_get_allowed_protocols(void);
-u64 ir_raw_get_encode_protocols(void);
int ir_raw_event_register(struct rc_dev *dev);
void ir_raw_event_unregister(struct rc_dev *dev);
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index b9e4645c731c..b732ac6a26d8 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -30,7 +30,6 @@ static LIST_HEAD(ir_raw_client_list);
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static u64 available_protocols;
-static u64 encode_protocols;
static int ir_raw_event_thread(void *data)
{
@@ -241,146 +240,12 @@ ir_raw_get_allowed_protocols(void)
return protocols;
}
-/* used internally by the sysfs interface */
-u64
-ir_raw_get_encode_protocols(void)
-{
- u64 protocols;
-
- mutex_lock(&ir_raw_handler_lock);
- protocols = encode_protocols;
- mutex_unlock(&ir_raw_handler_lock);
- return protocols;
-}
-
static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
/* the caller will update dev->enabled_protocols */
return 0;
}
-/**
- * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
- * @ev: Pointer to pointer to next free event. *@ev is incremented for
- * each raw event filled.
- * @max: Maximum number of raw events to fill.
- * @timings: Manchester modulation timings.
- * @n: Number of bits of data.
- * @data: Data bits to encode.
- *
- * Encodes the @n least significant bits of @data using Manchester (bi-phase)
- * modulation with the timing characteristics described by @timings, writing up
- * to @max raw IR events using the *@ev pointer.
- *
- * Returns: 0 on success.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * full encoded data. In this case all @max events will have been
- * written.
- */
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
- const struct ir_raw_timings_manchester *timings,
- unsigned int n, unsigned int data)
-{
- bool need_pulse;
- unsigned int i;
- int ret = -ENOBUFS;
-
- i = 1 << (n - 1);
-
- if (timings->leader) {
- if (!max--)
- return ret;
- if (timings->pulse_space_start) {
- init_ir_raw_event_duration((*ev)++, 1, timings->leader);
-
- if (!max--)
- return ret;
- init_ir_raw_event_duration((*ev), 0, timings->leader);
- } else {
- init_ir_raw_event_duration((*ev), 1, timings->leader);
- }
- i >>= 1;
- } else {
- /* continue existing signal */
- --(*ev);
- }
- /* from here on *ev will point to the last event rather than the next */
-
- while (n && i > 0) {
- need_pulse = !(data & i);
- if (timings->invert)
- need_pulse = !need_pulse;
- if (need_pulse == !!(*ev)->pulse) {
- (*ev)->duration += timings->clock;
- } else {
- if (!max--)
- goto nobufs;
- init_ir_raw_event_duration(++(*ev), need_pulse,
- timings->clock);
- }
-
- if (!max--)
- goto nobufs;
- init_ir_raw_event_duration(++(*ev), !need_pulse,
- timings->clock);
- i >>= 1;
- }
-
- if (timings->trailer_space) {
- if (!(*ev)->pulse)
- (*ev)->duration += timings->trailer_space;
- else if (!max--)
- goto nobufs;
- else
- init_ir_raw_event_duration(++(*ev), 0,
- timings->trailer_space);
- }
-
- ret = 0;
-nobufs:
- /* point to the next event rather than last event before returning */
- ++(*ev);
- return ret;
-}
-EXPORT_SYMBOL(ir_raw_gen_manchester);
-
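
The kernel-doc above describes bi-phase (Manchester) coding: each data bit becomes two opposite half-periods of the clock. A toy standalone encoder showing just that idea (the polarity convention and all names here are illustrative, not the removed kernel implementation):

#include <stdio.h>

/* With the non-inverted convention assumed here, 0 -> mark,space and
 * 1 -> space,mark; one character is printed per half-period.
 */
static void manchester_print(unsigned int data, unsigned int nbits)
{
        unsigned int i;

        for (i = 1u << (nbits - 1); i; i >>= 1) {
                int bit = !!(data & i);

                printf("%d%d", !bit, bit);
        }
        putchar('\n');
}

int main(void)
{
        manchester_print(0x5, 4);       /* 0101 -> 10 01 10 01 */
        return 0;
}
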
-/**
- * ir_raw_encode_scancode() - Encode a scancode as raw events
- *
- * @protocols: permitted protocols
- * @scancode: scancode filter describing a single scancode
- * @events: array of raw events to write into
- * @max: max number of raw events
- *
- * Attempts to encode the scancode as raw events.
- *
- * Returns: The number of events written.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * encoding. In this case all @max events will have been written.
- * -EINVAL if the scancode is ambiguous or invalid, or if no
- * compatible encoder was found.
- */
-int ir_raw_encode_scancode(u64 protocols,
- const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max)
-{
- struct ir_raw_handler *handler;
- int ret = -EINVAL;
-
- mutex_lock(&ir_raw_handler_lock);
- list_for_each_entry(handler, &ir_raw_handler_list, list) {
- if (handler->protocols & protocols && handler->encode) {
- ret = handler->encode(protocols, scancode, events, max);
- if (ret >= 0 || ret == -ENOBUFS)
- break;
- }
- }
- mutex_unlock(&ir_raw_handler_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(ir_raw_encode_scancode);
-
/*
* Used to (un)register raw event clients
*/
@@ -463,8 +328,6 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_register(raw->dev);
available_protocols |= ir_raw_handler->protocols;
- if (ir_raw_handler->encode)
- encode_protocols |= ir_raw_handler->protocols;
mutex_unlock(&ir_raw_handler_lock);
return 0;
@@ -481,8 +344,6 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_unregister(raw->dev);
available_protocols &= ~ir_raw_handler->protocols;
- if (ir_raw_handler->encode)
- encode_protocols &= ~ir_raw_handler->protocols;
mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index d8bdf63ce985..63dace8198b0 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -26,7 +26,6 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <media/rc-core.h>
#define DRIVER_NAME "rc-loopback"
@@ -177,39 +176,6 @@ static int loop_set_carrier_report(struct rc_dev *dev, int enable)
return 0;
}
-static int loop_set_wakeup_filter(struct rc_dev *dev,
- struct rc_scancode_filter *sc_filter)
-{
- static const unsigned int max = 512;
- struct ir_raw_event *raw;
- int ret;
- int i;
-
- /* fine to disable filter */
- if (!sc_filter->mask)
- return 0;
-
- /* encode the specified filter and loop it back */
- raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
- ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
- raw, max);
- /* still loop back the partial raw IR even if it's incomplete */
- if (ret == -ENOBUFS)
- ret = max;
- if (ret >= 0) {
- /* do the loopback */
- for (i = 0; i < ret; ++i)
- ir_raw_event_store(dev, &raw[i]);
- ir_raw_event_handle(dev);
-
- ret = 0;
- }
-
- kfree(raw);
-
- return ret;
-}
-
static int __init loop_init(void)
{
struct rc_dev *rc;
@@ -229,7 +195,6 @@ static int __init loop_init(void)
rc->map_name = RC_MAP_EMPTY;
rc->priv = &loopdev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->encode_wakeup = true;
rc->allowed_protocols = RC_BIT_ALL;
rc->timeout = 100 * 1000 * 1000; /* 100 ms */
rc->min_timeout = 1;
@@ -244,7 +209,6 @@ static int __init loop_init(void)
rc->s_idle = loop_set_idle;
rc->s_learning_mode = loop_set_learning_mode;
rc->s_carrier_report = loop_set_carrier_report;
- rc->s_wakeup_filter = loop_set_wakeup_filter;
loopdev.txmask = RXMASK_REGULAR;
loopdev.txcarrier = 36000;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 9d015db65280..0ff388a16168 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -865,8 +865,6 @@ static ssize_t show_protocols(struct device *device,
} else {
enabled = dev->enabled_wakeup_protocols;
allowed = dev->allowed_wakeup_protocols;
- if (dev->encode_wakeup && !allowed)
- allowed = ir_raw_get_encode_protocols();
}
mutex_unlock(&dev->lock);
@@ -1408,16 +1406,13 @@ int rc_register_device(struct rc_dev *dev)
path ? path : "N/A");
kfree(path);
- if (dev->driver_type == RC_DRIVER_IR_RAW || dev->encode_wakeup) {
+ if (dev->driver_type == RC_DRIVER_IR_RAW) {
/* Load raw decoders, if they aren't already */
if (!raw_init) {
IR_dprintk(1, "Loading raw decoders\n");
ir_raw_init();
raw_init = true;
}
- }
-
- if (dev->driver_type == RC_DRIVER_IR_RAW) {
/* calls ir_register_device so unlock mutex here*/
mutex_unlock(&dev->lock);
rc = ir_raw_event_register(dev);
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 93b315459098..a14c428f70e9 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -715,6 +715,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
break;
case VB2_BUF_STATE_PREPARING:
case VB2_BUF_STATE_DEQUEUED:
+ case VB2_BUF_STATE_REQUEUEING:
/* nothing */
break;
}
@@ -1182,7 +1183,8 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
if (WARN_ON(state != VB2_BUF_STATE_DONE &&
state != VB2_BUF_STATE_ERROR &&
- state != VB2_BUF_STATE_QUEUED))
+ state != VB2_BUF_STATE_QUEUED &&
+ state != VB2_BUF_STATE_REQUEUEING))
state = VB2_BUF_STATE_ERROR;
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1199,22 +1201,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, finish, vb->planes[plane].mem_priv);
- /* Add the buffer to the done buffers list */
spin_lock_irqsave(&q->done_lock, flags);
- vb->state = state;
- if (state != VB2_BUF_STATE_QUEUED)
+ if (state == VB2_BUF_STATE_QUEUED ||
+ state == VB2_BUF_STATE_REQUEUEING) {
+ vb->state = VB2_BUF_STATE_QUEUED;
+ } else {
+ /* Add the buffer to the done buffers list */
list_add_tail(&vb->done_entry, &q->done_list);
+ vb->state = state;
+ }
atomic_dec(&q->owned_by_drv_count);
spin_unlock_irqrestore(&q->done_lock, flags);
- if (state == VB2_BUF_STATE_QUEUED) {
+ switch (state) {
+ case VB2_BUF_STATE_QUEUED:
+ return;
+ case VB2_BUF_STATE_REQUEUEING:
if (q->start_streaming_called)
__enqueue_in_driver(vb);
return;
+ default:
+ /* Inform any processes that may be waiting for buffers */
+ wake_up(&q->done_wq);
+ break;
}
-
- /* Inform any processes that may be waiting for buffers */
- wake_up(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
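
After this rework QUEUED is a pure no-op, REQUEUEING hands the buffer straight back to the driver once streaming has started, and only the terminal states reach the done list and wake waiters. A compact model of that dispatch (state names mirror the driver; everything else is illustrative):

#include <stdio.h>

enum buf_state { QUEUED, REQUEUEING, DONE, ERROR };

static const char *dispatch(enum buf_state state, int streaming)
{
        switch (state) {
        case QUEUED:
                return "nothing to do";
        case REQUEUEING:
                return streaming ? "re-enqueue in driver" : "leave queued";
        default:
                return "add to done list, wake waiters";
        }
}

int main(void)
{
        printf("%s\n", dispatch(REQUEUEING, 1));
        printf("%s\n", dispatch(DONE, 1));
        return 0;
}
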
@@ -1244,19 +1254,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
{
- static bool __check_once __read_mostly;
+ static bool check_once;
- if (__check_once)
+ if (check_once)
return;
- __check_once = true;
- __WARN();
+ check_once = true;
+ WARN_ON(1);
- pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+ pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
if (vb->vb2_queue->allow_zero_bytesused)
- pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+ pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
else
- pr_warn_once("use the actual size instead.\n");
+ pr_warn("use the actual size instead.\n");
}
/**
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 3a27a84ad3ec..9426276dbe14 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2245,6 +2245,9 @@ void omap3_gpmc_save_context(void)
{
int i;
+ if (!gpmc_base)
+ return;
+
gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
@@ -2277,6 +2280,9 @@ void omap3_gpmc_restore_context(void)
{
int i;
+ if (!gpmc_base)
+ return;
+
gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
index 9f579589e800..8053f70dbfd1 100644
--- a/drivers/memory/tegra/tegra114.c
+++ b/drivers/memory/tegra/tegra114.c
@@ -9,8 +9,6 @@
#include <linux/of.h>
#include <linux/mm.h>
-#include <asm/cacheflush.h>
-
#include <dt-bindings/memory/tegra114-mc.h>
#include "mc.h"
@@ -914,20 +912,6 @@ static const struct tegra_smmu_swgroup tegra114_swgroups[] = {
{ .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
};
-static void tegra114_flush_dcache(struct page *page, unsigned long offset,
- size_t size)
-{
- phys_addr_t phys = page_to_phys(page) + offset;
- void *virt = page_address(page) + offset;
-
- __cpuc_flush_dcache_area(virt, size);
- outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra114_smmu_ops = {
- .flush_dcache = tegra114_flush_dcache,
-};
-
static const struct tegra_smmu_soc tegra114_smmu_soc = {
.clients = tegra114_mc_clients,
.num_clients = ARRAY_SIZE(tegra114_mc_clients),
@@ -935,8 +919,8 @@ static const struct tegra_smmu_soc tegra114_smmu_soc = {
.num_swgroups = ARRAY_SIZE(tegra114_swgroups),
.supports_round_robin_arbitration = false,
.supports_request_limit = false,
+ .num_tlb_lines = 32,
.num_asids = 4,
- .ops = &tegra114_smmu_ops,
};
const struct tegra_mc_soc tegra114_mc_soc = {
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 966e1557e6f4..7d734befe0ed 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -9,8 +9,6 @@
#include <linux/of.h>
#include <linux/mm.h>
-#include <asm/cacheflush.h>
-
#include <dt-bindings/memory/tegra124-mc.h>
#include "mc.h"
@@ -1002,20 +1000,6 @@ static const struct tegra_smmu_swgroup tegra124_swgroups[] = {
};
#ifdef CONFIG_ARCH_TEGRA_124_SOC
-static void tegra124_flush_dcache(struct page *page, unsigned long offset,
- size_t size)
-{
- phys_addr_t phys = page_to_phys(page) + offset;
- void *virt = page_address(page) + offset;
-
- __cpuc_flush_dcache_area(virt, size);
- outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra124_smmu_ops = {
- .flush_dcache = tegra124_flush_dcache,
-};
-
static const struct tegra_smmu_soc tegra124_smmu_soc = {
.clients = tegra124_mc_clients,
.num_clients = ARRAY_SIZE(tegra124_mc_clients),
@@ -1024,7 +1008,6 @@ static const struct tegra_smmu_soc tegra124_smmu_soc = {
.supports_round_robin_arbitration = true,
.supports_request_limit = true,
.num_asids = 128,
- .ops = &tegra124_smmu_ops,
};
const struct tegra_mc_soc tegra124_mc_soc = {
@@ -1039,18 +1022,6 @@ const struct tegra_mc_soc tegra124_mc_soc = {
#endif /* CONFIG_ARCH_TEGRA_124_SOC */
#ifdef CONFIG_ARCH_TEGRA_132_SOC
-static void tegra132_flush_dcache(struct page *page, unsigned long offset,
- size_t size)
-{
- void *virt = page_address(page) + offset;
-
- __flush_dcache_area(virt, size);
-}
-
-static const struct tegra_smmu_ops tegra132_smmu_ops = {
- .flush_dcache = tegra132_flush_dcache,
-};
-
static const struct tegra_smmu_soc tegra132_smmu_soc = {
.clients = tegra124_mc_clients,
.num_clients = ARRAY_SIZE(tegra124_mc_clients),
@@ -1058,8 +1029,8 @@ static const struct tegra_smmu_soc tegra132_smmu_soc = {
.num_swgroups = ARRAY_SIZE(tegra124_swgroups),
.supports_round_robin_arbitration = true,
.supports_request_limit = true,
+ .num_tlb_lines = 32,
.num_asids = 128,
- .ops = &tegra132_smmu_ops,
};
const struct tegra_mc_soc tegra132_mc_soc = {
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index 1abcd8f6f3ba..7e0694d80edb 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -9,8 +9,6 @@
#include <linux/of.h>
#include <linux/mm.h>
-#include <asm/cacheflush.h>
-
#include <dt-bindings/memory/tegra30-mc.h>
#include "mc.h"
@@ -936,20 +934,6 @@ static const struct tegra_smmu_swgroup tegra30_swgroups[] = {
{ .name = "isp", .swgroup = TEGRA_SWGROUP_ISP, .reg = 0x258 },
};
-static void tegra30_flush_dcache(struct page *page, unsigned long offset,
- size_t size)
-{
- phys_addr_t phys = page_to_phys(page) + offset;
- void *virt = page_address(page) + offset;
-
- __cpuc_flush_dcache_area(virt, size);
- outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra30_smmu_ops = {
- .flush_dcache = tegra30_flush_dcache,
-};
-
static const struct tegra_smmu_soc tegra30_smmu_soc = {
.clients = tegra30_mc_clients,
.num_clients = ARRAY_SIZE(tegra30_mc_clients),
@@ -957,8 +941,8 @@ static const struct tegra_smmu_soc tegra30_smmu_soc = {
.num_swgroups = ARRAY_SIZE(tegra30_swgroups),
.supports_round_robin_arbitration = false,
.supports_request_limit = false,
+ .num_tlb_lines = 16,
.num_asids = 4,
- .ops = &tegra30_smmu_ops,
};
const struct tegra_mc_soc tegra30_mc_soc = {
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 653815950aa2..3f68dd251ce8 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -115,7 +115,7 @@ config MFD_CROS_EC_I2C
config MFD_CROS_EC_SPI
tristate "ChromeOS Embedded Controller (SPI)"
- depends on MFD_CROS_EC && CROS_EC_PROTO && SPI && OF
+ depends on MFD_CROS_EC && CROS_EC_PROTO && SPI
---help---
If you say Y here, you get support for talking to the ChromeOS EC
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index bebf58a06a6b..a72ddb295078 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -651,7 +651,7 @@ static int arizona_runtime_suspend(struct device *dev)
arizona->has_fully_powered_off = true;
- disable_irq(arizona->irq);
+ disable_irq_nosync(arizona->irq);
arizona_enable_reset(arizona);
regulator_bulk_disable(arizona->num_core_supplies,
arizona->core_supplies);
@@ -1141,10 +1141,6 @@ int arizona_dev_init(struct arizona *arizona)
arizona->pdata.gpio_defaults[i]);
}
- pm_runtime_set_autosuspend_delay(arizona->dev, 100);
- pm_runtime_use_autosuspend(arizona->dev);
- pm_runtime_enable(arizona->dev);
-
/* Chip default */
if (!arizona->pdata.clk32k_src)
arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
@@ -1245,11 +1241,17 @@ int arizona_dev_init(struct arizona *arizona)
arizona->pdata.spk_fmt[i]);
}
+ pm_runtime_set_active(arizona->dev);
+ pm_runtime_enable(arizona->dev);
+
/* Set up for interrupts */
ret = arizona_irq_init(arizona);
if (ret != 0)
goto err_reset;
+ pm_runtime_set_autosuspend_delay(arizona->dev, 100);
+ pm_runtime_use_autosuspend(arizona->dev);
+
arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
arizona_clkgen_err, arizona);
arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
@@ -1278,10 +1280,6 @@ int arizona_dev_init(struct arizona *arizona)
goto err_irq;
}
-#ifdef CONFIG_PM
- regulator_disable(arizona->dcvdd);
-#endif
-
return 0;
err_irq:
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 2d3db81be099..6ded3dc36644 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -438,9 +438,6 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
{
struct at24_data *at24;
- if (unlikely(off >= attr->size))
- return -EFBIG;
-
at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
return at24_write(at24, buf, off, count);
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e1ccefce9a9d..a98dd4f1b0e3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -786,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
slave ? slave->dev->name : "NULL");
if (!slave || !bond->send_peer_notif ||
+ !netif_carrier_ok(bond->dev) ||
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
return false;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 2d1ce3c5d0dd..753887d02b46 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1763,16 +1763,9 @@ vortex_open(struct net_device *dev)
vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
}
if (i != RX_RING_SIZE) {
- int j;
pr_emerg("%s: no memory for rx ring\n", dev->name);
- for (j = 0; j < i; j++) {
- if (vp->rx_skbuff[j]) {
- dev_kfree_skb(vp->rx_skbuff[j]);
- vp->rx_skbuff[j] = NULL;
- }
- }
retval = -ENOMEM;
- goto err_free_irq;
+ goto err_free_skb;
}
/* Wrap the ring. */
vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
@@ -1782,7 +1775,13 @@ vortex_open(struct net_device *dev)
if (!retval)
goto out;
-err_free_irq:
+err_free_skb:
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (vp->rx_skbuff[i]) {
+ dev_kfree_skb(vp->rx_skbuff[i]);
+ vp->rx_skbuff[i] = NULL;
+ }
+ }
free_irq(dev->irq, dev);
err:
if (vortex_debug > 1)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a90d7364334f..f7fbdc9d1325 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -262,9 +262,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
if (likely(skb)) {
(*pkts_compl)++;
(*bytes_compl) += skb->len;
+ dev_kfree_skb_any(skb);
}
- dev_kfree_skb_any(skb);
tx_buf->first_bd = 0;
tx_buf->skb = NULL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 76b9052a961c..5907c821d131 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1718,6 +1718,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
offset += sizeof(u32);
data_buf += sizeof(u32);
written_so_far += sizeof(u32);
+
+ /* At the end of each 4KB page, release the nvram lock so the MFW
+ * gets a chance to take it for its own use.
+ */
+ if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
+ (written_so_far < buf_size)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Releasing NVM lock after offset 0x%x\n",
+ (u32)(offset - sizeof(u32)));
+ bnx2x_release_nvram_lock(bp);
+ usleep_range(1000, 2000);
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+ }
+
cmd_flags = 0;
}
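
The added block drops and re-takes the nvram lock each time a dword write completes a 4KB page with data still pending, so the management firmware (MFW) can grab the lock between pages. The boundary condition itself is simple (page size assumed from the comment):

#include <stdint.h>
#include <stdio.h>

#define NVRAM_PAGE_SIZE 4096u   /* assumed 4KB flash page */

/* A dword write finishes a page when the next offset is page aligned. */
static int page_boundary(uint32_t next_offset)
{
        return (next_offset & (NVRAM_PAGE_SIZE - 1)) == 0;
}

int main(void)
{
        printf("%d %d\n", page_boundary(4096), page_boundary(4100));
        return 0;
}
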
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 0612b19f6313..506047c38607 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
if (!next_cmpl->valid)
break;
}
+ packets++;
/* TODO: BNA_CQ_EF_LOCAL ? */
if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
@@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
else
bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
- packets++;
rcb->rxq->rx_packets++;
rcb->rxq->rx_bytes += totlen;
ccb->bytes_per_intr += totlen;
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index c4d6bbe9458d..02e23e6f1424 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM
config THUNDER_NIC_PF
tristate "Thunder Physical function driver"
depends on 64BIT
- default ARCH_THUNDER
select THUNDER_NIC_BGX
---help---
This driver supports Thunder's NIC physical function.
@@ -29,14 +28,12 @@ config THUNDER_NIC_PF
config THUNDER_NIC_VF
tristate "Thunder Virtual function driver"
depends on 64BIT
- default ARCH_THUNDER
---help---
This driver supports Thunder's NIC virtual function
config THUNDER_NIC_BGX
tristate "Thunder MAC interface driver (BGX)"
depends on 64BIT
- default ARCH_THUNDER
---help---
This driver supports programming and controlling of MAC
interface from NIC physical function driver.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index a11485fbb33f..c3c7db41819d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2332,10 +2332,11 @@ int t4_setup_debugfs(struct adapter *adap)
EXT_MEM1_SIZE_G(size));
}
} else {
- if (i & EXT_MEM_ENABLE_F)
+ if (i & EXT_MEM_ENABLE_F) {
size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
add_debugfs_mem(adap, "mc", MEM_MC,
EXT_MEM_SIZE_G(size));
+ }
}
de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 2716e6f30d9a..00e3a6b6b822 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -620,6 +620,11 @@ enum be_if_flags {
BE_IF_FLAGS_VLAN_PROMISCUOUS |\
BE_IF_FLAGS_MCAST_PROMISCUOUS)
+#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
+ BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
+
+#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
+
/* An RX interface is an object with one or more MAC addresses and
* filtering capabilities. */
struct be_cmd_req_if_create {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6f642426308c..6ca693b03f33 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
return 0;
+ /* if device is not running, copy MAC to netdev->dev_addr */
+ if (!netif_running(netdev))
+ goto done;
+
/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
* privilege or if PF did not provision the new MAC address.
* On BE3, this cmd will always fail if the VF doesn't have the
@@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
status = -EPERM;
goto err;
}
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- dev_info(dev, "MAC address changed to %pM\n", mac);
+done:
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
err:
dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
@@ -2447,10 +2451,24 @@ static void be_eq_clean(struct be_eq_obj *eqo)
be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}
-static void be_rx_cq_clean(struct be_rx_obj *rxo)
+/* Free posted rx buffers that were not used */
+static void be_rxq_clean(struct be_rx_obj *rxo)
{
- struct be_rx_page_info *page_info;
struct be_queue_info *rxq = &rxo->q;
+ struct be_rx_page_info *page_info;
+
+ while (atomic_read(&rxq->used) > 0) {
+ page_info = get_rx_page_info(rxo);
+ put_page(page_info->page);
+ memset(page_info, 0, sizeof(*page_info));
+ }
+ BUG_ON(atomic_read(&rxq->used));
+ rxq->tail = 0;
+ rxq->head = 0;
+}
+
+static void be_rx_cq_clean(struct be_rx_obj *rxo)
+{
struct be_queue_info *rx_cq = &rxo->cq;
struct be_rx_compl_info *rxcp;
struct be_adapter *adapter = rxo->adapter;
@@ -2487,16 +2505,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
/* After cleanup, leave the CQ in unarmed state */
be_cq_notify(adapter, rx_cq->id, false, 0);
-
- /* Then free posted rx buffers that were not used */
- while (atomic_read(&rxq->used) > 0) {
- page_info = get_rx_page_info(rxo);
- put_page(page_info->page);
- memset(page_info, 0, sizeof(*page_info));
- }
- BUG_ON(atomic_read(&rxq->used));
- rxq->tail = 0;
- rxq->head = 0;
}
static void be_tx_compl_clean(struct be_adapter *adapter)
@@ -2576,8 +2584,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
napi_hash_del(&eqo->napi);
netif_napi_del(&eqo->napi);
+ free_cpumask_var(eqo->affinity_mask);
}
- free_cpumask_var(eqo->affinity_mask);
be_queue_free(adapter, &eqo->q);
}
}
@@ -2594,13 +2602,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
for_all_evt_queues(adapter, eqo, i) {
int numa_node = dev_to_node(&adapter->pdev->dev);
- if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
- return -ENOMEM;
- cpumask_set_cpu(cpumask_local_spread(i, numa_node),
- eqo->affinity_mask);
- netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
- BE_NAPI_WEIGHT);
- napi_hash_add(&eqo->napi);
+
aic = &adapter->aic_obj[i];
eqo->adapter = adapter;
eqo->idx = i;
@@ -2616,6 +2618,14 @@ static int be_evt_queues_create(struct be_adapter *adapter)
rc = be_cmd_eq_create(adapter, eqo);
if (rc)
return rc;
+
+ if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ eqo->affinity_mask);
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
+ BE_NAPI_WEIGHT);
+ napi_hash_add(&eqo->napi);
}
return 0;
}
@@ -3354,13 +3364,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
for_all_rx_queues(adapter, rxo, i) {
q = &rxo->q;
if (q->created) {
+ /* If RXQs are destroyed while in an "out of buffer"
+ * state, there is a possibility of an HW stall on
+ * Lancer. So, post 64 buffers to each queue to relieve
+ * the "out of buffer" condition.
+ * Make sure there's space in the RXQ before posting.
+ */
+ if (lancer_chip(adapter)) {
+ be_rx_cq_clean(rxo);
+ if (atomic_read(&q->used) == 0)
+ be_post_rx_frags(rxo, GFP_KERNEL,
+ MAX_RX_POST);
+ }
+
be_cmd_rxq_destroy(adapter, q);
be_rx_cq_clean(rxo);
+ be_rxq_clean(rxo);
}
be_queue_free(adapter, q);
}
}
+static void be_disable_if_filters(struct be_adapter *adapter)
+{
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[0], 0);
+
+ be_clear_uc_list(adapter);
+
+ /* The IFACE flags are enabled in the open path and cleared
+ * in the close path. When a VF gets detached from the host and
+ * assigned to a VM the following happens:
+ * - VF's IFACE flags get cleared in the detach path
+ * - IFACE create is issued by the VF in the attach path
+ * Due to a bug in the BE3/Skyhawk-R FW
+ * (Lancer FW doesn't have the bug), the IFACE capability flags
+ * specified along with the IFACE create cmd issued by a VF are not
+ * honoured by FW. As a consequence, if a *new* driver
+ * (that enables/disables IFACE flags in open/close)
+ * is loaded in the host and an *old* driver is used by a VM/VF,
+ * the IFACE gets created *without* the needed flags.
+ * To avoid this, disable RX-filter flags only for Lancer.
+ */
+ if (lancer_chip(adapter)) {
+ be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
+ adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
+ }
+}
+
static int be_close(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -3373,6 +3424,8 @@ static int be_close(struct net_device *netdev)
if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
return 0;
+ be_disable_if_filters(adapter);
+
be_roce_dev_close(adapter);
if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3392,7 +3445,6 @@ static int be_close(struct net_device *netdev)
be_tx_compl_clean(adapter);
be_rx_qs_destroy(adapter);
- be_clear_uc_list(adapter);
for_all_evt_queues(adapter, eqo, i) {
if (msix_enabled(adapter))
@@ -3477,6 +3529,31 @@ static int be_rx_qs_create(struct be_adapter *adapter)
return 0;
}
+static int be_enable_if_filters(struct be_adapter *adapter)
+{
+ int status;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
+ if (status)
+ return status;
+
+ /* For BE3 VFs, the PF programs the initial MAC address */
+ if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
+ status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
+ adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+ if (status)
+ return status;
+ }
+
+ if (adapter->vlans_added)
+ be_vid_config(adapter);
+
+ be_set_rx_mode(adapter->netdev);
+
+ return 0;
+}
+
static int be_open(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -3490,6 +3567,10 @@ static int be_open(struct net_device *netdev)
if (status)
goto err;
+ status = be_enable_if_filters(adapter);
+ if (status)
+ goto err;
+
status = be_irq_register(adapter);
if (status)
goto err;
@@ -3686,16 +3767,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
}
}
-static void be_mac_clear(struct be_adapter *adapter)
-{
- if (adapter->pmac_id) {
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[0], 0);
- kfree(adapter->pmac_id);
- adapter->pmac_id = NULL;
- }
-}
-
#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
@@ -3770,8 +3841,8 @@ static int be_clear(struct be_adapter *adapter)
#ifdef CONFIG_BE2NET_VXLAN
be_disable_vxlan_offloads(adapter);
#endif
- /* delete the primary mac along with the uc-mac list */
- be_mac_clear(adapter);
+ kfree(adapter->pmac_id);
+ adapter->pmac_id = NULL;
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
@@ -3782,25 +3853,11 @@ static int be_clear(struct be_adapter *adapter)
return 0;
}
-static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
- u32 cap_flags, u32 vf)
-{
- u32 en_flags;
-
- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
- BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
-
- en_flags &= cap_flags;
-
- return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
-}
-
static int be_vfs_if_create(struct be_adapter *adapter)
{
struct be_resources res = {0};
+ u32 cap_flags, en_flags, vf;
struct be_vf_cfg *vf_cfg;
- u32 cap_flags, vf;
int status;
/* If a FW profile exists, then cap_flags are updated */
@@ -3821,8 +3878,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
}
}
- status = be_if_create(adapter, &vf_cfg->if_handle,
- cap_flags, vf + 1);
+ en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS);
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
if (status)
return status;
}
@@ -4194,15 +4255,8 @@ static int be_mac_setup(struct be_adapter *adapter)
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- } else {
- /* Maybe the HW was reset; dev_addr must be re-programmed */
- memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
}
- /* For BE3-R VFs, the PF programs the initial MAC address */
- if (!(BEx_chip(adapter) && be_virtfn(adapter)))
- be_cmd_pmac_add(adapter, mac, adapter->if_handle,
- &adapter->pmac_id[0], 0);
return 0;
}
@@ -4342,6 +4396,7 @@ static int be_func_init(struct be_adapter *adapter)
static int be_setup(struct be_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
+ u32 en_flags;
int status;
status = be_func_init(adapter);
@@ -4364,8 +4419,11 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
- status = be_if_create(adapter, &adapter->if_handle,
- be_if_cap_flags(adapter), 0);
+ /* will enable all the needed filter flags in be_open() */
+ en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
+ en_flags = en_flags & be_if_cap_flags(adapter);
+ status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+ &adapter->if_handle, 0);
if (status)
goto err;
@@ -4391,11 +4449,6 @@ static int be_setup(struct be_adapter *adapter)
dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
}
- if (adapter->vlans_added)
- be_vid_config(adapter);
-
- be_set_rx_mode(adapter->netdev);
-
status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
if (status)
@@ -5121,7 +5174,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
struct device *dev = &adapter->pdev->dev;
int status;
- if (lancer_chip(adapter) || BEx_chip(adapter))
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -5168,7 +5221,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (lancer_chip(adapter) || BEx_chip(adapter))
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
if (adapter->vxlan_port != port)
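Taken together, the be2net hunks move interface-filter programming out of setup and into the ndo_open/ndo_close pair, so filters go live only once RX queues exist and are torn down symmetrically on close. A reduced sketch of that symmetry, reusing be_enable_if_filters()/be_disable_if_filters() from this series (ex_open/ex_close are illustrative; error unwinding is elided):

static int ex_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	/* Queues were created during setup; filters go live only now. */
	status = be_enable_if_filters(adapter);
	if (status)
		return status;

	/* ... register irqs, enable NAPI, start queues ... */
	return 0;
}

static int ex_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Undo exactly what ex_open() enabled, before the queues die. */
	be_disable_if_filters(adapter);

	/* ... quiesce NAPI, clean completions, destroy RX queues ... */
	return 0;
}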
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 32e3807c650e..271bb5862346 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3433,6 +3433,7 @@ fec_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 56316db6c5a6..cf8e54652df9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
frag = skb_shinfo(skb)->frags;
while (nr_frags) {
CBDC_SC(bdp,
- BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
+ BD_ENET_TX_TC);
CBDS_SC(bdp, BD_ENET_TX_READY);
if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index b34214e2df5f..016743e355de 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
}
#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
-#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB)
+#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF)
#define FEC_RX_EVENT (FEC_ENET_RXF)
#define FEC_TX_EVENT (FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2b7610f341b0..10b3bbbbac8e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2102,6 +2102,11 @@ int startup_gfar(struct net_device *ndev)
/* Start Rx/Tx DMA and enable the interrupts */
gfar_start(priv);
+ /* force link state update after mac reset */
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
phy_start(priv->phydev);
enable_napi(priv);
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 3c0a8f825b63..5b90fcf96265 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -900,27 +900,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
return 0;
}
-static int gfar_comp_asc(const void *a, const void *b)
-{
- return memcmp(a, b, 4);
-}
-
-static int gfar_comp_desc(const void *a, const void *b)
-{
- return -memcmp(a, b, 4);
-}
-
-static void gfar_swap(void *a, void *b, int size)
-{
- u32 *_a = a;
- u32 *_b = b;
-
- swap(_a[0], _b[0]);
- swap(_a[1], _b[1]);
- swap(_a[2], _b[2]);
- swap(_a[3], _b[3]);
-}
-
/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
@@ -1270,310 +1249,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
return 0;
}
-/* Copy size filer entries */
-static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
- struct gfar_filer_entry src[0], s32 size)
-{
- while (size > 0) {
- size--;
- dst[size].ctrl = src[size].ctrl;
- dst[size].prop = src[size].prop;
- }
-}
-
-/* Delete the contents of the filer-table between start and end
- * and collapse them
- */
-static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
-{
- int length;
-
- if (end > MAX_FILER_CACHE_IDX || end < begin)
- return -EINVAL;
-
- end++;
- length = end - begin;
-
- /* Copy */
- while (end < tab->index) {
- tab->fe[begin].ctrl = tab->fe[end].ctrl;
- tab->fe[begin++].prop = tab->fe[end++].prop;
-
- }
- /* Fill up with don't cares */
- while (begin < tab->index) {
- tab->fe[begin].ctrl = 0x60;
- tab->fe[begin].prop = 0xFFFFFFFF;
- begin++;
- }
-
- tab->index -= length;
- return 0;
-}
-
-/* Make space on the wanted location */
-static int gfar_expand_filer_entries(u32 begin, u32 length,
- struct filer_table *tab)
-{
- if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
- begin > MAX_FILER_CACHE_IDX)
- return -EINVAL;
-
- gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
- tab->index - length + 1);
-
- tab->index += length;
- return 0;
-}
-
-static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
-{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
- start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
- (RQFCR_AND | RQFCR_CLE))
- return start;
- }
- return -1;
-}
-
-static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
-{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
- start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
- (RQFCR_CLE))
- return start;
- }
- return -1;
-}
-
-/* Uses the hardware's clustering option to reduce
- * the number of filer table entries
- */
-static void gfar_cluster_filer(struct filer_table *tab)
-{
- s32 i = -1, j, iend, jend;
-
- while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
- j = i;
- while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
- /* The cluster entry itself and the previous one
- * (a mask) must be identical!
- */
- if (tab->fe[i].ctrl != tab->fe[j].ctrl)
- break;
- if (tab->fe[i].prop != tab->fe[j].prop)
- break;
- if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
- break;
- if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
- break;
- iend = gfar_get_next_cluster_end(i, tab);
- jend = gfar_get_next_cluster_end(j, tab);
- if (jend == -1 || iend == -1)
- break;
-
- /* First we make some free space, where our cluster
- * element should be. Then we copy it there and finally
- * delete it from its old location.
- */
- if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
- -EINVAL)
- break;
-
- gfar_copy_filer_entries(&(tab->fe[iend + 1]),
- &(tab->fe[jend + 1]), jend - j);
-
- if (gfar_trim_filer_entries(jend - 1,
- jend + (jend - j),
- tab) == -EINVAL)
- return;
-
- /* Mask out cluster bit */
- tab->fe[iend].ctrl &= ~(RQFCR_CLE);
- }
- }
-}
-
-/* Swaps the masked bits of a1<>a2 and b1<>b2 */
-static void gfar_swap_bits(struct gfar_filer_entry *a1,
- struct gfar_filer_entry *a2,
- struct gfar_filer_entry *b1,
- struct gfar_filer_entry *b2, u32 mask)
-{
- u32 temp[4];
- temp[0] = a1->ctrl & mask;
- temp[1] = a2->ctrl & mask;
- temp[2] = b1->ctrl & mask;
- temp[3] = b2->ctrl & mask;
-
- a1->ctrl &= ~mask;
- a2->ctrl &= ~mask;
- b1->ctrl &= ~mask;
- b2->ctrl &= ~mask;
-
- a1->ctrl |= temp[1];
- a2->ctrl |= temp[0];
- b1->ctrl |= temp[3];
- b2->ctrl |= temp[2];
-}
-
-/* Generate a list consisting of mask values with their start and
- * end of validity and block as indicator for parts belonging
- * together (glued by ANDs) in mask_table
- */
-static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *tab)
-{
- u32 i, and_index = 0, block_index = 1;
-
- for (i = 0; i < tab->index; i++) {
-
- /* LSByte of control = 0 sets a mask */
- if (!(tab->fe[i].ctrl & 0xF)) {
- mask_table[and_index].mask = tab->fe[i].prop;
- mask_table[and_index].start = i;
- mask_table[and_index].block = block_index;
- if (and_index >= 1)
- mask_table[and_index - 1].end = i - 1;
- and_index++;
- }
- /* cluster starts and ends will be separated because they should
- * hold their position
- */
- if (tab->fe[i].ctrl & RQFCR_CLE)
- block_index++;
- /* A not set AND indicates the end of a depended block */
- if (!(tab->fe[i].ctrl & RQFCR_AND))
- block_index++;
- }
-
- mask_table[and_index - 1].end = i - 1;
-
- return and_index;
-}
-
-/* Sorts the entries of mask_table by the values of the masks.
- * Important: The 0xFF80 flags of the first and last entry of a
- * block must hold their position (which queue, CLusterEnable, ReJEct,
- * AND)
- */
-static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *temp_table, u32 and_index)
-{
- /* Pointer to compare function (_asc or _desc) */
- int (*gfar_comp)(const void *, const void *);
-
- u32 i, size = 0, start = 0, prev = 1;
- u32 old_first, old_last, new_first, new_last;
-
- gfar_comp = &gfar_comp_desc;
-
- for (i = 0; i < and_index; i++) {
- if (prev != mask_table[i].block) {
- old_first = mask_table[start].start + 1;
- old_last = mask_table[i - 1].end;
- sort(mask_table + start, size,
- sizeof(struct gfar_mask_entry),
- gfar_comp, &gfar_swap);
-
- /* Toggle order for every block. This makes the
- * thing more efficient!
- */
- if (gfar_comp == gfar_comp_desc)
- gfar_comp = &gfar_comp_asc;
- else
- gfar_comp = &gfar_comp_desc;
-
- new_first = mask_table[start].start + 1;
- new_last = mask_table[i - 1].end;
-
- gfar_swap_bits(&temp_table->fe[new_first],
- &temp_table->fe[old_first],
- &temp_table->fe[new_last],
- &temp_table->fe[old_last],
- RQFCR_QUEUE | RQFCR_CLE |
- RQFCR_RJE | RQFCR_AND);
-
- start = i;
- size = 0;
- }
- size++;
- prev = mask_table[i].block;
- }
-}
-
-/* Reduces the number of masks needed in the filer table to save entries
- * This is done by sorting the masks of a depended block. A depended block is
- * identified by gluing ANDs or CLE. The sorting order toggles after every
- * block. Of course entries in scope of a mask must change their location with
- * it.
- */
-static int gfar_optimize_filer_masks(struct filer_table *tab)
-{
- struct filer_table *temp_table;
- struct gfar_mask_entry *mask_table;
-
- u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
- s32 ret = 0;
-
- /* We need a copy of the filer table because
- * we want to change its order
- */
- temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
- if (temp_table == NULL)
- return -ENOMEM;
-
- mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
- sizeof(struct gfar_mask_entry), GFP_KERNEL);
-
- if (mask_table == NULL) {
- ret = -ENOMEM;
- goto end;
- }
-
- and_index = gfar_generate_mask_table(mask_table, tab);
-
- gfar_sort_mask_table(mask_table, temp_table, and_index);
-
- /* Now we can copy the data from our duplicated filer table to
- * the real one in the order the mask table says
- */
- for (i = 0; i < and_index; i++) {
- size = mask_table[i].end - mask_table[i].start + 1;
- gfar_copy_filer_entries(&(tab->fe[j]),
- &(temp_table->fe[mask_table[i].start]), size);
- j += size;
- }
-
- /* And finally we just have to check for duplicated masks and drop the
- * second ones
- */
- for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
- if (tab->fe[i].ctrl == 0x80) {
- previous_mask = i++;
- break;
- }
- }
- for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
- if (tab->fe[i].ctrl == 0x80) {
- if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
- /* Two identical ones found!
- * So drop the second one!
- */
- gfar_trim_filer_entries(i, i, tab);
- } else
- /* Not identical! */
- previous_mask = i;
- }
- }
-
- kfree(mask_table);
-end: kfree(temp_table);
- return ret;
-}
-
/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
struct filer_table *tab)
@@ -1583,11 +1258,10 @@ static int gfar_write_filer_table(struct gfar_private *priv,
return -EBUSY;
/* Fill regular entries */
- for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
- i++)
+ for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
/* Fill the rest with fall-throughs */
- for (; i < MAX_FILER_IDX - 1; i++)
+ for (; i < MAX_FILER_IDX; i++)
gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
/* Last entry must be default accept
* because that's what people expect
@@ -1621,7 +1295,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
{
struct ethtool_flow_spec_container *j;
struct filer_table *tab;
- s32 i = 0;
s32 ret = 0;
/* So index is set to zero, too! */
@@ -1646,17 +1319,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
}
}
- i = tab->index;
-
- /* Optimizations to save entries */
- gfar_cluster_filer(tab);
- gfar_optimize_filer_masks(tab);
-
- pr_debug("\tSummary:\n"
- "\tData on hardware: %d\n"
- "\tCompression rate: %d%%\n",
- tab->index, 100 - (100 * tab->index) / i);
-
/* Write everything to hardware */
ret = gfar_write_filer_table(priv, tab);
if (ret == -EBUSY) {
@@ -1722,13 +1384,14 @@ static int gfar_add_cls(struct gfar_private *priv,
}
process:
+ priv->rx_list.count++;
ret = gfar_process_filer_changes(priv);
if (ret)
goto clean_list;
- priv->rx_list.count++;
return ret;
clean_list:
+ priv->rx_list.count--;
list_del(&temp->list);
clean_mem:
kfree(temp);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 982fdcdc795b..b5b2925103ec 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
static inline bool fm10k_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 2f70a9b152bd..830466c49987 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6566,7 +6566,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
static inline bool igb_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9aa6104e34ea..ae21e0b06c3a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1832,7 +1832,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
static inline bool ixgbe_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index e71cdde9cb01..1d7b00b038a2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
static inline bool ixgbevf_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
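The same one-line change recurs in fm10k, igb, ixgbe and ixgbevf: drivers stop peeking at page->pfmemalloc and call the page_is_pfmemalloc() accessor instead, because the raw field shares storage in a union and is not reliable once the page has left the allocator. For reference, the helper is approximately this shape (see include/linux/mm.h for the authoritative definition):

static inline bool page_is_pfmemalloc(struct page *page)
{
	/*
	 * page->index cannot legitimately be this large, so the
	 * sentinel value doubles as the pfmemalloc marker.
	 */
	return page->index == -1UL;
}

Routing the test through one accessor also gives mm a single place to change the encoding later without touching every driver.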
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 3e8b1bfb1f2e..d9884fd15b45 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -27,6 +27,8 @@
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -299,6 +301,7 @@
/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100
@@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats {
u64 tx_bytes;
};
+/* Per-CPU port control */
+struct mvpp2_port_pcpu {
+ struct hrtimer tx_done_timer;
+ bool timer_scheduled;
+ /* Tasklet for egress finalization */
+ struct tasklet_struct tx_done_tasklet;
+};
+
struct mvpp2_port {
u8 id;
@@ -679,6 +690,9 @@ struct mvpp2_port {
u32 pending_cause_rx;
struct napi_struct napi;
+ /* Per-CPU port control */
+ struct mvpp2_port_pcpu __percpu *pcpu;
+
/* Flags */
unsigned long flags;
@@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu {
/* Array of transmitted skb */
struct sk_buff **tx_skb;
+ /* Array of transmitted buffers' physical addresses */
+ dma_addr_t *tx_buffs;
+
/* Index of last TX DMA descriptor that was inserted */
int txq_put_index;
@@ -913,8 +930,6 @@ struct mvpp2_bm_pool {
/* Occupied buffers indicator */
atomic_t in_use;
int in_use_thresh;
-
- spinlock_t lock;
};
struct mvpp2_buff_hdr {
@@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
}
static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct mvpp2_tx_desc *tx_desc)
{
txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
+ if (skb)
+ txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
+ tx_desc->buf_phys_addr;
txq_pcpu->txq_put_index++;
if (txq_pcpu->txq_put_index == txq_pcpu->size)
txq_pcpu->txq_put_index = 0;
@@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
bm_pool->pkt_size = 0;
bm_pool->buf_num = 0;
atomic_set(&bm_pool->in_use, 0);
- spin_lock_init(&bm_pool->lock);
return 0;
}
@@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
int pkt_size)
{
- unsigned long flags = 0;
struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
int num;
@@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
return NULL;
}
- spin_lock_irqsave(&new_pool->lock, flags);
-
if (new_pool->type == MVPP2_BM_FREE)
new_pool->type = type;
@@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
if (num != pkts_num) {
WARN(1, "pool %d: %d of %d allocated\n",
new_pool->id, num, pkts_num);
- /* We need to undo the bufs_add() allocations */
- spin_unlock_irqrestore(&new_pool->lock, flags);
return NULL;
}
}
@@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
- spin_unlock_irqrestore(&new_pool->lock, flags);
-
return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
- unsigned long flags = 0;
int rxq;
if (!port->pool_long) {
@@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
if (!port->pool_long)
return -ENOMEM;
- spin_lock_irqsave(&port->pool_long->lock, flags);
port->pool_long->port_map |= (1 << port->id);
- spin_unlock_irqrestore(&port->pool_long->lock, flags);
for (rxq = 0; rxq < rxq_number; rxq++)
mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
if (!port->pool_short)
return -ENOMEM;
- spin_lock_irqsave(&port->pool_short->lock, flags);
port->pool_short->port_map |= (1 << port->id);
- spin_unlock_irqrestore(&port->pool_short->lock, flags);
for (rxq = 0; rxq < rxq_number; rxq++)
mvpp2_rxq_short_pool_set(port, rxq,
@@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg)
mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
(MVPP2_CAUSE_MISC_SUM_MASK |
- MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
@@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
rxq->time_coal = usec;
}
-/* Set threshold for TX_DONE pkts coalescing */
-static void mvpp2_tx_done_pkts_coal_set(void *arg)
-{
- struct mvpp2_port *port = arg;
- int queue;
- u32 val;
-
- for (queue = 0; queue < txq_number; queue++) {
- struct mvpp2_tx_queue *txq = port->txqs[queue];
-
- val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
- MVPP2_TRANSMITTED_THRESH_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
- }
-}
-
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq,
@@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
int i;
for (i = 0; i < num; i++) {
- struct mvpp2_tx_desc *tx_desc = txq->descs +
- txq_pcpu->txq_get_index;
+ dma_addr_t buf_phys_addr =
+ txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
mvpp2_txq_inc_get(txq_pcpu);
@@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
if (!skb)
continue;
- dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
- tx_desc->data_size, DMA_TO_DEVICE);
+ dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
}
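The tx_buffs array added above is a shadow of the DMA descriptors kept in ordinary kernel memory: the mapped address is recorded at enqueue time, so the completion path never reads buf_phys_addr back from a descriptor the hardware may already have recycled. A condensed sketch of the enqueue/complete pairing, with hypothetical ex_* names:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct ex_txq_pcpu {
	struct sk_buff **tx_skb;	/* skb per slot (NULL = middle frag) */
	dma_addr_t *tx_buffs;		/* mapped address per slot */
	int put_index, get_index, size;
};

static void ex_enqueue(struct ex_txq_pcpu *q, struct sk_buff *skb,
		       dma_addr_t dma)
{
	q->tx_skb[q->put_index] = skb;
	q->tx_buffs[q->put_index] = dma;
	if (++q->put_index == q->size)
		q->put_index = 0;
}

static void ex_complete_one(struct device *dev, struct ex_txq_pcpu *q)
{
	struct sk_buff *skb = q->tx_skb[q->get_index];
	dma_addr_t dma = q->tx_buffs[q->get_index];

	if (++q->get_index == q->size)
		q->get_index = 0;
	if (!skb)
		return;

	dma_unmap_single(dev, dma, skb_headlen(skb), DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
}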
@@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
u32 cause)
{
- int queue = fls(cause >> 16) - 1;
+ int queue = fls(cause) - 1;
return port->txqs[queue];
}
@@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
netif_tx_wake_queue(nq);
}
+static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
+{
+ struct mvpp2_tx_queue *txq;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ unsigned int tx_todo = 0;
+
+ while (cause) {
+ txq = mvpp2_get_tx_queue(port, cause);
+ if (!txq)
+ break;
+
+ txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+ if (txq_pcpu->count) {
+ mvpp2_txq_done(port, txq, txq_pcpu);
+ tx_todo += txq_pcpu->count;
+ }
+
+ cause &= ~(1 << txq->log_id);
+ }
+ return tx_todo;
+}
+
/* Rx/Tx queue initialization/cleanup methods */
/* Allocate and initialize descriptors for aggr TXQ */
@@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
sizeof(*txq_pcpu->tx_skb),
GFP_KERNEL);
- if (!txq_pcpu->tx_skb) {
- dma_free_coherent(port->dev->dev.parent,
- txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_phys);
- return -ENOMEM;
- }
+ if (!txq_pcpu->tx_skb)
+ goto error;
+
+ txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!txq_pcpu->tx_buffs)
+ goto error;
txq_pcpu->count = 0;
txq_pcpu->reserved_num = 0;
@@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
}
return 0;
+
+error:
+ for_each_present_cpu(cpu) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ kfree(txq_pcpu->tx_skb);
+ kfree(txq_pcpu->tx_buffs);
+ }
+
+ dma_free_coherent(port->dev->dev.parent,
+ txq->size * MVPP2_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+
+ return -ENOMEM;
}
/* Free allocated TXQ resources */
@@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
kfree(txq_pcpu->tx_skb);
+ kfree(txq_pcpu->tx_buffs);
}
if (txq->descs)
@@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
goto err_cleanup;
}
- on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
return 0;
@@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev)
}
}
+static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
+{
+ ktime_t interval;
+
+ if (!port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+ hrtimer_start(&port_pcpu->tx_done_timer, interval,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
+static void mvpp2_tx_proc_cb(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+ unsigned int tx_todo, cause;
+
+ if (!netif_running(dev))
+ return;
+ port_pcpu->timer_scheduled = false;
+
+ /* Process all the Tx queues */
+ cause = (1 << txq_number) - 1;
+ tx_todo = mvpp2_tx_done(port, cause);
+
+ /* Set the timer in case not all the packets were processed */
+ if (tx_todo)
+ mvpp2_timer_set(port_pcpu);
+}
+
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+{
+ struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+ struct mvpp2_port_pcpu,
+ tx_done_timer);
+
+ tasklet_schedule(&port_pcpu->tx_done_tasklet);
+
+ return HRTIMER_NORESTART;
+}
+
/* Main RX/TX processing routines */
/* Display more error info */
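The callback added above is the canonical two-stage deferral: the pinned hrtimer fires in hard-irq context and does nothing except schedule a tasklet, and the tasklet then performs the real TX reclaim in softirq context. A self-contained sketch of the wiring (ex_* names are illustrative; the 1 ms period mirrors MVPP2_TXDONE_HRTIMER_PERIOD_NS):

#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>

struct ex_pcpu {
	struct hrtimer timer;
	struct tasklet_struct task;
	bool scheduled;		/* per-CPU in the driver, hence no locking */
};

static void ex_tasklet_fn(unsigned long data)
{
	struct ex_pcpu *p = (struct ex_pcpu *)data;

	p->scheduled = false;
	/* softirq context: do the actual TX descriptor reclaim here */
}

static enum hrtimer_restart ex_timer_fn(struct hrtimer *t)
{
	struct ex_pcpu *p = container_of(t, struct ex_pcpu, timer);

	tasklet_schedule(&p->task);	/* hard-irq: defer the work */
	return HRTIMER_NORESTART;
}

static void ex_init(struct ex_pcpu *p)
{
	hrtimer_init(&p->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	p->timer.function = ex_timer_fn;
	p->scheduled = false;
	tasklet_init(&p->task, ex_tasklet_fn, (unsigned long)p);
}

static void ex_arm(struct ex_pcpu *p)
{
	if (!p->scheduled) {		/* avoid re-arming a pending timer */
		p->scheduled = true;
		hrtimer_start(&p->timer, ns_to_ktime(1000000),
			      HRTIMER_MODE_REL_PINNED);
	}
}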
@@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
if (i == (skb_shinfo(skb)->nr_frags - 1)) {
/* Last descriptor */
tx_desc->command = MVPP2_TXD_L_DESC;
- mvpp2_txq_inc_put(txq_pcpu, skb);
+ mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
- mvpp2_txq_inc_put(txq_pcpu, NULL);
+ mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
}
}
@@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
/* First and Last descriptor */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, skb);
+ mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
} else {
/* First but not Last */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, NULL);
+ mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
/* Continue with other skb fragments */
if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -5255,6 +5323,17 @@ out:
dev_kfree_skb_any(skb);
}
+ /* Finalize TX processing */
+ if (txq_pcpu->count >= txq->done_pkts_coal)
+ mvpp2_txq_done(port, txq, txq_pcpu);
+
+ /* Set the timer in case not all frags were processed */
+ if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+
+ mvpp2_timer_set(port_pcpu);
+ }
+
return NETDEV_TX_OK;
}
@@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
netdev_err(dev, "tx fifo underrun error\n");
}
-static void mvpp2_txq_done_percpu(void *arg)
+static int mvpp2_poll(struct napi_struct *napi, int budget)
{
- struct mvpp2_port *port = arg;
- u32 cause_rx_tx, cause_tx, cause_misc;
+ u32 cause_rx_tx, cause_rx, cause_misc;
+ int rx_done = 0;
+ struct mvpp2_port *port = netdev_priv(napi->dev);
/* Rx/Tx cause register
*
@@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg)
*/
cause_rx_tx = mvpp2_read(port->priv,
MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
- cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+ cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
if (cause_misc) {
@@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg)
cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
}
- /* Release TX descriptors */
- if (cause_tx) {
- struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
- struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
-
- if (txq_pcpu->count)
- mvpp2_txq_done(port, txq, txq_pcpu);
- }
-}
-
-static int mvpp2_poll(struct napi_struct *napi, int budget)
-{
- u32 cause_rx_tx, cause_rx;
- int rx_done = 0;
- struct mvpp2_port *port = netdev_priv(napi->dev);
-
- on_each_cpu(mvpp2_txq_done_percpu, port, 1);
-
- cause_rx_tx = mvpp2_read(port->priv,
- MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
/* Process RX packets */
@@ -5561,6 +5621,8 @@ err_cleanup_rxqs:
static int mvpp2_stop(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu;
+ int cpu;
mvpp2_stop_dev(port);
mvpp2_phy_disconnect(port);
@@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev)
on_each_cpu(mvpp2_interrupts_mask, port, 1);
free_irq(port->irq, port);
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ hrtimer_cancel(&port_pcpu->tx_done_timer);
+ port_pcpu->timer_scheduled = false;
+ tasklet_kill(&port_pcpu->tx_done_tasklet);
+ }
mvpp2_cleanup_rxqs(port);
mvpp2_cleanup_txqs(port);
@@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
txq->done_pkts_coal = c->tx_max_coalesced_frames;
}
- on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
return 0;
}
@@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
{
struct device_node *phy_node;
struct mvpp2_port *port;
+ struct mvpp2_port_pcpu *port_pcpu;
struct net_device *dev;
struct resource *res;
const char *dt_mac_addr;
@@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
int features;
int phy_mode;
int priv_common_regs_num = 2;
- int err, i;
+ int err, i, cpu;
dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
rxq_number);
@@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
mvpp2_port_power_up(port);
+ port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
+ if (!port->pcpu) {
+ err = -ENOMEM;
+ goto err_free_txq_pcpu;
+ }
+
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ port_pcpu->timer_scheduled = false;
+
+ tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
+ (unsigned long)dev);
+ }
+
netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
features = NETIF_F_SG | NETIF_F_IP_CSUM;
dev->features = features | NETIF_F_RXCSUM;
@@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register netdev\n");
- goto err_free_txq_pcpu;
+ goto err_free_port_pcpu;
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
@@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
priv->port_list[id] = port;
return 0;
+err_free_port_pcpu:
+ free_percpu(port->pcpu);
err_free_txq_pcpu:
for (i = 0; i < txq_number; i++)
free_percpu(port->txqs[i]->pcpu);
@@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
int i;
unregister_netdev(port->dev);
+ free_percpu(port->pcpu);
free_percpu(port->stats);
for (i = 0; i < txq_number; i++)
free_percpu(port->txqs[i]->pcpu);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index afad529838de..06e3e1e54c35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
/* disable cmdif checksum */
MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
+
err = set_caps(dev, set_ctx, set_sz);
query_ex:
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f78909a00f15..09d2e16fd6b0 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
- err = dma_mapping_error(adapter->dev,
- sg_dma_address(&tx_ctl->sg));
- if (err) {
+ if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
+ err = -ENOMEM;
sg_dma_address(&tx_ctl->sg) = 0;
goto err;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3df51faf18ae..f790f61ea78a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4875,10 +4875,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_46:
case RTL_GIGA_MAC_VER_47:
case RTL_GIGA_MAC_VER_48:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ break;
case RTL_GIGA_MAC_VER_49:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
- RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
default:
RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2d8578cade03..2e7f9a2834be 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -4821,6 +4821,7 @@ static void rocker_remove_ports(const struct rocker *rocker)
rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
ROCKER_OP_FLAG_REMOVE);
unregister_netdev(rocker_port->dev);
+ free_netdev(rocker_port->dev);
}
kfree(rocker->ports);
}
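The one-liner above closes a leak that is easy to reintroduce: unregister_netdev() only detaches the device from the stack, while the memory from alloc_etherdev() must still be returned with free_netdev(). The canonical pairing, sketched with illustrative ex_* names:

#include <linux/etherdevice.h>

struct ex_priv { int placeholder; };
static struct net_device *ex_dev;

static int ex_probe(void)
{
	int err;

	ex_dev = alloc_etherdev(sizeof(struct ex_priv));
	if (!ex_dev)
		return -ENOMEM;

	err = register_netdev(ex_dev);
	if (err) {
		free_netdev(ex_dev);	/* never registered: free directly */
		return err;
	}
	return 0;
}

static void ex_remove(void)
{
	unregister_netdev(ex_dev);	/* detach from the stack... */
	free_netdev(ex_dev);		/* ...then release the memory */
}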
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 7e3129e7f143..f0e4bb4e3ec5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -42,7 +42,7 @@
#define NSS_COMMON_CLK_DIV_MASK 0x7f
#define NSS_COMMON_CLK_SRC_CTRL 0x14
-#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x)
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (x)
/* Mode is coded on 1 bit but is different depending on the MAC ID:
* MAC0: QSGMII=0 RGMII=1
* MAC1: QSGMII=0 SGMII=0 RGMII=1
@@ -291,7 +291,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
/* Configure the clock src according to the mode */
regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
- val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
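The root cause in the ipq806x hunks was a macro whose name promises a bit offset but whose old body returned a bit mask (1 << x), so the call site that used it as a shift amount shifted by 1 << id instead of id. Keeping the two conventions in separately named macros avoids the trap; a small sketch with illustrative EX_* names:

#include <linux/bitops.h>
#include <linux/types.h>

#define EX_FIELD_OFFSET(x)	(x)		/* bit position */
#define EX_FIELD_MASK(x)	BIT(x)		/* mask: 1 << position */

static u32 ex_set_clk_src(u32 val, unsigned int id, u32 mode)
{
	val &= ~EX_FIELD_MASK(id);		/* clear the old bit */
	val |= mode << EX_FIELD_OFFSET(id);	/* shift the new value in */
	return val;
}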
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index a8a730641bbb..bb1bb72121c0 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -85,7 +85,6 @@ struct netcp_intf {
struct list_head rxhook_list_head;
unsigned int rx_queue_id;
void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
- u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
struct napi_struct rx_napi;
struct napi_struct tx_napi;
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 9749dfd78c43..4755838c6137 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -34,6 +34,7 @@
#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT 64
#define NETCP_TX_TIMEOUT (5 * HZ)
+#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR 16
@@ -804,30 +805,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
if (likely(fdq == 0)) {
unsigned int primary_buf_len;
/* Allocate a primary receive queue entry */
- buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
+ buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
primary_buf_len = SKB_DATA_ALIGN(buf_len) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- if (primary_buf_len <= PAGE_SIZE) {
- bufptr = netdev_alloc_frag(primary_buf_len);
- pad[1] = primary_buf_len;
- } else {
- bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
- GFP_DMA32 | __GFP_COLD);
- pad[1] = 0;
- }
+ bufptr = netdev_alloc_frag(primary_buf_len);
+ pad[1] = primary_buf_len;
if (unlikely(!bufptr)) {
- dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
+ dev_warn_ratelimited(netcp->ndev_dev,
+ "Primary RX buffer alloc failed\n");
goto fail;
}
dma = dma_map_single(netcp->dev, bufptr, buf_len,
DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(netcp->dev, dma)))
+ goto fail;
+
pad[0] = (u32)bufptr;
} else {
/* Allocate a secondary receive queue entry */
- page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
+ page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
if (unlikely(!page)) {
dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
goto fail;
@@ -1010,7 +1009,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
/* Map the linear buffer */
dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
- if (unlikely(!dma_addr)) {
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
return NULL;
}
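Both netcp fixes in this pair of hunks teach the same lesson: a dma_addr_t returned by dma_map_single() may legitimately be 0, and mapping failures are reported out of band, so testing the handle with "if (!dma_addr)" is wrong on both counts. The only valid check is dma_mapping_error(), sketched here:

#include <linux/dma-mapping.h>

static int ex_map_buf(struct device *dev, void *buf, size_t len,
		      dma_addr_t *out)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* Never compare the handle against 0 or NULL. */
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	*out = dma;
	return 0;
}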
@@ -1546,8 +1545,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
knav_queue_disable_notify(netcp->rx_queue);
/* open Rx FDQs */
- for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
- netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
+ ++i) {
snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
@@ -1941,14 +1940,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
netcp->rx_queue_depths[0] = 128;
}
- ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
- netcp->rx_buffer_sizes,
- KNAV_DMA_FDQ_PER_CHAN);
- if (ret) {
- dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
- netcp->rx_buffer_sizes[0] = 1536;
- }
-
ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
if (ret < 0) {
dev_err(dev, "missing \"rx-pool\" parameter\n");
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 2ffbf13471d0..216bfd350169 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -728,11 +728,12 @@ static int mkiss_open(struct tty_struct *tty)
dev->type = ARPHRD_AX25;
/* Perform the low-level AX25 initialization. */
- if ((err = ax_open(ax->dev))) {
+ err = ax_open(ax->dev);
+ if (err)
goto out_free_netdev;
- }
- if (register_netdev(dev))
+ err = register_netdev(dev);
+ if (err)
goto out_free_buffers;
/* after register_netdev() - because else printk smashes the kernel */
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 3cc316cb7e6b..d8757bf9ad75 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -102,6 +102,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
+ if (len < 0) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_length_errors++;
+ goto enqueue_again;
+ }
+
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
@@ -121,6 +127,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
return;
}
+enqueue_again:
rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
if (rc) {
dev_kfree_skb(skb);
@@ -184,7 +191,7 @@ static int ntb_netdev_open(struct net_device *ndev)
rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
ndev->mtu + ETH_HLEN);
- if (rc == -EINVAL) {
+ if (rc) {
dev_kfree_skb(skb);
goto err;
}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b2197b506acb..1e1fbb049ec6 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -811,6 +811,7 @@ void phy_state_machine(struct work_struct *work)
bool needs_aneg = false, do_suspend = false;
enum phy_state old_state;
int err = 0;
+ int old_link;
mutex_lock(&phydev->lock);
@@ -896,11 +897,18 @@ void phy_state_machine(struct work_struct *work)
phydev->adjust_link(phydev->attached_dev);
break;
case PHY_RUNNING:
- /* Only register a CHANGE if we are
- * polling or ignoring interrupts
+ /* Only register a CHANGE if we are polling or ignoring
+ * interrupts and the link state has changed since the last check.
*/
- if (!phy_interrupt_is_valid(phydev))
- phydev->state = PHY_CHANGELINK;
+ if (!phy_interrupt_is_valid(phydev)) {
+ old_link = phydev->link;
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (old_link != phydev->link)
+ phydev->state = PHY_CHANGELINK;
+ }
break;
case PHY_CHANGELINK:
err = phy_read_status(phydev);
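With this change, PHY_RUNNING in polled mode becomes edge-triggered: the state machine reads the link first and only moves to PHY_CHANGELINK when the link actually flipped, instead of bouncing through PHY_CHANGELINK on every poll. The detection step in isolation, as a sketch:

#include <linux/phy.h>

/* Edge-detected link polling; phy_read_status() refreshes phydev->link
 * from the hardware. */
static int ex_poll_link_edge(struct phy_device *phydev)
{
	int old_link = phydev->link;
	int err = phy_read_status(phydev);

	if (err)
		return err;

	if (old_link != phydev->link)
		phydev->state = PHY_CHANGELINK;	/* a real transition */

	return 0;
}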
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c0f6479e19d4..70b08958763a 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -91,19 +91,18 @@ static int lan911x_config_init(struct phy_device *phydev)
}
/*
- * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
- * other in order to set the ENERGYON bit and exit EDPD mode. If a link partner
- * does not send the pulses within this interval, the PHY will remain powered
- * down.
- *
- * This workaround will manually toggle the PHY on/off upon calls to read_status
- * in order to generate link test pulses if the link is down. If a link partner
- * is present, it will respond to the pulses, which will cause the ENERGYON bit
- * to be set and will cause the EDPD mode to be exited.
+ * The LAN87xx can fail to latch the ENERGYON bit when an Ethernet cable is
+ * plugged in while the PHY is in Energy Detect Power-Down (EDPD) mode, which
+ * makes detection of a newly plugged cable unreliable.
+ * This workaround disables EDPD mode and then waits for a response to link
+ * pulses in order to detect the presence of a plugged-in Ethernet cable.
+ * EDPD mode is re-enabled at the end of the procedure to save approximately
+ * 220 mW of power while the cable is unplugged.
*/
static int lan87xx_read_status(struct phy_device *phydev)
{
int err = genphy_read_status(phydev);
+ int i;
if (!phydev->link) {
/* Disable EDPD to wake up PHY */
@@ -116,8 +115,16 @@ static int lan87xx_read_status(struct phy_device *phydev)
if (rc < 0)
return rc;
- /* Sleep 64 ms to allow ~5 link test pulses to be sent */
- msleep(64);
+ /* Wait max 640 ms to detect energy */
+ for (i = 0; i < 64; i++) {
+ /* Sleep to allow link test pulses to be sent */
+ msleep(10);
+ rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (rc < 0)
+ return rc;
+ if (rc & MII_LAN83C185_ENERGYON)
+ break;
+ }
/* Re-enable EDPD */
rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -191,7 +198,7 @@ static struct phy_driver smsc_phy_driver[] = {
/* basic functions */
.config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
+ .read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
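The lan87xx_read_status() rework replaces one fixed 64 ms sleep with a bounded poll: up to 64 iterations of 10 ms, exiting as soon as ENERGYON is observed (and in this driver a timeout is not an error, it simply means no cable). The reusable shape of such a loop, sketched with an illustrative ex_* name:

#include <linux/delay.h>
#include <linux/phy.h>

/* Poll a PHY register until a bit is set, with a bounded budget. */
static int ex_wait_for_bit(struct phy_device *phydev, u32 reg, u16 bit,
			   unsigned int tries, unsigned int delay_ms)
{
	unsigned int i;
	int rc;

	for (i = 0; i < tries; i++) {
		msleep(delay_ms);
		rc = phy_read(phydev, reg);
		if (rc < 0)
			return rc;		/* MDIO bus error */
		if (rc & bit)
			return 0;		/* condition met early */
	}

	return -ETIMEDOUT;			/* caller decides policy */
}

A lan87xx-style caller would use ex_wait_for_bit(phydev, MII_LAN83C185_CTRL_STATUS, MII_LAN83C185_ENERGYON, 64, 10) and treat -ETIMEDOUT as "no link partner" rather than as a failure.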
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 9d15566521a7..fa8f5046afe9 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -269,9 +269,9 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+ struct file *file, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
-static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
@@ -392,8 +392,10 @@ static int ppp_release(struct inode *unused, struct file *file)
file->private_data = NULL;
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
+ rtnl_lock();
if (file == ppp->owner)
- ppp_shutdown_interface(ppp);
+ unregister_netdevice(ppp->dev);
+ rtnl_unlock();
}
if (atomic_dec_and_test(&pf->refcnt)) {
switch (pf->kind) {
@@ -593,8 +595,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
mutex_lock(&ppp_mutex);
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
+ rtnl_lock();
if (file == ppp->owner)
- ppp_shutdown_interface(ppp);
+ unregister_netdevice(ppp->dev);
+ rtnl_unlock();
}
if (atomic_long_read(&file->f_count) < 2) {
ppp_release(NULL, file);
@@ -838,11 +842,10 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
/* Create a new ppp unit */
if (get_user(unit, p))
break;
- ppp = ppp_create_interface(net, unit, &err);
+ ppp = ppp_create_interface(net, unit, file, &err);
if (!ppp)
break;
file->private_data = &ppp->file;
- ppp->owner = file;
err = -EFAULT;
if (put_user(ppp->file.index, p))
break;
@@ -916,6 +919,16 @@ static __net_init int ppp_init_net(struct net *net)
static __net_exit void ppp_exit_net(struct net *net)
{
struct ppp_net *pn = net_generic(net, ppp_net_id);
+ struct ppp *ppp;
+ LIST_HEAD(list);
+ int id;
+
+ rtnl_lock();
+ idr_for_each_entry(&pn->units_idr, ppp, id)
+ unregister_netdevice_queue(ppp->dev, &list);
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
idr_destroy(&pn->units_idr);
}
@@ -1088,8 +1101,28 @@ static int ppp_dev_init(struct net_device *dev)
return 0;
}
+static void ppp_dev_uninit(struct net_device *dev)
+{
+ struct ppp *ppp = netdev_priv(dev);
+ struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+
+ ppp_lock(ppp);
+ ppp->closing = 1;
+ ppp_unlock(ppp);
+
+ mutex_lock(&pn->all_ppp_mutex);
+ unit_put(&pn->units_idr, ppp->file.index);
+ mutex_unlock(&pn->all_ppp_mutex);
+
+ ppp->owner = NULL;
+
+ ppp->file.dead = 1;
+ wake_up_interruptible(&ppp->file.rwait);
+}
+
static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
+ .ndo_uninit = ppp_dev_uninit,
.ndo_start_xmit = ppp_start_xmit,
.ndo_do_ioctl = ppp_net_ioctl,
.ndo_get_stats64 = ppp_get_stats64,
@@ -2667,8 +2700,8 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
* or if there is already a unit with the requested number.
* unit == -1 means allocate a new number.
*/
-static struct ppp *
-ppp_create_interface(struct net *net, int unit, int *retp)
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+ struct file *file, int *retp)
{
struct ppp *ppp;
struct ppp_net *pn;
@@ -2688,6 +2721,7 @@ ppp_create_interface(struct net *net, int unit, int *retp)
ppp->mru = PPP_MRU;
init_ppp_file(&ppp->file, INTERFACE);
ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
+ ppp->owner = file;
for (i = 0; i < NUM_NP; ++i)
ppp->npmode[i] = NPMODE_PASS;
INIT_LIST_HEAD(&ppp->channels);
@@ -2776,34 +2810,6 @@ init_ppp_file(struct ppp_file *pf, int kind)
}
/*
- * Take down a ppp interface unit - called when the owning file
- * (the one that created the unit) is closed or detached.
- */
-static void ppp_shutdown_interface(struct ppp *ppp)
-{
- struct ppp_net *pn;
-
- pn = ppp_pernet(ppp->ppp_net);
- mutex_lock(&pn->all_ppp_mutex);
-
- /* This will call dev_close() for us. */
- ppp_lock(ppp);
- if (!ppp->closing) {
- ppp->closing = 1;
- ppp_unlock(ppp);
- unregister_netdev(ppp->dev);
- unit_put(&pn->units_idr, ppp->file.index);
- } else
- ppp_unlock(ppp);
-
- ppp->file.dead = 1;
- ppp->owner = NULL;
- wake_up_interruptible(&ppp->file.rwait);
-
- mutex_unlock(&pn->all_ppp_mutex);
-}
-
-/*
* Free the memory used by a ppp unit. This is only called once
* there are no channels connected to the unit and no file structs
* that reference the unit.
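The ppp_generic changes above retire the hand-rolled ppp_shutdown_interface(): the release and ioctl paths now call unregister_netdevice() under rtnl_lock(), and the per-unit teardown (removing the idr entry, clearing the owner, marking the file dead and waking readers) moves into an .ndo_uninit hook, which the core invokes on every unregister path, including the batched unregister_netdevice_many() used by ppp_exit_net(). A minimal sketch of the hook-based pattern, with hypothetical my_* names:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Called by the core, with RTNL held, on every unregister path. */
static void my_dev_uninit(struct net_device *dev)
{
	/* drop ids, mark the unit dead, wake waiters, ... */
}

/* assigned to dev->netdev_ops at setup time */
static const struct net_device_ops my_netdev_ops = {
	.ndo_uninit	= my_dev_uninit,
};

static void my_owner_close(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);	/* .ndo_uninit runs inside */
	rtnl_unlock();
}

Centralizing teardown in .ndo_uninit is what lets ppp_exit_net() collect all units onto one list and unregister them in a single RTNL round trip.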
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 9d43460ce3c7..64a60afbe50c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -785,6 +785,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
/* 4. Gobi 1000 devices */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7fbca37a1adf..237f8e5e493d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Do we support "hardware" checksums? */
if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
/* This opens up the world of extra features. */
- dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+ dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
if (csum)
- dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 7193b7304fdd..848ea6a399f2 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma)
chan->netdev->base_addr = chan->cosa->datareg;
chan->netdev->irq = chan->cosa->irq;
chan->netdev->dma = chan->cosa->dma;
- if (register_hdlc_device(chan->netdev)) {
+ err = register_hdlc_device(chan->netdev);
+ if (err) {
netdev_warn(chan->netdev,
"register_hdlc_device() failed\n");
free_netdev(chan->netdev);
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 25d1cbd34306..b2f0d245bcf3 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
switch (phy->rev) {
case 6:
case 5:
- if (sprom->fem.ghz5.extpa_gain == 3)
+ if (sprom->fem.ghz2.extpa_gain == 3)
return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
/* fall through */
case 4:
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 5000bfcded61..5514ad6d4e54 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1023,7 +1023,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->scan_priority =
iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
- if (iwl_mvm_scan_total_iterations(params) == 0)
+ if (iwl_mvm_scan_total_iterations(params) == 1)
cmd->ooc_priority =
iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
else
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 6203c4ad9bba..9e144e71da0b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_WAKE_ME);
- else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PREPARE |
CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+ mdelay(1);
+ iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ }
mdelay(5);
}
@@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
if (ret >= 0)
return 0;
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ msleep(1);
+
for (iter = 0; iter < 10; iter++) {
/* If HW is not ready, prepare the conditions to check again */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
do {
ret = iwl_pcie_set_hw_ready(trans);
- if (ret >= 0)
- return 0;
+ if (ret >= 0) {
+ ret = 0;
+ goto out;
+ }
usleep_range(200, 1000);
t += 200;
@@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
IWL_ERR(trans, "Couldn't prepare the card\n");
+out:
+ iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+
return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 2b86c2135de3..607acb53c847 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1875,8 +1875,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* start timer if queue currently empty */
if (q->read_ptr == q->write_ptr) {
- if (txq->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+ if (txq->wd_timeout) {
+ /*
+ * If the TXQ is active, set the timer; if not, store
+ * the timeout in the remainder so that the timer will
+ * be armed with the right value when the station
+ * wakes up.
+ */
+ if (!txq->frozen)
+ mod_timer(&txq->stuck_timer,
+ jiffies + txq->wd_timeout);
+ else
+ txq->frozen_expiry_remainder = txq->wd_timeout;
+ }
IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
iwl_trans_pcie_ref(trans);
}
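The tx.c hunk above separates two cases for the stuck-queue watchdog: an active queue arms the timer immediately, while a frozen queue (its station is asleep) parks the timeout in frozen_expiry_remainder so the timer is armed with the right value once the queue thaws. A sketch of that defer-until-thaw shape, with a hypothetical struct; the real fields live in the driver's TX queue structure:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/types.h>

struct txq_sketch {			/* illustrative fields */
	struct timer_list stuck_timer;
	unsigned long wd_timeout;	/* watchdog period, in jiffies */
	unsigned long frozen_expiry_remainder;
	bool frozen;
};

static void txq_arm_or_defer(struct txq_sketch *q)
{
	if (!q->frozen)
		mod_timer(&q->stuck_timer, jiffies + q->wd_timeout);
	else
		q->frozen_expiry_remainder = q->wd_timeout;
}

static void txq_thaw(struct txq_sketch *q)
{
	q->frozen = false;
	if (q->frozen_expiry_remainder) {
		mod_timer(&q->stuck_timer,
			  jiffies + q->frozen_expiry_remainder);
		q->frozen_expiry_remainder = 0;
	}
}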
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index b6cc9ff47fc2..1c6788aecc62 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
(struct rsi_91x_sdiodev *)adapter->rsi_dev;
u32 len;
u32 num_blocks;
+ const u8 *fw;
const struct firmware *fw_entry = NULL;
u32 block_size = dev->tx_blk_size;
int status = 0;
@@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
return status;
}
+ /* Copy firmware into DMA-accessible memory */
+ fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
len = fw_entry->size;
if (len % 4)
@@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
- status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
+ status = rsi_copy_to_card(common, fw, len, num_blocks);
+ kfree(fw);
release_firmware(fw_entry);
return status;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
index 1106ce76707e..30c2cf7fa93b 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
return status;
}
+ /* Copy firmware into DMA-accessible memory */
fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
len = fw_entry->size;
if (len % 4)
@@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
status = rsi_copy_to_card(common, fw, len, num_blocks);
+ kfree(fw);
release_firmware(fw_entry);
return status;
}
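Both rsi hunks fix the same hazard: the buffer behind fw_entry->data from request_firmware() is not guaranteed to be safe for the SDIO/USB DMA path, so the firmware is first duplicated into kmalloc'd memory with kmemdup() and freed once copied to the card. A self-contained sketch of the pattern, where send_to_device() is a hypothetical stand-in for rsi_copy_to_card():

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/slab.h>

static int send_to_device(struct device *dev, const u8 *buf, size_t len)
{
	return 0;	/* hypothetical transport write */
}

static int load_fw_dma_safe(struct device *dev, const char *name)
{
	const struct firmware *fw;
	u8 *buf;
	int ret;

	ret = request_firmware(&fw, name, dev);
	if (ret)
		return ret;

	/* fw->data may not be DMA-capable; duplicate it before DMA */
	buf = kmemdup(fw->data, fw->size, GFP_KERNEL);
	if (!buf) {
		release_firmware(fw);
		return -ENOMEM;
	}

	ret = send_to_device(dev, buf, fw->size);

	kfree(buf);
	release_firmware(fw);
	return ret;
}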
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 3b3a88b53b11..585d0883c7e5 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+ struct rtl_tcb_desc tcb_desc;
- if (skb)
- rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL);
+ if (skb) {
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+ rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
+ }
}
static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index 1017f02d7bf7..7bf88d9dcdc3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1a83e190fc15..28577a31549d 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
atomic_dec(&queue->inflight_packets);
+
+ /* Wake the dealloc thread _after_ decrementing inflight_packets so
+ * that if kthread_stop() has already been called, the dealloc thread
+ * does not wait forever with nothing to wake it.
+ */
+ wake_up(&queue->dealloc_wq);
}
int xenvif_schedulable(struct xenvif *vif)
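The interface.c comment above captures a classic lost-wakeup hazard: the dealloc kthread's wait condition reads inflight_packets, so the completion path must decrement first and wake second; previously the wakeup happened only in the zerocopy callback, and kthread_stop() could wait forever with nothing left to wake it. A minimal sketch of the matching waiter/waker pair, with hypothetical names:

#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static atomic_t inflight = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(dealloc_wq);

/* Waker: decrement, then wake, so the waiter can never observe a
 * stale non-zero count after its wakeup.
 */
static void packet_done(void)
{
	atomic_dec(&inflight);
	wake_up(&dealloc_wq);
}

/* Waiter: a dealloc kthread that must also notice kthread_stop(). */
static int dealloc_thread(void *data)
{
	while (!kthread_should_stop()) {
		wait_event_interruptible(dealloc_wq,
					 atomic_read(&inflight) == 0 ||
					 kthread_should_stop());
		/* ... release completed slots ... */
	}
	return 0;
}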
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7d50711476fe..3f44b522b831 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
struct sk_buff *skb,
struct xen_netif_tx_request *txp,
- struct gnttab_map_grant_ref *gop)
+ struct gnttab_map_grant_ref *gop,
+ unsigned int frag_overflow,
+ struct sk_buff *nskb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
int start;
pending_ring_idx_t index;
- unsigned int nr_slots, frag_overflow = 0;
+ unsigned int nr_slots;
- /* At this point shinfo->nr_frags is in fact the number of
- * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
- */
- if (shinfo->nr_frags > MAX_SKB_FRAGS) {
- frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
- BUG_ON(frag_overflow > MAX_SKB_FRAGS);
- shinfo->nr_frags = MAX_SKB_FRAGS;
- }
nr_slots = shinfo->nr_frags;
/* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
}
if (frag_overflow) {
- struct sk_buff *nskb = xenvif_alloc_skb(0);
- if (unlikely(nskb == NULL)) {
- if (net_ratelimit())
- netdev_err(queue->vif->dev,
- "Can't allocate the frag_list skb.\n");
- return NULL;
- }
shinfo = skb_shinfo(nskb);
frags = shinfo->frags;
@@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
unsigned *copy_ops,
unsigned *map_ops)
{
- struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
- struct sk_buff *skb;
+ struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+ struct sk_buff *skb, *nskb;
int ret;
+ unsigned int frag_overflow;
while (skb_queue_len(&queue->tx_queue) < budget) {
struct xen_netif_tx_request txreq;
@@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
break;
}
+ skb_shinfo(skb)->nr_frags = ret;
+ if (data_len < txreq.size)
+ skb_shinfo(skb)->nr_frags++;
+ /* At this point shinfo->nr_frags is in fact the number of
+ * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+ */
+ frag_overflow = 0;
+ nskb = NULL;
+ if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+ frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+ BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+ skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+ nskb = xenvif_alloc_skb(0);
+ if (unlikely(nskb == NULL)) {
+ kfree_skb(skb);
+ xenvif_tx_err(queue, &txreq, idx);
+ if (net_ratelimit())
+ netdev_err(queue->vif->dev,
+ "Can't allocate the frag_list skb.\n");
+ break;
+ }
+ }
+
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
kfree_skb(skb);
+ kfree_skb(nskb);
break;
}
}
@@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
(*copy_ops)++;
- skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
- skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
queue->pending_cons++;
- request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
- if (request_gop == NULL) {
- kfree_skb(skb);
- xenvif_tx_err(queue, &txreq, idx);
- break;
- }
- gop = request_gop;
+ gop = xenvif_get_requests(queue, skb, txfrags, gop,
+ frag_overflow, nskb);
__skb_queue_tail(&queue->tx_queue, skb);
@@ -1536,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
smp_wmb();
queue->dealloc_prod++;
} while (ubuf);
- wake_up(&queue->dealloc_wq);
spin_unlock_irqrestore(&queue->callback_lock, flags);
if (likely(zerocopy_success))
diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c
index 23435f2a5486..2e2530743831 100644
--- a/drivers/ntb/ntb.c
+++ b/drivers/ntb/ntb.c
@@ -114,7 +114,7 @@ int ntb_register_device(struct ntb_dev *ntb)
ntb->dev.bus = &ntb_bus;
ntb->dev.parent = &ntb->pdev->dev;
ntb->dev.release = ntb_dev_release;
- dev_set_name(&ntb->dev, pci_name(ntb->pdev));
+ dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
ntb->ctx = NULL;
ntb->ctx_ops = NULL;
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index efe3ad4122f2..1c6386d5f79c 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -142,10 +142,11 @@ struct ntb_transport_qp {
void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
void *data, int len);
+ struct list_head rx_post_q;
struct list_head rx_pend_q;
struct list_head rx_free_q;
- spinlock_t ntb_rx_pend_q_lock;
- spinlock_t ntb_rx_free_q_lock;
+ /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
+ spinlock_t ntb_rx_q_lock;
void *rx_buff;
unsigned int rx_index;
unsigned int rx_max_entry;
@@ -211,6 +212,8 @@ struct ntb_transport_ctx {
bool link_is_up;
struct delayed_work link_work;
struct work_struct link_cleanup;
+
+ struct dentry *debugfs_node_dir;
};
enum {
@@ -436,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
char *buf;
ssize_t ret, out_offset, out_count;
+ qp = filp->private_data;
+
+ if (!qp || !qp->link_is_up)
+ return 0;
+
out_count = 1000;
buf = kmalloc(out_count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- qp = filp->private_data;
out_offset = 0;
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"NTB QP stats\n");
@@ -534,6 +541,27 @@ out:
return entry;
}
+static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
+ struct list_head *list,
+ struct list_head *to_list)
+{
+ struct ntb_queue_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+
+ if (list_empty(list)) {
+ entry = NULL;
+ } else {
+ entry = list_first_entry(list, struct ntb_queue_entry, entry);
+ list_move_tail(&entry->entry, to_list);
+ }
+
+ spin_unlock_irqrestore(lock, flags);
+
+ return entry;
+}
+
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
unsigned int qp_num)
{
@@ -601,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
}
static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
- unsigned int size)
+ resource_size_t size)
{
struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
struct pci_dev *pdev = nt->ndev->pdev;
- unsigned int xlat_size, buff_size;
+ size_t xlat_size, buff_size;
int rc;
+ if (!size)
+ return -EINVAL;
+
xlat_size = round_up(size, mw->xlat_align_size);
buff_size = round_up(size, mw->xlat_align);
@@ -627,7 +658,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
if (!mw->virt_addr) {
mw->xlat_size = 0;
mw->buff_size = 0;
- dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
+ dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
buff_size);
return -ENOMEM;
}
@@ -867,6 +898,8 @@ static void ntb_qp_link_work(struct work_struct *work)
if (qp->event_handler)
qp->event_handler(qp->cb_data, qp->link_is_up);
+
+ tasklet_schedule(&qp->rxc_db_work);
} else if (nt->link_is_up)
schedule_delayed_work(&qp->link_work,
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -923,12 +956,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
qp->tx_max_frame = min(transport_mtu, tx_size / 2);
qp->tx_max_entry = tx_size / qp->tx_max_frame;
- if (nt_debugfs_dir) {
+ if (nt->debugfs_node_dir) {
char debugfs_name[4];
snprintf(debugfs_name, 4, "qp%d", qp_num);
qp->debugfs_dir = debugfs_create_dir(debugfs_name,
- nt_debugfs_dir);
+ nt->debugfs_node_dir);
qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
qp->debugfs_dir, qp,
@@ -941,10 +974,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
- spin_lock_init(&qp->ntb_rx_pend_q_lock);
- spin_lock_init(&qp->ntb_rx_free_q_lock);
+ spin_lock_init(&qp->ntb_rx_q_lock);
spin_lock_init(&qp->ntb_tx_free_q_lock);
+ INIT_LIST_HEAD(&qp->rx_post_q);
INIT_LIST_HEAD(&qp->rx_pend_q);
INIT_LIST_HEAD(&qp->rx_free_q);
INIT_LIST_HEAD(&qp->tx_free_q);
@@ -1031,6 +1064,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
goto err2;
}
+ if (nt_debugfs_dir) {
+ nt->debugfs_node_dir =
+ debugfs_create_dir(pci_name(ndev->pdev),
+ nt_debugfs_dir);
+ }
+
for (i = 0; i < qp_count; i++) {
rc = ntb_transport_init_queue(nt, i);
if (rc)
@@ -1107,22 +1146,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
kfree(nt);
}
-static void ntb_rx_copy_callback(void *data)
+static void ntb_complete_rxc(struct ntb_transport_qp *qp)
{
- struct ntb_queue_entry *entry = data;
- struct ntb_transport_qp *qp = entry->qp;
- void *cb_data = entry->cb_data;
- unsigned int len = entry->len;
- struct ntb_payload_header *hdr = entry->rx_hdr;
+ struct ntb_queue_entry *entry;
+ void *cb_data;
+ unsigned int len;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+
+ while (!list_empty(&qp->rx_post_q)) {
+ entry = list_first_entry(&qp->rx_post_q,
+ struct ntb_queue_entry, entry);
+ if (!(entry->flags & DESC_DONE_FLAG))
+ break;
+
+ entry->rx_hdr->flags = 0;
+ iowrite32(entry->index, &qp->rx_info->entry);
- hdr->flags = 0;
+ cb_data = entry->cb_data;
+ len = entry->len;
- iowrite32(entry->index, &qp->rx_info->entry);
+ list_move_tail(&entry->entry, &qp->rx_free_q);
- ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+ spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
- if (qp->rx_handler && qp->client_ready)
- qp->rx_handler(qp, qp->cb_data, cb_data, len);
+ if (qp->rx_handler && qp->client_ready)
+ qp->rx_handler(qp, qp->cb_data, cb_data, len);
+
+ spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+ }
+
+ spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+}
+
+static void ntb_rx_copy_callback(void *data)
+{
+ struct ntb_queue_entry *entry = data;
+
+ entry->flags |= DESC_DONE_FLAG;
+
+ ntb_complete_rxc(entry->qp);
}
static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
@@ -1138,19 +1202,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
ntb_rx_copy_callback(entry);
}
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
- size_t len)
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
struct dma_async_tx_descriptor *txd;
struct ntb_transport_qp *qp = entry->qp;
struct dma_chan *chan = qp->dma_chan;
struct dma_device *device;
- size_t pay_off, buff_off;
+ size_t pay_off, buff_off, len;
struct dmaengine_unmap_data *unmap;
dma_cookie_t cookie;
void *buf = entry->buf;
- entry->len = len;
+ len = entry->len;
if (!chan)
goto err;
@@ -1226,7 +1289,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
struct ntb_payload_header *hdr;
struct ntb_queue_entry *entry;
void *offset;
- int rc;
offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
@@ -1255,65 +1317,43 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
return -EIO;
}
- entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+ entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
if (!entry) {
dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
qp->rx_err_no_buf++;
-
- rc = -ENOMEM;
- goto err;
+ return -EAGAIN;
}
+ entry->rx_hdr = hdr;
+ entry->index = qp->rx_index;
+
if (hdr->len > entry->len) {
dev_dbg(&qp->ndev->pdev->dev,
"receive buffer overflow! Wanted %d got %d\n",
hdr->len, entry->len);
qp->rx_err_oflow++;
- rc = -EIO;
- goto err;
- }
+ entry->len = -EIO;
+ entry->flags |= DESC_DONE_FLAG;
- dev_dbg(&qp->ndev->pdev->dev,
- "RX OK index %u ver %u size %d into buf size %d\n",
- qp->rx_index, hdr->ver, hdr->len, entry->len);
+ ntb_complete_rxc(qp);
+ } else {
+ dev_dbg(&qp->ndev->pdev->dev,
+ "RX OK index %u ver %u size %d into buf size %d\n",
+ qp->rx_index, hdr->ver, hdr->len, entry->len);
- qp->rx_bytes += hdr->len;
- qp->rx_pkts++;
+ qp->rx_bytes += hdr->len;
+ qp->rx_pkts++;
- entry->index = qp->rx_index;
- entry->rx_hdr = hdr;
+ entry->len = hdr->len;
- ntb_async_rx(entry, offset, hdr->len);
+ ntb_async_rx(entry, offset);
+ }
qp->rx_index++;
qp->rx_index %= qp->rx_max_entry;
return 0;
-
-err:
- /* FIXME: if this syncrhonous update of the rx_index gets ahead of
- * asyncrhonous ntb_rx_copy_callback of previous entry, there are three
- * scenarios:
- *
- * 1) The peer might miss this update, but observe the update
- * from the memcpy completion callback. In this case, the buffer will
- * not be freed on the peer to be reused for a different packet. The
- * successful rx of a later packet would clear the condition, but the
- * condition could persist if several rx fail in a row.
- *
- * 2) The peer may observe this update before the asyncrhonous copy of
- * prior packets is completed. The peer may overwrite the buffers of
- * the prior packets before they are copied.
- *
- * 3) Both: the peer may observe the update, and then observe the index
- * decrement by the asynchronous completion callback. Who knows what
- * badness that will cause.
- */
- hdr->flags = 0;
- iowrite32(qp->rx_index, &qp->rx_info->entry);
-
- return rc;
}
static void ntb_transport_rxc_db(unsigned long data)
@@ -1333,7 +1373,7 @@ static void ntb_transport_rxc_db(unsigned long data)
break;
}
- if (qp->dma_chan)
+ if (i && qp->dma_chan)
dma_async_issue_pending(qp->dma_chan);
if (i == qp->rx_max_entry) {
@@ -1609,7 +1649,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
goto err1;
entry->qp = qp;
- ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+ ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
&qp->rx_free_q);
}
@@ -1634,7 +1674,7 @@ err2:
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
kfree(entry);
err1:
- while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
kfree(entry);
if (qp->dma_chan)
dma_release_channel(qp->dma_chan);
@@ -1652,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
*/
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
- struct ntb_transport_ctx *nt = qp->transport;
struct pci_dev *pdev;
struct ntb_queue_entry *entry;
u64 qp_bit;
@@ -1689,18 +1728,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
qp->tx_handler = NULL;
qp->event_handler = NULL;
- while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
kfree(entry);
- while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
- dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
+ dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
+ kfree(entry);
+ }
+
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
+ dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
kfree(entry);
}
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
kfree(entry);
- nt->qp_bitmap_free |= qp_bit;
+ qp->transport->qp_bitmap_free |= qp_bit;
dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
@@ -1724,14 +1768,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
if (!qp || qp->client_ready)
return NULL;
- entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+ entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
if (!entry)
return NULL;
buf = entry->cb_data;
*len = entry->len;
- ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+ ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
return buf;
}
@@ -1757,15 +1801,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
if (!qp)
return -EINVAL;
- entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+ entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
if (!entry)
return -ENOMEM;
entry->cb_data = cb;
entry->buf = data;
entry->len = len;
+ entry->flags = 0;
+
+ ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
- ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+ tasklet_schedule(&qp->rxc_db_work);
return 0;
}
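The ntb_transport rework threads receive buffers through three lists under the single ntb_rx_q_lock: rx_free_q -> rx_pend_q (posted by the client) -> rx_post_q (a packet has landed in the buffer). DMA copies may complete out of order, so each completion only sets DESC_DONE_FLAG; ntb_complete_rxc() then drains rx_post_q from the head, stopping at the first entry still in flight, which keeps client completions in FIFO order. A condensed sketch of that drain with hypothetical types (the real function additionally drops the lock around the client rx_handler callback):

#include <linux/list.h>
#include <linux/spinlock.h>

#define DONE_FLAG	0x1		/* illustrative */

struct rx_entry {
	struct list_head entry;
	unsigned int flags;
};

/* Completion callback: mark this entry done, then complete as many
 * head-of-queue entries as are finished, strictly in order.
 */
static void rx_done(struct rx_entry *e, spinlock_t *lock,
		    struct list_head *post_q, struct list_head *free_q)
{
	struct rx_entry *head;
	unsigned long flags;

	e->flags |= DONE_FLAG;

	spin_lock_irqsave(lock, flags);
	while (!list_empty(post_q)) {
		head = list_first_entry(post_q, struct rx_entry, entry);
		if (!(head->flags & DONE_FLAG))
			break;		/* preserve FIFO order */
		list_move_tail(&head->entry, free_q);
		/* deliver head to the client here */
	}
	spin_unlock_irqrestore(lock, flags);
}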
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 73de4efcbe6e..944f50015ed0 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -2,7 +2,7 @@
# PCI configuration
#
config PCI_BUS_ADDR_T_64BIT
- def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
+ def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
depends on PCI
config PCI_MSI
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index cefd636681b6..b978bbfe044c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -997,7 +997,12 @@ void set_pcie_port_type(struct pci_dev *pdev)
else if (type == PCI_EXP_TYPE_UPSTREAM ||
type == PCI_EXP_TYPE_DOWNSTREAM) {
parent = pci_upstream_bridge(pdev);
- if (!parent->has_secondary_link)
+
+ /*
+ * Usually there's an upstream device (Root Port or Switch
+ * Downstream Port), but we can't assume one exists.
+ */
+ if (parent && !parent->has_secondary_link)
pdev->has_secondary_link = 1;
}
}
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index e17c539e4f6f..2dad7e820ff0 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -212,6 +212,7 @@ void sun4i_usb_phy_set_squelch_detect(struct phy *_phy, bool enabled)
sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2);
}
+EXPORT_SYMBOL_GPL(sun4i_usb_phy_set_squelch_detect);
static struct phy_ops sun4i_usb_phy_ops = {
.init = sun4i_usb_phy_init,
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 3510b81db3fa..08020dc2c7c8 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -28,6 +28,8 @@
#include <linux/delay.h>
#include <linux/phy/omap_control_phy.h>
#include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#define PLL_STATUS 0x00000004
#define PLL_GO 0x00000008
@@ -52,6 +54,8 @@
#define PLL_LOCK 0x2
#define PLL_IDLE 0x1
+#define SATA_PLL_SOFT_RESET BIT(18)
+
/*
* This is an Empirical value that works, need to confirm the actual
* value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status
@@ -82,6 +86,9 @@ struct ti_pipe3 {
struct clk *refclk;
struct clk *div_clk;
struct pipe3_dpll_map *dpll_map;
+ struct regmap *dpll_reset_syscon; /* ctrl. reg. access */
+ unsigned int dpll_reset_reg; /* reg. index within syscon */
+ bool sata_refclk_enabled;
};
static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -249,8 +256,11 @@ static int ti_pipe3_exit(struct phy *x)
u32 val;
unsigned long timeout;
- /* SATA DPLL can't be powered down due to Errata i783 */
- if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
+ /* If dpll_reset_syscon is not present, we won't power down the SATA
+ * DPLL due to Errata i783
+ */
+ if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") &&
+ !phy->dpll_reset_syscon)
return 0;
/* PCIe doesn't have internal DPLL */
@@ -276,6 +286,14 @@ static int ti_pipe3_exit(struct phy *x)
}
}
+ /* i783: SATA needs control bit toggle after PLL unlock */
+ if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) {
+ regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
+ SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET);
+ regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
+ SATA_PLL_SOFT_RESET, 0);
+ }
+
ti_pipe3_disable_clocks(phy);
return 0;
@@ -350,6 +368,21 @@ static int ti_pipe3_probe(struct platform_device *pdev)
}
} else {
phy->wkupclk = ERR_PTR(-ENODEV);
+ phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node,
+ "syscon-pllreset");
+ if (IS_ERR(phy->dpll_reset_syscon)) {
+ dev_info(&pdev->dev,
+ "can't get syscon-pllreset, sata dpll won't idle\n");
+ phy->dpll_reset_syscon = NULL;
+ } else {
+ if (of_property_read_u32_index(node,
+ "syscon-pllreset", 1,
+ &phy->dpll_reset_reg)) {
+ dev_err(&pdev->dev,
+ "couldn't get pllreset reg. offset\n");
+ return -EINVAL;
+ }
+ }
}
if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
@@ -402,10 +435,16 @@ static int ti_pipe3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, phy);
pm_runtime_enable(phy->dev);
- /* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */
- if (of_device_is_compatible(node, "ti,phy-pipe3-sata"))
- if (!IS_ERR(phy->refclk))
+
+ /*
+ * Prevent auto-disable of refclk for SATA PHY due to Errata i783
+ */
+ if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
+ if (!IS_ERR(phy->refclk)) {
clk_prepare_enable(phy->refclk);
+ phy->sata_refclk_enabled = true;
+ }
+ }
generic_phy = devm_phy_create(phy->dev, NULL, &ops);
if (IS_ERR(generic_phy))
@@ -472,8 +511,18 @@ static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
{
if (!IS_ERR(phy->wkupclk))
clk_disable_unprepare(phy->wkupclk);
- if (!IS_ERR(phy->refclk))
+ if (!IS_ERR(phy->refclk)) {
clk_disable_unprepare(phy->refclk);
+ /*
+ * SATA refclk needs an additional disable as we left it
+ * on in probe to avoid Errata i783
+ */
+ if (phy->sata_refclk_enabled) {
+ clk_disable_unprepare(phy->refclk);
+ phy->sata_refclk_enabled = false;
+ }
+ }
+
if (!IS_ERR(phy->div_clk))
clk_disable_unprepare(phy->div_clk);
}
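The i783 handling has to reach a control-module register outside the PHY's own register space, so the driver goes through a syscon phandle: the DT property carries <&syscon reg-offset>, syscon_regmap_lookup_by_phandle() resolves the regmap, of_property_read_u32_index(..., 1, ...) extracts the offset, and regmap_update_bits() later toggles SATA_PLL_SOFT_RESET. A minimal sketch of consuming such a two-cell property (the DT snippet in the comment is illustrative):

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

/* DT (illustrative):	syscon-pllreset = <&scm_conf 0x3fc>; */
static int get_pllreset(struct device_node *node,
			struct regmap **map, unsigned int *reg)
{
	*map = syscon_regmap_lookup_by_phandle(node, "syscon-pllreset");
	if (IS_ERR(*map))
		return PTR_ERR(*map);	/* the driver above treats this
					 * as optional and only warns */

	/* cell 0 is the phandle itself; cell 1 is the register offset */
	return of_property_read_u32_index(node, "syscon-pllreset", 1, reg);
}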
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index cb1329919527..3271cd1abe7c 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -4,7 +4,6 @@
menuconfig CHROME_PLATFORMS
bool "Platform support for Chrome hardware"
- depends on X86 || ARM
---help---
Say Y here to get to see options for platform support for
various Chromebooks and Chromeboxes. This option alone does
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 26270c351624..ce129e595b55 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.17"
+#define DRV_VERSION "1.6.0.17a"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 155b286f1a9d..25436cd2860c 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -425,6 +425,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
unsigned long ptr;
struct fc_rport_priv *rdata;
spinlock_t *io_lock = NULL;
+ int io_lock_acquired = 0;
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
@@ -518,6 +519,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
spin_lock_irqsave(io_lock, flags);
/* initialize rest of io_req */
+ io_lock_acquired = 1;
io_req->port_id = rport->port_id;
io_req->start_time = jiffies;
CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
@@ -571,7 +573,7 @@ out:
(((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
/* if only we issued IO, will we have the io lock */
- if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED)
+ if (io_lock_acquired)
spin_unlock_irqrestore(io_lock, flags);
atomic_dec(&fnic->in_flight);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 1b3a09473452..30f9ef0c0d4f 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
if (resp) {
resp(sp, fp, arg);
res = true;
- } else if (!IS_ERR(fp)) {
- fc_frame_free(fp);
}
spin_lock_bh(&ep->ex_lock);
@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
* If new exch resp handler is valid then call that
* first.
*/
- fc_invoke_resp(ep, sp, fp);
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
fc_exch_release(ep);
return;
@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
fc_exch_hold(ep);
if (!rc)
fc_exch_delete(ep);
- fc_invoke_resp(ep, sp, fp);
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
if (has_rec)
fc_exch_timer_set(ep, ep->r_a_tov);
fc_exch_release(ep);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c6795941b45d..2d5909c4685c 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1039,11 +1039,26 @@ restart:
fc_fcp_pkt_hold(fsp);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
- if (!fc_fcp_lock_pkt(fsp)) {
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ if (!(fsp->state & FC_SRB_COMPL)) {
+ fsp->state |= FC_SRB_COMPL;
+ /*
+ * TODO: dropping scsi_pkt_lock and then reacquiring
+ * it around fc_fcp_cleanup_cmd() is required, since
+ * fc_fcp_cleanup_cmd() calls into fc_seq_set_resp()
+ * and that function can preempt the CPU via schedule.
+ * Maybe schedule and the related code should be
+ * removed instead of unlocking here, to avoid a
+ * scheduling-while-atomic bug.
+ */
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
fc_fcp_cleanup_cmd(fsp, error);
+
+ spin_lock_bh(&fsp->scsi_pkt_lock);
fc_io_compl(fsp);
- fc_fcp_unlock_pkt(fsp);
}
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
fc_fcp_pkt_release(fsp);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 8053f24f0349..98d9bb6ff725 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- unsigned long flags;
del_timer_sync(&conn->transport_timer);
+ mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
if (session->leadconn == conn) {
@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
}
spin_unlock_bh(&session->frwd_lock);
- /*
- * Block until all in-progress commands for this connection
- * time out or fail.
- */
- for (;;) {
- spin_lock_irqsave(session->host->host_lock, flags);
- if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
- spin_unlock_irqrestore(session->host->host_lock, flags);
- break;
- }
- spin_unlock_irqrestore(session->host->host_lock, flags);
- msleep_interruptible(500);
- iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
- "host_busy %d host_failed %d\n",
- atomic_read(&session->host->host_busy),
- session->host->host_failed);
- /*
- * force eh_abort() to unblock
- */
- wake_up(&conn->ehwait);
- }
-
/* flush queued up work because we free the connection below */
iscsi_suspend_tx(conn);
@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
if (session->leadconn == conn)
session->leadconn = NULL;
spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
iscsi_destroy_conn(cls_conn);
}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index cfadccef045c..6457a8a0db9c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -26,7 +26,6 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
-#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -2523,33 +2522,3 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
}
}
EXPORT_SYMBOL(scsi_build_sense_buffer);
-
-/**
- * scsi_set_sense_information - set the information field in a
- * formatted sense data buffer
- * @buf: Where to build sense data
- * @info: 64-bit information value to be set
- *
- **/
-void scsi_set_sense_information(u8 *buf, u64 info)
-{
- if ((buf[0] & 0x7f) == 0x72) {
- u8 *ucp, len;
-
- len = buf[7];
- ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
- if (!ucp) {
- buf[7] = len + 0xa;
- ucp = buf + 8 + len;
- }
- ucp[0] = 0;
- ucp[1] = 0xa;
- ucp[2] = 0x80; /* Valid bit */
- ucp[3] = 0;
- put_unaligned_be64(info, &ucp[4]);
- } else if ((buf[0] & 0x7f) == 0x70) {
- buf[0] |= 0x80;
- put_unaligned_be64(info, &buf[3]);
- }
-}
-EXPORT_SYMBOL(scsi_set_sense_information);
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 9e43ae1d2163..e4b799837948 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -217,15 +217,15 @@ static int sdev_runtime_suspend(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
struct scsi_device *sdev = to_scsi_device(dev);
- int err;
+ int err = 0;
- err = blk_pre_runtime_suspend(sdev->request_queue);
- if (err)
- return err;
- if (pm && pm->runtime_suspend)
+ if (pm && pm->runtime_suspend) {
+ err = blk_pre_runtime_suspend(sdev->request_queue);
+ if (err)
+ return err;
err = pm->runtime_suspend(dev);
- blk_post_runtime_suspend(sdev->request_queue, err);
-
+ blk_post_runtime_suspend(sdev->request_queue, err);
+ }
return err;
}
@@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err = 0;
- blk_pre_runtime_resume(sdev->request_queue);
- if (pm && pm->runtime_resume)
+ if (pm && pm->runtime_resume) {
+ blk_pre_runtime_resume(sdev->request_queue);
err = pm->runtime_resume(dev);
- blk_post_runtime_resume(sdev->request_queue, err);
-
+ blk_post_runtime_resume(sdev->request_queue, err);
+ }
return err;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3b2fcb4fada0..a20da8c25b4f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
max_xfer = sdkp->max_xfer_blocks;
max_xfer <<= ilog2(sdp->sector_size) - 9;
- max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
- max_xfer);
- blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
+ sdkp->disk->queue->limits.max_sectors =
+ min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+
set_capacity(disk, sdkp->capacity);
sd_config_write_same(sdkp);
kfree(buffer);
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index bfa42620a3f6..940781183fac 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -1266,6 +1266,7 @@ static const struct das1800_board *das1800_probe(struct comedi_device *dev)
if (index == das1801hc || index == das1802hc)
return board;
index = das1801hc;
+ break;
default:
dev_err(dev->class_dev,
"Board model: probe returned 0x%x (unknown, please report)\n",
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 9c934e6d2ea1..c61add46b426 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -40,7 +40,7 @@
#define DEBUG_SUBSYSTEM D_OTHER
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
#include "../include/obd_support.h"
#include "../include/lustre_debug.h"
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index b0c8e235b982..69bdc8f29b59 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1483,8 +1483,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) {
- if (conf->assoc) {
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
+ priv->op_mode != NL80211_IFTYPE_AP) {
+ if (conf->assoc && conf->beacon_rate) {
CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
conf->sync_tsf);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index cd77a064c772..fd092909a457 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
- if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
- } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+ else
cmd->targ_xfer_tag = 0xFFFFFFFF;
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index c2e9fea90b4a..860e84046177 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -457,8 +457,15 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
if (!strcmp(t->tf_ops->name, fo->name)) {
BUG_ON(atomic_read(&t->tf_access_cnt));
list_del(&t->tf_list);
+ mutex_unlock(&g_tf_lock);
+ /*
+ * Wait for any outstanding fabric se_deve_entry->rcu_head
+ * callbacks to complete post kfree_rcu(), before allowing
+ * fabric driver unload of TFO->module to proceed.
+ */
+ rcu_barrier();
kfree(t);
- break;
+ return;
}
}
mutex_unlock(&g_tf_lock);
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 62ea4e8e70a8..be9cefc07407 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -84,8 +84,16 @@ void target_backend_unregister(const struct target_backend_ops *ops)
list_for_each_entry(tb, &backend_list, list) {
if (tb->ops == ops) {
list_del(&tb->list);
+ mutex_unlock(&backend_mutex);
+ /*
+ * Wait for any outstanding backend driver ->rcu_head
+ * callbacks to complete post TBO->free_device() ->
+ * call_rcu(), before allowing backend driver module
+ * unload of target_backend_ops->owner to proceed.
+ */
+ rcu_barrier();
kfree(tb);
- break;
+ return;
}
}
mutex_unlock(&backend_mutex);
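Both target-core hunks apply the same unload-safety rule: before kfree()ing a registration that pending call_rcu()/kfree_rcu() callbacks may still reference, and before the owning module may unload, drop the list mutex and call rcu_barrier() to wait for every outstanding RCU callback to run. A minimal sketch against a hypothetical registry:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct reg_entry {			/* illustrative */
	struct list_head list;
};

static LIST_HEAD(registry);
static DEFINE_MUTEX(registry_lock);

static void unregister_entry(struct reg_entry *e)
{
	mutex_lock(&registry_lock);
	list_del(&e->list);
	mutex_unlock(&registry_lock);	/* drop the lock; the barrier
					 * may block for grace periods */

	rcu_barrier();	/* all pending RCU callbacks have now run */
	kfree(e);
}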
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index b5ba1ec3c354..f87d4cef6d39 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1203,17 +1203,13 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
+ struct scsi_lun slun;
unsigned char *buf;
u32 lun_count = 0, offset = 8;
-
- if (cmd->data_length < 16) {
- pr_warn("REPORT LUNS allocation length %u too small\n",
- cmd->data_length);
- return TCM_INVALID_CDB_FIELD;
- }
+ __be32 len;
buf = transport_kmap_data_sg(cmd);
- if (!buf)
+ if (cmd->data_length && !buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
@@ -1221,11 +1217,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* coming via a target_core_mod PASSTHROUGH op, and not through
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
- if (!sess) {
- int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
- lun_count = 1;
+ if (!sess)
goto done;
- }
+
nacl = sess->se_node_acl;
rcu_read_lock();
@@ -1236,10 +1230,12 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* See SPC2-R20 7.19.
*/
lun_count++;
- if ((offset + 8) > cmd->data_length)
+ if (offset >= cmd->data_length)
continue;
- int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+ int_to_scsilun(deve->mapped_lun, &slun);
+ memcpy(buf + offset, &slun,
+ min(8u, cmd->data_length - offset));
offset += 8;
}
rcu_read_unlock();
@@ -1248,12 +1244,22 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* See SPC3 r07, page 159.
*/
done:
- lun_count *= 8;
- buf[0] = ((lun_count >> 24) & 0xff);
- buf[1] = ((lun_count >> 16) & 0xff);
- buf[2] = ((lun_count >> 8) & 0xff);
- buf[3] = (lun_count & 0xff);
- transport_kunmap_data_sg(cmd);
+ /*
+ * If no LUNs are accessible, report virtual LUN 0.
+ */
+ if (lun_count == 0) {
+ int_to_scsilun(0, &slun);
+ if (cmd->data_length > 8)
+ memcpy(buf + offset, &slun,
+ min(8u, cmd->data_length - offset));
+ lun_count = 1;
+ }
+
+ if (buf) {
+ len = cpu_to_be32(lun_count * 8);
+ memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
+ transport_kunmap_data_sg(cmd);
+ }
target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
return 0;
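The reworked REPORT LUNS drops the old "allocation length must be at least 16" rejection: per SPC a short allocation length is legal, and the target simply truncates the returned data while still reporting the full LUN LIST LENGTH in the header, which is why every copy above is clamped against cmd->data_length. A small illustrative helper (hypothetical, not part of the patch) showing the clamping arithmetic: with 3 LUNs and a 20-byte buffer, the header still reports 24 (3 * 8), LUN 0 fits whole, LUN 1 is cut to 4 bytes, and LUN 2 is dropped.

#include <linux/kernel.h>

static unsigned int lun_entry_bytes(unsigned int offset,
				    unsigned int data_length)
{
	if (offset >= data_length)
		return 0;			/* entry truncated away */
	return min(8u, data_length - offset);	/* possibly partial */
}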
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6509c61b9648..620dcd405ff6 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -68,7 +68,7 @@ struct power_table {
* registered cooling device.
* @cpufreq_state: integer value representing the current state of cpufreq
* cooling devices.
- * @cpufreq_val: integer value representing the absolute value of the clipped
+ * @clipped_freq: integer value representing the absolute value of the clipped
* frequency.
* @max_level: maximum cooling level. One less than total number of valid
* cpufreq frequencies.
@@ -91,7 +91,7 @@ struct cpufreq_cooling_device {
int id;
struct thermal_cooling_device *cool_dev;
unsigned int cpufreq_state;
- unsigned int cpufreq_val;
+ unsigned int clipped_freq;
unsigned int max_level;
unsigned int *freq_table; /* In descending order */
struct cpumask allowed_cpus;
@@ -107,6 +107,9 @@ struct cpufreq_cooling_device {
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
+static unsigned int cpufreq_dev_count;
+
+static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_dev_list);
/**
@@ -185,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
struct cpufreq_cooling_device *cpufreq_dev;
- mutex_lock(&cooling_cpufreq_lock);
+ mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
- mutex_unlock(&cooling_cpufreq_lock);
+ mutex_unlock(&cooling_list_lock);
return get_level(cpufreq_dev, freq);
}
}
- mutex_unlock(&cooling_cpufreq_lock);
+ mutex_unlock(&cooling_list_lock);
pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
return THERMAL_CSTATE_INVALID;
@@ -215,29 +218,35 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
- unsigned long max_freq = 0;
+ unsigned long clipped_freq;
struct cpufreq_cooling_device *cpufreq_dev;
- switch (event) {
+ if (event != CPUFREQ_ADJUST)
+ return NOTIFY_DONE;
- case CPUFREQ_ADJUST:
- mutex_lock(&cooling_cpufreq_lock);
- list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
- if (!cpumask_test_cpu(policy->cpu,
- &cpufreq_dev->allowed_cpus))
- continue;
+ mutex_lock(&cooling_list_lock);
+ list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+ if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
+ continue;
- max_freq = cpufreq_dev->cpufreq_val;
+ /*
+ * policy->max is the maximum allowed frequency defined by user
+ * and clipped_freq is the maximum that thermal constraints
+ * allow.
+ *
+ * If clipped_freq is lower than policy->max, then we need to
+ * readjust policy->max.
+ *
+ * But, if clipped_freq is greater than policy->max, we don't
+ * need to do anything.
+ */
+ clipped_freq = cpufreq_dev->clipped_freq;
- if (policy->max != max_freq)
- cpufreq_verify_within_limits(policy, 0,
- max_freq);
- }
- mutex_unlock(&cooling_cpufreq_lock);
+ if (policy->max > clipped_freq)
+ cpufreq_verify_within_limits(policy, 0, clipped_freq);
break;
- default:
- return NOTIFY_DONE;
}
+ mutex_unlock(&cooling_list_lock);
return NOTIFY_OK;
}
@@ -519,7 +528,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
clip_freq = cpufreq_device->freq_table[state];
cpufreq_device->cpufreq_state = state;
- cpufreq_device->cpufreq_val = clip_freq;
+ cpufreq_device->clipped_freq = clip_freq;
cpufreq_update_policy(cpu);
@@ -861,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np,
pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
- cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
+ cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
cpufreq_dev->cool_dev = cool_dev;
mutex_lock(&cooling_cpufreq_lock);
+ mutex_lock(&cooling_list_lock);
+ list_add(&cpufreq_dev->node, &cpufreq_dev_list);
+ mutex_unlock(&cooling_list_lock);
+
/* Register the notifier for first cpufreq cooling device */
- if (list_empty(&cpufreq_dev_list))
+ if (!cpufreq_dev_count++)
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- list_add(&cpufreq_dev->node, &cpufreq_dev_list);
-
mutex_unlock(&cooling_cpufreq_lock);
return cool_dev;
@@ -1013,13 +1024,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
return;
cpufreq_dev = cdev->devdata;
- mutex_lock(&cooling_cpufreq_lock);
- list_del(&cpufreq_dev->node);
/* Unregister the notifier for the last cpufreq cooling device */
- if (list_empty(&cpufreq_dev_list))
+ mutex_lock(&cooling_cpufreq_lock);
+ if (!--cpufreq_dev_count)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
+
+ mutex_lock(&cooling_list_lock);
+ list_del(&cpufreq_dev->node);
+ mutex_unlock(&cooling_list_lock);
+
mutex_unlock(&cooling_cpufreq_lock);
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
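Two locking changes land together in cpu_cooling.c: the device list gets a dedicated cooling_list_lock so the cpufreq policy notifier no longer contends with registration's cooling_cpufreq_lock, and because list insertion now happens under the new lock, "register the notifier for the first device, unregister for the last" is keyed off an explicit cpufreq_dev_count instead of list_empty(). A minimal sketch of the count-gated pattern, with hypothetical register_notifier()/unregister_notifier() stubs:

#include <linux/mutex.h>

static DEFINE_MUTEX(reg_lock);
static unsigned int dev_count;

static void register_notifier(void)   { /* hypothetical */ }
static void unregister_notifier(void) { /* hypothetical */ }

static void cooling_dev_add(void)
{
	mutex_lock(&reg_lock);
	if (!dev_count++)
		register_notifier();	/* 0 -> 1: first device */
	mutex_unlock(&reg_lock);
}

static void cooling_dev_del(void)
{
	mutex_lock(&reg_lock);
	if (!--dev_count)
		unregister_notifier();	/* 1 -> 0: last device */
	mutex_unlock(&reg_lock);
}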
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index d5dd357ba57c..b49f97c734d0 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -405,7 +405,6 @@ static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops,
static struct platform_driver hisi_thermal_driver = {
.driver = {
.name = "hisi_thermal",
- .owner = THIS_MODULE,
.pm = &hisi_thermal_pm_ops,
.of_match_table = of_hisi_thermal_match,
},
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 4672250b329f..7006860f2f36 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -229,7 +229,8 @@ static int allocate_power(struct thermal_zone_device *tz,
struct thermal_instance *instance;
struct power_allocator_params *params = tz->governor_data;
u32 *req_power, *max_power, *granted_power, *extra_actor_power;
- u32 total_req_power, max_allocatable_power;
+ u32 *weighted_req_power;
+ u32 total_req_power, max_allocatable_power, total_weighted_req_power;
u32 total_granted_power, power_range;
int i, num_actors, total_weight, ret = 0;
int trip_max_desired_temperature = params->trip_max_desired_temperature;
@@ -247,16 +248,17 @@ static int allocate_power(struct thermal_zone_device *tz,
}
/*
- * We need to allocate three arrays of the same size:
- * req_power, max_power and granted_power. They are going to
- * be needed until this function returns. Allocate them all
- * in one go to simplify the allocation and deallocation
- * logic.
+ * We need to allocate five arrays of the same size:
+ * req_power, max_power, granted_power, extra_actor_power and
+ * weighted_req_power. They are going to be needed until this
+ * function returns. Allocate them all in one go to simplify
+ * the allocation and deallocation logic.
*/
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
- req_power = devm_kcalloc(&tz->device, num_actors * 4,
+ BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
+ req_power = devm_kcalloc(&tz->device, num_actors * 5,
sizeof(*req_power), GFP_KERNEL);
if (!req_power) {
ret = -ENOMEM;
@@ -266,8 +268,10 @@ static int allocate_power(struct thermal_zone_device *tz,
max_power = &req_power[num_actors];
granted_power = &req_power[2 * num_actors];
extra_actor_power = &req_power[3 * num_actors];
+ weighted_req_power = &req_power[4 * num_actors];
i = 0;
+ total_weighted_req_power = 0;
total_req_power = 0;
max_allocatable_power = 0;
@@ -289,13 +293,14 @@ static int allocate_power(struct thermal_zone_device *tz,
else
weight = instance->weight;
- req_power[i] = frac_to_int(weight * req_power[i]);
+ weighted_req_power[i] = frac_to_int(weight * req_power[i]);
if (power_actor_get_max_power(cdev, tz, &max_power[i]))
continue;
total_req_power += req_power[i];
max_allocatable_power += max_power[i];
+ total_weighted_req_power += weighted_req_power[i];
i++;
}
@@ -303,8 +308,9 @@ static int allocate_power(struct thermal_zone_device *tz,
power_range = pid_controller(tz, current_temp, control_temp,
max_allocatable_power);
- divvy_up_power(req_power, max_power, num_actors, total_req_power,
- power_range, granted_power, extra_actor_power);
+ divvy_up_power(weighted_req_power, max_power, num_actors,
+ total_weighted_req_power, power_range, granted_power,
+ extra_actor_power);
total_granted_power = 0;
i = 0;
@@ -328,7 +334,7 @@ static int allocate_power(struct thermal_zone_device *tz,
max_allocatable_power, current_temp,
(s32)control_temp - (s32)current_temp);
- devm_kfree(&tz->device, req_power);
+ kfree(req_power);
unlock:
mutex_unlock(&tz->lock);
@@ -420,7 +426,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
return -EINVAL;
}
- params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL);
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
@@ -462,14 +468,14 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
return 0;
free:
- devm_kfree(&tz->device, params);
+ kfree(params);
return ret;
}
static void power_allocator_unbind(struct thermal_zone_device *tz)
{
dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
- devm_kfree(&tz->device, tz->governor_data);
+ kfree(tz->governor_data);
tz->governor_data = NULL;
}
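
Two patterns are worth noting in the hunks above: the five equally sized work arrays are carved out of a single allocation so one free releases them all, and the governor data moves from devm_* to plain kzalloc()/kfree() because its lifetime is the bind/unbind cycle, not the device's. A minimal userspace sketch of the carve-out, with a hypothetical actor count:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int num_actors = 4; /* hypothetical number of cooling actors */

	/* one allocation sized for five equal u32 arrays, as in the hunk */
	uint32_t *req_power = calloc(num_actors * 5, sizeof(*req_power));
	if (!req_power)
		return 1;

	uint32_t *max_power          = &req_power[1 * num_actors];
	uint32_t *granted_power      = &req_power[2 * num_actors];
	uint32_t *extra_actor_power  = &req_power[3 * num_actors];
	uint32_t *weighted_req_power = &req_power[4 * num_actors];

	weighted_req_power[0] = 7; /* each pointer indexes its own slice */
	printf("%u %u %u %u\n", max_power[0], granted_power[0],
	       extra_actor_power[0], weighted_req_power[0]);

	free(req_power); /* a single free releases all five arrays */
	return 0;
}
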
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index c8e35c1a43dc..e0da3865e060 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,6 @@
config EXYNOS_THERMAL
tristate "Exynos thermal management unit driver"
- depends on OF
+ depends on THERMAL_OF
help
If you say yes here you get support for the TMU (Thermal Management
Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 531f4b179871..c96ff10b869e 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1296,7 +1296,6 @@ static struct thermal_zone_of_device_ops exynos_sensor_ops = {
static int exynos_tmu_probe(struct platform_device *pdev)
{
- struct exynos_tmu_platform_data *pdata;
struct exynos_tmu_data *data;
int ret;
@@ -1318,8 +1317,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
if (ret)
goto err_sensor;
- pdata = data->pdata;
-
INIT_WORK(&data->irq_work, exynos_tmu_work);
data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
@@ -1392,6 +1389,8 @@ err_clk_sec:
if (!IS_ERR(data->clk_sec))
clk_unprepare(data->clk_sec);
err_sensor:
+ if (!IS_ERR_OR_NULL(data->regulator))
+ regulator_disable(data->regulator);
thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
return ret;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 04659bfb888b..4ca211be4c0f 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1333,6 +1333,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
return -ENODEV;
unbind:
+ device_remove_file(&tz->device, &pos->weight_attr);
device_remove_file(&tz->device, &pos->attr);
sysfs_remove_link(&tz->device.kobj, pos->name);
release_idr(&tz->idr, &tz->lock, pos->id);
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 74fea4fa41b1..3ad48e1c0c57 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = {
},
};
-module_platform_driver(ci_hdrc_driver);
+static int __init ci_hdrc_platform_register(void)
+{
+ ci_hdrc_host_driver_init();
+ return platform_driver_register(&ci_hdrc_driver);
+}
+module_init(ci_hdrc_platform_register);
+
+static void __exit ci_hdrc_platform_unregister(void)
+{
+ platform_driver_unregister(&ci_hdrc_driver);
+}
+module_exit(ci_hdrc_platform_unregister);
MODULE_ALIAS("platform:ci_hdrc");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 6cf87b8b13a8..7161439def19 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -249,9 +249,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
rdrv->name = "host";
ci->roles[CI_ROLE_HOST] = rdrv;
+ return 0;
+}
+
+void ci_hdrc_host_driver_init(void)
+{
ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
-
- return 0;
}
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 5707bf379bfb..0f12f131bdd3 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -5,6 +5,7 @@
int ci_hdrc_host_init(struct ci_hdrc *ci);
void ci_hdrc_host_destroy(struct ci_hdrc *ci);
+void ci_hdrc_host_driver_init(void);
#else
@@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
}
+static inline void ci_hdrc_host_driver_init(void)
+{
+}
+
#endif
#endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index f7f35a36c09a..6df9715a4bcd 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -699,6 +699,10 @@ static inline int hidg_get_minor(void)
int ret;
ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
+ if (ret >= HIDG_MINORS) {
+ ida_simple_remove(&hidg_ida, ret);
+ ret = -ENODEV;
+ }
return ret;
}
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 44173df27273..357f63f47b42 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1248,7 +1248,15 @@ static struct config_item_type printer_func_type = {
static inline int gprinter_get_minor(void)
{
- return ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+ int ret;
+
+ ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+ if (ret >= PRINTER_MINORS) {
+ ida_simple_remove(&printer_ida, ret);
+ ret = -ENODEV;
+ }
+
+ return ret;
}
static inline void gprinter_put_minor(int minor)
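
Both this hunk and the f_hid.c one above bound-check the id returned by ida_simple_get() and hand it straight back when it exceeds the minor-number space. A rough userspace sketch of the allocate-check-release pattern; the counter merely stands in for the kernel IDA and the cap is hypothetical:

#include <errno.h>
#include <stdio.h>

#define PRINTER_MINORS 4 /* hypothetical cap on device nodes */

static int next_id; /* stand-in for the kernel IDA */

static int fake_ida_get(void) { return next_id++; }
static void fake_ida_remove(int id) { (void)id; /* would return id to pool */ }

static int get_minor(void)
{
	int ret = fake_ida_get();

	/* the IDA grows without bound; ids at or past the device-node
	 * limit must be handed back and reported as "no device" */
	if (ret >= PRINTER_MINORS) {
		fake_ida_remove(ret);
		ret = -ENODEV;
	}
	return ret;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("attempt %d -> %d\n", i, get_minor());
	return 0;
}
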
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 6d3eb8b00a48..531861547253 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
factor = 1000;
} else {
ep_desc = &hs_epin_desc;
- factor = 125;
+ factor = 8000;
}
/* pre-compute some values for iso_complete() */
uac2->p_framesize = opts->p_ssize *
num_channels(opts->p_chmask);
rate = opts->p_srate * uac2->p_framesize;
- uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor;
+ uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
prm->max_psize);
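
The replaced expression scaled bInterval up into a period, while the line below it divides the byte rate by p_interval, which only makes sense if p_interval is transfers per second: 1000 frames/s at full speed, 8000 microframes/s at high speed, divided by 2^(bInterval-1). A worked instance under hypothetical stream parameters:

#include <stdio.h>

int main(void)
{
	/* hypothetical values: high-speed endpoint, 48 kHz, 4-byte frames */
	unsigned bInterval = 4;         /* from the endpoint descriptor */
	unsigned factor    = 8000;      /* HS microframes per second */
	unsigned rate      = 48000 * 4; /* audio bytes per second */

	/* transfers per second = 8000 / 2^(bInterval-1) = 1000 here */
	unsigned p_interval = factor / (1u << (bInterval - 1));
	unsigned p_pktsize  = rate / p_interval; /* 192 bytes per transfer */

	printf("p_interval=%u/s p_pktsize=%u bytes\n", p_interval, p_pktsize);
	return 0;
}
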
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index b04980cf6dc4..1efa61265d8d 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -779,7 +779,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
/* The current hw dequeue pointer */
tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
deq_ptr_64 = tmp_32;
- tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1));
+ tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1(0));
deq_ptr_64 |= ((u64)tmp_32 << 32);
/* we have the dma addr of next bd that will be fetched by hardware */
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index 362ee8af5fce..89ed5e71a199 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -323,6 +323,7 @@ err4:
err3:
put_device(&udc->dev);
+ device_del(&gadget->dev);
err2:
put_device(&gadget->dev);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 3e442f77a2b9..9a8c936cd42c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
int size;
int i, j, num_ports;
- del_timer_sync(&xhci->cmd_timer);
+ if (timer_pending(&xhci->cmd_timer))
+ del_timer_sync(&xhci->cmd_timer);
/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6a8fc52aed58..32f4d564494a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
return 0;
/* offset in TRBs */
segment_offset = trb - seg->trbs;
- if (segment_offset > TRBS_PER_SEGMENT)
+ if (segment_offset >= TRBS_PER_SEGMENT)
return 0;
return seg->dma + (segment_offset * sizeof(*trb));
}
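
The hunk above fixes a classic off-by-one: a segment of TRBS_PER_SEGMENT entries has valid offsets 0 .. TRBS_PER_SEGMENT-1, so the one-past-the-end offset must be rejected, which needs ">=". A standalone illustration (the segment size here is only illustrative):

#include <stdbool.h>
#include <stdio.h>

#define TRBS_PER_SEGMENT 64 /* per-segment ring size; value illustrative */

static bool trb_offset_valid(long segment_offset)
{
	/* valid slots are 0 .. TRBS_PER_SEGMENT-1; ">" would wrongly
	 * accept the one-past-the-end offset, ">=" rejects it */
	return segment_offset >= 0 && segment_offset < TRBS_PER_SEGMENT;
}

int main(void)
{
	printf("%d %d\n", trb_offset_valid(TRBS_PER_SEGMENT - 1),
	       trb_offset_valid(TRBS_PER_SEGMENT)); /* prints: 1 0 */
	return 0;
}
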
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 19b85ee98a72..876423b8892c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
{ USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
.driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
+ { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
+ .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9c63897b3a56..d156545728c2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
{DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
{DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
- {DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */
{DEVICE_SWI(0x1199, 0x9051)}, /* Netgear AirCard 340U */
{DEVICE_SWI(0x1199, 0x9053)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9054)}, /* Sierra Wireless Modem */
@@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 46179a0828eb..07d1ecd564f7 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
+ { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
/* AT&T Direct IP LTE modems */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 658c34bb9076..1aaf89300621 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1306,10 +1306,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
int y;
int c = scr_readw((u16 *) vc->vc_pos);
+ ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+
if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
return;
- ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
if (vc->vc_cursor_type & 0x10)
fbcon_del_cursor_timer(info);
else
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 2d98de535e0f..f888561568d9 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -298,7 +298,7 @@ config FB_ARMCLCD
# Helper logic selected only by the ARM Versatile platform family.
config PLAT_VERSATILE_CLCD
- def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
+ def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
depends on ARM
depends on FB_ARMCLCD && FB=y
diff --git a/drivers/video/fbdev/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c
index 928ee639c0c1..bf407b6ba15c 100644
--- a/drivers/video/fbdev/omap2/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/dss/dss-of.c
@@ -60,6 +60,8 @@ omapdss_of_get_next_port(const struct device_node *parent,
}
prev = port;
} while (of_node_cmp(port->name, "port") != 0);
+
+ of_node_put(ports);
}
return port;
@@ -94,7 +96,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
if (!port)
return NULL;
- np = of_get_next_parent(port);
+ np = of_get_parent(port);
for (i = 0; i < 2 && np; ++i) {
struct property *prop;
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 86bd457d039d..50bce45e7f3d 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -653,7 +653,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
goto err_free_dma;
}
- ret = clk_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
if (ret < 0) {
dev_err(dev, "failed to enable clock\n");
goto err_misc_deregister;
@@ -685,7 +685,7 @@ err_misc_deregister:
misc_deregister(&priv->misc_dev);
err_disable_clk:
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
index 111c2d1911d3..b5102aa6090d 100644
--- a/drivers/video/of_videomode.c
+++ b/drivers/video/of_videomode.c
@@ -44,11 +44,9 @@ int of_get_videomode(struct device_node *np, struct videomode *vm,
index = disp->native_mode;
ret = videomode_from_timings(disp, vm, index);
- if (ret)
- return ret;
display_timings_release(disp);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(of_get_videomode);
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index 60e2a1677563..c96944b59856 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -313,6 +313,7 @@ err_init_vq:
static void virtinput_remove(struct virtio_device *vdev)
{
struct virtio_input *vi = vdev->priv;
+ void *buf;
unsigned long flags;
spin_lock_irqsave(&vi->lock, flags);
@@ -320,6 +321,9 @@ static void virtinput_remove(struct virtio_device *vdev)
spin_unlock_irqrestore(&vi->lock, flags);
input_unregister_device(vi->idev);
+ vdev->config->reset(vdev);
+ while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
+ kfree(buf);
vdev->config->del_vqs(vdev);
kfree(vi);
}
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index fd933695f232..bf4a23c7c591 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
}
/*
- * We avoid multiple worker processes conflicting via the balloon mutex.
+ * As this is a work item it is guaranteed to run as a single instance only.
* We may of course race updates of the target counts (which are protected
* by the balloon lock), or with changes to the Xen hard limit, but we will
* recover from these in time.
@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work)
enum bp_state state = BP_DONE;
long credit;
- mutex_lock(&balloon_mutex);
do {
+ mutex_lock(&balloon_mutex);
+
credit = current_credit();
if (credit > 0) {
@@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work)
state = update_schedule(state);
-#ifndef CONFIG_PREEMPT
- if (need_resched())
- schedule();
-#endif
+ mutex_unlock(&balloon_mutex);
+
+ cond_resched();
+
} while (credit && state == BP_DONE);
/* Schedule more work if there is some still to be done. */
if (state == BP_EAGAIN)
schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
-
- mutex_unlock(&balloon_mutex);
}
/* Resets the Xen limit, sets new target, and kicks off processing. */
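
The reworked loop above takes balloon_mutex per iteration and yields between iterations instead of holding the mutex across the whole drain, so other waiters and the scheduler get a chance in between. A pthreads sketch of the same shape; names and the amount of work are hypothetical, and sched_yield() stands in for cond_resched():

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t balloon_mutex = PTHREAD_MUTEX_INITIALIZER;
static long credit = 3; /* hypothetical amount of outstanding work */

int main(void)
{
	long c;

	do {
		/* take the lock per iteration, not around the whole loop */
		pthread_mutex_lock(&balloon_mutex);
		c = credit;
		if (c > 0)
			credit--; /* one chunk of work under the lock */
		pthread_mutex_unlock(&balloon_mutex);

		sched_yield(); /* userspace stand-in for cond_resched() */
	} while (c > 0);

	printf("balloon work drained\n");
	return 0;
}
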
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 67b9163db718..0dbb222daaf1 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
pr_debug("priv %p\n", priv);
+ mutex_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
map = list_entry(priv->maps.next, struct grant_map, next);
list_del(&map->next);
gntdev_put_map(NULL /* already removed */, map);
}
WARN_ON(!list_empty(&priv->freeable_maps));
+ mutex_unlock(&priv->lock);
if (use_ptemod)
mmu_notifier_unregister(&priv->mn, priv->mm);
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 9ad327238ba9..e30353575d5d 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -814,8 +814,10 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
addrs);
- if (!rv)
+ if (!rv) {
vunmap(vaddr);
+ free_xenballooned_pages(node->nr_handles, node->hvm.pages);
+ }
else
WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
node->nr_handles);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index e9ace099162c..8a8202956576 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1651,6 +1651,11 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
/* Exclusive -> exclusive, nothing changed */
}
}
+
+ /* For exclusive extent, free its reserved bytes too */
+ if (nr_old_roots == 0 && nr_new_roots == 1 &&
+ cur_new_count == nr_new_roots)
+ qg->reserved -= num_bytes;
if (dirty)
qgroup_dirty(fs_info, qg);
}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index dc10c9dd36c1..ddd5e9471290 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1506,7 +1506,6 @@ static int __mark_caps_flushing(struct inode *inode,
swap(cf, ci->i_prealloc_cap_flush);
cf->caps = flushing;
- cf->kick = false;
spin_lock(&mdsc->cap_dirty_lock);
list_del_init(&ci->i_dirty_item);
@@ -2123,8 +2122,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
- struct ceph_inode_info *ci,
- bool kick_all)
+ struct ceph_inode_info *ci)
{
struct inode *inode = &ci->vfs_inode;
struct ceph_cap *cap;
@@ -2150,9 +2148,7 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
cf = rb_entry(n, struct ceph_cap_flush, i_node);
- if (cf->tid < first_tid)
- continue;
- if (kick_all || cf->kick)
+ if (cf->tid >= first_tid)
break;
}
if (!n) {
@@ -2161,7 +2157,6 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
}
cf = rb_entry(n, struct ceph_cap_flush, i_node);
- cf->kick = false;
first_tid = cf->tid + 1;
@@ -2181,8 +2176,6 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
{
struct ceph_inode_info *ci;
struct ceph_cap *cap;
- struct ceph_cap_flush *cf;
- struct rb_node *n;
dout("early_kick_flushing_caps mds%d\n", session->s_mds);
list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
@@ -2205,16 +2198,11 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
if ((cap->issued & ci->i_flushing_caps) !=
ci->i_flushing_caps) {
spin_unlock(&ci->i_ceph_lock);
- if (!__kick_flushing_caps(mdsc, session, ci, true))
+ if (!__kick_flushing_caps(mdsc, session, ci))
continue;
spin_lock(&ci->i_ceph_lock);
}
- for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
- cf = rb_entry(n, struct ceph_cap_flush, i_node);
- cf->kick = true;
- }
-
spin_unlock(&ci->i_ceph_lock);
}
}
@@ -2228,7 +2216,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
dout("kick_flushing_caps mds%d\n", session->s_mds);
list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
- int delayed = __kick_flushing_caps(mdsc, session, ci, false);
+ int delayed = __kick_flushing_caps(mdsc, session, ci);
if (delayed) {
spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(mdsc, ci);
@@ -2261,7 +2249,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
spin_unlock(&ci->i_ceph_lock);
- delayed = __kick_flushing_caps(mdsc, session, ci, true);
+ delayed = __kick_flushing_caps(mdsc, session, ci);
if (delayed) {
spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(mdsc, ci);
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 4347039ecc18..6706bde9ad1b 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -287,7 +287,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
return 0;
spin_lock(&ctx->flc_lock);
- list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+ list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
++seen_fcntl;
if (seen_fcntl > num_fcntl_locks) {
err = -ENOSPC;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 860cc016e70d..2f2460d23a06 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -189,7 +189,6 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
struct ceph_cap_flush {
u64 tid;
int caps;
- bool kick;
struct rb_node g_node; // global
union {
struct rb_node i_node; // inode
diff --git a/fs/dcache.c b/fs/dcache.c
index 5c8ea15e73a5..9b5fe503f6cb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3442,22 +3442,15 @@ void __init vfs_caches_init_early(void)
inode_init_early();
}
-void __init vfs_caches_init(unsigned long mempages)
+void __init vfs_caches_init(void)
{
- unsigned long reserve;
-
- /* Base hash sizes on available memory, with a reserve equal to
- 150% of current kernel size */
-
- reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
- mempages -= reserve;
-
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
dcache_init();
inode_init();
- files_init(mempages);
+ files_init();
+ files_maxfiles_init();
mnt_init();
bdev_cache_init();
chrdev_init();
diff --git a/fs/file_table.c b/fs/file_table.c
index 7f9d407c7595..ad17e05ebf95 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -25,6 +25,7 @@
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>
+#include <linux/swap.h>
#include <linux/atomic.h>
@@ -308,19 +309,24 @@ void put_filp(struct file *file)
}
}
-void __init files_init(unsigned long mempages)
+void __init files_init(void)
{
- unsigned long n;
-
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+ percpu_counter_init(&nr_files, 0, GFP_KERNEL);
+}
- /*
- * One file with associated inode and dcache is very roughly 1K.
- * Per default don't use more than 10% of our memory for files.
- */
+/*
+ * One file with associated inode and dcache is very roughly 1K. Per default
+ * do not use more than 10% of our memory for files.
+ */
+void __init files_maxfiles_init(void)
+{
+ unsigned long n;
+ unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;
+
+ memreserve = min(memreserve, totalram_pages - 1);
+ n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
- n = (mempages * (PAGE_SIZE / 1024)) / 10;
files_stat.max_files = max_t(unsigned long, n, NR_FILE);
- percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}
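
files_maxfiles_init() reproduces the sizing formula that used to live in vfs_caches_init(), but against totalram_pages at a point where nr_free_pages() is meaningful: reserve 150% of currently used memory, then budget roughly 1K per file for at most 10% of the rest. A worked instance with hypothetical boot-time numbers:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define NR_FILE   8192UL /* kernel floor for files_stat.max_files */

int main(void)
{
	/* hypothetical early-boot snapshot: 4 GiB of 4 KiB pages, most free */
	unsigned long totalram_pages = 1UL << 20;
	unsigned long nr_free_pages  = 983040UL;

	unsigned long memreserve = (totalram_pages - nr_free_pages) * 3 / 2;
	if (memreserve > totalram_pages - 1)
		memreserve = totalram_pages - 1;

	/* one open file costs roughly 1K; budget 10% of non-reserved memory */
	unsigned long n = (totalram_pages - memreserve) * (PAGE_SIZE / 1024) / 10;
	unsigned long max_files = n > NR_FILE ? n : NR_FILE;

	printf("max_files = %lu\n", max_files); /* 380108 with these numbers */
	return 0;
}
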
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 80cc1b35d460..ebb5e37455a0 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2246,7 +2246,15 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
err = -EINVAL;
if (old) {
- struct fuse_dev *fud = fuse_get_dev(old);
+ struct fuse_dev *fud = NULL;
+
+ /*
+ * Check against file->f_op because CUSE
+ * uses the same ioctl handler.
+ */
+ if (old->f_op == file->f_op &&
+ old->f_cred->user_ns == file->f_cred->user_ns)
+ fud = fuse_get_dev(old);
if (fud) {
mutex_lock(&fuse_mutex);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 0cf74df68617..973c24ce59ad 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1010,6 +1010,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
if (!inode)
goto out_dentry;
+ if (creat_flags == HUGETLB_SHMFS_INODE)
+ inode->i_flags |= S_PRIVATE;
file = ERR_PTR(-ENOMEM);
if (hugetlb_reserve_pages(inode, 0,
diff --git a/fs/namei.c b/fs/namei.c
index fbbcf0993312..1c2105ed20c5 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -879,7 +879,7 @@ static inline int may_follow_link(struct nameidata *nd)
return 0;
/* Allowed if parent directory not sticky and world-writable. */
- parent = nd->path.dentry->d_inode;
+ parent = nd->inode;
if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
return 0;
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 6904213a4363..ebf90e487c75 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -212,6 +212,7 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
BUG_ON(!ls->ls_file);
if (nfsd4_layout_setlease(ls)) {
+ fput(ls->ls_file);
put_nfs4_file(fp);
kmem_cache_free(nfs4_layout_stateid_cache, ls);
return NULL;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 61dfb33f0559..95202719a1fd 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4396,9 +4396,9 @@ laundromat_main(struct work_struct *laundry)
queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
-static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
+static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
{
- if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
+ if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
return nfserr_bad_stateid;
return nfs_ok;
}
@@ -4601,9 +4601,6 @@ nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
{
__be32 status;
- status = nfs4_check_fh(fhp, ols);
- if (status)
- return status;
status = nfsd4_check_openowner_confirmed(ols);
if (status)
return status;
@@ -4690,6 +4687,9 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
status = nfserr_bad_stateid;
break;
}
+ if (status)
+ goto out;
+ status = nfs4_check_fh(fhp, s);
done:
if (!status && filpp)
@@ -4798,7 +4798,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
if (status)
return status;
- return nfs4_check_fh(current_fh, stp);
+ return nfs4_check_fh(current_fh, &stp->st_stid);
}
/*
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 54633858733a..75e0563c09d1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2143,6 +2143,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
FATTR4_WORD0_RDATTR_ERROR)
#define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
+#define WORD2_ABSENT_FS_ATTRS 0
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
static inline __be32
@@ -2171,7 +2172,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
{ return 0; }
#endif
-static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
+static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
{
/* As per referral draft: */
if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
@@ -2184,6 +2185,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
}
*bmval0 &= WORD0_ABSENT_FS_ATTRS;
*bmval1 &= WORD1_ABSENT_FS_ATTRS;
+ *bmval2 &= WORD2_ABSENT_FS_ATTRS;
return 0;
}
@@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
if (exp->ex_fslocs.migrated) {
- BUG_ON(bmval[2]);
- status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err);
+ status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
if (status)
goto out;
}
@@ -2286,8 +2287,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
}
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) ||
- bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
+ if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
+ bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
err = security_inode_getsecctx(d_inode(dentry),
&context, &contextlen);
contextsupport = (err == 0);
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 92e48c70f0f0..39ddcaf0918f 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
unsigned int flags)
{
struct fsnotify_mark *lmark, *mark;
+ LIST_HEAD(to_free);
+ /*
+ * We have to be really careful here. Anytime we drop mark_mutex, e.g.
+ * fsnotify_clear_marks_by_inode() can come in and free marks, even ones
+ * already on our to_free list, so we have to hold mark_mutex even when
+ * accessing that list. And freeing a mark requires us to drop mark_mutex.
+ * So we can reliably free only the first mark in the list. That's why we
+ * first move the marks to be freed onto the to_free list in one go and
+ * then free them one by one.
+ */
mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
- if (mark->flags & flags) {
- fsnotify_get_mark(mark);
- fsnotify_destroy_mark_locked(mark, group);
- fsnotify_put_mark(mark);
- }
+ if (mark->flags & flags)
+ list_move(&mark->g_list, &to_free);
}
mutex_unlock(&group->mark_mutex);
+
+ while (1) {
+ mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+ if (list_empty(&to_free)) {
+ mutex_unlock(&group->mark_mutex);
+ break;
+ }
+ mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
+ fsnotify_get_mark(mark);
+ fsnotify_destroy_mark_locked(mark, group);
+ mutex_unlock(&group->mark_mutex);
+ fsnotify_put_mark(mark);
+ }
}
/*
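
The rewritten teardown above splices matching marks onto a private to_free list under mark_mutex, then frees them one per lock acquisition, since destroying a mark needs the mutex dropped. A compact userspace sketch of the two-pass pattern, with a singly linked list and a pthread mutex standing in for the kernel structures:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mark { int flags; struct mark *next; };

static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct mark *marks_list;

static void clear_marks_by_flags(int flags)
{
	struct mark *to_free = NULL, **pp, *m;

	/* pass 1: under the lock, move matching marks to a private list */
	pthread_mutex_lock(&mark_mutex);
	for (pp = &marks_list; (m = *pp) != NULL; ) {
		if (m->flags & flags) {
			*pp = m->next;
			m->next = to_free;
			to_free = m;
		} else {
			pp = &m->next;
		}
	}
	pthread_mutex_unlock(&mark_mutex);

	/* pass 2: free one mark per lock acquisition; in the real code
	 * destruction itself has to drop the lock, so only the list head
	 * can be trusted each time around */
	for (;;) {
		pthread_mutex_lock(&mark_mutex);
		m = to_free;
		if (!m) {
			pthread_mutex_unlock(&mark_mutex);
			break;
		}
		to_free = m->next;
		pthread_mutex_unlock(&mark_mutex);
		free(m);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct mark *m = malloc(sizeof(*m));
		m->flags = i & 1;
		m->next = marks_list;
		marks_list = m;
	}
	clear_marks_by_flags(1);
	printf("cleared odd-flag marks\n");
	return 0;
}
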
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1a35c6139656..0f5fd9db8194 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -685,7 +685,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
u64 s = i_size_read(inode);
- sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) +
+ sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
(do_div(s, osb->s_clustersize) >> 9);
ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
@@ -910,7 +910,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));
ret = blkdev_issue_zeroout(osb->sb->s_bdev,
- p_cpos << (osb->s_clustersize_bits - 9),
+ (u64)p_cpos << (osb->s_clustersize_bits - 9),
zero_len_head >> 9, GFP_NOFS, false);
if (ret < 0)
mlog_errno(ret);
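
Both hunks above widen p_cpos to u64 before the shift: shifting a 32-bit cluster number left in 32-bit arithmetic silently drops the high bits, and casting the already-truncated result does not recover them. A standalone demonstration (the shift corresponds to hypothetical 4K clusters, clustersize_bits 12 minus 9):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t p_cpos = 0x40000000u; /* hypothetical large cluster number */
	unsigned shift = 3;            /* clustersize_bits(12) - 9 */

	uint64_t bad  = (uint64_t)(p_cpos << shift); /* shifted in 32 bits: 0 */
	uint64_t good = (uint64_t)p_cpos << shift;   /* widened first: correct */

	printf("bad=%" PRIx64 " good=%" PRIx64 "\n", bad, good);
	return 0;
}
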
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 8b23aa2f52dd..23157e40dd74 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
osb->dc_work_sequence = osb->dc_wake_sequence;
processed = osb->blocked_lock_count;
- while (processed) {
- BUG_ON(list_empty(&osb->blocked_lock_list));
-
+ /*
+ * blocked lock processing in this loop might call iput which can
+ * remove items off osb->blocked_lock_list. Downconvert up to
+ * 'processed' number of locks, but stop short if we had some
+ * removed in ocfs2_mark_lockres_freeing when downconverting.
+ */
+ while (processed && !list_empty(&osb->blocked_lock_list)) {
lockres = list_entry(osb->blocked_lock_list.next,
struct ocfs2_lock_res, l_blocked_list);
list_del_init(&lockres->l_blocked_list);
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 7e412ad74836..270221fcef42 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
* Other callers might not initialize the si_lsb field,
* so check explicitly for the right codes here.
*/
- if (kinfo->si_code == BUS_MCEERR_AR ||
- kinfo->si_code == BUS_MCEERR_AO)
+ if (kinfo->si_signo == SIGBUS &&
+ (kinfo->si_code == BUS_MCEERR_AR ||
+ kinfo->si_code == BUS_MCEERR_AO))
err |= __put_user((short) kinfo->si_addr_lsb,
&uinfo->ssi_addr_lsb);
#endif
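
si_code values are namespaced per signal, so BUS_MCEERR_* may only be interpreted once si_signo is known to be SIGBUS; the same number can mean something unrelated for another signal. A small sketch of the guarded check (the fallback constants are the Linux values, used only if the libc headers do not expose them):

#include <signal.h>
#include <stdio.h>

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

static int has_addr_lsb(const siginfo_t *si)
{
	/* the code field is only meaningful together with the signal
	 * number; another signal may reuse the same numeric code */
	return si->si_signo == SIGBUS &&
	       (si->si_code == BUS_MCEERR_AR || si->si_code == BUS_MCEERR_AO);
}

int main(void)
{
	siginfo_t si = {0};

	si.si_signo = SIGSEGV;
	si.si_code  = BUS_MCEERR_AR;       /* numerically valid, wrong signal */
	printf("%d\n", has_addr_lsb(&si)); /* 0: not a SIGBUS */

	si.si_signo = SIGBUS;
	printf("%d\n", has_addr_lsb(&si)); /* 1 */
	return 0;
}
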
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 48db6a56975f..5aa519711e0b 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -691,7 +691,7 @@ struct drm_vblank_crtc {
struct timer_list disable_timer; /* delayed disable timer */
/* vblank counter, protected by dev->vblank_time_lock for writes */
- unsigned long count;
+ u32 count;
/* vblank timestamps, protected by dev->vblank_time_lock for writes */
struct timeval time[DRM_VBLANKTIME_RBSIZE];
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 57ca8cc383a6..3b4d8a4a23fb 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -743,8 +743,6 @@ struct drm_connector {
uint8_t num_h_tile, num_v_tile;
uint8_t tile_h_loc, tile_v_loc;
uint16_t tile_h_size, tile_v_size;
-
- struct list_head destroy_list;
};
/**
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index c8fc187061de..918aa68b5199 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -168,6 +168,7 @@ struct drm_encoder_helper_funcs {
* @get_modes: get mode list for this connector
* @mode_valid: is this mode valid on the given connector? (optional)
* @best_encoder: return the preferred encoder for this connector
+ * @atomic_best_encoder: atomic version of @best_encoder
*
* The helper operations are called by the mid-layer CRTC helper.
*/
@@ -176,6 +177,8 @@ struct drm_connector_helper_funcs {
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode);
struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
+ struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
+ struct drm_connector_state *connector_state);
};
extern void drm_helper_disable_unused_functions(struct drm_device *dev);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 799050198323..53c53c459b15 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -348,6 +348,25 @@ static inline int drm_eld_mnl(const uint8_t *eld)
}
/**
+ * drm_eld_sad - Get ELD SAD structures.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
+{
+ unsigned int ver, mnl;
+
+ ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
+ if (ver != 2 && ver != 31)
+ return NULL;
+
+ mnl = drm_eld_mnl(eld);
+ if (mnl > 16)
+ return NULL;
+
+ return eld + DRM_ELD_CEA_SAD(mnl, 0);
+}
+
+/**
* drm_eld_sad_count - Get ELD SAD count.
* @eld: pointer to an eld memory structure with sad_count set
*/
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 45c39a37f924..8bc073d297db 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -172,6 +172,7 @@
{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 6c78956aa470..d2992bfa1706 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -385,8 +385,6 @@ enum {
SATA_SSP = 0x06, /* Software Settings Preservation */
SATA_DEVSLP = 0x09, /* Device Sleep */
- SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
-
/* feature values for SET_MAX */
ATA_SET_MAX_ADDR = 0x00,
ATA_SET_MAX_PASSWD = 0x01,
@@ -530,8 +528,6 @@ struct ata_bmdma_prd {
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
- ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id)
{
@@ -720,20 +716,6 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
return false;
}
-static inline bool ata_id_has_sense_reporting(const u16 *id)
-{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
- return false;
- return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
-}
-
-static inline bool ata_id_sense_reporting_enabled(const u16 *id)
-{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
- return false;
- return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
-}
-
/**
* ata_id_major_version - get ATA level of drive
* @id: Identify data
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cc008c338f5a..84b783f277f7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -55,7 +55,8 @@ struct vm_fault;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
-extern void __init files_init(unsigned long);
+extern void __init files_init(void);
+extern void __init files_maxfiles_init(void);
extern struct files_stat_struct files_stat;
extern unsigned long get_max_files(void);
@@ -2245,7 +2246,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp);
/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
-extern void __init vfs_caches_init(unsigned long);
+extern void __init vfs_caches_init(void);
extern struct kmem_cache *names_cachep;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d9a366d24e3b..6240063bdcac 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -344,7 +344,7 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU
unsigned long *domain_ids; /* bitmap of domains */
- struct dmar_domain **domains; /* ptr to domains */
+ struct dmar_domain ***domains; /* ptr to domains */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 92188b0225bb..51744bcf74ee 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -484,6 +484,7 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
void *vcpu_info);
+extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
#endif
/* Handling of unhandled and spurious interrupts: */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2e872f92dbac..bf6f117fcf4d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1003,6 +1003,34 @@ static inline int page_mapped(struct page *page)
}
/*
+ * Return true only if the page has been allocated with
+ * ALLOC_NO_WATERMARKS and the low watermark was not
+ * met implying that the system is under some pressure.
+ */
+static inline bool page_is_pfmemalloc(struct page *page)
+{
+ /*
+ * Page index cannot be this large so this must be
+ * a pfmemalloc page.
+ */
+ return page->index == -1UL;
+}
+
+/*
+ * Only to be called by the page allocator on a freshly allocated
+ * page.
+ */
+static inline void set_page_pfmemalloc(struct page *page)
+{
+ page->index = -1UL;
+}
+
+static inline void clear_page_pfmemalloc(struct page *page)
+{
+ page->index = 0;
+}
+
+/*
* Different kinds of faults, as returned by handle_mm_fault().
* Used to decide whether a process gets delivered SIGBUS or
* just gets major/minor fault counters bumped up.
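
The three helpers above overload page->index as the pfmemalloc flag: no legitimate mapping offset is (pgoff_t)-1, so that value can mark an emergency allocation without spending a dedicated bool (which the mm_types.h hunk below removes). A userspace sketch of the same field-overloading trick on a toy struct:

#include <stdbool.h>
#include <stdio.h>

struct fake_page { unsigned long index; };

static void set_page_pfmemalloc(struct fake_page *p)   { p->index = -1UL; }
static void clear_page_pfmemalloc(struct fake_page *p) { p->index = 0; }

static bool page_is_pfmemalloc(const struct fake_page *p)
{
	return p->index == -1UL; /* no valid file offset is this large */
}

int main(void)
{
	struct fake_page page = { .index = 0 };

	set_page_pfmemalloc(&page);
	printf("pfmemalloc=%d\n", page_is_pfmemalloc(&page)); /* 1 */
	clear_page_pfmemalloc(&page);
	printf("pfmemalloc=%d\n", page_is_pfmemalloc(&page)); /* 0 */
	return 0;
}
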
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0038ac7466fd..15549578d559 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -63,15 +63,6 @@ struct page {
union {
pgoff_t index; /* Our offset within mapping. */
void *freelist; /* sl[aou]b first free object */
- bool pfmemalloc; /* If set by the page allocator,
- * ALLOC_NO_WATERMARKS was set
- * and the low watermark was not
- * met implying that the system
- * is under some pressure. The
- * caller should try ensure
- * this page is only used to
- * free other pages.
- */
};
union {
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f34e040b34e9..41c93844fb1d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -631,15 +631,19 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
1 << PG_private | 1 << PG_private_2 | \
1 << PG_writeback | 1 << PG_reserved | \
1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
- 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \
+ 1 << PG_unevictable | __PG_MLOCKED | \
__PG_COMPOUND_LOCK)
/*
* Flags checked when a page is prepped for return by the page allocator.
- * Pages being prepped should not have any flags set. It they are set,
+ * Pages being prepped should not have these flags set. If they are set,
* there has been a kernel bug or struct page corruption.
+ *
+ * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
+ * alloc-free cycle to prevent from reusing the page.
*/
-#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1)
+#define PAGE_FLAGS_CHECK_AT_PREP \
+ (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
#define PAGE_FLAGS_PRIVATE \
(1 << PG_private | 1 << PG_private_2)
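
PAGE_FLAGS_CHECK_AT_PREP is now the all-flags mask with __PG_HWPOISON carved out, so a freshly prepped page may legitimately carry the poison bit and nothing else. A toy flag set showing how the mask behaves:

#include <stdio.h>

enum { PG_locked, PG_error, PG_hwpoison, NR_PAGEFLAGS }; /* toy flag set */

#define __PG_HWPOISON (1UL << PG_hwpoison)
#define PAGE_FLAGS_CHECK_AT_PREP (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

int main(void)
{
	/* a freshly prepped page may keep hwpoison, nothing else */
	unsigned long flags = __PG_HWPOISON;

	printf("bad=%lu\n", flags & PAGE_FLAGS_CHECK_AT_PREP); /* 0: accepted */
	flags |= 1UL << PG_locked;
	printf("bad=%lu\n", flags & PAGE_FLAGS_CHECK_AT_PREP); /* nonzero: bug */
	return 0;
}
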
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d6cdd6e87d53..9b88536487e6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1602,20 +1602,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/*
- * Propagate page->pfmemalloc to the skb if we can. The problem is
- * that not all callers have unique ownership of the page. If
- * pfmemalloc is set, we check the mapping as a mapping implies
- * page->index is set (index and pfmemalloc share space).
- * If it's a valid mapping, we cannot use page->pfmemalloc but we
- * do not lose pfmemalloc information as the pages would not be
- * allocated using __GFP_MEMALLOC.
+ * Propagate page pfmemalloc to the skb if we can. The problem is
+ * that not all callers have unique ownership of the page but rely
+ * on page_is_pfmemalloc doing the right thing(tm).
*/
frag->page.p = page;
frag->page_offset = off;
skb_frag_size_set(frag, size);
page = compound_head(page);
- if (page->pfmemalloc && !page->mapping)
+ if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
}
@@ -2263,7 +2259,7 @@ static inline struct page *dev_alloc_page(void)
static inline void skb_propagate_pfmemalloc(struct page *page,
struct sk_buff *skb)
{
- if (page && page->pfmemalloc)
+ if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
}
@@ -2884,11 +2880,11 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
*
* PHY drivers may accept clones of transmitted packets for
* timestamping via their phy_driver.txtstamp method. These drivers
- * must call this function to return the skb back to the stack, with
- * or without a timestamp.
+ * must call this function to return the skb back to the stack with a
+ * timestamp.
*
* @skb: clone of the original outgoing packet
- * @hwtstamps: hardware time stamps, may be NULL if not available
+ * @hwtstamps: hardware time stamps
*
*/
void skb_complete_tx_timestamp(struct sk_buff *skb,
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 45534da57759..644bdc61c387 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -74,8 +74,6 @@ enum rc_filter_type {
* @input_dev: the input child device used to communicate events to userspace
* @driver_type: specifies if protocol decoding is done in hardware or software
* @idle: used to keep track of RX state
- * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed
- * wakeup protocols is the set of all raw encoders
* @allowed_protocols: bitmask with the supported RC_BIT_* protocols
* @enabled_protocols: bitmask with the enabled RC_BIT_* protocols
* @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols
@@ -136,7 +134,6 @@ struct rc_dev {
struct input_dev *input_dev;
enum rc_driver_type driver_type;
bool idle;
- bool encode_wakeup;
u64 allowed_protocols;
u64 enabled_protocols;
u64 allowed_wakeup_protocols;
@@ -246,7 +243,6 @@ static inline void init_ir_raw_event(struct ir_raw_event *ev)
#define US_TO_NS(usec) ((usec) * 1000)
#define MS_TO_US(msec) ((msec) * 1000)
#define MS_TO_NS(msec) ((msec) * 1000 * 1000)
-#define NS_TO_US(nsec) DIV_ROUND_UP(nsec, 1000L)
void ir_raw_event_handle(struct rc_dev *dev);
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
@@ -254,9 +250,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type);
int ir_raw_event_store_with_filter(struct rc_dev *dev,
struct ir_raw_event *ev);
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
-int ir_raw_encode_scancode(u64 protocols,
- const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max);
static inline void ir_raw_event_reset(struct rc_dev *dev)
{
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 22a44c2f5963..c192e1b46cdc 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -139,6 +139,7 @@ enum vb2_io_modes {
* @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf
* @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver
* @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver
+ * @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver
* @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
* in a hardware operation
* @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but
@@ -152,6 +153,7 @@ enum vb2_buffer_state {
VB2_BUF_STATE_PREPARING,
VB2_BUF_STATE_PREPARED,
VB2_BUF_STATE_QUEUED,
+ VB2_BUF_STATE_REQUEUEING,
VB2_BUF_STATE_ACTIVE,
VB2_BUF_STATE_DONE,
VB2_BUF_STATE_ERROR,
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 4942710ef720..8d1d7fa67ec4 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -28,7 +28,6 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
u64 * info_out);
extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
-extern void scsi_set_sense_information(u8 *buf, u64 info);
extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 1ab2813273cd..8cb3a7ecd6f8 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -51,11 +51,6 @@ struct tegra_smmu_swgroup {
unsigned int reg;
};
-struct tegra_smmu_ops {
- void (*flush_dcache)(struct page *page, unsigned long offset,
- size_t size);
-};
-
struct tegra_smmu_soc {
const struct tegra_mc_client *clients;
unsigned int num_clients;
@@ -66,9 +61,8 @@ struct tegra_smmu_soc {
bool supports_round_robin_arbitration;
bool supports_request_limit;
+ unsigned int num_tlb_lines;
unsigned int num_asids;
-
- const struct tegra_smmu_ops *ops;
};
struct tegra_mc;
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index 865a141b118b..427bc41df3ae 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -141,6 +141,8 @@ struct snd_soc_tplg_ops {
int io_ops_count;
};
+#ifdef CONFIG_SND_SOC_TOPOLOGY
+
/* gets a pointer to data from the firmware block header */
static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
{
@@ -165,4 +167,14 @@ int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
const struct snd_soc_tplg_widget_events *events, int num_events,
u16 event_type);
+#else
+
+static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
+ u32 index)
+{
+ return 0;
+}
+
+#endif
+
#endif
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index efe3443572ba..413417f3707b 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -319,6 +319,7 @@
#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */
#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */
#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
+#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
/* MSI-X Table entry format */
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 785c5ca0994b..247c50bd60f0 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -18,6 +18,12 @@
#include <linux/types.h>
#include <sound/asound.h>
+#ifndef __KERNEL__
+#error This API is an early revision and not enabled in the current
+#error kernel release, it will be enabled in a future kernel version
+#error with incompatible changes to what is here.
+#endif
+
/*
* Maximum number of channels topology kcontrol can represent.
*/
@@ -77,7 +83,7 @@
#define SND_SOC_TPLG_NUM_TEXTS 16
/* ABI version */
-#define SND_SOC_TPLG_ABI_VERSION 0x2
+#define SND_SOC_TPLG_ABI_VERSION 0x3
/* Max size of TLV data */
#define SND_SOC_TPLG_TLV_SIZE 32
@@ -97,7 +103,8 @@
#define SND_SOC_TPLG_TYPE_PCM 7
#define SND_SOC_TPLG_TYPE_MANIFEST 8
#define SND_SOC_TPLG_TYPE_CODEC_LINK 9
-#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_CODEC_LINK
+#define SND_SOC_TPLG_TYPE_PDATA 10
+#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_PDATA
/* vendor block IDs - please add new vendor types to end */
#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000
@@ -137,11 +144,19 @@ struct snd_soc_tplg_private {
/*
* Kcontrol TLV data.
*/
+struct snd_soc_tplg_tlv_dbscale {
+ __le32 min;
+ __le32 step;
+ __le32 mute;
+} __attribute__((packed));
+
struct snd_soc_tplg_ctl_tlv {
- __le32 size; /* in bytes aligned to 4 */
- __le32 numid; /* control element numeric identification */
- __le32 count; /* number of elem in data array */
- __le32 data[SND_SOC_TPLG_TLV_SIZE];
+ __le32 size; /* in bytes of this structure */
+ __le32 type; /* SNDRV_CTL_TLVT_*, type of TLV */
+ union {
+ __le32 data[SND_SOC_TPLG_TLV_SIZE];
+ struct snd_soc_tplg_tlv_dbscale scale;
+ };
} __attribute__((packed));
/*
@@ -155,9 +170,11 @@ struct snd_soc_tplg_channel {
} __attribute__((packed));
/*
- * Kcontrol Operations IDs
+ * Generic Operations IDs, for binding Kcontrol or Bytes ext ops
+ * Kcontrol ops need get/put/info.
+ * Bytes ext ops need get/put.
*/
-struct snd_soc_tplg_kcontrol_ops_id {
+struct snd_soc_tplg_io_ops {
__le32 get;
__le32 put;
__le32 info;
@@ -171,8 +188,8 @@ struct snd_soc_tplg_ctl_hdr {
__le32 type;
char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
__le32 access;
- struct snd_soc_tplg_kcontrol_ops_id ops;
- __le32 tlv_size; /* non zero means control has TLV data */
+ struct snd_soc_tplg_io_ops ops;
+ struct snd_soc_tplg_ctl_tlv tlv;
} __attribute__((packed));
/*
@@ -238,6 +255,7 @@ struct snd_soc_tplg_manifest {
__le32 graph_elems; /* number of graph elements */
__le32 dai_elems; /* number of DAI elements */
__le32 dai_link_elems; /* number of DAI link elements */
+ struct snd_soc_tplg_private priv;
} __attribute__((packed));
/*
@@ -259,7 +277,6 @@ struct snd_soc_tplg_mixer_control {
__le32 invert;
__le32 num_channels;
struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN];
- struct snd_soc_tplg_ctl_tlv tlv;
struct snd_soc_tplg_private priv;
} __attribute__((packed));
@@ -303,6 +320,7 @@ struct snd_soc_tplg_bytes_control {
__le32 mask;
__le32 base;
__le32 num_regs;
+ struct snd_soc_tplg_io_ops ext_ops;
struct snd_soc_tplg_private priv;
} __attribute__((packed));
@@ -347,6 +365,7 @@ struct snd_soc_tplg_dapm_widget {
__le32 reg; /* negative reg = no direct dapm */
__le32 shift; /* bits to shift */
__le32 mask; /* non-shifted mask */
+ __le32 subseq; /* sort within widget type */
__u32 invert; /* invert the power bit */
__u32 ignore_suspend; /* kept enabled over suspend */
__u16 event_flags;
diff --git a/init/main.c b/init/main.c
index c5d5626289ce..56506553d4d8 100644
--- a/init/main.c
+++ b/init/main.c
@@ -656,7 +656,7 @@ asmlinkage __visible void __init start_kernel(void)
key_init();
security_init();
dbg_late_init();
- vfs_caches_init(totalram_pages);
+ vfs_caches_init();
signals_init();
/* rootfs populating might need page-writeback */
page_writeback_init();
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a24ba9fe5bb8..161a1807e6ef 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -142,7 +142,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
if (!leaf)
return -ENOMEM;
INIT_LIST_HEAD(&leaf->msg_list);
- info->qsize += sizeof(*leaf);
}
leaf->priority = msg->m_type;
rb_link_node(&leaf->rb_node, parent, p);
@@ -187,7 +186,6 @@ try_again:
"lazy leaf delete!\n");
rb_erase(&leaf->rb_node, &info->msg_tree);
if (info->node_cache) {
- info->qsize -= sizeof(*leaf);
kfree(leaf);
} else {
info->node_cache = leaf;
@@ -200,7 +198,6 @@ try_again:
if (list_empty(&leaf->msg_list)) {
rb_erase(&leaf->rb_node, &info->msg_tree);
if (info->node_cache) {
- info->qsize -= sizeof(*leaf);
kfree(leaf);
} else {
info->node_cache = leaf;
@@ -1034,7 +1031,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
/* Save our speculative allocation into the cache */
INIT_LIST_HEAD(&new_leaf->msg_list);
info->node_cache = new_leaf;
- info->qsize += sizeof(*new_leaf);
new_leaf = NULL;
} else {
kfree(new_leaf);
@@ -1142,7 +1138,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
/* Save our speculative allocation into the cache */
INIT_LIST_HEAD(&new_leaf->msg_list);
info->node_cache = new_leaf;
- info->qsize += sizeof(*new_leaf);
} else {
kfree(new_leaf);
}
diff --git a/ipc/sem.c b/ipc/sem.c
index bc3d530cb23e..b471e5a3863d 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
}
/*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
+
+/*
* Wait until all currently ongoing simple ops have completed.
* Caller must own sem_perm.lock.
* New simple ops cannot start, because simple ops first check
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
sem = sma->sem_base + i;
spin_unlock_wait(&sem->lock);
}
+ ipc_smp_acquire__after_spin_is_unlocked();
}
/*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
/* Then check that the global lock is free */
if (!spin_is_locked(&sma->sem_perm.lock)) {
/*
- * The ipc object lock check must be visible on all
- * cores before rechecking the complex count. Otherwise
- * we can race with another thread that does:
+ * We need a memory barrier with acquire semantics,
+ * otherwise we can race with another thread that does:
* complex_count++;
* spin_unlock(sem_perm.lock);
*/
- smp_rmb();
+ ipc_smp_acquire__after_spin_is_unlocked();
/*
* Now repeat the test of complex_count:
@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
struct sem_undo, list_proc);
- if (&un->list_proc == &ulp->list_proc)
- semid = -1;
- else
- semid = un->semid;
+ if (&un->list_proc == &ulp->list_proc) {
+ /*
+ * We must wait for freeary() before freeing this ulp,
+ * in case we raced with the last sem_undo. There is a small
+ * possibility that we exit while freeary() hasn't yet
+ * finished unlocking sem_undo_list.
+ */
+ spin_unlock_wait(&ulp->lock);
+ rcu_read_unlock();
+ break;
+ }
+ spin_lock(&ulp->lock);
+ semid = un->semid;
+ spin_unlock(&ulp->lock);
+ /* exit_sem raced with IPC_RMID, nothing to do */
if (semid == -1) {
rcu_read_unlock();
- break;
+ continue;
}
- sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
/* exit_sem raced with IPC_RMID, nothing to do */
if (IS_ERR(sma)) {
rcu_read_unlock();
@@ -2112,9 +2133,11 @@ void exit_sem(struct task_struct *tsk)
ipc_assert_locked_object(&sma->sem_perm);
list_del(&un->list_id);
- spin_lock(&ulp->lock);
+ /* we are the last process using this ulp, acquiring ulp->lock
+ * isn't required. Besides that, we are also protected against
+ * IPC_RMID as we hold sma->sem_perm lock now
+ */
list_del_rcu(&un->list_proc);
- spin_unlock(&ulp->lock);
/* perform adjustments registered in un */
for (i = 0; i < sma->sem_nsems; i++) {
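
The exit_sem() rework above snapshots un->semid under ulp->lock, pairing with the removal path that stores -1 under the same lock, so -1 is simply read as "raced with IPC_RMID, skip this entry" rather than a reason to break out. A pthreads sketch of snapshotting a field under its lock (names hypothetical):

#include <pthread.h>
#include <stdio.h>

struct sem_undo { pthread_mutex_t lock; int semid; };

static int snapshot_semid(struct sem_undo *un)
{
	int semid;

	pthread_mutex_lock(&un->lock);  /* pairs with the writer storing -1 */
	semid = un->semid;
	pthread_mutex_unlock(&un->lock);
	return semid;                   /* -1: raced with removal, skip */
}

int main(void)
{
	struct sem_undo un = { .lock = PTHREAD_MUTEX_INITIALIZER, .semid = 42 };

	printf("semid=%d\n", snapshot_semid(&un)); /* 42 */
	un.semid = -1; /* what removal would do, under the lock */
	printf("semid=%d\n", snapshot_semid(&un)); /* -1: skip */
	return 0;
}
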
diff --git a/ipc/shm.c b/ipc/shm.c
index 06e5cf2fe019..4aef24d91b63 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -545,7 +545,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
if ((shmflg & SHM_NORESERVE) &&
sysctl_overcommit_memory != OVERCOMMIT_NEVER)
acctflag = VM_NORESERVE;
- file = shmem_file_setup(name, size, acctflag);
+ file = shmem_kernel_file_setup(name, size, acctflag);
}
error = PTR_ERR(file);
if (IS_ERR(file))
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index ee14e3a35a29..f0acff0f66c9 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1223,7 +1223,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
spin_unlock_irq(&callback_lock);
/* use trialcs->mems_allowed as a temp variable */
- update_nodemasks_hier(cs, &cs->mems_allowed);
+ update_nodemasks_hier(cs, &trialcs->mems_allowed);
done:
return retval;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d3dae3419b99..e6feb5114134 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
perf_pmu_disable(event->pmu);
- event->tstamp_running += tstamp - event->tstamp_stopped;
-
perf_set_shadow_time(event, ctx, tstamp);
perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
goto out;
}
+ event->tstamp_running += tstamp - event->tstamp_stopped;
+
if (!is_software_event(event))
cpuctx->active_oncpu++;
if (!ctx->nr_active++)
@@ -3958,28 +3958,21 @@ static void perf_event_for_each(struct perf_event *event,
perf_event_for_each_child(sibling, func);
}
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
- struct perf_event_context *ctx = event->ctx;
- int ret = 0, active;
+struct period_event {
+ struct perf_event *event;
u64 value;
+};
- if (!is_sampling_event(event))
- return -EINVAL;
-
- if (copy_from_user(&value, arg, sizeof(value)))
- return -EFAULT;
-
- if (!value)
- return -EINVAL;
+static int __perf_event_period(void *info)
+{
+ struct period_event *pe = info;
+ struct perf_event *event = pe->event;
+ struct perf_event_context *ctx = event->ctx;
+ u64 value = pe->value;
+ bool active;
- raw_spin_lock_irq(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
if (event->attr.freq) {
- if (value > sysctl_perf_event_sample_rate) {
- ret = -EINVAL;
- goto unlock;
- }
-
event->attr.sample_freq = value;
} else {
event->attr.sample_period = value;
@@ -3998,11 +3991,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
event->pmu->start(event, PERF_EF_RELOAD);
perf_pmu_enable(ctx->pmu);
}
+ raw_spin_unlock(&ctx->lock);
-unlock:
+ return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+ struct period_event pe = { .event = event, };
+ struct perf_event_context *ctx = event->ctx;
+ struct task_struct *task;
+ u64 value;
+
+ if (!is_sampling_event(event))
+ return -EINVAL;
+
+ if (copy_from_user(&value, arg, sizeof(value)))
+ return -EFAULT;
+
+ if (!value)
+ return -EINVAL;
+
+ if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+ return -EINVAL;
+
+ task = ctx->task;
+ pe.value = value;
+
+ if (!task) {
+ cpu_function_call(event->cpu, __perf_event_period, &pe);
+ return 0;
+ }
+
+retry:
+ if (!task_function_call(task, __perf_event_period, &pe))
+ return 0;
+
+ raw_spin_lock_irq(&ctx->lock);
+ if (ctx->is_active) {
+ raw_spin_unlock_irq(&ctx->lock);
+ task = ctx->task;
+ goto retry;
+ }
+
+ __perf_event_period(&pe);
raw_spin_unlock_irq(&ctx->lock);
- return ret;
+ return 0;
}
static const struct file_operations perf_fops;
@@ -4740,12 +4775,20 @@ static const struct file_operations perf_fops = {
* to user-space before waking everybody up.
*/
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+ /* only the parent has fasync state */
+ if (event->parent)
+ event = event->parent;
+ return &event->fasync;
+}
+
void perf_event_wakeup(struct perf_event *event)
{
ring_buffer_wakeup(event);
if (event->pending_kill) {
- kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+ kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
event->pending_kill = 0;
}
}
@@ -6124,7 +6167,7 @@ static int __perf_event_overflow(struct perf_event *event,
else
perf_event_output(event, data, regs);
- if (event->fasync && event->pending_kill) {
+ if (*perf_event_fasync(event) && event->pending_kill) {
event->pending_wakeup = 1;
irq_work_queue(&event->pending);
}
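One visible effect of the perf_event_period() rework above is that PERF_EVENT_IOC_PERIOD now updates an actively running event via a cross-call instead of waiting for the next reschedule. A hedged userspace sketch, assuming fd came from an earlier perf_event_open() for a sampling event:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Change the sample period of a live event; with the rework above
 * the new period takes effect immediately on the target context.
 */
static int set_sample_period(int fd, uint64_t period)
{
        return ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
}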
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index b2be01b1aa9d..c8aa3f75bc4d 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
rb->aux_priv = NULL;
}
- for (pg = 0; pg < rb->aux_nr_pages; pg++)
- rb_free_aux_page(rb, pg);
+ if (rb->aux_nr_pages) {
+ for (pg = 0; pg < rb->aux_nr_pages; pg++)
+ rb_free_aux_page(rb, pg);
- kfree(rb->aux_pages);
- rb->aux_nr_pages = 0;
+ kfree(rb->aux_pages);
+ rb->aux_nr_pages = 0;
+ }
}
void rb_free_aux(struct ring_buffer *rb)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 27f4332c7f84..ae216824e8ca 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -985,6 +985,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
}
/**
+ * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
+ * @data: Pointer to interrupt specific data
+ * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
+ *
+ * Conditional, as the underlying parent chip might not implement it.
+ */
+int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
+{
+ data = data->parent_data;
+
+ if (data->chip->irq_set_type)
+ return data->chip->irq_set_type(data, type);
+
+ return -ENOSYS;
+}
+
+/**
* irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
* @data: Pointer to interrupt specific data
*
@@ -997,7 +1014,7 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data)
if (data->chip && data->chip->irq_retrigger)
return data->chip->irq_retrigger(data);
- return -ENOSYS;
+ return 0;
}
/**
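A hedged sketch of how a hierarchical irqchip driver would typically hook the new helper; the chip itself is hypothetical, but the irq_chip_*_parent callbacks are the real ones from this file:

/* Hypothetical leaf chip in an irqdomain hierarchy; the trigger
 * type is simply forwarded to the parent chip.
 */
static struct irq_chip example_chip = {
        .name           = "example",
        .irq_mask       = irq_chip_mask_parent,
        .irq_unmask     = irq_chip_unmask_parent,
        .irq_eoi        = irq_chip_eoi_parent,
        .irq_set_type   = irq_chip_set_type_parent,
};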
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 10e489c448fe..fdea0bee7b5a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -97,6 +97,7 @@ bool kthread_should_park(void)
{
return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
+EXPORT_SYMBOL_GPL(kthread_should_park);
/**
* kthread_freezable_should_stop - should this freezable kthread return now?
@@ -171,6 +172,7 @@ void kthread_parkme(void)
{
__kthread_parkme(to_kthread(current));
}
+EXPORT_SYMBOL_GPL(kthread_parkme);
static int kthread(void *_create)
{
@@ -411,6 +413,7 @@ void kthread_unpark(struct task_struct *k)
if (kthread)
__kthread_unpark(k, kthread);
}
+EXPORT_SYMBOL_GPL(kthread_unpark);
/**
* kthread_park - park a thread created by kthread_create().
@@ -441,6 +444,7 @@ int kthread_park(struct task_struct *k)
}
return ret;
}
+EXPORT_SYMBOL_GPL(kthread_park);
/**
* kthread_stop - stop a thread created by kthread_create().
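The exports above make the park protocol usable from modules. A minimal sketch of a parkable thread function, where do_work() is a hypothetical stand-in for the module's real work item:

static int example_thread(void *data)
{
        while (!kthread_should_stop()) {
                if (kthread_should_park())
                        kthread_parkme();       /* sleeps until kthread_unpark() */
                do_work(data);                  /* hypothetical work item */
        }
        return 0;
}

/* A controller can then quiesce the thread without destroying it:
 * kthread_park(task); ...reconfigure... kthread_unpark(task);
 */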
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 04ab18151cc8..df19ae4debd0 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -4,6 +4,7 @@
#include <linux/hash.h>
#include <linux/bootmem.h>
+#include <linux/debug_locks.h>
/*
* Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
@@ -286,15 +287,23 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
struct __qspinlock *l = (void *)lock;
struct pv_node *node;
+ u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
/*
* We must not unlock if SLOW, because in that case we must first
* unhash. Otherwise it would be possible to have multiple @lock
* entries, which would be BAD.
*/
- if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+ if (likely(lockval == _Q_LOCKED_VAL))
return;
+ if (unlikely(lockval != _Q_SLOW_VAL)) {
+ if (debug_locks_silent)
+ return;
+ WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+ return;
+ }
+
/*
* Since the above failed to release, this must be the SLOW path.
* Therefore start by looking up the blocked node and unhashing it.
diff --git a/kernel/module.c b/kernel/module.c
index 4d2b82e610e2..b86b7bf1be38 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -602,13 +602,16 @@ const struct kernel_symbol *find_symbol(const char *name,
}
EXPORT_SYMBOL_GPL(find_symbol);
-/* Search for module by name: must hold module_mutex. */
+/*
+ * Search for module by name: must hold module_mutex (or preempt disabled
+ * for read-only access).
+ */
static struct module *find_module_all(const char *name, size_t len,
bool even_unformed)
{
struct module *mod;
- module_assert_mutex();
+ module_assert_mutex_or_preempt();
list_for_each_entry(mod, &modules, list) {
if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
@@ -621,6 +624,7 @@ static struct module *find_module_all(const char *name, size_t len,
struct module *find_module(const char *name)
{
+ module_assert_mutex();
return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);
diff --git a/kernel/signal.c b/kernel/signal.c
index 836df8dac6cc..0f6bbbe77b46 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2748,12 +2748,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
* Other callers might not initialize the si_lsb field,
* so check explicitly for the right codes here.
*/
- if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+ if (from->si_signo == SIGBUS &&
+ (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
- err |= __put_user(from->si_lower, &to->si_lower);
- err |= __put_user(from->si_upper, &to->si_upper);
+ if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
+ err |= __put_user(from->si_lower, &to->si_lower);
+ err |= __put_user(from->si_upper, &to->si_upper);
+ }
#endif
break;
case __SI_CHLD:
@@ -3017,7 +3020,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
int, sig,
struct compat_siginfo __user *, uinfo)
{
- siginfo_t info;
+ siginfo_t info = {};
int ret = copy_siginfo_from_user32(&info, uinfo);
if (unlikely(ret))
return ret;
@@ -3061,7 +3064,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
int, sig,
struct compat_siginfo __user *, uinfo)
{
- siginfo_t info;
+ siginfo_t info = {};
if (copy_siginfo_from_user32(&info, uinfo))
return -EFAULT;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 5e097fa9faf7..84190f02b521 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -807,8 +807,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
spin_unlock(&base->lock);
base = new_base;
spin_lock(&base->lock);
- timer->flags &= ~TIMER_BASEMASK;
- timer->flags |= base->cpu;
+ WRITE_ONCE(timer->flags,
+ (timer->flags & ~TIMER_BASEMASK) | base->cpu);
}
}
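The timer change above collapses a two-step read-modify-write into a single WRITE_ONCE() so lockless readers never observe a transient flags value with the base bits cleared. The general idiom, sketched:

/* Writer (holding base->lock): publish both fields in one store. */
WRITE_ONCE(timer->flags,
           (timer->flags & ~TIMER_BASEMASK) | base->cpu);

/* Lockless reader: load the flags exactly once, then decompose. */
unsigned long flags = READ_ONCE(timer->flags);
unsigned int base_cpu = flags & TIMER_BASEMASK;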
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index df30632f0bef..ff19f66d3f7f 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -119,7 +119,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
unsigned long align_mask = 0;
if (align_order > 0)
- align_mask = 0xffffffffffffffffl >> (64 - align_order);
+ align_mask = ~0ul >> (BITS_PER_LONG - align_order);
/* Sanity check */
if (unlikely(npages == 0)) {
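The fix above matters on 32-bit, where BITS_PER_LONG is 32: the old 64-bit constant truncated, and 64 - align_order over-shifted. A small standalone C illustration of the portable form (note the caller's align_order > 0 guard is still needed, since shifting by the full word width is undefined):

#include <stdio.h>
#include <limits.h>

/* Mask of the low 'order' bits, valid for any width of long;
 * requires 0 < order <= word width.
 */
static unsigned long low_bits_mask(unsigned int order)
{
        return ~0ul >> (sizeof(unsigned long) * CHAR_BIT - order);
}

int main(void)
{
        printf("0x%lx\n", low_bits_mask(12));   /* 0xfff on 32- and 64-bit */
        return 0;
}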
diff --git a/mm/cma.h b/mm/cma.h
index 1132d733556d..17c75a4246c8 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -16,7 +16,7 @@ struct cma {
extern struct cma cma_areas[MAX_CMA_AREAS];
extern unsigned cma_area_count;
-static unsigned long cma_bitmap_maxno(struct cma *cma)
+static inline unsigned long cma_bitmap_maxno(struct cma *cma)
{
return cma->count >> cma->order_per_bit;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c107094f79ba..097c7a4bfbd9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1676,12 +1676,7 @@ static void __split_huge_page_refcount(struct page *page,
/* after clearing PageTail the gup refcount can be released */
smp_mb__after_atomic();
- /*
- * retain hwpoison flag of the poisoned tail page:
- * fix for the unsuitable process killed on Guest Machine(KVM)
- * by the memory-failure.
- */
- page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
+ page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
page_tail->flags |= (page->flags &
((1L << PG_referenced) |
(1L << PG_swapbacked) |
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 6c513a63ea84..7b28e9cdf1c7 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -2,7 +2,7 @@
* This file contains shadow memory manipulation code.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <[email protected]>
+ * Author: Andrey Ryabinin <[email protected]>
*
* Some of code borrowed from https://github.com/xairy/linux by
* Andrey Konovalov <[email protected]>
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 680ceedf810a..e07c94fbd0ac 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -2,7 +2,7 @@
* This file contains error reporting code.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <[email protected]>
+ * Author: Andrey Ryabinin <[email protected]>
*
* Some of code borrowed from https://github.com/xairy/linux by
* Andrey Konovalov <[email protected]>
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c53543d89282..1f4446a90cef 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -909,6 +909,18 @@ int get_hwpoison_page(struct page *page)
* directly for tail pages.
*/
if (PageTransHuge(head)) {
+ /*
+ * A non-anonymous thp exists only at allocation/free time. We
+ * can't handle such a case correctly, so let's give up on it.
+ * This should be better than triggering a BUG_ON when the kernel
+ * tries to touch the "partially handled" page.
+ */
+ if (!PageAnon(head)) {
+ pr_err("MCE: %#lx: non anonymous thp\n",
+ page_to_pfn(page));
+ return 0;
+ }
+
if (get_page_unless_zero(head)) {
if (PageTail(page))
get_page(page);
@@ -1134,17 +1146,11 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
}
if (!PageHuge(p) && PageTransHuge(hpage)) {
- if (!PageAnon(hpage)) {
- pr_err("MCE: %#lx: non anonymous thp\n", pfn);
- if (TestClearPageHWPoison(p))
- atomic_long_sub(nr_pages, &num_poisoned_pages);
- put_page(p);
- if (p != hpage)
- put_page(hpage);
- return -EBUSY;
- }
- if (unlikely(split_huge_page(hpage))) {
- pr_err("MCE: %#lx: thp split failed\n", pfn);
+ if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
+ if (!PageAnon(hpage))
+ pr_err("MCE: %#lx: non anonymous thp\n", pfn);
+ else
+ pr_err("MCE: %#lx: thp split failed\n", pfn);
if (TestClearPageHWPoison(p))
atomic_long_sub(nr_pages, &num_poisoned_pages);
put_page(p);
@@ -1209,9 +1215,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
if (!PageHWPoison(p)) {
printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
atomic_long_sub(nr_pages, &num_poisoned_pages);
+ unlock_page(hpage);
put_page(hpage);
- res = 0;
- goto out;
+ return 0;
}
if (hwpoison_filter(p)) {
if (TestClearPageHWPoison(p))
@@ -1535,6 +1541,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
*/
ret = __get_any_page(page, pfn, 0);
if (!PageLRU(page)) {
+ /* Drop page reference which is from __get_any_page() */
+ put_page(page);
pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
pfn, page->flags);
return -EIO;
@@ -1564,13 +1572,12 @@ static int soft_offline_huge_page(struct page *page, int flags)
unlock_page(hpage);
ret = isolate_huge_page(hpage, &pagelist);
- if (ret) {
- /*
- * get_any_page() and isolate_huge_page() takes a refcount each,
- * so need to drop one here.
- */
- put_page(hpage);
- } else {
+ /*
+ * get_any_page() and isolate_huge_page() takes a refcount each,
+ * so need to drop one here.
+ */
+ put_page(hpage);
+ if (!ret) {
pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
return -EBUSY;
}
@@ -1656,6 +1663,8 @@ static int __soft_offline_page(struct page *page, int flags)
inc_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
list_add(&page->lru, &pagelist);
+ if (!TestSetPageHWPoison(page))
+ atomic_long_inc(&num_poisoned_pages);
ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
MIGRATE_SYNC, MR_MEMORY_FAILURE);
if (ret) {
@@ -1670,9 +1679,8 @@ static int __soft_offline_page(struct page *page, int flags)
pfn, ret, page->flags);
if (ret > 0)
ret = -EIO;
- } else {
- SetPageHWPoison(page);
- atomic_long_inc(&num_poisoned_pages);
+ if (TestClearPageHWPoison(page))
+ atomic_long_dec(&num_poisoned_pages);
}
} else {
pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 26fbba7d888f..6da82bcb0a8b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -446,7 +446,7 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
int nr_pages = PAGES_PER_SECTION;
int nid = pgdat->node_id;
int zone_type;
- unsigned long flags;
+ unsigned long flags, pfn;
int ret;
zone_type = zone - pgdat->node_zones;
@@ -461,6 +461,14 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
pgdat_resize_unlock(zone->zone_pgdat, &flags);
memmap_init_zone(nr_pages, nid, zone_type,
phys_start_pfn, MEMMAP_HOTPLUG);
+
+ /* online_page_range is called later and expects pages reserved */
+ for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
+ if (!pfn_valid(pfn))
+ continue;
+
+ SetPageReserved(pfn_to_page(pfn));
+ }
return 0;
}
@@ -1269,6 +1277,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
/* create new memmap entry */
firmware_map_add_hotplug(start, start + size, "System RAM");
+ memblock_add_node(start, size, nid);
goto out;
@@ -2005,6 +2014,8 @@ void __ref remove_memory(int nid, u64 start, u64 size)
/* remove memmap entry */
firmware_map_remove(start, start + size, "System RAM");
+ memblock_free(start, size);
+ memblock_remove(start, size);
arch_remove_memory(start, size);
diff --git a/mm/migrate.c b/mm/migrate.c
index ee401e4e5ef1..eb4267107d1f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -880,7 +880,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
/* Establish migration ptes or remove ptes */
if (page_mapped(page)) {
try_to_unmap(page,
- TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+ TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
+ TTU_IGNORE_HWPOISON);
page_was_mapped = 1;
}
@@ -950,7 +951,10 @@ out:
list_del(&page->lru);
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
- if (reason != MR_MEMORY_FAILURE)
+ /* Soft-offlined page shouldn't go through lru cache list */
+ if (reason == MR_MEMORY_FAILURE)
+ put_page(page);
+ else
putback_lru_page(page);
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 22cddd3e5de8..5cccc127ef81 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2063,10 +2063,10 @@ static struct notifier_block ratelimit_nb = {
*/
void __init page_writeback_init(void)
{
+ BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
+
writeback_set_ratelimit();
register_cpu_notifier(&ratelimit_nb);
-
- BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
}
/**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ef19f22b2b7d..5b5240b7f642 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -18,7 +18,6 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
-#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
@@ -981,21 +980,21 @@ static void __init __free_pages_boot_core(struct page *page,
#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
-/* Only safe to use early in boot when initialisation is single-threaded */
+
static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
int __meminit early_pfn_to_nid(unsigned long pfn)
{
+ static DEFINE_SPINLOCK(early_pfn_lock);
int nid;
- /* The system will behave unpredictably otherwise */
- BUG_ON(system_state != SYSTEM_BOOTING);
-
+ spin_lock(&early_pfn_lock);
nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
- if (nid >= 0)
- return nid;
- /* just returns 0 */
- return 0;
+ if (nid < 0)
+ nid = 0;
+ spin_unlock(&early_pfn_lock);
+
+ return nid;
}
#endif
@@ -1060,7 +1059,15 @@ static void __init deferred_free_range(struct page *page,
__free_pages_boot_core(page, pfn, 0);
}
-static __initdata DECLARE_RWSEM(pgdat_init_rwsem);
+/* Completion tracking for deferred_init_memmap() threads */
+static atomic_t pgdat_init_n_undone __initdata;
+static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
+
+static inline void __init pgdat_init_report_one_done(void)
+{
+ if (atomic_dec_and_test(&pgdat_init_n_undone))
+ complete(&pgdat_init_all_done_comp);
+}
/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
@@ -1077,7 +1084,7 @@ static int __init deferred_init_memmap(void *data)
const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
if (first_init_pfn == ULONG_MAX) {
- up_read(&pgdat_init_rwsem);
+ pgdat_init_report_one_done();
return 0;
}
@@ -1177,7 +1184,8 @@ free_range:
pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
jiffies_to_msecs(jiffies - start));
- up_read(&pgdat_init_rwsem);
+
+ pgdat_init_report_one_done();
return 0;
}
@@ -1185,14 +1193,17 @@ void __init page_alloc_init_late(void)
{
int nid;
+ /* There will be num_node_state(N_MEMORY) threads */
+ atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
for_each_node_state(nid, N_MEMORY) {
- down_read(&pgdat_init_rwsem);
kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
}
/* Block until all are initialised */
- down_write(&pgdat_init_rwsem);
- up_write(&pgdat_init_rwsem);
+ wait_for_completion(&pgdat_init_all_done_comp);
+
+ /* Reinit limits that are based on free pages after the kernel is up */
+ files_maxfiles_init();
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
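The rwsem trick replaced above was fragile because the waiter had to guess how many readers existed; the counter-plus-completion pattern makes the count explicit. Its generic shape, as a sketch:

static atomic_t n_undone;
static DECLARE_COMPLETION(all_done);

/* Each worker calls this exactly once when finished. */
static void report_one_done(void)
{
        if (atomic_dec_and_test(&n_undone))
                complete(&all_done);
}

/* Launcher: set the count before spawning, then block.
 * atomic_set(&n_undone, nr_workers);
 * ...spawn nr_workers threads...
 * wait_for_completion(&all_done);
 */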
@@ -1285,6 +1296,10 @@ static inline int check_new_page(struct page *page)
bad_reason = "non-NULL mapping";
if (unlikely(atomic_read(&page->_count) != 0))
bad_reason = "nonzero _count";
+ if (unlikely(page->flags & __PG_HWPOISON)) {
+ bad_reason = "HWPoisoned (hardware-corrupted)";
+ bad_flags = __PG_HWPOISON;
+ }
if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
@@ -1328,12 +1343,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
set_page_owner(page, order, gfp_flags);
/*
- * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
+ * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
* allocate the page. The expectation is that the caller is taking
* steps that will free more memory. The caller should avoid the page
* being used for !PFMEMALLOC purposes.
*/
- page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
+ if (alloc_flags & ALLOC_NO_WATERMARKS)
+ set_page_pfmemalloc(page);
+ else
+ clear_page_pfmemalloc(page);
return 0;
}
@@ -3330,7 +3348,7 @@ refill:
atomic_add(size - 1, &page->_count);
/* reset page count bias and offset to start of new frag */
- nc->pfmemalloc = page->pfmemalloc;
+ nc->pfmemalloc = page_is_pfmemalloc(page);
nc->pagecnt_bias = size;
nc->offset = size;
}
@@ -5045,6 +5063,10 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
{
unsigned long zone_start_pfn, zone_end_pfn;
+ /* When hot-adding a new node, the node should be empty */
+ if (!node_start_pfn && !node_end_pfn)
+ return 0;
+
/* Get the start and end of the zone */
zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
@@ -5108,6 +5130,10 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
unsigned long zone_start_pfn, zone_end_pfn;
+ /* When hot-adding a new node, the node should be empty */
+ if (!node_start_pfn && !node_end_pfn)
+ return 0;
+
zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
diff --git a/mm/shmem.c b/mm/shmem.c
index 4caf8ed24d65..dbe0c1e8349c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3363,8 +3363,8 @@ put_path:
* shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
* kernel internal. There will be NO LSM permission checks against the
* underlying inode. So users of this interface must do LSM checks at a
- * higher layer. The one user is the big_key implementation. LSM checks
- * are provided at the key level rather than the inode level.
+ * higher layer. The users are the big_key and shm implementations. LSM
+ * checks are provided at the key or shm level rather than the inode.
* @name: name for dentry (to be seen in /proc/<pid>/maps
* @size: size to be set for the file
* @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
diff --git a/mm/slab.c b/mm/slab.c
index 200e22412a16..bbd0b47dc6a9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1603,7 +1603,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
}
/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
- if (unlikely(page->pfmemalloc))
+ if (page_is_pfmemalloc(page))
pfmemalloc_active = true;
nr_pages = (1 << cachep->gfporder);
@@ -1614,7 +1614,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
add_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_pages);
__SetPageSlab(page);
- if (page->pfmemalloc)
+ if (page_is_pfmemalloc(page))
SetPageSlabPfmemalloc(page);
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 3e5f8f29c286..86831105a09f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -37,8 +37,7 @@ struct kmem_cache *kmem_cache;
SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
SLAB_FAILSLAB)
-#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
- SLAB_CACHE_DMA | SLAB_NOTRACK)
+#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)
/*
* Merge control. If this is set then no merging of slab caches will occur.
diff --git a/mm/slub.c b/mm/slub.c
index 816df0016555..f68c0e50f3c0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
inc_slabs_node(s, page_to_nid(page), page->objects);
page->slab_cache = s;
__SetPageSlab(page);
- if (page->pfmemalloc)
+ if (page_is_pfmemalloc(page))
SetPageSlabPfmemalloc(page);
start = page_address(page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e61445dce04e..8286938c70de 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -973,22 +973,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* caller can stall after page list has been processed.
*
* 2) Global or new memcg reclaim encounters a page that is
- * not marked for immediate reclaim or the caller does not
- * have __GFP_IO. In this case mark the page for immediate
+ * not marked for immediate reclaim, or the caller does not
+ * have __GFP_FS (or __GFP_IO if it's simply going to swap,
+ * not to fs). In this case mark the page for immediate
* reclaim and continue scanning.
*
- * __GFP_IO is checked because a loop driver thread might
+ * Require may_enter_fs because we would wait on fs, which
+ * may not have submitted IO yet. And the loop driver might
* enter reclaim, and deadlock if it waits on a page for
* which it is needed to do the write (loop masks off
* __GFP_IO|__GFP_FS for this reason); but more thought
* would probably show more reasons.
*
- * Don't require __GFP_FS, since we're not going into the
- * FS, just waiting on its writeback completion. Worryingly,
- * ext4 gfs2 and xfs allocate pages with
- * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
- * may_enter_fs here is liable to OOM on them.
- *
* 3) Legacy memcg encounters a page that is not already marked
* PageReclaim. memcg does not have any dirty pages
* throttling so we could easily OOM just because too many
@@ -1005,7 +1001,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
/* Case 2 above */
} else if (sane_reclaim(sc) ||
- !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
+ !PageReclaim(page) || !may_enter_fs) {
/*
* This is slightly racy - end_page_writeback()
* might have just cleared PageReclaim, then
diff --git a/net/9p/client.c b/net/9p/client.c
index 498454b3c06c..ea79ee9a7348 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1541,6 +1541,7 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int total = 0;
+ *err = 0;
p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
@@ -1620,6 +1621,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int total = 0;
+ *err = 0;
p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
fid->fid, (unsigned long long) offset,
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index fb54e6aed096..6d0b471eede8 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1138,6 +1138,9 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
* @bat_priv: the bat priv with all the soft interface information
* @skb: packet to check
* @hdr_size: size of the encapsulation header
+ *
+ * Returns true if the packet was snooped and consumed by DAT. False if the
+ * packet has to be delivered to the interface.
*/
bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
struct sk_buff *skb, int hdr_size)
@@ -1145,7 +1148,7 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
uint16_t type;
__be32 ip_src, ip_dst;
uint8_t *hw_src, *hw_dst;
- bool ret = false;
+ bool dropped = false;
unsigned short vid;
if (!atomic_read(&bat_priv->distributed_arp_table))
@@ -1174,12 +1177,17 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
/* if this REPLY is directed to a client of mine, let's deliver the
* packet to the interface
*/
- ret = !batadv_is_my_client(bat_priv, hw_dst, vid);
+ dropped = !batadv_is_my_client(bat_priv, hw_dst, vid);
+
+ /* if this REPLY is sent on behalf of a client of mine, let's drop the
+ * packet because the client will reply by itself
+ */
+ dropped |= batadv_is_my_client(bat_priv, hw_src, vid);
out:
- if (ret)
+ if (dropped)
kfree_skb(skb);
- /* if ret == false -> packet has to be delivered to the interface */
- return ret;
+ /* if dropped == false -> deliver to the interface */
+ return dropped;
}
/**
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index bb0158620628..cffa92dd9877 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -439,6 +439,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
INIT_HLIST_NODE(&gw_node->list);
gw_node->orig_node = orig_node;
+ gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
+ gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
atomic_set(&gw_node->refcount, 1);
spin_lock_bh(&bat_priv->gw.list_lock);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index c002961da75d..a2fc843c2243 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -479,6 +479,9 @@ out:
*/
void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
{
+ if (!vlan)
+ return;
+
if (atomic_dec_and_test(&vlan->refcount)) {
spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
hlist_del_rcu(&vlan->list);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b4824951010b..5809b39c1922 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -594,6 +594,12 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
/* increase the refcounter of the related vlan */
vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
+ addr, BATADV_PRINT_VID(vid))) {
+ kfree(tt_local);
+ tt_local = NULL;
+ goto out;
+ }
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
@@ -1034,6 +1040,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
struct batadv_tt_local_entry *tt_local_entry;
uint16_t flags, curr_flags = BATADV_NO_FLAGS;
struct batadv_softif_vlan *vlan;
+ void *tt_entry_exists;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!tt_local_entry)
@@ -1061,11 +1068,22 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
* immediately purge it
*/
batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
- hlist_del_rcu(&tt_local_entry->common.hash_entry);
+
+ tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+ batadv_compare_tt,
+ batadv_choose_tt,
+ &tt_local_entry->common);
+ if (!tt_entry_exists)
+ goto out;
+
+ /* extra call to free the local tt entry */
batadv_tt_local_entry_free_ref(tt_local_entry);
/* decrease the reference held for this vlan */
vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (!vlan)
+ goto out;
+
batadv_softif_vlan_free_ref(vlan);
batadv_softif_vlan_free_ref(vlan);
@@ -1166,8 +1184,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
/* decrease the reference held for this vlan */
vlan = batadv_softif_vlan_get(bat_priv,
tt_common_entry->vid);
- batadv_softif_vlan_free_ref(vlan);
- batadv_softif_vlan_free_ref(vlan);
+ if (vlan) {
+ batadv_softif_vlan_free_ref(vlan);
+ batadv_softif_vlan_free_ref(vlan);
+ }
batadv_tt_local_entry_free_ref(tt_local);
}
@@ -3207,8 +3227,10 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
/* decrease the reference held for this vlan */
vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
- batadv_softif_vlan_free_ref(vlan);
- batadv_softif_vlan_free_ref(vlan);
+ if (vlan) {
+ batadv_softif_vlan_free_ref(vlan);
+ batadv_softif_vlan_free_ref(vlan);
+ }
batadv_tt_local_entry_free_ref(tt_local);
}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7998fb279165..92720f3fe573 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -7820,7 +7820,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
/* Make sure we copy only the significant bytes based on the
* encryption key size, and set the rest of the value to zeroes.
*/
- memcpy(ev.key.val, key->val, sizeof(key->enc_size));
+ memcpy(ev.key.val, key->val, key->enc_size);
memset(ev.key.val + key->enc_size, 0,
sizeof(ev.key.val) - key->enc_size);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 0b39dcc65b94..1285eaf5dc22 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1591,7 +1591,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
break;
}
- if (skb_trimmed)
+ if (skb_trimmed && skb_trimmed != skb)
kfree_skb(skb_trimmed);
return err;
@@ -1636,7 +1636,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
break;
}
- if (skb_trimmed)
+ if (skb_trimmed && skb_trimmed != skb)
kfree_skb(skb_trimmed);
return err;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 3da5525eb8a2..4d74a0639c4c 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void)
+ nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
+ nla_total_size(1) /* IFLA_BRPORT_LEARNING */
+ nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
+ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
+ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
+ 0;
}
@@ -506,6 +508,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
[IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
[IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
+ [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
+ [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
};
/* Change the state of the port and notify spanning tree */
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 4967262b2707..617088aee21d 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -131,12 +131,12 @@ out_noerr:
goto out;
}
-static int skb_set_peeked(struct sk_buff *skb)
+static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
struct sk_buff *nskb;
if (skb->peeked)
- return 0;
+ return skb;
/* We have to unshare an skb before modifying it. */
if (!skb_shared(skb))
@@ -144,7 +144,7 @@ static int skb_set_peeked(struct sk_buff *skb)
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
skb->prev->next = nskb;
skb->next->prev = nskb;
@@ -157,7 +157,7 @@ static int skb_set_peeked(struct sk_buff *skb)
done:
skb->peeked = 1;
- return 0;
+ return skb;
}
/**
@@ -229,8 +229,9 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
continue;
}
- error = skb_set_peeked(skb);
- if (error)
+ skb = skb_set_peeked(skb);
+ error = PTR_ERR(skb);
+ if (IS_ERR(skb))
goto unlock_err;
atomic_inc(&skb->users);
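skb_set_peeked() now follows the standard ERR_PTR convention, letting one pointer return value carry both the (possibly replaced) skb and an errno. The idiom in isolation, as a sketch with a hypothetical helper name:

#include <linux/err.h>

static struct sk_buff *clone_or_err(struct sk_buff *skb)
{
        struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

        if (!nskb)
                return ERR_PTR(-ENOMEM);        /* encode errno in the pointer */
        return nskb;
}

/* Caller:
 *     skb = clone_or_err(skb);
 *     if (IS_ERR(skb))
 *             return PTR_ERR(skb);
 */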
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1ebdf1c0d118..1cbd209192ea 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3514,8 +3514,6 @@ static int pktgen_thread_worker(void *arg)
set_freezable();
- __set_current_state(TASK_RUNNING);
-
while (!kthread_should_stop()) {
pkt_dev = next_to_run(t);
@@ -3560,7 +3558,6 @@ static int pktgen_thread_worker(void *arg)
try_to_freeze();
}
- set_current_state(TASK_INTERRUPTIBLE);
pr_debug("%s stopping all device\n", t->tsk->comm);
pktgen_stop(t);
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 87b22c0bc08c..b42f0e26f89e 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
spin_lock_bh(&queue->syn_wait_lock);
while ((req = lopt->syn_table[i]) != NULL) {
lopt->syn_table[i] = req->dl_next;
+ /* Because of the following del_timer_sync(),
+ * we must release the spinlock here
+ * or risk a deadlock.
+ */
+ spin_unlock_bh(&queue->syn_wait_lock);
atomic_inc(&lopt->qlen_dec);
- if (del_timer(&req->rsk_timer))
+ if (del_timer_sync(&req->rsk_timer))
reqsk_put(req);
reqsk_put(req);
+ spin_lock_bh(&queue->syn_wait_lock);
}
spin_unlock_bh(&queue->syn_wait_lock);
}
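Why the unlock is needed, sketched as the deadlock it avoids (assuming reqsk_timer_handler() is the rsk_timer callback and itself takes syn_wait_lock, as reqsk_queue_unlink() below suggests):

/* CPU0                                CPU1 (timer callback)
 * spin_lock_bh(&syn_wait_lock);       reqsk_timer_handler()
 * del_timer_sync(&req->rsk_timer);      spin_lock(&syn_wait_lock); <- spins
 *   waits for the callback to end ->    never acquires: deadlock
 *
 * Dropping syn_wait_lock around del_timer_sync() breaks the cycle.
 */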
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b6a19ca0f99e..7b84330e5d30 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
if (skb && frag_size) {
skb->head_frag = 1;
- if (virt_to_head_page(data)->pfmemalloc)
+ if (page_is_pfmemalloc(virt_to_head_page(data)))
skb->pfmemalloc = 1;
}
return skb;
@@ -4022,8 +4022,8 @@ EXPORT_SYMBOL(skb_checksum_setup);
* Otherwise returns the provided skb. Returns NULL in error cases
* (e.g. transport_len exceeds skb length or out-of-memory).
*
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
*/
static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
unsigned int transport_len)
@@ -4032,16 +4032,12 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
unsigned int len = skb_transport_offset(skb) + transport_len;
int ret;
- if (skb->len < len) {
- kfree_skb(skb);
+ if (skb->len < len)
return NULL;
- } else if (skb->len == len) {
+ else if (skb->len == len)
return skb;
- }
skb_chk = skb_clone(skb, GFP_ATOMIC);
- kfree_skb(skb);
-
if (!skb_chk)
return NULL;
@@ -4066,8 +4062,8 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
* If the skb has data beyond the given transport length, then a
* trimmed & cloned skb is checked and returned.
*
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
*/
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
unsigned int transport_len,
@@ -4079,23 +4075,26 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
skb_chk = skb_checksum_maybe_trim(skb, transport_len);
if (!skb_chk)
- return NULL;
+ goto err;
- if (!pskb_may_pull(skb_chk, offset)) {
- kfree_skb(skb_chk);
- return NULL;
- }
+ if (!pskb_may_pull(skb_chk, offset))
+ goto err;
__skb_pull(skb_chk, offset);
ret = skb_chkf(skb_chk);
__skb_push(skb_chk, offset);
- if (ret) {
- kfree_skb(skb_chk);
- return NULL;
- }
+ if (ret)
+ goto err;
return skb_chk;
+
+err:
+ if (skb_chk && skb_chk != skb)
+ kfree_skb(skb_chk);
+
+ return NULL;
+
}
EXPORT_SYMBOL(skb_checksum_trimmed);
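Under the new ownership rule documented above, the provided skb is no longer consumed; callers free the checked skb only when it is a distinct clone. A sketch of the calling pattern, mirroring the igmp/mld users later in this diff (validate stands in for a real skb_chkf callback):

skb_chk = skb_checksum_trimmed(skb, transport_len, validate);
if (!skb_chk)
        return -EINVAL;

/* ...inspect skb_chk... */

if (skb_chk != skb)
        kfree_skb(skb_chk);     /* the original skb stays owned by the caller */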
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 0917123790ea..35c47ddd04f0 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -756,7 +756,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
return -ENODEV;
/* Use already configured phy mode */
- p->phy_interface = p->phy->interface;
+ if (p->phy_interface == PHY_INTERFACE_MODE_NA)
+ p->phy_interface = p->phy->interface;
phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
p->phy_interface);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 37c4bb89a708..b0c6258ffb79 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2465,7 +2465,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
key = l->key + 1;
iter->pos++;
- if (pos-- <= 0)
+ if (--pos <= 0)
break;
l = NULL;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 651cdf648ec4..9fdfd9deac11 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1435,33 +1435,35 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
struct sk_buff *skb_chk;
unsigned int transport_len;
unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
- int ret;
+ int ret = -EINVAL;
transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
- skb_get(skb);
skb_chk = skb_checksum_trimmed(skb, transport_len,
ip_mc_validate_checksum);
if (!skb_chk)
- return -EINVAL;
+ goto err;
- if (!pskb_may_pull(skb_chk, len)) {
- kfree_skb(skb_chk);
- return -EINVAL;
- }
+ if (!pskb_may_pull(skb_chk, len))
+ goto err;
ret = ip_mc_check_igmp_msg(skb_chk);
- if (ret) {
- kfree_skb(skb_chk);
- return ret;
- }
+ if (ret)
+ goto err;
if (skb_trimmed)
*skb_trimmed = skb_chk;
- else
+ /* free the now-unneeded clone */
+ else if (skb_chk != skb)
kfree_skb(skb_chk);
- return 0;
+ ret = 0;
+
+err:
+ if (ret && skb_chk && skb_chk != skb)
+ kfree_skb(skb_chk);
+
+ return ret;
}
/**
@@ -1470,7 +1472,7 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
* @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
*
* Checks whether an IPv4 packet is a valid IGMP packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
*
* -EINVAL: A broken packet was detected, i.e. it violates some internet
* standard
@@ -1485,7 +1487,8 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
* to leave the original skb and its full frame unchanged (which might be
* desirable for layer 2 frame jugglers).
*
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
*/
int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
{
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 60021d0d9326..134957159c27 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
}
spin_unlock(&queue->syn_wait_lock);
- if (del_timer(&req->rsk_timer))
+ if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
reqsk_put(req);
return found;
}
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index fe8cc183411e..95ea633e8356 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -226,7 +226,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+ synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ niph, nth, tcp_hdr_size);
}
static bool
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 433231ccfb17..0330ab2e2b63 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -41,8 +41,6 @@ static int tcp_syn_retries_min = 1;
static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
-static int min_sndbuf = SOCK_MIN_SNDBUF;
-static int min_rcvbuf = SOCK_MIN_RCVBUF;
/* Update system visible IP port range */
static void set_local_port_range(struct net *net, int range[2])
@@ -530,7 +528,7 @@ static struct ctl_table ipv4_table[] = {
.maxlen = sizeof(sysctl_tcp_wmem),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_sndbuf,
+ .extra1 = &one,
},
{
.procname = "tcp_notsent_lowat",
@@ -545,7 +543,7 @@ static struct ctl_table ipv4_table[] = {
.maxlen = sizeof(sysctl_tcp_rmem),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_rcvbuf,
+ .extra1 = &one,
},
{
.procname = "tcp_app_win",
@@ -758,7 +756,7 @@ static struct ctl_table ipv4_table[] = {
.maxlen = sizeof(sysctl_udp_rmem_min),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_rcvbuf,
+ .extra1 = &one
},
{
.procname = "udp_wmem_min",
@@ -766,7 +764,7 @@ static struct ctl_table ipv4_table[] = {
.maxlen = sizeof(sysctl_udp_wmem_min),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_sndbuf,
+ .extra1 = &one
},
{ }
};
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d7d4c2b79cf2..0ea2e1c5d395 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
if (req) {
nsk = tcp_check_req(sk, skb, req, false);
- if (!nsk)
+ if (!nsk || nsk == sk)
reqsk_put(req);
return nsk;
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 83aa604f9273..1b8c5ba7d5f7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_efree;
- dst = sk->sk_rx_dst;
+ dst = READ_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
- if (dst)
- skb_dst_set_noref(skb, dst);
+ if (dst) {
+ /* DST_NOCACHE cannot be used without taking a reference */
+ if (dst->flags & DST_NOCACHE) {
+ if (likely(atomic_inc_not_zero(&dst->__refcnt)))
+ skb_dst_set(skb, dst);
+ } else {
+ skb_dst_set_noref(skb, dst);
+ }
+ }
}
int udp_rcv(struct sk_buff *skb)
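The DST_NOCACHE branch above is an instance of the "take a reference only if the object is still live" idiom. Its generic shape, with obj, use() and put() as placeholders:

if (atomic_inc_not_zero(&obj->refcnt)) {
        /* got a reference: obj cannot be freed under us */
        use(obj);
        put(obj);               /* drop the reference when done */
} else {
        /* refcount already hit zero: obj is being torn down */
}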
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 55d19861ab20..548c6237b1e7 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -172,6 +172,8 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
*ppcpu_rt = NULL;
}
}
+
+ non_pcpu_rt->rt6i_pcpu = NULL;
}
static void rt6_release(struct rt6_info *rt)
diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
index df8afe5ab31e..9405b04eecc6 100644
--- a/net/ipv6/mcast_snoop.c
+++ b/net/ipv6/mcast_snoop.c
@@ -143,34 +143,36 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
struct sk_buff *skb_chk = NULL;
unsigned int transport_len;
unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
- int ret;
+ int ret = -EINVAL;
transport_len = ntohs(ipv6_hdr(skb)->payload_len);
transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
- skb_get(skb);
skb_chk = skb_checksum_trimmed(skb, transport_len,
ipv6_mc_validate_checksum);
if (!skb_chk)
- return -EINVAL;
+ goto err;
- if (!pskb_may_pull(skb_chk, len)) {
- kfree_skb(skb_chk);
- return -EINVAL;
- }
+ if (!pskb_may_pull(skb_chk, len))
+ goto err;
ret = ipv6_mc_check_mld_msg(skb_chk);
- if (ret) {
- kfree_skb(skb_chk);
- return ret;
- }
+ if (ret)
+ goto err;
if (skb_trimmed)
*skb_trimmed = skb_chk;
- else
+ /* free the now-unneeded clone */
+ else if (skb_chk != skb)
kfree_skb(skb_chk);
- return 0;
+ ret = 0;
+
+err:
+ if (ret && skb_chk && skb_chk != skb)
+ kfree_skb(skb_chk);
+
+ return ret;
}
/**
@@ -179,7 +181,7 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
* @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
*
* Checks whether an IPv6 packet is a valid MLD packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
*
* -EINVAL: A broken packet was detected, i.e. it violates some internet
* standard
@@ -194,7 +196,8 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
* to leave the original skb and its full frame unchanged (which might be
* desirable for layer 2 frame jugglers).
*
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
*/
int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
{
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 6edb7b106de7..ebbb754c2111 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
}
static void
-synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
+synproxy_send_tcp(const struct synproxy_net *snet,
+ const struct sk_buff *skb, struct sk_buff *nskb,
struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
struct ipv6hdr *niph, struct tcphdr *nth,
unsigned int tcp_hdr_size)
{
- struct net *net = nf_ct_net((struct nf_conn *)nfct);
+ struct net *net = nf_ct_net(snet->tmpl);
struct dst_entry *dst;
struct flowi6 fl6;
@@ -83,7 +84,8 @@ free_nskb:
}
static void
-synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
+synproxy_send_client_synack(const struct synproxy_net *snet,
+ const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts)
{
struct sk_buff *nskb;
@@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
niph, nth, tcp_hdr_size);
}
@@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+ synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
niph, nth, tcp_hdr_size);
}
@@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+ synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
}
static void
@@ -241,7 +243,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+ synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ niph, nth, tcp_hdr_size);
}
static bool
@@ -301,7 +304,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
XT_SYNPROXY_OPT_SACK_PERM |
XT_SYNPROXY_OPT_ECN);
- synproxy_send_client_synack(skb, th, &opts);
+ synproxy_send_client_synack(snet, skb, th, &opts);
return NF_DROP;
} else if (th->ack && !(th->fin || th->rst || th->syn)) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 6090969937f8..d15586490cec 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -318,8 +318,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
/* allocate dst with ip6_dst_ops */
static struct rt6_info *__ip6_dst_alloc(struct net *net,
struct net_device *dev,
- int flags,
- struct fib6_table *table)
+ int flags)
{
struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
0, DST_OBSOLETE_FORCE_CHK, flags);
@@ -336,10 +335,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
static struct rt6_info *ip6_dst_alloc(struct net *net,
struct net_device *dev,
- int flags,
- struct fib6_table *table)
+ int flags)
{
- struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table);
+ struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
if (rt) {
rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
@@ -950,8 +948,7 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
ort = (struct rt6_info *)ort->dst.from;
- rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev,
- 0, ort->rt6i_table);
+ rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
if (!rt)
return NULL;
@@ -983,8 +980,7 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
struct rt6_info *pcpu_rt;
pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
- rt->dst.dev, rt->dst.flags,
- rt->rt6i_table);
+ rt->dst.dev, rt->dst.flags);
if (!pcpu_rt)
return NULL;
@@ -997,32 +993,53 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
/* It should be called with read_lock_bh(&tb6_lock) acquired */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
{
- struct rt6_info *pcpu_rt, *prev, **p;
+ struct rt6_info *pcpu_rt, **p;
p = this_cpu_ptr(rt->rt6i_pcpu);
pcpu_rt = *p;
- if (pcpu_rt)
- goto done;
+ if (pcpu_rt) {
+ dst_hold(&pcpu_rt->dst);
+ rt6_dst_from_metrics_check(pcpu_rt);
+ }
+ return pcpu_rt;
+}
+
+static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
+{
+ struct fib6_table *table = rt->rt6i_table;
+ struct rt6_info *pcpu_rt, *prev, **p;
pcpu_rt = ip6_rt_pcpu_alloc(rt);
if (!pcpu_rt) {
struct net *net = dev_net(rt->dst.dev);
- pcpu_rt = net->ipv6.ip6_null_entry;
- goto done;
+ dst_hold(&net->ipv6.ip6_null_entry->dst);
+ return net->ipv6.ip6_null_entry;
}
- prev = cmpxchg(p, NULL, pcpu_rt);
- if (prev) {
- /* If someone did it before us, return prev instead */
+ read_lock_bh(&table->tb6_lock);
+ if (rt->rt6i_pcpu) {
+ p = this_cpu_ptr(rt->rt6i_pcpu);
+ prev = cmpxchg(p, NULL, pcpu_rt);
+ if (prev) {
+ /* If someone did it before us, return prev instead */
+ dst_destroy(&pcpu_rt->dst);
+ pcpu_rt = prev;
+ }
+ } else {
+ /* rt has been removed from the fib6 tree
+ * before we had a chance to acquire the read_lock.
+ * In this case, don't bother to create a pcpu rt
+ * since rt is going away anyway. The next
+ * dst_check() will trigger a re-lookup.
+ */
dst_destroy(&pcpu_rt->dst);
- pcpu_rt = prev;
+ pcpu_rt = rt;
}
-
-done:
dst_hold(&pcpu_rt->dst);
rt6_dst_from_metrics_check(pcpu_rt);
+ read_unlock_bh(&table->tb6_lock);
return pcpu_rt;
}
@@ -1097,9 +1114,22 @@ redo_rt6_select:
rt->dst.lastuse = jiffies;
rt->dst.__use++;
pcpu_rt = rt6_get_pcpu_route(rt);
- read_unlock_bh(&table->tb6_lock);
+
+ if (pcpu_rt) {
+ read_unlock_bh(&table->tb6_lock);
+ } else {
+ /* We have to do the read_unlock first
+ * because rt6_make_pcpu_route() may trigger
+ * ip6_dst_gc(), which will take the write_lock.
+ */
+ dst_hold(&rt->dst);
+ read_unlock_bh(&table->tb6_lock);
+ pcpu_rt = rt6_make_pcpu_route(rt);
+ dst_release(&rt->dst);
+ }
return pcpu_rt;
+
}
}
@@ -1555,7 +1585,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
if (unlikely(!idev))
return ERR_PTR(-ENODEV);
- rt = ip6_dst_alloc(net, dev, 0, NULL);
+ rt = ip6_dst_alloc(net, dev, 0);
if (unlikely(!rt)) {
in6_dev_put(idev);
dst = ERR_PTR(-ENOMEM);
@@ -1742,7 +1772,8 @@ int ip6_route_add(struct fib6_config *cfg)
if (!table)
goto out;
- rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
+ rt = ip6_dst_alloc(net, NULL,
+ (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
if (!rt) {
err = -ENOMEM;
@@ -1831,6 +1862,7 @@ int ip6_route_add(struct fib6_config *cfg)
int gwa_type;
gw_addr = &cfg->fc_gateway;
+ gwa_type = ipv6_addr_type(gw_addr);
/* if gw_addr is local we will fail to detect this in case
* address is still TENTATIVE (DAD in progress). rt6_lookup()
@@ -1838,11 +1870,12 @@ int ip6_route_add(struct fib6_config *cfg)
* prefix route was assigned to, which might be non-loopback.
*/
err = -EINVAL;
- if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0))
+ if (ipv6_chk_addr_and_flags(net, gw_addr,
+ gwa_type & IPV6_ADDR_LINKLOCAL ?
+ dev : NULL, 0, 0))
goto out;
rt->rt6i_gateway = *gw_addr;
- gwa_type = ipv6_addr_type(gw_addr);
if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
struct rt6_info *grt;
@@ -2397,7 +2430,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
{
struct net *net = dev_net(idev->dev);
struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
- DST_NOCOUNT, NULL);
+ DST_NOCOUNT);
if (!rt)
return ERR_PTR(-ENOMEM);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6748c4277aff..7a6cea5e4274 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -943,7 +943,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
&ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
if (req) {
nsk = tcp_check_req(sk, skb, req, false);
- if (!nsk)
+ if (!nsk || nsk == sk)
reqsk_put(req);
return nsk;
}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 247552a7f6c2..3ece7d1034c8 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -92,14 +92,15 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
static inline void
minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
{
- int j = MAX_THR_RATES;
- struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats;
+ int j;
+ struct minstrel_rate_stats *tmp_mrs;
struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
- while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) >
- minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) {
- j--;
+ for (j = MAX_THR_RATES; j > 0; --j) {
tmp_mrs = &mi->r[tp_list[j - 1]].stats;
+ if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
+ minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
+ break;
}
if (j < MAX_THR_RATES - 1)
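
The rewritten loop is the position-search half of a bounded insertion sort: walk back from the tail of the best-throughput list until an entry that is already at least as good, then shift and insert. The same step in isolation, assuming a plain descending int array rather than minstrel's rate indices:

#include <string.h>

/* Keep list[0..n-1] sorted in descending order; insert val, dropping
 * the current last element (a simplified analogue of the tp_list update). */
static void sorted_insert(int *list, int n, int val)
{
        int j;

        /* walk back from the tail until an element at least as good */
        for (j = n; j > 0; --j)
                if (list[j - 1] >= val)
                        break;

        if (j < n) {
                memmove(&list[j + 1], &list[j],
                        (size_t)(n - 1 - j) * sizeof(*list));
                list[j] = val;
        }
}
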
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 651039ad1681..3c20d02aee73 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -292,7 +292,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
{
struct nf_conn *tmpl;
- tmpl = kzalloc(sizeof(struct nf_conn), GFP_KERNEL);
+ tmpl = kzalloc(sizeof(*tmpl), flags);
if (tmpl == NULL)
return NULL;
@@ -303,7 +303,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
if (zone) {
struct nf_conntrack_zone *nf_ct_zone;
- nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, GFP_ATOMIC);
+ nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
if (!nf_ct_zone)
goto out_free;
nf_ct_zone->id = zone;
@@ -1544,10 +1544,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
sz = nr_slots * sizeof(struct hlist_nulls_head);
hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
get_order(sz));
- if (!hash) {
- printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
+ if (!hash)
hash = vzalloc(sz);
- }
if (hash && nulls)
for (i = 0; i < nr_slots; i++)
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 71f1e9fdfa18..d7f168527903 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -353,10 +353,8 @@ static int __net_init synproxy_net_init(struct net *net)
int err = -ENOMEM;
ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
- if (IS_ERR(ct)) {
- err = PTR_ERR(ct);
+ if (!ct)
goto err1;
- }
if (!nfct_seqadj_ext_add(ct))
goto err2;
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index c6630030c912..43ddeee404e9 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -202,9 +202,10 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
goto err1;
ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
- ret = PTR_ERR(ct);
- if (IS_ERR(ct))
+ if (!ct) {
+ ret = -ENOMEM;
goto err2;
+ }
ret = 0;
if ((info->ct_events || info->exp_events) &&
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d8e2e3918ce2..67d210477863 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1096,6 +1096,11 @@ static int netlink_insert(struct sock *sk, u32 portid)
err = __netlink_insert(table, sk);
if (err) {
+ /* If the hashtable backend returns -EBUSY
+ * here, it must not escape to the caller.
+ */
+ if (unlikely(err == -EBUSY))
+ err = -EOVERFLOW;
if (err == -EEXIST)
err = -EADDRINUSE;
nlk_sk(sk)->portid = 0;
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 8a8c0b8b4f63..ee34f474ad14 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -273,28 +273,36 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
return 0;
}
-static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
- __be32 *addr, __be32 new_addr)
+static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
+ __be32 addr, __be32 new_addr)
{
int transport_len = skb->len - skb_transport_offset(skb);
+ if (nh->frag_off & htons(IP_OFFSET))
+ return;
+
if (nh->protocol == IPPROTO_TCP) {
if (likely(transport_len >= sizeof(struct tcphdr)))
inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
- *addr, new_addr, 1);
+ addr, new_addr, 1);
} else if (nh->protocol == IPPROTO_UDP) {
if (likely(transport_len >= sizeof(struct udphdr))) {
struct udphdr *uh = udp_hdr(skb);
if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace4(&uh->check, skb,
- *addr, new_addr, 1);
+ addr, new_addr, 1);
if (!uh->check)
uh->check = CSUM_MANGLED_0;
}
}
}
+}
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+ __be32 *addr, __be32 new_addr)
+{
+ update_ip_l4_checksum(skb, nh, *addr, new_addr);
csum_replace4(&nh->check, *addr, new_addr);
skb_clear_hash(skb);
*addr = new_addr;
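
The new update_ip_l4_checksum() early-returns for later fragments because only the first fragment of an IPv4 datagram carries the TCP/UDP header; "fixing up" a transport checksum in a non-first fragment would corrupt payload bytes. A userspace sketch of the two ingredients, the fragment test and the RFC 1624 incremental update that csum_replace4()/inet_proto_csum_replace4() perform (the kernel versions differ in types and also touch skb state):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

#define IP_OFFSET 0x1FFF        /* fragment-offset bits of iphdr->frag_off */

/* Non-first fragments have a non-zero offset and carry no L4 header,
 * so there is no transport checksum in them to fix up. */
static bool later_fragment(uint16_t frag_off_net)
{
        return frag_off_net & htons(IP_OFFSET);
}

/* RFC 1624 incremental update of a 16-bit Internet checksum when a
 * 32-bit field changes from `from` to `to` (all in network byte order). */
static uint16_t checksum_replace4(uint16_t check, uint32_t from, uint32_t to)
{
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~(from >> 16);
        sum += (uint16_t)~(from & 0xffff);
        sum += to >> 16;
        sum += to & 0xffff;
        while (sum >> 16)                       /* fold carries */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}
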
diff --git a/net/rds/info.c b/net/rds/info.c
index 9a6b4f66187c..140a44a5f7b7 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
/* check for all kinds of wrapping and the like */
start = (unsigned long)optval;
- if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
+ if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
ret = -EINVAL;
goto out;
}
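
The rds_info_getsockopt() change replaces a post-addition wrap test with a pre-addition bound: len is a signed int, so len + PAGE_SIZE - 1 can overflow, which is undefined behavior a compiler may assume never happens; the check must therefore reject len before the addition. The same guard in isolation, assuming a 4 KiB page:

#include <limits.h>
#include <stdbool.h>

#define PAGE_SIZE 4096

/* True if rounding len up to a whole number of pages cannot overflow:
 * len + PAGE_SIZE - 1 <= INT_MAX  <=>  len <= INT_MAX - PAGE_SIZE + 1. */
static bool page_round_ok(int len)
{
        return len >= 0 && len <= INT_MAX - PAGE_SIZE + 1;
}
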
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index a42a3b257226..268545050ddb 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -98,6 +98,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
return ret;
ret = ACT_P_CREATED;
} else {
+ if (bind)
+ return 0;
if (!ovr) {
tcf_hash_release(a, bind);
return -EEXIST;
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 21ca33c9f036..a9ba030435a2 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -288,10 +288,26 @@ begin:
static void fq_codel_reset(struct Qdisc *sch)
{
- struct sk_buff *skb;
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ int i;
- while ((skb = fq_codel_dequeue(sch)) != NULL)
- kfree_skb(skb);
+ INIT_LIST_HEAD(&q->new_flows);
+ INIT_LIST_HEAD(&q->old_flows);
+ for (i = 0; i < q->flows_cnt; i++) {
+ struct fq_codel_flow *flow = q->flows + i;
+
+ while (flow->head) {
+ struct sk_buff *skb = dequeue_head(flow);
+
+ qdisc_qstats_backlog_dec(sch, skb);
+ kfree_skb(skb);
+ }
+
+ INIT_LIST_HEAD(&flow->flowchain);
+ codel_vars_init(&flow->cvars);
+ }
+ memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
+ sch->q.qlen = 0;
}
static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 9cb8522d8d22..f3d3fb42b873 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
my $kconfig = $ARGV[1];
my $lsmod_file = $ENV{'LSMOD'};
-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
+my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
chomp @makefiles;
my %depends;
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 9ed32502470e..5ebb89687936 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -406,6 +406,7 @@ static __init int yama_init(void)
*/
if (!security_module_enable("yama"))
return 0;
+ yama_add_hooks();
#endif
pr_info("Yama: becoming mindful.\n");
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index 7bb988fa6b6d..2a153d260836 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -740,8 +740,9 @@ static int handle_in_packet(struct amdtp_stream *s,
s->data_block_counter != UINT_MAX)
data_block_counter = s->data_block_counter;
- if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) ||
- (s->data_block_counter == UINT_MAX)) {
+ if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
+ data_block_counter == s->tx_first_dbc) ||
+ s->data_block_counter == UINT_MAX) {
lost = false;
} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
lost = data_block_counter != s->data_block_counter;
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
index 26b909329e54..b2cf9e75693b 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
@@ -157,6 +157,8 @@ struct amdtp_stream {
/* quirk: fixed interval of dbc between previous/current packets. */
unsigned int tx_dbc_interval;
+ /* quirk: indicate the value of dbc field in a first packet. */
+ unsigned int tx_first_dbc;
bool callbacked;
wait_queue_head_t callback_wait;
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index c670db4eee70..c94a432f7cc6 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -248,10 +248,16 @@ efw_probe(struct fw_unit *unit,
err = get_hardware_info(efw);
if (err < 0)
goto error;
- if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2)
- efw->is_af2 = true;
+ /* AudioFire8 (since 2009) and AudioFirePre8 */
if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
efw->is_af9 = true;
+ /* These models use the same firmware. */
+ if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
+ entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
+ entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
+ entry->model_id == MODEL_GIBSON_RIP ||
+ entry->model_id == MODEL_GIBSON_GOLDTOP)
+ efw->is_fireworks3 = true;
snd_efw_proc_init(efw);
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index c33252b7bc84..084d414b228c 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -70,8 +70,8 @@ struct snd_efw {
bool resp_addr_changable;
/* for quirks */
- bool is_af2;
bool is_af9;
+ bool is_fireworks3;
u32 firmware_version;
unsigned int midi_in_ports;
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index a0762dd6231e..7e353f1f7bff 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -172,9 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
/* Fireworks reset dbc at bus reset. */
efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
- /* AudioFire2 starts packets with non-zero dbc. */
- if (efw->is_af2)
- efw->tx_stream.flags |= CIP_SKIP_INIT_DBC_CHECK;
+ /*
+ * But recent firmware starts packets with a non-zero dbc.
+ * Driver version 5.7.6 installs firmware version 5.7.3.
+ */
+ if (efw->is_fireworks3 &&
+ (efw->firmware_version == 0x5070000 ||
+ efw->firmware_version == 0x5070300 ||
+ efw->firmware_version == 0x5080000))
+ efw->tx_stream.tx_first_dbc = 0x02;
/* AudioFire9 always reports wrong dbs. */
if (efw->is_af9)
efw->tx_stream.flags |= CIP_WRONG_DBS;
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index b2da19b60f4e..358f16195483 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -44,16 +44,10 @@ int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *ebus)
offset = snd_hdac_chip_readl(bus, LLCH);
- if (offset < 0)
- return -EIO;
-
/* Let's walk the linked capabilities list */
do {
cur_cap = _snd_hdac_chip_read(l, bus, offset);
- if (cur_cap < 0)
- return -EIO;
-
dev_dbg(bus->dev, "Capability version: 0x%x\n",
((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF));
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index f8ffbdbb450d..3de47dd1a76d 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -299,7 +299,7 @@ hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus,
if (stream->direction != substream->stream)
continue;
- if (stream->opened) {
+ if (!stream->opened) {
if (!hstream->decoupled)
snd_hdac_ext_stream_decouple(ebus, hstream, true);
res = hstream;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c456c04e0928..374ea53288ca 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5189,6 +5189,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+ SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5290,6 +5292,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 6492bca8c70f..4ca12665ff73 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -88,7 +88,7 @@ static int dac_mute_put(struct snd_kcontrol *ctl,
int changed;
mutex_lock(&chip->mutex);
- changed = !value->value.integer.value[0] != chip->dac_mute;
+ changed = (!value->value.integer.value[0]) != chip->dac_mute;
if (changed) {
chip->dac_mute = !value->value.integer.value[0];
chip->model.update_dac_mute(chip);
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 2ae9619443d1..1d651b8a8957 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -30,6 +30,9 @@ config SND_SOC_GENERIC_DMAENGINE_PCM
bool
select SND_DMAENGINE_PCM
+config SND_SOC_TOPOLOGY
+ bool
+
# All the supported SoCs
source "sound/soc/adi/Kconfig"
source "sound/soc/atmel/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index e189903fabf4..669648b41d30 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -1,6 +1,9 @@
snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o
+
+ifneq ($(CONFIG_SND_SOC_TOPOLOGY),)
snd-soc-core-objs += soc-topology.o
+endif
ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
snd-soc-core-objs += soc-generic-dmaengine-pcm.o
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index d7ec4756e45b..8e36198474d9 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -457,14 +457,14 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
case SND_SOC_DAIFMT_RIGHT_J:
if (params_width(params) == 16) {
snd_soc_update_bits(codec, CS4265_DAC_CTL,
- CS4265_DAC_CTL_DIF, (1 << 5));
+ CS4265_DAC_CTL_DIF, (2 << 4));
snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
- CS4265_SPDIF_CTL2_DIF, (1 << 7));
+ CS4265_SPDIF_CTL2_DIF, (2 << 6));
} else {
snd_soc_update_bits(codec, CS4265_DAC_CTL,
- CS4265_DAC_CTL_DIF, (3 << 5));
+ CS4265_DAC_CTL_DIF, (3 << 4));
snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
- CS4265_SPDIF_CTL2_DIF, (1 << 7));
+ CS4265_SPDIF_CTL2_DIF, (3 << 6));
}
break;
case SND_SOC_DAIFMT_LEFT_J:
@@ -473,7 +473,7 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
snd_soc_update_bits(codec, CS4265_ADC_CTL,
CS4265_ADC_DIF, 0);
snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
- CS4265_SPDIF_CTL2_DIF, (1 << 6));
+ CS4265_SPDIF_CTL2_DIF, 0);
break;
default:
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index e9cc3aae5366..961bd7e5877e 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3341,6 +3341,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
break;
case RT5645_DMIC_DATA_GPIO5:
+ regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
+ RT5645_I2S2_DAC_PIN_MASK, RT5645_I2S2_DAC_PIN_GPIO);
regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1,
RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5);
regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 0353a6a273ab..278bb9f464c4 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -1693,6 +1693,10 @@
#define RT5645_GP6_PIN_SFT 6
#define RT5645_GP6_PIN_GPIO6 (0x0 << 6)
#define RT5645_GP6_PIN_DMIC2_SDA (0x1 << 6)
+#define RT5645_I2S2_DAC_PIN_MASK (0x1 << 4)
+#define RT5645_I2S2_DAC_PIN_SFT 4
+#define RT5645_I2S2_DAC_PIN_I2S (0x0 << 4)
+#define RT5645_I2S2_DAC_PIN_GPIO (0x1 << 4)
#define RT5645_GP8_PIN_MASK (0x1 << 3)
#define RT5645_GP8_PIN_SFT 3
#define RT5645_GP8_PIN_GPIO8 (0x0 << 3)
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index 4c01bb43928d..5bbaa667bec1 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -701,6 +701,8 @@ int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata)
if (byt == NULL)
return -ENOMEM;
+ byt->dev = dev;
+
ipc = &byt->ipc;
ipc->dev = dev;
ipc->ops.tx_msg = byt_tx_msg;
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index f95f271aab0c..f6efa9d4acad 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -2119,6 +2119,8 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
if (hsw == NULL)
return -ENOMEM;
+ hsw->dev = dev;
+
ipc = &hsw->ipc;
ipc->dev = dev;
ipc->ops.tx_msg = hsw_tx_msg;
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 59ac211f8fe7..31068b8f3db0 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -33,6 +33,7 @@
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/soc-topology.h>
+#include <sound/tlv.h>
/*
* We make several passes over the data (since it won't necessarily be ordered)
@@ -534,7 +535,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
k->put = bops[i].put;
if (k->get == NULL && bops[i].id == hdr->ops.get)
k->get = bops[i].get;
- if (k->info == NULL && ops[i].id == hdr->ops.info)
+ if (k->info == NULL && bops[i].id == hdr->ops.info)
k->info = bops[i].info;
}
@@ -579,28 +580,51 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
return 0;
}
+
+static int soc_tplg_create_tlv_db_scale(struct soc_tplg *tplg,
+ struct snd_kcontrol_new *kc, struct snd_soc_tplg_tlv_dbscale *scale)
+{
+ unsigned int item_len = 2 * sizeof(unsigned int);
+ unsigned int *p;
+
+ p = kzalloc(item_len + 2 * sizeof(unsigned int), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ p[0] = SNDRV_CTL_TLVT_DB_SCALE;
+ p[1] = item_len;
+ p[2] = scale->min;
+ p[3] = (scale->step & TLV_DB_SCALE_MASK)
+ | (scale->mute ? TLV_DB_SCALE_MUTE : 0);
+
+ kc->tlv.p = (void *)p;
+ return 0;
+}
+
static int soc_tplg_create_tlv(struct soc_tplg *tplg,
- struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_tlv *tplg_tlv)
+ struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_hdr *tc)
{
- struct snd_ctl_tlv *tlv;
- int size;
+ struct snd_soc_tplg_ctl_tlv *tplg_tlv;
- if (tplg_tlv->count == 0)
+ if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE))
return 0;
- size = ((tplg_tlv->count + (sizeof(unsigned int) - 1)) &
- ~(sizeof(unsigned int) - 1));
- tlv = kzalloc(sizeof(*tlv) + size, GFP_KERNEL);
- if (tlv == NULL)
- return -ENOMEM;
-
- dev_dbg(tplg->dev, " created TLV type %d size %d bytes\n",
- tplg_tlv->numid, size);
+ if (tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
+ kc->tlv.c = snd_soc_bytes_tlv_callback;
+ } else {
+ tplg_tlv = &tc->tlv;
+ switch (tplg_tlv->type) {
+ case SNDRV_CTL_TLVT_DB_SCALE:
+ return soc_tplg_create_tlv_db_scale(tplg, kc,
+ &tplg_tlv->scale);
- tlv->numid = tplg_tlv->numid;
- tlv->length = size;
- memcpy(&tlv->tlv[0], tplg_tlv->data, size);
- kc->tlv.p = (void *)tlv;
+ /* TODO: add support for other TLV types */
+ default:
+ dev_dbg(tplg->dev, "Unsupported TLV type %d\n",
+ tplg_tlv->type);
+ return -EINVAL;
+ }
+ }
return 0;
}
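
soc_tplg_create_tlv_db_scale() above emits the standard four-word ALSA dB-scale TLV: type, payload length in bytes, minimum in 0.01 dB units, and a step word whose low bits are the step size with an optional mute flag. A small decoder for that layout, using the constants as defined in include/sound/tlv.h:

#include <stdio.h>

#define SNDRV_CTL_TLVT_DB_SCALE 1
#define TLV_DB_SCALE_MASK       0xffff
#define TLV_DB_SCALE_MUTE       0x10000

/* Decode the four-word TLV built by soc_tplg_create_tlv_db_scale():
 * p[0] type, p[1] payload bytes, p[2] min (0.01 dB), p[3] step | mute. */
static void print_db_scale(const unsigned int *p)
{
        if (p[0] != SNDRV_CTL_TLVT_DB_SCALE ||
            p[1] != 2 * sizeof(unsigned int))
                return;

        printf("min %d x0.01dB, step %u x0.01dB, mute %s\n",
               (int)p[2], p[3] & TLV_DB_SCALE_MASK,
               (p[3] & TLV_DB_SCALE_MUTE) ? "yes" : "no");
}
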
@@ -772,7 +796,7 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count,
}
/* create any TLV data */
- soc_tplg_create_tlv(tplg, &kc, &mc->tlv);
+ soc_tplg_create_tlv(tplg, &kc, &mc->hdr);
/* register control here */
err = soc_tplg_add_kcontrol(tplg, &kc,
@@ -1350,6 +1374,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
template.reg = w->reg;
template.shift = w->shift;
template.mask = w->mask;
+ template.subseq = w->subseq;
template.on_val = w->invert ? 0 : 1;
template.off_val = w->invert ? 1 : 0;
template.ignore_suspend = w->ignore_suspend;
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 1fab9778807a..0450593980fd 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -638,7 +638,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
int err = -ENODEV;
down_read(&chip->shutdown_rwsem);
- if (chip->probing && chip->in_pm)
+ if (chip->probing || chip->in_pm)
err = 0;
else if (!chip->shutdown)
err = usb_autopm_get_interface(chip->pm_intf);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index de165a1b9240..20b56eb987f8 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -521,6 +521,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
goto out_child;
}
+ /*
+ * Normally perf_session__new would do this, but it doesn't have the
+ * evlist.
+ */
+ if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
+ pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
+ rec->tool.ordered_events = false;
+ }
+
if (!rec->evlist->nr_groups)
perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
@@ -965,9 +974,11 @@ static struct record record = {
.tool = {
.sample = process_sample_event,
.fork = perf_event__process_fork,
+ .exit = perf_event__process_exit,
.comm = perf_event__process_comm,
.mmap = perf_event__process_mmap,
.mmap2 = perf_event__process_mmap2,
+ .ordered_events = true,
},
};
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index ecf319728f25..6135cc07213c 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -601,8 +601,8 @@ static void display_sig(int sig __maybe_unused)
static void display_setup_sig(void)
{
- signal(SIGSEGV, display_sig);
- signal(SIGFPE, display_sig);
+ signal(SIGSEGV, sighandler_dump_stack);
+ signal(SIGFPE, sighandler_dump_stack);
signal(SIGINT, display_sig);
signal(SIGQUIT, display_sig);
signal(SIGTERM, display_sig);
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 094ddaee104c..d31fac19c30b 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -638,7 +638,7 @@ ifndef DESTDIR
prefix ?= $(HOME)
endif
bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
+bindir = $(abspath $(prefix)/$(bindir_relative))
mandir = share/man
infodir = share/info
perfexecdir = libexec/perf-core
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 7ff682770fdb..f1a4c833121e 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1387,6 +1387,24 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
event->fork.ptid);
int err = 0;
+ if (dump_trace)
+ perf_event__fprintf_task(event, stdout);
+
+ /*
+ * There may be an existing thread that is not actually the parent,
+ * either because we are processing events out of order, or because the
+ * (fork) event that would have removed the thread was lost. Assume the
+ * latter case and continue on as best we can.
+ */
+ if (parent->pid_ != (pid_t)event->fork.ppid) {
+ dump_printf("removing erroneous parent thread %d/%d\n",
+ parent->pid_, parent->tid);
+ machine__remove_thread(machine, parent);
+ thread__put(parent);
+ parent = machine__findnew_thread(machine, event->fork.ppid,
+ event->fork.ptid);
+ }
+
/* if a thread currently exists for the thread id remove it */
if (thread != NULL) {
machine__remove_thread(machine, thread);
@@ -1395,8 +1413,6 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
thread = machine__findnew_thread(machine, event->fork.pid,
event->fork.tid);
- if (dump_trace)
- perf_event__fprintf_task(event, stdout);
if (thread == NULL || parent == NULL ||
thread__fork(thread, parent, sample->time) < 0) {
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 53e8bb7bc852..2a5d8d7698ae 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -85,7 +85,7 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
- update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
else if (perf_stat_evsel__is(counter, TRANSACTION_START))
update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
else if (perf_stat_evsel__is(counter, ELISION_START))
@@ -398,20 +398,18 @@ void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
" # %5.2f%% aborted cycles ",
100.0 * ((total2-avg) / total));
} else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
- avg > 0 &&
runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
- if (total)
+ if (avg)
ratio = total / avg;
fprintf(out, " # %8.0f cycles / transaction ", ratio);
} else if (perf_stat_evsel__is(evsel, ELISION_START) &&
- avg > 0 &&
runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
- if (total)
+ if (avg)
ratio = total / avg;
fprintf(out, " # %8.0f cycles / elision ", ratio);
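
The stat-shadow fix moves the guard from the numerator to the divisor: ratio = total / avg must be protected by avg being non-zero, and with that in place the old avg > 0 condition in the if() becomes redundant. The pattern in isolation:

/* Guard the divisor: total / avg is only meaningful when avg != 0,
 * which is what the old `if (total)` test failed to ensure. */
static double safe_ratio(double total, double avg)
{
        return avg != 0.0 ? total / avg : 0.0;
}
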
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 28c4b746baa1..0a9ae8014729 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -191,6 +191,12 @@ static int thread__clone_map_groups(struct thread *thread,
if (thread->pid_ == parent->pid_)
return 0;
+ if (thread->mg == parent->mg) {
+ pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
+ thread->pid_, thread->tid, parent->pid_, parent->tid);
+ return 0;
+ }
+
/* But this one is new process, copy maps. */
for (i = 0; i < MAP__NR_TYPES; ++i)
if (map_groups__clone(thread->mg, parent->mg, i) < 0)