-rw-r--r--.mailmap2
-rw-r--r--CREDITS8
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst15
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt7
-rw-r--r--Documentation/admin-guide/pm/amd-pstate.rst78
-rw-r--r--Documentation/devicetree/bindings/.gitignore5
-rw-r--r--Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml5
-rw-r--r--Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml81
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml2
-rw-r--r--Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml18
-rw-r--r--Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml4
-rw-r--r--Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml2
-rw-r--r--Documentation/networking/device_drivers/ethernet/intel/ice.rst2
-rw-r--r--Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst2
-rw-r--r--Documentation/power/suspend-and-interrupts.rst2
-rw-r--r--Documentation/virt/kvm/api.rst10
-rw-r--r--MAINTAINERS31
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-ibm-bonnell.dts2
-rw-r--r--arch/arm/boot/dts/imx7d-smegw01.dts3
-rw-r--r--arch/arm/boot/dts/nuvoton-wpcm450.dtsi1
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi1
-rw-r--r--arch/arm/boot/dts/stihxxx-b2120.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-axg.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx.dtsi6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dxl.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts3
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts3
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts7
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts11
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts5
-rw-r--r--arch/arm64/boot/dts/rockchip/rk356x.dtsi1
-rw-r--r--arch/arm64/kvm/vgic/vgic-its.c13
-rw-r--r--arch/arm64/kvm/vgic/vgic-v3.c4
-rw-r--r--arch/arm64/kvm/vgic/vgic.h14
-rw-r--r--arch/ia64/kernel/sys_ia64.c7
-rw-r--r--arch/mips/include/asm/mach-loongson32/cpufreq.h18
-rw-r--r--arch/mips/include/asm/mach-loongson32/platform.h1
-rw-r--r--arch/mips/loongson32/common/platform.c16
-rw-r--r--arch/mips/loongson32/ls1b/board.c1
-rw-r--r--arch/parisc/kernel/firmware.c5
-rw-r--r--arch/parisc/kernel/ptrace.c21
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush.h2
-rw-r--r--arch/powerpc/include/asm/hw_irq.h43
-rw-r--r--arch/powerpc/kernel/dbell.c2
-rw-r--r--arch/powerpc/kernel/head_85xx.S3
-rw-r--r--arch/powerpc/kernel/interrupt.c6
-rw-r--r--arch/powerpc/kernel/irq.c2
-rw-r--r--arch/powerpc/kernel/time.c2
-rw-r--r--arch/powerpc/kexec/file_load_64.c12
-rw-r--r--arch/powerpc/kvm/booke.c5
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c24
-rw-r--r--arch/powerpc/perf/imc-pmu.c14
-rw-r--r--arch/riscv/Makefile3
-rw-r--r--arch/riscv/include/asm/hwcap.h3
-rw-r--r--arch/riscv/include/asm/pgtable.h4
-rw-r--r--arch/riscv/include/asm/vdso/processor.h28
-rw-r--r--arch/riscv/kernel/probes/kprobes.c26
-rw-r--r--arch/riscv/kernel/stacktrace.c3
-rw-r--r--arch/riscv/mm/cacheflush.c4
-rw-r--r--arch/riscv/mm/pgtable.c20
-rw-r--r--arch/s390/boot/decompressor.c2
-rw-r--r--arch/sh/kernel/vmlinux.lds.S1
-rw-r--r--arch/x86/include/asm/debugreg.h26
-rw-r--r--arch/x86/include/asm/intel-family.h2
-rw-r--r--arch/x86/kernel/kprobes/core.c2
-rw-r--r--arch/x86/kernel/process.c1
-rw-r--r--block/bfq-cgroup.c2
-rw-r--r--block/bfq-iosched.c4
-rw-r--r--block/blk-cgroup.c4
-rw-r--r--block/blk-mq.c5
-rw-r--r--certs/Makefile4
-rw-r--r--drivers/acpi/cppc_acpi.c67
-rw-r--r--drivers/acpi/nfit/core.c2
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/base/power/domain.c5
-rw-r--r--drivers/base/power/runtime.c4
-rw-r--r--drivers/block/ublk_drv.c2
-rw-r--r--drivers/bus/sunxi-rsb.c8
-rw-r--r--drivers/clk/ingenic/jz4760-cgu.c18
-rw-r--r--drivers/clk/microchip/clk-mpfs-ccc.c10
-rw-r--r--drivers/cpufreq/Kconfig10
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/amd-pstate.c705
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c5
-rw-r--r--drivers/cpufreq/cpufreq.c10
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c4
-rw-r--r--drivers/cpufreq/loongson1-cpufreq.c222
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c5
-rw-r--r--drivers/cpufreq/omap-cpufreq.c4
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c30
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c3
-rw-r--r--drivers/cpuidle/Kconfig1
-rw-r--r--drivers/cpuidle/Kconfig.arm10
-rw-r--r--drivers/cpuidle/cpuidle-haltpoll.c2
-rw-r--r--drivers/cpuidle/cpuidle-psci-domain.c7
-rw-r--r--drivers/cpuidle/cpuidle-psci.c3
-rw-r--r--drivers/cpuidle/driver.c4
-rw-r--r--drivers/cpuidle/governors/teo.c102
-rw-r--r--drivers/cpuidle/sysfs.c6
-rw-r--r--drivers/cxl/core/region.c12
-rw-r--r--drivers/dax/super.c2
-rw-r--r--drivers/dma-buf/dma-fence.c2
-rw-r--r--drivers/firewire/core-cdev.c4
-rw-r--r--drivers/firmware/efi/efi.c2
-rw-r--r--drivers/firmware/efi/libstub/arm64.c9
-rw-r--r--drivers/firmware/efi/memattr.c2
-rw-r--r--drivers/fpga/intel-m10-bmc-sec-update.c17
-rw-r--r--drivers/fpga/stratix10-soc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c58
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c12
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c14
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h29
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c1
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c1
-rw-r--r--drivers/gpu/drm/drm_client.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c12
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c14
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c14
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c74
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.h4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/firmware.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c2
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c16
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c18
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c5
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_client.c13
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.h1
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-elecom.c16
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-input.c4
-rw-r--r--drivers/hid/hid-logitech-hidpp.c3
-rw-r--r--drivers/hid/hid-quirks.c3
-rw-r--r--drivers/hv/hv_balloon.c2
-rw-r--r--drivers/idle/intel_idle.c2
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c1
-rw-r--r--drivers/iio/adc/berlin2-adc.c4
-rw-r--r--drivers/iio/adc/imx8qxp-adc.c11
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c1
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c32
-rw-r--r--drivers/iio/adc/xilinx-ams.c2
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c1
-rw-r--r--drivers/iio/imu/fxos8700_core.c111
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig1
-rw-r--r--drivers/iio/light/cm32181.c9
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c8
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c7
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c9
-rw-r--r--drivers/infiniband/hw/irdma/cm.c3
-rw-r--r--drivers/infiniband/hw/mana/qp.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c8
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c3
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c2
-rw-r--r--drivers/net/bonding/bond_debugfs.c2
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c1
-rw-r--r--drivers/net/dsa/Kconfig7
-rw-r--r--drivers/net/dsa/mt7530.c26
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c31
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c9
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c16
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c25
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c35
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c30
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c3
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.h1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_sgmii.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c90
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c25
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c6
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c4
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c37
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c24
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c194
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h12
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c9
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h12
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c68
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c29
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c87
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c7
-rw-r--r--drivers/net/ethernet/sfc/efx.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c3
-rw-r--r--drivers/net/hyperv/netvsc.c11
-rw-r--r--drivers/net/phy/dp83822.c6
-rw-r--r--drivers/net/phy/meson-gxl.c4
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/phy/phylink.c5
-rw-r--r--drivers/net/usb/plusb.c4
-rw-r--r--drivers/net/virtio_net.c8
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c11
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c29
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c16
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c2
-rw-r--r--drivers/nvdimm/Kconfig19
-rw-r--r--drivers/nvdimm/nd.h2
-rw-r--r--drivers/nvdimm/pfn_devs.c42
-rw-r--r--drivers/nvme/host/auth.c14
-rw-r--r--drivers/nvme/host/core.c5
-rw-r--r--drivers/nvme/target/fc.c4
-rw-r--r--drivers/nvmem/brcm_nvram.c3
-rw-r--r--drivers/nvmem/core.c60
-rw-r--r--drivers/nvmem/qcom-spmi-sdam.c1
-rw-r--r--drivers/nvmem/sunxi_sid.c15
-rw-r--r--drivers/of/address.c21
-rw-r--r--drivers/of/fdt.c6
-rw-r--r--drivers/of/platform.c12
-rw-r--r--drivers/opp/Kconfig1
-rw-r--r--drivers/opp/debugfs.c2
-rw-r--r--drivers/parisc/pdc_stable.c9
-rw-r--r--drivers/pci/pci.c7
-rw-r--r--drivers/pci/pci.h4
-rw-r--r--drivers/pci/pcie/aspm.c111
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c13
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c16
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8195.c4
-rw-r--r--drivers/pinctrl/pinctrl-amd.c1
-rw-r--r--drivers/pinctrl/pinctrl-single.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c2
-rw-r--r--drivers/platform/x86/amd/Kconfig1
-rw-r--r--drivers/platform/x86/amd/pmf/auto-mode.c9
-rw-r--r--drivers/platform/x86/amd/pmf/cnqf.c14
-rw-r--r--drivers/platform/x86/amd/pmf/core.c32
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h3
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c28
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c9
-rw-r--r--drivers/powercap/idle_inject.c6
-rw-r--r--drivers/powercap/intel_rapl_common.c13
-rw-r--r--drivers/powercap/powercap_sys.c14
-rw-r--r--drivers/rtc/rtc-efi.c48
-rw-r--r--drivers/rtc/rtc-sunplus.c4
-rw-r--r--drivers/scsi/scsi.c2
-rw-r--r--drivers/scsi/scsi_scan.c7
-rw-r--r--drivers/scsi/scsi_sysfs.c2
-rw-r--r--drivers/spi/spi-dw-core.c2
-rw-r--r--drivers/spi/spidev.c22
-rw-r--r--drivers/tty/serial/8250/8250_dma.c21
-rw-r--r--drivers/tty/serial/stm32-usart.c33
-rw-r--r--drivers/tty/vt/vc_screen.c9
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c2
-rw-r--r--drivers/usb/fotg210/fotg210-udc.c1
-rw-r--r--drivers/usb/gadget/function/f_fs.c4
-rw-r--r--drivers/usb/gadget/function/f_uac2.c1
-rw-r--r--drivers/usb/gadget/function/u_ether.c4
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c1
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c1
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c1
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c1
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c1
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c1
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c1
-rw-r--r--drivers/usb/gadget/udc/max3420_udc.c1
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c1
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c1
-rw-r--r--drivers/usb/gadget/udc/net2272.c1
-rw-r--r--drivers/usb/gadget/udc/net2280.c1
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c1
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c1
-rw-r--r--drivers/usb/gadget/udc/snps_udc_core.c1
-rw-r--r--drivers/usb/typec/altmodes/displayport.c8
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c9
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c2
-rw-r--r--drivers/vhost/net.c3
-rw-r--r--drivers/vhost/scsi.c21
-rw-r--r--drivers/vhost/vhost.c3
-rw-r--r--drivers/vhost/vhost.h1
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c22
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c6
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c8
-rw-r--r--drivers/video/fbdev/aty/radeon_backlight.c6
-rw-r--r--drivers/video/fbdev/core/fbcon.c7
-rw-r--r--drivers/video/fbdev/core/fbmon.c2
-rw-r--r--drivers/video/fbdev/mx3fb.c7
-rw-r--r--drivers/video/fbdev/nvidia/nv_backlight.c8
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c81
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c8
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c3
-rw-r--r--drivers/video/fbdev/riva/fbdev.c8
-rw-r--r--drivers/watchdog/diag288_wdt.c15
-rw-r--r--fs/btrfs/extent_io.c2
-rw-r--r--fs/btrfs/raid56.c14
-rw-r--r--fs/btrfs/send.c6
-rw-r--r--fs/btrfs/tree-log.c23
-rw-r--r--fs/btrfs/tree-log.h2
-rw-r--r--fs/btrfs/volumes.c22
-rw-r--r--fs/btrfs/zlib.c2
-rw-r--r--fs/ceph/addr.c17
-rw-r--r--fs/ceph/caps.c16
-rw-r--r--fs/ceph/file.c3
-rw-r--r--fs/ceph/mds_client.c36
-rw-r--r--fs/ceph/snap.c36
-rw-r--r--fs/ceph/super.h11
-rw-r--r--fs/cifs/file.c4
-rw-r--r--fs/coredump.c48
-rw-r--r--fs/freevxfs/Kconfig2
-rw-r--r--fs/fscache/volume.c14
-rw-r--r--fs/proc/task_mmu.c4
-rw-r--r--fs/squashfs/squashfs_fs.h2
-rw-r--r--fs/squashfs/squashfs_fs_sb.h2
-rw-r--r--fs/squashfs/xattr.h4
-rw-r--r--fs/squashfs/xattr_id.c4
-rw-r--r--include/acpi/cppc_acpi.h12
-rw-r--r--include/drm/drm_client.h5
-rw-r--r--include/kunit/test.h6
-rw-r--r--include/kvm/arm_vgic.h2
-rw-r--r--include/linux/amd-pstate.h32
-rw-r--r--include/linux/ceph/libceph.h10
-rw-r--r--include/linux/cpufreq.h2
-rw-r--r--include/linux/efi.h3
-rw-r--r--include/linux/highmem-internal.h4
-rw-r--r--include/linux/hugetlb.h13
-rw-r--r--include/linux/memcontrol.h5
-rw-r--r--include/linux/mlx5/driver.h13
-rw-r--r--include/linux/nvmem-provider.h2
-rw-r--r--include/linux/pm.h4
-rw-r--r--include/linux/spinlock.h9
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/swap.h3
-rw-r--r--include/linux/trace_events.h1
-rw-r--r--include/linux/util_macros.h12
-rw-r--r--include/trace/stages/stage4_event_fields.h3
-rw-r--r--include/uapi/drm/virtgpu_drm.h1
-rw-r--r--include/uapi/linux/ip.h1
-rw-r--r--include/uapi/linux/ipv6.h1
-rw-r--r--kernel/bpf/bpf_lsm.c1
-rw-r--r--kernel/bpf/btf.c4
-rw-r--r--kernel/bpf/memalloc.c2
-rw-r--r--kernel/bpf/verifier.c25
-rw-r--r--kernel/cgroup/cpuset.c48
-rw-r--r--kernel/events/core.c39
-rw-r--r--kernel/irq/irqdomain.c2
-rw-r--r--kernel/locking/rtmutex.c5
-rw-r--r--kernel/power/Kconfig1
-rw-r--r--kernel/power/energy_model.c5
-rw-r--r--kernel/power/swap.c16
-rw-r--r--kernel/trace/bpf_trace.c3
-rw-r--r--kernel/trace/trace.c3
-rw-r--r--kernel/trace/trace.h1
-rw-r--r--kernel/trace/trace_events.c39
-rw-r--r--kernel/trace/trace_export.c3
-rw-r--r--lib/Kconfig.debug3
-rw-r--r--lib/dec_and_lock.c31
-rw-r--r--lib/kunit/assert.c40
-rw-r--r--lib/kunit/test.c1
-rw-r--r--lib/maple_tree.c22
-rw-r--r--lib/test_maple_tree.c89
-rw-r--r--mm/hugetlb.c3
-rw-r--r--mm/khugepaged.c22
-rw-r--r--mm/kmemleak.c5
-rw-r--r--mm/memblock.c8
-rw-r--r--mm/memcontrol.c67
-rw-r--r--mm/memory.c14
-rw-r--r--mm/mempolicy.c3
-rw-r--r--mm/mprotect.c8
-rw-r--r--mm/mremap.c25
-rw-r--r--mm/page_alloc.c5
-rw-r--r--mm/swapfile.c1
-rw-r--r--mm/vmscan.c9
-rw-r--r--mm/zsmalloc.c237
-rw-r--r--net/bridge/br_netfilter_hooks.c1
-rw-r--r--net/can/isotp.c69
-rw-r--r--net/can/j1939/address-claim.c40
-rw-r--r--net/can/j1939/transport.c4
-rw-r--r--net/can/raw.c47
-rw-r--r--net/core/devlink.c9
-rw-r--r--net/core/gro.c9
-rw-r--r--net/core/neighbour.c18
-rw-r--r--net/core/skbuff.c5
-rw-r--r--net/core/sock.c3
-rw-r--r--net/core/sock_map.c61
-rw-r--r--net/ipv4/af_inet.c1
-rw-r--r--net/ipv4/inet_connection_sock.c3
-rw-r--r--net/ipv4/tcp_bpf.c4
-rw-r--r--net/ipv6/addrconf.c59
-rw-r--r--net/ipv6/af_inet6.c1
-rw-r--r--net/mac802154/rx.c1
-rw-r--r--net/mctp/af_mctp.c6
-rw-r--r--net/mptcp/pm_netlink.c10
-rw-r--r--net/mptcp/protocol.c9
-rw-r--r--net/mptcp/sockopt.c11
-rw-r--r--net/mptcp/subflow.c12
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c5
-rw-r--r--net/netrom/af_netrom.c5
-rw-r--r--net/openvswitch/datapath.c12
-rw-r--r--net/qrtr/ns.c5
-rw-r--r--net/rds/message.c6
-rw-r--r--net/rose/af_rose.c8
-rw-r--r--net/sched/sch_htb.c5
-rw-r--r--net/sctp/transport.c4
-rw-r--r--net/tls/tls_sw.c2
-rw-r--r--net/xfrm/xfrm_compat.c4
-rw-r--r--net/xfrm/xfrm_input.c3
-rw-r--r--net/xfrm/xfrm_interface_core.c54
-rw-r--r--net/xfrm/xfrm_policy.c14
-rw-r--r--net/xfrm/xfrm_state.c18
-rw-r--r--scripts/Makefile.modinst6
-rw-r--r--sound/core/memalloc.c87
-rw-r--r--sound/firewire/motu/motu-hwdep.c4
-rw-r--r--sound/pci/hda/hda_bind.c2
-rw-r--r--sound/pci/hda/hda_codec.c1
-rw-r--r--sound/pci/hda/patch_realtek.c11
-rw-r--r--sound/pci/hda/patch_via.c3
-rw-r--r--sound/pci/lx6464es/lx_core.c11
-rw-r--r--sound/soc/amd/acp-es8336.c6
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c21
-rw-r--r--sound/soc/codecs/cs42l56.c6
-rw-r--r--sound/soc/codecs/es8326.c6
-rw-r--r--sound/soc/codecs/rt715-sdca-sdw.c2
-rw-r--r--sound/soc/codecs/tas5805m.c131
-rw-r--r--sound/soc/codecs/wsa883x.c4
-rw-r--r--sound/soc/fsl/fsl_sai.c1
-rw-r--r--sound/soc/intel/avs/core.c24
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c20
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c12
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c2
-rw-r--r--sound/soc/intel/boards/bytcr_wm5102.c2
-rw-r--r--sound/soc/intel/boards/sof_cs42l42.c3
-rw-r--r--sound/soc/intel/boards/sof_es8336.c14
-rw-r--r--sound/soc/intel/boards/sof_nau8825.c5
-rw-r--r--sound/soc/intel/boards/sof_rt5682.c5
-rw-r--r--sound/soc/intel/boards/sof_ssp_amp.c5
-rw-r--r--sound/soc/soc-topology.c8
-rw-r--r--sound/soc/sof/amd/acp.c36
-rw-r--r--sound/soc/sof/ipc4-mtrace.c7
-rw-r--r--sound/soc/sof/sof-audio.c16
-rw-r--r--sound/synth/emux/emux_nrpn.c3
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--tools/testing/memblock/internal.h4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_listen.c81
-rw-r--r--tools/testing/selftests/bpf/verifier/search_pruning.c36
-rwxr-xr-xtools/testing/selftests/cgroup/test_cpuset_prs.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh2
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/filesystems/fat/run_fat_tests.sh0
-rw-r--r--tools/testing/selftests/kvm/aarch64/page_fault_test.c187
-rwxr-xr-xtools/testing/selftests/net/cmsg_ipv6.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/lib.sh4
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh22
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_vnifiltering.sh18
-rwxr-xr-xtools/testing/selftests/net/udpgso_bench.sh24
-rw-r--r--tools/testing/selftests/net/udpgso_bench_rx.c4
-rw-r--r--tools/testing/selftests/net/udpgso_bench_tx.c36
-rw-r--r--tools/testing/selftests/vm/hugetlb-madvise.c1
-rw-r--r--tools/virtio/linux/bug.h8
-rw-r--r--tools/virtio/linux/build_bug.h7
-rw-r--r--tools/virtio/linux/cpumask.h7
-rw-r--r--tools/virtio/linux/gfp.h7
-rw-r--r--tools/virtio/linux/kernel.h1
-rw-r--r--tools/virtio/linux/kmsan.h12
-rw-r--r--tools/virtio/linux/scatterlist.h1
-rw-r--r--tools/virtio/linux/topology.h7
538 files changed, 5344 insertions, 2384 deletions
diff --git a/.mailmap b/.mailmap
index 8deff4cec169..318e63f338b1 100644
--- a/.mailmap
+++ b/.mailmap
@@ -130,6 +130,7 @@ Domen Puncer <[email protected]>
Douglas Gilbert <[email protected]>
Ed L. Cashin <[email protected]>
Evgeniy Polyakov <[email protected]>
Felipe W Damasio <[email protected]>
@@ -214,6 +215,7 @@ Jisheng Zhang <[email protected]> <[email protected]>
John Paul Adrian Glaubitz <[email protected]>
John Stultz <[email protected]>
diff --git a/CREDITS b/CREDITS
index acac06b6563e..5f5d70c9c038 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1173,6 +1173,10 @@ D: Future Domain TMC-16x0 SCSI driver (author)
D: APM driver (early port)
D: DRM drivers (author of several)
+N: Veaceslav Falico
+D: Co-maintainer and co-author of the network bonding driver.
+
N: János Farkas
D: romfs, various (mostly networking) fixes
@@ -4179,6 +4183,10 @@ S: B-1206 Jingmao Guojigongyu
S: 16 Baliqiao Nanjie, Beijing 101100
S: People's Republic of China
+N: Vlad Yasevich
+D: SCTP protocol maintainer.
+
N: Aviad Yehezkel
D: Kernel TLS implementation and offload support.
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index c8ae7c897f14..74cec76be9f2 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1245,13 +1245,17 @@ PAGE_SIZE multiple when read back.
This is a simple interface to trigger memory reclaim in the
target cgroup.
- This file accepts a string which contains the number of bytes to
- reclaim.
+ This file accepts a single key, the number of bytes to reclaim.
+ No nested keys are currently supported.
Example::
echo "1G" > memory.reclaim
+ The interface can be later extended with nested keys to
+ configure the reclaim behavior. For example, specify the
+ type of memory to reclaim from (anon, file, ..).
+
Please note that the kernel can over or under reclaim from
the target cgroup. If less bytes are reclaimed than the
specified amount, -EAGAIN is returned.
@@ -1263,13 +1267,6 @@ PAGE_SIZE multiple when read back.
This means that the networking layer will not adapt based on
reclaim induced by memory.reclaim.
- This file also allows the user to specify the nodes to reclaim from,
- via the 'nodes=' key, for example::
-
- echo "1G nodes=0,1" > memory.reclaim
-
- The above instructs the kernel to reclaim memory from nodes 0,1.
-
memory.peak
A read-only single value file which exists on non-root
cgroups.
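As a quick illustration of the memory.reclaim interface described in the hunk
above (an editorial sketch, not part of the patch; the cgroup path "example" is
hypothetical and assumes cgroup v2 is mounted at /sys/fs/cgroup):

    # Ask the kernel to reclaim 1G from an example cgroup.
    echo "1G" > /sys/fs/cgroup/example/memory.reclaim
    # If fewer bytes than requested were reclaimed, the write fails with EAGAIN
    # and the shell reports a non-zero exit status.
    echo $?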
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 6cfa6e3996cf..e3618dfdb36a 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -7020,3 +7020,10 @@
management firmware translates the requests into actual
hardware states (core frequency, data fabric and memory
clocks etc.)
+	active
+	  Use the amd_pstate_epp driver instance as the scaling driver.
+	  The driver provides a hint to the CPPC firmware indicating
+	  whether software wants to bias toward performance (0x0) or
+	  energy efficiency (0xff). The CPPC power algorithm then
+	  evaluates the runtime workload and adjusts the core frequency
+	  at run time.
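A minimal way to confirm which amd_pstate mode was selected at boot (a sketch,
not part of the patch; /proc/cmdline is the standard procfs location for the
kernel command line):

    # Check that the parameter is present on the running kernel's command line.
    tr ' ' '\n' < /proc/cmdline | grep amd_pstate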
diff --git a/Documentation/admin-guide/pm/amd-pstate.rst b/Documentation/admin-guide/pm/amd-pstate.rst
index 5376d53faaa8..d143e72cf93e 100644
--- a/Documentation/admin-guide/pm/amd-pstate.rst
+++ b/Documentation/admin-guide/pm/amd-pstate.rst
@@ -230,8 +230,8 @@ with :c:macro:`MSR_AMD_CPPC_ENABLE` or ``cppc_set_enable``, it will respond
to the request from AMD P-States.
-User Space Interface in ``sysfs``
-==================================
+User Space Interface in ``sysfs`` - Per-policy control
+======================================================
``amd-pstate`` exposes several global attributes (files) in ``sysfs`` to
control its functionality at the system level. They are located in the
@@ -262,6 +262,25 @@ lowest non-linear performance in `AMD CPPC Performance Capability
<perf_cap_>`_.)
This attribute is read-only.
+``energy_performance_available_preferences``
+
+A list of all the supported EPP preferences that can be used for
+``energy_performance_preference`` on this system.
+These profiles represent different hints that are provided
+to the low-level firmware about the user's desired performance vs
+energy-efficiency tradeoff. ``default`` means the EPP value is set by the
+platform firmware. This attribute is read-only.
+
+``energy_performance_preference``
+
+The current energy performance preference can be read from this attribute,
+and the user can change the current preference according to energy or performance needs.
+The list of supported profiles can be obtained from the
+``energy_performance_available_preferences`` attribute. All profiles are
+integer values between 0 and 255 when the EPP feature is enabled by the platform
+firmware; if the EPP feature is disabled, the driver will ignore the written value.
+This attribute is read-write.
+
Other performance and frequency values can be read back from
``/sys/devices/system/cpu/cpuX/acpi_cppc/``, see :ref:`cppc_sysfs`.
@@ -280,8 +299,30 @@ module which supports the new AMD P-States mechanism on most of the future AMD
platforms. The AMD P-States mechanism is the more performance and energy
efficiency frequency management method on AMD processors.
-Kernel Module Options for ``amd-pstate``
-=========================================
+
+AMD Pstate Driver Operation Modes
+=================================
+
+``amd_pstate`` CPPC has two operation modes: CPPC autonomous (active) mode and
+CPPC non-autonomous (passive) mode.
+Active mode and passive mode are selected with different kernel parameters.
+In autonomous mode, CPPC ignores requests written to the Desired Performance
+Target register and takes into account only the values set in the Minimum Requested
+Performance, Maximum Requested Performance, and Energy Performance Preference
+registers. When autonomous mode is disabled, it only considers the Desired Performance Target.
+
+Active Mode
+------------
+
+``amd_pstate=active``
+
+This is the low-level firmware control mode implemented by the ``amd_pstate_epp``
+driver, selected by passing ``amd_pstate=active`` on the kernel command line.
+In this mode, the ``amd_pstate_epp`` driver provides a hint to the CPPC firmware
+indicating whether software wants to bias toward performance (0x0) or energy
+efficiency (0xff). The CPPC power algorithm then evaluates the runtime workload
+and adjusts the core frequency at run time, taking into account the power supply,
+thermal conditions, core voltage and other hardware conditions.
Passive Mode
------------
@@ -298,6 +339,35 @@ processor must provide at least nominal performance requested and go higher if c
operating conditions allow.
+User Space Interface in ``sysfs`` - General
+===========================================
+
+Global Attributes
+-----------------
+
+``amd-pstate`` exposes several global attributes (files) in ``sysfs`` to
+control its functionality at the system level. They are located in the
+``/sys/devices/system/cpu/amd-pstate/`` directory and affect all CPUs.
+
+``status``
+ Operation mode of the driver: "active", "passive" or "disable".
+
+	"active"
+		The driver is functional and in ``active mode``.
+
+	"passive"
+		The driver is functional and in ``passive mode``.
+
+	"disable"
+		The driver is unregistered and not functional.
+
+	This attribute can be written to in order to change the driver's
+	operation mode or to unregister it. The string written to it must be
+	one of the possible values listed above and, if the write is
+	successful, the driver will switch over to the operation mode
+	represented by that string - or be unregistered in the "disable"
+	case.
+
``cpupower`` tool support for ``amd-pstate``
===============================================
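A short sketch tying together the per-policy EPP attributes and the global
status attribute documented above (not part of the patch; cpu0 is an example
CPU and "power" is assumed to be one of the strings listed in
energy_performance_available_preferences on the system at hand):

    # Global driver state: "active", "passive" or "disable".
    cat /sys/devices/system/cpu/amd-pstate/status

    # Per-policy EPP handling, using cpu0 as an example.
    cd /sys/devices/system/cpu/cpu0/cpufreq
    cat energy_performance_available_preferences
    cat energy_performance_preference
    # Write back one of the listed preferences, e.g. bias toward energy saving.
    echo "power" > energy_performance_preference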
diff --git a/Documentation/devicetree/bindings/.gitignore b/Documentation/devicetree/bindings/.gitignore
index a77719968a7e..51ddb26d93f0 100644
--- a/Documentation/devicetree/bindings/.gitignore
+++ b/Documentation/devicetree/bindings/.gitignore
@@ -2,3 +2,8 @@
*.example.dts
/processed-schema*.yaml
/processed-schema*.json
+
+#
+# We don't want to ignore the following even if they are dot-files
+#
+!.yamllint
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
index 99e159bc5fb1..e4aa8c67d532 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
@@ -26,8 +26,13 @@ properties:
items:
- enum:
- qcom,qdu1000-cpufreq-epss
+ - qcom,sc7280-cpufreq-epss
+ - qcom,sc8280xp-cpufreq-epss
- qcom,sm6375-cpufreq-epss
- qcom,sm8250-cpufreq-epss
+ - qcom,sm8350-cpufreq-epss
+ - qcom,sm8450-cpufreq-epss
+ - qcom,sm8550-cpufreq-epss
- const: qcom,cpufreq-epss
reg:
diff --git a/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml b/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml
index 9c086eac6ca7..6f5e7904181f 100644
--- a/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml
+++ b/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml
@@ -17,6 +17,9 @@ description: |
on the CPU OPP in use. The CPUFreq driver sets the CPR power domain level
according to the required OPPs defined in the CPU OPP tables.
+  For older implementations, efuses are parsed to select the correct OPP table
+  and voltage; CPR is not supported or used.
+
select:
properties:
compatible:
@@ -33,37 +36,65 @@ select:
required:
- compatible
-properties:
- cpus:
- type: object
-
- patternProperties:
- '^cpu@[0-9a-f]+$':
- type: object
-
- properties:
- power-domains:
- maxItems: 1
-
- power-domain-names:
- items:
- - const: cpr
-
- required:
- - power-domains
- - power-domain-names
-
patternProperties:
'^opp-table(-[a-z0-9]+)?$':
- if:
+ allOf:
+ - if:
+ properties:
+ compatible:
+ const: operating-points-v2-kryo-cpu
+ then:
+ $ref: /schemas/opp/opp-v2-kryo-cpu.yaml#
+
+ - if:
+ properties:
+ compatible:
+ const: operating-points-v2-qcom-level
+ then:
+ $ref: /schemas/opp/opp-v2-qcom-level.yaml#
+
+ unevaluatedProperties: false
+
+allOf:
+ - if:
properties:
compatible:
- const: operating-points-v2-kryo-cpu
+ contains:
+ enum:
+ - qcom,qcs404
+
then:
+ properties:
+ cpus:
+ type: object
+
+ patternProperties:
+ '^cpu@[0-9a-f]+$':
+ type: object
+
+ properties:
+ power-domains:
+ maxItems: 1
+
+ power-domain-names:
+ items:
+ - const: cpr
+
+ required:
+ - power-domains
+ - power-domain-names
+
patternProperties:
- '^opp-?[0-9]+$':
- required:
- - required-opps
+ '^opp-table(-[a-z0-9]+)?$':
+ if:
+ properties:
+ compatible:
+ const: operating-points-v2-kryo-cpu
+ then:
+ patternProperties:
+ '^opp-?[0-9]+$':
+ required:
+ - required-opps
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
index 9f7d3e11aacb..8449e14af9f3 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
@@ -108,7 +108,7 @@ properties:
msi-controller:
description:
- Only present if the Message Based Interrupt functionnality is
+ Only present if the Message Based Interrupt functionality is
being exposed by the HW, and the mbi-ranges property present.
mbi-ranges:
diff --git a/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml b/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
index 60cf3cbde4c5..bbbad31ae4ca 100644
--- a/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
+++ b/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
@@ -50,12 +50,22 @@ patternProperties:
opp-supported-hw:
description: |
A single 32 bit bitmap value, representing compatible HW.
- Bitmap:
+ Bitmap for MSM8996 format:
0: MSM8996, speedbin 0
1: MSM8996, speedbin 1
2: MSM8996, speedbin 2
- 3-31: unused
- maximum: 0x7
+ 3: MSM8996, speedbin 3
+ 4-31: unused
+
+          Bitmap for MSM8996SG format (speedbin shifted left by 4):
+ 0-3: unused
+ 4: MSM8996SG, speedbin 0
+ 5: MSM8996SG, speedbin 1
+ 6: MSM8996SG, speedbin 2
+ 7-31: unused
+ enum: [0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+ 0x9, 0xd, 0xe, 0xf,
+ 0x10, 0x20, 0x30, 0x70]
clock-latency-ns: true
@@ -106,6 +116,7 @@ examples:
L2_0: l2-cache {
compatible = "cache";
cache-level = <2>;
+ cache-unified;
};
};
@@ -140,6 +151,7 @@ examples:
L2_1: l2-cache {
compatible = "cache";
cache-level = <2>;
+ cache-unified;
};
};
diff --git a/Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml b/Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml
index b9ce2e099ce9..a30ef93213c0 100644
--- a/Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml
+++ b/Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml
@@ -30,7 +30,9 @@ patternProperties:
this OPP node. Sometimes several corners/levels shares a certain fuse
corner/level. A fuse corner/level contains e.g. ref uV, min uV,
and max uV.
- $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 2
required:
- opp-level
diff --git a/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml b/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
index 0a7aa29563c1..21c8ea08ff0a 100644
--- a/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
@@ -40,6 +40,8 @@ properties:
description:
Indicates that the setting of RTC time is allowed by the host CPU.
+ wakeup-source: true
+
required:
- compatible
- reg
diff --git a/Documentation/networking/device_drivers/ethernet/intel/ice.rst b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
index dc2e60ced927..b481b81f3be5 100644
--- a/Documentation/networking/device_drivers/ethernet/intel/ice.rst
+++ b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
@@ -819,7 +819,7 @@ NAPI
----
This driver supports NAPI (Rx polling mode).
For more information on NAPI, see
-https://www.linuxfoundation.org/collaborate/workgroups/networking/napi
+https://wiki.linuxfoundation.org/networking/napi
MACVLAN
diff --git a/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst b/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
index eaa87dbe8848..d052ef40fe36 100644
--- a/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
+++ b/Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
@@ -16,5 +16,5 @@ Contents
Support
=======
-If you got any problem, contact Wangxun support team via [email protected]
+If you got any problem, contact Wangxun support team via [email protected]
and Cc: netdev.
diff --git a/Documentation/power/suspend-and-interrupts.rst b/Documentation/power/suspend-and-interrupts.rst
index 4cda6617709a..dfbace2f4600 100644
--- a/Documentation/power/suspend-and-interrupts.rst
+++ b/Documentation/power/suspend-and-interrupts.rst
@@ -67,7 +67,7 @@ That may involve turning on a special signal handling logic within the platform
during system sleep so as to trigger a system wakeup when needed. For example,
the platform may include a dedicated interrupt controller used specifically for
handling system wakeup events. Then, if a given interrupt line is supposed to
-wake up the system from sleep sates, the corresponding input of that interrupt
+wake up the system from sleep states, the corresponding input of that interrupt
controller needs to be enabled to receive signals from the line in question.
After wakeup, it generally is better to disable that input to prevent the
dedicated controller from triggering interrupts unnecessarily.
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 9807b05a1b57..0a67cb738013 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -8070,9 +8070,13 @@ considering the state as complete. VMM needs to ensure that the dirty
state is final and avoid missing dirty pages from another ioctl ordered
after the bitmap collection.
-NOTE: One example of using the backup bitmap is saving arm64 vgic/its
-tables through KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_SAVE_TABLES} command on
-KVM device "kvm-arm-vgic-its" when dirty ring is enabled.
+NOTE: There are multiple examples of using the backup bitmap: (1) save vgic/its
+tables through the KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_SAVE_TABLES} command on
+KVM device "kvm-arm-vgic-its". (2) restore vgic/its tables through the
+KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_RESTORE_TABLES} command on KVM device
+"kvm-arm-vgic-its"; the VGICv3 LPI pending status is restored as well. (3) save
+the vgic3 pending table through the KVM_DEV_ARM_VGIC_{GRP_CTRL, SAVE_PENDING_TABLES}
+command on KVM device "kvm-arm-vgic-v3".
8.30 KVM_CAP_XEN_HVM
--------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 8a5c25c20d00..39ff1a717625 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1097,7 +1097,6 @@ S: Maintained
F: drivers/dma/ptdma/
AMD SEATTLE DEVICE TREE SUPPORT
-M: Brijesh Singh <[email protected]>
M: Suravee Suthikulpanit <[email protected]>
M: Tom Lendacky <[email protected]>
S: Supported
@@ -2212,6 +2211,9 @@ L: [email protected] (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
X: drivers/media/i2c/
+F: arch/arm64/boot/dts/freescale/
+X: arch/arm64/boot/dts/freescale/fsl-*
+X: arch/arm64/boot/dts/freescale/qoriq-*
N: imx
N: mxs
@@ -2450,11 +2452,14 @@ F: drivers/rtc/rtc-mt7622.c
ARM/Mediatek SoC support
M: Matthias Brugger <[email protected]>
+R: AngeloGioacchino Del Regno <[email protected]>
L: [email protected] (moderated for non-subscribers)
L: [email protected] (moderated for non-subscribers)
S: Maintained
W: https://mtk.wiki.kernel.org/
-C: irc://chat.freenode.net/linux-mediatek
+C: irc://irc.libera.chat/linux-mediatek
+F: arch/arm/boot/dts/mt2*
F: arch/arm/boot/dts/mt6*
F: arch/arm/boot/dts/mt7*
F: arch/arm/boot/dts/mt8*
@@ -2462,7 +2467,7 @@ F: arch/arm/mach-mediatek/
F: arch/arm64/boot/dts/mediatek/
F: drivers/soc/mediatek/
N: mtk
-N: mt[678]
+N: mt[2678]
K: mediatek
ARM/Mediatek USB3 PHY DRIVER
@@ -3766,7 +3771,6 @@ F: net/bluetooth/
BONDING DRIVER
M: Jay Vosburgh <[email protected]>
-M: Veaceslav Falico <[email protected]>
M: Andy Gospodarek <[email protected]>
S: Supported
@@ -14604,7 +14608,6 @@ F: tools/testing/selftests/net/ipsec.c
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <[email protected]>
-M: Hideaki YOSHIFUJI <[email protected]>
M: David Ahern <[email protected]>
S: Maintained
@@ -15661,7 +15664,7 @@ OPENRISC ARCHITECTURE
M: Jonas Bonn <[email protected]>
M: Stefan Kristiansson <[email protected]>
M: Stafford Horne <[email protected]>
S: Maintained
W: http://openrisc.io
T: git https://github.com/openrisc/linux.git
@@ -16117,7 +16120,7 @@ F: drivers/pci/controller/pci-v3-semi.c
PCI ENDPOINT SUBSYSTEM
M: Lorenzo Pieralisi <[email protected]>
-R: Krzysztof Wilczyński <[email protected]>
+M: Krzysztof Wilczyński <[email protected]>
R: Manivannan Sadhasivam <[email protected]>
R: Kishon Vijay Abraham I <[email protected]>
@@ -16125,7 +16128,7 @@ S: Supported
Q: https://patchwork.kernel.org/project/linux-pci/list/
B: https://bugzilla.kernel.org
C: irc://irc.oftc.net/linux-pci
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
F: Documentation/PCI/endpoint/*
F: Documentation/misc-devices/pci-endpoint-test.rst
F: drivers/misc/pci_endpoint_test.c
@@ -16160,7 +16163,7 @@ S: Supported
Q: https://patchwork.kernel.org/project/linux-pci/list/
B: https://bugzilla.kernel.org
C: irc://irc.oftc.net/linux-pci
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
F: Documentation/driver-api/pci/p2pdma.rst
F: drivers/pci/p2pdma.c
F: include/linux/pci-p2pdma.h
@@ -16182,14 +16185,14 @@ F: drivers/pci/controller/pci-xgene-msi.c
PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
M: Lorenzo Pieralisi <[email protected]>
+M: Krzysztof Wilczyński <[email protected]>
R: Rob Herring <[email protected]>
-R: Krzysztof Wilczyński <[email protected]>
S: Supported
Q: https://patchwork.kernel.org/project/linux-pci/list/
B: https://bugzilla.kernel.org
C: irc://irc.oftc.net/linux-pci
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
F: Documentation/devicetree/bindings/pci/
F: drivers/pci/controller/
F: drivers/pci/pci-bridge-emul.c
@@ -16202,7 +16205,7 @@ S: Supported
Q: https://patchwork.kernel.org/project/linux-pci/list/
B: https://bugzilla.kernel.org
C: irc://irc.oftc.net/linux-pci
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
F: Documentation/PCI/
F: Documentation/devicetree/bindings/pci/
F: arch/x86/kernel/early-quirks.c
@@ -18687,9 +18690,9 @@ F: drivers/target/
F: include/target/
SCTP PROTOCOL
-M: Vlad Yasevich <[email protected]>
M: Neil Horman <[email protected]>
M: Marcelo Ricardo Leitner <[email protected]>
+M: Xin Long <[email protected]>
S: Maintained
W: http://lksctp.sourceforge.net
@@ -20087,6 +20090,7 @@ F: drivers/watchdog/sunplus_wdt.c
SUPERH
M: Yoshinori Sato <[email protected]>
M: Rich Felker <[email protected]>
+M: John Paul Adrian Glaubitz <[email protected]>
S: Maintained
Q: http://patchwork.kernel.org/project/linux-sh/list/
@@ -21727,6 +21731,7 @@ F: include/uapi/linux/uvcvideo.h
USB WEBCAM GADGET
M: Laurent Pinchart <[email protected]>
+M: Daniel Scally <[email protected]>
S: Maintained
F: drivers/usb/gadget/function/*uvc*
diff --git a/Makefile b/Makefile
index 749bb455546e..716a975730a6 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc8
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-bonnell.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-bonnell.dts
index d1971ddf06a5..7f755e5a4624 100644
--- a/arch/arm/boot/dts/aspeed-bmc-ibm-bonnell.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-bonnell.dts
@@ -751,7 +751,7 @@
};
pca9849@75 {
- compatible = "nxp,pca849";
+ compatible = "nxp,pca9849";
reg = <0x75>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/imx7d-smegw01.dts b/arch/arm/boot/dts/imx7d-smegw01.dts
index 546268b8d0b1..c0f00f5db11e 100644
--- a/arch/arm/boot/dts/imx7d-smegw01.dts
+++ b/arch/arm/boot/dts/imx7d-smegw01.dts
@@ -198,6 +198,7 @@
&usbotg2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbotg2>;
+ over-current-active-low;
dr_mode = "host";
status = "okay";
};
@@ -374,7 +375,7 @@
pinctrl_usbotg2: usbotg2grp {
fsl,pins = <
- MX7D_PAD_UART3_RTS_B__USB_OTG2_OC 0x04
+ MX7D_PAD_UART3_RTS_B__USB_OTG2_OC 0x5c
>;
};
diff --git a/arch/arm/boot/dts/nuvoton-wpcm450.dtsi b/arch/arm/boot/dts/nuvoton-wpcm450.dtsi
index b637241316bb..fd671c7a1e5d 100644
--- a/arch/arm/boot/dts/nuvoton-wpcm450.dtsi
+++ b/arch/arm/boot/dts/nuvoton-wpcm450.dtsi
@@ -480,6 +480,7 @@
reg = <0xc8000000 0x1000>, <0xc0000000 0x4000000>;
reg-names = "control", "memory";
clocks = <&clk 0>;
+ nuvoton,shm = <&shm>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 487b0e03d4b4..2ca76b69add7 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -1181,6 +1181,7 @@
clock-names = "dp", "pclk";
phys = <&edp_phy>;
phy-names = "dp";
+ power-domains = <&power RK3288_PD_VIO>;
resets = <&cru SRST_EDP>;
reset-names = "dp";
rockchip,grf = <&grf>;
diff --git a/arch/arm/boot/dts/stihxxx-b2120.dtsi b/arch/arm/boot/dts/stihxxx-b2120.dtsi
index 920a0bad7494..8d9a2dfa76f1 100644
--- a/arch/arm/boot/dts/stihxxx-b2120.dtsi
+++ b/arch/arm/boot/dts/stihxxx-b2120.dtsi
@@ -178,7 +178,7 @@
tsin-num = <0>;
serial-not-parallel;
i2c-bus = <&ssc2>;
- reset-gpios = <&pio15 4 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio15 4 GPIO_ACTIVE_LOW>;
dvb-card = <STV0367_TDA18212_NIMA_1>;
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
index 1648e67afbb6..417523dc4cc0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
@@ -1886,7 +1886,7 @@
sd_emmc_b: sd@5000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0x5000 0x0 0x800>;
- interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_B>,
<&clkc CLKID_SD_EMMC_B_CLK0>,
@@ -1898,7 +1898,7 @@
sd_emmc_c: mmc@7000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0x7000 0x0 0x800>;
- interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_C>,
<&clkc CLKID_SD_EMMC_C_CLK0>,
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index 9dbd50820b1c..7f55d97f6c28 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -2324,7 +2324,7 @@
sd_emmc_a: sd@ffe03000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0xffe03000 0x0 0x800>;
- interrupts = <GIC_SPI 189 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_A>,
<&clkc CLKID_SD_EMMC_A_CLK0>,
@@ -2336,7 +2336,7 @@
sd_emmc_b: sd@ffe05000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0xffe05000 0x0 0x800>;
- interrupts = <GIC_SPI 190 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_B>,
<&clkc CLKID_SD_EMMC_B_CLK0>,
@@ -2348,7 +2348,7 @@
sd_emmc_c: mmc@ffe07000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0xffe07000 0x0 0x800>;
- interrupts = <GIC_SPI 191 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_C>,
<&clkc CLKID_SD_EMMC_C_CLK0>,
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index e3c12e0be99d..5eed15035b67 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -603,21 +603,21 @@
sd_emmc_a: mmc@70000 {
compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
reg = <0x0 0x70000 0x0 0x800>;
- interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
sd_emmc_b: mmc@72000 {
compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
reg = <0x0 0x72000 0x0 0x800>;
- interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
sd_emmc_c: mmc@74000 {
compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
reg = <0x0 0x74000 0x0 0x800>;
- interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8dxl.dtsi b/arch/arm64/boot/dts/freescale/imx8dxl.dtsi
index 0c64b9194621..214f21bd0cb4 100644
--- a/arch/arm64/boot/dts/freescale/imx8dxl.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8dxl.dtsi
@@ -164,7 +164,7 @@
sc_pwrkey: keys {
compatible = "fsl,imx8qxp-sc-key", "fsl,imx-sc-key";
- linux,keycode = <KEY_POWER>;
+ linux,keycodes = <KEY_POWER>;
wakeup-source;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts b/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts
index 752f409a30b1..9889319d4f04 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts
@@ -88,6 +88,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_watchdog_gpio>;
compatible = "linux,wdt-gpio";
+ always-running;
gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
hw_algo = "level";
/* Reset triggers in 2..3 seconds */
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
index 83c8f715cd90..b1f11098d248 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
@@ -602,7 +602,7 @@
#define MX8MM_IOMUXC_UART1_RXD_GPIO5_IO22 0x234 0x49C 0x000 0x5 0x0
#define MX8MM_IOMUXC_UART1_RXD_TPSMP_HDATA24 0x234 0x49C 0x000 0x7 0x0
#define MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x238 0x4A0 0x000 0x0 0x0
-#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x0
+#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x1
#define MX8MM_IOMUXC_UART1_TXD_ECSPI3_MOSI 0x238 0x4A0 0x000 0x1 0x0
#define MX8MM_IOMUXC_UART1_TXD_GPIO5_IO23 0x238 0x4A0 0x000 0x5 0x0
#define MX8MM_IOMUXC_UART1_TXD_TPSMP_HDATA25 0x238 0x4A0 0x000 0x7 0x0
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso
index 3ea73a6886ff..f6ad1a4b8b66 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso
@@ -33,7 +33,6 @@
pinctrl-0 = <&pinctrl_uart2>;
rts-gpios = <&gpio5 29 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio5 28 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso
index 2fa635e1c1a8..1f8ea20dfafc 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso
@@ -33,7 +33,6 @@
pinctrl-0 = <&pinctrl_uart2>;
rts-gpios = <&gpio5 29 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio5 28 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
index 244ef8d6cc68..7761d5671cb1 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
@@ -222,7 +222,6 @@
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_bten>;
cts-gpios = <&gpio5 8 GPIO_ACTIVE_LOW>;
rts-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
bluetooth {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
index 6433c205f8dd..64b366e83fa1 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
@@ -733,7 +733,6 @@
dtr-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
dsr-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
dcd-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
@@ -749,7 +748,6 @@
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
cts-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>;
rts-gpios = <&gpio4 9 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
@@ -758,7 +756,6 @@
pinctrl-0 = <&pinctrl_uart4>, <&pinctrl_uart4_gpio>;
cts-gpios = <&gpio5 11 GPIO_ACTIVE_LOW>;
rts-gpios = <&gpio5 12 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
index 32872b0b1aaf..e8bc1fccc47b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
@@ -664,7 +664,6 @@
pinctrl-0 = <&pinctrl_uart1>, <&pinctrl_uart1_gpio>;
rts-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio4 24 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
@@ -681,7 +680,6 @@
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
rts-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
bluetooth {
@@ -699,7 +697,6 @@
dtr-gpios = <&gpio4 3 GPIO_ACTIVE_LOW>;
dsr-gpios = <&gpio4 4 GPIO_ACTIVE_LOW>;
dcd-gpios = <&gpio4 6 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
index 8ce562246a08..acc2ba8e00a8 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
@@ -581,7 +581,6 @@
dtr-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
dsr-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
dcd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
index 0d454e0e2f7c..702d87621bb4 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
@@ -98,6 +98,7 @@
off-on-delay = <500000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_eth>;
+ regulator-always-on;
regulator-boot-on;
regulator-max-microvolt = <3300000>;
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
index b9444e4a3d2d..7c12518dbc96 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
@@ -643,7 +643,6 @@
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
rts-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
bluetooth {
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
index ceeca4966fc5..8eb7d5ee38da 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
@@ -623,7 +623,6 @@
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
cts-gpios = <&gpio3 21 GPIO_ACTIVE_LOW>;
rts-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
bluetooth {
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index 5d31536f4c48..c10cfeb1214d 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -2146,7 +2146,7 @@
};
vdosys0: syscon@1c01a000 {
- compatible = "mediatek,mt8195-mmsys", "syscon";
+ compatible = "mediatek,mt8195-vdosys0", "mediatek,mt8195-mmsys", "syscon";
reg = <0 0x1c01a000 0 0x1000>;
mboxes = <&gce0 0 CMDQ_THR_PRIO_4>;
#clock-cells = <1>;
@@ -2292,7 +2292,7 @@
};
vdosys1: syscon@1c100000 {
- compatible = "mediatek,mt8195-mmsys", "syscon";
+ compatible = "mediatek,mt8195-vdosys1", "syscon";
reg = <0 0x1c100000 0 0x1000>;
#clock-cells = <1>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
index aa22a0c22265..5d5d9574088c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
@@ -96,7 +96,6 @@
linux,default-trigger = "heartbeat";
gpios = <&rk805 1 GPIO_ACTIVE_LOW>;
default-state = "on";
- mode = <0x23>;
};
user_led: led-1 {
@@ -104,7 +103,6 @@
linux,default-trigger = "mmc1";
gpios = <&rk805 0 GPIO_ACTIVE_LOW>;
default-state = "off";
- mode = <0x05>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi
index 6e29e74f6fc6..783120e9cebe 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi
@@ -111,7 +111,7 @@
};
};
- dmc_opp_table: dmc_opp_table {
+ dmc_opp_table: opp-table-3 {
compatible = "operating-points-v2";
opp00 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
index 04403a76238b..a0795a2b1cb1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
@@ -104,6 +104,13 @@
};
};
+&cpu_alert0 {
+ temperature = <65000>;
+};
+&cpu_alert1 {
+ temperature = <68000>;
+};
+
&cpu_l0 {
cpu-supply = <&vdd_cpu_l>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 4391aea25984..1881b4b71f91 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -589,7 +589,7 @@
clocks = <&cru HCLK_M_CRYPTO0>, <&cru HCLK_S_CRYPTO0>, <&cru SCLK_CRYPTO0>;
clock-names = "hclk_master", "hclk_slave", "sclk";
resets = <&cru SRST_CRYPTO0>, <&cru SRST_CRYPTO0_S>, <&cru SRST_CRYPTO0_M>;
- reset-names = "master", "lave", "crypto";
+ reset-names = "master", "slave", "crypto-rst";
};
crypto1: crypto@ff8b8000 {
@@ -599,7 +599,7 @@
clocks = <&cru HCLK_M_CRYPTO1>, <&cru HCLK_S_CRYPTO1>, <&cru SCLK_CRYPTO1>;
clock-names = "hclk_master", "hclk_slave", "sclk";
resets = <&cru SRST_CRYPTO1>, <&cru SRST_CRYPTO1_S>, <&cru SRST_CRYPTO1_M>;
- reset-names = "master", "slave", "crypto";
+ reset-names = "master", "slave", "crypto-rst";
};
i2c1: i2c@ff110000 {
@@ -2241,13 +2241,11 @@
pcfg_input_pull_up: pcfg-input-pull-up {
input-enable;
bias-pull-up;
- drive-strength = <2>;
};
pcfg_input_pull_down: pcfg-input-pull-down {
input-enable;
bias-pull-down;
- drive-strength = <2>;
};
clock {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts b/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts
index 4c7f9abd594f..d956496d5221 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts
@@ -353,6 +353,17 @@
};
};
+&pmu_io_domains {
+ pmuio2-supply = <&vcc_3v3>;
+ vccio1-supply = <&vcc_3v3>;
+ vccio3-supply = <&vcc_3v3>;
+ vccio4-supply = <&vcca_1v8>;
+ vccio5-supply = <&vcc_3v3>;
+ vccio6-supply = <&vcca_1v8>;
+ vccio7-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
&pwm0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
index a1c5fdf7d68f..3c9d85257cc9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
@@ -571,6 +571,8 @@
};
&i2s1_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s1m0_sclktx &i2s1m0_lrcktx &i2s1m0_sdi0 &i2s1m0_sdo0>;
rockchip,trcm-sync-tx-only;
status = "okay";
};
@@ -730,14 +732,13 @@
disable-wp;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
- sd-uhs-sdr104;
+ sd-uhs-sdr50;
vmmc-supply = <&vcc3v3_sd>;
vqmmc-supply = <&vccio_sd>;
status = "okay";
};
&sdmmc2 {
- supports-sdio;
bus-width = <4>;
disable-wp;
cap-sd-highspeed;
diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
index 5706c3e24f0a..c27f1c7f072d 100644
--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
@@ -966,6 +966,7 @@
clock-names = "aclk_mst", "aclk_slv",
"aclk_dbi", "pclk", "aux";
device_type = "pci";
+ #interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc 0>,
<0 0 0 2 &pcie_intc 1>,
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 94a666dd1443..2642e9ce2819 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -2187,7 +2187,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
ite->collection->collection_id;
val = cpu_to_le64(val);
- return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
+ return vgic_write_guest_lock(kvm, gpa, &val, ite_esz);
}
/**
@@ -2339,7 +2339,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
(itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
(dev->num_eventid_bits - 1));
val = cpu_to_le64(val);
- return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
+ return vgic_write_guest_lock(kvm, ptr, &val, dte_esz);
}
/**
@@ -2526,7 +2526,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
collection->collection_id);
val = cpu_to_le64(val);
- return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
+ return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz);
}
/*
@@ -2607,7 +2607,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
*/
val = 0;
BUG_ON(cte_esz > sizeof(val));
- ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
+ ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
return ret;
}
@@ -2743,7 +2743,6 @@ static int vgic_its_has_attr(struct kvm_device *dev,
static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
{
const struct vgic_its_abi *abi = vgic_its_get_abi(its);
- struct vgic_dist *dist = &kvm->arch.vgic;
int ret = 0;
if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
@@ -2763,9 +2762,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
vgic_its_reset(kvm, its);
break;
case KVM_DEV_ARM_ITS_SAVE_TABLES:
- dist->save_its_tables_in_progress = true;
ret = abi->save_tables(its);
- dist->save_its_tables_in_progress = false;
break;
case KVM_DEV_ARM_ITS_RESTORE_TABLES:
ret = abi->restore_tables(its);
@@ -2792,7 +2789,7 @@ bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
- return dist->save_its_tables_in_progress;
+ return dist->table_write_in_progress;
}
static int vgic_its_set_attr(struct kvm_device *dev,
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 2624963cb95b..684bdfaad4a9 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -339,7 +339,7 @@ retry:
if (status) {
/* clear consumed data */
val &= ~(1 << bit_nr);
- ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
+ ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
if (ret)
return ret;
}
@@ -434,7 +434,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
else
val &= ~(1 << bit_nr);
- ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
+ ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
if (ret)
goto out;
}
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 23e280fa0a16..7f7f3c5ed85a 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -6,6 +6,7 @@
#define __KVM_ARM_VGIC_NEW_H__
#include <linux/irqchip/arm-gic-common.h>
+#include <asm/kvm_mmu.h>
#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
#define IMPLEMENTER_ARM 0x43b
@@ -131,6 +132,19 @@ static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
return vgic_irq_get_lr_count(irq) > 1;
}
+static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+ const void *data, unsigned long len)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int ret;
+
+ dist->table_write_in_progress = true;
+ ret = kvm_write_guest_lock(kvm, gpa, data, len);
+ dist->table_write_in_progress = false;
+
+ return ret;
+}
+
/*
* This struct provides an intermediate representation of the fields contained
* in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
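The vgic_write_guest_lock() helper added above wraps kvm_write_guest_lock() so that dist->table_write_in_progress is raised for the duration of every ITS table or pending-table write, and kvm_arch_allow_write_without_running_vcpu() now keys off that flag instead of the per-ioctl save_its_tables_in_progress bookkeeping removed from vgic-its.c. A minimal userspace sketch of the same raise-flag/do-write/drop-flag pattern; the struct, do_write() and main() are illustrative stand-ins, not kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    struct dist_state {
        bool table_write_in_progress;
    };

    /* stand-in for kvm_write_guest_lock(); always succeeds here */
    static int do_write(const void *data, unsigned long len)
    {
        (void)data;
        return len ? 0 : -1;
    }

    /* mirror of the wrapper: raise the flag, do the write, drop the flag */
    static int guarded_write(struct dist_state *dist, const void *data,
                             unsigned long len)
    {
        int ret;

        dist->table_write_in_progress = true;
        ret = do_write(data, len);
        dist->table_write_in_progress = false;

        return ret;
    }

    int main(void)
    {
        struct dist_state dist = { .table_write_in_progress = false };
        unsigned long val = 42;

        printf("write returned %d\n", guarded_write(&dist, &val, sizeof(val)));
        return 0;
    }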
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index f6a502e8f02c..6e948d015332 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -170,6 +170,9 @@ ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, u
asmlinkage long
ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *tp)
{
+ struct timespec64 rtn_tp;
+ s64 tick_ns;
+
/*
* ia64's clock_gettime() syscall is implemented as a vdso call
* fsys_clock_gettime(). Currently it handles only
@@ -185,8 +188,8 @@ ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *
switch (which_clock) {
case CLOCK_REALTIME:
case CLOCK_MONOTONIC:
- s64 tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq);
- struct timespec64 rtn_tp = ns_to_timespec64(tick_ns);
+ tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq);
+ rtn_tp = ns_to_timespec64(tick_ns);
return put_timespec64(&rtn_tp, tp);
}
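The ia64_clock_getres() hunk hoists the tick_ns and rtn_tp declarations to the top of the function because a declaration placed directly after a case label, without its own braces, is not valid C before C23 and breaks with newer compilers. A small standalone sketch of the fixed shape, with made-up clock ids and a made-up frequency, showing the DIV_ROUND_UP-style tick resolution computation:

    #include <stdio.h>

    /* Declarations are hoisted out of the switch, as in the fix above. */
    static long tick_resolution_ns(long freq_hz, int clock_id)
    {
        long tick_ns;

        switch (clock_id) {
        case 0: /* e.g. CLOCK_REALTIME */
        case 1: /* e.g. CLOCK_MONOTONIC */
            /* DIV_ROUND_UP(NSEC_PER_SEC, freq_hz) */
            tick_ns = (1000000000L + freq_hz - 1) / freq_hz;
            return tick_ns;
        }
        return -1;
    }

    int main(void)
    {
        printf("%ld ns per tick\n", tick_resolution_ns(400000000L, 0)); /* 3 */
        return 0;
    }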
diff --git a/arch/mips/include/asm/mach-loongson32/cpufreq.h b/arch/mips/include/asm/mach-loongson32/cpufreq.h
deleted file mode 100644
index e422a32883ae..000000000000
--- a/arch/mips/include/asm/mach-loongson32/cpufreq.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2014 Zhang, Keguang <[email protected]>
- *
- * Loongson 1 CPUFreq platform support.
- */
-
-#ifndef __ASM_MACH_LOONGSON32_CPUFREQ_H
-#define __ASM_MACH_LOONGSON32_CPUFREQ_H
-
-struct plat_ls1x_cpufreq {
- const char *clk_name; /* CPU clk */
- const char *osc_clk_name; /* OSC clk */
- unsigned int max_freq; /* in kHz */
- unsigned int min_freq; /* in kHz */
-};
-
-#endif /* __ASM_MACH_LOONGSON32_CPUFREQ_H */
diff --git a/arch/mips/include/asm/mach-loongson32/platform.h b/arch/mips/include/asm/mach-loongson32/platform.h
index eb83e2741887..86e1a6aab4e5 100644
--- a/arch/mips/include/asm/mach-loongson32/platform.h
+++ b/arch/mips/include/asm/mach-loongson32/platform.h
@@ -12,7 +12,6 @@
#include <nand.h>
extern struct platform_device ls1x_uart_pdev;
-extern struct platform_device ls1x_cpufreq_pdev;
extern struct platform_device ls1x_eth0_pdev;
extern struct platform_device ls1x_eth1_pdev;
extern struct platform_device ls1x_ehci_pdev;
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index 311dc1580bbd..64d7979394e6 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -15,7 +15,6 @@
#include <platform.h>
#include <loongson1.h>
-#include <cpufreq.h>
#include <dma.h>
#include <nand.h>
@@ -62,21 +61,6 @@ void __init ls1x_serial_set_uartclk(struct platform_device *pdev)
p->uartclk = clk_get_rate(clk);
}
-/* CPUFreq */
-static struct plat_ls1x_cpufreq ls1x_cpufreq_pdata = {
- .clk_name = "cpu_clk",
- .osc_clk_name = "osc_clk",
- .max_freq = 266 * 1000,
- .min_freq = 33 * 1000,
-};
-
-struct platform_device ls1x_cpufreq_pdev = {
- .name = "ls1x-cpufreq",
- .dev = {
- .platform_data = &ls1x_cpufreq_pdata,
- },
-};
-
/* Synopsys Ethernet GMAC */
static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = {
.phy_mask = 0,
diff --git a/arch/mips/loongson32/ls1b/board.c b/arch/mips/loongson32/ls1b/board.c
index 727e06718dab..fed8d432ef20 100644
--- a/arch/mips/loongson32/ls1b/board.c
+++ b/arch/mips/loongson32/ls1b/board.c
@@ -35,7 +35,6 @@ static const struct gpio_led_platform_data ls1x_led_pdata __initconst = {
static struct platform_device *ls1b_platform_devices[] __initdata = {
&ls1x_uart_pdev,
- &ls1x_cpufreq_pdev,
&ls1x_eth0_pdev,
&ls1x_eth1_pdev,
&ls1x_ehci_pdev,
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 4dfe1f49c5c8..6817892a2c58 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -1303,7 +1303,7 @@ static char iodc_dbuf[4096] __page_aligned_bss;
*/
int pdc_iodc_print(const unsigned char *str, unsigned count)
{
- unsigned int i;
+ unsigned int i, found = 0;
unsigned long flags;
count = min_t(unsigned int, count, sizeof(iodc_dbuf));
@@ -1315,6 +1315,7 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
iodc_dbuf[i+0] = '\r';
iodc_dbuf[i+1] = '\n';
i += 2;
+ found = 1;
goto print;
default:
iodc_dbuf[i] = str[i];
@@ -1330,7 +1331,7 @@ print:
__pa(pdc_result), 0, __pa(iodc_dbuf), i, 0);
spin_unlock_irqrestore(&pdc_lock, flags);
- return i;
+ return i - found;
}
#if !defined(BOOTLOADER)
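The pdc_iodc_print() change makes the function return the number of input characters consumed rather than the number of bytes queued for the console: when a '\n' is expanded to "\r\n", i counts two output bytes but only one input byte, so without the "- found" correction the caller would skip a character. A simplified sketch of the consumed-versus-written accounting; the buffer handling is reduced to the essentials and is not the firmware call path:

    #include <stdio.h>

    /* Copy str into buf, expanding '\n' to "\r\n"; return input chars consumed. */
    static unsigned int copy_console(char *buf, unsigned int bufsz,
                                     const char *str, unsigned int count)
    {
        unsigned int i, found = 0;

        for (i = 0; i < count && i + 2 <= bufsz; i++) {
            if (str[i] == '\n') {
                buf[i]     = '\r';
                buf[i + 1] = '\n';
                i += 2;
                found = 1;
                break;
            }
            buf[i] = str[i];
        }
        return i - found;   /* bytes of str handled, not bytes written */
    }

    int main(void)
    {
        char buf[64];
        unsigned int used = copy_console(buf, sizeof(buf), "ab\ncd", 5);

        printf("consumed %u input chars\n", used);  /* 3: 'a', 'b', '\n' */
        return 0;
    }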
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 69c62933e952..ceb45f51d52e 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -126,6 +126,12 @@ long arch_ptrace(struct task_struct *child, long request,
unsigned long tmp;
long ret = -EIO;
+ unsigned long user_regs_struct_size = sizeof(struct user_regs_struct);
+#ifdef CONFIG_64BIT
+ if (is_compat_task())
+ user_regs_struct_size /= 2;
+#endif
+
switch (request) {
/* Read the word at location addr in the USER area. For ptraced
@@ -166,7 +172,7 @@ long arch_ptrace(struct task_struct *child, long request,
addr >= sizeof(struct pt_regs))
break;
if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
- data |= 3; /* ensure userspace privilege */
+ data |= PRIV_USER; /* ensure userspace privilege */
}
if ((addr >= PT_GR1 && addr <= PT_GR31) ||
addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
@@ -181,14 +187,14 @@ long arch_ptrace(struct task_struct *child, long request,
return copy_regset_to_user(child,
task_user_regset_view(current),
REGSET_GENERAL,
- 0, sizeof(struct user_regs_struct),
+ 0, user_regs_struct_size,
datap);
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child,
task_user_regset_view(current),
REGSET_GENERAL,
- 0, sizeof(struct user_regs_struct),
+ 0, user_regs_struct_size,
datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */
@@ -285,7 +291,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if (addr >= sizeof(struct pt_regs))
break;
if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
- data |= 3; /* ensure userspace privilege */
+ data |= PRIV_USER; /* ensure userspace privilege */
}
if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
/* Special case, fp regs are 64 bits anyway */
@@ -302,6 +308,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
}
break;
+ case PTRACE_GETREGS:
+ case PTRACE_SETREGS:
+ case PTRACE_GETFPREGS:
+ case PTRACE_SETFPREGS:
+ return arch_ptrace(child, request, addr, data);
default:
ret = compat_ptrace_request(child, request, addr, data);
@@ -484,7 +495,7 @@ static void set_reg(struct pt_regs *regs, int num, unsigned long val)
case RI(iaoq[0]):
case RI(iaoq[1]):
/* set 2 lowest bits to ensure userspace privilege: */
- regs->iaoq[num - RI(iaoq[0])] = val | 3;
+ regs->iaoq[num - RI(iaoq[0])] = val | PRIV_USER;
return;
case RI(sar): regs->sar = val;
return;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b8c4ac56bddc..7a5f8dbfbdd0 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -163,7 +163,6 @@ config PPC
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANTS_MODULES_DATA_IN_VMALLOC if PPC_BOOK3S_32 || PPC_8xx
- select ARCH_WANTS_NO_INSTR
select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
select BUILDTIME_TABLE_SORT
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index dd39313242b4..d5cd16270c5d 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -97,6 +97,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
{
if (radix_enabled())
radix__tlb_flush(tlb);
+
+ return hash__tlb_flush(tlb);
}
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 77fa88c2aed0..eb6d094083fd 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -173,6 +173,15 @@ static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
return flags;
}
+static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
+{
+ unsigned long flags = irq_soft_mask_return();
+
+ irq_soft_mask_set(flags & ~mask);
+
+ return flags;
+}
+
static inline unsigned long arch_local_save_flags(void)
{
return irq_soft_mask_return();
@@ -192,7 +201,7 @@ static inline void arch_local_irq_enable(void)
static inline unsigned long arch_local_irq_save(void)
{
- return irq_soft_mask_set_return(IRQS_DISABLED);
+ return irq_soft_mask_or_return(IRQS_DISABLED);
}
static inline bool arch_irqs_disabled_flags(unsigned long flags)
@@ -331,10 +340,11 @@ bool power_pmu_wants_prompt_pmi(void);
* is a different soft-masked interrupt pending that requires hard
* masking.
*/
-static inline bool should_hard_irq_enable(void)
+static inline bool should_hard_irq_enable(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
- WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+ WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+ WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
WARN_ON(mfmsr() & MSR_EE);
}
@@ -347,8 +357,17 @@ static inline bool should_hard_irq_enable(void)
*
* TODO: Add test for 64e
*/
- if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
- return false;
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
+ if (!power_pmu_wants_prompt_pmi())
+ return false;
+ /*
+ * If PMIs are disabled then IRQs should be disabled as well,
+ * so we shouldn't see this condition, check for it just in
+ * case because we are about to enable PMIs.
+ */
+ if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
+ return false;
+ }
if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
return false;
@@ -358,18 +377,16 @@ static inline bool should_hard_irq_enable(void)
/*
* Do the hard enabling, only call this if should_hard_irq_enable is true.
+ * This allows PMI interrupts to profile irq handlers.
*/
static inline void do_hard_irq_enable(void)
{
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
- WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
- WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
- WARN_ON(mfmsr() & MSR_EE);
- }
/*
- * This allows PMI interrupts (and watchdog soft-NMIs) through.
- * There is no other reason to enable this way.
+ * Asynch interrupts come in with IRQS_ALL_DISABLED,
+ * PACA_IRQ_HARD_DIS, and MSR[EE]=0.
*/
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
__hard_irq_enable();
}
@@ -452,7 +469,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE);
}
-static __always_inline bool should_hard_irq_enable(void)
+static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
{
return false;
}
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index f55c6fb34a3a..5712dd846263 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -27,7 +27,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
ppc_msgsync();
- if (should_hard_irq_enable())
+ if (should_hard_irq_enable(regs))
do_hard_irq_enable();
kvmppc_clear_host_ipi(smp_processor_id());
diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S
index d438ca74e96c..fdbee1093e2b 100644
--- a/arch/powerpc/kernel/head_85xx.S
+++ b/arch/powerpc/kernel/head_85xx.S
@@ -864,7 +864,7 @@ _GLOBAL(load_up_spe)
* SPE unavailable trap from kernel - print a message, but let
* the task use SPE in the kernel until it returns to user mode.
*/
-KernelSPE:
+SYM_FUNC_START_LOCAL(KernelSPE)
lwz r3,_MSR(r1)
oris r3,r3,MSR_SPE@h
stw r3,_MSR(r1) /* enable use of SPE after return */
@@ -881,6 +881,7 @@ KernelSPE:
#endif
.align 4,0
+SYM_FUNC_END(KernelSPE)
#endif /* CONFIG_SPE */
/*
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index fc6631a80527..0ec1581619db 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -50,16 +50,18 @@ static inline bool exit_must_hard_disable(void)
*/
static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
{
+ bool must_hard_disable = (exit_must_hard_disable() || !restartable);
+
/* This must be done with RI=1 because tracing may touch vmaps */
trace_hardirqs_on();
- if (exit_must_hard_disable() || !restartable)
+ if (must_hard_disable)
__hard_EE_RI_disable();
#ifdef CONFIG_PPC64
/* This pattern matches prep_irq_for_idle */
if (unlikely(lazy_irq_pending_nocheck())) {
- if (exit_must_hard_disable() || !restartable) {
+ if (must_hard_disable) {
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
__hard_RI_enable();
}
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c5b9ce887483..c9535f2760b5 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -238,7 +238,7 @@ static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
irq = static_call(ppc_get_irq)();
/* We can hard enable interrupts now to allow perf interrupts */
- if (should_hard_irq_enable())
+ if (should_hard_irq_enable(regs))
do_hard_irq_enable();
/* And finally process it */
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index d68de3618741..e26eb6618ae5 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -515,7 +515,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
}
/* Conditionally hard-enable interrupts. */
- if (should_hard_irq_enable()) {
+ if (should_hard_irq_enable(regs)) {
/*
* Ensure a positive value is written to the decrementer, or
* else some CPUs will continue to take decrementer exceptions.
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
index af8854f9eae3..9be3e818a240 100644
--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -26,6 +26,7 @@
#include <asm/firmware.h>
#include <asm/kexec_ranges.h>
#include <asm/crashdump-ppc64.h>
+#include <asm/mmzone.h>
#include <asm/prom.h>
struct umem_info {
@@ -989,10 +990,13 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
* linux,drconf-usable-memory properties. Get an approximate on the
* number of usable memory entries and use for FDT size estimation.
*/
- usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) +
- (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
-
- extra_size = (unsigned int)(usm_entries * sizeof(u64));
+ if (drmem_lmb_size()) {
+ usm_entries = ((memory_hotplug_max() / drmem_lmb_size()) +
+ (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
+ extra_size = (unsigned int)(usm_entries * sizeof(u64));
+ } else {
+ extra_size = 0;
+ }
/*
* Get the number of CPU nodes in the current DT. This allows to
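The kexec_extra_fdt_size_ppc64() hunk guards the usable-memory estimate against drmem_lmb_size() returning zero on systems without dynamic-reconfiguration memory, which previously caused a divide-by-zero. A tiny sketch of the guarded estimate with made-up sizes; the function name and parameters are illustrative only:

    #include <stdio.h>
    #include <stdint.h>

    static unsigned int usm_extra_bytes(uint64_t mem_end, uint64_t crash_size,
                                        uint64_t lmb_size)
    {
        uint64_t entries;

        if (!lmb_size)      /* no drmem LMBs: nothing to reserve */
            return 0;

        entries = mem_end / lmb_size + 2 * (crash_size / lmb_size);
        return (unsigned int)(entries * sizeof(uint64_t));
    }

    int main(void)
    {
        printf("%u bytes\n", usm_extra_bytes(1ULL << 34, 1ULL << 28, 1ULL << 24));
        printf("%u bytes\n", usm_extra_bytes(1ULL << 34, 1ULL << 28, 0));
        return 0;
    }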
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 0dce93ccaadf..e89281d3ba28 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -912,16 +912,15 @@ static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
- ulong r1, ip, msr, lr;
+ ulong r1, msr, lr;
asm("mr %0, 1" : "=r"(r1));
asm("mflr %0" : "=r"(lr));
asm("mfmsr %0" : "=r"(msr));
- asm("bl 1f; 1: mflr %0" : "=r"(ip));
memset(regs, 0, sizeof(*regs));
regs->gpr[1] = r1;
- regs->nip = ip;
+ regs->nip = _THIS_IP_;
regs->msr = msr;
regs->link = lr;
}
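kvmppc_fill_pt_regs() drops the "bl 1f; 1: mflr" trick and records _THIS_IP_ instead, which the kernel builds from a GCC label-as-value expression (my reading of include/linux/instruction_pointer.h; treat the exact macro body as an assumption). A userspace sketch of the same idea, GCC/Clang specific because of the statement expression and &&label:

    #include <stdio.h>

    /* roughly how _THIS_IP_ works: take the address of a local label */
    #define THIS_IP ({ __label__ __here; __here: (unsigned long)&&__here; })

    int main(void)
    {
        printf("main() is near 0x%lx\n", THIS_IP);
        return 0;
    }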
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index cac727b01799..26245aaf12b8 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -234,6 +234,14 @@ void radix__mark_rodata_ro(void)
end = (unsigned long)__end_rodata;
radix__change_memory_range(start, end, _PAGE_WRITE);
+
+ for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) {
+ end = start + PAGE_SIZE;
+ if (overlaps_interrupt_vector_text(start, end))
+ radix__change_memory_range(start, end, _PAGE_WRITE);
+ else
+ break;
+ }
}
void radix__mark_initmem_nx(void)
@@ -262,6 +270,22 @@ print_mapping(unsigned long start, unsigned long end, unsigned long size, bool e
static unsigned long next_boundary(unsigned long addr, unsigned long end)
{
#ifdef CONFIG_STRICT_KERNEL_RWX
+ unsigned long stext_phys;
+
+ stext_phys = __pa_symbol(_stext);
+
+ // Relocatable kernel running at non-zero real address
+ if (stext_phys != 0) {
+ // The end of interrupts code at zero is a rodata boundary
+ unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys;
+ if (addr < end_intr)
+ return end_intr;
+
+ // Start of relocated kernel text is a rodata boundary
+ if (addr < stext_phys)
+ return stext_phys;
+ }
+
if (addr < __pa_symbol(__srwx_boundary))
return __pa_symbol(__srwx_boundary);
#endif
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 100e97daf76b..9d229ef7f86e 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -22,7 +22,7 @@
* Used to avoid races in counting the nest-pmu units during hotplug
* register and unregister
*/
-static DEFINE_SPINLOCK(nest_init_lock);
+static DEFINE_MUTEX(nest_init_lock);
static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
static struct imc_pmu **per_nest_pmu_arr;
static cpumask_t nest_imc_cpumask;
@@ -1629,7 +1629,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
{
if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
- spin_lock(&nest_init_lock);
+ mutex_lock(&nest_init_lock);
if (nest_pmus == 1) {
cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
kfree(nest_imc_refc);
@@ -1639,7 +1639,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
if (nest_pmus > 0)
nest_pmus--;
- spin_unlock(&nest_init_lock);
+ mutex_unlock(&nest_init_lock);
}
/* Free core_imc memory */
@@ -1796,11 +1796,11 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
* rest. To handle the cpuhotplug callback unregister, we track
* the number of nest pmus in "nest_pmus".
*/
- spin_lock(&nest_init_lock);
+ mutex_lock(&nest_init_lock);
if (nest_pmus == 0) {
ret = init_nest_pmu_ref();
if (ret) {
- spin_unlock(&nest_init_lock);
+ mutex_unlock(&nest_init_lock);
kfree(per_nest_pmu_arr);
per_nest_pmu_arr = NULL;
goto err_free_mem;
@@ -1808,7 +1808,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
/* Register for cpu hotplug notification. */
ret = nest_pmu_cpumask_init();
if (ret) {
- spin_unlock(&nest_init_lock);
+ mutex_unlock(&nest_init_lock);
kfree(nest_imc_refc);
kfree(per_nest_pmu_arr);
per_nest_pmu_arr = NULL;
@@ -1816,7 +1816,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
}
}
nest_pmus++;
- spin_unlock(&nest_init_lock);
+ mutex_unlock(&nest_init_lock);
break;
case IMC_DOMAIN_CORE:
ret = core_imc_pmu_cpumask_init();
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index faf2c2177094..82153960ac00 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -80,6 +80,9 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
KBUILD_CFLAGS += -fno-omit-frame-pointer
endif
+# Avoid generating .eh_frame sections.
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 86328e3acb02..64ad1937e714 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -70,7 +70,6 @@ static_assert(RISCV_ISA_EXT_ID_MAX <= RISCV_ISA_EXT_MAX);
*/
enum riscv_isa_ext_key {
RISCV_ISA_EXT_KEY_FPU, /* For 'F' and 'D' */
- RISCV_ISA_EXT_KEY_ZIHINTPAUSE,
RISCV_ISA_EXT_KEY_SVINVAL,
RISCV_ISA_EXT_KEY_MAX,
};
@@ -91,8 +90,6 @@ static __always_inline int riscv_isa_ext2key(int num)
return RISCV_ISA_EXT_KEY_FPU;
case RISCV_ISA_EXT_d:
return RISCV_ISA_EXT_KEY_FPU;
- case RISCV_ISA_EXT_ZIHINTPAUSE:
- return RISCV_ISA_EXT_KEY_ZIHINTPAUSE;
case RISCV_ISA_EXT_SVINVAL:
return RISCV_ISA_EXT_KEY_SVINVAL;
default:
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 4eba9a98d0e3..3e01f4f3ab08 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -721,6 +721,10 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}
+
+#define pmdp_collapse_flush pmdp_collapse_flush
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
index fa70cfe507aa..14f5d27783b8 100644
--- a/arch/riscv/include/asm/vdso/processor.h
+++ b/arch/riscv/include/asm/vdso/processor.h
@@ -4,30 +4,26 @@
#ifndef __ASSEMBLY__
-#include <linux/jump_label.h>
#include <asm/barrier.h>
-#include <asm/hwcap.h>
static inline void cpu_relax(void)
{
- if (!static_branch_likely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_ZIHINTPAUSE])) {
#ifdef __riscv_muldiv
- int dummy;
- /* In lieu of a halt instruction, induce a long-latency stall. */
- __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+ int dummy;
+ /* In lieu of a halt instruction, induce a long-latency stall. */
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
#endif
- } else {
- /*
- * Reduce instruction retirement.
- * This assumes the PC changes.
- */
-#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
- __asm__ __volatile__ ("pause");
+
+#ifdef __riscv_zihintpause
+ /*
+ * Reduce instruction retirement.
+ * This assumes the PC changes.
+ */
+ __asm__ __volatile__ ("pause");
#else
- /* Encoding of the pause instruction */
- __asm__ __volatile__ (".4byte 0x100000F");
+ /* Encoding of the pause instruction */
+ __asm__ __volatile__ (".4byte 0x100000F");
#endif
- }
barrier();
}
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
index f21592d20306..2bedec37d092 100644
--- a/arch/riscv/kernel/probes/kprobes.c
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -48,15 +48,35 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
post_kprobe_handler(p, kcb, regs);
}
+static bool __kprobes arch_check_kprobe(struct kprobe *p)
+{
+ unsigned long tmp = (unsigned long)p->addr - p->offset;
+ unsigned long addr = (unsigned long)p->addr;
+
+ while (tmp <= addr) {
+ if (tmp == addr)
+ return true;
+
+ tmp += GET_INSN_LENGTH(*(u16 *)tmp);
+ }
+
+ return false;
+}
+
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
- unsigned long probe_addr = (unsigned long)p->addr;
+ u16 *insn = (u16 *)p->addr;
+
+ if ((unsigned long)insn & 0x1)
+ return -EILSEQ;
- if (probe_addr & 0x1)
+ if (!arch_check_kprobe(p))
return -EILSEQ;
/* copy instruction */
- p->opcode = *p->addr;
+ p->opcode = (kprobe_opcode_t)(*insn++);
+ if (GET_INSN_LENGTH(p->opcode) == 4)
+ p->opcode |= (kprobe_opcode_t)(*insn) << 16;
/* decode instruction */
switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
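The new arch_check_kprobe() walks from the start of the symbol (p->addr - p->offset) one instruction at a time using GET_INSN_LENGTH, so a probe may only land on a real instruction boundary in RISC-V's mixed 2/4-byte (RVC) stream. A sketch of the same walk over a byte array; insn_len() is a stand-in for the kernel macro and relies on the encoding rule that a first halfword with low bits 11 means a 4-byte instruction:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static unsigned int insn_len(uint16_t first_halfword)
    {
        return (first_halfword & 0x3) == 0x3 ? 4 : 2;
    }

    /* Walk from the function start; the target offset is valid only if the
     * walk lands exactly on it. */
    static bool on_insn_boundary(const uint8_t *func_start, size_t func_size,
                                 size_t target_off)
    {
        size_t off = 0;
        uint16_t hw;

        while (off <= target_off && off < func_size) {
            if (off == target_off)
                return true;
            memcpy(&hw, func_start + off, sizeof(hw));
            off += insn_len(hw);
        }
        return false;
    }

    int main(void)
    {
        /* 2-byte insn, then a 4-byte insn (low bits 11), then a 2-byte insn */
        uint8_t code[] = { 0x01, 0x00, 0x13, 0x00, 0x00, 0x00, 0x01, 0x00 };

        printf("offset 2: %d\n", on_insn_boundary(code, sizeof(code), 2)); /* 1 */
        printf("offset 4: %d\n", on_insn_boundary(code, sizeof(code), 4)); /* 0 */
        return 0;
    }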
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 75c8dd64fc48..f9a5a7c90ff0 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -32,6 +32,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = (unsigned long)__builtin_frame_address(0);
sp = current_stack_pointer;
pc = (unsigned long)walk_stackframe;
+ level = -1;
} else {
/* task blocked in __switch_to */
fp = task->thread.s[0];
@@ -43,7 +44,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
unsigned long low, high;
struct stackframe *frame;
- if (unlikely(!__kernel_text_address(pc) || (level++ >= 1 && !fn(arg, pc))))
+ if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
break;
/* Validate frame pointer */
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 3cc07ed45aeb..fcd6145fbead 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -90,8 +90,10 @@ void flush_icache_pte(pte_t pte)
if (PageHuge(page))
page = compound_head(page);
- if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+ if (!test_bit(PG_dcache_clean, &page->flags)) {
flush_icache_all();
+ set_bit(PG_dcache_clean, &page->flags);
+ }
}
#endif /* CONFIG_MMU */
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index 6645ead1a7c1..fef4e7328e49 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -81,3 +81,23 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
+
+ VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+ VM_BUG_ON(pmd_trans_huge(*pmdp));
+ /*
+ * When leaf PTE entries (regular pages) are collapsed into a leaf
+ * PMD entry (huge page), a valid non-leaf PTE is converted into a
+ * valid leaf PTE at the level 1 page table. Since the sfence.vma
+ * forms that specify an address only apply to leaf PTEs, we need a
+ * global flush here. collapse_huge_page() assumes these flushes are
+ * eager, so just do the fence here.
+ */
+ flush_tlb_mm(vma->vm_mm);
+ return pmd;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/s390/boot/decompressor.c b/arch/s390/boot/decompressor.c
index 8dcd7af2911a..b519a1f045d8 100644
--- a/arch/s390/boot/decompressor.c
+++ b/arch/s390/boot/decompressor.c
@@ -80,6 +80,6 @@ void *decompress_kernel(void)
void *output = (void *)decompress_offset;
__decompress(_compressed_start, _compressed_end - _compressed_start,
- NULL, NULL, output, 0, NULL, error);
+ NULL, NULL, output, vmlinux.image_size, NULL, error);
return output;
}
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 3161b9ccd2a5..b6276a3521d7 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -4,6 +4,7 @@
* Written by Niibe Yutaka and Paul Mundt
*/
OUTPUT_ARCH(sh)
+#define RUNTIME_DISCARD_EXIT
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/vmlinux.lds.h>
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index b049d950612f..ca97442e8d49 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -39,7 +39,20 @@ static __always_inline unsigned long native_get_debugreg(int regno)
asm("mov %%db6, %0" :"=r" (val));
break;
case 7:
- asm("mov %%db7, %0" :"=r" (val));
+ /*
+ * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
+ * with other code.
+ *
+ * This is needed because a DR7 access can cause a #VC exception
+ * when running under SEV-ES. Taking a #VC exception is not a
+ * safe thing to do just anywhere in the entry code and
+ * re-ordering might place the access into an unsafe location.
+ *
+ * This happened in the NMI handler, where the DR7 read was
+ * re-ordered to happen before the call to sev_es_ist_enter(),
+ * causing stack recursion.
+ */
+ asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
break;
default:
BUG();
@@ -66,7 +79,16 @@ static __always_inline void native_set_debugreg(int regno, unsigned long value)
asm("mov %0, %%db6" ::"r" (value));
break;
case 7:
- asm("mov %0, %%db7" ::"r" (value));
+ /*
+ * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
+ * with other code.
+ *
+ * While it didn't happen with a DR7 write (see the DR7 read
+ * comment above which explains where it happened), add the
+ * __FORCE_ORDER here too to avoid similar problems in the
+ * future.
+ */
+ asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER);
break;
default:
BUG();
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 347707d459c6..cbaf174d8efd 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -123,6 +123,8 @@
#define INTEL_FAM6_METEORLAKE 0xAC
#define INTEL_FAM6_METEORLAKE_L 0xAA
+#define INTEL_FAM6_LUNARLAKE_M 0xBD
+
/* "Small Core" Processors (Atom/E-Core) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index b36f3c367cb2..695873c0f50b 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -625,7 +625,7 @@ static int prepare_emulation(struct kprobe *p, struct insn *insn)
/* 1 byte conditional jump */
p->ainsn.emulate_op = kprobe_emulate_jcc;
p->ainsn.jcc.type = opcode & 0xf;
- p->ainsn.rel32 = *(char *)insn->immediate.bytes;
+ p->ainsn.rel32 = insn->immediate.value;
break;
case 0x0f:
opcode = insn->opcode.bytes[1];
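The kprobes change stores insn->immediate.value, which the instruction decoder has already sign-extended, instead of reading the raw immediate byte through a plain char (whose signedness is implementation-defined). The point is that a Jcc rel8 displacement is a signed 8-bit value and must stay negative when widened into the 32-bit rel32 field. A minimal sketch of that widening:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t imm_byte = 0xF6;            /* rel8 = -10 */
        int32_t rel32 = (int8_t)imm_byte;   /* sign-extend, as the decoder does */

        printf("rel32 = %d\n", rel32);      /* -10, not 246 */
        return 0;
    }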
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 40d156a31676..00a831d56c50 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -721,6 +721,7 @@ void arch_cpu_idle(void)
{
x86_idle();
}
+EXPORT_SYMBOL_GPL(arch_cpu_idle);
/*
* We use this if we don't have any better idle routine..
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 7d9b15f0dbd5..0fbde0fc0628 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -769,8 +769,8 @@ static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
* request from the old cgroup.
*/
bfq_put_cooperator(sync_bfqq);
- bfq_release_process_ref(bfqd, sync_bfqq);
bic_set_bfqq(bic, NULL, true);
+ bfq_release_process_ref(bfqd, sync_bfqq);
}
}
}
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index ccf2204477a5..380e9bda2e57 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5425,9 +5425,11 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
bfqq = bic_to_bfqq(bic, false);
if (bfqq) {
- bfq_release_process_ref(bfqd, bfqq);
+ struct bfq_queue *old_bfqq = bfqq;
+
bfqq = bfq_get_queue(bfqd, bio, false, bic, true);
bic_set_bfqq(bic, bfqq, false);
+ bfq_release_process_ref(bfqd, old_bfqq);
}
bfqq = bic_to_bfqq(bic, true);
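Both bfq hunks reorder the queue switch so the old queue's process reference is dropped only after the bic pointer has been repointed (to NULL or to the freshly obtained queue), closing the window in which bic could still reference a queue whose last reference is gone. A toy sketch of the publish-then-put ordering with a hand-rolled refcount; the types and helpers are illustrative, not bfq code:

    #include <stdio.h>
    #include <stdlib.h>

    struct queue {
        int refcount;
        int id;
    };

    static struct queue *queue_get(int id)
    {
        struct queue *q = calloc(1, sizeof(*q));

        if (!q)
            exit(1);
        q->refcount = 1;
        q->id = id;
        return q;
    }

    static void queue_put(struct queue *q)
    {
        if (q && --q->refcount == 0)
            free(q);
    }

    int main(void)
    {
        struct queue *current_q = queue_get(1);
        struct queue *old = current_q;

        /* Publish the replacement first, then drop the old reference,
         * so nobody ever follows a pointer to a freed queue. */
        current_q = queue_get(2);
        queue_put(old);

        printf("now using queue %d\n", current_q->id);
        queue_put(current_q);
        return 0;
    }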
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4c94a6560f62..9ac1efb053e0 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -2001,6 +2001,10 @@ void blk_cgroup_bio_start(struct bio *bio)
struct blkg_iostat_set *bis;
unsigned long flags;
+ /* Root-level stats are sourced from system-wide IO stats */
+ if (!cgroup_parent(blkcg->css.cgroup))
+ return;
+
cpu = get_cpu();
bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
flags = u64_stats_update_begin_irqsave(&bis->sync);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9d463f7563bc..9c8dc70020bc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4069,8 +4069,9 @@ EXPORT_SYMBOL(blk_mq_init_queue);
* blk_mq_destroy_queue - shutdown a request queue
* @q: request queue to shutdown
*
- * This shuts down a request queue allocated by blk_mq_init_queue() and drops
- * the initial reference. All future requests will failed with -ENODEV.
+ * This shuts down a request queue allocated by blk_mq_init_queue(). All future
+ * requests will be failed with -ENODEV. The caller is responsible for dropping
+ * the reference from blk_mq_init_queue() by calling blk_put_queue().
*
* Context: can sleep
*/
diff --git a/certs/Makefile b/certs/Makefile
index 9486ed924731..799ad7b9e68a 100644
--- a/certs/Makefile
+++ b/certs/Makefile
@@ -23,8 +23,8 @@ $(obj)/blacklist_hash_list: $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) FORCE
targets += blacklist_hash_list
quiet_cmd_extract_certs = CERT $@
- cmd_extract_certs = $(obj)/extract-cert $(extract-cert-in) $@
-extract-cert-in = $(or $(filter-out $(obj)/extract-cert, $(real-prereqs)),"")
+ cmd_extract_certs = $(obj)/extract-cert "$(extract-cert-in)" $@
+extract-cert-in = $(filter-out $(obj)/extract-cert, $(real-prereqs))
$(obj)/system_certificates.o: $(obj)/x509_certificate_list
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 0f17b1c32718..02d83c807271 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1154,6 +1154,19 @@ int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
}
/**
+ * cppc_get_epp_perf - Get the epp register value.
+ * @cpunum: CPU from which to get epp preference value.
+ * @epp_perf: Return address.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
+{
+ return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
+}
+EXPORT_SYMBOL_GPL(cppc_get_epp_perf);
+
+/**
* cppc_get_perf_caps - Get a CPU's performance capabilities.
* @cpunum: CPU from which to get capabilities info.
* @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
@@ -1365,6 +1378,60 @@ out_err:
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
+/*
+ * Set Energy Performance Preference Register value through
+ * Performance Controls Interface
+ */
+int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
+{
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cpc_register_resource *epp_set_reg;
+ struct cpc_register_resource *auto_sel_reg;
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret;
+
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
+ return -ENODEV;
+ }
+
+ auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
+ epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];
+
+ if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) {
+ if (pcc_ss_id < 0) {
+ pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
+ return -ENODEV;
+ }
+
+ if (CPC_SUPPORTED(auto_sel_reg)) {
+ ret = cpc_write(cpu, auto_sel_reg, enable);
+ if (ret)
+ return ret;
+ }
+
+ if (CPC_SUPPORTED(epp_set_reg)) {
+ ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
+ if (ret)
+ return ret;
+ }
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+ /* after writing CPC, transfer the ownership of PCC to platform */
+ ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+ up_write(&pcc_ss_data->pcc_lock);
+ } else {
+ ret = -ENOTSUPP;
+ pr_debug("_CPC in PCC is not supported\n");
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
+
/**
* cppc_set_enable - Set to enable CPPC on the processor by writing the
* Continuous Performance Control package EnableRegister field.
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index f1cc5ec6a3b6..4e48d6db05eb 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3297,8 +3297,8 @@ void acpi_nfit_shutdown(void *data)
mutex_lock(&acpi_desc->init_mutex);
set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
- cancel_delayed_work_sync(&acpi_desc->dwork);
mutex_unlock(&acpi_desc->init_mutex);
+ cancel_delayed_work_sync(&acpi_desc->dwork);
/*
* Bounce the nvdimm bus lock to make sure any in-flight
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 884ae73b11ea..2ea572628b1c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3109,7 +3109,7 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
*/
if (spd > 1)
mask &= (1 << (spd - 1)) - 1;
- else
+ else if (link->sata_spd)
return -EINVAL;
/* were we already at the bottom? */
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 967bcf9d415e..6097644ebdc5 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -220,13 +220,10 @@ static void genpd_debug_add(struct generic_pm_domain *genpd);
static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
- struct dentry *d;
-
if (!genpd_debugfs_dir)
return;
- d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
- debugfs_remove(d);
+ debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
}
static void genpd_update_accounting(struct generic_pm_domain *genpd)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 50e726b6c2cf..b29be7d4d7d0 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1864,6 +1864,10 @@ static bool pm_runtime_need_not_resume(struct device *dev)
* sure the device is put into low power state and it should only be used during
* system-wide PM transitions to sleep states. It assumes that the analogous
* pm_runtime_force_resume() will be used to resume the device.
+ *
+ * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
+ * state where this function has called the ->runtime_suspend callback but the
+ * PM core marks the driver as runtime active.
*/
int pm_runtime_force_suspend(struct device *dev)
{
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index e54693204630..6368b56eacf1 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -137,7 +137,7 @@ struct ublk_device {
char *__queues;
- unsigned short queue_size;
+ unsigned int queue_size;
struct ublksrv_ctrl_dev_info dev_info;
struct blk_mq_tag_set tag_set;
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 3aa91aed3bf7..226e87b85116 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -857,7 +857,13 @@ static int __init sunxi_rsb_init(void)
return ret;
}
- return platform_driver_register(&sunxi_rsb_driver);
+ ret = platform_driver_register(&sunxi_rsb_driver);
+ if (ret) {
+ bus_unregister(&sunxi_rsb_bus);
+ return ret;
+ }
+
+ return 0;
}
module_init(sunxi_rsb_init);
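The sunxi-rsb init path now unwinds bus_unregister() when platform_driver_register() fails; the general rule is that each successful registration needs a matching teardown on every later error path. A compact sketch of that unwind ordering with stand-in registration functions (the names and the failure injection are invented for the example):

    #include <stdio.h>

    static int register_bus(void)                { return 0; }
    static void unregister_bus(void)             { puts("bus unregistered"); }
    static int register_driver(int should_fail)  { return should_fail ? -1 : 0; }

    static int init(int driver_fails)
    {
        int ret;

        ret = register_bus();
        if (ret)
            return ret;

        ret = register_driver(driver_fails);
        if (ret) {
            unregister_bus();   /* undo the earlier step on failure */
            return ret;
        }
        return 0;
    }

    int main(void)
    {
        printf("init ok: %d\n", init(0));
        printf("init fail: %d\n", init(1));
        return 0;
    }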
diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c
index ecd395ac8a28..e407f00bd594 100644
--- a/drivers/clk/ingenic/jz4760-cgu.c
+++ b/drivers/clk/ingenic/jz4760-cgu.c
@@ -58,7 +58,7 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
unsigned long rate, unsigned long parent_rate,
unsigned int *pm, unsigned int *pn, unsigned int *pod)
{
- unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 2;
+ unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 1;
/* The frequency after the N divider must be between 1 and 50 MHz. */
n = parent_rate / (1 * MHZ);
@@ -66,19 +66,17 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
/* The N divider must be >= 2. */
n = clamp_val(n, 2, 1 << pll_info->n_bits);
- for (;; n >>= 1) {
- od = (unsigned int)-1;
+ rate /= MHZ;
+ parent_rate /= MHZ;
- do {
- m = (rate / MHZ) * (1 << ++od) * n / (parent_rate / MHZ);
- } while ((m > m_max || m & 1) && (od < 4));
-
- if (od < 4 && m >= 4 && m <= m_max)
- break;
+ for (m = m_max; m >= m_max && n >= 2; n--) {
+ m = rate * n / parent_rate;
+ od = m & 1;
+ m <<= od;
}
*pm = m;
- *pn = n;
+ *pn = n + 1;
*pod = 1 << od;
}
diff --git a/drivers/clk/microchip/clk-mpfs-ccc.c b/drivers/clk/microchip/clk-mpfs-ccc.c
index 32aae880a14f..0ddc73e07be4 100644
--- a/drivers/clk/microchip/clk-mpfs-ccc.c
+++ b/drivers/clk/microchip/clk-mpfs-ccc.c
@@ -164,12 +164,11 @@ static int mpfs_ccc_register_outputs(struct device *dev, struct mpfs_ccc_out_hw_
for (unsigned int i = 0; i < num_clks; i++) {
struct mpfs_ccc_out_hw_clock *out_hw = &out_hws[i];
- char *name = devm_kzalloc(dev, 23, GFP_KERNEL);
+ char *name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%u", parent->name, i);
if (!name)
return -ENOMEM;
- snprintf(name, 23, "%s_out%u", parent->name, i);
out_hw->divider.hw.init = CLK_HW_INIT_HW(name, &parent->hw, &clk_divider_ops, 0);
out_hw->divider.reg = data->pll_base[i / MPFS_CCC_OUTPUTS_PER_PLL] +
out_hw->reg_offset;
@@ -201,14 +200,13 @@ static int mpfs_ccc_register_plls(struct device *dev, struct mpfs_ccc_pll_hw_clo
for (unsigned int i = 0; i < num_clks; i++) {
struct mpfs_ccc_pll_hw_clock *pll_hw = &pll_hws[i];
- char *name = devm_kzalloc(dev, 18, GFP_KERNEL);
- if (!name)
+ pll_hw->name = devm_kasprintf(dev, GFP_KERNEL, "ccc%s_pll%u",
+ strchrnul(dev->of_node->full_name, '@'), i);
+ if (!pll_hw->name)
return -ENOMEM;
pll_hw->base = data->pll_base[i];
- snprintf(name, 18, "ccc%s_pll%u", strchrnul(dev->of_node->full_name, '@'), i);
- pll_hw->name = (const char *)name;
pll_hw->hw.init = CLK_HW_INIT_PARENTS_DATA_FIXED_SIZE(pll_hw->name,
pll_hw->parents,
&mpfs_ccc_pll_ops, 0);
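The clk-mpfs-ccc hunks replace the kzalloc()+snprintf() pairs with devm_kasprintf(), which sizes the allocation from the format itself instead of the hand-counted 23 and 18 byte buffers. The closest plain-C analogue is asprintf(), a GNU/BSD extension, shown here as a small sketch with an invented name pattern:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char *name;

        /* allocate-and-format in one step; no hand-counted buffer size */
        if (asprintf(&name, "%s_out%u", "pll0", 3u) < 0)
            return 1;

        puts(name);     /* pll0_out3 */
        free(name);
        return 0;
    }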
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 2a84fc63371e..76aa1336e2be 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -3,7 +3,6 @@ menu "CPU Frequency scaling"
config CPU_FREQ
bool "CPU Frequency scaling"
- select SRCU
help
CPU Frequency scaling allows you to change the clock speed of
CPUs on the fly. This is a nice method to save power, because
@@ -271,15 +270,6 @@ config LOONGSON2_CPUFREQ
Loongson2F and its successors support this feature.
If in doubt, say N.
-
-config LOONGSON1_CPUFREQ
- tristate "Loongson1 CPUFreq Driver"
- depends on LOONGSON1_LS1B
- help
- This option adds a CPUFreq driver for loongson1 processors which
- support software configurable cpu frequency.
-
- If in doubt, say N.
endif
if SPARC64
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 32a7029e25ed..4a806cc5265b 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -111,7 +111,6 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o
obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o
obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
-obj-$(CONFIG_LOONGSON1_CPUFREQ) += loongson1-cpufreq.o
obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index c17bd845f5fc..45c88894fd8e 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -59,8 +59,171 @@
* we disable it by default to go acpi-cpufreq on these processors and add a
* module parameter to be able to enable it manually for debugging.
*/
+static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
-static int cppc_load __initdata;
+static struct cpufreq_driver amd_pstate_epp_driver;
+static int cppc_state = AMD_PSTATE_DISABLE;
+struct kobject *amd_pstate_kobj;
+
+/*
+ * AMD Energy Preference Performance (EPP)
+ * The EPP is used in the CCLK DPM controller to drive
+ * the frequency that a core is going to operate during
+ * short periods of activity. EPP values will be utilized for
+ * different OS profiles (balanced, performance, power savings)
+ * display strings corresponding to EPP index in the
+ * energy_perf_strings[]
+ * index String
+ *-------------------------------------
+ * 0 default
+ * 1 performance
+ * 2 balance_performance
+ * 3 balance_power
+ * 4 power
+ */
+enum energy_perf_value_index {
+ EPP_INDEX_DEFAULT = 0,
+ EPP_INDEX_PERFORMANCE,
+ EPP_INDEX_BALANCE_PERFORMANCE,
+ EPP_INDEX_BALANCE_POWERSAVE,
+ EPP_INDEX_POWERSAVE,
+};
+
+static const char * const energy_perf_strings[] = {
+ [EPP_INDEX_DEFAULT] = "default",
+ [EPP_INDEX_PERFORMANCE] = "performance",
+ [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
+ [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
+ [EPP_INDEX_POWERSAVE] = "power",
+ NULL
+};
+
+static unsigned int epp_values[] = {
+ [EPP_INDEX_DEFAULT] = 0,
+ [EPP_INDEX_PERFORMANCE] = AMD_CPPC_EPP_PERFORMANCE,
+ [EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
+ [EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
+ [EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
+ };
+
+static inline int get_mode_idx_from_str(const char *str, size_t size)
+{
+ int i;
+
+ for (i=0; i < AMD_PSTATE_MAX; i++) {
+ if (!strncmp(str, amd_pstate_mode_string[i], size))
+ return i;
+ }
+ return -EINVAL;
+}
+
+static DEFINE_MUTEX(amd_pstate_limits_lock);
+static DEFINE_MUTEX(amd_pstate_driver_lock);
+
+static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+{
+ u64 epp;
+ int ret;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (!cppc_req_cached) {
+ epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
+ &cppc_req_cached);
+ if (epp)
+ return epp;
+ }
+ epp = (cppc_req_cached >> 24) & 0xFF;
+ } else {
+ ret = cppc_get_epp_perf(cpudata->cpu, &epp);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return -EIO;
+ }
+ }
+
+ return (s16)(epp & 0xff);
+}
+
+static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
+{
+ s16 epp;
+ int index = -EINVAL;
+
+ epp = amd_pstate_get_epp(cpudata, 0);
+ if (epp < 0)
+ return epp;
+
+ switch (epp) {
+ case AMD_CPPC_EPP_PERFORMANCE:
+ index = EPP_INDEX_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
+ index = EPP_INDEX_BALANCE_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_POWERSAVE:
+ index = EPP_INDEX_BALANCE_POWERSAVE;
+ break;
+ case AMD_CPPC_EPP_POWERSAVE:
+ index = EPP_INDEX_POWERSAVE;
+ break;
+ default:
+ break;
+ }
+
+ return index;
+}
+
+static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+{
+ int ret;
+ struct cppc_perf_ctrls perf_ctrls;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ u64 value = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ if (!ret)
+ cpudata->epp_cached = epp;
+ } else {
+ perf_ctrls.energy_perf = epp;
+ ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+ if (ret) {
+ pr_debug("failed to set energy perf value (%d)\n", ret);
+ return ret;
+ }
+ cpudata->epp_cached = epp;
+ }
+
+ return ret;
+}
+
+static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
+ int pref_index)
+{
+ int epp = -EINVAL;
+ int ret;
+
+ if (!pref_index) {
+ pr_debug("EPP pref_index is invalid\n");
+ return -EINVAL;
+ }
+
+ if (epp == -EINVAL)
+ epp = epp_values[pref_index];
+
+ if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ pr_debug("EPP cannot be set under performance policy\n");
+ return -EBUSY;
+ }
+
+ ret = amd_pstate_set_epp(cpudata, epp);
+
+ return ret;
+}
static inline int pstate_enable(bool enable)
{
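In the EPP additions above, store_energy_performance_preference() matches the user-supplied string against energy_perf_strings[] and the resulting index then selects an entry from epp_values[] in amd_pstate_set_energy_pref_index(). A small sketch of that string-to-index lookup; pref_index_from_str() is a stand-in for the kernel's match_string(), and the table contents are copied from the hunk above:

    #include <stdio.h>
    #include <string.h>

    static const char * const energy_perf_strings[] = {
        "default",
        "performance",
        "balance_performance",
        "balance_power",
        "power",
        NULL
    };

    static int pref_index_from_str(const char *str)
    {
        int i;

        for (i = 0; energy_perf_strings[i]; i++) {
            if (!strcmp(str, energy_perf_strings[i]))
                return i;
        }
        return -1;      /* unknown preference */
    }

    int main(void)
    {
        printf("%d\n", pref_index_from_str("balance_power"));  /* 3 */
        printf("%d\n", pref_index_from_str("turbo"));          /* -1 */
        return 0;
    }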
@@ -70,11 +233,21 @@ static inline int pstate_enable(bool enable)
static int cppc_enable(bool enable)
{
int cpu, ret = 0;
+ struct cppc_perf_ctrls perf_ctrls;
for_each_present_cpu(cpu) {
ret = cppc_set_enable(cpu, enable);
if (ret)
return ret;
+
+ /* Enable autonomous mode for EPP */
+ if (cppc_state == AMD_PSTATE_ACTIVE) {
+ /* Set desired perf as zero to allow EPP firmware control */
+ perf_ctrls.desired_perf = 0;
+ ret = cppc_set_perf(cpu, &perf_ctrls);
+ if (ret)
+ return ret;
+ }
}
return ret;
@@ -418,7 +591,7 @@ static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
return;
cpudata->boost_supported = true;
- amd_pstate_driver.boost_enabled = true;
+ current_pstate_driver->boost_enabled = true;
}
static void amd_perf_ctl_reset(unsigned int cpu)
@@ -501,6 +674,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = cpudata;
amd_pstate_boost_init(cpudata);
+ if (!current_pstate_driver->adjust_perf)
+ current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
return 0;
@@ -561,7 +736,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
if (max_freq < 0)
return max_freq;
- return sprintf(&buf[0], "%u\n", max_freq);
+ return sysfs_emit(buf, "%u\n", max_freq);
}
static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
@@ -574,7 +749,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
if (freq < 0)
return freq;
- return sprintf(&buf[0], "%u\n", freq);
+ return sysfs_emit(buf, "%u\n", freq);
}
/*
@@ -589,13 +764,151 @@ static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
perf = READ_ONCE(cpudata->highest_perf);
- return sprintf(&buf[0], "%u\n", perf);
+ return sysfs_emit(buf, "%u\n", perf);
+}
+
+static ssize_t show_energy_performance_available_preferences(
+ struct cpufreq_policy *policy, char *buf)
+{
+ int i = 0;
+ int offset = 0;
+
+ while (energy_perf_strings[i] != NULL)
+ offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
+
+ offset += sysfs_emit_at(buf, offset, "\n");
+
+ return offset;
+}
+
+static ssize_t store_energy_performance_preference(
+ struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ char str_preference[21];
+ ssize_t ret;
+
+ ret = sscanf(buf, "%20s", str_preference);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = match_string(energy_perf_strings, -1, str_preference);
+ if (ret < 0)
+ return -EINVAL;
+
+ mutex_lock(&amd_pstate_limits_lock);
+ ret = amd_pstate_set_energy_pref_index(cpudata, ret);
+ mutex_unlock(&amd_pstate_limits_lock);
+
+ return ret ?: count;
+}
+
+static ssize_t show_energy_performance_preference(
+ struct cpufreq_policy *policy, char *buf)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int preference;
+
+ preference = amd_pstate_get_energy_pref_index(cpudata);
+ if (preference < 0)
+ return preference;
+
+ return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
+}
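
These two attributes are plain cpufreq freq_attrs, so they surface per policy in sysfs. A hedged user-space sketch follows; the CPU number and path are assumptions based on the standard cpufreq per-CPU layout:

#include <stdio.h>

#define EPP_PATH "/sys/devices/system/cpu/cpu0/cpufreq/energy_performance_preference"

int main(void)
{
	char cur[64] = "";
	FILE *f = fopen(EPP_PATH, "r");

	if (f) {
		if (fgets(cur, sizeof(cur), f))
			printf("current preference: %s", cur);
		fclose(f);
	}

	f = fopen(EPP_PATH, "w");			/* requires root */
	if (f) {
		fputs("balance_performance\n", f);	/* one of energy_perf_strings[] */
		fclose(f);
	}
	return 0;
}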
+
+static ssize_t amd_pstate_show_status(char *buf)
+{
+ if (!current_pstate_driver)
+ return sysfs_emit(buf, "disable\n");
+
+ return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
+}
+
+static void amd_pstate_driver_cleanup(void)
+{
+ current_pstate_driver = NULL;
+}
+
+static int amd_pstate_update_status(const char *buf, size_t size)
+{
+ int ret = 0;
+ int mode_idx;
+
+ if (size > 7 || size < 6)
+ return -EINVAL;
+ mode_idx = get_mode_idx_from_str(buf, size);
+
+ switch (mode_idx) {
+ case AMD_PSTATE_DISABLE:
+ if (!current_pstate_driver)
+ return -EINVAL;
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ return -EBUSY;
+ cpufreq_unregister_driver(current_pstate_driver);
+ amd_pstate_driver_cleanup();
+ break;
+ case AMD_PSTATE_PASSIVE:
+ if (current_pstate_driver) {
+ if (current_pstate_driver == &amd_pstate_driver)
+ return 0;
+ cpufreq_unregister_driver(current_pstate_driver);
+ cppc_state = AMD_PSTATE_PASSIVE;
+ current_pstate_driver = &amd_pstate_driver;
+ }
+
+ ret = cpufreq_register_driver(current_pstate_driver);
+ break;
+ case AMD_PSTATE_ACTIVE:
+ if (current_pstate_driver) {
+ if (current_pstate_driver == &amd_pstate_epp_driver)
+ return 0;
+ cpufreq_unregister_driver(current_pstate_driver);
+ current_pstate_driver = &amd_pstate_epp_driver;
+ cppc_state = AMD_PSTATE_ACTIVE;
+ }
+
+ ret = cpufreq_register_driver(current_pstate_driver);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static ssize_t show_status(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ ssize_t ret;
+
+ mutex_lock(&amd_pstate_driver_lock);
+ ret = amd_pstate_show_status(buf);
+ mutex_unlock(&amd_pstate_driver_lock);
+
+ return ret;
+}
+
+static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
+ const char *buf, size_t count)
+{
+ char *p = memchr(buf, '\n', count);
+ int ret;
+
+ mutex_lock(&amd_pstate_driver_lock);
+ ret = amd_pstate_update_status(buf, p ? p - buf : count);
+ mutex_unlock(&amd_pstate_driver_lock);
+
+ return ret < 0 ? ret : count;
}
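
For completeness, a hedged sketch of driving the global mode switch from user space; the path follows from the kobject_create_and_add("amd_pstate", ...) call later in this patch, and store_status() itself strips the trailing newline:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *mode = "active\n";	/* accepted: "disable", "passive", "active" */
	int fd = open("/sys/devices/system/cpu/amd_pstate/status", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, mode, strlen(mode)) < 0)
		perror("write");
	close(fd);
	return 0;
}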
cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+cpufreq_freq_attr_rw(energy_performance_preference);
+cpufreq_freq_attr_ro(energy_performance_available_preferences);
+define_one_global_rw(status);
static struct freq_attr *amd_pstate_attr[] = {
&amd_pstate_max_freq,
@@ -604,6 +917,313 @@ static struct freq_attr *amd_pstate_attr[] = {
NULL,
};
+static struct freq_attr *amd_pstate_epp_attr[] = {
+ &amd_pstate_max_freq,
+ &amd_pstate_lowest_nonlinear_freq,
+ &amd_pstate_highest_perf,
+ &energy_performance_preference,
+ &energy_performance_available_preferences,
+ NULL,
+};
+
+static struct attribute *pstate_global_attributes[] = {
+ &status.attr,
+ NULL
+};
+
+static const struct attribute_group amd_pstate_global_attr_group = {
+ .attrs = pstate_global_attributes,
+};
+
+static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+{
+ int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+ struct amd_cpudata *cpudata;
+ struct device *dev;
+ u64 value;
+
+ /*
+ * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
+ * which is ideal for the initialization process.
+ */
+ amd_perf_ctl_reset(policy->cpu);
+ dev = get_cpu_device(policy->cpu);
+ if (!dev)
+ return -ENODEV;
+
+ cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
+ if (!cpudata)
+ return -ENOMEM;
+
+ cpudata->cpu = policy->cpu;
+ cpudata->epp_policy = 0;
+
+ ret = amd_pstate_init_perf(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ min_freq = amd_get_min_freq(cpudata);
+ max_freq = amd_get_max_freq(cpudata);
+ nominal_freq = amd_get_nominal_freq(cpudata);
+ lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
+ if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
+ dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
+ min_freq, max_freq);
+ ret = -EINVAL;
+ goto free_cpudata1;
+ }
+
+ policy->cpuinfo.min_freq = min_freq;
+ policy->cpuinfo.max_freq = max_freq;
+ /* It will be updated by the governor */
+ policy->cur = policy->cpuinfo.min_freq;
+
+ /* Initial processor data capability frequencies */
+ cpudata->max_freq = max_freq;
+ cpudata->min_freq = min_freq;
+ cpudata->nominal_freq = nominal_freq;
+ cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
+
+ policy->driver_data = cpudata;
+
+ cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+
+ /*
+ * Set the policy to powersave to provide a valid fallback value in case
+ * the default cpufreq governor is neither powersave nor performance.
+ */
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ policy->fast_switch_possible = true;
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ if (ret)
+ goto free_cpudata1;
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value);
+ if (ret)
+ goto free_cpudata1;
+ WRITE_ONCE(cpudata->cppc_cap1_cached, value);
+ }
+ amd_pstate_boost_init(cpudata);
+
+ return 0;
+
+free_cpudata1:
+ kfree(cpudata);
+ return ret;
+}
+
+static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
+{
+ pr_debug("CPU %d exiting\n", policy->cpu);
+ policy->fast_switch_possible = false;
+ return 0;
+}
+
+static void amd_pstate_epp_init(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct amd_cpudata *cpudata = policy->driver_data;
+ u32 max_perf, min_perf;
+ u64 value;
+ s16 epp;
+
+ max_perf = READ_ONCE(cpudata->highest_perf);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
+
+ value = READ_ONCE(cpudata->cppc_req_cached);
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ min_perf = max_perf;
+
+ /* Initial min/max values for CPPC Performance Controls Register */
+ value &= ~AMD_CPPC_MIN_PERF(~0L);
+ value |= AMD_CPPC_MIN_PERF(min_perf);
+
+ value &= ~AMD_CPPC_MAX_PERF(~0L);
+ value |= AMD_CPPC_MAX_PERF(max_perf);
+
+ /* The CPPC EPP feature requires setting the desired perf field to zero */
+ value &= ~AMD_CPPC_DES_PERF(~0L);
+ value |= AMD_CPPC_DES_PERF(0);
+
+ if (cpudata->epp_policy == cpudata->policy)
+ goto skip_epp;
+
+ cpudata->epp_policy = cpudata->policy;
+
+ /* Get BIOS pre-defined epp value */
+ epp = amd_pstate_get_epp(cpudata, value);
+ if (epp < 0) {
+ /*
+ * This return value can only be negative for shared_memory
+ * systems where EPP register read/write is not supported.
+ */
+ goto skip_epp;
+ }
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ epp = 0;
+
+ /* Set initial EPP value */
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ }
+
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+ amd_pstate_set_epp(cpudata, epp);
+skip_epp:
+ cpufreq_cpu_put(policy);
+}
+
+static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (!policy->cpuinfo.max_freq)
+ return -ENODEV;
+
+ pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
+ policy->cpuinfo.max_freq, policy->max);
+
+ cpudata->policy = policy->policy;
+
+ amd_pstate_epp_init(policy->cpu);
+
+ return 0;
+}
+
+static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+{
+ struct cppc_perf_ctrls perf_ctrls;
+ u64 value, max_perf;
+ int ret;
+
+ ret = amd_pstate_enable(true);
+ if (ret)
+ pr_err("failed to enable amd pstate during resume, return %d\n", ret);
+
+ value = READ_ONCE(cpudata->cppc_req_cached);
+ max_perf = READ_ONCE(cpudata->highest_perf);
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ } else {
+ perf_ctrls.max_perf = max_perf;
+ perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
+ cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ }
+}
+
+static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
+
+ if (cppc_state == AMD_PSTATE_ACTIVE) {
+ amd_pstate_epp_reenable(cpudata);
+ cpudata->suspended = false;
+ }
+
+ return 0;
+}
+
+static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ struct cppc_perf_ctrls perf_ctrls;
+ int min_perf;
+ u64 value;
+
+ min_perf = READ_ONCE(cpudata->lowest_perf);
+ value = READ_ONCE(cpudata->cppc_req_cached);
+
+ mutex_lock(&amd_pstate_limits_lock);
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
+
+ /* Set max perf same as min perf */
+ value &= ~AMD_CPPC_MAX_PERF(~0L);
+ value |= AMD_CPPC_MAX_PERF(min_perf);
+ value &= ~AMD_CPPC_MIN_PERF(~0L);
+ value |= AMD_CPPC_MIN_PERF(min_perf);
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ } else {
+ perf_ctrls.desired_perf = 0;
+ perf_ctrls.max_perf = min_perf;
+ perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
+ cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ }
+ mutex_unlock(&amd_pstate_limits_lock);
+}
+
+static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
+
+ if (cpudata->suspended)
+ return 0;
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ amd_pstate_epp_offline(policy);
+
+ return 0;
+}
+
+static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
+{
+ cpufreq_verify_within_cpu_limits(policy);
+ pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min);
+ return 0;
+}
+
+static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
+
+ /* avoid suspending when EPP is not enabled */
+ if (cppc_state != AMD_PSTATE_ACTIVE)
+ return 0;
+
+ /* set this flag to avoid setting the core offline */
+ cpudata->suspended = true;
+
+ /* disable CPPC in low-level firmware */
+ ret = amd_pstate_enable(false);
+ if (ret)
+ pr_err("failed to suspend, return %d\n", ret);
+
+ return 0;
+}
+
+static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (cpudata->suspended) {
+ mutex_lock(&amd_pstate_limits_lock);
+
+ /* enable amd-pstate from the suspend state */
+ amd_pstate_epp_reenable(cpudata);
+
+ mutex_unlock(&amd_pstate_limits_lock);
+
+ cpudata->suspended = false;
+ }
+
+ return 0;
+}
+
static struct cpufreq_driver amd_pstate_driver = {
.flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
.verify = amd_pstate_verify,
@@ -617,6 +1237,20 @@ static struct cpufreq_driver amd_pstate_driver = {
.attr = amd_pstate_attr,
};
+static struct cpufreq_driver amd_pstate_epp_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = amd_pstate_epp_verify_policy,
+ .setpolicy = amd_pstate_epp_set_policy,
+ .init = amd_pstate_epp_cpu_init,
+ .exit = amd_pstate_epp_cpu_exit,
+ .offline = amd_pstate_epp_cpu_offline,
+ .online = amd_pstate_epp_cpu_online,
+ .suspend = amd_pstate_epp_suspend,
+ .resume = amd_pstate_epp_resume,
+ .name = "amd_pstate_epp",
+ .attr = amd_pstate_epp_attr,
+};
+
static int __init amd_pstate_init(void)
{
int ret;
@@ -626,10 +1260,10 @@ static int __init amd_pstate_init(void)
/*
* by default the pstate driver is disabled to load
* enable the amd_pstate passive mode driver explicitly
- * with amd_pstate=passive in kernel command line
+ * with amd_pstate=passive or other modes on the kernel command line
*/
- if (!cppc_load) {
- pr_debug("driver load is disabled, boot with amd_pstate=passive to enable this\n");
+ if (cppc_state == AMD_PSTATE_DISABLE) {
+ pr_debug("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
}
@@ -645,7 +1279,8 @@ static int __init amd_pstate_init(void)
/* capability check */
if (boot_cpu_has(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
- amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
+ if (cppc_state == AMD_PSTATE_PASSIVE)
+ current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
} else {
pr_debug("AMD CPPC shared memory based functionality is supported\n");
static_call_update(amd_pstate_enable, cppc_enable);
@@ -656,31 +1291,63 @@ static int __init amd_pstate_init(void)
/* enable amd pstate feature */
ret = amd_pstate_enable(true);
if (ret) {
- pr_err("failed to enable amd-pstate with return %d\n", ret);
+ pr_err("failed to enable with return %d\n", ret);
return ret;
}
- ret = cpufreq_register_driver(&amd_pstate_driver);
+ ret = cpufreq_register_driver(current_pstate_driver);
if (ret)
- pr_err("failed to register amd_pstate_driver with return %d\n",
- ret);
+ pr_err("failed to register with return %d\n", ret);
+
+ amd_pstate_kobj = kobject_create_and_add("amd_pstate", &cpu_subsys.dev_root->kobj);
+ if (!amd_pstate_kobj) {
+ ret = -EINVAL;
+ pr_err("global sysfs registration failed.\n");
+ goto kobject_free;
+ }
+
+ ret = sysfs_create_group(amd_pstate_kobj, &amd_pstate_global_attr_group);
+ if (ret) {
+ pr_err("sysfs attribute export failed with error %d.\n", ret);
+ goto global_attr_free;
+ }
return ret;
+
+global_attr_free:
+ kobject_put(amd_pstate_kobj);
+kobject_free:
+ cpufreq_unregister_driver(current_pstate_driver);
+ return ret;
}
device_initcall(amd_pstate_init);
static int __init amd_pstate_param(char *str)
{
+ size_t size;
+ int mode_idx;
+
if (!str)
return -EINVAL;
- if (!strcmp(str, "disable")) {
- cppc_load = 0;
- pr_info("driver is explicitly disabled\n");
- } else if (!strcmp(str, "passive"))
- cppc_load = 1;
+ size = strlen(str);
+ mode_idx = get_mode_idx_from_str(str, size);
- return 0;
+ if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
+ cppc_state = mode_idx;
+ if (cppc_state == AMD_PSTATE_DISABLE)
+ pr_info("driver is explicitly disabled\n");
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ current_pstate_driver = &amd_pstate_epp_driver;
+
+ if (cppc_state == AMD_PSTATE_PASSIVE)
+ current_pstate_driver = &amd_pstate_driver;
+
+ return 0;
+ }
+
+ return -EINVAL;
}
early_param("amd_pstate", amd_pstate_param);
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 4153150e20db..ffea6402189d 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -751,10 +751,7 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
{
- int ret;
-
- ret = cpufreq_unregister_driver(&brcm_avs_driver);
- WARN_ON(ret);
+ cpufreq_unregister_driver(&brcm_avs_driver);
brcm_avs_prepare_uninit(pdev);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7e56a42750ea..6d8fd3b8dcb5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -993,7 +993,7 @@ static const struct sysfs_ops sysfs_ops = {
.store = store,
};
-static struct kobj_type ktype_cpufreq = {
+static const struct kobj_type ktype_cpufreq = {
.sysfs_ops = &sysfs_ops,
.default_groups = cpufreq_groups,
.release = cpufreq_sysfs_release,
@@ -2904,12 +2904,12 @@ EXPORT_SYMBOL_GPL(cpufreq_register_driver);
* Returns zero if successful, and -EINVAL if the cpufreq_driver is
* currently not initialised.
*/
-int cpufreq_unregister_driver(struct cpufreq_driver *driver)
+void cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
unsigned long flags;
- if (!cpufreq_driver || (driver != cpufreq_driver))
- return -EINVAL;
+ if (WARN_ON(!cpufreq_driver || (driver != cpufreq_driver)))
+ return;
pr_debug("unregistering driver %s\n", driver->name);
@@ -2926,8 +2926,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
cpus_read_unlock();
-
- return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 9e97f60f8199..ebb3a8102681 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -133,12 +133,14 @@ static int __init davinci_cpufreq_probe(struct platform_device *pdev)
static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
{
+ cpufreq_unregister_driver(&davinci_driver);
+
clk_put(cpufreq.armclk);
if (cpufreq.asyncclk)
clk_put(cpufreq.asyncclk);
- return cpufreq_unregister_driver(&davinci_driver);
+ return 0;
}
static struct platform_driver davinci_cpufreq_driver = {
diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
deleted file mode 100644
index fb72d709db56..000000000000
--- a/drivers/cpufreq/loongson1-cpufreq.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * CPU Frequency Scaling for Loongson 1 SoC
- *
- * Copyright (C) 2014-2016 Zhang, Keguang <[email protected]>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/cpu.h>
-#include <linux/cpufreq.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <cpufreq.h>
-#include <loongson1.h>
-
-struct ls1x_cpufreq {
- struct device *dev;
- struct clk *clk; /* CPU clk */
- struct clk *mux_clk; /* MUX of CPU clk */
- struct clk *pll_clk; /* PLL clk */
- struct clk *osc_clk; /* OSC clk */
- unsigned int max_freq;
- unsigned int min_freq;
-};
-
-static struct ls1x_cpufreq *cpufreq;
-
-static int ls1x_cpufreq_notifier(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- if (val == CPUFREQ_POSTCHANGE)
- current_cpu_data.udelay_val = loops_per_jiffy;
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block ls1x_cpufreq_notifier_block = {
- .notifier_call = ls1x_cpufreq_notifier
-};
-
-static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int index)
-{
- struct device *cpu_dev = get_cpu_device(policy->cpu);
- unsigned int old_freq, new_freq;
-
- old_freq = policy->cur;
- new_freq = policy->freq_table[index].frequency;
-
- /*
- * The procedure of reconfiguring CPU clk is as below.
- *
- * - Reparent CPU clk to OSC clk
- * - Reset CPU clock (very important)
- * - Reconfigure CPU DIV
- * - Reparent CPU clk back to CPU DIV clk
- */
-
- clk_set_parent(policy->clk, cpufreq->osc_clk);
- __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
- LS1X_CLK_PLL_DIV);
- __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
- LS1X_CLK_PLL_DIV);
- clk_set_rate(cpufreq->mux_clk, new_freq * 1000);
- clk_set_parent(policy->clk, cpufreq->mux_clk);
- dev_dbg(cpu_dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
-
- return 0;
-}
-
-static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
-{
- struct device *cpu_dev = get_cpu_device(policy->cpu);
- struct cpufreq_frequency_table *freq_tbl;
- unsigned int pll_freq, freq;
- int steps, i;
-
- pll_freq = clk_get_rate(cpufreq->pll_clk) / 1000;
-
- steps = 1 << DIV_CPU_WIDTH;
- freq_tbl = kcalloc(steps, sizeof(*freq_tbl), GFP_KERNEL);
- if (!freq_tbl)
- return -ENOMEM;
-
- for (i = 0; i < (steps - 1); i++) {
- freq = pll_freq / (i + 1);
- if ((freq < cpufreq->min_freq) || (freq > cpufreq->max_freq))
- freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
- else
- freq_tbl[i].frequency = freq;
- dev_dbg(cpu_dev,
- "cpufreq table: index %d: frequency %d\n", i,
- freq_tbl[i].frequency);
- }
- freq_tbl[i].frequency = CPUFREQ_TABLE_END;
-
- policy->clk = cpufreq->clk;
- cpufreq_generic_init(policy, freq_tbl, 0);
-
- return 0;
-}
-
-static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
-{
- kfree(policy->freq_table);
- return 0;
-}
-
-static struct cpufreq_driver ls1x_cpufreq_driver = {
- .name = "cpufreq-ls1x",
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = ls1x_cpufreq_target,
- .get = cpufreq_generic_get,
- .init = ls1x_cpufreq_init,
- .exit = ls1x_cpufreq_exit,
- .attr = cpufreq_generic_attr,
-};
-
-static int ls1x_cpufreq_remove(struct platform_device *pdev)
-{
- cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- cpufreq_unregister_driver(&ls1x_cpufreq_driver);
-
- return 0;
-}
-
-static int ls1x_cpufreq_probe(struct platform_device *pdev)
-{
- struct plat_ls1x_cpufreq *pdata = dev_get_platdata(&pdev->dev);
- struct clk *clk;
- int ret;
-
- if (!pdata || !pdata->clk_name || !pdata->osc_clk_name) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -EINVAL;
- }
-
- cpufreq =
- devm_kzalloc(&pdev->dev, sizeof(struct ls1x_cpufreq), GFP_KERNEL);
- if (!cpufreq)
- return -ENOMEM;
-
- cpufreq->dev = &pdev->dev;
-
- clk = devm_clk_get(&pdev->dev, pdata->clk_name);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get %s clock\n",
- pdata->clk_name);
- return PTR_ERR(clk);
- }
- cpufreq->clk = clk;
-
- clk = clk_get_parent(clk);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get parent of %s clock\n",
- __clk_get_name(cpufreq->clk));
- return PTR_ERR(clk);
- }
- cpufreq->mux_clk = clk;
-
- clk = clk_get_parent(clk);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get parent of %s clock\n",
- __clk_get_name(cpufreq->mux_clk));
- return PTR_ERR(clk);
- }
- cpufreq->pll_clk = clk;
-
- clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get %s clock\n",
- pdata->osc_clk_name);
- return PTR_ERR(clk);
- }
- cpufreq->osc_clk = clk;
-
- cpufreq->max_freq = pdata->max_freq;
- cpufreq->min_freq = pdata->min_freq;
-
- ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
- if (ret) {
- dev_err(&pdev->dev,
- "failed to register CPUFreq driver: %d\n", ret);
- return ret;
- }
-
- ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
- if (ret) {
- dev_err(&pdev->dev,
- "failed to register CPUFreq notifier: %d\n",ret);
- cpufreq_unregister_driver(&ls1x_cpufreq_driver);
- }
-
- return ret;
-}
-
-static struct platform_driver ls1x_cpufreq_platdrv = {
- .probe = ls1x_cpufreq_probe,
- .remove = ls1x_cpufreq_remove,
- .driver = {
- .name = "ls1x-cpufreq",
- },
-};
-
-module_platform_driver(ls1x_cpufreq_platdrv);
-
-MODULE_ALIAS("platform:ls1x-cpufreq");
-MODULE_AUTHOR("Kelvin Cheung <[email protected]>");
-MODULE_DESCRIPTION("Loongson1 CPUFreq driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index f80339779084..b22f5cc8a463 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -317,13 +317,16 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
static int mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
- return cpufreq_unregister_driver(&cpufreq_mtk_hw_driver);
+ cpufreq_unregister_driver(&cpufreq_mtk_hw_driver);
+
+ return 0;
}
static const struct of_device_id mtk_cpufreq_hw_match[] = {
{ .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
{}
};
+MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
static struct platform_driver mtk_cpufreq_hw_driver = {
.probe = mtk_cpufreq_hw_driver_probe,
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 1b50df06c6bc..81649a1969b6 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -184,7 +184,9 @@ static int omap_cpufreq_probe(struct platform_device *pdev)
static int omap_cpufreq_remove(struct platform_device *pdev)
{
- return cpufreq_unregister_driver(&omap_driver);
+ cpufreq_unregister_driver(&omap_driver);
+
+ return 0;
}
static struct platform_driver omap_cpufreq_platdrv = {
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 9505a812d6a1..2f581d2d617d 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -143,40 +143,42 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
return lval * xo_rate;
}
-/* Get the current frequency of the CPU (after throttling) */
-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+/* Get the frequency requested by the cpufreq core for the CPU */
+static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
{
struct qcom_cpufreq_data *data;
+ const struct qcom_cpufreq_soc_data *soc_data;
struct cpufreq_policy *policy;
+ unsigned int index;
policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
data = policy->driver_data;
+ soc_data = qcom_cpufreq.soc_data;
- return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
+ index = readl_relaxed(data->base + soc_data->reg_perf_state);
+ index = min(index, LUT_MAX_ENTRIES - 1);
+
+ return policy->freq_table[index].frequency;
}
-/* Get the frequency requested by the cpufreq core for the CPU */
-static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
struct qcom_cpufreq_data *data;
- const struct qcom_cpufreq_soc_data *soc_data;
struct cpufreq_policy *policy;
- unsigned int index;
policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
data = policy->driver_data;
- soc_data = qcom_cpufreq.soc_data;
- index = readl_relaxed(data->base + soc_data->reg_perf_state);
- index = min(index, LUT_MAX_ENTRIES - 1);
+ if (data->throttle_irq >= 0)
+ return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
- return policy->freq_table[index].frequency;
+ return qcom_cpufreq_get_freq(cpu);
}
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
@@ -704,6 +706,8 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
return -ENOMEM;
qcom_cpufreq.soc_data = of_device_get_match_data(dev);
+ if (!qcom_cpufreq.soc_data)
+ return -ENODEV;
clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_domains), GFP_KERNEL);
if (!clk_data)
@@ -766,7 +770,9 @@ of_exit:
static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
- return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
+ cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
+
+ return 0;
}
static struct platform_driver qcom_cpufreq_hw_driver = {
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 4596c3e323aa..5890e25d7f77 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -411,7 +411,8 @@ static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
static struct cpufreq_driver tegra194_cpufreq_driver = {
.name = "tegra194",
- .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra194_cpufreq_set_target,
.get = tegra194_get_speed,
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index ff71dd662880..cac5997dca50 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -74,6 +74,7 @@ endmenu
config HALTPOLL_CPUIDLE
tristate "Halt poll cpuidle driver"
depends on X86 && KVM_GUEST
+ select CPU_IDLE_GOV_HALTPOLL
default y
help
This option enables halt poll cpuidle driver, which allows to poll
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 747aa537389b..a1ee475d180d 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -24,6 +24,14 @@ config ARM_PSCI_CPUIDLE
It provides an idle driver that is capable of detecting and
managing idle states through the PSCI firmware interface.
+ The driver has limitations when used with PREEMPT_RT:
+ - If the idle states are described with the non-hierarchical layout,
+ all idle states are still available.
+
+ - If the idle states are described with the hierarchical layout,
+ only the idle states defined per CPU are available, but not the ones
+ being shared among a group of CPUs (aka cluster idle states).
+
config ARM_PSCI_CPUIDLE_DOMAIN
bool "PSCI CPU idle Domain"
depends on ARM_PSCI_CPUIDLE
@@ -102,6 +110,7 @@ config ARM_MVEBU_V7_CPUIDLE
config ARM_TEGRA_CPUIDLE
bool "CPU Idle Driver for NVIDIA Tegra SoCs"
depends on (ARCH_TEGRA || COMPILE_TEST) && !ARM64 && MMU
+ depends on ARCH_SUSPEND_POSSIBLE
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
select ARM_CPU_SUSPEND
help
@@ -110,6 +119,7 @@ config ARM_TEGRA_CPUIDLE
config ARM_QCOM_SPM_CPUIDLE
bool "CPU Idle Driver for Qualcomm Subsystem Power Manager (SPM)"
depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64 && MMU
+ depends on ARCH_SUSPEND_POSSIBLE
select ARM_CPU_SUSPEND
select CPU_IDLE_MULTIPLE_DRIVERS
select DT_IDLE_STATES
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
index 3a39a7f48b77..e66df22f9695 100644
--- a/drivers/cpuidle/cpuidle-haltpoll.c
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -32,7 +32,7 @@ static int default_enter_idle(struct cpuidle_device *dev,
local_irq_enable();
return index;
}
- default_idle();
+ arch_cpu_idle();
return index;
}
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index c80cf9ddabd8..6ad2954948a5 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -64,8 +64,11 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
- /* Allow power off when OSI has been successfully enabled. */
- if (use_osi)
+ /*
+ * Allow power off when OSI has been successfully enabled.
+ * PREEMPT_RT is not yet ready to enter domain idle states.
+ */
+ if (use_osi && !IS_ENABLED(CONFIG_PREEMPT_RT))
pd->power_off = psci_pd_power_off;
else
pd->flags |= GENPD_FLAG_ALWAYS_ON;
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 57bc3e3ae391..68467a325dcb 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -231,6 +231,9 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
if (!psci_has_osi_support())
return 0;
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ return 0;
+
data->dev = psci_dt_attach_cpu(cpu);
if (IS_ERR_OR_NULL(data->dev))
return PTR_ERR_OR_ZERO(data->dev);
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index f70aa17e2a8e..d9cda7f6ccb9 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -183,11 +183,15 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
s->target_residency_ns = s->target_residency * NSEC_PER_USEC;
else if (s->target_residency_ns < 0)
s->target_residency_ns = 0;
+ else
+ s->target_residency = div_u64(s->target_residency_ns, NSEC_PER_USEC);
if (s->exit_latency > 0)
s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC;
else if (s->exit_latency_ns < 0)
s->exit_latency_ns = 0;
+ else
+ s->exit_latency = div_u64(s->exit_latency_ns, NSEC_PER_USEC);
}
}
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index d9262db79cae..987fc5f3997d 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -2,8 +2,13 @@
/*
* Timer events oriented CPU idle governor
*
+ * TEO governor:
* Copyright (C) 2018 - 2021 Intel Corporation
* Author: Rafael J. Wysocki <[email protected]>
+ *
+ * Util-awareness mechanism:
+ * Copyright (C) 2022 Arm Ltd.
+ * Author: Kajetan Puchalski <[email protected]>
*/
/**
@@ -99,15 +104,56 @@
* select the given idle state instead of the candidate one.
*
* 3. By default, select the candidate state.
+ *
+ * Util-awareness mechanism:
+ *
+ * The idea behind the util-awareness extension is that there are two distinct
+ * scenarios for the CPU which should result in two different approaches to idle
+ * state selection - utilized and not utilized.
+ *
+ * In this case, 'utilized' means that the average runqueue util of the CPU is
+ * above a certain threshold.
+ *
+ * When the CPU is utilized while going into idle, more likely than not it will
+ * be woken up to do more work soon and so a shallower idle state should be
+ * selected to minimise latency and maximise performance. When the CPU is not
+ * being utilized, the usual metrics-based approach to selecting the deepest
+ * available idle state should be preferred to take advantage of the power
+ * saving.
+ *
+ * In order to achieve this, the governor uses a utilization threshold.
+ * The threshold is computed per-CPU as a percentage of the CPU's capacity
+ * by bit shifting the capacity value. Based on testing, a shift of 6 (~1.56%)
+ * seems to give the best results.
+ *
+ * Before selecting the next idle state, the governor compares the current CPU
+ * util to the precomputed util threshold. If it's below, it defaults to the
+ * TEO metrics mechanism. If it's above, the closest shallower idle state will
+ * be selected instead, as long as it is not a polling state.
*/
#include <linux/cpuidle.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/sched/clock.h>
+#include <linux/sched/topology.h>
#include <linux/tick.h>
/*
+ * The number of bits to shift the CPU's capacity by in order to determine
+ * the utilized threshold.
+ *
+ * 6 was chosen based on testing as the number that achieved the best balance
+ * of power and performance on average.
+ *
+ * The resulting threshold is high enough to not be triggered by background
+ * noise and low enough to react quickly when activity starts to ramp up.
+ */
+#define UTIL_THRESHOLD_SHIFT 6
+
+/*
* The PULSE value is added to metrics when they grow and the DECAY_SHIFT value
* is used for decreasing metrics on a regular basis.
*/
@@ -137,9 +183,11 @@ struct teo_bin {
* @time_span_ns: Time between idle state selection and post-wakeup update.
* @sleep_length_ns: Time till the closest timer event (at the selection time).
* @state_bins: Idle state data bins for this CPU.
- * @total: Grand total of the "intercepts" and "hits" mertics for all bins.
+ * @total: Grand total of the "intercepts" and "hits" metrics for all bins.
* @next_recent_idx: Index of the next @recent_idx entry to update.
* @recent_idx: Indices of bins corresponding to recent "intercepts".
+ * @util_threshold: Threshold above which the CPU is considered utilized
+ * @utilized: Whether the last sleep on the CPU happened while utilized
*/
struct teo_cpu {
s64 time_span_ns;
@@ -148,11 +196,30 @@ struct teo_cpu {
unsigned int total;
int next_recent_idx;
int recent_idx[NR_RECENT];
+ unsigned long util_threshold;
+ bool utilized;
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
/**
+ * teo_cpu_is_utilized - Check if the CPU's util is above the threshold
+ * @cpu: Target CPU
+ * @cpu_data: Governor CPU data for the target CPU
+ */
+#ifdef CONFIG_SMP
+static bool teo_cpu_is_utilized(int cpu, struct teo_cpu *cpu_data)
+{
+ return sched_cpu_util(cpu) > cpu_data->util_threshold;
+}
+#else
+static bool teo_cpu_is_utilized(int cpu, struct teo_cpu *cpu_data)
+{
+ return false;
+}
+#endif
+
+/**
* teo_update - Update CPU metrics after wakeup.
* @drv: cpuidle driver containing state data.
* @dev: Target CPU.
@@ -258,15 +325,17 @@ static s64 teo_middle_of_bin(int idx, struct cpuidle_driver *drv)
* @dev: Target CPU.
* @state_idx: Index of the capping idle state.
* @duration_ns: Idle duration value to match.
+ * @no_poll: Don't consider polling states.
*/
static int teo_find_shallower_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int state_idx,
- s64 duration_ns)
+ s64 duration_ns, bool no_poll)
{
int i;
for (i = state_idx - 1; i >= 0; i--) {
- if (dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable ||
+ (no_poll && drv->states[i].flags & CPUIDLE_FLAG_POLLING))
continue;
state_idx = i;
@@ -321,6 +390,22 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
goto end;
}
+ cpu_data->utilized = teo_cpu_is_utilized(dev->cpu, cpu_data);
+ /*
+ * If the CPU is being utilized over the threshold and there are only 2
+ * states to choose from, the metrics need not be considered, so choose
+ * the shallowest non-polling state and exit.
+ */
+ if (drv->state_count < 3 && cpu_data->utilized) {
+ for (i = 0; i < drv->state_count; ++i) {
+ if (!dev->states_usage[i].disable &&
+ !(drv->states[i].flags & CPUIDLE_FLAG_POLLING)) {
+ idx = i;
+ goto end;
+ }
+ }
+ }
+
/*
* Find the deepest idle state whose target residency does not exceed
* the current sleep length and the deepest idle state not deeper than
@@ -452,6 +537,13 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (idx > constraint_idx)
idx = constraint_idx;
+ /*
+ * If the CPU is being utilized over the threshold, choose a shallower
+ * non-polling state to improve latency.
+ */
+ if (cpu_data->utilized)
+ idx = teo_find_shallower_state(drv, dev, idx, duration_ns, true);
+
end:
/*
* Don't stop the tick if the selected state is a polling one or if the
@@ -469,7 +561,7 @@ end:
*/
if (idx > idx0 &&
drv->states[idx].target_residency_ns > delta_tick)
- idx = teo_find_shallower_state(drv, dev, idx, delta_tick);
+ idx = teo_find_shallower_state(drv, dev, idx, delta_tick, false);
}
return idx;
@@ -508,9 +600,11 @@ static int teo_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ unsigned long max_capacity = arch_scale_cpu_capacity(dev->cpu);
int i;
memset(cpu_data, 0, sizeof(*cpu_data));
+ cpu_data->util_threshold = max_capacity >> UTIL_THRESHOLD_SHIFT;
for (i = 0; i < NR_RECENT; i++)
cpu_data->recent_idx[i] = -1;
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 2b496a53cbca..48948b171749 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -200,7 +200,7 @@ static void cpuidle_sysfs_release(struct kobject *kobj)
complete(&kdev->kobj_unregister);
}
-static struct kobj_type ktype_cpuidle = {
+static const struct kobj_type ktype_cpuidle = {
.sysfs_ops = &cpuidle_sysfs_ops,
.release = cpuidle_sysfs_release,
};
@@ -447,7 +447,7 @@ static void cpuidle_state_sysfs_release(struct kobject *kobj)
complete(&state_obj->kobj_unregister);
}
-static struct kobj_type ktype_state_cpuidle = {
+static const struct kobj_type ktype_state_cpuidle = {
.sysfs_ops = &cpuidle_state_sysfs_ops,
.default_groups = cpuidle_state_default_groups,
.release = cpuidle_state_sysfs_release,
@@ -594,7 +594,7 @@ static struct attribute *cpuidle_driver_default_attrs[] = {
};
ATTRIBUTE_GROUPS(cpuidle_driver_default);
-static struct kobj_type ktype_driver_cpuidle = {
+static const struct kobj_type ktype_driver_cpuidle = {
.sysfs_ops = &cpuidle_driver_sysfs_ops,
.default_groups = cpuidle_driver_default_groups,
.release = cpuidle_driver_sysfs_release,
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 02f28da519e3..940f805b1534 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -131,7 +131,7 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_port *iter = cxled_to_port(cxled);
struct cxl_ep *ep;
- int rc;
+ int rc = 0;
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
iter = to_cxl_port(iter->dev.parent);
@@ -143,7 +143,8 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
cxl_rr = cxl_rr_load(iter, cxlr);
cxld = cxl_rr->decoder;
- rc = cxld->reset(cxld);
+ if (cxld->reset)
+ rc = cxld->reset(cxld);
if (rc)
return rc;
}
@@ -186,7 +187,8 @@ static int cxl_region_decode_commit(struct cxl_region *cxlr)
iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
cxl_rr = cxl_rr_load(iter, cxlr);
cxld = cxl_rr->decoder;
- cxld->reset(cxld);
+ if (cxld->reset)
+ cxld->reset(cxld);
}
cxled->cxld.reset(&cxled->cxld);
@@ -991,10 +993,10 @@ static int cxl_port_setup_targets(struct cxl_port *port,
int i, distance;
/*
- * Passthrough ports impose no distance requirements between
+ * Passthrough decoders impose no distance requirements between
* peers
*/
- if (port->nr_dports == 1)
+ if (cxl_rr->nr_targets == 1)
distance = 0;
else
distance = p->nr_targets / cxl_rr->nr_targets;
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index da4438f3188c..c4c4728a36e4 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -475,7 +475,7 @@ EXPORT_SYMBOL_GPL(put_dax);
/**
* dax_holder() - obtain the holder of a dax device
* @dax_dev: a dax_device instance
-
+ *
* Return: the holder's data which represents the holder if registered,
* otherwize NULL.
*/
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 406b4e26f538..0de0482cd36e 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -167,7 +167,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
0, 0);
set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &dma_fence_stub.flags);
+ &fence->flags);
dma_fence_signal(fence);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 9c89f7d53e99..958aa4662ccb 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -819,8 +819,10 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
r = container_of(resource, struct inbound_transaction_resource,
resource);
- if (is_fcp_request(r->request))
+ if (is_fcp_request(r->request)) {
+ kfree(r->data);
goto out;
+ }
if (a->length != fw_get_response_length(r->request)) {
ret = -EINVAL;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index a2b0cbc8741c..1e0b016fdc2b 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -1007,6 +1007,8 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
/* first try to find a slot in an existing linked list entry */
for (prsv = efi_memreserve_root->next; prsv; ) {
rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
+ if (!rsv)
+ return -ENOMEM;
index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
if (index < rsv->size) {
rsv->entry[index].base = addr;
diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c
index ff2d18c42ee7..4501652e11ab 100644
--- a/drivers/firmware/efi/libstub/arm64.c
+++ b/drivers/firmware/efi/libstub/arm64.c
@@ -19,10 +19,13 @@ static bool system_needs_vamap(void)
const u8 *type1_family = efi_get_smbios_string(1, family);
/*
- * Ampere Altra machines crash in SetTime() if SetVirtualAddressMap()
- * has not been called prior.
+ * Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
+ * SetVirtualAddressMap() has not been called prior.
*/
- if (!type1_family || strcmp(type1_family, "Altra"))
+ if (!type1_family || (
+ strcmp(type1_family, "eMAG") &&
+ strcmp(type1_family, "Altra") &&
+ strcmp(type1_family, "Altra Max")))
return false;
efi_warn("Working around broken SetVirtualAddressMap()\n");
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 0a9aba5f9cef..f178b2984dfb 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -33,7 +33,7 @@ int __init efi_memattr_init(void)
return -ENOMEM;
}
- if (tbl->version > 1) {
+ if (tbl->version > 2) {
pr_warn("Unexpected EFI Memory Attributes table version %d\n",
tbl->version);
goto unmap;
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index 79d48852825e..03f1bd81c434 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -574,20 +574,27 @@ static int m10bmc_sec_probe(struct platform_device *pdev)
len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d",
sec->fw_name_id);
sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL);
- if (!sec->fw_name)
- return -ENOMEM;
+ if (!sec->fw_name) {
+ ret = -ENOMEM;
+ goto fw_name_fail;
+ }
fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name,
&m10bmc_ops, sec);
if (IS_ERR(fwl)) {
dev_err(sec->dev, "Firmware Upload driver failed to start\n");
- kfree(sec->fw_name);
- xa_erase(&fw_upload_xa, sec->fw_name_id);
- return PTR_ERR(fwl);
+ ret = PTR_ERR(fwl);
+ goto fw_uploader_fail;
}
sec->fwl = fwl;
return 0;
+
+fw_uploader_fail:
+ kfree(sec->fw_name);
+fw_name_fail:
+ xa_erase(&fw_upload_xa, sec->fw_name_id);
+ return ret;
}
static int m10bmc_sec_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 357cea58ec98..f7f01982a512 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -213,9 +213,9 @@ static int s10_ops_write_init(struct fpga_manager *mgr,
/* Allocate buffers from the service layer's pool. */
for (i = 0; i < NUM_SVC_BUFS; i++) {
kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE);
- if (!kbuf) {
+ if (IS_ERR(kbuf)) {
s10_free_buffers(mgr);
- ret = -ENOMEM;
+ ret = PTR_ERR(kbuf);
goto init_done;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e3e2e6e3b485..d148a1bd85e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -243,6 +243,7 @@ extern int amdgpu_num_kcq;
#define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
extern int amdgpu_vcnfw_log;
+extern int amdgpu_sg_display;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7b5ce00f0602..7af3041ccd0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1220,10 +1220,13 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
* next job actually sees the results from the previous one
* before we start executing on the same scheduler ring.
*/
- if (!s_fence || s_fence->sched != sched)
+ if (!s_fence || s_fence->sched != sched) {
+ dma_fence_put(fence);
continue;
+ }
r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+ dma_fence_put(fence);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index cd4caaa29528..3fe277bc233f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -186,6 +186,7 @@ int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;
int amdgpu_use_xgmi_p2p = 1;
int amdgpu_vcnfw_log;
+int amdgpu_sg_display = -1; /* auto */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -932,6 +933,16 @@ MODULE_PARM_DESC(vcnfw_log, "Enable vcnfw log(0 = disable (default value), 1 = e
module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444);
/**
+ * DOC: sg_display (int)
+ * Disable S/G (scatter/gather) display (i.e., display from system memory).
+ * This option is only relevant on APUs. Set this option to 0 to disable
+ * S/G display if you experience flickering or other issues under memory
+ * pressure and report the issue.
+ */
+MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)");
+module_param_named(sg_display, amdgpu_sg_display, int, 0444);
+
+/**
* DOC: smu_pptable_id (int)
* Used to override pptable id. id = 0 use VBIOS pptable.
* id > 0 use the soft pptable with specicfied id.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 00444203220d..faff4a3f96e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -618,7 +618,13 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
- if (!ring->no_scheduler)
+ /*
+ * Notice we check for sched.ops since there's some
+ * override on the meaning of sched.ready by amdgpu.
+ * The natural check would be sched.ready, which is
+ * set as drm_sched_init() finishes...
+ */
+ if (ring->sched.ops)
drm_sched_fini(&ring->sched);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index f752c7ae7f60..3989e755a5b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -295,7 +295,7 @@ struct amdgpu_ring {
#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
-#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
+#define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index b5f3bba851db..01e42bdd8e4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -974,7 +974,7 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
min(nptes, 32u), dst, incr,
upd_flags,
- vm->task_info.pid,
+ vm->task_info.tgid,
vm->immediate.fence_context);
amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
cursor.level, pe_start, dst,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index b9b57a66e113..66eb102cd88f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -790,8 +790,8 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
* zero here */
WARN_ON(simd != 0);
- /* type 2 wave data */
- dst[(*no_fields)++] = 2;
+ /* type 3 wave data */
+ dst[(*no_fields)++] = 3;
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f202b45c413c..5dde6f82a1ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -6877,7 +6877,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
- .test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v9_ring_emit_sb,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
index 15eb3658d70e..09fdcd20cb91 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
@@ -337,7 +337,13 @@ const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg = {
static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
{
- return;
+ if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(4, 3, 0)) {
+ uint32_t data;
+
+ data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2);
+ data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
+ WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
+ }
}
static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 5562670b7b52..7050238c4c48 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -640,7 +640,10 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_REPEATER_FGCG |
- AMD_CG_SUPPORT_GFX_MGCG;
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_HDP_SD |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index af37bc6ed1f5..93dee3d1a483 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1184,24 +1184,38 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
memset(pa_config, 0, sizeof(*pa_config));
- logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
- pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
-
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- /*
- * Raven2 has a HW issue that it is unable to use the vram which
- * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
- * workaround that increase system aperture high address (add 1)
- * to get rid of the VM fault and hardware hang.
- */
- logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
- else
- logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
-
agp_base = 0;
agp_bot = adev->gmc.agp_start >> 24;
agp_top = adev->gmc.agp_end >> 24;
+ /* AGP aperture is disabled */
+ if (agp_bot == agp_top) {
+ logical_addr_low = adev->gmc.vram_start >> 18;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ /*
+ * Raven2 has a HW issue that it is unable to use the vram which
+ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+ * workaround that increase system aperture high address (add 1)
+ * to get rid of the VM fault and hardware hang.
+ */
+ logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
+ else
+ logical_addr_high = adev->gmc.vram_end >> 18;
+ } else {
+ logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ /*
+ * Raven2 has a HW issue that it is unable to use the vram which
+ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+ * workaround that increase system aperture high address (add 1)
+ * to get rid of the VM fault and hardware hang.
+ */
+ logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
+ else
+ logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
+ }
+
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
@@ -1503,6 +1517,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
init_data.flags.gpu_vm_support = true;
break;
@@ -1511,6 +1527,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
break;
}
+ if (init_data.flags.gpu_vm_support &&
+ (amdgpu_sg_display == 0))
+ init_data.flags.gpu_vm_support = false;
if (init_data.flags.gpu_vm_support)
adev->mode_info.gpu_vm_support = true;
@@ -4501,6 +4520,17 @@ DEVICE_ATTR_WO(s3_debug);
static int dm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, Object_Header);
+ u16 data_offset;
+
+ /* if there is no object header, skip DM */
+ if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ dev_info(adev->dev, "No object header, skipping DM\n");
+ return -ENOENT;
+ }
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
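The mmhub_read_system_context hunk above now distinguishes a disabled AGP aperture (agp_start and agp_end landing in the same 16MB unit) from an enabled one: when disabled, the system aperture only has to cover VRAM; otherwise it spans both the FB and AGP windows. A minimal userspace sketch of just that arithmetic, with all struct and variable names invented and the Raven2 +1 quirk left out:

#include <assert.h>
#include <stdint.h>

/* Addresses are reduced to 256KB units (>> 18) for the aperture bounds
 * and to 16MB units (>> 24) for the AGP-window comparison, mirroring
 * the hunk above. Nothing here is amdgpu API. */
struct toy_gmc {
	uint64_t fb_start, fb_end;
	uint64_t vram_start, vram_end;
	uint64_t agp_start, agp_end;
};

static void aperture_bounds(const struct toy_gmc *gmc, uint64_t *low, uint64_t *high)
{
	uint64_t agp_bot = gmc->agp_start >> 24;
	uint64_t agp_top = gmc->agp_end >> 24;

	if (agp_bot == agp_top) {
		/* AGP aperture disabled: only VRAM must be covered. */
		*low  = gmc->vram_start >> 18;
		*high = gmc->vram_end >> 18;
	} else {
		/* Cover both the FB window and the AGP window. */
		*low  = (gmc->fb_start < gmc->agp_start ? gmc->fb_start : gmc->agp_start) >> 18;
		*high = (gmc->fb_end > gmc->agp_end ? gmc->fb_end : gmc->agp_end) >> 18;
	}
}

int main(void)
{
	struct toy_gmc gmc = {
		.fb_start = 0x008000000000ull,   .fb_end = 0x0080ffffffffull,
		.vram_start = 0x008000000000ull, .vram_end = 0x00807fffffffull,
		.agp_start = 0x1ull, .agp_end = 0x0ull,   /* same 16MB unit: disabled */
	};
	uint64_t low, high;

	aperture_bounds(&gmc, &low, &high);
	assert(low == (gmc.vram_start >> 18) && high == (gmc.vram_end >> 18));
	return 0;
}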
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index fe2023f18b7d..8f894c1d1d1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3626,7 +3626,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
(int)hubp->curs_attr.width || pos_cpy.x
<= (int)hubp->curs_attr.width +
pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
+ pos_cpy.x = 2 * viewport_width - temp_x;
}
}
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index f9ea1e86707f..79850a68f62a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -874,8 +874,9 @@ static const struct dc_plane_cap plane_cap = {
},
// 6:1 downscaling ratio: 1000/6 = 166.666
+ // 4:1 downscaling ratio for ARGB8888 to prevent underflow during P010 playback: 1000/4 = 250
.max_downscale_factor = {
- .argb8888 = 167,
+ .argb8888 = 250,
.nv12 = 167,
.fp16 = 167
},
@@ -1763,7 +1764,7 @@ static bool dcn314_resource_construct(
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
- dc->caps.max_downscale_ratio = 600;
+ dc->caps.max_downscale_ratio = 400;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100;
dc->caps.max_cursor_size = 256;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index dc4649458567..a4e9fd5307c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -94,7 +94,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations,
- .does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall,
+ .does_plane_fit_in_mall = NULL,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 950669f2c10d..cb7c0c878423 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -3183,7 +3183,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
} else {
v->MIN_DST_Y_NEXT_START[k] = v->VTotal[k] - v->VFrontPorch[k] + v->VTotal[k] - v->VActive[k] - v->VStartup[k];
}
- v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / (double)v->HTotal[k] / v->PixelClock[k], 1.0) / 4.0;
+ v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / ((double)v->HTotal[k] / v->PixelClock[k]), 1.0) / 4.0;
if (((v->VUpdateOffsetPix[k] + v->VUpdateWidthPix[k] + v->VReadyOffsetPix[k]) / v->HTotal[k])
<= (isInterlaceTiming ?
dml_floor((v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]) / 2.0, 1.0) :
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 4a122925c3ae..92c18bfb98b3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -532,6 +532,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
if (dmub->hw_funcs.reset)
dmub->hw_funcs.reset(dmub);
+ /* reset the cache of the last wptr as well now that hw is reset */
+ dmub->inbox1_last_wptr = 0;
+
cw0.offset.quad_part = inst_fb->gpu_addr;
cw0.region.base = DMUB_CW0_BASE;
cw0.region.top = cw0.region.base + inst_fb->size - 1;
@@ -649,6 +652,15 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
if (dmub->hw_funcs.reset)
dmub->hw_funcs.reset(dmub);
+ /* mailboxes have been reset in hw, so reset the sw state as well */
+ dmub->inbox1_last_wptr = 0;
+ dmub->inbox1_rb.wrpt = 0;
+ dmub->inbox1_rb.rptr = 0;
+ dmub->outbox0_rb.wrpt = 0;
+ dmub->outbox0_rb.rptr = 0;
+ dmub->outbox1_rb.wrpt = 0;
+ dmub->outbox1_rb.rptr = 0;
+
dmub->hw_init = false;
return DMUB_STATUS_OK;
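The dmub_srv change above clears the cached inbox write pointer and the software ring read/write pointers whenever the hardware is reset, so the driver's ring bookkeeping cannot drift from the freshly reset mailbox registers. A minimal userspace sketch of that pattern, assuming invented names (toy_ring, toy_mailbox) and none of the DMUB API:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Toy mailbox ring: software read/write pointers plus a cached copy of
 * the last value written to the hardware write-pointer register. */
struct toy_ring {
	uint32_t wrpt;
	uint32_t rptr;
};

struct toy_mailbox {
	struct toy_ring inbox;
	uint32_t last_wptr;
};

/* When the hardware block is reset, the software copies must be cleared
 * too, otherwise the next submission would use stale offsets that the
 * hardware no longer agrees with. */
static void toy_mailbox_hw_reset(struct toy_mailbox *mb)
{
	/* ... poke the reset register here ... */
	mb->last_wptr = 0;
	memset(&mb->inbox, 0, sizeof(mb->inbox));
}

int main(void)
{
	struct toy_mailbox mb = {
		.inbox = { .wrpt = 0x40, .rptr = 0x40 },
		.last_wptr = 0x40,
	};

	toy_mailbox_hw_reset(&mb);
	assert(mb.inbox.wrpt == 0 && mb.inbox.rptr == 0 && mb.last_wptr == 0);
	return 0;
}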
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 236657eece47..2f3e239e623d 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1991,6 +1991,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(9, 4, 2):
case IP_VERSION(10, 3, 0):
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
*states = ATTR_STATE_SUPPORTED;
break;
default:
@@ -2007,14 +2009,16 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
- gc_ver == IP_VERSION(11, 0, 2)))
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
if (!(gc_ver == IP_VERSION(10, 3, 1) ||
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
- gc_ver == IP_VERSION(11, 0, 2)))
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index ca3beb5d8f27..6ab155023592 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1500,6 +1500,20 @@ static int smu_disable_dpms(struct smu_context *smu)
}
/*
+ * For SMU 13.0.4/11, PMFW will handle the feature disablement properly
+ * for the gpu reset case. Driver involvement is unnecessary.
+ */
+ if (amdgpu_in_reset(adev)) {
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 4):
+ case IP_VERSION(13, 0, 11):
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ /*
* For gpu reset, runpm and hibernation through BACO,
* BACO feature has to be kept enabled.
*/
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index d6b964cf73bd..4bc7aee4d44f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -123,7 +123,8 @@
(1 << FEATURE_DS_FCLK_BIT) | \
(1 << FEATURE_DS_LCLK_BIT) | \
(1 << FEATURE_DS_DCFCLK_BIT) | \
- (1 << FEATURE_DS_UCLK_BIT))
+ (1 << FEATURE_DS_UCLK_BIT) | \
+ (1ULL << FEATURE_DS_VCN_BIT))
//For use with feature control messages
typedef enum {
@@ -522,9 +523,9 @@ typedef enum {
TEMP_HOTSPOT_M,
TEMP_MEM,
TEMP_VR_GFX,
- TEMP_VR_SOC,
TEMP_VR_MEM0,
TEMP_VR_MEM1,
+ TEMP_VR_SOC,
TEMP_VR_U,
TEMP_LIQUID0,
TEMP_LIQUID1,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
index d6b13933a98f..48a3a3952ceb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
@@ -113,20 +113,21 @@
#define NUM_FEATURES 64
#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
-#define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \
- (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
- (1 << FEATURE_DPM_UCLK_BIT) | \
- (1 << FEATURE_DPM_FCLK_BIT) | \
- (1 << FEATURE_DPM_SOCCLK_BIT) | \
- (1 << FEATURE_DPM_MP0CLK_BIT) | \
- (1 << FEATURE_DPM_LINK_BIT) | \
- (1 << FEATURE_DPM_DCN_BIT) | \
- (1 << FEATURE_DS_GFXCLK_BIT) | \
- (1 << FEATURE_DS_SOCCLK_BIT) | \
- (1 << FEATURE_DS_FCLK_BIT) | \
- (1 << FEATURE_DS_LCLK_BIT) | \
- (1 << FEATURE_DS_DCFCLK_BIT) | \
- (1 << FEATURE_DS_UCLK_BIT)
+#define ALLOWED_FEATURE_CTRL_SCPM ((1 << FEATURE_DPM_GFXCLK_BIT) | \
+ (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
+ (1 << FEATURE_DPM_UCLK_BIT) | \
+ (1 << FEATURE_DPM_FCLK_BIT) | \
+ (1 << FEATURE_DPM_SOCCLK_BIT) | \
+ (1 << FEATURE_DPM_MP0CLK_BIT) | \
+ (1 << FEATURE_DPM_LINK_BIT) | \
+ (1 << FEATURE_DPM_DCN_BIT) | \
+ (1 << FEATURE_DS_GFXCLK_BIT) | \
+ (1 << FEATURE_DS_SOCCLK_BIT) | \
+ (1 << FEATURE_DS_FCLK_BIT) | \
+ (1 << FEATURE_DS_LCLK_BIT) | \
+ (1 << FEATURE_DS_DCFCLK_BIT) | \
+ (1 << FEATURE_DS_UCLK_BIT) | \
+ (1ULL << FEATURE_DS_VCN_BIT))
//For use with feature control messages
typedef enum {
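The ALLOWED_FEATURE_CTRL_SCPM change above does two things: it wraps the whole OR-chain in parentheses so the macro expands safely inside larger expressions, and, as in the v13_0_0 header earlier, it uses 1ULL for the new VCN bit because shifting a plain int by 31 or more positions is not well defined. A small standalone illustration of the parenthesization pitfall (the DEMO_* names are invented for this example; both shifts already use 1ULL so high bit positions stay well defined):

#include <assert.h>
#include <stdint.h>

#define DEMO_BIT_LOW   3
#define DEMO_BIT_HIGH  40   /* > 31: needs a 64-bit shift operand */

/* Unparenthesized macro: '&' binds tighter than '|', so using it inside
 * a larger expression silently changes the result. */
#define MASK_BAD   (1ULL << DEMO_BIT_LOW) | (1ULL << DEMO_BIT_HIGH)
/* Parenthesized macro, safe to combine with other operators. */
#define MASK_GOOD  ((1ULL << DEMO_BIT_LOW) | (1ULL << DEMO_BIT_HIGH))

int main(void)
{
	uint64_t enabled = 1ULL << DEMO_BIT_HIGH;

	/* Expands to (low) | ((high) & enabled): the low bit leaks through. */
	uint64_t bad  = MASK_BAD & enabled;
	uint64_t good = MASK_GOOD & enabled;

	assert(bad != good);                        /* missing parens matter */
	assert(good == (1ULL << DEMO_BIT_HIGH));
	return 0;
}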
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index e8c6febb8b64..992163e66f7b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -28,11 +28,11 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x34
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x35
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index cf96c3f2affe..508e392547d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -407,6 +407,9 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
&smu_table->power_play_table,
&smu_table->power_play_table_size);
@@ -1257,6 +1260,9 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
table_context->power_play_table;
PPTable_t *pptable = smu->smu_table.driver_pptable;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (!range)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index e87db7e02e8a..9e1967d8049e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -124,6 +124,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index a2f0860b20bb..d751820c6da6 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -193,6 +193,7 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev)
struct hdmi_codec_pdata pdata;
struct platform_device *platform;
+ memset(&pdata, 0, sizeof(pdata));
pdata.ops = &dw_hdmi_i2s_ops;
pdata.i2s = 1;
pdata.max_i2s_channels = 8;
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index fd67efe37c63..056ab9d5f313 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -233,21 +233,17 @@ void drm_client_dev_restore(struct drm_device *dev)
static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
- struct drm_device *dev = buffer->client->dev;
-
if (buffer->gem) {
drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
drm_gem_object_put(buffer->gem);
}
- if (buffer->handle)
- drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
-
kfree(buffer);
}
static struct drm_client_buffer *
-drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
+ u32 format, u32 *handle)
{
const struct drm_format_info *info = drm_format_info(format);
struct drm_mode_create_dumb dumb_args = { };
@@ -269,16 +265,15 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
if (ret)
goto err_delete;
- buffer->handle = dumb_args.handle;
- buffer->pitch = dumb_args.pitch;
-
obj = drm_gem_object_lookup(client->file, dumb_args.handle);
if (!obj) {
ret = -ENOENT;
goto err_delete;
}
+ buffer->pitch = dumb_args.pitch;
buffer->gem = obj;
+ *handle = dumb_args.handle;
return buffer;
@@ -365,7 +360,8 @@ static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
}
static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
- u32 width, u32 height, u32 format)
+ u32 width, u32 height, u32 format,
+ u32 handle)
{
struct drm_client_dev *client = buffer->client;
struct drm_mode_fb_cmd fb_req = { };
@@ -377,7 +373,7 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
fb_req.depth = info->depth;
fb_req.width = width;
fb_req.height = height;
- fb_req.handle = buffer->handle;
+ fb_req.handle = handle;
fb_req.pitch = buffer->pitch;
ret = drm_mode_addfb(client->dev, &fb_req, client->file);
@@ -414,13 +410,24 @@ struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
{
struct drm_client_buffer *buffer;
+ u32 handle;
int ret;
- buffer = drm_client_buffer_create(client, width, height, format);
+ buffer = drm_client_buffer_create(client, width, height, format,
+ &handle);
if (IS_ERR(buffer))
return buffer;
- ret = drm_client_buffer_addfb(buffer, width, height, format);
+ ret = drm_client_buffer_addfb(buffer, width, height, format, handle);
+
+ /*
+ * The handle is only needed for creating the framebuffer, destroy it
+ * again to solve a circular dependency should anybody export the GEM
+ * object as DMA-buf. The framebuffer and our buffer structure are still
+ * holding references to the GEM object to prevent its destruction.
+ */
+ drm_mode_destroy_dumb(client->dev, handle, client->file);
+
if (ret) {
drm_client_buffer_delete(buffer);
return ERR_PTR(ret);
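The drm_client change above stops holding the dumb-buffer handle for the lifetime of the client buffer: the handle is only needed to create the framebuffer, and destroying it right after addfb breaks a reference cycle should the GEM object later be exported as a DMA-buf, while the framebuffer and the client buffer keep the object alive through their own GEM references. A toy refcount model of that ordering (all names invented, nothing here is DRM API):

#include <assert.h>
#include <stdlib.h>

struct toy_obj { int refs; };

static struct toy_obj *obj_create(void)
{
	struct toy_obj *o = calloc(1, sizeof(*o));

	o->refs = 1;                   /* the "handle" reference */
	return o;
}

static void obj_get(struct toy_obj *o) { o->refs++; }
static void obj_put(struct toy_obj *o) { if (--o->refs == 0) free(o); }

int main(void)
{
	struct toy_obj *obj = obj_create();   /* dumb-buffer create */

	obj_get(obj);            /* framebuffer takes its own reference   */
	obj_get(obj);            /* client buffer takes its own reference */

	obj_put(obj);            /* drop the handle right after addfb     */
	assert(obj->refs == 2);  /* object stays alive without the handle */

	obj_put(obj);            /* framebuffer removed                   */
	obj_put(obj);            /* client buffer deleted: object freed   */
	return 0;
}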
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 572a4e3769f3..a491e6c38875 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -2466,6 +2466,22 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
dvo_port);
}
+static enum port
+dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
+{
+ switch (dvo_port) {
+ case DVO_PORT_MIPIA:
+ return PORT_A;
+ case DVO_PORT_MIPIC:
+ if (DISPLAY_VER(i915) >= 11)
+ return PORT_B;
+ else
+ return PORT_C;
+ default:
+ return PORT_NONE;
+ }
+}
+
static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
{
switch (vbt_max_link_rate) {
@@ -3414,19 +3430,16 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
dvo_port = child->dvo_port;
- if (dvo_port == DVO_PORT_MIPIA ||
- (dvo_port == DVO_PORT_MIPIB && DISPLAY_VER(i915) >= 11) ||
- (dvo_port == DVO_PORT_MIPIC && DISPLAY_VER(i915) < 11)) {
- if (port)
- *port = dvo_port - DVO_PORT_MIPIA;
- return true;
- } else if (dvo_port == DVO_PORT_MIPIB ||
- dvo_port == DVO_PORT_MIPIC ||
- dvo_port == DVO_PORT_MIPID) {
+ if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
drm_dbg_kms(&i915->drm,
"VBT has unsupported DSI port %c\n",
port_name(dvo_port - DVO_PORT_MIPIA));
+ continue;
}
+
+ if (port)
+ *port = dsi_dvo_port_to_port(i915, dvo_port);
+ return true;
}
return false;
@@ -3511,7 +3524,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- if (child->dvo_port - DVO_PORT_MIPIA == encoder->port) {
+ if (dsi_dvo_port_to_port(i915, child->dvo_port) == encoder->port) {
if (!devdata->dsc)
return false;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index b74e36d76013..407a477939e5 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1319,7 +1319,7 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = {
{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
- { .refclk = 24400, .cdclk = 648000, .divider = 2, .ratio = 54 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
{ .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 5575d7abdc09..f76c06b7f1d4 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -328,8 +328,20 @@ out_unlock:
return ret;
}
+static int intelfb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+{
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty)
+ return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+
+ return 0;
+}
+
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_probe = intelfb_create,
+ .fb_dirty = intelfb_dirty,
};
static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index e0766d1be966..11554645e6ee 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -1587,7 +1587,8 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
skl_check_wm_level(&wm->wm[level], ddb);
if (icl_need_wm1_wa(i915, plane_id) &&
- level == 1 && wm->wm[0].enable) {
+ level == 1 && !wm->wm[level].enable &&
+ wm->wm[0].enable) {
wm->wm[level].blocks = wm->wm[0].blocks;
wm->wm[level].lines = wm->wm[0].lines;
wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 6250de9b9196..e4b78ab4773b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1861,11 +1861,19 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
vm = ctx->vm;
GEM_BUG_ON(!vm);
+ /*
+ * Get a reference for the allocated handle. Once the handle is
+ * visible in the vm_xa table, userspace could try to close it
+ * from under our feet, so we need to hold the extra reference
+ * first.
+ */
+ i915_vm_get(vm);
+
err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
- if (err)
+ if (err) {
+ i915_vm_put(vm);
return err;
-
- i915_vm_get(vm);
+ }
GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
args->value = id;
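The get_ppgtt fix above takes the vm reference before the pointer becomes visible in the vm_xa table, and gives it back if the allocation fails, closing the window in which a racing close could drop the last reference. A single-threaded sketch of the same ordering, with every name (struct vm, table_publish, get_handle) invented for illustration:

#include <assert.h>
#include <stdlib.h>

struct vm { int refs; };

static void vm_get(struct vm *v) { v->refs++; }
static void vm_put(struct vm *v) { if (--v->refs == 0) free(v); }

/* Publish 'v' into a (toy) lookup table; returns 0 on success. */
static int table_publish(struct vm **slot, struct vm *v, int fail)
{
	if (fail)
		return -1;
	*slot = v;   /* from here on, other threads could find and put it */
	return 0;
}

static int get_handle(struct vm **slot, struct vm *v, int fail)
{
	/* Take the reference for the handle *before* it becomes visible,
	 * so a racing close cannot drop the last reference under us. */
	vm_get(v);
	if (table_publish(slot, v, fail)) {
		vm_put(v);   /* publication failed: give the reference back */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct vm *v = calloc(1, sizeof(*v));
	struct vm *slot = NULL;

	v->refs = 1;
	assert(get_handle(&slot, v, 1) == -1 && v->refs == 1);
	assert(get_handle(&slot, v, 0) == 0 && v->refs == 2 && slot == v);

	vm_put(slot);   /* handle closed       */
	vm_put(v);      /* creator's reference */
	return 0;
}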
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index f266b68cf012..0f2e056c02dd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -3483,6 +3483,13 @@ err_request:
eb.composite_fence :
&eb.requests[0]->fence);
+ if (unlikely(eb.gem_context->syncobj)) {
+ drm_syncobj_replace_fence(eb.gem_context->syncobj,
+ eb.composite_fence ?
+ eb.composite_fence :
+ &eb.requests[0]->fence);
+ }
+
if (out_fence) {
if (err == 0) {
fd_install(out_fence_fd, out_fence->file);
@@ -3494,13 +3501,6 @@ err_request:
}
}
- if (unlikely(eb.gem_context->syncobj)) {
- drm_syncobj_replace_fence(eb.gem_context->syncobj,
- eb.composite_fence ?
- eb.composite_fence :
- &eb.requests[0]->fence);
- }
-
if (!out_fence && eb.composite_fence)
dma_fence_put(eb.composite_fence);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 9c759df700ca..937728840428 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -579,7 +579,7 @@ static int shmem_object_init(struct intel_memory_region *mem,
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
- i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
+ i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index fd42b89b7162..bc21b1c2350a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -305,10 +305,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
spin_unlock(&obj->vma.lock);
obj->tiling_and_stride = tiling | stride;
- i915_gem_object_unlock(obj);
-
- /* Force the fence to be reacquired for GTT access */
- i915_gem_object_release_mmap_gtt(obj);
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -321,6 +317,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
obj->bit_17 = NULL;
}
+ i915_gem_object_unlock(obj);
+
+ /* Force the fence to be reacquired for GTT access */
+ i915_gem_object_release_mmap_gtt(obj);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index e94365b08f1e..2aa63ec521b8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -528,7 +528,7 @@ retry:
return rq;
}
-struct i915_request *intel_context_find_active_request(struct intel_context *ce)
+struct i915_request *intel_context_get_active_request(struct intel_context *ce)
{
struct intel_context *parent = intel_context_to_parent(ce);
struct i915_request *rq, *active = NULL;
@@ -552,6 +552,8 @@ struct i915_request *intel_context_find_active_request(struct intel_context *ce)
active = rq;
}
+ if (active)
+ active = i915_request_get_rcu(active);
spin_unlock_irqrestore(&parent->guc_state.lock, flags);
return active;
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index fb62b7b8cbcd..0a8d553da3f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -268,8 +268,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
struct i915_request *intel_context_create_request(struct intel_context *ce);
-struct i915_request *
-intel_context_find_active_request(struct intel_context *ce);
+struct i915_request *intel_context_get_active_request(struct intel_context *ce);
static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index cbc8b857d5f7..7a4504ea35c3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -248,8 +248,8 @@ void intel_engine_dump_active_requests(struct list_head *requests,
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
ktime_t *now);
-struct i915_request *
-intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine);
+void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
+ struct intel_context **ce, struct i915_request **rq);
u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
struct intel_context *
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index c33e0d72d670..d37931e16fd9 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -2094,17 +2094,6 @@ static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
}
}
-static unsigned long list_count(struct list_head *list)
-{
- struct list_head *pos;
- unsigned long count = 0;
-
- list_for_each(pos, list)
- count++;
-
- return count;
-}
-
static unsigned long read_ul(void *p, size_t x)
{
return *(unsigned long *)(p + x);
@@ -2196,11 +2185,11 @@ void intel_engine_dump_active_requests(struct list_head *requests,
}
}
-static void engine_dump_active_requests(struct intel_engine_cs *engine, struct drm_printer *m)
+static void engine_dump_active_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m)
{
+ struct intel_context *hung_ce = NULL;
struct i915_request *hung_rq = NULL;
- struct intel_context *ce;
- bool guc;
/*
* No need for an engine->irq_seqno_barrier() before the seqno reads.
@@ -2209,27 +2198,22 @@ static void engine_dump_active_requests(struct intel_engine_cs *engine, struct d
* But the intention here is just to report an instantaneous snapshot
* so that's fine.
*/
- lockdep_assert_held(&engine->sched_engine->lock);
+ intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);
drm_printf(m, "\tRequests:\n");
- guc = intel_uc_uses_guc_submission(&engine->gt->uc);
- if (guc) {
- ce = intel_engine_get_hung_context(engine);
- if (ce)
- hung_rq = intel_context_find_active_request(ce);
- } else {
- hung_rq = intel_engine_execlist_find_hung_request(engine);
- }
-
if (hung_rq)
engine_dump_request(hung_rq, m, "\t\thung");
+ else if (hung_ce)
+ drm_printf(m, "\t\tGot hung ce but no hung rq!\n");
- if (guc)
+ if (intel_uc_uses_guc_submission(&engine->gt->uc))
intel_guc_dump_active_requests(engine, hung_rq, m);
else
- intel_engine_dump_active_requests(&engine->sched_engine->requests,
- hung_rq, m);
+ intel_execlists_dump_active_requests(engine, hung_rq, m);
+
+ if (hung_rq)
+ i915_request_put(hung_rq);
}
void intel_engine_dump(struct intel_engine_cs *engine,
@@ -2239,7 +2223,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct i915_request *rq;
intel_wakeref_t wakeref;
- unsigned long flags;
ktime_t dummy;
if (header) {
@@ -2276,13 +2259,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
i915_reset_count(error));
print_properties(engine, m);
- spin_lock_irqsave(&engine->sched_engine->lock, flags);
engine_dump_active_requests(engine, m);
- drm_printf(m, "\tOn hold?: %lu\n",
- list_count(&engine->sched_engine->hold));
- spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
-
drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
if (wakeref) {
@@ -2328,8 +2306,7 @@ intel_engine_create_virtual(struct intel_engine_cs **siblings,
return siblings[0]->cops->create_virtual(siblings, count, flags);
}
-struct i915_request *
-intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
+static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine)
{
struct i915_request *request, *active = NULL;
@@ -2381,6 +2358,33 @@ intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
return active;
}
+void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
+ struct intel_context **ce, struct i915_request **rq)
+{
+ unsigned long flags;
+
+ *ce = intel_engine_get_hung_context(engine);
+ if (*ce) {
+ intel_engine_clear_hung_context(engine);
+
+ *rq = intel_context_get_active_request(*ce);
+ return;
+ }
+
+ /*
+ * Getting here with GuC enabled means it is a forced error capture
+ * with no actual hang. So, no need to attempt the execlist search.
+ */
+ if (intel_uc_uses_guc_submission(&engine->gt->uc))
+ return;
+
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+ *rq = engine_execlist_find_hung_request(engine);
+ if (*rq)
+ *rq = i915_request_get_rcu(*rq);
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
{
/*
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 2daffa7c7dfd..21cb5b69d82e 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -4148,6 +4148,33 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
+static unsigned long list_count(struct list_head *list)
+{
+ struct list_head *pos;
+ unsigned long count = 0;
+
+ list_for_each(pos, list)
+ count++;
+
+ return count;
+}
+
+void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
+ struct i915_request *hung_rq,
+ struct drm_printer *m)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+
+ intel_engine_dump_active_requests(&engine->sched_engine->requests, hung_rq, m);
+
+ drm_printf(m, "\tOn hold?: %lu\n",
+ list_count(&engine->sched_engine->hold));
+
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_execlists.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
index a1aa92c983a5..d2c7d45ea062 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
@@ -32,6 +32,10 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
int indent),
unsigned int max);
+void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
+ struct i915_request *hung_rq,
+ struct drm_printer *m);
+
bool
intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 0a42f1807f52..c10977cb06b9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1702,7 +1702,7 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
goto next_context;
guilty = false;
- rq = intel_context_find_active_request(ce);
+ rq = intel_context_get_active_request(ce);
if (!rq) {
head = ce->ring->tail;
goto out_replay;
@@ -1715,6 +1715,7 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
head = intel_ring_wrap(ce->ring, rq->head);
__i915_request_reset(rq, guilty);
+ i915_request_put(rq);
out_replay:
guc_reset_state(ce, head, guilty);
next_context:
@@ -4817,6 +4818,8 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
+ bool found;
+
if (!kref_get_unless_zero(&ce->ref))
continue;
@@ -4833,10 +4836,18 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
goto next;
}
+ found = false;
+ spin_lock(&ce->guc_state.lock);
list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
continue;
+ found = true;
+ break;
+ }
+ spin_unlock(&ce->guc_state.lock);
+
+ if (found) {
intel_engine_set_hung_context(engine, ce);
/* Can only cope with one hang at a time... */
@@ -4844,6 +4855,7 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
xa_lock(&guc->context_lookup);
goto done;
}
+
next:
intel_context_put(ce);
xa_lock(&guc->context_lookup);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9d5d5a397b64..b20bd6365615 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1596,43 +1596,20 @@ capture_engine(struct intel_engine_cs *engine,
{
struct intel_engine_capture_vma *capture = NULL;
struct intel_engine_coredump *ee;
- struct intel_context *ce;
+ struct intel_context *ce = NULL;
struct i915_request *rq = NULL;
- unsigned long flags;
ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
if (!ee)
return NULL;
- ce = intel_engine_get_hung_context(engine);
- if (ce) {
- intel_engine_clear_hung_context(engine);
- rq = intel_context_find_active_request(ce);
- if (!rq || !i915_request_started(rq))
- goto no_request_capture;
- } else {
- /*
- * Getting here with GuC enabled means it is a forced error capture
- * with no actual hang. So, no need to attempt the execlist search.
- */
- if (!intel_uc_uses_guc_submission(&engine->gt->uc)) {
- spin_lock_irqsave(&engine->sched_engine->lock, flags);
- rq = intel_engine_execlist_find_hung_request(engine);
- spin_unlock_irqrestore(&engine->sched_engine->lock,
- flags);
- }
- }
- if (rq)
- rq = i915_request_get_rcu(rq);
-
- if (!rq)
+ intel_engine_get_hung_entity(engine, &ce, &rq);
+ if (!rq || !i915_request_started(rq))
goto no_request_capture;
capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
- if (!capture) {
- i915_request_put(rq);
+ if (!capture)
goto no_request_capture;
- }
if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
intel_guc_capture_get_matching_node(engine->gt, ee, ce);
@@ -1642,6 +1619,8 @@ capture_engine(struct intel_engine_cs *engine,
return ee;
no_request_capture:
+ if (rq)
+ i915_request_put(rq);
kfree(ee);
return NULL;
}
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 40768373cdd9..c5a4f49ee206 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -97,6 +97,7 @@ int gp100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
int gp102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int gp10b_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int gv100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int tu102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index fcf2a002f6cb..91fb494d4009 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -151,6 +151,9 @@ nvkm_firmware_mem_page(struct nvkm_memory *memory)
static enum nvkm_memory_target
nvkm_firmware_mem_target(struct nvkm_memory *memory)
{
+ if (nvkm_firmware_mem(memory)->device->func->tegra)
+ return NVKM_MEM_TARGET_NCOH;
+
return NVKM_MEM_TARGET_HOST;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 364fea320cb3..1c81e5b34d29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2405,7 +2405,7 @@ nv162_chipset = {
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
- .fb = { 0x00000001, gv100_fb_new },
+ .fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
@@ -2440,7 +2440,7 @@ nv164_chipset = {
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
- .fb = { 0x00000001, gv100_fb_new },
+ .fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
@@ -2475,7 +2475,7 @@ nv166_chipset = {
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
- .fb = { 0x00000001, gv100_fb_new },
+ .fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
@@ -2510,7 +2510,7 @@ nv167_chipset = {
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
- .fb = { 0x00000001, gv100_fb_new },
+ .fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
@@ -2545,7 +2545,7 @@ nv168_chipset = {
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
- .fb = { 0x00000001, gv100_fb_new },
+ .fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
index 393ade9f7e6c..b7da3ab44c27 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
@@ -48,6 +48,16 @@ gm200_flcn_pio_dmem_rd(struct nvkm_falcon *falcon, u8 port, const u8 *img, int l
img += 4;
len -= 4;
}
+
+ /* Sigh. Tegra PMU FW's init message... */
+ if (len) {
+ u32 data = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
+
+ while (len--) {
+ *(u8 *)img++ = data & 0xff;
+ data >>= 8;
+ }
+ }
}
static void
@@ -64,6 +74,8 @@ gm200_flcn_pio_dmem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int l
img += 4;
len -= 4;
}
+
+ WARN_ON(len);
}
static void
@@ -74,7 +86,7 @@ gm200_flcn_pio_dmem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 d
const struct nvkm_falcon_func_pio
gm200_flcn_dmem_pio = {
- .min = 4,
+ .min = 1,
.max = 0x100,
.wr_init = gm200_flcn_pio_dmem_wr_init,
.wr = gm200_flcn_pio_dmem_wr,
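The gm200 falcon change above lowers the DMEM PIO minimum from 4 to 1 and handles a trailing partial word on reads by fetching one more 32-bit register value and peeling it off byte by byte, least significant byte first. A simplified, endian-independent userspace sketch of that unpacking (pio_read and the register array are invented; only the tail handling mirrors the hunk):

#include <assert.h>
#include <stdint.h>

/* Copy 'len' bytes out of consecutive 32-bit register reads, including
 * a final word that is only partially consumed. */
static void pio_read(uint8_t *dst, const uint32_t *regs, int len)
{
	while (len > 0) {
		uint32_t data = *regs++;
		int n = len < 4 ? len : 4;
		int i;

		/* least significant byte first, one byte per shift */
		for (i = 0; i < n; i++) {
			*dst++ = data & 0xff;
			data >>= 8;
		}
		len -= n;
	}
}

int main(void)
{
	const uint32_t regs[2] = { 0x44332211, 0x00006655 };
	uint8_t out[6];

	pio_read(out, regs, 6);   /* 6 is not a multiple of 4 */
	assert(out[0] == 0x11 && out[3] == 0x44 && out[4] == 0x55 && out[5] == 0x66);
	return 0;
}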
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
index 634f64f88fc8..81a1ad2c88a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
@@ -65,10 +65,33 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
return ret;
}
+static int
+tu102_devinit_wait(struct nvkm_device *device)
+{
+ unsigned timeout = 50 + 2000;
+
+ do {
+ if (nvkm_rd32(device, 0x118128) & 0x00000001) {
+ if ((nvkm_rd32(device, 0x118234) & 0x000000ff) == 0xff)
+ return 0;
+ }
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
int
tu102_devinit_post(struct nvkm_devinit *base, bool post)
{
struct nv50_devinit *init = nv50_devinit(base);
+ int ret;
+
+ ret = tu102_devinit_wait(init->base.subdev.device);
+ if (ret)
+ return ret;
+
gm200_devinit_preos(init, post);
return 0;
}
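tu102_devinit_post above now waits, polling roughly once a millisecond with an attempt budget, for the devinit-complete condition before running the preos scripts, and bails out with -ETIMEDOUT otherwise. A generic userspace sketch of that poll-with-timeout shape, where wait_for and the ready_after_three callback are invented stand-ins for the register reads in the driver:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Poll a condition about once per millisecond until it holds or the
 * attempt budget runs out. */
static int wait_for(int (*done)(void *), void *arg, unsigned attempts)
{
	do {
		if (done(arg))
			return 0;
		usleep(1000);
	} while (attempts--);

	return -ETIMEDOUT;
}

static int ready_after_three(void *arg)
{
	int *calls = arg;

	return ++(*calls) >= 3;
}

int main(void)
{
	int calls = 0;

	if (wait_for(ready_after_three, &calls, 50))
		fprintf(stderr, "timed out\n");
	else
		printf("ready after %d polls\n", calls);
	return 0;
}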
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 5d0bab8ecb43..6ba5120a2ebe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -32,6 +32,7 @@ nvkm-y += nvkm/subdev/fb/gp100.o
nvkm-y += nvkm/subdev/fb/gp102.o
nvkm-y += nvkm/subdev/fb/gp10b.o
nvkm-y += nvkm/subdev/fb/gv100.o
+nvkm-y += nvkm/subdev/fb/tu102.o
nvkm-y += nvkm/subdev/fb/ga100.o
nvkm-y += nvkm/subdev/fb/ga102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
index 8b7c8ea5e8a5..5a21b0ae4595 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -40,12 +40,6 @@ ga102_fb_vpr_scrub(struct nvkm_fb *fb)
return ret;
}
-static bool
-ga102_fb_vpr_scrub_required(struct nvkm_fb *fb)
-{
- return (nvkm_rd32(fb->subdev.device, 0x1fa80c) & 0x00000010) != 0;
-}
-
static const struct nvkm_fb_func
ga102_fb = {
.dtor = gf100_fb_dtor,
@@ -56,7 +50,7 @@ ga102_fb = {
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = ga102_ram_new,
.default_bigpage = 16,
- .vpr.scrub_required = ga102_fb_vpr_scrub_required,
+ .vpr.scrub_required = tu102_fb_vpr_scrub_required,
.vpr.scrub = ga102_fb_vpr_scrub,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 1f0126437c1a..0e3c0a8f5d71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -49,8 +49,3 @@ gv100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, s
}
MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu116/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu117/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index ac03eac0f261..f517751f94ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -89,4 +89,6 @@ bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
int gp102_fb_vpr_scrub(struct nvkm_fb *);
int gv100_fb_init_page(struct nvkm_fb *);
+
+bool tu102_fb_vpr_scrub_required(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
new file mode 100644
index 000000000000..be82af0364ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+bool
+tu102_fb_vpr_scrub_required(struct nvkm_fb *fb)
+{
+ return (nvkm_rd32(fb->subdev.device, 0x1fa80c) & 0x00000010) != 0;
+}
+
+static const struct nvkm_fb_func
+tu102_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gm200_fb_init,
+ .init_page = gv100_fb_init_page,
+ .init_unkn = gp100_fb_init_unkn,
+ .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
+ .vpr.scrub_required = tu102_fb_vpr_scrub_required,
+ .vpr.scrub = gp102_fb_vpr_scrub,
+ .ram_new = gp100_ram_new,
+ .default_bigpage = 16,
+};
+
+int
+tu102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return gp102_fb_new_(&tu102_fb, device, type, inst, pfb);
+}
+
+MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu116/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu117/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index a72403777329..2ed04da3621d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -225,7 +225,7 @@ gm20b_pmu_init(struct nvkm_pmu *pmu)
pmu->initmsg_received = false;
- nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
+ nvkm_falcon_pio_wr(falcon, (u8 *)&args, 0, 0, DMEM, addr_args, sizeof(args), 0, false);
nvkm_falcon_start(falcon);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 857a2f0420d7..c924f1124ebc 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1193,14 +1193,11 @@ static int boe_panel_enter_sleep_mode(struct boe_panel *boe)
return 0;
}
-static int boe_panel_unprepare(struct drm_panel *panel)
+static int boe_panel_disable(struct drm_panel *panel)
{
struct boe_panel *boe = to_boe_panel(panel);
int ret;
- if (!boe->prepared)
- return 0;
-
ret = boe_panel_enter_sleep_mode(boe);
if (ret < 0) {
dev_err(panel->dev, "failed to set panel off: %d\n", ret);
@@ -1209,6 +1206,16 @@ static int boe_panel_unprepare(struct drm_panel *panel)
msleep(150);
+ return 0;
+}
+
+static int boe_panel_unprepare(struct drm_panel *panel)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+
+ if (!boe->prepared)
+ return 0;
+
if (boe->desc->discharge_on_disable) {
regulator_disable(boe->avee);
regulator_disable(boe->avdd);
@@ -1528,6 +1535,7 @@ static enum drm_panel_orientation boe_panel_get_orientation(struct drm_panel *pa
}
static const struct drm_panel_funcs boe_panel_funcs = {
+ .disable = boe_panel_disable,
.unprepare = boe_panel_unprepare,
.prepare = boe_panel_prepare,
.enable = boe_panel_enable,
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 53464afc2b9a..91f69e62430b 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -656,18 +656,8 @@ static const struct drm_crtc_helper_funcs ssd130x_crtc_helper_funcs = {
.atomic_check = drm_crtc_helper_atomic_check,
};
-static void ssd130x_crtc_reset(struct drm_crtc *crtc)
-{
- struct drm_device *drm = crtc->dev;
- struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
-
- ssd130x_init(ssd130x);
-
- drm_atomic_helper_crtc_reset(crtc);
-}
-
static const struct drm_crtc_funcs ssd130x_crtc_funcs = {
- .reset = ssd130x_crtc_reset,
+ .reset = drm_atomic_helper_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -686,6 +676,12 @@ static void ssd130x_encoder_helper_atomic_enable(struct drm_encoder *encoder,
if (ret)
return;
+ ret = ssd130x_init(ssd130x);
+ if (ret) {
+ ssd130x_power_off(ssd130x);
+ return;
+ }
+
ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_ON);
backlight_enable(ssd130x->bl_dev);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 12a00d644b61..55744216392b 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -3018,7 +3018,8 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
}
vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
- vc4_hdmi, "vc4",
+ vc4_hdmi,
+ vc4_hdmi->variant->card_name,
CEC_CAP_DEFAULTS |
CEC_CAP_CONNECTOR_INFO, 1);
ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 9f4a90493aea..da45215a933d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -126,7 +126,6 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
void __user *user_bo_handles = NULL;
struct virtio_gpu_object_array *buflist = NULL;
struct sync_file *sync_file;
- int in_fence_fd = exbuf->fence_fd;
int out_fence_fd = -1;
void *buf;
uint64_t fence_ctx;
@@ -152,13 +151,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
ring_idx = exbuf->ring_idx;
}
- exbuf->fence_fd = -1;
-
virtio_gpu_create_context(dev, file);
if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
struct dma_fence *in_fence;
- in_fence = sync_file_get_fence(in_fence_fd);
+ in_fence = sync_file_get_fence(exbuf->fence_fd);
if (!in_fence)
return -EINVAL;
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 1fb0f7105fb2..c751d12f5df8 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -227,6 +227,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
cl_data->num_hid_devices = amd_mp2_get_sensor_num(privdata, &cl_data->sensor_idx[0]);
if (cl_data->num_hid_devices == 0)
return -ENODEV;
+ cl_data->is_any_sensor_enabled = false;
INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
@@ -287,6 +288,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
status = amd_sfh_wait_for_response
(privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
if (status == SENSOR_ENABLED) {
+ cl_data->is_any_sensor_enabled = true;
cl_data->sensor_sts[i] = SENSOR_ENABLED;
rc = amdtp_hid_probe(cl_data->cur_hid_dev, cl_data);
if (rc) {
@@ -301,19 +303,26 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
cl_data->sensor_sts[i]);
goto cleanup;
}
+ } else {
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
+ cl_data->sensor_idx[i],
+ get_sensor_name(cl_data->sensor_idx[i]),
+ cl_data->sensor_sts[i]);
}
dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
cl_data->sensor_sts[i]);
}
- if (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0) {
+ if (!cl_data->is_any_sensor_enabled ||
+ (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
amd_sfh_hid_client_deinit(privdata);
for (i = 0; i < cl_data->num_hid_devices; i++) {
devm_kfree(dev, cl_data->feature_report[i]);
devm_kfree(dev, in_data->input_report[i]);
devm_kfree(dev, cl_data->report_descr[i]);
}
- dev_warn(dev, "Failed to discover, sensors not enabled\n");
+ dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", cl_data->is_any_sensor_enabled);
return -EOPNOTSUPP;
}
schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
index 3754fb423e3a..528036892c9d 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
@@ -32,6 +32,7 @@ struct amd_input_data {
struct amdtp_cl_data {
u8 init_done;
u32 cur_hid_dev;
+ bool is_any_sensor_enabled;
u32 hid_dev_count;
u32 num_hid_devices;
struct device_info *hid_devices;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3e1803592bd4..5c72aef3d3dd 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1202,6 +1202,7 @@ int hid_open_report(struct hid_device *device)
__u8 *end;
__u8 *next;
int ret;
+ int i;
static int (*dispatch_type[])(struct hid_parser *parser,
struct hid_item *item) = {
hid_parser_main,
@@ -1252,6 +1253,8 @@ int hid_open_report(struct hid_device *device)
goto err;
}
device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
+ for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
+ device->collection[i].parent_idx = -1;
ret = -EINVAL;
while ((next = fetch_item(start, end, &item)) != NULL) {
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index e59e9911fc37..4fa45ee77503 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -12,6 +12,7 @@
* Copyright (c) 2017 Alex Manoussakis <[email protected]>
* Copyright (c) 2017 Tomasz Kramkowski <[email protected]>
* Copyright (c) 2020 YOSHIOKA Takuma <[email protected]>
+ * Copyright (c) 2022 Takahiro Fujii <[email protected]>
*/
/*
@@ -89,7 +90,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
case USB_DEVICE_ID_ELECOM_M_DT1URBK:
case USB_DEVICE_ID_ELECOM_M_DT1DRBK:
case USB_DEVICE_ID_ELECOM_M_HT1URBK:
- case USB_DEVICE_ID_ELECOM_M_HT1DRBK:
+ case USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D:
/*
* Report descriptor format:
* 12: button bit count
@@ -99,6 +100,16 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*/
mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 8);
break;
+ case USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C:
+ /*
+ * Report descriptor format:
+ * 22: button bit count
+ * 30: padding bit count
+ * 24: button report size
+ * 16: button usage maximum
+ */
+ mouse_button_fixup(hdev, rdesc, *rsize, 22, 30, 24, 16, 8);
+ break;
}
return rdesc;
}
@@ -112,7 +123,8 @@ static const struct hid_device_id elecom_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) },
{ }
};
MODULE_DEVICE_TABLE(hid, elecom_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0f8c11842a3a..9e36b4cd905e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -413,6 +413,8 @@
#define I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100 0x29CF
#define I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV 0x2CF9
#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG 0x29DF
+#define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8
#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
@@ -428,7 +430,8 @@
#define USB_DEVICE_ID_ELECOM_M_DT1URBK 0x00fe
#define USB_DEVICE_ID_ELECOM_M_DT1DRBK 0x00ff
#define USB_DEVICE_ID_ELECOM_M_HT1URBK 0x010c
-#define USB_DEVICE_ID_ELECOM_M_HT1DRBK 0x010d
+#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D 0x010d
+#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C 0x011c
#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 9b59e436df0a..77c8c49852b5 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -370,6 +370,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
@@ -384,6 +386,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG),
+ HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN),
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index abf2c95e4d0b..9c1ee8e91e0c 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -3978,7 +3978,8 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
}
hidpp_initialize_battery(hidpp);
- hidpp_initialize_hires_scroll(hidpp);
+ if (!hid_is_usb(hidpp->hid_dev))
+ hidpp_initialize_hires_scroll(hidpp);
/* forward current battery state */
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index be3ad02573de..5bc91f68b374 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -393,7 +393,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) },
#endif
#if IS_ENABLED(CONFIG_HID_ELO)
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index cbe43e2567a7..64ac5bdee3a6 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1963,7 +1963,7 @@ static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
{
- debugfs_remove(debugfs_lookup("hv-balloon", NULL));
+ debugfs_lookup_and_remove("hv-balloon", NULL);
}
#else
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index cfeb24d40d37..bb3d10099ba4 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1430,6 +1430,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &idle_cpu_adl_n),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr),
+ X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &idle_cpu_spr),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt),
@@ -1862,6 +1863,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
skx_idle_state_table_update();
break;
case INTEL_FAM6_SAPPHIRERAPIDS_X:
+ case INTEL_FAM6_EMERALDRAPIDS_X:
spr_idle_state_table_update();
break;
case INTEL_FAM6_ALDERLAKE:
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index a2def6f9380a..5eac7ea19993 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -280,6 +280,7 @@ static int accel_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
hid_sensor_convert_timestamp(
&accel_state->common_attributes,
*(int64_t *)raw_data);
+ ret = 0;
break;
default:
break;
diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c
index 3d2e8b4db61a..a4e7c7eff5ac 100644
--- a/drivers/iio/adc/berlin2-adc.c
+++ b/drivers/iio/adc/berlin2-adc.c
@@ -298,8 +298,10 @@ static int berlin2_adc_probe(struct platform_device *pdev)
int ret;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
- if (!indio_dev)
+ if (!indio_dev) {
+ of_node_put(parent_np);
return -ENOMEM;
+ }
priv = iio_priv(indio_dev);
diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c
index 36777b827165..f5a0fc9e64c5 100644
--- a/drivers/iio/adc/imx8qxp-adc.c
+++ b/drivers/iio/adc/imx8qxp-adc.c
@@ -86,6 +86,8 @@
#define IMX8QXP_ADC_TIMEOUT msecs_to_jiffies(100)
+#define IMX8QXP_ADC_MAX_FIFO_SIZE 16
+
struct imx8qxp_adc {
struct device *dev;
void __iomem *regs;
@@ -95,6 +97,7 @@ struct imx8qxp_adc {
/* Serialise ADC channel reads */
struct mutex lock;
struct completion completion;
+ u32 fifo[IMX8QXP_ADC_MAX_FIFO_SIZE];
};
#define IMX8QXP_ADC_CHAN(_idx) { \
@@ -238,8 +241,7 @@ static int imx8qxp_adc_read_raw(struct iio_dev *indio_dev,
return ret;
}
- *val = FIELD_GET(IMX8QXP_ADC_RESFIFO_VAL_MASK,
- readl(adc->regs + IMX8QXP_ADR_ADC_RESFIFO));
+ *val = adc->fifo[0];
mutex_unlock(&adc->lock);
return IIO_VAL_INT;
@@ -265,10 +267,15 @@ static irqreturn_t imx8qxp_adc_isr(int irq, void *dev_id)
{
struct imx8qxp_adc *adc = dev_id;
u32 fifo_count;
+ int i;
fifo_count = FIELD_GET(IMX8QXP_ADC_FCTRL_FCOUNT_MASK,
readl(adc->regs + IMX8QXP_ADR_ADC_FCTRL));
+ for (i = 0; i < fifo_count; i++)
+ adc->fifo[i] = FIELD_GET(IMX8QXP_ADC_RESFIFO_VAL_MASK,
+ readl_relaxed(adc->regs + IMX8QXP_ADR_ADC_RESFIFO));
+
if (fifo_count)
complete(&adc->completion);
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 6d21ea84fa82..a428bdb567d5 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -1520,6 +1520,7 @@ static const struct of_device_id stm32_dfsdm_adc_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, stm32_dfsdm_adc_match);
static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
{
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index f53e8558b560..32873fb5f367 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -57,6 +57,18 @@
#define TWL6030_GPADCS BIT(1)
#define TWL6030_GPADCR BIT(0)
+#define USB_VBUS_CTRL_SET 0x04
+#define USB_ID_CTRL_SET 0x06
+
+#define TWL6030_MISC1 0xE4
+#define VBUS_MEAS 0x01
+#define ID_MEAS 0x01
+
+#define VAC_MEAS 0x04
+#define VBAT_MEAS 0x02
+#define BB_MEAS 0x01
+
+
/**
* struct twl6030_chnl_calib - channel calibration
* @gain: slope coefficient for ideal curve
@@ -927,6 +939,26 @@ static int twl6030_gpadc_probe(struct platform_device *pdev)
return ret;
}
+ ret = twl_i2c_write_u8(TWL_MODULE_USB, VBUS_MEAS, USB_VBUS_CTRL_SET);
+ if (ret < 0) {
+ dev_err(dev, "failed to wire up inputs\n");
+ return ret;
+ }
+
+ ret = twl_i2c_write_u8(TWL_MODULE_USB, ID_MEAS, USB_ID_CTRL_SET);
+ if (ret < 0) {
+ dev_err(dev, "failed to wire up inputs\n");
+ return ret;
+ }
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID0,
+ VBAT_MEAS | BB_MEAS | VAC_MEAS,
+ TWL6030_MISC1);
+ if (ret < 0) {
+ dev_err(dev, "failed to wire up inputs\n");
+ return ret;
+ }
+
indio_dev->name = DRIVER_NAME;
indio_dev->info = &twl6030_gpadc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index 5b4bdf3a26bb..a507d2e17079 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -1329,7 +1329,7 @@ static int ams_parse_firmware(struct iio_dev *indio_dev)
dev_channels = devm_krealloc(dev, ams_channels, dev_size, GFP_KERNEL);
if (!dev_channels)
- ret = -ENOMEM;
+ return -ENOMEM;
indio_dev->channels = dev_channels;
indio_dev->num_channels = num_channels;
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 8f0ad022c7f1..698c50da1f10 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -231,6 +231,7 @@ static int gyro_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
gyro_state->timestamp =
hid_sensor_convert_timestamp(&gyro_state->common_attributes,
*(s64 *)raw_data);
+ ret = 0;
break;
default:
break;
diff --git a/drivers/iio/imu/fxos8700_core.c b/drivers/iio/imu/fxos8700_core.c
index 423cfe526f2a..6d189c4b9ff9 100644
--- a/drivers/iio/imu/fxos8700_core.c
+++ b/drivers/iio/imu/fxos8700_core.c
@@ -10,6 +10,7 @@
#include <linux/regmap.h>
#include <linux/acpi.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -144,9 +145,8 @@
#define FXOS8700_NVM_DATA_BNK0 0xa7
/* Bit definitions for FXOS8700_CTRL_REG1 */
-#define FXOS8700_CTRL_ODR_MSK 0x38
#define FXOS8700_CTRL_ODR_MAX 0x00
-#define FXOS8700_CTRL_ODR_MIN GENMASK(4, 3)
+#define FXOS8700_CTRL_ODR_MSK GENMASK(5, 3)
/* Bit definitions for FXOS8700_M_CTRL_REG1 */
#define FXOS8700_HMS_MASK GENMASK(1, 0)
@@ -320,7 +320,7 @@ static enum fxos8700_sensor fxos8700_to_sensor(enum iio_chan_type iio_type)
switch (iio_type) {
case IIO_ACCEL:
return FXOS8700_ACCEL;
- case IIO_ANGL_VEL:
+ case IIO_MAGN:
return FXOS8700_MAGN;
default:
return -EINVAL;
@@ -345,15 +345,35 @@ static int fxos8700_set_active_mode(struct fxos8700_data *data,
static int fxos8700_set_scale(struct fxos8700_data *data,
enum fxos8700_sensor t, int uscale)
{
- int i;
+ int i, ret, val;
+ bool active_mode;
static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
struct device *dev = regmap_get_device(data->regmap);
if (t == FXOS8700_MAGN) {
- dev_err(dev, "Magnetometer scale is locked at 1200uT\n");
+ dev_err(dev, "Magnetometer scale is locked at 0.001Gs\n");
return -EINVAL;
}
+ /*
+ * When the device is in active mode, setting an ACCEL
+ * full-scale range (2g/4g/8g) in FXOS8700_XYZ_DATA_CFG fails.
+ * This does not align with the datasheet, but it is how the
+ * fxos8700 chip behaves. Put the device in standby mode before
+ * setting an ACCEL full-scale range.
+ */
+ ret = regmap_read(data->regmap, FXOS8700_CTRL_REG1, &val);
+ if (ret)
+ return ret;
+
+ active_mode = val & FXOS8700_ACTIVE;
+ if (active_mode) {
+ ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
+ val & ~FXOS8700_ACTIVE);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < scale_num; i++)
if (fxos8700_accel_scale[i].uscale == uscale)
break;
@@ -361,8 +381,12 @@ static int fxos8700_set_scale(struct fxos8700_data *data,
if (i == scale_num)
return -EINVAL;
- return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG,
+ ret = regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG,
fxos8700_accel_scale[i].bits);
+ if (ret)
+ return ret;
+ return regmap_write(data->regmap, FXOS8700_CTRL_REG1,
+ active_mode);
}
static int fxos8700_get_scale(struct fxos8700_data *data,
@@ -372,7 +396,7 @@ static int fxos8700_get_scale(struct fxos8700_data *data,
static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
if (t == FXOS8700_MAGN) {
- *uscale = 1200; /* Magnetometer is locked at 1200uT */
+ *uscale = 1000; /* Magnetometer is locked at 0.001Gs */
return 0;
}
@@ -394,22 +418,61 @@ static int fxos8700_get_data(struct fxos8700_data *data, int chan_type,
int axis, int *val)
{
u8 base, reg;
+ s16 tmp;
int ret;
- enum fxos8700_sensor type = fxos8700_to_sensor(chan_type);
- base = type ? FXOS8700_OUT_X_MSB : FXOS8700_M_OUT_X_MSB;
+ /*
+ * The register base address varies with the channel type.
+ * This bug hasn't been noticed before because the enum-based
+ * lookup is hard to read. Use a switch statement instead.
+ */
+ switch (chan_type) {
+ case IIO_ACCEL:
+ base = FXOS8700_OUT_X_MSB;
+ break;
+ case IIO_MAGN:
+ base = FXOS8700_M_OUT_X_MSB;
+ break;
+ default:
+ return -EINVAL;
+ }
/* Block read 6 bytes of device output registers to avoid data loss */
ret = regmap_bulk_read(data->regmap, base, data->buf,
- FXOS8700_DATA_BUF_SIZE);
+ sizeof(data->buf));
if (ret)
return ret;
/* Convert axis to buffer index */
reg = axis - IIO_MOD_X;
+ /*
+ * Convert to native endianness. The accel data and magn data
+ * are signed, so a forced type conversion is needed.
+ */
+ tmp = be16_to_cpu(data->buf[reg]);
+
+ /*
+ * ACCEL output data registers contain the X-axis, Y-axis, and Z-axis
+ * 14-bit left-justified sample data and MAGN output data registers
+ * contain the X-axis, Y-axis, and Z-axis 16-bit sample data. Apply
+ * an arithmetic right shift by 2 bits to the raw data read back from
+ * the ACCEL output data registers, and keep the MAGN data as it is.
+ * The value is then sign-extended to 32 bits.
+ */
+ switch (chan_type) {
+ case IIO_ACCEL:
+ tmp = tmp >> 2;
+ break;
+ case IIO_MAGN:
+ /* Nothing to do */
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* Convert to native endianness */
- *val = sign_extend32(be16_to_cpu(data->buf[reg]), 15);
+ *val = sign_extend32(tmp, 15);
return 0;
}
@@ -445,10 +508,9 @@ static int fxos8700_set_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
if (i >= odr_num)
return -EINVAL;
- return regmap_update_bits(data->regmap,
- FXOS8700_CTRL_REG1,
- FXOS8700_CTRL_ODR_MSK + FXOS8700_ACTIVE,
- fxos8700_odr[i].bits << 3 | active_mode);
+ val &= ~FXOS8700_CTRL_ODR_MSK;
+ val |= FIELD_PREP(FXOS8700_CTRL_ODR_MSK, fxos8700_odr[i].bits) | FXOS8700_ACTIVE;
+ return regmap_write(data->regmap, FXOS8700_CTRL_REG1, val);
}
static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
@@ -461,7 +523,7 @@ static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
if (ret)
return ret;
- val &= FXOS8700_CTRL_ODR_MSK;
+ val = FIELD_GET(FXOS8700_CTRL_ODR_MSK, val);
for (i = 0; i < odr_num; i++)
if (val == fxos8700_odr[i].bits)
@@ -526,7 +588,7 @@ static IIO_CONST_ATTR(in_accel_sampling_frequency_available,
static IIO_CONST_ATTR(in_magn_sampling_frequency_available,
"1.5625 6.25 12.5 50 100 200 400 800");
static IIO_CONST_ATTR(in_accel_scale_available, "0.000244 0.000488 0.000976");
-static IIO_CONST_ATTR(in_magn_scale_available, "0.000001200");
+static IIO_CONST_ATTR(in_magn_scale_available, "0.001000");
static struct attribute *fxos8700_attrs[] = {
&iio_const_attr_in_accel_sampling_frequency_available.dev_attr.attr,
@@ -592,14 +654,19 @@ static int fxos8700_chip_init(struct fxos8700_data *data, bool use_spi)
if (ret)
return ret;
- /* Max ODR (800Hz individual or 400Hz hybrid), active mode */
- ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
- FXOS8700_CTRL_ODR_MAX | FXOS8700_ACTIVE);
+ /*
+ * Set max full-scale range (+/-8G) for ACCEL sensor in chip
+ * initialization then activate the device.
+ */
+ ret = regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G);
if (ret)
return ret;
- /* Set for max full-scale range (+/-8G) */
- return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G);
+ /* Max ODR (800Hz individual or 400Hz hybrid), active mode */
+ return regmap_update_bits(data->regmap, FXOS8700_CTRL_REG1,
+ FXOS8700_CTRL_ODR_MSK | FXOS8700_ACTIVE,
+ FIELD_PREP(FXOS8700_CTRL_ODR_MSK, FXOS8700_CTRL_ODR_MAX) |
+ FXOS8700_ACTIVE);
}
static void fxos8700_chip_uninit(void *data)
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index f6660847fb58..8c16cdacf2f2 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -4,6 +4,7 @@ config IIO_ST_LSM6DSX
tristate "ST_LSM6DSx driver for STM 6-axis IMU MEMS sensors"
depends on (I2C || SPI || I3C)
select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
select IIO_KFIFO_BUF
select IIO_ST_LSM6DSX_I2C if (I2C)
select IIO_ST_LSM6DSX_SPI if (SPI_MASTER)
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index 001055d09750..b1674a5bfa36 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -440,6 +440,8 @@ static int cm32181_probe(struct i2c_client *client)
if (!indio_dev)
return -ENOMEM;
+ i2c_set_clientdata(client, indio_dev);
+
/*
* Some ACPI systems list 2 I2C resources for the CM3218 sensor, the
* SMBus Alert Response Address (ARA, 0x0c) and the actual I2C address.
@@ -460,8 +462,6 @@ static int cm32181_probe(struct i2c_client *client)
return PTR_ERR(client);
}
- i2c_set_clientdata(client, indio_dev);
-
cm32181 = iio_priv(indio_dev);
cm32181->client = client;
cm32181->dev = dev;
@@ -490,7 +490,8 @@ static int cm32181_probe(struct i2c_client *client)
static int cm32181_suspend(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct cm32181_chip *cm32181 = iio_priv(dev_get_drvdata(dev));
+ struct i2c_client *client = cm32181->client;
return i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD,
CM32181_CMD_ALS_DISABLE);
@@ -498,8 +499,8 @@ static int cm32181_suspend(struct device *dev)
static int cm32181_resume(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
struct cm32181_chip *cm32181 = iio_priv(dev_get_drvdata(dev));
+ struct i2c_client *client = cm32181->client;
return i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD,
cm32181->conf_regs[CM32181_REG_ADDR_CMD]);
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index 43b26bc12288..39357dc2d229 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -26,8 +26,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
if (umem_dmabuf->sgt)
goto wait_fence;
- sgt = dma_buf_map_attachment_unlocked(umem_dmabuf->attach,
- DMA_BIDIRECTIONAL);
+ sgt = dma_buf_map_attachment(umem_dmabuf->attach,
+ DMA_BIDIRECTIONAL);
if (IS_ERR(sgt))
return PTR_ERR(sgt);
@@ -103,8 +103,8 @@ void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
umem_dmabuf->last_sg_trim = 0;
}
- dma_buf_unmap_attachment_unlocked(umem_dmabuf->attach, umem_dmabuf->sgt,
- DMA_BIDIRECTIONAL);
+ dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
+ DMA_BIDIRECTIONAL);
umem_dmabuf->sgt = NULL;
}
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index f5f9269fdc16..7c5d487ec916 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1318,12 +1318,15 @@ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
- return -EFAULT;
+ ret = -EFAULT;
addr = arg + offsetof(struct hfi1_tid_info, length);
- if (copy_to_user((void __user *)addr, &tinfo.length,
+ if (!ret && copy_to_user((void __user *)addr, &tinfo.length,
sizeof(tinfo.length)))
ret = -EFAULT;
+
+ if (ret)
+ hfi1_user_exp_rcv_invalid(fd, &tinfo);
}
return ret;
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index b02f2f0809c8..350884d5f089 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -160,16 +160,11 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
int pinned;
- unsigned int npages;
+ unsigned int npages = tidbuf->npages;
unsigned long vaddr = tidbuf->vaddr;
struct page **pages = NULL;
struct hfi1_devdata *dd = fd->uctxt->dd;
- /* Get the number of pages the user buffer spans */
- npages = num_user_pages(vaddr, tidbuf->length);
- if (!npages)
- return -EINVAL;
-
if (npages > fd->uctxt->expected_count) {
dd_dev_err(dd, "Expected buffer too big\n");
return -EINVAL;
@@ -196,7 +191,6 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
return pinned;
}
tidbuf->pages = pages;
- tidbuf->npages = npages;
fd->tid_n_pinned += pinned;
return pinned;
}
@@ -274,6 +268,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
mutex_init(&tidbuf->cover_mutex);
tidbuf->vaddr = tinfo->vaddr;
tidbuf->length = tinfo->length;
+ tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length);
tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
GFP_KERNEL);
if (!tidbuf->psets) {
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 7b086fe63a24..195aa9ea18b6 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -1722,6 +1722,9 @@ static int irdma_add_mqh_4(struct irdma_device *iwdev,
continue;
idev = in_dev_get(ip_dev);
+ if (!idev)
+ continue;
+
in_dev_for_each_ifa_rtnl(ifa, idev) {
ibdev_dbg(&iwdev->ibdev,
"CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index ea15ec77e321..54b61930a7fd 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -289,7 +289,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
/* IB ports start with 1, MANA Ethernet ports start with 0 */
port = ucmd.port;
- if (ucmd.port > mc->num_ports)
+ if (port < 1 || port > mc->num_ports)
return -EINVAL;
if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index c301b3be9f30..a2857accc427 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -276,8 +276,8 @@ iter_chunk:
size = pa_end - pa_start + PAGE_SIZE;
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
va_start, &pa_start, size, flags);
- err = iommu_map(pd->domain, va_start, pa_start,
- size, flags);
+ err = iommu_map_atomic(pd->domain, va_start,
+ pa_start, size, flags);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);
@@ -293,8 +293,8 @@ iter_chunk:
size = pa - pa_start + PAGE_SIZE;
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
va_start, &pa_start, size, flags);
- err = iommu_map(pd->domain, va_start, pa_start,
- size, flags);
+ err = iommu_map_atomic(pd->domain, va_start,
+ pa_start, size, flags);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ac25fc80fb33..f10d4bcf87d2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2200,6 +2200,14 @@ int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
rn->attach_mcast = ipoib_mcast_attach;
rn->detach_mcast = ipoib_mcast_detach;
rn->hca = hca;
+
+ rc = netif_set_real_num_tx_queues(dev, 1);
+ if (rc)
+ goto out;
+
+ rc = netif_set_real_num_rx_queues(dev, 1);
+ if (rc)
+ goto out;
}
priv->rn_ops = dev->netdev_ops;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index c76ba29da1e2..5adba0f754b6 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -312,9 +312,8 @@ void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path)
if (srv_path->kobj.state_in_sysfs) {
sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
- kobject_del(&srv_path->kobj);
kobject_put(&srv_path->kobj);
+ rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
}
- rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
}
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index fc3758a5bc1c..53e495223ea0 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -2149,8 +2149,6 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
if (ret)
return ret;
- q->streaming = 1;
-
/*
* Tell driver to start streaming provided sufficient buffers
* are available.
@@ -2161,12 +2159,13 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
goto unprepare;
}
+ q->streaming = 1;
+
dprintk(q, 3, "successful\n");
return 0;
unprepare:
call_void_qop(q, unprepare_streaming, q);
- q->streaming = 0;
return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_streamon);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
index 3d3b6dc24ca6..002ea6588edf 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-api.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
@@ -150,8 +150,8 @@ static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
* then return an error.
*/
if (strlen(ctrl->p_new.p_char) == ctrl->maximum && last)
- ctrl->is_new = 1;
return -ERANGE;
+ ctrl->is_new = 1;
}
return ret;
default:
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 4f9b4a18c74c..594094526648 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -76,7 +76,7 @@ void bond_debug_reregister(struct bonding *bond)
d = debugfs_rename(bonding_debug_root, bond->debug_dir,
bonding_debug_root, bond->dev->name);
- if (d) {
+ if (!IS_ERR(d)) {
bond->debug_dir = d;
} else {
netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
index 3585f02575df..57eeb066a945 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
@@ -48,6 +48,7 @@ mcp251xfd_ring_set_ringparam(struct net_device *ndev,
priv->rx_obj_num = layout.cur_rx;
priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
priv->tx->obj_num = layout.cur_tx;
+ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
return 0;
}
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index c26755f662c1..f6f3b43dfb06 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -35,12 +35,13 @@ config NET_DSA_LANTIQ_GSWIP
the xrx200 / VR9 SoC.
config NET_DSA_MT7530
- tristate "MediaTek MT753x and MT7621 Ethernet switch support"
+ tristate "MediaTek MT7530 and MT7531 Ethernet switch support"
select NET_DSA_TAG_MTK
select MEDIATEK_GE_PHY
help
- This enables support for the MediaTek MT7530, MT7531, and MT7621
- Ethernet switch chips.
+ This enables support for the MediaTek MT7530 and MT7531 Ethernet
+ switch chips. The multi-chip module MT7530, found in the MT7621AT,
+ MT7621DAT, MT7621ST and MT7623AI SoCs, is also supported.
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 908fa89444c9..338f238f2043 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1309,14 +1309,26 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
if (!priv->ports[port].pvid)
mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
MT7530_VLAN_ACC_TAGGED);
- }
- /* Set the port as a user port which is to be able to recognize VID
- * from incoming packets before fetching entry within the VLAN table.
- */
- mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
- VLAN_ATTR(MT7530_VLAN_USER) |
- PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+ /* Set the port as a user port which is to be able to recognize
+ * VID from incoming packets before fetching entry within the
+ * VLAN table.
+ */
+ mt7530_rmw(priv, MT7530_PVC_P(port),
+ VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER) |
+ PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+ } else {
+ /* Also set CPU ports to the "user" VLAN port attribute, to
+ * allow VLAN classification, but keep the EG_TAG attribute as
+ * "consistent" (i.o.w. don't change its value) for packets
+ * received by the switch from the CPU, so that tagged packets
+ * are forwarded to user ports as tagged, and untagged as
+ * untagged.
+ */
+ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER));
+ }
}
static void
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 72e42820713d..6cda31520c42 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4627,25 +4627,26 @@ static int init_reset_optional(struct platform_device *pdev)
if (ret)
return dev_err_probe(&pdev->dev, ret,
"failed to init SGMII PHY\n");
- }
- ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
- if (!ret) {
- u32 pm_info[2];
+ ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
+ if (!ret) {
+ u32 pm_info[2];
+
+ ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
+ pm_info, ARRAY_SIZE(pm_info));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read power management information\n");
+ goto err_out_phy_exit;
+ }
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
+ if (ret)
+ goto err_out_phy_exit;
- ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
- pm_info, ARRAY_SIZE(pm_info));
- if (ret) {
- dev_err(&pdev->dev, "Failed to read power management information\n");
- goto err_out_phy_exit;
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
+ if (ret)
+ goto err_out_phy_exit;
}
- ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
- if (ret)
- goto err_out_phy_exit;
- ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
- if (ret)
- goto err_out_phy_exit;
}
/* Fully reset controller at hardware level if mapped in device tree */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 3f8032947d86..027fff9f7db0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2410,6 +2410,9 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
cleaned = qman_p_poll_dqrr(np->p, budget);
+ if (np->xdp_act & XDP_REDIRECT)
+ xdp_do_flush();
+
if (cleaned < budget) {
napi_complete_done(napi, cleaned);
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
@@ -2417,9 +2420,6 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
}
- if (np->xdp_act & XDP_REDIRECT)
- xdp_do_flush();
-
return cleaned;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 0c35abb7d065..2e79d18fc3c7 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1993,10 +1993,15 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
if (rx_cleaned >= budget ||
txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
work_done = budget;
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
goto out;
}
} while (store_cleaned);
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
+
/* Update NET DIM with the values for this CDAN */
dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
ch->stats.bytes_per_cdan);
@@ -2032,9 +2037,7 @@ out:
txc_fq->dq_bytes = 0;
}
- if (ch->xdp.res & XDP_REDIRECT)
- xdp_do_flush_map();
- else if (rx_cleaned && ch->xdp.res & XDP_TX)
+ if (rx_cleaned && ch->xdp.res & XDP_TX)
dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
return work_done;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 9349f841bd06..587ad81a2dc3 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1055,6 +1055,9 @@ static struct phylink_pcs *memac_pcs_create(struct device_node *mac_node,
return ERR_PTR(-EPROBE_DEFER);
pcs = lynx_pcs_create(mdiodev);
+ if (!pcs)
+ mdio_device_free(mdiodev);
+
return pcs;
}
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 2f0b604abc5e..713069f809ec 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -880,7 +880,7 @@ void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
-int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
void
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index d02b55b6aa9c..3e08847505ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -5524,7 +5524,7 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
 * returned by the firmware is a 16 bit value, but is indexed
* by [fls(speed) - 1]
*/
-static const u32 ice_aq_to_link_speed[15] = {
+static const u32 ice_aq_to_link_speed[] = {
SPEED_10, /* BIT(0) */
SPEED_100,
SPEED_1000,
@@ -5536,10 +5536,6 @@ static const u32 ice_aq_to_link_speed[15] = {
SPEED_40000,
SPEED_50000,
SPEED_100000, /* BIT(10) */
- 0,
- 0,
- 0,
- 0 /* BIT(14) */
};
/**
@@ -5550,5 +5546,8 @@ static const u32 ice_aq_to_link_speed[15] = {
*/
u32 ice_get_link_speed(u16 index)
{
+ if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
+ return 0;
+
return ice_aq_to_link_speed[index];
}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 4f24d441c35e..0a55c552189a 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -441,7 +441,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
goto out;
}
- ice_pf_dcb_recfg(pf);
+ ice_pf_dcb_recfg(pf, false);
out:
/* enable previously downed VSIs */
@@ -731,12 +731,13 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
/**
* ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
* @pf: pointer to the PF struct
+ * @locked: is adev device lock held
*
* Assumed caller has already disabled all VSIs before
* calling this function. Reconfiguring DCB based on
* local_dcbx_cfg.
*/
-void ice_pf_dcb_recfg(struct ice_pf *pf)
+void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
struct iidc_event *event;
@@ -783,14 +784,16 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
if (vsi->type == ICE_VSI_PF)
ice_dcbnl_set_all(vsi);
}
- /* Notify the AUX drivers that TC change is finished */
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- return;
+ if (!locked) {
+ /* Notify the AUX drivers that TC change is finished */
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return;
- set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
- ice_send_event_to_aux(pf, event);
- kfree(event);
+ set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
}
/**
@@ -1044,7 +1047,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
}
/* changes in configuration update VSI */
- ice_pf_dcb_recfg(pf);
+ ice_pf_dcb_recfg(pf, false);
/* enable previously downed VSIs */
ice_dcb_ena_dis_vsi(pf, true, true);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 4c421c842a13..800879a88c5e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -23,7 +23,7 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
int
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg);
-void ice_pf_dcb_recfg(struct ice_pf *pf);
+void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
@@ -128,7 +128,7 @@ static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
return 0;
}
-static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { }
+static inline void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { }
static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }
static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 4191994d8f3a..a359f1610fc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3641,7 +3641,9 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int new_rx = 0, new_tx = 0;
+ bool locked = false;
u32 curr_combined;
+ int ret = 0;
/* do not support changing channels in Safe Mode */
if (ice_is_safe_mode(pf)) {
@@ -3705,15 +3707,33 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EINVAL;
}
- ice_vsi_recfg_qs(vsi, new_rx, new_tx);
+ if (pf->adev) {
+ mutex_lock(&pf->adev_mutex);
+ device_lock(&pf->adev->dev);
+ locked = true;
+ if (pf->adev->dev.driver) {
+ netdev_err(dev, "Cannot change channels when RDMA is active\n");
+ ret = -EBUSY;
+ goto adev_unlock;
+ }
+ }
+
+ ice_vsi_recfg_qs(vsi, new_rx, new_tx, locked);
- if (!netif_is_rxfh_configured(dev))
- return ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+ if (!netif_is_rxfh_configured(dev)) {
+ ret = ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+ goto adev_unlock;
+ }
/* Update rss_size due to change in Rx queues */
vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx);
- return 0;
+adev_unlock:
+ if (locked) {
+ device_unlock(&pf->adev->dev);
+ mutex_unlock(&pf->adev_mutex);
+ }
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 237ede2cffb0..b288a01a321a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4195,12 +4195,13 @@ bool ice_is_wol_supported(struct ice_hw *hw)
* @vsi: VSI being changed
* @new_rx: new number of Rx queues
* @new_tx: new number of Tx queues
+ * @locked: is adev device_lock held
*
* Only change the number of queues if new_tx, or new_rx is non-0.
*
* Returns 0 on success.
*/
-int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
{
struct ice_pf *pf = vsi->back;
int err = 0, timeout = 50;
@@ -4229,7 +4230,7 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
ice_vsi_close(vsi);
ice_vsi_rebuild(vsi, false);
- ice_pf_dcb_recfg(pf);
+ ice_pf_dcb_recfg(pf, locked);
ice_vsi_open(vsi);
done:
clear_bit(ICE_CFG_BUSY, pf->state);
@@ -5540,7 +5541,7 @@ static int __init ice_module_init(void)
pr_info("%s\n", ice_driver_string);
pr_info("%s\n", ice_copyright);
- ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
+ ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 9b762f7972ce..61f844d22512 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -5420,7 +5420,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*/
status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
if (status)
- goto err_free_lkup_exts;
+ goto err_unroll;
/* Group match words into recipes using preferred recipe grouping
* criteria.
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index faba0f857cd9..95f392ab9670 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -1681,7 +1681,7 @@ ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
struct ice_vsi *ch_vsi = NULL;
u16 queue = act->rx_queue;
- if (queue > vsi->num_rxq) {
+ if (queue >= vsi->num_rxq) {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Unable to add filter because specified queue is invalid");
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
index d4a4001b6e5d..f56fa94ff3d0 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -39,7 +39,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
-static const u32 ice_legacy_aq_to_vc_speed[15] = {
+static const u32 ice_legacy_aq_to_vc_speed[] = {
VIRTCHNL_LINK_SPEED_100MB, /* BIT(0) */
VIRTCHNL_LINK_SPEED_100MB,
VIRTCHNL_LINK_SPEED_1GB,
@@ -51,10 +51,6 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = {
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_40GB,
- VIRTCHNL_LINK_SPEED_UNKNOWN,
- VIRTCHNL_LINK_SPEED_UNKNOWN,
- VIRTCHNL_LINK_SPEED_UNKNOWN,
- VIRTCHNL_LINK_SPEED_UNKNOWN /* BIT(14) */
};
/**
@@ -71,21 +67,20 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = {
*/
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
- u32 speed;
+ /* convert a BIT() value into an array index */
+ u32 index = fls(link_speed) - 1;
- if (adv_link_support) {
- /* convert a BIT() value into an array index */
- speed = ice_get_link_speed(fls(link_speed) - 1);
- } else {
+ if (adv_link_support)
+ return ice_get_link_speed(index);
+ else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed))
/* Virtchnl speeds are not defined for every speed supported in
* the hardware. To maintain compatibility with older AVF
* drivers, while reporting the speed the new speed values are
* resolved to the closest known virtchnl speeds
*/
- speed = ice_legacy_aq_to_vc_speed[fls(link_speed) - 1];
- }
+ return ice_legacy_aq_to_vc_speed[index];
- return speed;
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
}
/* The mailbox overflow detection algorithm helps to check if there
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index 5ecc0ee9a78e..b1ffb81893d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -44,13 +44,17 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
/* outer VLAN ops regardless of port VLAN config */
vlan_ops->add_vlan = ice_vsi_add_vlan;
- vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
if (ice_vf_is_port_vlan_ena(vf)) {
/* setup outer VLAN ops */
vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ /* all Rx traffic should be in the domain of the
+ * assigned port VLAN, so prevent disabling Rx VLAN
+ * filtering
+ */
+ vlan_ops->dis_rx_filtering = noop_vlan;
vlan_ops->ena_rx_filtering =
ice_vsi_ena_rx_vlan_filtering;
@@ -63,6 +67,9 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
} else {
+ vlan_ops->dis_rx_filtering =
+ ice_vsi_dis_rx_vlan_filtering;
+
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else
@@ -96,7 +103,14 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
vlan_ops->ena_rx_filtering =
ice_vsi_ena_rx_vlan_filtering;
+ /* all Rx traffic should be in the domain of the
+ * assigned port VLAN, so prevent disabling Rx VLAN
+ * filtering
+ */
+ vlan_ops->dis_rx_filtering = noop_vlan;
} else {
+ vlan_ops->dis_rx_filtering =
+ ice_vsi_dis_rx_vlan_filtering;
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 44b1740dc098..1dd2a7fee8d4 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2942,7 +2942,9 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
if (tx_buffer->next_to_watch &&
time_after(jiffies, tx_buffer->time_stamp +
(adapter->tx_timeout_factor * HZ)) &&
- !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
+ !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
+ (rd32(IGC_TDH(tx_ring->reg_idx)) !=
+ readl(tx_ring->tail))) {
/* detected Tx unit hang */
netdev_err(tx_ring->netdev,
"Detected Tx Unit Hang\n"
@@ -5069,6 +5071,24 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * igc_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ * @txqueue: queue number that timed out
+ **/
+static void igc_tx_timeout(struct net_device *netdev,
+ unsigned int __always_unused txqueue)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+
+ /* Do the reset outside of interrupt context */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ wr32(IGC_EICS,
+ (adapter->eims_enable_mask & ~adapter->eims_other));
+}
+
+/**
* igc_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
* @stats: rtnl_link_stats64 pointer
@@ -5495,7 +5515,7 @@ static void igc_watchdog_task(struct work_struct *work)
case SPEED_100:
case SPEED_1000:
case SPEED_2500:
- adapter->tx_timeout_factor = 7;
+ adapter->tx_timeout_factor = 1;
break;
}
@@ -6320,6 +6340,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_rx_mode = igc_set_rx_mode,
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
+ .ndo_tx_timeout = igc_tx_timeout,
.ndo_get_stats64 = igc_get_stats64,
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index c34734d432e0..4e10ced736db 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -417,10 +417,12 @@ static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
*
* We need to convert the system time value stored in the RX/TXSTMP registers
* into a hwtstamp which can be used by the upper level timestamping functions.
+ *
+ * Returns 0 on success.
**/
-static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
- struct skb_shared_hwtstamps *hwtstamps,
- u64 systim)
+static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 systim)
{
switch (adapter->hw.mac.type) {
case igc_i225:
@@ -430,8 +432,9 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
systim & 0xFFFFFFFF);
break;
default:
- break;
+ return -EINVAL;
}
+ return 0;
}
/**
@@ -652,7 +655,8 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
regval = rd32(IGC_TXSTMPL);
regval |= (u64)rd32(IGC_TXSTMPH) << 32;
- igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval))
+ return;
switch (adapter->link_speed) {
case SPEED_10:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index bda1a6fa2ec4..e4407f09c9d3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1500,6 +1500,9 @@ static const struct devlink_param rvu_af_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
rvu_af_dl_dwrr_mtu_validate),
+};
+
+static const struct devlink_param rvu_af_dl_param_exact_match[] = {
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
"npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
@@ -1556,7 +1559,6 @@ int rvu_register_dl(struct rvu *rvu)
{
struct rvu_devlink *rvu_dl;
struct devlink *dl;
- size_t size;
int err;
dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
@@ -1578,21 +1580,32 @@ int rvu_register_dl(struct rvu *rvu)
goto err_dl_health;
}
+ err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink params register failed with error %d", err);
+ goto err_dl_health;
+ }
+
/* Register exact match devlink only for CN10K-B */
- size = ARRAY_SIZE(rvu_af_dl_params);
if (!rvu_npc_exact_has_match_table(rvu))
- size -= 1;
+ goto done;
- err = devlink_params_register(dl, rvu_af_dl_params, size);
+ err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
+ ARRAY_SIZE(rvu_af_dl_param_exact_match));
if (err) {
dev_err(rvu->dev,
- "devlink params register failed with error %d", err);
- goto err_dl_health;
+ "devlink exact match params register failed with error %d", err);
+ goto err_dl_exact_match;
}
+done:
devlink_register(dl);
return 0;
+err_dl_exact_match:
+ devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
err_dl_health:
rvu_health_reporters_destroy(rvu);
devlink_free(dl);
@@ -1605,8 +1618,14 @@ void rvu_unregister_dl(struct rvu *rvu)
struct devlink *dl = rvu_dl->dl;
devlink_unregister(dl);
- devlink_params_unregister(dl, rvu_af_dl_params,
- ARRAY_SIZE(rvu_af_dl_params));
+
+ devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
+ /* Unregister exact match devlink only for CN10K-B */
+ if (rvu_npc_exact_has_match_table(rvu))
+ devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
+ ARRAY_SIZE(rvu_af_dl_param_exact_match));
+
rvu_health_reporters_destroy(rvu);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index e3de9a53b2d9..e3123723522e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1570,8 +1570,8 @@ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
if (IS_ERR(pp))
return pp;
- err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
- id, PAGE_SIZE);
+ err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
+ eth->rx_napi.napi_id, PAGE_SIZE);
if (err < 0)
goto err_free_pp;
@@ -1870,7 +1870,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
while (done < budget) {
unsigned int pktlen, *rxdcsum;
+ bool has_hwaccel_tag = false;
struct net_device *netdev;
+ u16 vlan_proto, vlan_tci;
dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
@@ -2010,27 +2012,29 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- if (trxd.rxd3 & RX_DMA_VTAG_V2)
- __vlan_hwaccel_put_tag(skb,
- htons(RX_DMA_VPID(trxd.rxd4)),
- RX_DMA_VID(trxd.rxd4));
+ if (trxd.rxd3 & RX_DMA_VTAG_V2) {
+ vlan_proto = RX_DMA_VPID(trxd.rxd4);
+ vlan_tci = RX_DMA_VID(trxd.rxd4);
+ has_hwaccel_tag = true;
+ }
} else if (trxd.rxd2 & RX_DMA_VTAG) {
- __vlan_hwaccel_put_tag(skb, htons(RX_DMA_VPID(trxd.rxd3)),
- RX_DMA_VID(trxd.rxd3));
+ vlan_proto = RX_DMA_VPID(trxd.rxd3);
+ vlan_tci = RX_DMA_VID(trxd.rxd3);
+ has_hwaccel_tag = true;
}
}
/* When using VLAN untagging in combination with DSA, the
* hardware treats the MTK special tag as a VLAN and untags it.
*/
- if (skb_vlan_tag_present(skb) && netdev_uses_dsa(netdev)) {
- unsigned int port = ntohs(skb->vlan_proto) & GENMASK(2, 0);
+ if (has_hwaccel_tag && netdev_uses_dsa(netdev)) {
+ unsigned int port = vlan_proto & GENMASK(2, 0);
if (port < ARRAY_SIZE(eth->dsa_meta) &&
eth->dsa_meta[port])
skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
-
- __vlan_hwaccel_clear_tag(skb);
+ } else if (has_hwaccel_tag) {
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
}
skb_record_rx_queue(skb, 0);
@@ -3111,7 +3115,7 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
val |= config;
- if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
+ if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
val |= MTK_GDMA_SPECIAL_TAG;
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 18a50529ce7b..2d9186d32bc0 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -519,7 +519,7 @@
#define SGMII_SPEED_10 FIELD_PREP(SGMII_SPEED_MASK, 0)
#define SGMII_SPEED_100 FIELD_PREP(SGMII_SPEED_MASK, 1)
#define SGMII_SPEED_1000 FIELD_PREP(SGMII_SPEED_MASK, 2)
-#define SGMII_DUPLEX_FULL BIT(4)
+#define SGMII_DUPLEX_HALF BIT(4)
#define SGMII_IF_MODE_BIT5 BIT(5)
#define SGMII_REMOTE_FAULT_DIS BIT(8)
#define SGMII_CODE_SYNC_SET_VAL BIT(9)
@@ -1036,11 +1036,13 @@ struct mtk_soc_data {
* @regmap: The register map pointing at the range used to setup
* SGMII modes
* @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
+ * @interface: Currently configured interface mode
* @pcs: Phylink PCS structure
*/
struct mtk_pcs {
struct regmap *regmap;
u32 ana_rgc3;
+ phy_interface_t interface;
struct phylink_pcs pcs;
};
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 269208a841c7..1ff024f42444 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -615,8 +615,7 @@ mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
int type;
- flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
- GFP_ATOMIC);
+ flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
if (!flow_info)
return;
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index ea64fac1d425..b5e432031340 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -279,7 +279,6 @@ struct mtk_flow_entry {
struct {
struct mtk_flow_entry *base_flow;
struct hlist_node list;
- struct {} end;
} l2_data;
};
struct rhash_head node;
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
index 5c286f2c9418..bb00de1003ac 100644
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -43,11 +43,6 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
int advertise, link_timer;
bool changed, use_an;
- if (interface == PHY_INTERFACE_MODE_2500BASEX)
- rgc3 = RG_PHY_SPEED_3_125G;
- else
- rgc3 = 0;
-
advertise = phylink_mii_c22_pcs_encode_advertisement(interface,
advertising);
if (advertise < 0)
@@ -88,9 +83,22 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
bmcr = 0;
}
- /* Configure the underlying interface speed */
- regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
- RG_PHY_SPEED_3_125G, rgc3);
+ if (mpcs->interface != interface) {
+ /* PHYA power down */
+ regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
+ SGMII_PHYA_PWD, SGMII_PHYA_PWD);
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX)
+ rgc3 = RG_PHY_SPEED_3_125G;
+ else
+ rgc3 = 0;
+
+ /* Configure the underlying interface speed */
+ regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
+ RG_PHY_SPEED_3_125G, rgc3);
+
+ mpcs->interface = interface;
+ }
/* Update the advertisement, noting whether it has changed */
regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE,
@@ -108,9 +116,17 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
SGMII_AN_RESTART | SGMII_AN_ENABLE, bmcr);
- /* Release PHYA power down state */
- regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
- SGMII_PHYA_PWD, 0);
+ /* Release PHYA power down state
+ * Only removing bit SGMII_PHYA_PWD isn't enough.
+ * There are cases when the SGMII_PHYA_PWD register contains 0x9 which
+ * prevents SGMII from working. The SGMII still shows link but no traffic
+ * can flow. Writing 0x0 to the PHYA_PWD register fixes the issue. 0x0 was
+ * taken from a good working state of the SGMII interface.
+ * It is unknown how much delay the QPHY needs, but it is racy without a sleep.
+ * Tested on mt7622 & mt7986.
+ */
+ usleep_range(50, 100);
+ regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, 0);
return changed;
}
@@ -138,11 +154,11 @@ static void mtk_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
else
sgm_mode = SGMII_SPEED_1000;
- if (duplex == DUPLEX_FULL)
- sgm_mode |= SGMII_DUPLEX_FULL;
+ if (duplex != DUPLEX_FULL)
+ sgm_mode |= SGMII_DUPLEX_HALF;
regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
- SGMII_DUPLEX_FULL | SGMII_SPEED_MASK,
+ SGMII_DUPLEX_HALF | SGMII_SPEED_MASK,
sgm_mode);
}
}
@@ -171,6 +187,8 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
return PTR_ERR(ss->pcs[i].regmap);
ss->pcs[i].pcs.ops = &mtk_pcs_ops;
+ ss->pcs[i].pcs.poll = true;
+ ss->pcs[i].interface = PHY_INTERFACE_MODE_NA;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 3e232a65a0c3..bb95b40d25eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -245,8 +245,9 @@ void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
pages = dev->priv.dbg.pages_debugfs;
debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
- debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages);
- debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages);
+ debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
+ debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]);
+ debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 21831386b26e..5b05b884b5fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -64,6 +64,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
MLX5_GET(mtrc_cap, out, num_string_trace);
tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db);
tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);
+ tracer->str_db.loaded = false;
for (i = 0; i < tracer->str_db.num_string_db; i++) {
mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]);
@@ -756,6 +757,7 @@ static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
if (err)
mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err);
+ tracer->buff.consumer_index = 0;
return err;
}
@@ -820,7 +822,6 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
if (tracer->owner) {
tracer->owner = false;
- tracer->buff.consumer_index = 0;
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index 464eb3a18450..cdc87ecae5d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -87,7 +87,7 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
mlx5_host_pf_cleanup(dev);
- err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
+ err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
if (err)
mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 8099a21e674c..ce85b48d327d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -438,10 +438,6 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
switch (event) {
case SWITCHDEV_FDB_ADD_TO_BRIDGE:
- /* only handle the event on native eswtich of representor */
- if (!mlx5_esw_bridge_is_local(dev, rep, esw))
- break;
-
fdb_info = container_of(info,
struct switchdev_notifier_fdb_info,
info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1892ccb889b3..7cd36f4ac3ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -443,7 +443,7 @@ void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
- if (fs->vlan->cvlan_filter_disabled)
+ if (!fs->vlan || fs->vlan->cvlan_filter_disabled)
return;
fs->vlan->cvlan_filter_disabled = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index abcc614b6191..6c24f33a5ea5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -591,7 +591,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->ix = c->ix;
rq->channel = c;
rq->mdev = mdev;
- rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ rq->hw_mtu =
+ MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN * !params->scatter_fcs_en;
rq->xdpsq = &c->rq_xdpsq;
rq->stats = &c->priv->channel_stats[c->ix]->rq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
@@ -1014,35 +1015,6 @@ int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
return mlx5e_rq_to_ready(rq, curr_state);
}
-static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
-{
- struct mlx5_core_dev *mdev = rq->mdev;
-
- void *in;
- void *rqc;
- int inlen;
- int err;
-
- inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
-
- MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
- MLX5_SET64(modify_rq_in, in, modify_bitmask,
- MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
- MLX5_SET(rqc, rqc, scatter_fcs, enable);
- MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
-
- err = mlx5_core_modify_rq(mdev, rq->rqn, in);
-
- kvfree(in);
-
- return err;
-}
-
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -3314,20 +3286,6 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
mlx5e_destroy_tises(priv);
}
-static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
-{
- int err = 0;
- int i;
-
- for (i = 0; i < chs->num; i++) {
- err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
int err;
@@ -3903,41 +3861,27 @@ static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
return mlx5_set_ports_check(mdev, in, sizeof(in));
}
+static int mlx5e_set_rx_port_ts_wrap(struct mlx5e_priv *priv, void *ctx)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ bool enable = *(bool *)ctx;
+
+ return mlx5e_set_rx_port_ts(mdev, enable);
+}
+
static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_channels *chs = &priv->channels;
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_params new_params;
int err;
mutex_lock(&priv->state_lock);
- if (enable) {
- err = mlx5e_set_rx_port_ts(mdev, false);
- if (err)
- goto out;
-
- chs->params.scatter_fcs_en = true;
- err = mlx5e_modify_channels_scatter_fcs(chs, true);
- if (err) {
- chs->params.scatter_fcs_en = false;
- mlx5e_set_rx_port_ts(mdev, true);
- }
- } else {
- chs->params.scatter_fcs_en = false;
- err = mlx5e_modify_channels_scatter_fcs(chs, false);
- if (err) {
- chs->params.scatter_fcs_en = true;
- goto out;
- }
- err = mlx5e_set_rx_port_ts(mdev, true);
- if (err) {
- mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err);
- err = 0;
- }
- }
-
-out:
+ new_params = chs->params;
+ new_params.scatter_fcs_en = enable;
+ err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
+ &new_params.scatter_fcs_en, true);
mutex_unlock(&priv->state_lock);
return err;
}
@@ -4074,6 +4018,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_GRO_HW)
netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
+ features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
+
return features;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index b176648d1343..3cdcb0e0b20f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1715,7 +1715,7 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
struct mlx5_esw_bridge *bridge;
port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
- if (!port || port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER)
+ if (!port)
return;
bridge = port->bridge;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index eff92dc0927c..e09518f887a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -189,16 +189,16 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
}
}
-static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
+static u32 mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
{
int rate, width;
rate = mlx5_ptys_rate_enum_to_int(ib_proto_oper);
if (rate < 0)
- return -EINVAL;
+ return SPEED_UNKNOWN;
width = mlx5_ptys_width_enum_to_int(ib_link_width_oper);
if (width < 0)
- return -EINVAL;
+ return SPEED_UNKNOWN;
return rate * width;
}
@@ -221,16 +221,13 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
speed = mlx5i_get_speed_settings(ib_link_width_oper, ib_proto_oper);
- if (speed < 0)
- return -EINVAL;
+ link_ksettings->base.speed = speed;
+ link_ksettings->base.duplex = speed == SPEED_UNKNOWN ? DUPLEX_UNKNOWN : DUPLEX_FULL;
- link_ksettings->base.duplex = DUPLEX_FULL;
link_ksettings->base.port = PORT_OTHER;
link_ksettings->base.autoneg = AUTONEG_DISABLE;
- link_ksettings->base.speed = speed;
-
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3d5f2a4b1fed..4e1b5757528a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -2110,7 +2110,7 @@ static int __init mlx5_init(void)
mlx5_core_verify_params();
mlx5_register_debugfs();
- err = pci_register_driver(&mlx5_core_driver);
+ err = mlx5e_init();
if (err)
goto err_debug;
@@ -2118,16 +2118,16 @@ static int __init mlx5_init(void)
if (err)
goto err_sf;
- err = mlx5e_init();
+ err = pci_register_driver(&mlx5_core_driver);
if (err)
- goto err_en;
+ goto err_pci;
return 0;
-err_en:
+err_pci:
mlx5_sf_driver_unregister();
err_sf:
- pci_unregister_driver(&mlx5_core_driver);
+ mlx5e_cleanup();
err_debug:
mlx5_unregister_debugfs();
return err;
@@ -2135,9 +2135,9 @@ err_debug:
static void __exit mlx5_cleanup(void)
{
- mlx5e_cleanup();
- mlx5_sf_driver_unregister();
pci_unregister_driver(&mlx5_core_driver);
+ mlx5_sf_driver_unregister();
+ mlx5e_cleanup();
mlx5_unregister_debugfs();
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 60596357bfc7..0eb50be175cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -74,6 +74,14 @@ static u32 get_function(u16 func_id, bool ec_function)
return (u32)func_id | (ec_function << 16);
}
+static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function)
+{
+ if (!func_id)
+ return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;
+
+ return func_id <= mlx5_core_max_vfs(dev) ? MLX5_VF : MLX5_SF;
+}
+
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
@@ -332,6 +340,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
int notify_fail = event;
+ u16 func_type;
u64 addr;
int err;
u32 *in;
@@ -383,11 +392,9 @@ retry:
goto out_dropped;
}
+ func_type = func_id_to_type(dev, func_id, ec_function);
+ dev->priv.page_counters[func_type] += npages;
dev->priv.fw_pages += npages;
- if (func_id)
- dev->priv.vfs_pages += npages;
- else if (mlx5_core_is_ecpf(dev) && !ec_function)
- dev->priv.host_pf_pages += npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
npages, ec_function, func_id, err);
@@ -414,6 +421,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
struct rb_root *root;
struct rb_node *p;
int npages = 0;
+ u16 func_type;
root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
@@ -428,11 +436,9 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
free_fwp(dev, fwp, fwp->free_count);
}
+ func_type = func_id_to_type(dev, func_id, ec_function);
+ dev->priv.page_counters[func_type] -= npages;
dev->priv.fw_pages -= npages;
- if (func_id)
- dev->priv.vfs_pages -= npages;
- else if (mlx5_core_is_ecpf(dev) && !ec_function)
- dev->priv.host_pf_pages -= npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
npages, ec_function, func_id);
@@ -498,6 +504,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
+ u16 func_type;
u32 *out;
int err;
int i;
@@ -549,11 +556,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
if (nclaimed)
*nclaimed = num_claimed;
+ func_type = func_id_to_type(dev, func_id, ec_function);
+ dev->priv.page_counters[func_type] -= num_claimed;
dev->priv.fw_pages -= num_claimed;
- if (func_id)
- dev->priv.vfs_pages -= num_claimed;
- else if (mlx5_core_is_ecpf(dev) && !ec_function)
- dev->priv.host_pf_pages -= num_claimed;
out_free:
kvfree(out);
@@ -706,12 +711,12 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
WARN(dev->priv.fw_pages,
"FW pages counter is %d after reclaiming all pages\n",
dev->priv.fw_pages);
- WARN(dev->priv.vfs_pages,
+ WARN(dev->priv.page_counters[MLX5_VF],
"VFs FW pages counter is %d after reclaiming all pages\n",
- dev->priv.vfs_pages);
- WARN(dev->priv.host_pf_pages,
+ dev->priv.page_counters[MLX5_VF]);
+ WARN(dev->priv.page_counters[MLX5_HOST_PF],
"External host PF FW pages counter is %d after reclaiming all pages\n",
- dev->priv.host_pf_pages);
+ dev->priv.page_counters[MLX5_HOST_PF]);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index c0e6c487c63c..3008e9ce2bbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -147,7 +147,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
- if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
+ if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index b851141e03de..042ca0349124 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -1138,12 +1138,14 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
rule->flow_source))
return 0;
+ mlx5dr_domain_nic_lock(nic_dmn);
+
ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher,
dr_rule_get_ipv(&param->outer),
dr_rule_get_ipv(&param->inner));
if (ret)
- return ret;
+ goto err_unlock;
hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED;
if (likely(hw_ste_arr_is_opt)) {
@@ -1152,12 +1154,12 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) *
DR_STE_SIZE, GFP_KERNEL);
- if (!hw_ste_arr)
- return -ENOMEM;
+ if (!hw_ste_arr) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
}
- mlx5dr_domain_nic_lock(nic_dmn);
-
ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
if (ret)
goto free_hw_ste;
@@ -1223,7 +1225,10 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
mlx5dr_domain_nic_unlock(nic_dmn);
- goto out;
+ if (unlikely(!hw_ste_arr_is_opt))
+ kfree(hw_ste_arr);
+
+ return 0;
free_rule:
dr_rule_clean_rule_members(rule, nic_rule);
@@ -1238,12 +1243,12 @@ remove_from_nic_tbl:
mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
free_hw_ste:
- mlx5dr_domain_nic_unlock(nic_dmn);
-
-out:
- if (unlikely(!hw_ste_arr_is_opt))
+ if (!hw_ste_arr_is_opt)
kfree(hw_ste_arr);
+err_unlock:
+ mlx5dr_domain_nic_unlock(nic_dmn);
+
return ret;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index 5314c064ceae..55b484b10562 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -608,12 +608,12 @@ allocate_new:
lan966x_fdma_rx_reload(rx);
}
- if (counter < weight && napi_complete_done(napi, counter))
- lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
-
if (redirect)
xdp_do_flush();
+ if (counter < weight && napi_complete_done(napi, counter))
+ lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
+
return counter;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
index 0ed1ea7727c5..69e76634f9aa 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
@@ -633,7 +633,7 @@ int sparx5_ptp_init(struct sparx5 *sparx5)
/* Enable master counters */
spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);
- for (i = 0; i < sparx5->port_count; i++) {
+ for (i = 0; i < SPX5_PORTS; i++) {
port = sparx5->ports[i];
if (!port)
continue;
@@ -649,7 +649,7 @@ void sparx5_ptp_deinit(struct sparx5 *sparx5)
struct sparx5_port *port;
int i;
- for (i = 0; i < sparx5->port_count; i++) {
+ for (i = 0; i < SPX5_PORTS; i++) {
port = sparx5->ports[i];
if (!port)
continue;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index b144f2237748..f9b8f372ec8a 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1217,9 +1217,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
unsigned int max_queues_per_port = num_online_cpus();
struct gdma_context *gc = pci_get_drvdata(pdev);
struct gdma_irq_context *gic;
- unsigned int max_irqs;
- u16 *cpus;
- cpumask_var_t req_mask;
+ unsigned int max_irqs, cpu;
int nvec, irq;
int err, i = 0, j;
@@ -1240,21 +1238,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
goto free_irq_vector;
}
- if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) {
- err = -ENOMEM;
- goto free_irq;
- }
-
- cpus = kcalloc(nvec, sizeof(*cpus), GFP_KERNEL);
- if (!cpus) {
- err = -ENOMEM;
- goto free_mask;
- }
- for (i = 0; i < nvec; i++)
- cpus[i] = cpumask_local_spread(i, gc->numa_node);
-
for (i = 0; i < nvec; i++) {
- cpumask_set_cpu(cpus[i], req_mask);
gic = &gc->irq_contexts[i];
gic->handler = NULL;
gic->arg = NULL;
@@ -1269,17 +1253,16 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
irq = pci_irq_vector(pdev, i);
if (irq < 0) {
err = irq;
- goto free_mask;
+ goto free_irq;
}
err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
if (err)
- goto free_mask;
- irq_set_affinity_and_hint(irq, req_mask);
- cpumask_clear(req_mask);
+ goto free_irq;
+
+ cpu = cpumask_local_spread(i, gc->numa_node);
+ irq_set_affinity_and_hint(irq, cpumask_of(cpu));
}
- free_cpumask_var(req_mask);
- kfree(cpus);
err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
if (err)
@@ -1290,13 +1273,12 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
return 0;
-free_mask:
- free_cpumask_var(req_mask);
- kfree(cpus);
free_irq:
for (j = i - 1; j >= 0; j--) {
irq = pci_irq_vector(pdev, j);
gic = &gc->irq_contexts[j];
+
+ irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
}
@@ -1324,6 +1306,9 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
continue;
gic = &gc->irq_contexts[i];
+
+ /* Need to clear the hint before free_irq */
+ irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
}
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 7c0897e779dc..ee052404eb55 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -605,6 +605,18 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
flow_rule_match_control(rule, &match);
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ filter->key_type = OCELOT_VCAP_KEY_ANY;
+ filter->vlan.vid.value = match.key->vlan_id;
+ filter->vlan.vid.mask = match.mask->vlan_id;
+ filter->vlan.pcp.value[0] = match.key->vlan_priority;
+ filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
+ match_protocol = false;
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
@@ -737,18 +749,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
match_protocol = false;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_match_vlan match;
-
- flow_rule_match_vlan(rule, &match);
- filter->key_type = OCELOT_VCAP_KEY_ANY;
- filter->vlan.vid.value = match.key->vlan_id;
- filter->vlan.vid.mask = match.mask->vlan_id;
- filter->vlan.pcp.value[0] = match.key->vlan_priority;
- filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
- match_protocol = false;
- }
-
finished_key_parsing:
if (match_protocol && proto != ETH_P_ALL) {
if (filter->block_id == VCAP_ES0) {
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index 1a82f10c8853..2180ae94c744 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -335,8 +335,8 @@ static void
ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv6.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_EV_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
@@ -355,8 +355,8 @@ static void
ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv6.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_GEN_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index a8678d5612ee..060a77f2265d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -460,6 +460,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
sizeof(struct nfp_tun_neigh_v4);
unsigned long cookie = (unsigned long)neigh;
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_neigh_lag lag_info;
struct nfp_neigh_entry *nn_entry;
u32 port_id;
u8 mtype;
@@ -468,6 +469,11 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
if (!port_id)
return;
+ if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT) {
+ memset(&lag_info, 0, sizeof(struct nfp_tun_neigh_lag));
+ nfp_flower_lag_get_info_from_netdev(app, netdev, &lag_info);
+ }
+
spin_lock_bh(&priv->predt_lock);
nn_entry = rhashtable_lookup_fast(&priv->neigh_table, &cookie,
neigh_table_params);
@@ -515,7 +521,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
neigh_ha_snapshot(common->dst_addr, neigh, netdev);
if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT)
- nfp_flower_lag_get_info_from_netdev(app, netdev, lag);
+ memcpy(lag, &lag_info, sizeof(struct nfp_tun_neigh_lag));
common->port_id = cpu_to_be32(port_id);
if (rhashtable_insert_fast(&priv->neigh_table,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index a4a89ef3f18b..cc97b3d00414 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -293,35 +293,131 @@ nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
}
}
-static const u16 nfp_eth_media_table[] = {
- [NFP_MEDIA_1000BASE_CX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- [NFP_MEDIA_1000BASE_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- [NFP_MEDIA_10GBASE_KX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
- [NFP_MEDIA_10GBASE_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- [NFP_MEDIA_10GBASE_CX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
- [NFP_MEDIA_10GBASE_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
- [NFP_MEDIA_10GBASE_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- [NFP_MEDIA_10GBASE_ER] = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
- [NFP_MEDIA_25GBASE_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- [NFP_MEDIA_25GBASE_KR_S] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- [NFP_MEDIA_25GBASE_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- [NFP_MEDIA_25GBASE_CR_S] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- [NFP_MEDIA_25GBASE_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- [NFP_MEDIA_40GBASE_CR4] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
- [NFP_MEDIA_40GBASE_KR4] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
- [NFP_MEDIA_40GBASE_SR4] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
- [NFP_MEDIA_40GBASE_LR4] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
- [NFP_MEDIA_50GBASE_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
- [NFP_MEDIA_50GBASE_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
- [NFP_MEDIA_50GBASE_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
- [NFP_MEDIA_50GBASE_LR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- [NFP_MEDIA_50GBASE_ER] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- [NFP_MEDIA_50GBASE_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- [NFP_MEDIA_100GBASE_KR4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- [NFP_MEDIA_100GBASE_SR4] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- [NFP_MEDIA_100GBASE_CR4] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
- [NFP_MEDIA_100GBASE_KP4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- [NFP_MEDIA_100GBASE_CR10] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+static const struct nfp_eth_media_link_mode {
+ u16 ethtool_link_mode;
+ u16 speed;
+} nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
+ [NFP_MEDIA_1000BASE_CX] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = NFP_SPEED_1G,
+ },
+ [NFP_MEDIA_1000BASE_KX] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = NFP_SPEED_1G,
+ },
+ [NFP_MEDIA_10GBASE_KX4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_KR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_CX4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_CR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_SR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_ER] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_25GBASE_KR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_KR_S] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_CR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_CR_S] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_SR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_40GBASE_CR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_40GBASE_KR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_40GBASE_SR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_40GBASE_LR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_50GBASE_KR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_SR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_CR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_LR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_ER] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_FR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_100GBASE_KR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_SR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_CR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_KP4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_CR10] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+};
+
+static const unsigned int nfp_eth_speed_map[NFP_SUP_SPEED_NUMBER] = {
+ [NFP_SPEED_1G] = SPEED_1000,
+ [NFP_SPEED_10G] = SPEED_10000,
+ [NFP_SPEED_25G] = SPEED_25000,
+ [NFP_SPEED_40G] = SPEED_40000,
+ [NFP_SPEED_50G] = SPEED_50000,
+ [NFP_SPEED_100G] = SPEED_100000,
};
static void nfp_add_media_link_mode(struct nfp_port *port,
@@ -334,8 +430,12 @@ static void nfp_add_media_link_mode(struct nfp_port *port,
};
struct nfp_cpp *cpp = port->app->cpp;
- if (nfp_eth_read_media(cpp, &ethm))
+ if (nfp_eth_read_media(cpp, &ethm)) {
+ bitmap_fill(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
return;
+ }
+
+ bitmap_zero(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
for (u32 i = 0; i < 2; i++) {
supported_modes[i] = le64_to_cpu(ethm.supported_modes[i]);
@@ -344,20 +444,26 @@ static void nfp_add_media_link_mode(struct nfp_port *port,
for (u32 i = 0; i < NFP_MEDIA_LINK_MODES_NUMBER; i++) {
if (i < 64) {
- if (supported_modes[0] & BIT_ULL(i))
- __set_bit(nfp_eth_media_table[i],
+ if (supported_modes[0] & BIT_ULL(i)) {
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.supported);
+ __set_bit(nfp_eth_media_table[i].speed,
+ port->speed_bitmap);
+ }
if (advertised_modes[0] & BIT_ULL(i))
- __set_bit(nfp_eth_media_table[i],
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.advertising);
} else {
- if (supported_modes[1] & BIT_ULL(i - 64))
- __set_bit(nfp_eth_media_table[i],
+ if (supported_modes[1] & BIT_ULL(i - 64)) {
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.supported);
+ __set_bit(nfp_eth_media_table[i].speed,
+ port->speed_bitmap);
+ }
if (advertised_modes[1] & BIT_ULL(i - 64))
- __set_bit(nfp_eth_media_table[i],
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.advertising);
}
}
@@ -468,6 +574,22 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
if (cmd->base.speed != SPEED_UNKNOWN) {
u32 speed = cmd->base.speed / eth_port->lanes;
+ bool is_supported = false;
+
+ for (u32 i = 0; i < NFP_SUP_SPEED_NUMBER; i++) {
+ if (cmd->base.speed == nfp_eth_speed_map[i] &&
+ test_bit(i, port->speed_bitmap)) {
+ is_supported = true;
+ break;
+ }
+ }
+
+ if (!is_supported) {
+ netdev_err(netdev, "Speed %u is not supported.\n",
+ cmd->base.speed);
+ err = -EINVAL;
+ goto err_bad_set;
+ }
if (req_aneg) {
netdev_err(netdev, "Speed changing is not allowed when working on autoneg mode.\n");
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index f8cd157ca1d7..9c04f9f0e2c9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -38,6 +38,16 @@ enum nfp_port_flags {
NFP_PORT_CHANGED = 0,
};
+enum {
+ NFP_SPEED_1G,
+ NFP_SPEED_10G,
+ NFP_SPEED_25G,
+ NFP_SPEED_40G,
+ NFP_SPEED_50G,
+ NFP_SPEED_100G,
+ NFP_SUP_SPEED_NUMBER
+};
+
/**
* struct nfp_port - structure representing NFP port
* @netdev: backpointer to associated netdev
@@ -52,6 +62,7 @@ enum nfp_port_flags {
* @eth_forced: for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change
* @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry
* @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available
+ * @speed_bitmap: for %NFP_PORT_PHYS_PORT supported speed bitmap
* @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3)
* @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id
* @pf_split: for %NFP_PORT_PF_PORT %true if PCI PF has more than one vNIC
@@ -78,6 +89,7 @@ struct nfp_port {
bool eth_forced;
struct nfp_eth_table_port *eth_port;
u8 __iomem *eth_stats;
+ DECLARE_BITMAP(speed_bitmap, NFP_SUP_SPEED_NUMBER);
};
/* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */
struct {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 626b9113e7c4..d911f4fd9af6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -708,9 +708,16 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
q->lif->index, q->name, q->hw_type, q->hw_index,
q->head_idx, ring_doorbell);
- if (ring_doorbell)
+ if (ring_doorbell) {
ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
q->dbval | q->head_idx);
+
+ q->dbell_jiffies = jiffies;
+
+ if (q_to_qcq(q)->napi_qcq)
+ mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
+ jiffies + IONIC_NAPI_DEADLINE);
+ }
}
static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 2a1d7b9c07e7..bce3ca38669b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -25,6 +25,12 @@
#define IONIC_DEV_INFO_REG_COUNT 32
#define IONIC_DEV_CMD_REG_COUNT 32
+#define IONIC_NAPI_DEADLINE (HZ / 200) /* 5ms */
+#define IONIC_ADMIN_DOORBELL_DEADLINE (HZ / 2) /* 500ms */
+#define IONIC_TX_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
+#define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
+#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 5) /* 5s */
+
struct ionic_dev_bar {
void __iomem *vaddr;
phys_addr_t bus_addr;
@@ -216,6 +222,8 @@ struct ionic_queue {
struct ionic_lif *lif;
struct ionic_desc_info *info;
u64 dbval;
+ unsigned long dbell_deadline;
+ unsigned long dbell_jiffies;
u16 head_idx;
u16 tail_idx;
unsigned int index;
@@ -361,4 +369,8 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
int ionic_heartbeat_check(struct ionic *ionic);
bool ionic_is_fw_running(struct ionic_dev *idev);
+bool ionic_adminq_poke_doorbell(struct ionic_queue *q);
+bool ionic_txq_poke_doorbell(struct ionic_queue *q);
+bool ionic_rxq_poke_doorbell(struct ionic_queue *q);
+
#endif /* _IONIC_DEV_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 4dd16c487f2b..63a78a9ac241 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -16,6 +16,7 @@
#include "ionic.h"
#include "ionic_bus.h"
+#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
@@ -200,6 +201,13 @@ void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
}
}
+static void ionic_napi_deadline(struct timer_list *timer)
+{
+ struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);
+
+ napi_schedule(&qcq->napi);
+}
+
static irqreturn_t ionic_isr(int irq, void *data)
{
struct napi_struct *napi = data;
@@ -269,6 +277,7 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
.oper = IONIC_Q_ENABLE,
},
};
+ int ret;
idev = &lif->ionic->idev;
dev = lif->ionic->dev;
@@ -276,16 +285,24 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+ if (qcq->flags & IONIC_QCQ_F_INTR)
+ ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
+
+ ret = ionic_adminq_post_wait(lif, &ctx);
+ if (ret)
+ return ret;
+
+ if (qcq->napi.poll)
+ napi_enable(&qcq->napi);
+
if (qcq->flags & IONIC_QCQ_F_INTR) {
irq_set_affinity_hint(qcq->intr.vector,
&qcq->intr.affinity_mask);
- napi_enable(&qcq->napi);
- ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
}
- return ionic_adminq_post_wait(lif, &ctx);
+ return 0;
}
static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
@@ -316,6 +333,7 @@ static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int f
synchronize_irq(qcq->intr.vector);
irq_set_affinity_hint(qcq->intr.vector, NULL);
napi_disable(&qcq->napi);
+ del_timer_sync(&qcq->napi_deadline);
}
/* If there was a previous fw communcation error, don't bother with
@@ -451,6 +469,7 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
n_qcq->intr.vector = src_qcq->intr.vector;
n_qcq->intr.index = src_qcq->intr.index;
+ n_qcq->napi_qcq = src_qcq->napi_qcq;
}
static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
@@ -564,13 +583,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
}
if (flags & IONIC_QCQ_F_NOTIFYQ) {
- int q_size, cq_size;
+ int q_size;
- /* q & cq need to be contiguous in case of notifyq */
+ /* q & cq need to be contiguous in NotifyQ, so alloc it all in q
+ * and don't alloc qc. We leave new->qc_size and new->qc_base
+ * as 0 to be sure we don't try to free it later.
+ */
q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
- cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
-
- new->q_size = PAGE_SIZE + q_size + cq_size;
+ new->q_size = PAGE_SIZE + q_size +
+ ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
new->q_base = dma_alloc_coherent(dev, new->q_size,
&new->q_base_pa, GFP_KERNEL);
if (!new->q_base) {
@@ -773,8 +794,14 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
- if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
+ qcq->napi_qcq = qcq;
+ timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+ }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -828,11 +855,17 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
+ q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
else
netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
+ qcq->napi_qcq = qcq;
+ timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+
qcq->flags |= IONIC_QCQ_F_INITED;
return 0;
@@ -1150,6 +1183,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
struct ionic_dev *idev = &lif->ionic->idev;
unsigned long irqflags;
unsigned int flags = 0;
+ bool resched = false;
int rx_work = 0;
int tx_work = 0;
int n_work = 0;
@@ -1187,6 +1221,16 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
}
+ if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
+ resched = true;
+ if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
+ resched = true;
+ if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
+ resched = true;
+ if (resched)
+ mod_timer(&lif->adminqcq->napi_deadline,
+ jiffies + IONIC_NAPI_DEADLINE);
+
return work_done;
}
@@ -3245,8 +3289,14 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
+ q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
+ qcq->napi_qcq = qcq;
+ timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+
napi_enable(&qcq->napi);
if (qcq->flags & IONIC_QCQ_F_INTR)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index a53984bf3544..734519895614 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -74,8 +74,10 @@ struct ionic_qcq {
struct ionic_queue q;
struct ionic_cq cq;
struct ionic_intr_info intr;
+ struct timer_list napi_deadline;
struct napi_struct napi;
unsigned int flags;
+ struct ionic_qcq *napi_qcq;
struct dentry *dentry;
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index a13530ec4dd8..08c42b039d92 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -289,6 +289,35 @@ static void ionic_adminq_cb(struct ionic_queue *q,
complete_all(&ctx->work);
}
+bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
+{
+ struct ionic_lif *lif = q->lif;
+ unsigned long now, then, dif;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&lif->adminq_lock, irqflags);
+
+ if (q->tail_idx == q->head_idx) {
+ spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
+ return false;
+ }
+
+ now = READ_ONCE(jiffies);
+ then = q->dbell_jiffies;
+ dif = now - then;
+
+ if (dif > q->dbell_deadline) {
+ ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+ q->dbval | q->head_idx);
+
+ q->dbell_jiffies = now;
+ }
+
+ spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
+
+ return true;
+}
+
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
struct ionic_desc_info *desc_info;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 0c3977416cd1..f761780f0162 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -22,6 +22,67 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
+bool ionic_txq_poke_doorbell(struct ionic_queue *q)
+{
+ unsigned long now, then, dif;
+ struct netdev_queue *netdev_txq;
+ struct net_device *netdev;
+
+ netdev = q->lif->netdev;
+ netdev_txq = netdev_get_tx_queue(netdev, q->index);
+
+ HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id());
+
+ if (q->tail_idx == q->head_idx) {
+ HARD_TX_UNLOCK(netdev, netdev_txq);
+ return false;
+ }
+
+ now = READ_ONCE(jiffies);
+ then = q->dbell_jiffies;
+ dif = now - then;
+
+ if (dif > q->dbell_deadline) {
+ ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+ q->dbval | q->head_idx);
+
+ q->dbell_jiffies = now;
+ }
+
+ HARD_TX_UNLOCK(netdev, netdev_txq);
+
+ return true;
+}
+
+bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
+{
+ unsigned long now, then, dif;
+
+ /* no lock, called from rx napi or txrx napi, nothing else can fill */
+
+ if (q->tail_idx == q->head_idx)
+ return false;
+
+ now = READ_ONCE(jiffies);
+ then = q->dbell_jiffies;
+ dif = now - then;
+
+ if (dif > q->dbell_deadline) {
+ ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+ q->dbval | q->head_idx);
+
+ q->dbell_jiffies = now;
+
+ dif = 2 * q->dbell_deadline;
+ if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE)
+ dif = IONIC_RX_MAX_DOORBELL_DEADLINE;
+
+ q->dbell_deadline = dif;
+ }
+
+ return true;
+}
+
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
return netdev_get_tx_queue(q->lif->netdev, q->index);
@@ -424,6 +485,12 @@ void ionic_rx_fill(struct ionic_queue *q)
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
q->dbval | q->head_idx);
+
+ q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
+ mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
+ jiffies + IONIC_NAPI_DEADLINE);
}
void ionic_rx_empty(struct ionic_queue *q)
@@ -511,6 +578,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
+ if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
+ mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+
return work_done;
}
@@ -544,23 +614,29 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
+ if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
+ mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+
return work_done;
}
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
- struct ionic_qcq *qcq = napi_to_qcq(napi);
+ struct ionic_qcq *rxqcq = napi_to_qcq(napi);
struct ionic_cq *rxcq = napi_to_cq(napi);
unsigned int qi = rxcq->bound_q->index;
+ struct ionic_qcq *txqcq;
struct ionic_dev *idev;
struct ionic_lif *lif;
struct ionic_cq *txcq;
+ bool resched = false;
u32 rx_work_done = 0;
u32 tx_work_done = 0;
u32 flags = 0;
lif = rxcq->bound_q->lif;
idev = &lif->ionic->idev;
+ txqcq = lif->txqcqs[qi];
txcq = &lif->txqcqs[qi]->cq;
tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
@@ -572,7 +648,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(rxcq->bound_q);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
- ionic_dim_update(qcq, 0);
+ ionic_dim_update(rxqcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
rxcq->bound_intr->rearm_count++;
}
@@ -583,6 +659,13 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
tx_work_done + rx_work_done, flags);
}
+ if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
+ resched = true;
+ if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
+ resched = true;
+ if (resched)
+ mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+
return rx_work_done;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 7c2af482192d..cb1746bc0e0c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1438,6 +1438,10 @@ int qede_poll(struct napi_struct *napi, int budget)
rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
qede_has_rx_work(fp->rxq)) ?
qede_rx_int(fp, budget) : 0;
+
+ if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
+ xdp_do_flush();
+
/* Handle case where we are called by netpoll with a budget of 0 */
if (rx_work_done < budget || !budget) {
if (!qede_poll_is_more_work(fp)) {
@@ -1457,9 +1461,6 @@ int qede_poll(struct napi_struct *napi, int budget)
qede_update_tx_producer(fp->xdp_tx);
}
- if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
- xdp_do_flush_map();
-
return rx_work_done;
}
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0556542d7a6b..3a86f1213a05 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1003,8 +1003,11 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
/* Determine netdevice features */
net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
- if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
+ if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) {
net_dev->features |= NETIF_F_TSO6;
+ if (efx_has_cap(efx, TX_TSO_V2_ENCAP))
+ net_dev->hw_enc_features |= NETIF_F_TSO6;
+ }
/* Check whether device supports TSO */
if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
net_dev->features &= ~NETIF_F_ALL_TSO;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 835caa15d55f..732774645c1a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -560,6 +560,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->has_gmac4 = 1;
plat_dat->pmt = 1;
plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
+ if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
+ plat_dat->rx_clk_runs_in_lpi = 1;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b7e5af58ab75..1a5b8dab5e9b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1080,7 +1080,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
- priv->eee_active = phy_init_eee(phy, 1) >= 0;
+ priv->eee_active =
+ phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
priv->eee_enabled = stmmac_eee_init(priv);
priv->tx_lpi_enabled = priv->eee_enabled;
stmmac_set_eee_pls(priv, priv->hw, true);
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9352dad58996..79f4e13620a4 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -987,9 +987,6 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
void netvsc_dma_unmap(struct hv_device *hv_dev,
struct hv_netvsc_packet *packet)
{
- u32 page_count = packet->cp_partial ?
- packet->page_buf_cnt - packet->rmsg_pgcnt :
- packet->page_buf_cnt;
int i;
if (!hv_is_isolation_supported())
@@ -998,7 +995,7 @@ void netvsc_dma_unmap(struct hv_device *hv_dev,
if (!packet->dma_range)
return;
- for (i = 0; i < page_count; i++)
+ for (i = 0; i < packet->page_buf_cnt; i++)
dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
packet->dma_range[i].mapping_size,
DMA_TO_DEVICE);
@@ -1028,9 +1025,7 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
struct hv_netvsc_packet *packet,
struct hv_page_buffer *pb)
{
- u32 page_count = packet->cp_partial ?
- packet->page_buf_cnt - packet->rmsg_pgcnt :
- packet->page_buf_cnt;
+ u32 page_count = packet->page_buf_cnt;
dma_addr_t dma;
int i;
@@ -1039,7 +1034,7 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
packet->dma_range = kcalloc(page_count,
sizeof(*packet->dma_range),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!packet->dma_range)
return -ENOMEM;
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index a6f05e35d91f..b7cb71817780 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -233,7 +233,8 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
- if (!dp83822->fx_enabled)
+ /* Private data pointer is NULL on DP83825/26 */
+ if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
DP83822_DUP_MODE_CHANGE_INT_EN |
DP83822_SPEED_CHANGED_INT_EN;
@@ -253,7 +254,8 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_PAGE_RX_INT_EN |
DP83822_EEE_ERROR_CHANGE_INT_EN);
- if (!dp83822->fx_enabled)
+ /* Private data pointer is NULL on DP83825/26 */
+ if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_ERR_INT_EN |
DP83822_WOL_PKT_INT_EN;
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index c49062ad72c6..a6015cd03bff 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -261,6 +261,8 @@ static struct phy_driver meson_gxl_phy[] = {
.handle_interrupt = meson_gxl_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
}, {
PHY_ID_MATCH_EXACT(0x01803301),
.name = "Meson G12A Internal PHY",
@@ -271,6 +273,8 @@ static struct phy_driver meson_gxl_phy[] = {
.handle_interrupt = meson_gxl_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
},
};
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 716870a4499c..607aa786c8cb 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1517,7 +1517,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* another mac interface, so we should create a device link between
* phy dev and mac dev.
*/
- if (phydev->mdio.bus->parent && dev->dev.parent != phydev->mdio.bus->parent)
+ if (dev && phydev->mdio.bus->parent && dev->dev.parent != phydev->mdio.bus->parent)
phydev->devlink = device_link_add(dev->dev.parent, &phydev->mdio.dev,
DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 09cc65c0da93..4d2519cdb801 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1812,10 +1812,9 @@ int phylink_fwnode_phy_connect(struct phylink *pl,
ret = phy_attach_direct(pl->netdev, phy_dev, flags,
pl->link_interface);
- if (ret) {
- phy_device_free(phy_dev);
+ phy_device_free(phy_dev);
+ if (ret)
return ret;
- }
ret = phylink_bringup_phy(pl, phy_dev, pl->link_config.interface);
if (ret)
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 2c82fbcaab22..7a2b0094de51 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -57,9 +57,7 @@
static inline int
pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
{
- return usbnet_read_cmd(dev, req,
- USB_DIR_IN | USB_TYPE_VENDOR |
- USB_RECIP_DEVICE,
+ return usbnet_write_cmd(dev, req, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
val, index, NULL, 0);
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 18b3de854aeb..61e33e4dd0cd 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1677,13 +1677,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
received = virtnet_receive(rq, budget, &xdp_xmit);
+ if (xdp_xmit & VIRTIO_XDP_REDIR)
+ xdp_do_flush();
+
/* Out of packets? */
if (received < budget)
virtqueue_napi_complete(napi, rq->vq, received);
- if (xdp_xmit & VIRTIO_XDP_REDIR)
- xdp_do_flush();
-
if (xdp_xmit & VIRTIO_XDP_TX) {
sq = virtnet_xdp_get_sq(vi);
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
@@ -2158,8 +2158,8 @@ static int virtnet_close(struct net_device *dev)
cancel_delayed_work_sync(&vi->refill);
for (i = 0; i < vi->max_queue_pairs; i++) {
- xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
napi_disable(&vi->rq[i].napi);
+ xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
virtnet_napi_tx_disable(&vi->sq[i].napi);
}
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
index 7eff3531b9a5..7ff33c1d6ac7 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
@@ -152,6 +152,15 @@ static irqreturn_t t7xx_dpmaif_isr_handler(int irq, void *data)
}
t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t t7xx_dpmaif_isr_thread(int irq, void *data)
+{
+ struct dpmaif_isr_para *isr_para = data;
+ struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
+
t7xx_dpmaif_irq_cb(isr_para);
t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
return IRQ_HANDLED;
@@ -188,7 +197,7 @@ static void t7xx_dpmaif_register_pcie_irq(struct dpmaif_ctrl *dpmaif_ctrl)
t7xx_pcie_mac_clear_int(t7xx_dev, int_type);
t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler;
- t7xx_dev->intr_thread[int_type] = NULL;
+ t7xx_dev->intr_thread[int_type] = t7xx_dpmaif_isr_thread;
t7xx_dev->callback_param[int_type] = isr_para;
t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type);
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
index aa2174a10437..f4ff2198b5ef 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -840,14 +840,13 @@ int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
if (!rxq->que_started) {
atomic_set(&rxq->rx_processing, 0);
+ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
return work_done;
}
- if (!rxq->sleep_lock_pending) {
- pm_runtime_get_noresume(rxq->dpmaif_ctrl->dev);
+ if (!rxq->sleep_lock_pending)
t7xx_pci_disable_sleep(t7xx_dev);
- }
ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire);
if (!ret) {
@@ -876,22 +875,22 @@ int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
napi_complete_done(napi, work_done);
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
+ t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
+ atomic_set(&rxq->rx_processing, 0);
} else {
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
}
- t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
- pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
- pm_runtime_put_noidle(rxq->dpmaif_ctrl->dev);
- atomic_set(&rxq->rx_processing, 0);
-
return work_done;
}
void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
{
struct dpmaif_rx_queue *rxq;
- int qno;
+ struct dpmaif_ctrl *ctrl;
+ int qno, ret;
qno = ffs(que_mask) - 1;
if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
@@ -900,6 +899,18 @@ void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int
}
rxq = &dpmaif_ctrl->rxq[qno];
+ ctrl = rxq->dpmaif_ctrl;
+ /* We need to make sure that the modem has been resumed before
+ * calling napi. This can't be done inside the polling function
+ * as we could be blocked waiting for the device to be resumed,
+ * which can't be done from softirq context the poll function
+ * is running in.
+ */
+ ret = pm_runtime_resume_and_get(ctrl->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err(ctrl->dev, "Failed to resume device: %d\n", ret);
+ return;
+ }
napi_schedule(&rxq->napi);
}
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c
index 494a28e386a3..3ef4a8a4f8fd 100644
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>
@@ -45,12 +46,25 @@
static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
- int i;
+ struct dpmaif_ctrl *ctrl;
+ int i, ret;
+
+ ctrl = ctlb->hif_ctrl;
if (ctlb->is_napi_en)
return;
for (i = 0; i < RXQ_NUM; i++) {
+ /* The usage count has to be bumped every time before calling
+ * napi_schedule. It will be decreased in the poll routine,
+ * right after napi_complete_done is called.
+ */
+ ret = pm_runtime_resume_and_get(ctrl->dev);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "Failed to resume device: %d\n",
+ ret);
+ return;
+ }
napi_enable(ctlb->napi[i]);
napi_schedule(ctlb->napi[i]);
}
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 871f2a27a398..226fc1703e90 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -121,6 +121,8 @@ void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+ pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
+ pm_runtime_allow(&t7xx_dev->pdev->dev);
pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
}
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 79d93126453d..77b06d54cc62 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -102,6 +102,25 @@ config NVDIMM_KEYS
depends on ENCRYPTED_KEYS
depends on (LIBNVDIMM=ENCRYPTED_KEYS) || LIBNVDIMM=m
+config NVDIMM_KMSAN
+ bool
+ depends on KMSAN
+ help
+ KMSAN, and other memory debug facilities, increase the size of
+ 'struct page' to contain extra metadata. This collides with
+ the NVDIMM capability to store a potentially
+ larger-than-"System RAM" size 'struct page' array in a
+ reservation of persistent memory rather than limited /
+ precious DRAM. However, that reservation needs to persist for
+ the life of the given NVDIMM namespace. If you are using KMSAN
+ to debug an issue unrelated to NVDIMMs or DAX then say N to this
+ option. Otherwise, say Y but understand that any namespaces
+ (with the page array stored in pmem) created with this build of
+ the kernel will permanently reserve and strand excess
+ capacity compared to the CONFIG_KMSAN=n case.
+
+ Select N if unsure.
+
config NVDIMM_TEST_BUILD
tristate "Build the unit test core"
depends on m
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 85ca5b4da3cf..ec5219680092 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -652,7 +652,7 @@ void devm_namespace_disable(struct device *dev,
struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
/* max struct page size independent of kernel config */
-#define MAX_STRUCT_PAGE_SIZE 128
+#define MAX_STRUCT_PAGE_SIZE 64
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 61af072ac98f..af7d9301520c 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -13,6 +13,8 @@
#include "pfn.h"
#include "nd.h"
+static const bool page_struct_override = IS_ENABLED(CONFIG_NVDIMM_KMSAN);
+
static void nd_pfn_release(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
@@ -758,12 +760,6 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
return -ENXIO;
}
- /*
- * Note, we use 64 here for the standard size of struct page,
- * debugging options may cause it to be larger in which case the
- * implementation will limit the pfns advertised through
- * ->direct_access() to those that are included in the memmap.
- */
start = nsio->res.start;
size = resource_size(&nsio->res);
npfns = PHYS_PFN(size - SZ_8K);
@@ -782,20 +778,33 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
}
end_trunc = start + size - ALIGN_DOWN(start + size, align);
if (nd_pfn->mode == PFN_MODE_PMEM) {
+ unsigned long page_map_size = MAX_STRUCT_PAGE_SIZE * npfns;
+
/*
* The altmap should be padded out to the block size used
* when populating the vmemmap. This *should* be equal to
* PMD_SIZE for most architectures.
*
- * Also make sure size of struct page is less than 128. We
- * want to make sure we use large enough size here so that
- * we don't have a dynamic reserve space depending on
- * struct page size. But we also want to make sure we notice
- * when we end up adding new elements to struct page.
+ * Also make sure size of struct page is less than
+ * MAX_STRUCT_PAGE_SIZE. The goal here is compatibility in the
+ * face of production kernel configurations that reduce the
+ * 'struct page' size below MAX_STRUCT_PAGE_SIZE. For debug
+ * kernel configurations that increase the 'struct page' size
+ * above MAX_STRUCT_PAGE_SIZE, the page_struct_override allows
+ * for continuing with the capacity that will be wasted when
+ * reverting to a production kernel configuration. Otherwise,
+ * those configurations are blocked by default.
*/
- BUILD_BUG_ON(sizeof(struct page) > MAX_STRUCT_PAGE_SIZE);
- offset = ALIGN(start + SZ_8K + MAX_STRUCT_PAGE_SIZE * npfns, align)
- - start;
+ if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE) {
+ if (page_struct_override)
+ page_map_size = sizeof(struct page) * npfns;
+ else {
+ dev_err(&nd_pfn->dev,
+ "Memory debug options prevent using pmem for the page map\n");
+ return -EINVAL;
+ }
+ }
+ offset = ALIGN(start + SZ_8K + page_map_size, align) - start;
} else if (nd_pfn->mode == PFN_MODE_RAM)
offset = ALIGN(start + SZ_8K, align) - start;
else
@@ -818,7 +827,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
pfn_sb->version_minor = cpu_to_le16(4);
pfn_sb->end_trunc = cpu_to_le32(end_trunc);
pfn_sb->align = cpu_to_le32(nd_pfn->align);
- pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
+ if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE && page_struct_override)
+ pfn_sb->page_struct_size = cpu_to_le16(sizeof(struct page));
+ else
+ pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
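
Rough arithmetic behind the MAX_STRUCT_PAGE_SIZE change above (128 -> 64 bytes per page), assuming 4 KiB pages; for a 1 TiB namespace the pmem-backed page map reservation roughly halves, from ~32 GiB to ~16 GiB. A small self-contained illustration (not part of the patch):

#include <stdio.h>

int main(void)
{
        unsigned long long size = 1ULL << 40;          /* 1 TiB namespace */
        unsigned long long npfns = size >> 12;         /* 4 KiB pages */

        printf("128 B/page reservation: %llu MiB\n", (npfns * 128) >> 20);
        printf(" 64 B/page reservation: %llu MiB\n", (npfns * 64) >> 20);
        return 0;
}
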
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 4424f53a8a0a..bdb97496ba2d 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -45,6 +45,8 @@ struct nvme_dhchap_queue_context {
int sess_key_len;
};
+static struct workqueue_struct *nvme_auth_wq;
+
#define nvme_auth_flags_from_qid(qid) \
(qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
#define nvme_auth_queue_from_qid(ctrl, qid) \
@@ -866,7 +868,7 @@ int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
chap = &ctrl->dhchap_ctxs[qid];
cancel_work_sync(&chap->auth_work);
- queue_work(nvme_wq, &chap->auth_work);
+ queue_work(nvme_auth_wq, &chap->auth_work);
return 0;
}
EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
@@ -1008,10 +1010,15 @@ EXPORT_SYMBOL_GPL(nvme_auth_free);
int __init nvme_init_auth(void)
{
+ nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_auth_wq)
+ return -ENOMEM;
+
nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
if (!nvme_chap_buf_cache)
- return -ENOMEM;
+ goto err_destroy_workqueue;
nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
mempool_free_slab, nvme_chap_buf_cache);
@@ -1021,6 +1028,8 @@ int __init nvme_init_auth(void)
return 0;
err_destroy_chap_buf_cache:
kmem_cache_destroy(nvme_chap_buf_cache);
+err_destroy_workqueue:
+ destroy_workqueue(nvme_auth_wq);
return -ENOMEM;
}
@@ -1028,4 +1037,5 @@ void __exit nvme_exit_auth(void)
{
mempool_destroy(nvme_chap_buf_pool);
kmem_cache_destroy(nvme_chap_buf_cache);
+ destroy_workqueue(nvme_auth_wq);
}
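
A generic, hypothetical sketch (not part of the patch above) of the reverse-order unwind idiom the init/exit paths extend when the dedicated workqueue is added:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct kmem_cache *demo_cache;

static int __init demo_init(void)
{
        demo_wq = alloc_workqueue("demo-wq", WQ_UNBOUND, 0);
        if (!demo_wq)
                return -ENOMEM;

        demo_cache = kmem_cache_create("demo-cache", 64, 0, 0, NULL);
        if (!demo_cache)
                goto err_destroy_wq;

        return 0;

err_destroy_wq:
        destroy_workqueue(demo_wq);
        return -ENOMEM;
}

static void __exit demo_exit(void)
{
        /* Tear down in the reverse order of allocation. */
        kmem_cache_destroy(demo_cache);
        destroy_workqueue(demo_wq);
}
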
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 505e16f20e57..8b6421141162 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4921,7 +4921,9 @@ out_cleanup_admin_q:
blk_mq_destroy_queue(ctrl->admin_q);
blk_put_queue(ctrl->admin_q);
out_free_tagset:
- blk_mq_free_tag_set(ctrl->admin_tagset);
+ blk_mq_free_tag_set(set);
+ ctrl->admin_q = NULL;
+ ctrl->fabrics_q = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
@@ -4983,6 +4985,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
out_free_tag_set:
blk_mq_free_tag_set(set);
+ ctrl->connect_q = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index ab2627e17bb9..1ab6601fdd5c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1685,8 +1685,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
else {
queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
be16_to_cpu(rqst->assoc_cmd.sqsize));
- if (!queue)
+ if (!queue) {
ret = VERR_QUEUE_ALLOC_FAIL;
+ nvmet_fc_tgt_a_put(iod->assoc);
+ }
}
}
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
index 34130449f2d2..39aa27942f28 100644
--- a/drivers/nvmem/brcm_nvram.c
+++ b/drivers/nvmem/brcm_nvram.c
@@ -98,6 +98,9 @@ static int brcm_nvram_parse(struct brcm_nvram *priv)
len = le32_to_cpu(header.len);
data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
memcpy_fromio(data, priv->base, len);
data[len - 1] = '\0';
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 321d7d63e068..34ee9d36ee7b 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -770,31 +770,32 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
return ERR_PTR(rval);
}
- if (config->wp_gpio)
- nvmem->wp_gpio = config->wp_gpio;
- else if (!config->ignore_wp)
+ nvmem->id = rval;
+
+ nvmem->dev.type = &nvmem_provider_type;
+ nvmem->dev.bus = &nvmem_bus_type;
+ nvmem->dev.parent = config->dev;
+
+ device_initialize(&nvmem->dev);
+
+ if (!config->ignore_wp)
nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
GPIOD_OUT_HIGH);
if (IS_ERR(nvmem->wp_gpio)) {
- ida_free(&nvmem_ida, nvmem->id);
rval = PTR_ERR(nvmem->wp_gpio);
- kfree(nvmem);
- return ERR_PTR(rval);
+ nvmem->wp_gpio = NULL;
+ goto err_put_device;
}
kref_init(&nvmem->refcnt);
INIT_LIST_HEAD(&nvmem->cells);
- nvmem->id = rval;
nvmem->owner = config->owner;
if (!nvmem->owner && config->dev->driver)
nvmem->owner = config->dev->driver->owner;
nvmem->stride = config->stride ?: 1;
nvmem->word_size = config->word_size ?: 1;
nvmem->size = config->size;
- nvmem->dev.type = &nvmem_provider_type;
- nvmem->dev.bus = &nvmem_bus_type;
- nvmem->dev.parent = config->dev;
nvmem->root_only = config->root_only;
nvmem->priv = config->priv;
nvmem->type = config->type;
@@ -822,11 +823,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
break;
}
- if (rval) {
- ida_free(&nvmem_ida, nvmem->id);
- kfree(nvmem);
- return ERR_PTR(rval);
- }
+ if (rval)
+ goto err_put_device;
nvmem->read_only = device_property_present(config->dev, "read-only") ||
config->read_only || !nvmem->reg_write;
@@ -835,28 +833,22 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->dev.groups = nvmem_dev_groups;
#endif
- dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
-
- rval = device_register(&nvmem->dev);
- if (rval)
- goto err_put_device;
-
if (nvmem->nkeepout) {
rval = nvmem_validate_keepouts(nvmem);
if (rval)
- goto err_device_del;
+ goto err_put_device;
}
if (config->compat) {
rval = nvmem_sysfs_setup_compat(nvmem, config);
if (rval)
- goto err_device_del;
+ goto err_put_device;
}
if (config->cells) {
rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
if (rval)
- goto err_teardown_compat;
+ goto err_remove_cells;
}
rval = nvmem_add_cells_from_table(nvmem);
@@ -867,17 +859,20 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (rval)
goto err_remove_cells;
+ dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
+
+ rval = device_add(&nvmem->dev);
+ if (rval)
+ goto err_remove_cells;
+
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
return nvmem;
err_remove_cells:
nvmem_device_remove_all_cells(nvmem);
-err_teardown_compat:
if (config->compat)
nvmem_sysfs_remove_compat(nvmem, config);
-err_device_del:
- device_del(&nvmem->dev);
err_put_device:
put_device(&nvmem->dev);
@@ -1242,16 +1237,21 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
if (!cell_np)
return ERR_PTR(-ENOENT);
- nvmem_np = of_get_next_parent(cell_np);
- if (!nvmem_np)
+ nvmem_np = of_get_parent(cell_np);
+ if (!nvmem_np) {
+ of_node_put(cell_np);
return ERR_PTR(-EINVAL);
+ }
nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
of_node_put(nvmem_np);
- if (IS_ERR(nvmem))
+ if (IS_ERR(nvmem)) {
+ of_node_put(cell_np);
return ERR_CAST(nvmem);
+ }
cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
+ of_node_put(cell_np);
if (!cell_entry) {
__nvmem_device_put(nvmem);
return ERR_PTR(-ENOENT);
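
The core.c rework above leans on the split device_initialize()/device_add() registration so every failure path can funnel into put_device(). A compact, hypothetical sketch of that idiom (not part of the patch):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct demo_obj {
        struct device dev;
};

static void demo_release(struct device *dev)
{
        kfree(container_of(dev, struct demo_obj, dev));
}

static struct demo_obj *demo_create(struct device *parent)
{
        struct demo_obj *obj;
        int err;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        device_initialize(&obj->dev);
        obj->dev.parent = parent;
        obj->dev.release = demo_release;
        dev_set_name(&obj->dev, "demo0");

        /* ... setup steps that may fail go here, jumping to err_put ... */

        err = device_add(&obj->dev);
        if (err)
                goto err_put;

        return obj;

err_put:
        put_device(&obj->dev);  /* drops the kobject ref; frees obj via release */
        return ERR_PTR(err);
}
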
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
index 4fcb63507ecd..8499892044b7 100644
--- a/drivers/nvmem/qcom-spmi-sdam.c
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -166,6 +166,7 @@ static const struct of_device_id sdam_match_table[] = {
{ .compatible = "qcom,spmi-sdam" },
{},
};
+MODULE_DEVICE_TABLE(of, sdam_match_table);
static struct platform_driver sdam_driver = {
.driver = {
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index 5750e1f4bcdb..92dfe4cb10e3 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -41,8 +41,21 @@ static int sunxi_sid_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct sunxi_sid *sid = context;
+ u32 word;
+
+ /* .stride = 4 so offset is guaranteed to be aligned */
+ __ioread32_copy(val, sid->base + sid->value_offset + offset, bytes / 4);
- memcpy_fromio(val, sid->base + sid->value_offset + offset, bytes);
+ val += round_down(bytes, 4);
+ offset += round_down(bytes, 4);
+ bytes = bytes % 4;
+
+ if (!bytes)
+ return 0;
+
+ /* Handle any trailing bytes */
+ word = readl_relaxed(sid->base + sid->value_offset + offset);
+ memcpy(val, &word, bytes);
return 0;
}
diff --git a/drivers/of/address.c b/drivers/of/address.c
index c34ac33b7338..67763e5b8c0e 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -965,8 +965,19 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
}
of_dma_range_parser_init(&parser, node);
- for_each_of_range(&parser, &range)
+ for_each_of_range(&parser, &range) {
+ if (range.cpu_addr == OF_BAD_ADDR) {
+ pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
+ range.bus_addr, node);
+ continue;
+ }
num_ranges++;
+ }
+
+ if (!num_ranges) {
+ ret = -EINVAL;
+ goto out;
+ }
r = kcalloc(num_ranges + 1, sizeof(*r), GFP_KERNEL);
if (!r) {
@@ -975,18 +986,16 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
}
/*
- * Record all info in the generic DMA ranges array for struct device.
+ * Record all info in the generic DMA ranges array for struct device,
+ * returning an error if we don't find any parsable ranges.
*/
*map = r;
of_dma_range_parser_init(&parser, node);
for_each_of_range(&parser, &range) {
pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
range.bus_addr, range.cpu_addr, range.size);
- if (range.cpu_addr == OF_BAD_ADDR) {
- pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
- range.bus_addr, node);
+ if (range.cpu_addr == OF_BAD_ADDR)
continue;
- }
r->cpu_start = range.cpu_addr;
r->dma_start = range.bus_addr;
r->size = range.size;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index f08b25195ae7..d1a68b6d03b3 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -26,7 +26,6 @@
#include <linux/serial_core.h>
#include <linux/sysfs.h>
#include <linux/random.h>
-#include <linux/kmemleak.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
@@ -525,12 +524,9 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
size = dt_mem_next_cell(dt_root_size_cells, &prop);
if (size &&
- early_init_dt_reserve_memory(base, size, nomap) == 0) {
+ early_init_dt_reserve_memory(base, size, nomap) == 0)
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
- if (!nomap)
- kmemleak_alloc_phys(base, size, 0);
- }
else
pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 81c8c227ab6b..b3878a98d27f 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -525,6 +525,7 @@ static int __init of_platform_default_populate_init(void)
if (IS_ENABLED(CONFIG_PPC)) {
struct device_node *boot_display = NULL;
struct platform_device *dev;
+ int display_number = 0;
int ret;
/* Check if we have a MacOS display without a node spec */
@@ -555,16 +556,23 @@ static int __init of_platform_default_populate_init(void)
if (!of_get_property(node, "linux,opened", NULL) ||
!of_get_property(node, "linux,boot-display", NULL))
continue;
- dev = of_platform_device_create(node, "of-display", NULL);
+ dev = of_platform_device_create(node, "of-display.0", NULL);
+ of_node_put(node);
if (WARN_ON(!dev))
return -ENOMEM;
boot_display = node;
+ display_number++;
break;
}
for_each_node_by_type(node, "display") {
+ char buf[14];
+ const char *of_display_format = "of-display.%d";
+
if (!of_get_property(node, "linux,opened", NULL) || node == boot_display)
continue;
- of_platform_device_create(node, "of-display", NULL);
+ ret = snprintf(buf, sizeof(buf), of_display_format, display_number++);
+ if (ret < sizeof(buf))
+ of_platform_device_create(node, buf, NULL);
}
} else {
diff --git a/drivers/opp/Kconfig b/drivers/opp/Kconfig
index e8ce47b32735..d7c649a1a981 100644
--- a/drivers/opp/Kconfig
+++ b/drivers/opp/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config PM_OPP
bool
- select SRCU
help
SOCs have a standard set of tuples consisting of frequency and
voltage pairs that the device will support per voltage domain. This
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index 96a30a032c5f..2c7fb683441e 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -235,7 +235,7 @@ static void opp_migrate_dentry(struct opp_device *opp_dev,
dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir,
opp_table->dentry_name);
- if (!dentry) {
+ if (IS_ERR(dentry)) {
dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
__func__, dev_name(opp_dev->dev), dev_name(dev));
return;
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index d6af5726ddf3..2a18f7ba2398 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -274,8 +274,7 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun
/* We'll use a local copy of buf */
count = min_t(size_t, count, sizeof(in)-1);
- strncpy(in, buf, count);
- in[count] = '\0';
+ strscpy(in, buf, count + 1);
/* Let's clean up the target. 0xff is a blank pattern */
memset(&hwpath, 0xff, sizeof(hwpath));
@@ -388,8 +387,7 @@ pdcspath_layer_write(struct pdcspath_entry *entry, const char *buf, size_t count
/* We'll use a local copy of buf */
count = min_t(size_t, count, sizeof(in)-1);
- strncpy(in, buf, count);
- in[count] = '\0';
+ strscpy(in, buf, count + 1);
/* Let's clean up the target. 0 is a blank pattern */
memset(&layers, 0, sizeof(layers));
@@ -756,8 +754,7 @@ static ssize_t pdcs_auto_write(struct kobject *kobj,
/* We'll use a local copy of buf */
count = min_t(size_t, count, sizeof(in)-1);
- strncpy(in, buf, count);
- in[count] = '\0';
+ strscpy(in, buf, count + 1);
/* Current flags are stored in primary boot path entry */
pathentry = &pdcspath_entry_primary;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index fba95486caaf..5641786bd020 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1665,7 +1665,6 @@ int pci_save_state(struct pci_dev *dev)
return i;
pci_save_ltr_state(dev);
- pci_save_aspm_l1ss_state(dev);
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
pci_save_ptm_state(dev);
@@ -1772,7 +1771,6 @@ void pci_restore_state(struct pci_dev *dev)
* LTR itself (in the PCIe capability).
*/
pci_restore_ltr_state(dev);
- pci_restore_aspm_l1ss_state(dev);
pci_restore_pcie_state(dev);
pci_restore_pasid_state(dev);
@@ -3465,11 +3463,6 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
if (error)
pci_err(dev, "unable to allocate suspend buffer for LTR\n");
- error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS,
- 2 * sizeof(u32));
- if (error)
- pci_err(dev, "unable to allocate suspend buffer for ASPM-L1SS\n");
-
pci_allocate_vc_save_buffers(dev);
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 9ed3b5550043..9049d07d3aae 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -566,14 +566,10 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
-void pci_save_aspm_l1ss_state(struct pci_dev *dev);
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
-static inline void pci_save_aspm_l1ss_state(struct pci_dev *dev) { }
-static inline void pci_restore_aspm_l1ss_state(struct pci_dev *dev) { }
#endif
#ifdef CONFIG_PCIE_ECRC
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 53a1fa306e1e..4b4184563a92 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -470,31 +470,6 @@ static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
pci_write_config_dword(pdev, pos, val);
}
-static void aspm_program_l1ss(struct pci_dev *dev, u32 ctl1, u32 ctl2)
-{
- u16 l1ss = dev->l1ss;
- u32 l1_2_enable;
-
- /*
- * Per PCIe r6.0, sec 5.5.4, T_POWER_ON in PCI_L1SS_CTL2 must be
- * programmed prior to setting the L1.2 enable bits in PCI_L1SS_CTL1.
- */
- pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL2, ctl2);
-
- /*
- * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD in
- * PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
- * enable bits, even though they're all in PCI_L1SS_CTL1.
- */
- l1_2_enable = ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
- ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
-
- pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1, ctl1);
- if (l1_2_enable)
- pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1,
- ctl1 | l1_2_enable);
-}
-
/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l1ss_info(struct pcie_link_state *link,
u32 parent_l1ss_cap, u32 child_l1ss_cap)
@@ -504,6 +479,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
u32 ctl1 = 0, ctl2 = 0;
u32 pctl1, pctl2, cctl1, cctl2;
+ u32 pl1_2_enables, cl1_2_enables;
if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
return;
@@ -552,21 +528,39 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
ctl2 == pctl2 && ctl2 == cctl2)
return;
- pctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
- pctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
- aspm_program_l1ss(parent, pctl1, ctl2);
-
- cctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
- cctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
- aspm_program_l1ss(child, cctl1, ctl2);
+ /* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
+ pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+
+ if (pl1_2_enables || cl1_2_enables) {
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ }
+
+ /* Program T_POWER_ON times in both ports */
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
+ pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
+
+ /* Program Common_Mode_Restore_Time in upstream device */
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
+
+ /* Program LTR_L1.2_THRESHOLD time in both ports */
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+
+ if (pl1_2_enables || cl1_2_enables) {
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
+ pl1_2_enables);
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
+ cl1_2_enables);
+ }
}
static void aspm_l1ss_init(struct pcie_link_state *link)
@@ -757,43 +751,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
PCI_L1SS_CTL1_L1SS_MASK, val);
}
-void pci_save_aspm_l1ss_state(struct pci_dev *dev)
-{
- struct pci_cap_saved_state *save_state;
- u16 l1ss = dev->l1ss;
- u32 *cap;
-
- if (!l1ss)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
- if (!save_state)
- return;
-
- cap = (u32 *)&save_state->cap.data[0];
- pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL2, cap++);
- pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL1, cap++);
-}
-
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev)
-{
- struct pci_cap_saved_state *save_state;
- u32 *cap, ctl1, ctl2;
- u16 l1ss = dev->l1ss;
-
- if (!l1ss)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
- if (!save_state)
- return;
-
- cap = (u32 *)&save_state->cap.data[0];
- ctl2 = *cap++;
- ctl1 = *cap;
- aspm_program_l1ss(dev, ctl1, ctl2);
-}
-
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
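
A single-device, hypothetical sketch (not part of the patch above) of the register write ordering the hunk preserves, per the PCIe r5.0 sec 5.5.4 / 7.8.3.3 references in the code, assuming L1.2 starts disabled (the patch additionally clears any already-set enables on both ports first):

#include <linux/pci.h>

static void demo_program_l1_2(struct pci_dev *dev, u32 ctl1, u32 ctl2)
{
        u16 l1ss = dev->l1ss;
        u32 enables = ctl1 & PCI_L1SS_CTL1_L1_2_MASK;

        /* T_POWER_ON must be in place before the L1.2 enable bits are set. */
        pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL2, ctl2);

        /* Timing fields next, with the enable bits still clear ... */
        pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1, ctl1 & ~enables);

        /* ... then, finally, the L1.2 enables themselves. */
        if (enables)
                pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1, ctl1);
}
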
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 3945612900e6..9c6ee46ac7a0 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -93,10 +93,19 @@ static int aspeed_sig_expr_enable(struct aspeed_pinmux_data *ctx,
static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr)
{
+ int ret;
+
pr_debug("Disabling signal %s for %s\n", expr->signal,
expr->function);
- return aspeed_sig_expr_set(ctx, expr, false);
+ ret = aspeed_sig_expr_eval(ctx, expr, true);
+ if (ret < 0)
+ return ret;
+
+ if (ret)
+ return aspeed_sig_expr_set(ctx, expr, false);
+
+ return 0;
}
/**
@@ -114,7 +123,7 @@ static int aspeed_disable_sig(struct aspeed_pinmux_data *ctx,
int ret = 0;
if (!exprs)
- return true;
+ return -EINVAL;
while (*exprs && !ret) {
ret = aspeed_sig_expr_disable(ctx, *exprs);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index cc3aaba24188..e49f271de936 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1709,6 +1709,12 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_
EXPORT_SYMBOL_GPL(intel_pinctrl_get_soc_data);
#ifdef CONFIG_PM_SLEEP
+static bool __intel_gpio_is_direct_irq(u32 value)
+{
+ return (value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
+ (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO);
+}
+
static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int pin)
{
const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
@@ -1742,8 +1748,7 @@ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int
* See https://bugzilla.kernel.org/show_bug.cgi?id=214749.
*/
value = readl(intel_get_padcfg(pctrl, pin, PADCFG0));
- if ((value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
- (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO))
+ if (__intel_gpio_is_direct_irq(value))
return true;
return false;
@@ -1873,7 +1878,12 @@ int intel_pinctrl_resume_noirq(struct device *dev)
for (i = 0; i < pctrl->soc->npins; i++) {
const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
- if (!intel_pinctrl_should_save(pctrl, desc->number))
+ if (!(intel_pinctrl_should_save(pctrl, desc->number) ||
+ /*
+ * If the firmware mangled the register contents too much,
+ * check the saved value for the Direct IRQ mode.
+ */
+ __intel_gpio_is_direct_irq(pads[i].padcfg0)))
continue;
intel_restore_padcfg(pctrl, desc->number, PADCFG0, pads[i].padcfg0);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8195.c b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
index 89557c7ed2ab..09c4dcef9338 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8195.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
@@ -659,7 +659,7 @@ static const struct mtk_pin_field_calc mt8195_pin_drv_range[] = {
PIN_FIELD_BASE(10, 10, 4, 0x010, 0x10, 9, 3),
PIN_FIELD_BASE(11, 11, 4, 0x000, 0x10, 24, 3),
PIN_FIELD_BASE(12, 12, 4, 0x010, 0x10, 12, 3),
- PIN_FIELD_BASE(13, 13, 4, 0x010, 0x10, 27, 3),
+ PIN_FIELD_BASE(13, 13, 4, 0x000, 0x10, 27, 3),
PIN_FIELD_BASE(14, 14, 4, 0x010, 0x10, 15, 3),
PIN_FIELD_BASE(15, 15, 4, 0x010, 0x10, 0, 3),
PIN_FIELD_BASE(16, 16, 4, 0x010, 0x10, 18, 3),
@@ -708,7 +708,7 @@ static const struct mtk_pin_field_calc mt8195_pin_drv_range[] = {
PIN_FIELD_BASE(78, 78, 3, 0x000, 0x10, 15, 3),
PIN_FIELD_BASE(79, 79, 3, 0x000, 0x10, 18, 3),
PIN_FIELD_BASE(80, 80, 3, 0x000, 0x10, 21, 3),
- PIN_FIELD_BASE(81, 81, 3, 0x000, 0x10, 28, 3),
+ PIN_FIELD_BASE(81, 81, 3, 0x000, 0x10, 24, 3),
PIN_FIELD_BASE(82, 82, 3, 0x000, 0x10, 27, 3),
PIN_FIELD_BASE(83, 83, 3, 0x010, 0x10, 0, 3),
PIN_FIELD_BASE(84, 84, 3, 0x010, 0x10, 3, 3),
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 9bc6e3922e78..32c3edaf9038 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -365,6 +365,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
} else {
debounce_enable = " ∅";
+ time = 0;
}
snprintf(debounce_value, sizeof(debounce_value), "%u", time * unit);
seq_printf(s, "debounce %s (🕑 %sus)| ", debounce_enable, debounce_value);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 99c3745da456..190923757cda 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -372,6 +372,8 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
if (!pcs->fmask)
return 0;
function = pinmux_generic_get_function(pctldev, fselector);
+ if (!function)
+ return -EINVAL;
func = function->data;
if (!func)
return -EINVAL;
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
index c3c8c34148f1..e22d03ce292e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
@@ -105,7 +105,7 @@ static const struct pinctrl_pin_desc sm8450_lpi_pins[] = {
static const char * const swr_tx_clk_groups[] = { "gpio0" };
static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" };
static const char * const swr_rx_clk_groups[] = { "gpio3" };
-static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5", "gpio15" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" };
static const char * const dmic1_clk_groups[] = { "gpio6" };
static const char * const dmic1_data_groups[] = { "gpio7" };
static const char * const dmic2_clk_groups[] = { "gpio8" };
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
index a825af8126c8..2ce8cb2170df 100644
--- a/drivers/platform/x86/amd/Kconfig
+++ b/drivers/platform/x86/amd/Kconfig
@@ -8,6 +8,7 @@ source "drivers/platform/x86/amd/pmf/Kconfig"
config AMD_PMC
tristate "AMD SoC PMC driver"
depends on ACPI && PCI && RTC_CLASS
+ select SERIO
help
The driver provides support for AMD Power Management Controller
primarily responsible for S2Idle transactions that are driven from
diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c
index 644af42e07cf..96a8e1832c05 100644
--- a/drivers/platform/x86/amd/pmf/auto-mode.c
+++ b/drivers/platform/x86/amd/pmf/auto-mode.c
@@ -275,13 +275,8 @@ int amd_pmf_reset_amt(struct amd_pmf_dev *dev)
*/
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
- int mode = amd_pmf_get_pprof_modes(dev);
-
- if (mode < 0)
- return mode;
-
dev_dbg(dev->dev, "resetting AMT thermals\n");
- amd_pmf_update_slider(dev, SLIDER_OP_SET, mode, NULL);
+ amd_pmf_set_sps_power_limits(dev);
}
return 0;
}
@@ -299,7 +294,5 @@ void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev)
void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev)
{
amd_pmf_load_defaults_auto_mode(dev);
- /* update the thermal limits for Automode */
- amd_pmf_set_automode(dev, config_store.current_mode, NULL);
amd_pmf_init_metrics_table(dev);
}
diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c
index 3f9731a2ac28..4beb22a19466 100644
--- a/drivers/platform/x86/amd/pmf/cnqf.c
+++ b/drivers/platform/x86/amd/pmf/cnqf.c
@@ -103,7 +103,7 @@ int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_l
src = amd_pmf_cnqf_get_power_source(dev);
- if (dev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ if (is_pprof_balanced(dev)) {
amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
} else {
/*
@@ -307,13 +307,9 @@ static ssize_t cnqf_enable_store(struct device *dev,
const char *buf, size_t count)
{
struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
- int mode, result, src;
+ int result, src;
bool input;
- mode = amd_pmf_get_pprof_modes(pdev);
- if (mode < 0)
- return mode;
-
result = kstrtobool(buf, &input);
if (result)
return result;
@@ -321,11 +317,11 @@ static ssize_t cnqf_enable_store(struct device *dev,
src = amd_pmf_cnqf_get_power_source(pdev);
pdev->cnqf_enabled = input;
- if (pdev->cnqf_enabled && pdev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ if (pdev->cnqf_enabled && is_pprof_balanced(pdev)) {
amd_pmf_set_cnqf(pdev, src, config_store.current_mode, NULL);
} else {
if (is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
- amd_pmf_update_slider(pdev, SLIDER_OP_SET, mode, NULL);
+ amd_pmf_set_sps_power_limits(pdev);
}
dev_dbg(pdev->dev, "Received CnQF %s\n", input ? "on" : "off");
@@ -386,7 +382,7 @@ int amd_pmf_init_cnqf(struct amd_pmf_dev *dev)
dev->cnqf_enabled = amd_pmf_check_flags(dev);
/* update the thermal for CnQF */
- if (dev->cnqf_enabled && dev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ if (dev->cnqf_enabled && is_pprof_balanced(dev)) {
src = amd_pmf_cnqf_get_power_source(dev);
amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
}
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index a5f5a4bcff6d..da23639071d7 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -58,6 +58,25 @@ static bool force_load;
module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");
+static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);
+
+ if (event != PSY_EVENT_PROP_CHANGED)
+ return NOTIFY_OK;
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
+ is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
+ is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
+ if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
+ return NOTIFY_DONE;
+ }
+
+ amd_pmf_set_sps_power_limits(pmf);
+
+ return NOTIFY_OK;
+}
+
static int current_power_limits_show(struct seq_file *seq, void *unused)
{
struct amd_pmf_dev *dev = seq->private;
@@ -366,14 +385,18 @@ static int amd_pmf_probe(struct platform_device *pdev)
if (!dev->regbase)
return -ENOMEM;
+ mutex_init(&dev->lock);
+ mutex_init(&dev->update_mutex);
+
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
amd_pmf_init_features(dev);
apmf_install_handler(dev);
amd_pmf_dbgfs_register(dev);
- mutex_init(&dev->lock);
- mutex_init(&dev->update_mutex);
+ dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
+ power_supply_reg_notifier(&dev->pwr_src_notifier);
+
dev_info(dev->dev, "registered PMF device successfully\n");
return 0;
@@ -383,11 +406,12 @@ static int amd_pmf_remove(struct platform_device *pdev)
{
struct amd_pmf_dev *dev = platform_get_drvdata(pdev);
- mutex_destroy(&dev->lock);
- mutex_destroy(&dev->update_mutex);
+ power_supply_unreg_notifier(&dev->pwr_src_notifier);
amd_pmf_deinit_features(dev);
apmf_acpi_deinit(dev);
amd_pmf_dbgfs_unregister(dev);
+ mutex_destroy(&dev->lock);
+ mutex_destroy(&dev->update_mutex);
kfree(dev->buf);
return 0;
}
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 84bbe2c6ea61..06c30cdc0573 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -169,6 +169,7 @@ struct amd_pmf_dev {
struct mutex update_mutex; /* protects race between ACPI handler and metrics thread */
bool cnqf_enabled;
bool cnqf_supported;
+ struct notifier_block pwr_src_notifier;
};
struct apmf_sps_prop_granular {
@@ -391,9 +392,11 @@ int amd_pmf_init_sps(struct amd_pmf_dev *dev);
void amd_pmf_deinit_sps(struct amd_pmf_dev *dev);
int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
struct apmf_static_slider_granular_output *output);
+bool is_pprof_balanced(struct amd_pmf_dev *pmf);
int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
+int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf);
/* Auto Mode Layer */
int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data);
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index dba7e36962dc..bed762d47a14 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -70,6 +70,24 @@ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
}
}
+int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
+{
+ int mode;
+
+ mode = amd_pmf_get_pprof_modes(pmf);
+ if (mode < 0)
+ return mode;
+
+ amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
+
+ return 0;
+}
+
+bool is_pprof_balanced(struct amd_pmf_dev *pmf)
+{
+ return (pmf->current_profile == PLATFORM_PROFILE_BALANCED) ? true : false;
+}
+
static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
enum platform_profile_option *profile)
{
@@ -105,15 +123,10 @@ static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
enum platform_profile_option profile)
{
struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
- int mode;
pmf->current_profile = profile;
- mode = amd_pmf_get_pprof_modes(pmf);
- if (mode < 0)
- return mode;
- amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
- return 0;
+ return amd_pmf_set_sps_power_limits(pmf);
}
int amd_pmf_init_sps(struct amd_pmf_dev *dev)
@@ -123,6 +136,9 @@ int amd_pmf_init_sps(struct amd_pmf_dev *dev)
dev->current_profile = PLATFORM_PROFILE_BALANCED;
amd_pmf_load_defaults_sps(dev);
+ /* update SPS balanced power mode thermals */
+ amd_pmf_set_sps_power_limits(dev);
+
dev->pprof.profile_get = amd_pmf_profile_get;
dev->pprof.profile_set = amd_pmf_profile_set;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 02860c32625e..32c10457399e 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -5563,7 +5563,7 @@ static int light_sysfs_set(struct led_classdev *led_cdev,
static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev)
{
- return (light_get_status() == 1) ? LED_FULL : LED_OFF;
+ return (light_get_status() == 1) ? LED_ON : LED_OFF;
}
static struct tpacpi_led_classdev tpacpi_led_thinklight = {
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index f00995390fdf..13802a3c3591 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -1098,6 +1098,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi Vi8 (CWI501) */
+ .driver_data = (void *)&chuwi_vi8_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
+ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.W86JLBNR01"),
+ },
+ },
+ {
/* Chuwi Vi8 (CWI506) */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index fe86a09e3b67..c03b5402c03b 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -155,10 +155,12 @@ void idle_inject_set_duration(struct idle_inject_device *ii_dev,
unsigned int run_duration_us,
unsigned int idle_duration_us)
{
- if (run_duration_us && idle_duration_us) {
+ if (run_duration_us + idle_duration_us) {
WRITE_ONCE(ii_dev->run_duration_us, run_duration_us);
WRITE_ONCE(ii_dev->idle_duration_us, idle_duration_us);
}
+ if (!run_duration_us)
+ pr_debug("CPU is forced to 100 percent idle\n");
}
/**
@@ -201,7 +203,7 @@ int idle_inject_start(struct idle_inject_device *ii_dev)
unsigned int idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
unsigned int run_duration_us = READ_ONCE(ii_dev->run_duration_us);
- if (!idle_duration_us || !run_duration_us)
+ if (!(idle_duration_us + run_duration_us))
return -EINVAL;
pr_debug("Starting injecting idle cycles on CPUs '%*pbl'\n",
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 26d00b1853b4..8970c7b80884 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -999,7 +999,15 @@ static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value,
do_div(value, rp->time_unit);
y = ilog2(value);
- f = div64_u64(4 * (value - (1 << y)), 1 << y);
+
+ /*
+ * The target hardware field is 7 bits wide, so return all ones
+ * if the exponent is too large.
+ */
+ if (y > 0x1f)
+ return 0x7f;
+
+ f = div64_u64(4 * (value - (1ULL << y)), 1ULL << y);
value = (y & 0x1f) | ((f & 0x3) << 5);
}
return value;
@@ -1113,7 +1121,10 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
+ X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &rapl_defaults_byt),
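
A small, self-contained illustration (not part of the patch above) of the 7-bit time-window encoding the overflow fix protects: a 5-bit exponent y plus a 2-bit fraction f, representing roughly (1 << y) * (4 + f) / 4 time units, saturating to 0x7f when y cannot fit:

#include <stdio.h>

static unsigned int encode_window(unsigned long long units)
{
        unsigned int y = 0, f;

        if (!units)
                return 0;
        while (y < 63 && (units >> (y + 1)))    /* y = ilog2(units) */
                y++;
        if (y > 0x1f)
                return 0x7f;            /* exponent field is only 5 bits wide */
        f = (unsigned int)(4 * (units - (1ULL << y)) / (1ULL << y));
        return (y & 0x1f) | ((f & 0x3) << 5);
}

int main(void)
{
        printf("encode(10)      = 0x%02x\n", encode_window(10));        /* 0x23 */
        printf("encode(1 << 40) = 0x%02x\n", encode_window(1ULL << 40)); /* 0x7f */
        return 0;
}
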
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 1f968353d479..e180dee0f83d 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -530,9 +530,6 @@ struct powercap_zone *powercap_register_zone(
power_zone->name = kstrdup(name, GFP_KERNEL);
if (!power_zone->name)
goto err_name_alloc;
- dev_set_name(&power_zone->dev, "%s:%x",
- dev_name(power_zone->dev.parent),
- power_zone->id);
power_zone->constraints = kcalloc(nr_constraints,
sizeof(*power_zone->constraints),
GFP_KERNEL);
@@ -555,9 +552,16 @@ struct powercap_zone *powercap_register_zone(
power_zone->dev_attr_groups[0] = &power_zone->dev_zone_attr_group;
power_zone->dev_attr_groups[1] = NULL;
power_zone->dev.groups = power_zone->dev_attr_groups;
+ dev_set_name(&power_zone->dev, "%s:%x",
+ dev_name(power_zone->dev.parent),
+ power_zone->id);
result = device_register(&power_zone->dev);
- if (result)
- goto err_dev_ret;
+ if (result) {
+ put_device(&power_zone->dev);
+ mutex_unlock(&control_type->lock);
+
+ return ERR_PTR(result);
+ }
control_type->nr_zones++;
mutex_unlock(&control_type->lock);
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index e991cccdb6e9..1e8bc6cc1e12 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -188,9 +188,10 @@ static int efi_set_time(struct device *dev, struct rtc_time *tm)
static int efi_procfs(struct device *dev, struct seq_file *seq)
{
- efi_time_t eft, alm;
- efi_time_cap_t cap;
- efi_bool_t enabled, pending;
+ efi_time_t eft, alm;
+ efi_time_cap_t cap;
+ efi_bool_t enabled, pending;
+ struct rtc_device *rtc = dev_get_drvdata(dev);
memset(&eft, 0, sizeof(eft));
memset(&alm, 0, sizeof(alm));
@@ -213,23 +214,25 @@ static int efi_procfs(struct device *dev, struct seq_file *seq)
/* XXX fixme: convert to string? */
seq_printf(seq, "Timezone\t: %u\n", eft.timezone);
- seq_printf(seq,
- "Alarm Time\t: %u:%u:%u.%09u\n"
- "Alarm Date\t: %u-%u-%u\n"
- "Alarm Daylight\t: %u\n"
- "Enabled\t\t: %s\n"
- "Pending\t\t: %s\n",
- alm.hour, alm.minute, alm.second, alm.nanosecond,
- alm.year, alm.month, alm.day,
- alm.daylight,
- enabled == 1 ? "yes" : "no",
- pending == 1 ? "yes" : "no");
-
- if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
- seq_puts(seq, "Timezone\t: unspecified\n");
- else
- /* XXX fixme: convert to string? */
- seq_printf(seq, "Timezone\t: %u\n", alm.timezone);
+ if (test_bit(RTC_FEATURE_ALARM, rtc->features)) {
+ seq_printf(seq,
+ "Alarm Time\t: %u:%u:%u.%09u\n"
+ "Alarm Date\t: %u-%u-%u\n"
+ "Alarm Daylight\t: %u\n"
+ "Enabled\t\t: %s\n"
+ "Pending\t\t: %s\n",
+ alm.hour, alm.minute, alm.second, alm.nanosecond,
+ alm.year, alm.month, alm.day,
+ alm.daylight,
+ enabled == 1 ? "yes" : "no",
+ pending == 1 ? "yes" : "no");
+
+ if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
+ seq_puts(seq, "Timezone\t: unspecified\n");
+ else
+ /* XXX fixme: convert to string? */
+ seq_printf(seq, "Timezone\t: %u\n", alm.timezone);
+ }
/*
* now prints the capabilities
@@ -269,7 +272,10 @@ static int __init efi_rtc_probe(struct platform_device *dev)
rtc->ops = &efi_rtc_ops;
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
- set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features);
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_WAKEUP_SERVICES))
+ set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features);
+ else
+ clear_bit(RTC_FEATURE_ALARM, rtc->features);
device_init_wakeup(&dev->dev, true);
diff --git a/drivers/rtc/rtc-sunplus.c b/drivers/rtc/rtc-sunplus.c
index e8e2ab1103fc..4b578e4d44f6 100644
--- a/drivers/rtc/rtc-sunplus.c
+++ b/drivers/rtc/rtc-sunplus.c
@@ -240,8 +240,8 @@ static int sp_rtc_probe(struct platform_device *plat_dev)
if (IS_ERR(sp_rtc->reg_base))
return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->reg_base),
"%s devm_ioremap_resource fail\n", RTC_REG_NAME);
- dev_dbg(&plat_dev->dev, "res = 0x%x, reg_base = 0x%lx\n",
- sp_rtc->res->start, (unsigned long)sp_rtc->reg_base);
+ dev_dbg(&plat_dev->dev, "res = %pR, reg_base = %p\n",
+ sp_rtc->res, sp_rtc->reg_base);
sp_rtc->irq = platform_get_irq(plat_dev, 0);
if (sp_rtc->irq < 0)
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1426b9b03612..9feb0323bc44 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -588,8 +588,6 @@ void scsi_device_put(struct scsi_device *sdev)
{
struct module *mod = sdev->host->hostt->module;
- might_sleep();
-
put_device(&sdev->sdev_gendev);
module_put(mod);
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 7a6904a3928e..f9b18fdc7b3c 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1232,8 +1232,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* that no LUN is present, so don't add sdev in these cases.
* Two specific examples are:
* 1) NetApp targets: return PQ=1, PDT=0x1f
- * 2) IBM/2145 targets: return PQ=1, PDT=0
- * 3) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
+ * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
* in the UFI 1.0 spec (we cannot rely on reserved bits).
*
* References:
@@ -1247,8 +1246,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* PDT=00h Direct-access device (floppy)
* PDT=1Fh none (no FDD connected to the requested logical unit)
*/
- if (((result[0] >> 5) == 1 ||
- (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) &&
+ if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
+ (result[0] & 0x1f) == 0x1f &&
!scsi_is_wlun(lun)) {
SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
"scsi scan: peripheral device type"
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 981d1bab2120..8ef9a5494340 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -451,6 +451,8 @@ static void scsi_device_dev_release(struct device *dev)
struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
unsigned long flags;
+ might_sleep();
+
scsi_dh_release_device(sdev);
parent = sdev->sdev_gendev.parent;
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index 99edddf9958b..c3bfb6c84cab 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -366,7 +366,7 @@ static void dw_spi_irq_setup(struct dw_spi *dws)
* will be adjusted at the final stage of the IRQ-based SPI transfer
* execution so not to lose the leftover of the incoming data.
*/
- level = min_t(u16, dws->fifo_len / 2, dws->tx_len);
+ level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len);
dw_writel(dws, DW_SPI_TXFTLR, level);
dw_writel(dws, DW_SPI_RXFTLR, level - 1);
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 1935ca613447..a1ea093795cf 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -90,9 +90,21 @@ MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
/*-------------------------------------------------------------------------*/
static ssize_t
+spidev_sync_unlocked(struct spi_device *spi, struct spi_message *message)
+{
+ ssize_t status;
+
+ status = spi_sync(spi, message);
+ if (status == 0)
+ status = message->actual_length;
+
+ return status;
+}
+
+static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
- int status;
+ ssize_t status;
struct spi_device *spi;
mutex_lock(&spidev->spi_lock);
@@ -101,12 +113,10 @@ spidev_sync(struct spidev_data *spidev, struct spi_message *message)
if (spi == NULL)
status = -ESHUTDOWN;
else
- status = spi_sync(spi, message);
-
- if (status == 0)
- status = message->actual_length;
+ status = spidev_sync_unlocked(spi, message);
mutex_unlock(&spidev->spi_lock);
+
return status;
}
@@ -294,7 +304,7 @@ static int spidev_message(struct spidev_data *spidev,
spi_message_add_tail(k_tmp, &msg);
}
- status = spidev_sync(spidev, &msg);
+ status = spidev_sync_unlocked(spidev->spi, &msg);
if (status < 0)
goto done;
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 37d6af2ec427..7fa66501792d 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -43,15 +43,23 @@ static void __dma_rx_complete(struct uart_8250_port *p)
struct uart_8250_dma *dma = p->dma;
struct tty_port *tty_port = &p->port.state->port;
struct dma_tx_state state;
+ enum dma_status dma_status;
int count;
- dma->rx_running = 0;
- dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+ /*
+ * A new DMA Rx can be started during the completion handler before it
+ * could acquire the port's lock and it might still be ongoing. Don't do
+ * anything in that case.
+ */
+ dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+ if (dma_status == DMA_IN_PROGRESS)
+ return;
count = dma->rx_size - state.residue;
tty_insert_flip_string(tty_port, dma->rx_buf, count);
p->port.icount.rx += count;
+ dma->rx_running = 0;
tty_flip_buffer_push(tty_port);
}
@@ -62,9 +70,14 @@ static void dma_rx_complete(void *param)
struct uart_8250_dma *dma = p->dma;
unsigned long flags;
- __dma_rx_complete(p);
-
spin_lock_irqsave(&p->port.lock, flags);
+ if (dma->rx_running)
+ __dma_rx_complete(p);
+
+ /*
+ * Cannot be combined with the previous check because __dma_rx_complete()
+ * changes dma->rx_running.
+ */
if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
p->dma->rx_dma(p);
spin_unlock_irqrestore(&p->port.lock, flags);
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index a1490033aa16..409e91d6829a 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -797,25 +797,11 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
spin_unlock(&port->lock);
}
- if (stm32_usart_rx_dma_enabled(port))
- return IRQ_WAKE_THREAD;
- else
- return IRQ_HANDLED;
-}
-
-static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
-{
- struct uart_port *port = ptr;
- struct tty_port *tport = &port->state->port;
- struct stm32_port *stm32_port = to_stm32_port(port);
- unsigned int size;
- unsigned long flags;
-
/* Receiver timeout irq for DMA RX */
- if (!stm32_port->throttled) {
- spin_lock_irqsave(&port->lock, flags);
+ if (stm32_usart_rx_dma_enabled(port) && !stm32_port->throttled) {
+ spin_lock(&port->lock);
size = stm32_usart_receive_chars(port, false);
- uart_unlock_and_check_sysrq_irqrestore(port, flags);
+ uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
}
@@ -1015,10 +1001,8 @@ static int stm32_usart_startup(struct uart_port *port)
u32 val;
int ret;
- ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
- stm32_usart_threaded_interrupt,
- IRQF_ONESHOT | IRQF_NO_SUSPEND,
- name, port);
+ ret = request_irq(port->irq, stm32_usart_interrupt,
+ IRQF_NO_SUSPEND, name, port);
if (ret)
return ret;
@@ -1601,13 +1585,6 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
struct dma_slave_config config;
int ret;
- /*
- * Using DMA and threaded handler for the console could lead to
- * deadlocks.
- */
- if (uart_console(port))
- return -ENODEV;
-
stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
&stm32port->rx_dma_buf,
GFP_KERNEL);
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 1850bacdb5b0..f566eb1839dc 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -386,10 +386,6 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
uni_mode = use_unicode(inode);
attr = use_attributes(inode);
- ret = -ENXIO;
- vc = vcs_vc(inode, &viewed);
- if (!vc)
- goto unlock_out;
ret = -EINVAL;
if (pos < 0)
@@ -407,6 +403,11 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
unsigned int this_round, skip = 0;
int size;
+ ret = -ENXIO;
+ vc = vcs_vc(inode, &viewed);
+ if (!vc)
+ goto unlock_out;
+
/* Check whether we are above size each round,
* as copy_to_user at the end of this loop
* could sleep.
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 079e183cf3bf..934b3d997702 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -526,6 +526,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ /* Alcor Link AK9563 SC Reader used in 2022 Lenovo ThinkPads */
+ { USB_DEVICE(0x2ce3, 0x9563), .driver_info = USB_QUIRK_NO_LPM },
+
/* DELL USB GEN2 */
{ USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index b0a0351d2d8b..959fc925ca7c 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -901,7 +901,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev);
/* enable vbus override for device mode */
- if (qcom->mode == USB_DR_MODE_PERIPHERAL)
+ if (qcom->mode != USB_DR_MODE_HOST)
dwc3_qcom_vbus_override_enable(qcom, true);
/* register extcon to override sw_vbus on Vbus change later */
diff --git a/drivers/usb/fotg210/fotg210-udc.c b/drivers/usb/fotg210/fotg210-udc.c
index 87cca81bf4ac..eb076746f032 100644
--- a/drivers/usb/fotg210/fotg210-udc.c
+++ b/drivers/usb/fotg210/fotg210-udc.c
@@ -1014,7 +1014,6 @@ static int fotg210_udc_start(struct usb_gadget *g,
int ret;
/* hook up the driver */
- driver->driver.bus = NULL;
fotg210->driver = driver;
if (!IS_ERR_OR_NULL(fotg210->phy)) {
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 523a961b910b..8ad354741380 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -279,8 +279,10 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
struct usb_request *req = ffs->ep0req;
int ret;
- if (!req)
+ if (!req) {
+ spin_unlock_irq(&ffs->ev.waitq.lock);
return -EINVAL;
+ }
req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 08726e4c68a5..0219cd79493a 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1142,6 +1142,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
}
std_as_out_if0_desc.bInterfaceNumber = ret;
std_as_out_if1_desc.bInterfaceNumber = ret;
+ std_as_out_if1_desc.bNumEndpoints = 1;
uac2->as_out_intf = ret;
uac2->as_out_alt = 0;
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 8f12f3f8f6ee..e06022873df1 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -798,6 +798,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
net->max_mtu = GETHER_MAX_MTU_SIZE;
dev->gadget = g;
+ SET_NETDEV_DEV(net, &g->dev);
SET_NETDEV_DEVTYPE(net, &gadget_type);
status = register_netdev(net);
@@ -872,6 +873,8 @@ int gether_register_netdev(struct net_device *net)
struct usb_gadget *g;
int status;
+ if (!net->dev.parent)
+ return -EINVAL;
dev = netdev_priv(net);
g = dev->gadget;
@@ -902,6 +905,7 @@ void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
dev = netdev_priv(net);
dev->gadget = g;
+ SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index 2cdb07905bde..d04d72f5816e 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -1830,7 +1830,6 @@ static int bcm63xx_udc_start(struct usb_gadget *gadget,
bcm63xx_select_phy_mode(udc, true);
udc->driver = driver;
- driver->driver.bus = NULL;
udc->gadget.dev.of_node = udc->dev->of_node;
spin_unlock_irqrestore(&udc->lock, flags);
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index bf745358e28e..3b1cc8fa30c8 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2285,7 +2285,6 @@ static int fsl_qe_start(struct usb_gadget *gadget,
/* lock is needed but whether should use this lock or another */
spin_lock_irqsave(&udc->lock, flags);
- driver->driver.bus = NULL;
/* hook up the driver */
udc->driver = driver;
udc->gadget.speed = driver->max_speed;
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 50435e804118..a67873a074b7 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1943,7 +1943,6 @@ static int fsl_udc_start(struct usb_gadget *g,
/* lock is needed but whether should use this lock or another */
spin_lock_irqsave(&udc_controller->lock, flags);
- driver->driver.bus = NULL;
/* hook up the driver */
udc_controller->driver = driver;
spin_unlock_irqrestore(&udc_controller->lock, flags);
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index 9af8b415f303..5954800d652c 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1311,7 +1311,6 @@ static int fusb300_udc_start(struct usb_gadget *g,
struct fusb300 *fusb300 = to_fusb300(g);
/* hook up the driver */
- driver->driver.bus = NULL;
fusb300->driver = driver;
return 0;
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index bdc56b24b5c9..5ffb3d5c635b 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -1375,7 +1375,6 @@ static int goku_udc_start(struct usb_gadget *g,
struct goku_udc *dev = to_goku_udc(g);
/* hook up the driver */
- driver->driver.bus = NULL;
dev->driver = driver;
/*
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 22096f8505de..85cdc0af3bf9 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -1906,7 +1906,6 @@ static int gr_udc_start(struct usb_gadget *gadget,
spin_lock(&dev->lock);
/* Hook up the driver */
- driver->driver.bus = NULL;
dev->driver = driver;
/* Get ready for host detection */
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index c7e421b449f3..06e21cee431b 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1454,7 +1454,6 @@ static int m66592_udc_start(struct usb_gadget *g,
struct m66592 *m66592 = to_m66592(g);
/* hook up the driver */
- driver->driver.bus = NULL;
m66592->driver = driver;
m66592_bset(m66592, M66592_VBSE | M66592_URST, M66592_INTENB0);
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
index 3074da00c3df..ddf0ed3eb4f2 100644
--- a/drivers/usb/gadget/udc/max3420_udc.c
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -1108,7 +1108,6 @@ static int max3420_udc_start(struct usb_gadget *gadget,
spin_lock_irqsave(&udc->lock, flags);
/* hook up the driver */
- driver->driver.bus = NULL;
udc->driver = driver;
udc->gadget.speed = USB_SPEED_FULL;
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 598654a3cb41..411b6179782c 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1243,7 +1243,6 @@ static int mv_u3d_start(struct usb_gadget *g,
}
/* hook up the driver ... */
- driver->driver.bus = NULL;
u3d->driver = driver;
u3d->ep0_dir = USB_DIR_OUT;
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index fdb17d86cd65..b397f3a848cf 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -1359,7 +1359,6 @@ static int mv_udc_start(struct usb_gadget *gadget,
spin_lock_irqsave(&udc->lock, flags);
/* hook up the driver ... */
- driver->driver.bus = NULL;
udc->driver = driver;
udc->usb_state = USB_STATE_ATTACHED;
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 84605a4d0715..538c1b9a2883 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -1451,7 +1451,6 @@ static int net2272_start(struct usb_gadget *_gadget,
dev->ep[i].irqs = 0;
/* hook up the driver ... */
dev->softconnect = 1;
- driver->driver.bus = NULL;
dev->driver = driver;
/* ... then enable host detection and ep0; and we're ready
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index d6a68631354a..1b929c519cd7 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -2423,7 +2423,6 @@ static int net2280_start(struct usb_gadget *_gadget,
dev->ep[i].irqs = 0;
/* hook up the driver ... */
- driver->driver.bus = NULL;
dev->driver = driver;
retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index bea346e362b2..f660ebfa1379 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2066,7 +2066,6 @@ static int omap_udc_start(struct usb_gadget *g,
udc->softconnect = 1;
/* hook up the driver */
- driver->driver.bus = NULL;
udc->driver = driver;
spin_unlock_irqrestore(&udc->lock, flags);
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 9bb7a9d7a2fb..4f8617210d85 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -2908,7 +2908,6 @@ static int pch_udc_start(struct usb_gadget *g,
{
struct pch_udc_dev *dev = to_pch_udc(g);
- driver->driver.bus = NULL;
dev->driver = driver;
/* get ready for ep0 traffic */
diff --git a/drivers/usb/gadget/udc/snps_udc_core.c b/drivers/usb/gadget/udc/snps_udc_core.c
index 52ea4dcf6a92..2fc5d4d277bc 100644
--- a/drivers/usb/gadget/udc/snps_udc_core.c
+++ b/drivers/usb/gadget/udc/snps_udc_core.c
@@ -1933,7 +1933,6 @@ static int amd5536_udc_start(struct usb_gadget *g,
struct udc *dev = to_amd5536_udc(g);
u32 tmp;
- driver->driver.bus = NULL;
dev->driver = driver;
/* Some gadget drivers use both ep0 directions.
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 9a6860285fbe..50b24096eb7f 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -535,10 +535,10 @@ int dp_altmode_probe(struct typec_altmode *alt)
/* FIXME: Port can only be DFP_U. */
/* Make sure we have compatible pin configurations */
- if (!(DP_CAP_DFP_D_PIN_ASSIGN(port->vdo) &
- DP_CAP_UFP_D_PIN_ASSIGN(alt->vdo)) &&
- !(DP_CAP_UFP_D_PIN_ASSIGN(port->vdo) &
- DP_CAP_DFP_D_PIN_ASSIGN(alt->vdo)))
+ if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) &
+ DP_CAP_PIN_ASSIGN_UFP_D(alt->vdo)) &&
+ !(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) &
+ DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
return -ENODEV;
ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 1292241d581a..1cf8947c6d66 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1269,6 +1269,9 @@ err_unregister:
con->port = NULL;
}
+ kfree(ucsi->connector);
+ ucsi->connector = NULL;
+
err_reset:
memset(&ucsi->cap, 0, sizeof(ucsi->cap));
ucsi_reset_ppm(ucsi);
@@ -1300,7 +1303,8 @@ static void ucsi_resume_work(struct work_struct *work)
int ucsi_resume(struct ucsi *ucsi)
{
- queue_work(system_long_wq, &ucsi->resume_work);
+ if (ucsi->connector)
+ queue_work(system_long_wq, &ucsi->resume_work);
return 0;
}
EXPORT_SYMBOL_GPL(ucsi_resume);
@@ -1420,6 +1424,9 @@ void ucsi_unregister(struct ucsi *ucsi)
/* Disable notifications */
ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
+ if (!ucsi->connector)
+ return;
+
for (i = 0; i < ucsi->cap.num_connectors; i++) {
cancel_work_sync(&ucsi->connector[i].work);
ucsi_unregister_partner(&ucsi->connector[i]);
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index f9c0044c6442..44b29289aa19 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -849,7 +849,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = ifcvf_init_hw(vf, pdev);
if (ret) {
IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
- return ret;
+ goto err;
}
for (i = 0; i < vf->nr_vring; i++)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9af19b0cf3b7..4c538b30fd76 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1511,6 +1511,9 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
nvq = &n->vqs[index];
mutex_lock(&vq->mutex);
+ if (fd == -1)
+ vhost_clear_msg(&n->dev);
+
/* Verify that ring has been setup correctly. */
if (!vhost_vq_access_ok(vq)) {
r = -EFAULT;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index dca6346d75b3..d5ecb8876fc9 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -80,7 +80,7 @@ struct vhost_scsi_cmd {
struct scatterlist *tvc_prot_sgl;
struct page **tvc_upages;
/* Pointer to response header iovec */
- struct iovec tvc_resp_iov;
+ struct iovec *tvc_resp_iov;
/* Pointer to vhost_scsi for our device */
struct vhost_scsi *tvc_vhost;
/* Pointer to vhost_virtqueue for the cmd */
@@ -563,7 +563,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
memcpy(v_rsp.sense, cmd->tvc_sense_buf,
se_cmd->scsi_sense_length);
- iov_iter_init(&iov_iter, ITER_DEST, &cmd->tvc_resp_iov,
+ iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
cmd->tvc_in_iovs, sizeof(v_rsp));
ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
if (likely(ret == sizeof(v_rsp))) {
@@ -594,6 +594,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
struct vhost_scsi_cmd *cmd;
struct vhost_scsi_nexus *tv_nexus;
struct scatterlist *sg, *prot_sg;
+ struct iovec *tvc_resp_iov;
struct page **pages;
int tag;
@@ -613,6 +614,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
sg = cmd->tvc_sgl;
prot_sg = cmd->tvc_prot_sgl;
pages = cmd->tvc_upages;
+ tvc_resp_iov = cmd->tvc_resp_iov;
memset(cmd, 0, sizeof(*cmd));
cmd->tvc_sgl = sg;
cmd->tvc_prot_sgl = prot_sg;
@@ -625,6 +627,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
cmd->tvc_data_direction = data_direction;
cmd->tvc_nexus = tv_nexus;
cmd->inflight = vhost_scsi_get_inflight(vq);
+ cmd->tvc_resp_iov = tvc_resp_iov;
memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
@@ -935,7 +938,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
struct iov_iter in_iter, prot_iter, data_iter;
u64 tag;
u32 exp_data_len, data_direction;
- int ret, prot_bytes, c = 0;
+ int ret, prot_bytes, i, c = 0;
u16 lun;
u8 task_attr;
bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
@@ -1092,7 +1095,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
}
cmd->tvc_vhost = vs;
cmd->tvc_vq = vq;
- cmd->tvc_resp_iov = vq->iov[vc.out];
+ for (i = 0; i < vc.in ; i++)
+ cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
cmd->tvc_in_iovs = vc.in;
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
@@ -1461,6 +1465,7 @@ static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
kfree(tv_cmd->tvc_sgl);
kfree(tv_cmd->tvc_prot_sgl);
kfree(tv_cmd->tvc_upages);
+ kfree(tv_cmd->tvc_resp_iov);
}
sbitmap_free(&svq->scsi_tags);
@@ -1508,6 +1513,14 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
goto out;
}
+ tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
+ sizeof(struct iovec),
+ GFP_KERNEL);
+ if (!tv_cmd->tvc_resp_iov) {
+ pr_err("Unable to allocate tv_cmd->tvc_resp_iov\n");
+ goto out;
+ }
+
tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
sizeof(struct scatterlist),
GFP_KERNEL);
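For reference, a hedged sketch (not from the patch) of the pattern the vhost-scsi change depends on: the virtio-scsi response may be scattered across several guest "in" iovecs, so the completion path builds an iov_iter over the saved array rather than over a single struct iovec. The helper name and signature below are illustrative only.

#include <linux/uio.h>

static int copy_resp_to_guest(const void *resp, size_t resp_len,
			      struct iovec *in_iov, unsigned long nr_in)
{
	struct iov_iter iter;

	/* Describe all of the guest's response buffers at once. */
	iov_iter_init(&iter, ITER_DEST, in_iov, nr_in, resp_len);
	if (copy_to_iter(resp, resp_len, &iter) != resp_len)
		return -EFAULT;	/* short copy: guest buffers too small */
	return 0;
}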
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index cbe72bfd2f1f..43c9770b86e5 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -661,7 +661,7 @@ void vhost_dev_stop(struct vhost_dev *dev)
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
-static void vhost_clear_msg(struct vhost_dev *dev)
+void vhost_clear_msg(struct vhost_dev *dev)
{
struct vhost_msg_node *node, *n;
@@ -679,6 +679,7 @@ static void vhost_clear_msg(struct vhost_dev *dev)
spin_unlock(&dev->iotlb_lock);
}
+EXPORT_SYMBOL_GPL(vhost_clear_msg);
void vhost_dev_cleanup(struct vhost_dev *dev)
{
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index d9109107af08..790b296271f1 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -181,6 +181,7 @@ long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
+void vhost_clear_msg(struct vhost_dev *dev);
int vhost_get_vq_desc(struct vhost_virtqueue *,
struct iovec iov[], unsigned int iov_count,
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 1fc8de4ecbeb..8187a7c4f910 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -49,7 +49,6 @@ struct atmel_lcdfb_info {
struct clk *lcdc_clk;
struct backlight_device *backlight;
- u8 bl_power;
u8 saved_lcdcon;
u32 pseudo_palette[16];
@@ -109,22 +108,7 @@ static u32 contrast_ctr = ATMEL_LCDC_PS_DIV8
static int atmel_bl_update_status(struct backlight_device *bl)
{
struct atmel_lcdfb_info *sinfo = bl_get_data(bl);
- int power = sinfo->bl_power;
- int brightness = bl->props.brightness;
-
- /* REVISIT there may be a meaningful difference between
- * fb_blank and power ... there seem to be some cases
- * this doesn't handle correctly.
- */
- if (bl->props.fb_blank != sinfo->bl_power)
- power = bl->props.fb_blank;
- else if (bl->props.power != sinfo->bl_power)
- power = bl->props.power;
-
- if (brightness < 0 && power == FB_BLANK_UNBLANK)
- brightness = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
- else if (power != FB_BLANK_UNBLANK)
- brightness = 0;
+ int brightness = backlight_get_brightness(bl);
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, brightness);
if (contrast_ctr & ATMEL_LCDC_POL_POSITIVE)
@@ -133,8 +117,6 @@ static int atmel_bl_update_status(struct backlight_device *bl)
else
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, contrast_ctr);
- bl->props.fb_blank = bl->props.power = sinfo->bl_power = power;
-
return 0;
}
@@ -155,8 +137,6 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo)
struct backlight_properties props;
struct backlight_device *bl;
- sinfo->bl_power = FB_BLANK_UNBLANK;
-
if (sinfo->backlight)
return;
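This and several of the following fbdev backlight conversions replace open-coded props.power/props.fb_blank checks with backlight_get_brightness(). As a hedged approximation (not the exact upstream definition), the helper behaves roughly as below, folding the blanked and suspended cases into a brightness of zero:

#include <linux/fb.h>
#include <linux/backlight.h>

static inline int approx_get_brightness(const struct backlight_device *bd)
{
	/* A powered-off, blanked or suspended backlight reads as dark. */
	if (bd->props.power != FB_BLANK_UNBLANK ||
	    bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
		return 0;
	return bd->props.brightness;
}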
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index dd31b9d7d337..36a9ac05a340 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -1766,12 +1766,10 @@ static int aty128_bl_update_status(struct backlight_device *bd)
unsigned int reg = aty_ld_le32(LVDS_GEN_CNTL);
int level;
- if (bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK ||
- !par->lcd_on)
+ if (!par->lcd_on)
level = 0;
else
- level = bd->props.brightness;
+ level = backlight_get_brightness(bd);
reg |= LVDS_BL_MOD_EN | LVDS_BLON;
if (level > 0) {
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index d59215a4992e..b02e4e645035 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -2219,13 +2219,7 @@ static int aty_bl_update_status(struct backlight_device *bd)
{
struct atyfb_par *par = bl_get_data(bd);
unsigned int reg = aty_ld_lcd(LCD_MISC_CNTL, par);
- int level;
-
- if (bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK)
- level = 0;
- else
- level = bd->props.brightness;
+ int level = backlight_get_brightness(bd);
reg |= (BLMOD_EN | BIASMOD_EN);
if (level > 0) {
diff --git a/drivers/video/fbdev/aty/radeon_backlight.c b/drivers/video/fbdev/aty/radeon_backlight.c
index d2c1263ad260..427adc838f77 100644
--- a/drivers/video/fbdev/aty/radeon_backlight.c
+++ b/drivers/video/fbdev/aty/radeon_backlight.c
@@ -57,11 +57,7 @@ static int radeon_bl_update_status(struct backlight_device *bd)
* backlight. This provides some greater power saving and the display
* is useless without backlight anyway.
*/
- if (bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK)
- level = 0;
- else
- level = bd->props.brightness;
+ level = backlight_get_brightness(bd);
del_timer_sync(&rinfo->lvds_timer);
radeon_engine_idle();
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 14a7d404062c..1b14c21af2b7 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -2495,9 +2495,12 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres))
return -EINVAL;
+ if (font->width > 32 || font->height > 32)
+ return -EINVAL;
+
/* Make sure drawing engine can handle the font */
- if (!(info->pixmap.blit_x & (1 << (font->width - 1))) ||
- !(info->pixmap.blit_y & (1 << (font->height - 1))))
+ if (!(info->pixmap.blit_x & BIT(font->width - 1)) ||
+ !(info->pixmap.blit_y & BIT(font->height - 1)))
return -EINVAL;
/* Make sure driver can handle the font length */
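One reason the explicit 32-pixel bound above helps, stated here as an assumption about the 32-bit blit masks: BIT(n - 1) is only a defined shift for n within the word size, so the range check has to run before the mask test. Illustration only:

#include <linux/bits.h>
#include <linux/types.h>

static bool blit_size_supported(u32 blit_mask, unsigned int size)
{
	/* Reject sizes the 32-bit mask cannot describe before shifting. */
	if (size == 0 || size > 32)
		return false;
	return blit_mask & BIT(size - 1);
}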
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index b0e690f41025..79e5bfbdd34c 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1050,7 +1050,7 @@ static u32 fb_get_vblank(u32 hfreq)
}
/**
- * fb_get_hblank_by_freq - get horizontal blank time given hfreq
+ * fb_get_hblank_by_hfreq - get horizontal blank time given hfreq
* @hfreq: horizontal freq
* @xres: horizontal resolution in pixels
*
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index b945b68984b9..76771e126d0a 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -283,12 +283,7 @@ static int mx3fb_bl_get_brightness(struct backlight_device *bl)
static int mx3fb_bl_update_status(struct backlight_device *bl)
{
struct mx3fb_data *fbd = bl_get_data(bl);
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
+ int brightness = backlight_get_brightness(bl);
fbd->backlight_level = (fbd->backlight_level & ~0xFF) | brightness;
diff --git a/drivers/video/fbdev/nvidia/nv_backlight.c b/drivers/video/fbdev/nvidia/nv_backlight.c
index 2ce53529f636..503a7a683855 100644
--- a/drivers/video/fbdev/nvidia/nv_backlight.c
+++ b/drivers/video/fbdev/nvidia/nv_backlight.c
@@ -49,17 +49,11 @@ static int nvidia_bl_update_status(struct backlight_device *bd)
{
struct nvidia_par *par = bl_get_data(bd);
u32 tmp_pcrt, tmp_pmc, fpcontrol;
- int level;
+ int level = backlight_get_brightness(bd);
if (!par->FlatPanel)
return 0;
- if (bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK)
- level = 0;
- else
- level = bd->props.brightness;
-
tmp_pmc = NV_RD32(par->PMC, 0x10F0) & 0x0000FFFF;
tmp_pcrt = NV_RD32(par->PCRTC0, 0x081C) & 0xFFFFFFFC;
fpcontrol = NV_RD32(par->PRAMDAC, 0x0848) & 0xCFFFFFCC;
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index 1960916098d4..e60a276b4855 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -1197,17 +1197,17 @@ static int nvidia_set_fbinfo(struct fb_info *info)
return nvidiafb_check_var(&info->var, info);
}
-static u32 nvidia_get_chipset(struct fb_info *info)
+static u32 nvidia_get_chipset(struct pci_dev *pci_dev,
+ volatile u32 __iomem *REGS)
{
- struct nvidia_par *par = info->par;
- u32 id = (par->pci_dev->vendor << 16) | par->pci_dev->device;
+ u32 id = (pci_dev->vendor << 16) | pci_dev->device;
printk(KERN_INFO PFX "Device ID: %x \n", id);
if ((id & 0xfff0) == 0x00f0 ||
(id & 0xfff0) == 0x02e0) {
/* pci-e */
- id = NV_RD32(par->REGS, 0x1800);
+ id = NV_RD32(REGS, 0x1800);
if ((id & 0x0000ffff) == 0x000010DE)
id = 0x10DE0000 | (id >> 16);
@@ -1220,12 +1220,11 @@ static u32 nvidia_get_chipset(struct fb_info *info)
return id;
}
-static u32 nvidia_get_arch(struct fb_info *info)
+static u32 nvidia_get_arch(u32 Chipset)
{
- struct nvidia_par *par = info->par;
u32 arch = 0;
- switch (par->Chipset & 0x0ff0) {
+ switch (Chipset & 0x0ff0) {
case 0x0100: /* GeForce 256 */
case 0x0110: /* GeForce2 MX */
case 0x0150: /* GeForce2 */
@@ -1278,16 +1277,44 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
struct fb_info *info;
unsigned short cmd;
int ret;
+ volatile u32 __iomem *REGS;
+ int Chipset;
+ u32 Architecture;
NVTRACE_ENTER();
assert(pd != NULL);
+ if (pci_enable_device(pd)) {
+ printk(KERN_ERR PFX "cannot enable PCI device\n");
+ return -ENODEV;
+ }
+
+ /* enable IO and mem if not already done */
+ pci_read_config_word(pd, PCI_COMMAND, &cmd);
+ cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(pd, PCI_COMMAND, cmd);
+
+ nvidiafb_fix.mmio_start = pci_resource_start(pd, 0);
+ nvidiafb_fix.mmio_len = pci_resource_len(pd, 0);
+
+ REGS = ioremap(nvidiafb_fix.mmio_start, nvidiafb_fix.mmio_len);
+ if (!REGS) {
+ printk(KERN_ERR PFX "cannot ioremap MMIO base\n");
+ return -ENODEV;
+ }
+
+ Chipset = nvidia_get_chipset(pd, REGS);
+ Architecture = nvidia_get_arch(Chipset);
+ if (Architecture == 0) {
+ printk(KERN_ERR PFX "unknown NV_ARCH\n");
+ goto err_out;
+ }
+
ret = aperture_remove_conflicting_pci_devices(pd, "nvidiafb");
if (ret)
- return ret;
+ goto err_out;
info = framebuffer_alloc(sizeof(struct nvidia_par), &pd->dev);
-
if (!info)
goto err_out;
@@ -1298,11 +1325,6 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
if (info->pixmap.addr == NULL)
goto err_out_kfree;
- if (pci_enable_device(pd)) {
- printk(KERN_ERR PFX "cannot enable PCI device\n");
- goto err_out_enable;
- }
-
if (pci_request_regions(pd, "nvidiafb")) {
printk(KERN_ERR PFX "cannot request PCI regions\n");
goto err_out_enable;
@@ -1318,34 +1340,17 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
par->paneltweak = paneltweak;
par->reverse_i2c = reverse_i2c;
- /* enable IO and mem if not already done */
- pci_read_config_word(pd, PCI_COMMAND, &cmd);
- cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
- pci_write_config_word(pd, PCI_COMMAND, cmd);
-
- nvidiafb_fix.mmio_start = pci_resource_start(pd, 0);
nvidiafb_fix.smem_start = pci_resource_start(pd, 1);
- nvidiafb_fix.mmio_len = pci_resource_len(pd, 0);
-
- par->REGS = ioremap(nvidiafb_fix.mmio_start, nvidiafb_fix.mmio_len);
- if (!par->REGS) {
- printk(KERN_ERR PFX "cannot ioremap MMIO base\n");
- goto err_out_free_base0;
- }
+ par->REGS = REGS;
- par->Chipset = nvidia_get_chipset(info);
- par->Architecture = nvidia_get_arch(info);
-
- if (par->Architecture == 0) {
- printk(KERN_ERR PFX "unknown NV_ARCH\n");
- goto err_out_arch;
- }
+ par->Chipset = Chipset;
+ par->Architecture = Architecture;
sprintf(nvidiafb_fix.id, "NV%x", (pd->device & 0x0ff0) >> 4);
if (NVCommonSetup(info))
- goto err_out_arch;
+ goto err_out_free_base0;
par->FbAddress = nvidiafb_fix.smem_start;
par->FbMapSize = par->RamAmountKBytes * 1024;
@@ -1401,7 +1406,6 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
goto err_out_iounmap_fb;
}
-
printk(KERN_INFO PFX
"PCI nVidia %s framebuffer (%dMB @ 0x%lX)\n",
info->fix.id,
@@ -1415,15 +1419,14 @@ err_out_iounmap_fb:
err_out_free_base1:
fb_destroy_modedb(info->monspecs.modedb);
nvidia_delete_i2c_busses(par);
-err_out_arch:
- iounmap(par->REGS);
- err_out_free_base0:
+err_out_free_base0:
pci_release_regions(pd);
err_out_enable:
kfree(info->pixmap.addr);
err_out_kfree:
framebuffer_release(info);
err_out:
+ iounmap(REGS);
return -ENODEV;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
index 4fc4b26a8d30..ba94a0a7bd4f 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
@@ -331,13 +331,7 @@ static int dsicm_bl_update_status(struct backlight_device *dev)
struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
struct omap_dss_device *in = ddata->in;
int r;
- int level;
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- level = dev->props.brightness;
- else
- level = 0;
+ int level = backlight_get_brightness(dev);
dev_dbg(&ddata->pdev->dev, "update brightness to %d\n", level);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
index bc5a44c2a144..ae937854403b 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
@@ -10,6 +10,7 @@
#define DSS_SUBSYS_NAME "DISPLAY"
#include <linux/kernel.h>
+#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
@@ -36,7 +37,7 @@ static ssize_t display_enabled_store(struct omap_dss_device *dssdev,
int r;
bool enable;
- r = strtobool(buf, &enable);
+ r = kstrtobool(buf, &enable);
if (r)
return r;
@@ -73,7 +74,7 @@ static ssize_t display_tear_store(struct omap_dss_device *dssdev,
if (!dssdev->driver->enable_te || !dssdev->driver->get_te)
return -ENOENT;
- r = strtobool(buf, &te);
+ r = kstrtobool(buf, &te);
if (r)
return r;
@@ -183,7 +184,7 @@ static ssize_t display_mirror_store(struct omap_dss_device *dssdev,
if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
return -ENOENT;
- r = strtobool(buf, &mirror);
+ r = kstrtobool(buf, &mirror);
if (r)
return r;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
index ba21c4a2633d..1b644be5fe2e 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
@@ -10,6 +10,7 @@
#define DSS_SUBSYS_NAME "MANAGER"
#include <linux/kernel.h>
+#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -246,7 +247,7 @@ static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
bool enable;
int r;
- r = strtobool(buf, &enable);
+ r = kstrtobool(buf, &enable);
if (r)
return r;
@@ -290,7 +291,7 @@ static ssize_t manager_alpha_blending_enabled_store(
if(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
return -ENODEV;
- r = strtobool(buf, &enable);
+ r = kstrtobool(buf, &enable);
if (r)
return r;
@@ -329,7 +330,7 @@ static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
if (!dss_has_feature(FEAT_CPR))
return -ENODEV;
- r = strtobool(buf, &enable);
+ r = kstrtobool(buf, &enable);
if (r)
return r;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
index 601c0beb6de9..1da4fb1c77b4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
+#include <linux/kstrtox.h>
#include <linux/platform_device.h>
#include <video/omapfb_dss.h>
@@ -210,7 +211,7 @@ static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
int r;
bool enable;
- r = strtobool(buf, &enable);
+ r = kstrtobool(buf, &enable);
if (r)
return r;
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
index 06dc41aa0354..831b2c2fbdf9 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
@@ -15,6 +15,7 @@
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
+#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/omapfb.h>
@@ -96,7 +97,7 @@ static ssize_t store_mirror(struct device *dev,
int r;
struct fb_var_screeninfo new_var;
- r = strtobool(buf, &mirror);
+ r = kstrtobool(buf, &mirror);
if (r)
return r;
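The strtobool() to kstrtobool() switches in these omapfb sysfs handlers are mechanical renames; for context, a minimal store handler using the same helper (device and attribute names are illustrative):

#include <linux/device.h>
#include <linux/kstrtox.h>

static ssize_t enabled_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	bool enable;
	int ret;

	/* Accepts "0"/"1", "y"/"n" and "on"/"off"; -EINVAL otherwise. */
	ret = kstrtobool(buf, &enable);
	if (ret)
		return ret;

	/* ... apply 'enable' to the device here ... */
	return count;
}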
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index 644278146d3b..41edc6e79460 100644
--- a/drivers/video/fbdev/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
@@ -293,13 +293,7 @@ static int riva_bl_update_status(struct backlight_device *bd)
{
struct riva_par *par = bl_get_data(bd);
U032 tmp_pcrt, tmp_pmc;
- int level;
-
- if (bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK)
- level = 0;
- else
- level = bd->props.brightness;
+ int level = backlight_get_brightness(bd);
tmp_pmc = NV_RD32(par->riva.PMC, 0x10F0) & 0x0000FFFF;
tmp_pcrt = NV_RD32(par->riva.PCRTC0, 0x081C) & 0xFFFFFFFC;
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
index 4cb10877017c..6ca5d9515d85 100644
--- a/drivers/watchdog/diag288_wdt.c
+++ b/drivers/watchdog/diag288_wdt.c
@@ -86,7 +86,7 @@ static int __diag288(unsigned int func, unsigned int timeout,
"1:\n"
EX_TABLE(0b, 1b)
: "+d" (err) : "d"(__func), "d"(__timeout),
- "d"(__action), "d"(__len) : "1", "cc");
+ "d"(__action), "d"(__len) : "1", "cc", "memory");
return err;
}
@@ -268,12 +268,21 @@ static int __init diag288_init(void)
char ebc_begin[] = {
194, 197, 199, 201, 213
};
+ char *ebc_cmd;
watchdog_set_nowayout(&wdt_dev, nowayout_info);
if (MACHINE_IS_VM) {
- if (__diag288_vm(WDT_FUNC_INIT, 15,
- ebc_begin, sizeof(ebc_begin)) != 0) {
+ ebc_cmd = kmalloc(sizeof(ebc_begin), GFP_KERNEL);
+ if (!ebc_cmd) {
+ pr_err("The watchdog cannot be initialized\n");
+ return -ENOMEM;
+ }
+ memcpy(ebc_cmd, ebc_begin, sizeof(ebc_begin));
+ ret = __diag288_vm(WDT_FUNC_INIT, 15,
+ ebc_cmd, sizeof(ebc_begin));
+ kfree(ebc_cmd);
+ if (ret != 0) {
pr_err("The watchdog cannot be initialized\n");
return -EINVAL;
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 9bd32daa9b9a..3bbf8703db2a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3826,6 +3826,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
lockend = round_up(start + len, inode->root->fs_info->sectorsize);
prev_extent_end = lockstart;
+ btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
@@ -4019,6 +4020,7 @@ check_eof_delalloc:
out_unlock:
unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
out:
free_extent_state(delalloc_cached_state);
btrfs_free_backref_share_ctx(backref_ctx);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 6a2cf754912d..ff4b1d583788 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1426,12 +1426,20 @@ static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bi
u32 bio_size = 0;
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
+ int i;
bio_for_each_segment_all(bvec, bio, iter_all)
bio_size += bvec->bv_len;
- bitmap_set(rbio->error_bitmap, total_sector_nr,
- bio_size >> rbio->bioc->fs_info->sectorsize_bits);
+ /*
+ * Since we can have multiple bios touching the error_bitmap, we cannot
+ * call bitmap_set() without protection.
+ *
+ * Instead use set_bit() for each bit, as set_bit() itself is atomic.
+ */
+ for (i = total_sector_nr; i < total_sector_nr +
+ (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++)
+ set_bit(i, rbio->error_bitmap);
}
/* Verify the data sectors at read time. */
@@ -1886,7 +1894,7 @@ pstripe:
sector->uptodate = 1;
}
if (failb >= 0) {
- ret = verify_one_sector(rbio, faila, sector_nr);
+ ret = verify_one_sector(rbio, failb, sector_nr);
if (ret < 0)
goto cleanup;
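A hedged sketch of the lock-free pattern the error-bitmap hunk above switches to: set_bit() performs an atomic read-modify-write per bit, so concurrent bios can mark sectors in a shared bitmap without an external lock, unlike bitmap_set(), which rewrites whole words non-atomically. Names below are illustrative:

#include <linux/bitops.h>

static void mark_error_range(unsigned long *bitmap, unsigned int first,
			     unsigned int nr)
{
	unsigned int i;

	/* One atomic RMW per bit; safe against concurrent setters. */
	for (i = first; i < first + nr; i++)
		set_bit(i, bitmap);
}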
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index e65e6b6600a7..d50182b6deec 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -8073,10 +8073,10 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
/*
* Check that we don't overflow at later allocations, we request
* clone_sources_count + 1 items, and compare to unsigned long inside
- * access_ok.
+ * access_ok. Also set an upper limit for allocation size so this can't
+ * easily exhaust memory. Max number of clone sources is about 200K.
*/
- if (arg->clone_sources_count >
- ULONG_MAX / sizeof(struct clone_root) - 1) {
+ if (arg->clone_sources_count > SZ_8M / sizeof(struct clone_root)) {
ret = -EINVAL;
goto out;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d43261545264..58599189bd18 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3576,17 +3576,19 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
}
static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
- struct btrfs_root *log,
+ struct btrfs_inode *inode,
struct extent_buffer *src,
struct btrfs_path *dst_path,
int start_slot,
int count)
{
+ struct btrfs_root *log = inode->root->log_root;
char *ins_data = NULL;
struct btrfs_item_batch batch;
struct extent_buffer *dst;
unsigned long src_offset;
unsigned long dst_offset;
+ u64 last_index;
struct btrfs_key key;
u32 item_size;
int ret;
@@ -3644,6 +3646,19 @@ static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
src_offset = btrfs_item_ptr_offset(src, start_slot + count - 1);
copy_extent_buffer(dst, src, dst_offset, src_offset, batch.total_data_size);
btrfs_release_path(dst_path);
+
+ last_index = batch.keys[count - 1].offset;
+ ASSERT(last_index > inode->last_dir_index_offset);
+
+ /*
+ * If for some unexpected reason the last item's index is not greater
+ * than the last index we logged, warn and return an error to fall back
+ * to a transaction commit.
+ */
+ if (WARN_ON(last_index <= inode->last_dir_index_offset))
+ ret = -EUCLEAN;
+ else
+ inode->last_dir_index_offset = last_index;
out:
kfree(ins_data);
@@ -3693,7 +3708,6 @@ static int process_dir_items_leaf(struct btrfs_trans_handle *trans,
}
di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
- ctx->last_dir_item_offset = key.offset;
/*
* Skip ranges of items that consist only of dir item keys created
@@ -3756,7 +3770,7 @@ static int process_dir_items_leaf(struct btrfs_trans_handle *trans,
if (batch_size > 0) {
int ret;
- ret = flush_dir_items_batch(trans, log, src, dst_path,
+ ret = flush_dir_items_batch(trans, inode, src, dst_path,
batch_start, batch_size);
if (ret < 0)
return ret;
@@ -4044,7 +4058,6 @@ static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
min_key = BTRFS_DIR_START_INDEX;
max_key = 0;
- ctx->last_dir_item_offset = inode->last_dir_index_offset;
while (1) {
ret = log_dir_items(trans, inode, path, dst_path,
@@ -4056,8 +4069,6 @@ static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
min_key = max_key + 1;
}
- inode->last_dir_index_offset = ctx->last_dir_item_offset;
-
return 0;
}
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 85b43075ac58..85cd24cb0540 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -24,8 +24,6 @@ struct btrfs_log_ctx {
bool logging_new_delayed_dentries;
/* Indicate if the inode being logged was logged before. */
bool logged_before;
- /* Tracks the last logged dir item/index key offset. */
- u64 last_dir_item_offset;
struct inode *inode;
struct list_head list;
/* Only used for fast fsyncs. */
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index bcfef75b97da..df43093b7a46 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -403,6 +403,7 @@ void btrfs_free_device(struct btrfs_device *device)
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *device;
+
WARN_ON(fs_devices->opened);
while (!list_empty(&fs_devices->devices)) {
device = list_entry(fs_devices->devices.next,
@@ -1181,9 +1182,22 @@ void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
mutex_lock(&uuid_mutex);
close_fs_devices(fs_devices);
- if (!fs_devices->opened)
+ if (!fs_devices->opened) {
list_splice_init(&fs_devices->seed_list, &list);
+ /*
+ * If the struct btrfs_fs_devices is not assembled with any
+ * other device, it can be re-initialized during the next mount
+ * without needing the device-scan step. Therefore, it can be
+ * fully freed.
+ */
+ if (fs_devices->num_devices == 1) {
+ list_del(&fs_devices->fs_list);
+ free_fs_devices(fs_devices);
+ }
+ }
+
+
list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
close_fs_devices(fs_devices);
list_del(&fs_devices->seed_list);
@@ -1600,7 +1614,7 @@ again:
if (ret < 0)
goto out;
- while (1) {
+ while (search_start < search_end) {
l = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(l)) {
@@ -1623,6 +1637,9 @@ again:
if (key.type != BTRFS_DEV_EXTENT_KEY)
goto next;
+ if (key.offset > search_end)
+ break;
+
if (key.offset > search_start) {
hole_size = key.offset - search_start;
dev_extent_hole_check(device, &search_start, &hole_size,
@@ -1683,6 +1700,7 @@ next:
else
ret = 0;
+ ASSERT(max_hole_start + max_hole_size <= search_end);
out:
btrfs_free_path(path);
*start = max_hole_start;
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 01a13de11832..da7bb9187b68 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -63,7 +63,7 @@ struct list_head *zlib_alloc_workspace(unsigned int level)
workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
zlib_inflate_workspacesize());
- workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
+ workspace->strm.workspace = kvzalloc(workspacesize, GFP_KERNEL);
workspace->level = level;
workspace->buf = NULL;
/*
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 8c74871e37c9..cac4083e387a 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -305,7 +305,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
struct inode *inode = rreq->inode;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- struct ceph_osd_request *req;
+ struct ceph_osd_request *req = NULL;
struct ceph_vino vino = ceph_vino(inode);
struct iov_iter iter;
struct page **pages;
@@ -313,6 +313,11 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
int err = 0;
u64 len = subreq->len;
+ if (ceph_inode_is_shutdown(inode)) {
+ err = -EIO;
+ goto out;
+ }
+
if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
return;
@@ -563,6 +568,9 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
dout("writepage %p idx %lu\n", page, page->index);
+ if (ceph_inode_is_shutdown(inode))
+ return -EIO;
+
/* verify this is a writeable snap context */
snapc = page_snap_context(page);
if (!snapc) {
@@ -1643,7 +1651,7 @@ int ceph_uninline_data(struct file *file)
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_osd_request *req = NULL;
- struct ceph_cap_flush *prealloc_cf;
+ struct ceph_cap_flush *prealloc_cf = NULL;
struct folio *folio = NULL;
u64 inline_version = CEPH_INLINE_NONE;
struct page *pages[1];
@@ -1657,6 +1665,11 @@ int ceph_uninline_data(struct file *file)
dout("uninline_data %p %llx.%llx inline_version %llu\n",
inode, ceph_vinop(inode), inline_version);
+ if (ceph_inode_is_shutdown(inode)) {
+ err = -EIO;
+ goto out;
+ }
+
if (inline_version == CEPH_INLINE_NONE)
return 0;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index f75ad432f375..210e40037881 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -4078,6 +4078,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
void *p, *end;
struct cap_extra_info extra_info = {};
bool queue_trunc;
+ bool close_sessions = false;
dout("handle_caps from mds%d\n", session->s_mds);
@@ -4215,9 +4216,13 @@ void ceph_handle_caps(struct ceph_mds_session *session,
realm = NULL;
if (snaptrace_len) {
down_write(&mdsc->snap_rwsem);
- ceph_update_snap_trace(mdsc, snaptrace,
- snaptrace + snaptrace_len,
- false, &realm);
+ if (ceph_update_snap_trace(mdsc, snaptrace,
+ snaptrace + snaptrace_len,
+ false, &realm)) {
+ up_write(&mdsc->snap_rwsem);
+ close_sessions = true;
+ goto done;
+ }
downgrade_write(&mdsc->snap_rwsem);
} else {
down_read(&mdsc->snap_rwsem);
@@ -4277,6 +4282,11 @@ done_unlocked:
iput(inode);
out:
ceph_put_string(extra_info.pool_ns);
+
+ /* Defer closing the sessions until after the s_mutex lock is released */
+ if (close_sessions)
+ ceph_mdsc_close_sessions(mdsc);
+
return;
flush_cap_releases:
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 764598e1efd9..b5cff85925a1 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -2011,6 +2011,9 @@ static int ceph_zero_partial_object(struct inode *inode,
loff_t zero = 0;
int op;
+ if (ceph_inode_is_shutdown(inode))
+ return -EIO;
+
if (!length) {
op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
length = &zero;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 26a0a8b9975e..27a245d959c0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -806,6 +806,9 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
{
struct ceph_mds_session *s;
+ if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
+ return ERR_PTR(-EIO);
+
if (mds >= mdsc->mdsmap->possible_max_rank)
return ERR_PTR(-EINVAL);
@@ -1478,6 +1481,9 @@ static int __open_session(struct ceph_mds_client *mdsc,
int mstate;
int mds = session->s_mds;
+ if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
+ return -EIO;
+
/* wait for mds to go active? */
mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
dout("open_session to mds%d (%s)\n", mds,
@@ -2860,6 +2866,11 @@ static void __do_request(struct ceph_mds_client *mdsc,
return;
}
+ if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) {
+ dout("do_request metadata corrupted\n");
+ err = -EIO;
+ goto finish;
+ }
if (req->r_timeout &&
time_after_eq(jiffies, req->r_started + req->r_timeout)) {
dout("do_request timed out\n");
@@ -3245,6 +3256,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
u64 tid;
int err, result;
int mds = session->s_mds;
+ bool close_sessions = false;
if (msg->front.iov_len < sizeof(*head)) {
pr_err("mdsc_handle_reply got corrupt (short) reply\n");
@@ -3351,10 +3363,17 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
realm = NULL;
if (rinfo->snapblob_len) {
down_write(&mdsc->snap_rwsem);
- ceph_update_snap_trace(mdsc, rinfo->snapblob,
+ err = ceph_update_snap_trace(mdsc, rinfo->snapblob,
rinfo->snapblob + rinfo->snapblob_len,
le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
&realm);
+ if (err) {
+ up_write(&mdsc->snap_rwsem);
+ close_sessions = true;
+ if (err == -EIO)
+ ceph_msg_dump(msg);
+ goto out_err;
+ }
downgrade_write(&mdsc->snap_rwsem);
} else {
down_read(&mdsc->snap_rwsem);
@@ -3412,6 +3431,10 @@ out_err:
req->r_end_latency, err);
out:
ceph_mdsc_put_request(req);
+
+ /* Defer closing the sessions until after the s_mutex lock is released */
+ if (close_sessions)
+ ceph_mdsc_close_sessions(mdsc);
return;
}
@@ -3662,6 +3685,12 @@ static void handle_session(struct ceph_mds_session *session,
break;
case CEPH_SESSION_FLUSHMSG:
+ /* flush cap releases */
+ spin_lock(&session->s_cap_lock);
+ if (session->s_num_cap_releases)
+ ceph_flush_cap_releases(mdsc, session);
+ spin_unlock(&session->s_cap_lock);
+
send_flushmsg_ack(mdsc, session, seq);
break;
@@ -5011,7 +5040,7 @@ static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
}
/*
- * called after sb is ro.
+ * called after sb is ro or when metadata is corrupted.
*/
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
@@ -5301,7 +5330,8 @@ static void mds_peer_reset(struct ceph_connection *con)
struct ceph_mds_client *mdsc = s->s_mdsc;
pr_warn("mds%d closed our session\n", s->s_mds);
- send_mds_reconnect(mdsc, s);
+ if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO)
+ send_mds_reconnect(mdsc, s);
}
static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index e4151852184e..87007203f130 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
+#include <linux/fs.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/iversion.h>
@@ -766,8 +767,10 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm;
struct ceph_snap_realm *first_realm = NULL;
struct ceph_snap_realm *realm_to_rebuild = NULL;
+ struct ceph_client *client = mdsc->fsc->client;
int rebuild_snapcs;
int err = -ENOMEM;
+ int ret;
LIST_HEAD(dirty_realms);
lockdep_assert_held_write(&mdsc->snap_rwsem);
@@ -884,6 +887,27 @@ fail:
if (first_realm)
ceph_put_snap_realm(mdsc, first_realm);
pr_err("%s error %d\n", __func__, err);
+
+ /*
+ * When receiving a corrupted snap trace we don't know what
+ * exactly has happened on the MDS side. And we shouldn't continue
+ * writing to the OSDs, which may corrupt the snapshot contents.
+ *
+ * Just try to blocklist this kclient; it must then be remounted
+ * to continue once the corrupted metadata has been fixed on the
+ * MDS side.
+ */
+ WRITE_ONCE(mdsc->fsc->mount_state, CEPH_MOUNT_FENCE_IO);
+ ret = ceph_monc_blocklist_add(&client->monc, &client->msgr.inst.addr);
+ if (ret)
+ pr_err("%s failed to blocklist %s: %d\n", __func__,
+ ceph_pr_addr(&client->msgr.inst.addr), ret);
+
+ WARN(1, "%s: %s%sdo remount to continue%s",
+ __func__, ret ? "" : ceph_pr_addr(&client->msgr.inst.addr),
+ ret ? "" : " was blocklisted, ",
+ err == -EIO ? " after corrupted snaptrace is fixed" : "");
+
return err;
}
@@ -984,6 +1008,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
__le64 *split_inos = NULL, *split_realms = NULL;
int i;
int locked_rwsem = 0;
+ bool close_sessions = false;
/* decode */
if (msg->front.iov_len < sizeof(*h))
@@ -1092,8 +1117,12 @@ skip_inode:
* update using the provided snap trace. if we are deleting a
* snap, we can avoid queueing cap_snaps.
*/
- ceph_update_snap_trace(mdsc, p, e,
- op == CEPH_SNAP_OP_DESTROY, NULL);
+ if (ceph_update_snap_trace(mdsc, p, e,
+ op == CEPH_SNAP_OP_DESTROY,
+ NULL)) {
+ close_sessions = true;
+ goto bad;
+ }
if (op == CEPH_SNAP_OP_SPLIT)
/* we took a reference when we created the realm, above */
@@ -1112,6 +1141,9 @@ bad:
out:
if (locked_rwsem)
up_write(&mdsc->snap_rwsem);
+
+ if (close_sessions)
+ ceph_mdsc_close_sessions(mdsc);
return;
}
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 0ed3be75bb9a..07c6906cda70 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -100,6 +100,17 @@ struct ceph_mount_options {
char *mon_addr;
};
+/* mount state */
+enum {
+ CEPH_MOUNT_MOUNTING,
+ CEPH_MOUNT_MOUNTED,
+ CEPH_MOUNT_UNMOUNTING,
+ CEPH_MOUNT_UNMOUNTED,
+ CEPH_MOUNT_SHUTDOWN,
+ CEPH_MOUNT_RECOVER,
+ CEPH_MOUNT_FENCE_IO,
+};
+
#define CEPH_ASYNC_CREATE_CONFLICT_BITS 8
struct ceph_fs_client {
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 22dfc1f8b4f1..b8d1cbadb689 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3889,7 +3889,7 @@ uncached_fill_pages(struct TCP_Server_Info *server,
rdata->got_bytes += result;
}
- return rdata->got_bytes > 0 && result != -ECONNABORTED ?
+ return result != -ECONNABORTED && rdata->got_bytes > 0 ?
rdata->got_bytes : result;
}
@@ -4665,7 +4665,7 @@ readpages_fill_pages(struct TCP_Server_Info *server,
rdata->got_bytes += result;
}
- return rdata->got_bytes > 0 && result != -ECONNABORTED ?
+ return result != -ECONNABORTED && rdata->got_bytes > 0 ?
rdata->got_bytes : result;
}
diff --git a/fs/coredump.c b/fs/coredump.c
index de78bde2991b..a25ecec9ca7c 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -838,6 +838,30 @@ static int __dump_skip(struct coredump_params *cprm, size_t nr)
}
}
+int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
+{
+ if (cprm->to_skip) {
+ if (!__dump_skip(cprm, cprm->to_skip))
+ return 0;
+ cprm->to_skip = 0;
+ }
+ return __dump_emit(cprm, addr, nr);
+}
+EXPORT_SYMBOL(dump_emit);
+
+void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
+{
+ cprm->to_skip = pos - cprm->pos;
+}
+EXPORT_SYMBOL(dump_skip_to);
+
+void dump_skip(struct coredump_params *cprm, size_t nr)
+{
+ cprm->to_skip += nr;
+}
+EXPORT_SYMBOL(dump_skip);
+
+#ifdef CONFIG_ELF_CORE
static int dump_emit_page(struct coredump_params *cprm, struct page *page)
{
struct bio_vec bvec = {
@@ -871,30 +895,6 @@ static int dump_emit_page(struct coredump_params *cprm, struct page *page)
return 1;
}
-int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
-{
- if (cprm->to_skip) {
- if (!__dump_skip(cprm, cprm->to_skip))
- return 0;
- cprm->to_skip = 0;
- }
- return __dump_emit(cprm, addr, nr);
-}
-EXPORT_SYMBOL(dump_emit);
-
-void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
-{
- cprm->to_skip = pos - cprm->pos;
-}
-EXPORT_SYMBOL(dump_skip_to);
-
-void dump_skip(struct coredump_params *cprm, size_t nr)
-{
- cprm->to_skip += nr;
-}
-EXPORT_SYMBOL(dump_skip);
-
-#ifdef CONFIG_ELF_CORE
int dump_user_range(struct coredump_params *cprm, unsigned long start,
unsigned long len)
{
diff --git a/fs/freevxfs/Kconfig b/fs/freevxfs/Kconfig
index c05c71d57291..0e2fc08f7de4 100644
--- a/fs/freevxfs/Kconfig
+++ b/fs/freevxfs/Kconfig
@@ -8,7 +8,7 @@ config VXFS_FS
of SCO UnixWare (and possibly others) and optionally available
for Sunsoft Solaris, HP-UX and many other operating systems. However
these particular OS implementations of vxfs may differ in on-disk
- data endianess and/or superblock offset. The vxfs module has been
+ data endianness and/or superblock offset. The vxfs module has been
tested with SCO UnixWare and HP-UX B.10.20 (pa-risc 1.1 arch.)
Currently only readonly access is supported and VxFS versions
2, 3 and 4. Tests were performed with HP-UX VxFS version 3.
diff --git a/fs/fscache/volume.c b/fs/fscache/volume.c
index ab8ceddf9efa..cdf991bdd9de 100644
--- a/fs/fscache/volume.c
+++ b/fs/fscache/volume.c
@@ -141,13 +141,14 @@ static bool fscache_is_acquire_pending(struct fscache_volume *volume)
static void fscache_wait_on_volume_collision(struct fscache_volume *candidate,
unsigned int collidee_debug_id)
{
- wait_var_event_timeout(&candidate->flags,
- !fscache_is_acquire_pending(candidate), 20 * HZ);
+ wait_on_bit_timeout(&candidate->flags, FSCACHE_VOLUME_ACQUIRE_PENDING,
+ TASK_UNINTERRUPTIBLE, 20 * HZ);
if (fscache_is_acquire_pending(candidate)) {
pr_notice("Potential volume collision new=%08x old=%08x",
candidate->debug_id, collidee_debug_id);
fscache_stat(&fscache_n_volumes_collision);
- wait_var_event(&candidate->flags, !fscache_is_acquire_pending(candidate));
+ wait_on_bit(&candidate->flags, FSCACHE_VOLUME_ACQUIRE_PENDING,
+ TASK_UNINTERRUPTIBLE);
}
}
@@ -279,8 +280,7 @@ static void fscache_create_volume_work(struct work_struct *work)
fscache_end_cache_access(volume->cache,
fscache_access_acquire_volume_end);
- clear_bit_unlock(FSCACHE_VOLUME_CREATING, &volume->flags);
- wake_up_bit(&volume->flags, FSCACHE_VOLUME_CREATING);
+ clear_and_wake_up_bit(FSCACHE_VOLUME_CREATING, &volume->flags);
fscache_put_volume(volume, fscache_volume_put_create_work);
}
@@ -347,8 +347,8 @@ static void fscache_wake_pending_volume(struct fscache_volume *volume,
hlist_bl_for_each_entry(cursor, p, h, hash_link) {
if (fscache_volume_same(cursor, volume)) {
fscache_see_volume(cursor, fscache_volume_see_hash_wake);
- clear_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &cursor->flags);
- wake_up_bit(&cursor->flags, FSCACHE_VOLUME_ACQUIRE_PENDING);
+ clear_and_wake_up_bit(FSCACHE_VOLUME_ACQUIRE_PENDING,
+ &cursor->flags);
return;
}
}
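The fscache change pairs bit-waiting with clear_and_wake_up_bit(); a hedged sketch of that waiter/waker pairing on a shared flags word (the flag bit and function names are illustrative):

#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

#define EXAMPLE_PENDING	0	/* illustrative bit number */

static void example_wait(unsigned long *flags)
{
	/* Sleep until the bit clears, giving up after 20 seconds... */
	wait_on_bit_timeout(flags, EXAMPLE_PENDING, TASK_UNINTERRUPTIBLE,
			    20 * HZ);
	/* ...then wait indefinitely if it is still set. */
	if (test_bit(EXAMPLE_PENDING, flags))
		wait_on_bit(flags, EXAMPLE_PENDING, TASK_UNINTERRUPTIBLE);
}

static void example_release(unsigned long *flags)
{
	/* Clears the bit and wakes every wait_on_bit() sleeper on it. */
	clear_and_wake_up_bit(EXAMPLE_PENDING, flags);
}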
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index e35a0398db63..af1c49ae11b1 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -745,9 +745,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
page = pfn_swap_entry_to_page(swpent);
}
if (page) {
- int mapcount = page_mapcount(page);
-
- if (mapcount >= 2)
+ if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
else
mss->private_hugetlb += huge_page_size(hstate_vma(vma));
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index b3fdc8212c5f..95f8e8901768 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -183,7 +183,7 @@ static inline int squashfs_block_size(__le32 raw)
#define SQUASHFS_ID_BLOCK_BYTES(A) (SQUASHFS_ID_BLOCKS(A) *\
sizeof(u64))
/* xattr id lookup table defines */
-#define SQUASHFS_XATTR_BYTES(A) ((A) * sizeof(struct squashfs_xattr_id))
+#define SQUASHFS_XATTR_BYTES(A) (((u64) (A)) * sizeof(struct squashfs_xattr_id))
#define SQUASHFS_XATTR_BLOCK(A) (SQUASHFS_XATTR_BYTES(A) / \
SQUASHFS_METADATA_SIZE)
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index 659082e9e51d..72f6f4b37863 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -63,7 +63,7 @@ struct squashfs_sb_info {
long long bytes_used;
unsigned int inodes;
unsigned int fragments;
- int xattr_ids;
+ unsigned int xattr_ids;
unsigned int ids;
bool panic_on_errors;
const struct squashfs_decompressor_thread_ops *thread_ops;
diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h
index d8a270d3ac4c..f1a463d8bfa0 100644
--- a/fs/squashfs/xattr.h
+++ b/fs/squashfs/xattr.h
@@ -10,12 +10,12 @@
#ifdef CONFIG_SQUASHFS_XATTR
extern __le64 *squashfs_read_xattr_id_table(struct super_block *, u64,
- u64 *, int *);
+ u64 *, unsigned int *);
extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
unsigned int *, unsigned long long *);
#else
static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
- u64 start, u64 *xattr_table_start, int *xattr_ids)
+ u64 start, u64 *xattr_table_start, unsigned int *xattr_ids)
{
struct squashfs_xattr_id_table *id_table;
diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
index 087cab8c78f4..b88d19e9581e 100644
--- a/fs/squashfs/xattr_id.c
+++ b/fs/squashfs/xattr_id.c
@@ -56,7 +56,7 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
* Read uncompressed xattr id lookup table indexes from disk into memory
*/
__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
- u64 *xattr_table_start, int *xattr_ids)
+ u64 *xattr_table_start, unsigned int *xattr_ids)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
unsigned int len, indexes;
@@ -76,7 +76,7 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
/* Sanity check values */
/* there is always at least one xattr id */
- if (*xattr_ids == 0)
+ if (*xattr_ids <= 0)
return ERR_PTR(-EINVAL);
len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
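The new (u64) cast in SQUASHFS_XATTR_BYTES() matters because C evaluates the multiplication in the operands' own width before any widening of the result; a hedged, self-contained illustration (the constant 16 stands in for the per-entry size and is an assumption here):

#include <linux/types.h>

static u64 xattr_bytes_wrapping(unsigned int ids)
{
	return ids * 16U;	/* 32-bit multiply: may wrap for huge ids */
}

static u64 xattr_bytes_widened(unsigned int ids)
{
	return (u64)ids * 16U;	/* widen first, multiply in 64 bits */
}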
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index c5614444031f..6b487a5bd638 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -108,12 +108,14 @@ struct cppc_perf_caps {
u32 lowest_nonlinear_perf;
u32 lowest_freq;
u32 nominal_freq;
+ u32 energy_perf;
};
struct cppc_perf_ctrls {
u32 max_perf;
u32 min_perf;
u32 desired_perf;
+ u32 energy_perf;
};
struct cppc_perf_fb_ctrs {
@@ -149,6 +151,8 @@ extern bool cpc_ffh_supported(void);
extern bool cpc_supported_by_cpu(void);
extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val);
+extern int cppc_get_epp_perf(int cpunum, u64 *epp_perf);
+extern int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable);
#else /* !CONFIG_ACPI_CPPC_LIB */
static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
@@ -202,6 +206,14 @@ static inline int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
return -ENOTSUPP;
}
+static inline int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
+{
+ return -ENOTSUPP;
+}
+static inline int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
+{
+ return -ENOTSUPP;
+}
#endif /* !CONFIG_ACPI_CPPC_LIB */
#endif /* _CPPC_ACPI_H*/
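[Illustrative only, not part of the patch: a minimal sketch of how a cpufreq driver might use the new EPP helpers declared above; the function and variable names here are assumptions.]

static int example_update_epp(int cpu, u32 new_epp)
{
	struct cppc_perf_ctrls perf_ctrls = {};
	u64 cur_epp;
	int ret;

	/* Read the current energy-performance preference from the CPC registers */
	ret = cppc_get_epp_perf(cpu, &cur_epp);
	if (ret)
		return ret;

	if (cur_epp == new_epp)
		return 0;

	/* Write the new preference and keep CPPC enabled */
	perf_ctrls.energy_perf = new_epp;
	return cppc_set_epp_perf(cpu, &perf_ctrls, true);
}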
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 4fc8018eddda..1220d185c776 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -127,11 +127,6 @@ struct drm_client_buffer {
struct drm_client_dev *client;
/**
- * @handle: Buffer handle
- */
- u32 handle;
-
- /**
* @pitch: Buffer pitch
*/
u32 pitch;
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 87ea90576b50..08d3559dd703 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -303,7 +303,6 @@ static inline int kunit_run_all_tests(void)
*/
#define kunit_test_init_section_suites(__suites...) \
__kunit_test_suites(CONCATENATE(__UNIQUE_ID(array), _probe), \
- CONCATENATE(__UNIQUE_ID(suites), _probe), \
##__suites)
#define kunit_test_init_section_suite(suite) \
@@ -683,8 +682,9 @@ do { \
.right_text = #right, \
}; \
\
- if (likely(memcmp(__left, __right, __size) op 0)) \
- break; \
+ if (likely(__left && __right)) \
+ if (likely(memcmp(__left, __right, __size) op 0)) \
+ break; \
\
_KUNIT_FAILED(test, \
assert_type, \
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 9270cd87da3f..6470f67e63c4 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -263,7 +263,7 @@ struct vgic_dist {
struct vgic_io_device dist_iodev;
bool has_its;
- bool save_its_tables_in_progress;
+ bool table_write_in_progress;
/*
* Contains the attributes and gpa of the LPI configuration table.
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
index 1c4b8659f171..f5f22418e64b 100644
--- a/include/linux/amd-pstate.h
+++ b/include/linux/amd-pstate.h
@@ -12,6 +12,11 @@
#include <linux/pm_qos.h>
+#define AMD_CPPC_EPP_PERFORMANCE 0x00
+#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80
+#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF
+#define AMD_CPPC_EPP_POWERSAVE 0xFF
+
/*********************************************************************
* AMD P-state INTERFACE *
*********************************************************************/
@@ -47,6 +52,10 @@ struct amd_aperf_mperf {
* @prev: Last Aperf/Mperf/tsc count value read from register
* @freq: current cpu frequency value
* @boost_supported: check whether the Processor or SBIOS supports boost mode
+ * @epp_policy: Last saved policy used to set energy-performance preference
+ * @epp_cached: Cached CPPC energy-performance preference value
+ * @policy: Cpufreq policy value
+ * @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value
*
* The amd_cpudata is key private data for each CPU thread in AMD P-State, and
* represents all the attributes and goals that AMD P-State requests at runtime.
@@ -72,6 +81,29 @@ struct amd_cpudata {
u64 freq;
bool boost_supported;
+
+ /* EPP feature related attributes*/
+ s16 epp_policy;
+ s16 epp_cached;
+ u32 policy;
+ u64 cppc_cap1_cached;
+ bool suspended;
};
+/*
+ * enum amd_pstate_mode - driver working mode of amd pstate
+ */
+enum amd_pstate_mode {
+ AMD_PSTATE_DISABLE = 0,
+ AMD_PSTATE_PASSIVE,
+ AMD_PSTATE_ACTIVE,
+ AMD_PSTATE_MAX,
+};
+
+static const char * const amd_pstate_mode_string[] = {
+ [AMD_PSTATE_DISABLE] = "disable",
+ [AMD_PSTATE_PASSIVE] = "passive",
+ [AMD_PSTATE_ACTIVE] = "active",
+ NULL,
+};
#endif /* _LINUX_AMD_PSTATE_H */
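[Illustrative only, not part of the patch: one way the amd_pstate_mode_string[] table above can be used to turn a mode name (for example from a module parameter) back into an enum amd_pstate_mode value; get_mode_idx_from_str() is a hypothetical helper.]

static int get_mode_idx_from_str(const char *str, size_t size)
{
	int i;

	for (i = 0; i < AMD_PSTATE_MAX; i++) {
		if (!strncmp(str, amd_pstate_mode_string[i], size))
			return i;
	}
	return -EINVAL;
}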
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 00af2c98da75..4497d0a6772c 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -99,16 +99,6 @@ struct ceph_options {
#define CEPH_AUTH_NAME_DEFAULT "guest"
-/* mount state */
-enum {
- CEPH_MOUNT_MOUNTING,
- CEPH_MOUNT_MOUNTED,
- CEPH_MOUNT_UNMOUNTING,
- CEPH_MOUNT_UNMOUNTED,
- CEPH_MOUNT_SHUTDOWN,
- CEPH_MOUNT_RECOVER,
-};
-
static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)
{
return timeout ?: MAX_SCHEDULE_TIMEOUT;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 6a94a6eaad27..65623233ab2f 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -448,7 +448,7 @@ struct cpufreq_driver {
#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6)
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
-int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
+void cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
bool cpufreq_driver_test_flags(u16 flags);
const char *cpufreq_get_current_driver(void);
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 4b27519143f5..98598bd1d2fa 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -668,7 +668,8 @@ extern struct efi {
#define EFI_RT_SUPPORTED_ALL 0x3fff
-#define EFI_RT_SUPPORTED_TIME_SERVICES 0x000f
+#define EFI_RT_SUPPORTED_TIME_SERVICES 0x0003
+#define EFI_RT_SUPPORTED_WAKEUP_SERVICES 0x000c
#define EFI_RT_SUPPORTED_VARIABLE_SERVICES 0x0070
extern struct mm_struct efi_mm;
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index 034b1106d022..e098f38422af 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -200,7 +200,7 @@ static inline void *kmap_local_pfn(unsigned long pfn)
static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
- kunmap_flush_on_unmap(addr);
+ kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
}
@@ -227,7 +227,7 @@ static inline void *kmap_atomic_pfn(unsigned long pfn)
static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
- kunmap_flush_on_unmap(addr);
+ kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
pagefault_enable();
if (IS_ENABLED(CONFIG_PREEMPT_RT))
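[Illustrative only, not part of the patch: kunmap_local()/kunmap_atomic() may be handed an address that is offset into the mapped page, which is why the flush now rounds down to the page boundary; page, offset, src and len below are placeholders.]

	void *p = kmap_local_page(page);

	memcpy(p + offset, src, len);	/* operate on part of the page */
	kunmap_local(p + offset);	/* legal: any address within the mapping */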
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 551834cd5299..db194e2ba69f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -7,6 +7,7 @@
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
+#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
@@ -1187,6 +1188,18 @@ static inline __init void hugetlb_cma_reserve(int order)
}
#endif
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+static inline bool hugetlb_pmd_shared(pte_t *pte)
+{
+ return page_count(virt_to_page(pte)) > 1;
+}
+#else
+static inline bool hugetlb_pmd_shared(pte_t *pte)
+{
+ return false;
+}
+#endif
+
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index d3c8203cab6c..85dc9b88ea37 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1666,10 +1666,13 @@ void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
struct bdi_writeback *wb)
{
+ struct mem_cgroup *memcg;
+
if (mem_cgroup_disabled())
return;
- if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
+ memcg = folio_memcg(folio);
+ if (unlikely(memcg && &memcg->css != wb->memcg_css))
mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 76ef2e4fde38..333c1fec72f8 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -573,6 +573,14 @@ struct mlx5_debugfs_entries {
struct dentry *lag_debugfs;
};
+enum mlx5_func_type {
+ MLX5_PF,
+ MLX5_VF,
+ MLX5_SF,
+ MLX5_HOST_PF,
+ MLX5_FUNC_TYPE_NUM,
+};
+
struct mlx5_ft_pool;
struct mlx5_priv {
/* IRQ table valid only for real pci devices PF or VF */
@@ -583,11 +591,10 @@ struct mlx5_priv {
struct mlx5_nb pg_nb;
struct workqueue_struct *pg_wq;
struct xarray page_root_xa;
- u32 fw_pages;
atomic_t reg_pages;
struct list_head free_list;
- u32 vfs_pages;
- u32 host_pf_pages;
+ u32 fw_pages;
+ u32 page_counters[MLX5_FUNC_TYPE_NUM];
u32 fw_pages_alloc_failed;
u32 give_pages_dropped;
u32 reclaim_pages_discard;
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 50caa117cb62..bb15c9234e21 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -70,7 +70,6 @@ struct nvmem_keepout {
* @word_size: Minimum read/write access granularity.
* @stride: Minimum read/write access stride.
* @priv: User context passed to read/write callbacks.
- * @wp-gpio: Write protect pin
* @ignore_wp: Write Protect pin is managed by the provider.
*
* Note: A default "nvmem<id>" name will be assigned to the device if
@@ -85,7 +84,6 @@ struct nvmem_config {
const char *name;
int id;
struct module *owner;
- struct gpio_desc *wp_gpio;
const struct nvmem_cell_info *cells;
int ncells;
const struct nvmem_keepout *keepout;
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 93cd34f00822..035d9649eba4 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -379,9 +379,13 @@ const struct dev_pm_ops name = { \
const struct dev_pm_ops name; \
__EXPORT_SYMBOL(name, sec, ns); \
const struct dev_pm_ops name
+#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
+#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns)
#else
#define _EXPORT_DEV_PM_OPS(name, sec, ns) \
static __maybe_unused const struct dev_pm_ops __static_##name
+#define EXPORT_PM_FN_GPL(name)
+#define EXPORT_PM_FN_NS_GPL(name, ns)
#endif
#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
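[Illustrative only, not part of the patch: the new EXPORT_PM_FN_GPL() wrapper lets a subsystem export a PM-only helper without wrapping the export in #ifdef CONFIG_PM; foo_bus_pm_resume() is a hypothetical function.]

int foo_bus_pm_resume(struct device *dev)
{
	/* ... restore controller state ... */
	return 0;
}
EXPORT_PM_FN_GPL(foo_bus_pm_resume);	/* exported only when CONFIG_PM=y */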
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 1341f7d62da4..be48f1cb1878 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -476,6 +476,15 @@ extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
+#define atomic_dec_and_raw_lock(atomic, lock) \
+ __cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))
+
+extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
+ unsigned long *flags);
+#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
+ __cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))
+
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
size_t max_size, unsigned int cpu_mult,
gfp_t gfp, const char *name,
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 83ca2e8eb6b5..a152678b82b7 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -252,6 +252,7 @@ struct plat_stmmacenet_data {
int rss_en;
int mac_port_sel_speed;
bool en_tx_lpi_clockgating;
+ bool rx_clk_runs_in_lpi;
int has_xgmac;
bool vlan_fail_q_en;
u8 vlan_fail_q;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2787b84eaf12..0ceed49516ad 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -418,8 +418,7 @@ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
- unsigned int reclaim_options,
- nodemask_t *nodemask);
+ unsigned int reclaim_options);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
pg_data_t *pgdat,
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 4342e996bcdb..0e373222a6df 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -270,6 +270,7 @@ struct trace_event_fields {
const int align;
const int is_signed;
const int filter_type;
+ const int len;
};
int (*define_fields)(struct trace_event_call *);
};
diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
index 72299f261b25..43db6e47503c 100644
--- a/include/linux/util_macros.h
+++ b/include/linux/util_macros.h
@@ -38,4 +38,16 @@
*/
#define find_closest_descending(x, a, as) __find_closest(x, a, as, >=)
+/**
+ * is_insidevar - check if the @ptr points inside the @var memory range.
+ * @ptr: the pointer to a memory address.
+ * @var: the variable which address and size identify the memory range.
+ *
+ * Evaluates to true if the address in @ptr lies within the memory
+ * range allocated to @var.
+ */
+#define is_insidevar(ptr, var) \
+ ((uintptr_t)(ptr) >= (uintptr_t)(var) && \
+ (uintptr_t)(ptr) < (uintptr_t)(var) + sizeof(var))
+
#endif
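[Illustrative only, not part of the patch: a small usage sketch of is_insidevar() with an array variable.]

	char buf[64];
	char *p = buf + 10;

	bool inside  = is_insidevar(p, buf);		/* true: p points within buf */
	bool outside = is_insidevar(buf + 64, buf);	/* false: one past the end */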
diff --git a/include/trace/stages/stage4_event_fields.h b/include/trace/stages/stage4_event_fields.h
index affd541fd25e..b6f679ae21aa 100644
--- a/include/trace/stages/stage4_event_fields.h
+++ b/include/trace/stages/stage4_event_fields.h
@@ -26,7 +26,8 @@
#define __array(_type, _item, _len) { \
.type = #_type"["__stringify(_len)"]", .name = #_item, \
.size = sizeof(_type[_len]), .align = ALIGN_STRUCTFIELD(_type), \
- .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
+ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER,\
+ .len = _len },
#undef __dynamic_array
#define __dynamic_array(_type, _item, _len) { \
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index 0512fde5e697..7b158fcb02b4 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -64,6 +64,7 @@ struct drm_virtgpu_map {
__u32 pad;
};
+/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
struct drm_virtgpu_execbuffer {
__u32 flags;
__u32 size;
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 874a92349bf5..283dec7e3645 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -18,6 +18,7 @@
#ifndef _UAPI_LINUX_IP_H
#define _UAPI_LINUX_IP_H
#include <linux/types.h>
+#include <linux/stddef.h>
#include <asm/byteorder.h>
#define IPTOS_TOS_MASK 0x1E
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 81f4243bebb1..53326dfc59ec 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -4,6 +4,7 @@
#include <linux/libc-compat.h>
#include <linux/types.h>
+#include <linux/stddef.h>
#include <linux/in6.h>
#include <asm/byteorder.h>
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
index a4a41ee3e80b..e14c822f8911 100644
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -51,7 +51,6 @@ BTF_SET_END(bpf_lsm_current_hooks)
*/
BTF_SET_START(bpf_lsm_locked_sockopt_hooks)
#ifdef CONFIG_SECURITY_NETWORK
-BTF_ID(func, bpf_lsm_socket_sock_rcv_skb)
BTF_ID(func, bpf_lsm_sock_graft)
BTF_ID(func, bpf_lsm_inet_csk_clone)
BTF_ID(func, bpf_lsm_inet_conn_established)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index f7dd8af06413..b7017cae6fd1 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -7782,9 +7782,9 @@ int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_c
sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
- return 0;
end:
- btf_free_dtor_kfunc_tab(btf);
+ if (ret)
+ btf_free_dtor_kfunc_tab(btf);
btf_put(btf);
return ret;
}
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index ebcc3dd0fa19..1db156405b68 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -71,7 +71,7 @@ static int bpf_mem_cache_idx(size_t size)
if (size <= 192)
return size_index[(size - 1) / 8] - 1;
- return fls(size - 1) - 1;
+ return fls(size - 1) - 2;
}
#define NUM_CACHES 11
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index dbef0b0967ae..7ee218827259 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3243,13 +3243,24 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
return reg->type != SCALAR_VALUE;
}
+/* Copy src state preserving dst->parent and dst->live fields */
+static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
+{
+ struct bpf_reg_state *parent = dst->parent;
+ enum bpf_reg_liveness live = dst->live;
+
+ *dst = *src;
+ dst->parent = parent;
+ dst->live = live;
+}
+
static void save_register_state(struct bpf_func_state *state,
int spi, struct bpf_reg_state *reg,
int size)
{
int i;
- state->stack[spi].spilled_ptr = *reg;
+ copy_register_state(&state->stack[spi].spilled_ptr, reg);
if (size == BPF_REG_SIZE)
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
@@ -3577,7 +3588,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
*/
s32 subreg_def = state->regs[dst_regno].subreg_def;
- state->regs[dst_regno] = *reg;
+ copy_register_state(&state->regs[dst_regno], reg);
state->regs[dst_regno].subreg_def = subreg_def;
} else {
for (i = 0; i < size; i++) {
@@ -3598,7 +3609,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
if (dst_regno >= 0) {
/* restore register state from stack */
- state->regs[dst_regno] = *reg;
+ copy_register_state(&state->regs[dst_regno], reg);
/* mark reg as written since spilled pointer state likely
* has its liveness marks cleared by is_state_visited()
* which resets stack/reg liveness for state transitions
@@ -9592,7 +9603,7 @@ do_sim:
*/
if (!ptr_is_dst_reg) {
tmp = *dst_reg;
- *dst_reg = *ptr_reg;
+ copy_register_state(dst_reg, ptr_reg);
}
ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
env->insn_idx);
@@ -10845,7 +10856,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
* to propagate min/max range.
*/
src_reg->id = ++env->id_gen;
- *dst_reg = *src_reg;
+ copy_register_state(dst_reg, src_reg);
dst_reg->live |= REG_LIVE_WRITTEN;
dst_reg->subreg_def = DEF_NOT_SUBREG;
} else {
@@ -10856,7 +10867,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
insn->src_reg);
return -EACCES;
} else if (src_reg->type == SCALAR_VALUE) {
- *dst_reg = *src_reg;
+ copy_register_state(dst_reg, src_reg);
/* Make sure ID is cleared otherwise
* dst_reg min/max could be incorrectly
* propagated into src_reg by find_equal_scalars()
@@ -11655,7 +11666,7 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
bpf_for_each_reg_in_vstate(vstate, state, reg, ({
if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
- *reg = *known_reg;
+ copy_register_state(reg, known_reg);
}));
}
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index a29c0b13706b..ca826bd1eba3 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1205,12 +1205,13 @@ void rebuild_sched_domains(void)
/**
* update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
+ * @new_cpus: the temp variable for the new effective_cpus mask
*
* Iterate through each task of @cs updating its cpus_allowed to the
* effective cpuset's. As this function is called with cpuset_rwsem held,
* cpuset membership stays stable.
*/
-static void update_tasks_cpumask(struct cpuset *cs)
+static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
{
struct css_task_iter it;
struct task_struct *task;
@@ -1224,7 +1225,10 @@ static void update_tasks_cpumask(struct cpuset *cs)
if (top_cs && (task->flags & PF_KTHREAD) &&
kthread_is_per_cpu(task))
continue;
- set_cpus_allowed_ptr(task, cs->effective_cpus);
+
+ cpumask_and(new_cpus, cs->effective_cpus,
+ task_cpu_possible_mask(task));
+ set_cpus_allowed_ptr(task, new_cpus);
}
css_task_iter_end(&it);
}
@@ -1346,7 +1350,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
* A parent can be left with no CPU as long as there is no
* task directly associated with the parent partition.
*/
- if (!cpumask_intersects(cs->cpus_allowed, parent->effective_cpus) &&
+ if (cpumask_subset(parent->effective_cpus, cs->cpus_allowed) &&
partition_is_populated(parent, cs))
return PERR_NOCPUS;
@@ -1509,7 +1513,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
spin_unlock_irq(&callback_lock);
if (adding || deleting)
- update_tasks_cpumask(parent);
+ update_tasks_cpumask(parent, tmp->new_cpus);
/*
* Set or clear CS_SCHED_LOAD_BALANCE when partcmd_update, if necessary.
@@ -1661,7 +1665,7 @@ update_parent_subparts:
WARN_ON(!is_in_v2_mode() &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
- update_tasks_cpumask(cp);
+ update_tasks_cpumask(cp, tmp->new_cpus);
/*
* On legacy hierarchy, if the effective cpumask of any non-
@@ -2309,7 +2313,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
}
}
- update_tasks_cpumask(parent);
+ update_tasks_cpumask(parent, tmpmask.new_cpus);
if (parent->child_ecpus_count)
update_sibling_cpumasks(parent, cs, &tmpmask);
@@ -2324,6 +2328,7 @@ out:
new_prs = -new_prs;
spin_lock_irq(&callback_lock);
cs->partition_root_state = new_prs;
+ WRITE_ONCE(cs->prs_err, err);
spin_unlock_irq(&callback_lock);
/*
* Update child cpusets, if present.
@@ -3347,7 +3352,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
* as the tasks will be migrated to an ancestor.
*/
if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
- update_tasks_cpumask(cs);
+ update_tasks_cpumask(cs, new_cpus);
if (mems_updated && !nodes_empty(cs->mems_allowed))
update_tasks_nodemask(cs);
@@ -3384,7 +3389,7 @@ hotplug_update_tasks(struct cpuset *cs,
spin_unlock_irq(&callback_lock);
if (cpus_updated)
- update_tasks_cpumask(cs);
+ update_tasks_cpumask(cs, new_cpus);
if (mems_updated)
update_tasks_nodemask(cs);
}
@@ -3691,15 +3696,38 @@ void __init cpuset_init_smp(void)
* Description: Returns the cpumask_var_t cpus_allowed of the cpuset
* attached to the specified @tsk. Guaranteed to return some non-empty
* subset of cpu_online_mask, even if this means going outside the
- * tasks cpuset.
+ * tasks cpuset, except when the task is in the top cpuset.
**/
void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
unsigned long flags;
+ struct cpuset *cs;
spin_lock_irqsave(&callback_lock, flags);
- guarantee_online_cpus(tsk, pmask);
+ rcu_read_lock();
+
+ cs = task_cs(tsk);
+ if (cs != &top_cpuset)
+ guarantee_online_cpus(tsk, pmask);
+ /*
+ * Tasks in the top cpuset won't get update to their cpumasks
+ * when a hotplug online/offline event happens. So we include all
+ * offline cpus in the allowed cpu list.
+ */
+ if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
+ const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
+
+ /*
+ * We first exclude cpus allocated to partitions. If there is no
+ * allowable online cpu left, we fall back to all possible cpus.
+ */
+ cpumask_andnot(pmask, possible_mask, top_cpuset.subparts_cpus);
+ if (!cpumask_intersects(pmask, cpu_online_mask))
+ cpumask_copy(pmask, possible_mask);
+ }
+
+ rcu_read_unlock();
spin_unlock_irqrestore(&callback_lock, flags);
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d56328e5080e..c4be13e50547 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4813,19 +4813,17 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
epc = &cpc->epc;
-
+ raw_spin_lock_irq(&ctx->lock);
if (!epc->ctx) {
atomic_set(&epc->refcount, 1);
epc->embedded = 1;
- raw_spin_lock_irq(&ctx->lock);
list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
epc->ctx = ctx;
- raw_spin_unlock_irq(&ctx->lock);
} else {
WARN_ON_ONCE(epc->ctx != ctx);
atomic_inc(&epc->refcount);
}
-
+ raw_spin_unlock_irq(&ctx->lock);
return epc;
}
@@ -4896,33 +4894,30 @@ static void free_epc_rcu(struct rcu_head *head)
static void put_pmu_ctx(struct perf_event_pmu_context *epc)
{
+ struct perf_event_context *ctx = epc->ctx;
unsigned long flags;
- if (!atomic_dec_and_test(&epc->refcount))
+ /*
+ * XXX
+ *
+ * lockdep_assert_held(&ctx->mutex);
+ *
+ * can't because of the call-site in _free_event()/put_event()
+ * which isn't always called under ctx->mutex.
+ */
+ if (!atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, flags))
return;
- if (epc->ctx) {
- struct perf_event_context *ctx = epc->ctx;
+ WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
- /*
- * XXX
- *
- * lockdep_assert_held(&ctx->mutex);
- *
- * can't because of the call-site in _free_event()/put_event()
- * which isn't always called under ctx->mutex.
- */
-
- WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
- raw_spin_lock_irqsave(&ctx->lock, flags);
- list_del_init(&epc->pmu_ctx_entry);
- epc->ctx = NULL;
- raw_spin_unlock_irqrestore(&ctx->lock, flags);
- }
+ list_del_init(&epc->pmu_ctx_entry);
+ epc->ctx = NULL;
WARN_ON_ONCE(!list_empty(&epc->pinned_active));
WARN_ON_ONCE(!list_empty(&epc->flexible_active));
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
+
if (epc->embedded)
return;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 5c3fb6168eef..798a9042421f 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1915,7 +1915,7 @@ static void debugfs_add_domain_dir(struct irq_domain *d)
static void debugfs_remove_domain_dir(struct irq_domain *d)
{
- debugfs_remove(debugfs_lookup(d->name, domain_dir));
+ debugfs_lookup_and_remove(d->name, domain_dir);
}
void __init irq_domain_debugfs_init(struct dentry *root)
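[Brief rationale, not part of the patch: debugfs_lookup() takes a reference on the dentry that a plain debugfs_remove() never drops, so the old pattern leaked a dentry reference on every domain removal; debugfs_lookup_and_remove() looks up, removes and puts the reference in one call. The same conversion is applied to kernel/power/energy_model.c below.]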
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 010cf4e6d0b8..728f434de2bb 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -901,8 +901,9 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
* then we need to wake the new top waiter up to try
* to get the lock.
*/
- if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
- wake_up_state(waiter->task, waiter->wake_state);
+ top_waiter = rt_mutex_top_waiter(lock);
+ if (prerequeue_top_waiter != top_waiter)
+ wake_up_state(top_waiter->task, top_waiter->wake_state);
raw_spin_unlock_irq(&lock->wait_lock);
return 0;
}
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 60a1d3051cc7..4b31629c5be4 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -118,7 +118,6 @@ config PM_SLEEP
def_bool y
depends on SUSPEND || HIBERNATE_CALLBACKS
select PM
- select SRCU
config PM_SLEEP_SMP
def_bool y
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
index f82111837b8d..7b44f5b89fa1 100644
--- a/kernel/power/energy_model.c
+++ b/kernel/power/energy_model.c
@@ -87,10 +87,7 @@ static void em_debug_create_pd(struct device *dev)
static void em_debug_remove_pd(struct device *dev)
{
- struct dentry *debug_dir;
-
- debug_dir = debugfs_lookup(dev_name(dev), rootdir);
- debugfs_remove_recursive(debug_dir);
+ debugfs_lookup_and_remove(dev_name(dev), rootdir);
}
static int __init em_debug_init(void)
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 277434b6c0bf..36a1df48280c 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -581,7 +581,7 @@ static int save_image(struct swap_map_handle *handle,
return ret;
}
-/**
+/*
* Structure used for CRC32.
*/
struct crc_data {
@@ -596,7 +596,7 @@ struct crc_data {
unsigned char *unc[LZO_THREADS]; /* uncompressed data */
};
-/**
+/*
* CRC32 update function that runs in its own thread.
*/
static int crc32_threadfn(void *data)
@@ -623,7 +623,7 @@ static int crc32_threadfn(void *data)
}
return 0;
}
-/**
+/*
* Structure used for LZO data compression.
*/
struct cmp_data {
@@ -640,7 +640,7 @@ struct cmp_data {
unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */
};
-/**
+/*
* Compression function that runs in its own thread.
*/
static int lzo_compress_threadfn(void *data)
@@ -948,9 +948,9 @@ out_finish:
return error;
}
-/**
+/*
* The following functions allow us to read data using a swap map
- * in a file-alike way
+ * in a file-like way.
*/
static void release_swap_reader(struct swap_map_handle *handle)
@@ -1107,7 +1107,7 @@ static int load_image(struct swap_map_handle *handle,
return ret;
}
-/**
+/*
* Structure used for LZO data decompression.
*/
struct dec_data {
@@ -1123,7 +1123,7 @@ struct dec_data {
unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
};
-/**
+/*
* Decompression function that runs in its own thread.
*/
static int lzo_decompress_threadfn(void *data)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index f47274de012b..c09792c551bf 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -833,6 +833,7 @@ static void do_bpf_send_signal(struct irq_work *entry)
work = container_of(entry, struct send_signal_irq_work, irq_work);
group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
+ put_task_struct(work->task);
}
static int bpf_send_signal_common(u32 sig, enum pid_type type)
@@ -867,7 +868,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
* to the irq_work. The current task may change when queued
* irq works get executed.
*/
- work->task = current;
+ work->task = get_task_struct(current);
work->sig = sig;
work->type = type;
irq_work_queue(&work->irq_work);
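[Brief rationale, not part of the patch: the queued irq_work may run after the task that queued it has exited, so bpf_send_signal_common() now pins current with get_task_struct(), and do_bpf_send_signal() drops that reference with put_task_struct() once the signal has been delivered.]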
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 78ed5f1baa8c..c9e40f692650 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -9148,9 +9148,6 @@ buffer_percent_write(struct file *filp, const char __user *ubuf,
if (val > 100)
return -EINVAL;
- if (!val)
- val = 1;
-
tr->buffer_percent = val;
(*ppos)++;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 4eb6d6b97a9f..085a31b978a5 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1282,6 +1282,7 @@ struct ftrace_event_field {
int offset;
int size;
int is_signed;
+ int len;
};
struct prog_entry;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 33e0b4f8ebe6..6a4696719297 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -114,7 +114,7 @@ trace_find_event_field(struct trace_event_call *call, char *name)
static int __trace_define_field(struct list_head *head, const char *type,
const char *name, int offset, int size,
- int is_signed, int filter_type)
+ int is_signed, int filter_type, int len)
{
struct ftrace_event_field *field;
@@ -133,6 +133,7 @@ static int __trace_define_field(struct list_head *head, const char *type,
field->offset = offset;
field->size = size;
field->is_signed = is_signed;
+ field->len = len;
list_add(&field->link, head);
@@ -150,14 +151,28 @@ int trace_define_field(struct trace_event_call *call, const char *type,
head = trace_get_fields(call);
return __trace_define_field(head, type, name, offset, size,
- is_signed, filter_type);
+ is_signed, filter_type, 0);
}
EXPORT_SYMBOL_GPL(trace_define_field);
+int trace_define_field_ext(struct trace_event_call *call, const char *type,
+ const char *name, int offset, int size, int is_signed,
+ int filter_type, int len)
+{
+ struct list_head *head;
+
+ if (WARN_ON(!call->class))
+ return 0;
+
+ head = trace_get_fields(call);
+ return __trace_define_field(head, type, name, offset, size,
+ is_signed, filter_type, len);
+}
+
#define __generic_field(type, item, filter_type) \
ret = __trace_define_field(&ftrace_generic_fields, #type, \
#item, 0, 0, is_signed_type(type), \
- filter_type); \
+ filter_type, 0); \
if (ret) \
return ret;
@@ -166,7 +181,7 @@ EXPORT_SYMBOL_GPL(trace_define_field);
"common_" #item, \
offsetof(typeof(ent), item), \
sizeof(ent.item), \
- is_signed_type(type), FILTER_OTHER); \
+ is_signed_type(type), FILTER_OTHER, 0); \
if (ret) \
return ret;
@@ -1588,12 +1603,17 @@ static int f_show(struct seq_file *m, void *v)
seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
field->type, field->name, field->offset,
field->size, !!field->is_signed);
- else
- seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+ else if (field->len)
+ seq_printf(m, "\tfield:%.*s %s[%d];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
(int)(array_descriptor - field->type),
field->type, field->name,
- array_descriptor, field->offset,
+ field->len, field->offset,
field->size, !!field->is_signed);
+ else
+ seq_printf(m, "\tfield:%.*s %s[];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+ (int)(array_descriptor - field->type),
+ field->type, field->name,
+ field->offset, field->size, !!field->is_signed);
return 0;
}
@@ -2379,9 +2399,10 @@ event_define_fields(struct trace_event_call *call)
}
offset = ALIGN(offset, field->align);
- ret = trace_define_field(call, field->type, field->name,
+ ret = trace_define_field_ext(call, field->type, field->name,
offset, field->size,
- field->is_signed, field->filter_type);
+ field->is_signed, field->filter_type,
+ field->len);
if (WARN_ON_ONCE(ret)) {
pr_err("error code is %d\n", ret);
break;
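[Illustrative only, not part of the patch: with the new len value plumbed through trace_define_field_ext(), a statically sized event array whose length comes from an enum constant is rendered in the event's format file with its numeric element count; offsets and sizes below are made up.]

	field:char comm[TASK_COMM_LEN];	offset:8;	size:16;	signed:0;	(before: symbolic size, hard to parse)
	field:char comm[16];		offset:8;	size:16;	signed:0;	(after: uses field->len)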
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index d960f6b11b5e..58f3946081e2 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -111,7 +111,8 @@ static void __always_unused ____ftrace_check_##name(void) \
#define __array(_type, _item, _len) { \
.type = #_type"["__stringify(_len)"]", .name = #_item, \
.size = sizeof(_type[_len]), .align = __alignof__(_type), \
- is_signed_type(_type), .filter_type = FILTER_OTHER },
+ is_signed_type(_type), .filter_type = FILTER_OTHER, \
+ .len = _len },
#undef __array_desc
#define __array_desc(_type, _container, _item, _len) __array(_type, _item, _len)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 61a9425a311f..02ee440f7be3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -754,6 +754,7 @@ config DEBUG_KMEMLEAK
select KALLSYMS
select CRC32
select STACKDEPOT
+ select STACKDEPOT_ALWAYS_INIT if !DEBUG_KMEMLEAK_DEFAULT_OFF
help
Say Y here if you want to enable the memory leak
detector. The memory allocation/freeing is traced in a way
@@ -1207,7 +1208,7 @@ config SCHED_DEBUG
depends on DEBUG_KERNEL && PROC_FS
default y
help
- If you say Y here, the /proc/sched_debug file will be provided
+ If you say Y here, the /sys/kernel/debug/sched file will be provided
that can help debug the scheduler. The runtime overhead of this
option is minimal.
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 9555b68bb774..1dcca8f2e194 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -49,3 +49,34 @@ int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
+
+int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+
+ /* Otherwise do it the slow way */
+ raw_spin_lock(lock);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ raw_spin_unlock(lock);
+ return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock);
+
+int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
+ unsigned long *flags)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+
+ /* Otherwise do it the slow way */
+ raw_spin_lock_irqsave(lock, *flags);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ raw_spin_unlock_irqrestore(lock, *flags);
+ return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);
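[Illustrative only, not part of the patch: the intended calling pattern for the new raw-spinlock variant, mirroring how put_pmu_ctx() above uses it; the object and field names are assumptions.]

static void example_put(struct example_obj *obj)
{
	unsigned long flags;

	/* Fast path: this was not the last reference, no lock is taken */
	if (!atomic_dec_and_raw_lock_irqsave(&obj->refcount, &obj->lock, flags))
		return;

	/* Refcount hit zero: obj->lock is held and interrupts are disabled */
	list_del_init(&obj->node);
	raw_spin_unlock_irqrestore(&obj->lock, flags);
	kfree(obj);
}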
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index f5b50babe38d..05a09652f5a1 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -241,24 +241,34 @@ void kunit_mem_assert_format(const struct kunit_assert *assert,
mem_assert = container_of(assert, struct kunit_mem_assert,
assert);
- string_stream_add(stream,
- KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
- mem_assert->text->left_text,
- mem_assert->text->operation,
- mem_assert->text->right_text);
+ if (!mem_assert->left_value) {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
+ mem_assert->text->left_text);
+ } else if (!mem_assert->right_value) {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
+ mem_assert->text->right_text);
+ } else {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ mem_assert->text->left_text,
+ mem_assert->text->operation,
+ mem_assert->text->right_text);
- string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
- mem_assert->text->left_text);
- kunit_assert_hexdump(stream, mem_assert->left_value,
- mem_assert->right_value, mem_assert->size);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
+ mem_assert->text->left_text);
+ kunit_assert_hexdump(stream, mem_assert->left_value,
+ mem_assert->right_value, mem_assert->size);
- string_stream_add(stream, "\n");
+ string_stream_add(stream, "\n");
- string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
- mem_assert->text->right_text);
- kunit_assert_hexdump(stream, mem_assert->right_value,
- mem_assert->left_value, mem_assert->size);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
+ mem_assert->text->right_text);
+ kunit_assert_hexdump(stream, mem_assert->right_value,
+ mem_assert->left_value, mem_assert->size);
- kunit_assert_print_msg(message, stream);
+ kunit_assert_print_msg(message, stream);
+ }
}
EXPORT_SYMBOL_GPL(kunit_mem_assert_format);
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index c9ebf975e56b..890ba5b3a981 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -21,6 +21,7 @@
#include "try-catch-impl.h"
DEFINE_STATIC_KEY_FALSE(kunit_running);
+EXPORT_SYMBOL_GPL(kunit_running);
#if IS_BUILTIN(CONFIG_KUNIT)
/*
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 26e2045d3cda..5a976393c9ae 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -670,12 +670,13 @@ static inline unsigned long mte_pivot(const struct maple_enode *mn,
unsigned char piv)
{
struct maple_node *node = mte_to_node(mn);
+ enum maple_type type = mte_node_type(mn);
- if (piv >= mt_pivots[piv]) {
+ if (piv >= mt_pivots[type]) {
WARN_ON(1);
return 0;
}
- switch (mte_node_type(mn)) {
+ switch (type) {
case maple_arange_64:
return node->ma64.pivot[piv];
case maple_range_64:
@@ -4887,7 +4888,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
unsigned long *pivots, *gaps;
void __rcu **slots;
unsigned long gap = 0;
- unsigned long max, min, index;
+ unsigned long max, min;
unsigned char offset;
if (unlikely(mas_is_err(mas)))
@@ -4909,8 +4910,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
min = mas_safe_min(mas, pivots, --offset);
max = mas_safe_pivot(mas, pivots, offset, type);
- index = mas->index;
- while (index <= max) {
+ while (mas->index <= max) {
gap = 0;
if (gaps)
gap = gaps[offset];
@@ -4941,10 +4941,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
min = mas_safe_min(mas, pivots, offset);
}
- if (unlikely(index > max)) {
- mas_set_err(mas, -EBUSY);
- return false;
- }
+ if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
+ goto no_space;
if (unlikely(ma_is_leaf(type))) {
mas->offset = offset;
@@ -4961,9 +4959,11 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
return false;
ascend:
- if (mte_is_root(mas->node))
- mas_set_err(mas, -EBUSY);
+ if (!mte_is_root(mas->node))
+ return false;
+no_space:
+ mas_set_err(mas, -EBUSY);
return false;
}
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 497fc93ccf9e..ec847bf4dcb4 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -2517,6 +2517,91 @@ static noinline void check_bnode_min_spanning(struct maple_tree *mt)
mt_set_non_kernel(0);
}
+static noinline void check_empty_area_window(struct maple_tree *mt)
+{
+ unsigned long i, nr_entries = 20;
+ MA_STATE(mas, mt, 0, 0);
+
+ for (i = 1; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 9,
+ xa_mk_value(i), GFP_KERNEL);
+
+ /* Create another hole besides the one at 0 */
+ mtree_store_range(mt, 160, 169, NULL, GFP_KERNEL);
+
+ /* Check lower bounds that don't fit */
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 10) != -EBUSY);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 6, 90, 5) != -EBUSY);
+
+ /* Check lower bound that does fit */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 5) != 0);
+ MT_BUG_ON(mt, mas.index != 5);
+ MT_BUG_ON(mt, mas.last != 9);
+ rcu_read_unlock();
+
+ /* Check one gap that doesn't fit and one that does */
+ rcu_read_lock();
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 217, 9) != 0);
+ MT_BUG_ON(mt, mas.index != 161);
+ MT_BUG_ON(mt, mas.last != 169);
+
+ /* Check one gap that does fit above the min */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 3) != 0);
+ MT_BUG_ON(mt, mas.index != 216);
+ MT_BUG_ON(mt, mas.last != 218);
+
+ /* Check size that doesn't fit any gap */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 16) != -EBUSY);
+
+ /*
+ * Check size that doesn't fit the lower end of the window but
+ * does fit the gap
+ */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 167, 200, 4) != -EBUSY);
+
+ /*
+ * Check size that doesn't fit the upper end of the window but
+ * does fit the gap
+ */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 162, 4) != -EBUSY);
+
+ /* Check mas_empty_area forward */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 9) != 0);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 8);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 4) != 0);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 3);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 11) != -EBUSY);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 5, 100, 6) != -EBUSY);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 8, 10) != -EBUSY);
+
+ mas_reset(&mas);
+ mas_empty_area(&mas, 100, 165, 3);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 100, 163, 6) != -EBUSY);
+ rcu_read_unlock();
+}
+
static DEFINE_MTREE(tree);
static int maple_tree_seed(void)
{
@@ -2765,6 +2850,10 @@ static int maple_tree_seed(void)
check_bnode_min_spanning(&tree);
mtree_destroy(&tree);
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_empty_area_window(&tree);
+ mtree_destroy(&tree);
+
#if defined(BENCH)
skip:
#endif
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7fcdb98c9e68..bdbfeb6fb393 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5051,6 +5051,9 @@ again:
entry = huge_pte_clear_uffd_wp(entry);
set_huge_pte_at(dst, addr, dst_pte, entry);
} else if (unlikely(is_pte_marker(entry))) {
+ /* No swap on hugetlb */
+ WARN_ON_ONCE(
+ is_swapin_error_entry(pte_to_swp_entry(entry)));
/*
* We copy the pte marker only if the dst vma has
* uffd-wp enabled.
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 79be13133322..90acfea40c13 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -847,6 +847,10 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
return SCAN_SUCCEED;
}
+/*
+ * See pmd_trans_unstable() for how the result may change out from
+ * underneath us, even if we hold mmap_lock in read.
+ */
static int find_pmd_or_thp_or_none(struct mm_struct *mm,
unsigned long address,
pmd_t **pmd)
@@ -865,8 +869,12 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
#endif
if (pmd_none(pmde))
return SCAN_PMD_NONE;
+ if (!pmd_present(pmde))
+ return SCAN_PMD_NULL;
if (pmd_trans_huge(pmde))
return SCAN_PMD_MAPPED;
+ if (pmd_devmap(pmde))
+ return SCAN_PMD_NULL;
if (pmd_bad(pmde))
return SCAN_PMD_NULL;
return SCAN_SUCCEED;
@@ -1642,7 +1650,7 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
* has higher cost too. It would also probably require locking
* the anon_vma.
*/
- if (vma->anon_vma) {
+ if (READ_ONCE(vma->anon_vma)) {
result = SCAN_PAGE_ANON;
goto next;
}
@@ -1671,6 +1679,18 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
if ((cc->is_khugepaged || is_target) &&
mmap_write_trylock(mm)) {
/*
+ * Re-check whether we have an ->anon_vma, because
+ * collapse_and_free_pmd() requires that either no
+ * ->anon_vma exists or the anon_vma is locked.
+ * We already checked ->anon_vma above, but that check
+ * is racy because ->anon_vma can be populated under the
+ * mmap lock in read mode.
+ */
+ if (vma->anon_vma) {
+ result = SCAN_PAGE_ANON;
+ goto unlock_next;
+ }
+ /*
* When a vma is registered with uffd-wp, we can't
* recycle the pmd pgtable because there can be pte
* markers installed. Skip it only, so the rest mm/vma
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 92f670edbf51..55dc8b8b0616 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -2070,8 +2070,10 @@ static int __init kmemleak_boot_config(char *str)
return -EINVAL;
if (strcmp(str, "off") == 0)
kmemleak_disable();
- else if (strcmp(str, "on") == 0)
+ else if (strcmp(str, "on") == 0) {
kmemleak_skip_disable = 1;
+ stack_depot_want_early_init();
+ }
else
return -EINVAL;
return 0;
@@ -2093,7 +2095,6 @@ void __init kmemleak_init(void)
if (kmemleak_error)
return;
- stack_depot_init();
jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
diff --git a/mm/memblock.c b/mm/memblock.c
index 685e30e6d27c..d036c7861310 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1640,13 +1640,7 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
end = PFN_DOWN(base + size);
for (; cursor < end; cursor++) {
- /*
- * Reserved pages are always initialized by the end of
- * memblock_free_all() (by memmap_init() and, if deferred
- * initialization is enabled, memmap_init_reserved_pages()), so
- * these pages can be released directly to the buddy allocator.
- */
- __free_pages_core(pfn_to_page(cursor), 0);
+ memblock_free_pages(pfn_to_page(cursor), cursor, 0);
totalram_pages_inc();
}
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ab457f0394ab..73afff8062f9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -63,7 +63,6 @@
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
-#include <linux/parser.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
@@ -2393,8 +2392,7 @@ static unsigned long reclaim_high(struct mem_cgroup *memcg,
psi_memstall_enter(&pflags);
nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
gfp_mask,
- MEMCG_RECLAIM_MAY_SWAP,
- NULL);
+ MEMCG_RECLAIM_MAY_SWAP);
psi_memstall_leave(&pflags);
} while ((memcg = parent_mem_cgroup(memcg)) &&
!mem_cgroup_is_root(memcg));
@@ -2685,8 +2683,7 @@ retry:
psi_memstall_enter(&pflags);
nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
- gfp_mask, reclaim_options,
- NULL);
+ gfp_mask, reclaim_options);
psi_memstall_leave(&pflags);
if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
@@ -3506,8 +3503,7 @@ static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
}
if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
- memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP,
- NULL)) {
+ memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
ret = -EBUSY;
break;
}
@@ -3618,8 +3614,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
return -EINTR;
if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
- MEMCG_RECLAIM_MAY_SWAP,
- NULL))
+ MEMCG_RECLAIM_MAY_SWAP))
nr_retries--;
}
@@ -6429,8 +6424,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
}
reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
- GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP,
- NULL);
+ GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
if (!reclaimed && !nr_retries--)
break;
@@ -6479,8 +6473,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
if (nr_reclaims) {
if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
- GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP,
- NULL))
+ GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
nr_reclaims--;
continue;
}
@@ -6603,54 +6596,21 @@ static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
return nbytes;
}
-enum {
- MEMORY_RECLAIM_NODES = 0,
- MEMORY_RECLAIM_NULL,
-};
-
-static const match_table_t if_tokens = {
- { MEMORY_RECLAIM_NODES, "nodes=%s" },
- { MEMORY_RECLAIM_NULL, NULL },
-};
-
static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
unsigned int nr_retries = MAX_RECLAIM_RETRIES;
unsigned long nr_to_reclaim, nr_reclaimed = 0;
- unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP |
- MEMCG_RECLAIM_PROACTIVE;
- char *old_buf, *start;
- substring_t args[MAX_OPT_ARGS];
- int token;
- char value[256];
- nodemask_t nodemask = NODE_MASK_ALL;
-
- buf = strstrip(buf);
-
- old_buf = buf;
- nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE;
- if (buf == old_buf)
- return -EINVAL;
+ unsigned int reclaim_options;
+ int err;
buf = strstrip(buf);
+ err = page_counter_memparse(buf, "", &nr_to_reclaim);
+ if (err)
+ return err;
- while ((start = strsep(&buf, " ")) != NULL) {
- if (!strlen(start))
- continue;
- token = match_token(start, if_tokens, args);
- match_strlcpy(value, args, sizeof(value));
- switch (token) {
- case MEMORY_RECLAIM_NODES:
- if (nodelist_parse(value, nodemask) < 0)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
- }
-
+ reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
while (nr_reclaimed < nr_to_reclaim) {
unsigned long reclaimed;
@@ -6667,8 +6627,7 @@ static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
reclaimed = try_to_free_mem_cgroup_pages(memcg,
nr_to_reclaim - nr_reclaimed,
- GFP_KERNEL, reclaim_options,
- &nodemask);
+ GFP_KERNEL, reclaim_options);
if (!reclaimed && !nr_retries--)
return -EAGAIN;
diff --git a/mm/memory.c b/mm/memory.c
index aad226daf41b..3e836fecd035 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -828,12 +828,8 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
return -EBUSY;
return -ENOENT;
} else if (is_pte_marker_entry(entry)) {
- /*
- * We're copying the pgtable should only because dst_vma has
- * uffd-wp enabled, do sanity check.
- */
- WARN_ON_ONCE(!userfaultfd_wp(dst_vma));
- set_pte_at(dst_mm, addr, dst_pte, pte);
+ if (is_swapin_error_entry(entry) || userfaultfd_wp(dst_vma))
+ set_pte_at(dst_mm, addr, dst_pte, pte);
return 0;
}
if (!userfaultfd_wp(dst_vma))
@@ -3629,8 +3625,12 @@ static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
/*
* Be careful so that we will only recover a special uffd-wp pte into a
* none pte. Otherwise it means the pte could have changed, so retry.
+ *
+ * This should also cover the case where e.g. the pte changed
+ * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_SWAPIN_ERROR.
+ * So is_pte_marker() check is not enough to safely drop the pte.
*/
- if (is_pte_marker(*vmf->pte))
+ if (pte_same(vmf->orig_pte, *vmf->pte))
pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
pte_unmap_unlock(vmf->pte, vmf->ptl);
return 0;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 02c8a712282f..f940395667c8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -600,7 +600,8 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
if (flags & (MPOL_MF_MOVE_ALL) ||
- (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
+ (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
+ !hugetlb_pmd_shared(pte))) {
if (isolate_hugetlb(page, qp->pagelist) &&
(flags & MPOL_MF_STRICT))
/*
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 908df12caa26..61cf60015a8b 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -245,7 +245,13 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
newpte = pte_swp_mksoft_dirty(newpte);
if (pte_swp_uffd_wp(oldpte))
newpte = pte_swp_mkuffd_wp(newpte);
- } else if (pte_marker_entry_uffd_wp(entry)) {
+ } else if (is_pte_marker_entry(entry)) {
+ /*
+ * Ignore swapin errors unconditionally,
+ * because any access should sigbus anyway.
+ */
+ if (is_swapin_error_entry(entry))
+ continue;
/*
* If this is uffd-wp pte marker and we'd like
* to unprotect it, drop it; the next page
diff --git a/mm/mremap.c b/mm/mremap.c
index fe587c5d6591..930f65c315c0 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -1027,16 +1027,29 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
}
/*
- * Function vma_merge() is called on the extension we are adding to
- * the already existing vma, vma_merge() will merge this extension with
- * the already existing vma (expand operation itself) and possibly also
- * with the next vma if it becomes adjacent to the expanded vma and
- * otherwise compatible.
+ * Function vma_merge() is called on the extension we
+ * are adding to the already existing vma, vma_merge()
+ * will merge this extension with the already existing
+ * vma (expand operation itself) and possibly also with
+ * the next vma if it becomes adjacent to the expanded
+ * vma and otherwise compatible.
+ *
+ * However, vma_merge() can currently fail due to
+ * is_mergeable_vma() check for vm_ops->close (see the
+ * comment there). Yet this should not prevent vma
+ * expanding, so perform a simple expand for such vma.
+ * Ideally the check for close op should be only done
+ * when a vma would be actually removed due to a merge.
*/
- vma = vma_merge(mm, vma, extension_start, extension_end,
+ if (!vma->vm_ops || !vma->vm_ops->close) {
+ vma = vma_merge(mm, vma, extension_start, extension_end,
vma->vm_flags, vma->anon_vma, vma->vm_file,
extension_pgoff, vma_policy(vma),
vma->vm_userfaultfd_ctx, anon_vma_name(vma));
+ } else if (vma_adjust(vma, vma->vm_start, addr + new_len,
+ vma->vm_pgoff, NULL)) {
+ vma = NULL;
+ }
if (!vma) {
vm_unacct_memory(pages);
ret = -ENOMEM;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0745aedebb37..3bb3484563ed 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5631,9 +5631,12 @@ EXPORT_SYMBOL(get_zeroed_page);
*/
void __free_pages(struct page *page, unsigned int order)
{
+ /* get PageHead before we drop reference */
+ int head = PageHead(page);
+
if (put_page_testzero(page))
free_the_page(page, order);
- else if (!PageHead(page))
+ else if (!head)
while (order-- > 0)
free_the_page(page + (1 << order), order);
}
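[Brief rationale, not part of the patch: if put_page_testzero() drops what turns out not to be the last reference to a compound page, the remaining reference can be released concurrently, letting the page be freed and reallocated so that PageHead() no longer describes the page being freed here; latching the head flag into a local before the put keeps the tail-page fallback loop from misfiring.]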
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 908a529bca12..4fa440e87cd6 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1100,6 +1100,7 @@ start_over:
goto check_out;
pr_debug("scan_swap_map of si %d failed to find offset\n",
si->type);
+ cond_resched();
spin_lock(&swap_avail_lock);
nextsi:
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bd6637fcd8f9..bf3eedf0209c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3323,13 +3323,16 @@ void lru_gen_migrate_mm(struct mm_struct *mm)
if (mem_cgroup_disabled())
return;
+ /* migration can happen before addition */
+ if (!mm->lru_gen.memcg)
+ return;
+
rcu_read_lock();
memcg = mem_cgroup_from_task(task);
rcu_read_unlock();
if (memcg == mm->lru_gen.memcg)
return;
- VM_WARN_ON_ONCE(!mm->lru_gen.memcg);
VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list));
lru_gen_del_mm(mm);
@@ -6754,8 +6757,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
- unsigned int reclaim_options,
- nodemask_t *nodemask)
+ unsigned int reclaim_options)
{
unsigned long nr_reclaimed;
unsigned int noreclaim_flag;
@@ -6770,7 +6772,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
.may_unmap = 1,
.may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
.proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
- .nodemask = nodemask,
};
/*
* Traverse the ZONELIST_FALLBACK zonelist of the current node to put
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 9445bee6b014..702bc3fd687a 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -113,7 +113,23 @@
* have room for two bit at least.
*/
#define OBJ_ALLOCATED_TAG 1
-#define OBJ_TAG_BITS 1
+
+#ifdef CONFIG_ZPOOL
+/*
+ * The second least-significant bit in the object's header identifies if the
+ * value stored at the header is a deferred handle from the last reclaim
+ * attempt.
+ *
+ * As noted above, this is valid because we have room for two bits.
+ */
+#define OBJ_DEFERRED_HANDLE_TAG 2
+#define OBJ_TAG_BITS 2
+#define OBJ_TAG_MASK (OBJ_ALLOCATED_TAG | OBJ_DEFERRED_HANDLE_TAG)
+#else
+#define OBJ_TAG_BITS 1
+#define OBJ_TAG_MASK OBJ_ALLOCATED_TAG
+#endif /* CONFIG_ZPOOL */
+
#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
@@ -222,6 +238,12 @@ struct link_free {
* Handle of allocated object.
*/
unsigned long handle;
+#ifdef CONFIG_ZPOOL
+ /*
+ * Deferred handle of a reclaimed object.
+ */
+ unsigned long deferred_handle;
+#endif
};
};
@@ -272,8 +294,6 @@ struct zspage {
/* links the zspage to the lru list in the pool */
struct list_head lru;
bool under_reclaim;
- /* list of unfreed handles whose objects have been reclaimed */
- unsigned long *deferred_handles;
#endif
struct zs_pool *pool;
@@ -897,7 +917,8 @@ static unsigned long handle_to_obj(unsigned long handle)
return *(unsigned long *)handle;
}
-static bool obj_allocated(struct page *page, void *obj, unsigned long *phandle)
+static bool obj_tagged(struct page *page, void *obj, unsigned long *phandle,
+ int tag)
{
unsigned long handle;
struct zspage *zspage = get_zspage(page);
@@ -908,13 +929,27 @@ static bool obj_allocated(struct page *page, void *obj, unsigned long *phandle)
} else
handle = *(unsigned long *)obj;
- if (!(handle & OBJ_ALLOCATED_TAG))
+ if (!(handle & tag))
return false;
- *phandle = handle & ~OBJ_ALLOCATED_TAG;
+ /* Clear all tags before returning the handle */
+ *phandle = handle & ~OBJ_TAG_MASK;
return true;
}
+static inline bool obj_allocated(struct page *page, void *obj, unsigned long *phandle)
+{
+ return obj_tagged(page, obj, phandle, OBJ_ALLOCATED_TAG);
+}
+
+#ifdef CONFIG_ZPOOL
+static bool obj_stores_deferred_handle(struct page *page, void *obj,
+ unsigned long *phandle)
+{
+ return obj_tagged(page, obj, phandle, OBJ_DEFERRED_HANDLE_TAG);
+}
+#endif
+
static void reset_page(struct page *page)
{
__ClearPageMovable(page);
@@ -946,22 +981,36 @@ unlock:
}
#ifdef CONFIG_ZPOOL
+static unsigned long find_deferred_handle_obj(struct size_class *class,
+ struct page *page, int *obj_idx);
+
/*
* Free all the deferred handles whose objects are freed in zs_free.
*/
-static void free_handles(struct zs_pool *pool, struct zspage *zspage)
+static void free_handles(struct zs_pool *pool, struct size_class *class,
+ struct zspage *zspage)
{
- unsigned long handle = (unsigned long)zspage->deferred_handles;
+ int obj_idx = 0;
+ struct page *page = get_first_page(zspage);
+ unsigned long handle;
- while (handle) {
- unsigned long nxt_handle = handle_to_obj(handle);
+ while (1) {
+ handle = find_deferred_handle_obj(class, page, &obj_idx);
+ if (!handle) {
+ page = get_next_page(page);
+ if (!page)
+ break;
+ obj_idx = 0;
+ continue;
+ }
cache_free_handle(pool, handle);
- handle = nxt_handle;
+ obj_idx++;
}
}
#else
-static inline void free_handles(struct zs_pool *pool, struct zspage *zspage) {}
+static inline void free_handles(struct zs_pool *pool, struct size_class *class,
+ struct zspage *zspage) {}
#endif
static void __free_zspage(struct zs_pool *pool, struct size_class *class,
@@ -979,7 +1028,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
VM_BUG_ON(fg != ZS_EMPTY);
/* Free all deferred handles from zs_free */
- free_handles(pool, zspage);
+ free_handles(pool, class, zspage);
next = page = get_first_page(zspage);
do {
@@ -1067,7 +1116,6 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
#ifdef CONFIG_ZPOOL
INIT_LIST_HEAD(&zspage->lru);
zspage->under_reclaim = false;
- zspage->deferred_handles = NULL;
#endif
set_freeobj(zspage, 0);
@@ -1568,7 +1616,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(zs_malloc);
-static void obj_free(int class_size, unsigned long obj)
+static void obj_free(int class_size, unsigned long obj, unsigned long *handle)
{
struct link_free *link;
struct zspage *zspage;
@@ -1582,15 +1630,29 @@ static void obj_free(int class_size, unsigned long obj)
zspage = get_zspage(f_page);
vaddr = kmap_atomic(f_page);
-
- /* Insert this object in containing zspage's freelist */
link = (struct link_free *)(vaddr + f_offset);
- if (likely(!ZsHugePage(zspage)))
- link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
- else
- f_page->index = 0;
+
+ if (handle) {
+#ifdef CONFIG_ZPOOL
+ /* Stores the (deferred) handle in the object's header */
+ *handle |= OBJ_DEFERRED_HANDLE_TAG;
+ *handle &= ~OBJ_ALLOCATED_TAG;
+
+ if (likely(!ZsHugePage(zspage)))
+ link->deferred_handle = *handle;
+ else
+ f_page->index = *handle;
+#endif
+ } else {
+ /* Insert this object in containing zspage's freelist */
+ if (likely(!ZsHugePage(zspage)))
+ link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
+ else
+ f_page->index = 0;
+ set_freeobj(zspage, f_objidx);
+ }
+
kunmap_atomic(vaddr);
- set_freeobj(zspage, f_objidx);
mod_zspage_inuse(zspage, -1);
}
@@ -1615,7 +1677,6 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
zspage = get_zspage(f_page);
class = zspage_class(pool, zspage);
- obj_free(class->size, obj);
class_stat_dec(class, OBJ_USED, 1);
#ifdef CONFIG_ZPOOL
@@ -1624,15 +1685,15 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
* Reclaim needs the handles during writeback. It'll free
* them along with the zspage when it's done with them.
*
- * Record current deferred handle at the memory location
- * whose address is given by handle.
+ * Record current deferred handle in the object's header.
*/
- record_obj(handle, (unsigned long)zspage->deferred_handles);
- zspage->deferred_handles = (unsigned long *)handle;
+ obj_free(class->size, obj, &handle);
spin_unlock(&pool->lock);
return;
}
#endif
+ obj_free(class->size, obj, NULL);
+
fullness = fix_fullness_group(class, zspage);
if (fullness == ZS_EMPTY)
free_zspage(pool, class, zspage);
@@ -1713,11 +1774,11 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
}
/*
- * Find alloced object in zspage from index object and
+ * Find object with a certain tag in zspage from index object and
* return handle.
*/
-static unsigned long find_alloced_obj(struct size_class *class,
- struct page *page, int *obj_idx)
+static unsigned long find_tagged_obj(struct size_class *class,
+ struct page *page, int *obj_idx, int tag)
{
unsigned int offset;
int index = *obj_idx;
@@ -1728,7 +1789,7 @@ static unsigned long find_alloced_obj(struct size_class *class,
offset += class->size * index;
while (offset < PAGE_SIZE) {
- if (obj_allocated(page, addr + offset, &handle))
+ if (obj_tagged(page, addr + offset, &handle, tag))
break;
offset += class->size;
@@ -1742,6 +1803,28 @@ static unsigned long find_alloced_obj(struct size_class *class,
return handle;
}
+/*
+ * Find alloced object in zspage from index object and
+ * return handle.
+ */
+static unsigned long find_alloced_obj(struct size_class *class,
+ struct page *page, int *obj_idx)
+{
+ return find_tagged_obj(class, page, obj_idx, OBJ_ALLOCATED_TAG);
+}
+
+#ifdef CONFIG_ZPOOL
+/*
+ * Find object storing a deferred handle in header in zspage from index object
+ * and return handle.
+ */
+static unsigned long find_deferred_handle_obj(struct size_class *class,
+ struct page *page, int *obj_idx)
+{
+ return find_tagged_obj(class, page, obj_idx, OBJ_DEFERRED_HANDLE_TAG);
+}
+#endif
+
struct zs_compact_control {
/* Source spage for migration which could be a subpage of zspage */
struct page *s_page;
@@ -1784,7 +1867,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
zs_object_copy(class, free_obj, used_obj);
obj_idx++;
record_obj(handle, free_obj);
- obj_free(class->size, used_obj);
+ obj_free(class->size, used_obj, NULL);
}
/* Remember last position in this iteration */
@@ -2478,6 +2561,90 @@ void zs_destroy_pool(struct zs_pool *pool)
EXPORT_SYMBOL_GPL(zs_destroy_pool);
#ifdef CONFIG_ZPOOL
+static void restore_freelist(struct zs_pool *pool, struct size_class *class,
+ struct zspage *zspage)
+{
+ unsigned int obj_idx = 0;
+ unsigned long handle, off = 0; /* off is within-page offset */
+ struct page *page = get_first_page(zspage);
+ struct link_free *prev_free = NULL;
+ void *prev_page_vaddr = NULL;
+
+ /* in case no free object found */
+ set_freeobj(zspage, (unsigned int)(-1UL));
+
+ while (page) {
+ void *vaddr = kmap_atomic(page);
+ struct page *next_page;
+
+ while (off < PAGE_SIZE) {
+ void *obj_addr = vaddr + off;
+
+ /* skip allocated object */
+ if (obj_allocated(page, obj_addr, &handle)) {
+ obj_idx++;
+ off += class->size;
+ continue;
+ }
+
+ /* free deferred handle from reclaim attempt */
+ if (obj_stores_deferred_handle(page, obj_addr, &handle))
+ cache_free_handle(pool, handle);
+
+ if (prev_free)
+ prev_free->next = obj_idx << OBJ_TAG_BITS;
+ else /* first free object found */
+ set_freeobj(zspage, obj_idx);
+
+ prev_free = (struct link_free *)vaddr + off / sizeof(*prev_free);
+ /* if last free object in a previous page, need to unmap */
+ if (prev_page_vaddr) {
+ kunmap_atomic(prev_page_vaddr);
+ prev_page_vaddr = NULL;
+ }
+
+ obj_idx++;
+ off += class->size;
+ }
+
+ /*
+ * Handle the last (full or partial) object on this page.
+ */
+ next_page = get_next_page(page);
+ if (next_page) {
+ if (!prev_free || prev_page_vaddr) {
+ /*
+ * There is no free object in this page, so we can safely
+ * unmap it.
+ */
+ kunmap_atomic(vaddr);
+ } else {
+ /* update prev_page_vaddr since prev_free is on this page */
+ prev_page_vaddr = vaddr;
+ }
+ } else { /* this is the last page */
+ if (prev_free) {
+ /*
+ * Reset OBJ_TAG_BITS bit to last link to tell
+ * whether it's allocated object or not.
+ */
+ prev_free->next = -1UL << OBJ_TAG_BITS;
+ }
+
+ /* unmap previous page (if not done yet) */
+ if (prev_page_vaddr) {
+ kunmap_atomic(prev_page_vaddr);
+ prev_page_vaddr = NULL;
+ }
+
+ kunmap_atomic(vaddr);
+ }
+
+ page = next_page;
+ off %= PAGE_SIZE;
+ }
+}
+
static int zs_reclaim_page(struct zs_pool *pool, unsigned int retries)
{
int i, obj_idx, ret = 0;
@@ -2561,6 +2728,12 @@ next:
return 0;
}
+ /*
+ * Eviction fails on one of the handles, so we need to restore zspage.
+ * We need to rebuild its freelist (and free stored deferred handles),
+ * put it back to the correct size class, and add it to the LRU list.
+ */
+ restore_freelist(pool, class, zspage);
putback_zspage(class, zspage);
list_add(&zspage->lru, &pool->lru);
unlock_zspage(zspage);
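
The OBJ_ALLOCATED_TAG / OBJ_DEFERRED_HANDLE_TAG scheme introduced above works because the stored handle is at least word aligned, so its two low bits are free to carry flags. A minimal userspace sketch of that encoding (illustrative names, not the zsmalloc API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_ALLOCATED        0x1UL
#define TAG_DEFERRED_HANDLE  0x2UL
#define TAG_MASK             (TAG_ALLOCATED | TAG_DEFERRED_HANDLE)

/* pack flags into the low bits of an aligned handle */
static uintptr_t tag(uintptr_t handle, uintptr_t tags)
{
    assert((handle & TAG_MASK) == 0);   /* alignment gives us the spare bits */
    return handle | tags;
}

static int has_tag(uintptr_t word, uintptr_t t) { return (word & t) != 0; }

/* clear all tags before handing the handle back, as obj_tagged() does above */
static uintptr_t untag(uintptr_t word) { return word & ~TAG_MASK; }

int main(void)
{
    uintptr_t handle = 0x1000;          /* pretend word-aligned handle */
    uintptr_t stored = tag(handle, TAG_DEFERRED_HANDLE);

    printf("deferred=%d allocated=%d handle=%#lx\n",
           has_tag(stored, TAG_DEFERRED_HANDLE),
           has_tag(stored, TAG_ALLOCATED),
           (unsigned long)untag(stored));
    return 0;
}
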
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index f20f4373ff40..9554abcfd5b4 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -871,6 +871,7 @@ static unsigned int ip_sabotage_in(void *priv,
if (nf_bridge && !nf_bridge->in_prerouting &&
!netif_is_l3_master(skb->dev) &&
!netif_is_l3_slave(skb->dev)) {
+ nf_bridge_info_free(skb);
state->okfn(state->net, state->sk, skb);
return NF_STOLEN;
}
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 608f8c24ae46..fc81d77724a1 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -140,7 +140,7 @@ struct isotp_sock {
canid_t rxid;
ktime_t tx_gap;
ktime_t lastrxcf_tstamp;
- struct hrtimer rxtimer, txtimer;
+ struct hrtimer rxtimer, txtimer, txfrtimer;
struct can_isotp_options opt;
struct can_isotp_fc_options rxfc, txfc;
struct can_isotp_ll_options ll;
@@ -871,7 +871,7 @@ static void isotp_rcv_echo(struct sk_buff *skb, void *data)
}
/* start timer to send next consecutive frame with correct delay */
- hrtimer_start(&so->txtimer, so->tx_gap, HRTIMER_MODE_REL_SOFT);
+ hrtimer_start(&so->txfrtimer, so->tx_gap, HRTIMER_MODE_REL_SOFT);
}
static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
@@ -879,49 +879,39 @@ static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
txtimer);
struct sock *sk = &so->sk;
- enum hrtimer_restart restart = HRTIMER_NORESTART;
- switch (so->tx.state) {
- case ISOTP_SENDING:
-
- /* cfecho should be consumed by isotp_rcv_echo() here */
- if (!so->cfecho) {
- /* start timeout for unlikely lost echo skb */
- hrtimer_set_expires(&so->txtimer,
- ktime_add(ktime_get(),
- ktime_set(ISOTP_ECHO_TIMEOUT, 0)));
- restart = HRTIMER_RESTART;
+ /* don't handle timeouts in IDLE state */
+ if (so->tx.state == ISOTP_IDLE)
+ return HRTIMER_NORESTART;
- /* push out the next consecutive frame */
- isotp_send_cframe(so);
- break;
- }
+ /* we did not get any flow control or echo frame in time */
- /* cfecho has not been cleared in isotp_rcv_echo() */
- pr_notice_once("can-isotp: cfecho %08X timeout\n", so->cfecho);
- fallthrough;
+ /* report 'communication error on send' */
+ sk->sk_err = ECOMM;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk_error_report(sk);
- case ISOTP_WAIT_FC:
- case ISOTP_WAIT_FIRST_FC:
+ /* reset tx state */
+ so->tx.state = ISOTP_IDLE;
+ wake_up_interruptible(&so->wait);
- /* we did not get any flow control frame in time */
+ return HRTIMER_NORESTART;
+}
- /* report 'communication error on send' */
- sk->sk_err = ECOMM;
- if (!sock_flag(sk, SOCK_DEAD))
- sk_error_report(sk);
+static enum hrtimer_restart isotp_txfr_timer_handler(struct hrtimer *hrtimer)
+{
+ struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
+ txfrtimer);
- /* reset tx state */
- so->tx.state = ISOTP_IDLE;
- wake_up_interruptible(&so->wait);
- break;
+ /* start echo timeout handling and cover below protocol error */
+ hrtimer_start(&so->txtimer, ktime_set(ISOTP_ECHO_TIMEOUT, 0),
+ HRTIMER_MODE_REL_SOFT);
- default:
- WARN_ONCE(1, "can-isotp: tx timer state %08X cfecho %08X\n",
- so->tx.state, so->cfecho);
- }
+ /* cfecho should be consumed by isotp_rcv_echo() here */
+ if (so->tx.state == ISOTP_SENDING && !so->cfecho)
+ isotp_send_cframe(so);
- return restart;
+ return HRTIMER_NORESTART;
}
static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
@@ -1162,6 +1152,10 @@ static int isotp_release(struct socket *sock)
/* wait for complete transmission of current pdu */
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+ /* force state machines to be idle also when a signal occurred */
+ so->tx.state = ISOTP_IDLE;
+ so->rx.state = ISOTP_IDLE;
+
spin_lock(&isotp_notifier_lock);
while (isotp_busy_notifier == so) {
spin_unlock(&isotp_notifier_lock);
@@ -1194,6 +1188,7 @@ static int isotp_release(struct socket *sock)
}
}
+ hrtimer_cancel(&so->txfrtimer);
hrtimer_cancel(&so->txtimer);
hrtimer_cancel(&so->rxtimer);
@@ -1597,6 +1592,8 @@ static int isotp_init(struct sock *sk)
so->rxtimer.function = isotp_rx_timer_handler;
hrtimer_init(&so->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
so->txtimer.function = isotp_tx_timer_handler;
+ hrtimer_init(&so->txfrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+ so->txfrtimer.function = isotp_txfr_timer_handler;
init_waitqueue_head(&so->wait);
spin_lock_init(&so->rx_lock);
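
The isotp change above splits the single tx hrtimer in two: txfrtimer only paces the next consecutive frame, while txtimer is left to bound the wait for the echo / flow-control frame. A rough userspace analogue of "two timers, two roles" using Linux timerfds (demo only, not the CAN stack):

#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <unistd.h>

static int arm_ms(long ms)
{
    int fd = timerfd_create(CLOCK_MONOTONIC, 0);
    struct itimerspec its = {
        .it_value = { .tv_sec = ms / 1000, .tv_nsec = (ms % 1000) * 1000000L },
    };

    timerfd_settime(fd, 0, &its, NULL);
    return fd;
}

int main(void)
{
    int frame_gap = arm_ms(10);     /* paces the next consecutive frame */
    int echo_tmo  = arm_ms(1000);   /* bounds the wait for the echo frame */
    struct pollfd fds[2] = {
        { .fd = frame_gap, .events = POLLIN },
        { .fd = echo_tmo,  .events = POLLIN },
    };
    uint64_t expirations;

    poll(fds, 2, -1);
    if (fds[0].revents & POLLIN) {
        (void)read(frame_gap, &expirations, sizeof(expirations));
        printf("frame-gap timer fired: send next consecutive frame\n");
    }
    if (fds[1].revents & POLLIN) {
        (void)read(echo_tmo, &expirations, sizeof(expirations));
        printf("echo timeout: report ECOMM-style error\n");
    }
    close(frame_gap);
    close(echo_tmo);
    return 0;
}
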
diff --git a/net/can/j1939/address-claim.c b/net/can/j1939/address-claim.c
index f33c47327927..ca4ad6cdd5cb 100644
--- a/net/can/j1939/address-claim.c
+++ b/net/can/j1939/address-claim.c
@@ -165,6 +165,46 @@ static void j1939_ac_process(struct j1939_priv *priv, struct sk_buff *skb)
* leaving this function.
*/
ecu = j1939_ecu_get_by_name_locked(priv, name);
+
+ if (ecu && ecu->addr == skcb->addr.sa) {
+ /* The ISO 11783-5 standard, in "4.5.2 - Address claim
+ * requirements", states:
+ * d) No CF shall begin, or resume, transmission on the
+ * network until 250 ms after it has successfully claimed
+ * an address except when responding to a request for
+ * address-claimed.
+ *
+ * But "Figure 6" and "Figure 7" in "4.5.4.2 - Address-claim
+ * prioritization" show that the CF begins the transmission
+ * after 250 ms from the first AC (address-claimed) message
+ * even if it sends another AC message during that time window
+ * to resolve the address contention with another CF.
+ *
+ * As stated in "4.4.2.3 - Address-claimed message":
+ * In order to successfully claim an address, the CF sending
+ * an address claimed message shall not receive a contending
+ * claim from another CF for at least 250 ms.
+ *
+ * As stated in "4.4.3.2 - NAME management (NM) message":
+ * 1) A commanding CF can
+ * d) request that a CF with a specified NAME transmit
+ * the address-claimed message with its current NAME.
+ * 2) A target CF shall
+ * d) send an address-claimed message in response to a
+ * request for a matching NAME
+ *
+ * Taking the above arguments into account, the 250 ms wait is
+ * requested only during network initialization.
+ *
+ * Do not restart the timer on AC message if both the NAME and
+ * the address match and so if the address has already been
+ * claimed (timer has expired) or the AC message has been sent
+ * to resolve the contention with another CF (timer is still
+ * running).
+ */
+ goto out_ecu_put;
+ }
+
if (!ecu && j1939_address_is_unicast(skcb->addr.sa))
ecu = j1939_ecu_create_locked(priv, name);
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index 5c722b55fe23..fce9b9ebf13f 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1092,10 +1092,6 @@ static bool j1939_session_deactivate(struct j1939_session *session)
bool active;
j1939_session_list_lock(priv);
- /* This function should be called with a session ref-count of at
- * least 2.
- */
- WARN_ON_ONCE(kref_read(&session->kref) < 2);
active = j1939_session_deactivate_locked(session);
j1939_session_list_unlock(priv);
diff --git a/net/can/raw.c b/net/can/raw.c
index 81071cdb0301..ba86782ba8bb 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -132,8 +132,8 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
return;
/* make sure to not pass oversized frames to the socket */
- if ((can_is_canfd_skb(oskb) && !ro->fd_frames && !ro->xl_frames) ||
- (can_is_canxl_skb(oskb) && !ro->xl_frames))
+ if ((!ro->fd_frames && can_is_canfd_skb(oskb)) ||
+ (!ro->xl_frames && can_is_canxl_skb(oskb)))
return;
/* eliminate multiple filter matches for the same skb */
@@ -670,6 +670,11 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
if (copy_from_sockptr(&ro->fd_frames, optval, optlen))
return -EFAULT;
+ /* Enabling CAN XL includes CAN FD */
+ if (ro->xl_frames && !ro->fd_frames) {
+ ro->fd_frames = ro->xl_frames;
+ return -EINVAL;
+ }
break;
case CAN_RAW_XL_FRAMES:
@@ -679,6 +684,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
if (copy_from_sockptr(&ro->xl_frames, optval, optlen))
return -EFAULT;
+ /* Enabling CAN XL includes CAN FD */
+ if (ro->xl_frames)
+ ro->fd_frames = ro->xl_frames;
break;
case CAN_RAW_JOIN_FILTERS:
@@ -786,6 +794,25 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
return 0;
}
+static bool raw_bad_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
+{
+ /* Classical CAN -> no checks for flags and device capabilities */
+ if (can_is_can_skb(skb))
+ return false;
+
+ /* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */
+ if (ro->fd_frames && can_is_canfd_skb(skb) &&
+ (mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
+ return false;
+
+ /* CAN XL -> needs to be enabled and a CAN XL device */
+ if (ro->xl_frames && can_is_canxl_skb(skb) &&
+ can_is_canxl_dev_mtu(mtu))
+ return false;
+
+ return true;
+}
+
static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
@@ -833,20 +860,8 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
goto free_skb;
err = -EINVAL;
- if (ro->xl_frames && can_is_canxl_dev_mtu(dev->mtu)) {
- /* CAN XL, CAN FD and Classical CAN */
- if (!can_is_canxl_skb(skb) && !can_is_canfd_skb(skb) &&
- !can_is_can_skb(skb))
- goto free_skb;
- } else if (ro->fd_frames && dev->mtu == CANFD_MTU) {
- /* CAN FD and Classical CAN */
- if (!can_is_canfd_skb(skb) && !can_is_can_skb(skb))
- goto free_skb;
- } else {
- /* Classical CAN */
- if (!can_is_can_skb(skb))
- goto free_skb;
- }
+ if (raw_bad_txframe(ro, skb, dev->mtu))
+ goto free_skb;
sockcm_init(&sockc, sk);
if (msg->msg_controllen) {
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 032d6d0a5ce6..909a10e4b0dd 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -9979,7 +9979,7 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
goto err_xa_alloc;
devlink->netdevice_nb.notifier_call = devlink_netdevice_event;
- ret = register_netdevice_notifier_net(net, &devlink->netdevice_nb);
+ ret = register_netdevice_notifier(&devlink->netdevice_nb);
if (ret)
goto err_register_netdevice_notifier;
@@ -10171,8 +10171,7 @@ void devlink_free(struct devlink *devlink)
xa_destroy(&devlink->snapshot_ids);
xa_destroy(&devlink->ports);
- WARN_ON_ONCE(unregister_netdevice_notifier_net(devlink_net(devlink),
- &devlink->netdevice_nb));
+ WARN_ON_ONCE(unregister_netdevice_notifier(&devlink->netdevice_nb));
xa_erase(&devlinks, devlink->index);
@@ -10503,6 +10502,8 @@ static int devlink_netdevice_event(struct notifier_block *nb,
break;
case NETDEV_REGISTER:
case NETDEV_CHANGENAME:
+ if (devlink_net(devlink) != dev_net(netdev))
+ return NOTIFY_OK;
/* Set the netdev on top of previously set type. Note this
* event happens also during net namespace change so here
* we take into account netdev pointer appearing in this
@@ -10512,6 +10513,8 @@ static int devlink_netdevice_event(struct notifier_block *nb,
netdev);
break;
case NETDEV_UNREGISTER:
+ if (devlink_net(devlink) != dev_net(netdev))
+ return NOTIFY_OK;
/* Clear netdev pointer, but not the type. This event happens
* also during net namespace change so we need to clear
* pointer to netdev that is going to another net namespace.
diff --git a/net/core/gro.c b/net/core/gro.c
index 506f83d715f8..4bac7ea6e025 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -162,6 +162,15 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
struct sk_buff *lp;
int segs;
+ /* Do not splice page pool based packets w/ non-page pool
+ * packets. This can result in reference count issues as page
+ * pool pages will not decrement the reference count and will
+ * instead be immediately returned to the pool or have frag
+ * count decremented.
+ */
+ if (p->pp_recycle != skb->pp_recycle)
+ return -ETOOMANYREFS;
+
/* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
gro_max_size = READ_ONCE(p->dev->gro_max_size);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f00a79fc301b..4edd2176e238 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -269,7 +269,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
(n->nud_state == NUD_NOARP) ||
(tbl->is_multicast &&
tbl->is_multicast(n->primary_key)) ||
- time_after(tref, n->updated))
+ !time_in_range(n->updated, tref, jiffies))
remove = true;
write_unlock(&n->lock);
@@ -289,7 +289,17 @@ static int neigh_forced_gc(struct neigh_table *tbl)
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
+ /* Use safe distance from the jiffies - LONG_MAX point while timer
+ * is running in DELAY/PROBE state but still show to user space
+ * large times in the past.
+ */
+ unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);
+
neigh_hold(n);
+ if (!time_in_range(n->confirmed, mint, jiffies))
+ n->confirmed = mint;
+ if (time_before(n->used, n->confirmed))
+ n->used = n->confirmed;
if (unlikely(mod_timer(&n->timer, when))) {
printk("NEIGH: BUG, double timer add, state is %x\n",
n->nud_state);
@@ -1001,12 +1011,14 @@ static void neigh_periodic_work(struct work_struct *work)
goto next_elt;
}
- if (time_before(n->used, n->confirmed))
+ if (time_before(n->used, n->confirmed) &&
+ time_is_before_eq_jiffies(n->confirmed))
n->used = n->confirmed;
if (refcount_read(&n->refcnt) == 1 &&
(state == NUD_FAILED ||
- time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
+ !time_in_range_open(jiffies, n->used,
+ n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
*np = n->next;
neigh_mark_dead(n);
write_unlock(&n->lock);
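
The neighbour hunks above replace one-sided time_after() tests with time_in_range(), which also rejects timestamps sitting in the "future" half of the wrapping jiffies space. A self-contained demo of the wrap-safe comparisons, re-derived here from the usual signed-difference trick (not the kernel headers):

#include <stdio.h>

typedef unsigned long jiff_t;

/* wrap-safe comparisons via signed difference, as in jiffies arithmetic */
#define time_after_eq(a, b)      ((long)((a) - (b)) >= 0)
#define time_before_eq(a, b)     time_after_eq(b, a)
#define time_in_range(t, lo, hi) (time_after_eq(t, lo) && time_before_eq(t, hi))

int main(void)
{
    jiff_t now   = 1000;
    jiff_t ref   = now - 30;    /* window start, e.g. gc_staletime ago   */
    jiff_t stale = now - 5;     /* updated recently: inside the window   */
    jiff_t bogus = now + 60;    /* "updated in the future": rejected     */

    printf("stale in range: %d\n", (int)time_in_range(stale, ref, now));
    printf("bogus in range: %d\n", (int)time_in_range(bogus, ref, now));
    return 0;
}
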
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4a0eb5593275..a31ff4d83ecc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4100,7 +4100,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
skb_shinfo(skb)->frag_list = NULL;
- do {
+ while (list_skb) {
nskb = list_skb;
list_skb = list_skb->next;
@@ -4146,8 +4146,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
if (skb_needs_linearize(nskb, features) &&
__skb_linearize(nskb))
goto err_linearize;
-
- } while (list_skb);
+ }
skb->truesize = skb->truesize - delta_truesize;
skb->data_len = skb->data_len - delta_len;
diff --git a/net/core/sock.c b/net/core/sock.c
index f954d5893e79..6f27c24016fe 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1531,6 +1531,8 @@ set_sndbuf:
ret = -EINVAL;
break;
}
+ if ((u8)val == SOCK_TXREHASH_DEFAULT)
+ val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
/* Paired with READ_ONCE() in tcp_rtx_synack() */
WRITE_ONCE(sk->sk_txrehash, (u8)val);
break;
@@ -3451,7 +3453,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_pacing_rate = ~0UL;
WRITE_ONCE(sk->sk_pacing_shift, 10);
sk->sk_incoming_cpu = -1;
- sk->sk_txrehash = SOCK_TXREHASH_DEFAULT;
sk_rx_queue_clear(sk);
/*
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 22fa2c5bc6ec..a68a7290a3b2 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -1569,15 +1569,16 @@ void sock_map_unhash(struct sock *sk)
psock = sk_psock(sk);
if (unlikely(!psock)) {
rcu_read_unlock();
- if (sk->sk_prot->unhash)
- sk->sk_prot->unhash(sk);
- return;
+ saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
+ } else {
+ saved_unhash = psock->saved_unhash;
+ sock_map_remove_links(sk, psock);
+ rcu_read_unlock();
}
-
- saved_unhash = psock->saved_unhash;
- sock_map_remove_links(sk, psock);
- rcu_read_unlock();
- saved_unhash(sk);
+ if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
+ return;
+ if (saved_unhash)
+ saved_unhash(sk);
}
EXPORT_SYMBOL_GPL(sock_map_unhash);
@@ -1590,17 +1591,18 @@ void sock_map_destroy(struct sock *sk)
psock = sk_psock_get(sk);
if (unlikely(!psock)) {
rcu_read_unlock();
- if (sk->sk_prot->destroy)
- sk->sk_prot->destroy(sk);
- return;
+ saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
+ } else {
+ saved_destroy = psock->saved_destroy;
+ sock_map_remove_links(sk, psock);
+ rcu_read_unlock();
+ sk_psock_stop(psock);
+ sk_psock_put(sk, psock);
}
-
- saved_destroy = psock->saved_destroy;
- sock_map_remove_links(sk, psock);
- rcu_read_unlock();
- sk_psock_stop(psock);
- sk_psock_put(sk, psock);
- saved_destroy(sk);
+ if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
+ return;
+ if (saved_destroy)
+ saved_destroy(sk);
}
EXPORT_SYMBOL_GPL(sock_map_destroy);
@@ -1615,16 +1617,21 @@ void sock_map_close(struct sock *sk, long timeout)
if (unlikely(!psock)) {
rcu_read_unlock();
release_sock(sk);
- return sk->sk_prot->close(sk, timeout);
+ saved_close = READ_ONCE(sk->sk_prot)->close;
+ } else {
+ saved_close = psock->saved_close;
+ sock_map_remove_links(sk, psock);
+ rcu_read_unlock();
+ sk_psock_stop(psock);
+ release_sock(sk);
+ cancel_work_sync(&psock->work);
+ sk_psock_put(sk, psock);
}
-
- saved_close = psock->saved_close;
- sock_map_remove_links(sk, psock);
- rcu_read_unlock();
- sk_psock_stop(psock);
- release_sock(sk);
- cancel_work_sync(&psock->work);
- sk_psock_put(sk, psock);
+ /* Make sure we do not recurse. This is a bug.
+ * Leak the socket instead of crashing on a stack overflow.
+ */
+ if (WARN_ON_ONCE(saved_close == sock_map_close))
+ return;
saved_close(sk, timeout);
}
EXPORT_SYMBOL_GPL(sock_map_close);
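
All three sockmap paths above follow the same shape: capture the callback to forward to first, drop the locks and references, then refuse to call it if it would re-enter the same wrapper. A toy C sketch of that guard (made-up proto struct, not the sockmap API):

#include <stdio.h>

struct proto {
    void (*close)(struct proto *p);
};

static void wrapped_close(struct proto *p);

static void real_close(struct proto *p)
{
    (void)p;
    printf("underlying close ran\n");
}

static void wrapped_close(struct proto *p)
{
    /* capture what we would forward to before doing anything else */
    void (*saved)(struct proto *) = p->close;

    if (saved == wrapped_close) {
        /* forwarding would recurse into ourselves: bail out instead */
        fprintf(stderr, "refusing to recurse\n");
        return;
    }
    if (saved)
        saved(p);
}

int main(void)
{
    struct proto ok  = { .close = real_close };
    struct proto bad = { .close = wrapped_close };

    wrapped_close(&ok);     /* forwards to real_close */
    wrapped_close(&bad);    /* detects the loop and returns */
    return 0;
}
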
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 6c0ec2789943..cf11f10927e1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -347,6 +347,7 @@ lookup_protocol:
sk->sk_destruct = inet_sock_destruct;
sk->sk_protocol = protocol;
sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
+ sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
inet->uc_ttl = -1;
inet->mc_loop = 1;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index d1f837579398..f2c43f67187d 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -1225,9 +1225,6 @@ int inet_csk_listen_start(struct sock *sk)
sk->sk_ack_backlog = 0;
inet_csk_delack_init(sk);
- if (sk->sk_txrehash == SOCK_TXREHASH_DEFAULT)
- sk->sk_txrehash = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
-
/* There is race window here: we announce ourselves listening,
* but this transition is still not validated by get_port().
* It is OK, because this socket enters to hash table only
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 94aad3870c5f..cf26d65ca389 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -6,6 +6,7 @@
#include <linux/bpf.h>
#include <linux/init.h>
#include <linux/wait.h>
+#include <linux/util_macros.h>
#include <net/inet_common.h>
#include <net/tls.h>
@@ -639,10 +640,9 @@ EXPORT_SYMBOL_GPL(tcp_bpf_update_proto);
*/
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
- int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
struct proto *prot = newsk->sk_prot;
- if (prot == &tcp_bpf_prots[family][TCP_BPF_BASE])
+ if (is_insidevar(prot, tcp_bpf_prots))
newsk->sk_prot = sk->sk_prot_creator;
}
#endif /* CONFIG_BPF_SYSCALL */
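
The is_insidevar() test used above boils down to an address-range check: does the pointer fall within [&var, (char *)&var + sizeof(var))? A standalone sketch of that idea with made-up names:

#include <stdint.h>
#include <stdio.h>

static int prot_table[2][4];    /* stand-in for a static array of protos */

/* true if p points inside the object var of the given size */
static int inside_var(const void *p, const void *var, size_t size)
{
    uintptr_t a  = (uintptr_t)p;
    uintptr_t lo = (uintptr_t)var;

    return a >= lo && a < lo + size;
}

int main(void)
{
    int elsewhere;

    printf("in table: %d\n",
           inside_var(&prot_table[1][2], prot_table, sizeof(prot_table)));
    printf("outside:  %d\n",
           inside_var(&elsewhere, prot_table, sizeof(prot_table)));
    return 0;
}
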
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f7a84a4acffc..faa47f9ea73a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3127,17 +3127,17 @@ static void add_v4_addrs(struct inet6_dev *idev)
offset = sizeof(struct in6_addr) - 4;
memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4);
- if (idev->dev->flags&IFF_POINTOPOINT) {
+ if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) {
+ scope = IPV6_ADDR_COMPATv4;
+ plen = 96;
+ pflags |= RTF_NONEXTHOP;
+ } else {
if (idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_NONE)
return;
addr.s6_addr32[0] = htonl(0xfe800000);
scope = IFA_LINK;
plen = 64;
- } else {
- scope = IPV6_ADDR_COMPATv4;
- plen = 96;
- pflags |= RTF_NONEXTHOP;
}
if (addr.s6_addr32[3]) {
@@ -3447,6 +3447,30 @@ static void addrconf_gre_config(struct net_device *dev)
}
#endif
+static void addrconf_init_auto_addrs(struct net_device *dev)
+{
+ switch (dev->type) {
+#if IS_ENABLED(CONFIG_IPV6_SIT)
+ case ARPHRD_SIT:
+ addrconf_sit_config(dev);
+ break;
+#endif
+#if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
+ case ARPHRD_IP6GRE:
+ case ARPHRD_IPGRE:
+ addrconf_gre_config(dev);
+ break;
+#endif
+ case ARPHRD_LOOPBACK:
+ init_loopback(dev);
+ break;
+
+ default:
+ addrconf_dev_config(dev);
+ break;
+ }
+}
+
static int fixup_permanent_addr(struct net *net,
struct inet6_dev *idev,
struct inet6_ifaddr *ifp)
@@ -3615,26 +3639,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
run_pending = 1;
}
- switch (dev->type) {
-#if IS_ENABLED(CONFIG_IPV6_SIT)
- case ARPHRD_SIT:
- addrconf_sit_config(dev);
- break;
-#endif
-#if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
- case ARPHRD_IP6GRE:
- case ARPHRD_IPGRE:
- addrconf_gre_config(dev);
- break;
-#endif
- case ARPHRD_LOOPBACK:
- init_loopback(dev);
- break;
-
- default:
- addrconf_dev_config(dev);
- break;
- }
+ addrconf_init_auto_addrs(dev);
if (!IS_ERR_OR_NULL(idev)) {
if (run_pending)
@@ -6397,7 +6402,7 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
if (idev->cnf.addr_gen_mode != new_val) {
idev->cnf.addr_gen_mode = new_val;
- addrconf_dev_config(idev->dev);
+ addrconf_init_auto_addrs(idev->dev);
}
} else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
struct net_device *dev;
@@ -6408,7 +6413,7 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
if (idev &&
idev->cnf.addr_gen_mode != new_val) {
idev->cnf.addr_gen_mode = new_val;
- addrconf_dev_config(idev->dev);
+ addrconf_init_auto_addrs(idev->dev);
}
}
}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index fee9163382c2..847934763868 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -222,6 +222,7 @@ lookup_protocol:
np->pmtudisc = IPV6_PMTUDISC_WANT;
np->repflow = net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_ESTABLISHED;
sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
+ sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
/* Init the ipv4 part of the socket since we can have sockets
* using v6 API for ipv4.
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index c2aae2a6d6a6..97bb4401dd3e 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -213,7 +213,6 @@ __ieee802154_rx_handle_packet(struct ieee802154_local *local,
ret = ieee802154_parse_frame_start(skb, &hdr);
if (ret) {
pr_debug("got invalid frame\n");
- kfree_skb(skb);
return;
}
diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
index 45bbe3e54cc2..3150f3f0c872 100644
--- a/net/mctp/af_mctp.c
+++ b/net/mctp/af_mctp.c
@@ -587,6 +587,11 @@ static void mctp_sk_unhash(struct sock *sk)
del_timer_sync(&msk->key_expiry);
}
+static void mctp_sk_destruct(struct sock *sk)
+{
+ skb_queue_purge(&sk->sk_receive_queue);
+}
+
static struct proto mctp_proto = {
.name = "MCTP",
.owner = THIS_MODULE,
@@ -623,6 +628,7 @@ static int mctp_pf_create(struct net *net, struct socket *sock,
return -ENOMEM;
sock_init_data(sock, sk);
+ sk->sk_destruct = mctp_sk_destruct;
rc = 0;
if (sk->sk_prot->init)
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 2ea7eae43bdb..10fe9771a852 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -998,8 +998,8 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
{
int addrlen = sizeof(struct sockaddr_in);
struct sockaddr_storage addr;
- struct mptcp_sock *msk;
struct socket *ssock;
+ struct sock *newsk;
int backlog = 1024;
int err;
@@ -1008,11 +1008,13 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
if (err)
return err;
- msk = mptcp_sk(entry->lsk->sk);
- if (!msk)
+ newsk = entry->lsk->sk;
+ if (!newsk)
return -EINVAL;
- ssock = __mptcp_nmpc_socket(msk);
+ lock_sock(newsk);
+ ssock = __mptcp_nmpc_socket(mptcp_sk(newsk));
+ release_sock(newsk);
if (!ssock)
return -EINVAL;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 8cd6cc67c2c5..bc6c1f62a690 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2897,6 +2897,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk = mptcp_sk(sk);
bool do_cancel_work = false;
+ int subflows_alive = 0;
sk->sk_shutdown = SHUTDOWN_MASK;
@@ -2922,6 +2923,8 @@ cleanup:
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
bool slow = lock_sock_fast_nested(ssk);
+ subflows_alive += ssk->sk_state != TCP_CLOSE;
+
/* since the close timeout takes precedence on the fail one,
* cancel the latter
*/
@@ -2937,6 +2940,12 @@ cleanup:
}
sock_orphan(sk);
+ /* all the subflows are closed, only timeout can change the msk
+ * state, let's not keep resources busy for no reason
+ */
+ if (subflows_alive == 0)
+ inet_sk_state_store(sk, TCP_CLOSE);
+
sock_hold(sk);
pr_debug("msk=%p state=%d", sk, sk->sk_state);
if (msk->token)
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index d4b1e6ec1b36..7f2c3727ab23 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -760,14 +760,21 @@ static int mptcp_setsockopt_v4(struct mptcp_sock *msk, int optname,
static int mptcp_setsockopt_first_sf_only(struct mptcp_sock *msk, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
+ struct sock *sk = (struct sock *)msk;
struct socket *sock;
+ int ret = -EINVAL;
/* Limit to first subflow, before the connection establishment */
+ lock_sock(sk);
sock = __mptcp_nmpc_socket(msk);
if (!sock)
- return -EINVAL;
+ goto unlock;
- return tcp_setsockopt(sock->sk, level, optname, optval, optlen);
+ ret = tcp_setsockopt(sock->sk, level, optname, optval, optlen);
+
+unlock:
+ release_sock(sk);
+ return ret;
}
static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index ec54413fb31f..32904c76c6a1 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1399,6 +1399,7 @@ void __mptcp_error_report(struct sock *sk)
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
int err = sock_error(ssk);
+ int ssk_state;
if (!err)
continue;
@@ -1409,7 +1410,14 @@ void __mptcp_error_report(struct sock *sk)
if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
continue;
- inet_sk_state_store(sk, inet_sk_state_load(ssk));
+ /* We need to propagate only transition to CLOSE state.
+ * Orphaned socket will see such state change via
+ * subflow_sched_work_if_closed() and that path will properly
+ * destroy the msk as needed.
+ */
+ ssk_state = inet_sk_state_load(ssk);
+ if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
+ inet_sk_state_store(sk, ssk_state);
sk->sk_err = -err;
/* This barrier is coupled with smp_rmb() in mptcp_poll() */
@@ -1679,7 +1687,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
if (err)
return err;
- lock_sock(sf->sk);
+ lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);
/* the newly created socket has to be in the same cgroup as its parent */
mptcp_attach_cgroup(sk, sf->sk);
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 945dd40e7077..011d414038ea 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -142,10 +142,11 @@ static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
}
#endif
+/* do_basic_checks ensures sch->length > 0, do not use before */
#define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) \
for ((offset) = (dataoff) + sizeof(struct sctphdr), (count) = 0; \
- ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))) && \
- (sch)->length; \
+ (offset) < (skb)->len && \
+ ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))); \
(offset) += (ntohs((sch)->length) + 3) & ~3, (count)++)
/* Some validity checks to make sure the chunks are fine */
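
The macro fix above makes the loop bound depend on the packet length rather than on a peeked chunk length, so a zero-length or truncated chunk can no longer walk the iterator past the buffer. A small userspace analogue with a hypothetical TLV layout (not the SCTP structures):

#include <stdint.h>
#include <stdio.h>

struct hdr {
    uint8_t type;
    uint8_t len;    /* length of the whole chunk, header included */
};

static void walk(const uint8_t *buf, size_t buflen)
{
    size_t off = 0;

    /* bound on the buffer length, not on whatever len claims */
    while (off + sizeof(struct hdr) <= buflen) {
        const struct hdr *h = (const struct hdr *)(buf + off);

        printf("chunk type=%u len=%u at offset %zu\n", h->type, h->len, off);

        if (h->len < sizeof(struct hdr))
            break;          /* malformed: refuse to loop forever */

        off += ((size_t)h->len + 3) & ~(size_t)3;   /* 4-byte padding */
    }
}

int main(void)
{
    const uint8_t pkt[] = {
        1, 8, 0, 0, 0, 0, 0, 0,     /* chunk 1: type 1, len 8 */
        4, 4, 0, 0,                 /* chunk 2: type 4, len 4 */
    };

    walk(pkt, sizeof(pkt));
    return 0;
}
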
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 6f7f4392cffb..5a4cb796150f 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -400,6 +400,11 @@ static int nr_listen(struct socket *sock, int backlog)
struct sock *sk = sock->sk;
lock_sock(sk);
+ if (sock->state != SS_UNCONNECTED) {
+ release_sock(sk);
+ return -EINVAL;
+ }
+
if (sk->sk_state != TCP_LISTEN) {
memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN);
sk->sk_max_ack_backlog = backlog;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index a71795355aec..fcee6012293b 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1004,14 +1004,14 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
key = kzalloc(sizeof(*key), GFP_KERNEL);
if (!key) {
error = -ENOMEM;
- goto err_kfree_key;
+ goto err_kfree_flow;
}
ovs_match_init(&match, key, false, &mask);
error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
a[OVS_FLOW_ATTR_MASK], log);
if (error)
- goto err_kfree_flow;
+ goto err_kfree_key;
ovs_flow_mask_key(&new_flow->key, key, true, &mask);
@@ -1019,14 +1019,14 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
key, log);
if (error)
- goto err_kfree_flow;
+ goto err_kfree_key;
/* Validate actions. */
error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
&new_flow->key, &acts, log);
if (error) {
OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
- goto err_kfree_flow;
+ goto err_kfree_key;
}
reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
@@ -1126,10 +1126,10 @@ err_unlock_ovs:
kfree_skb(reply);
err_kfree_acts:
ovs_nla_free_flow_actions(acts);
-err_kfree_flow:
- ovs_flow_free(new_flow, false);
err_kfree_key:
kfree(key);
+err_kfree_flow:
+ ovs_flow_free(new_flow, false);
error:
return error;
}
diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
index 1990d496fcfc..e595079c2caf 100644
--- a/net/qrtr/ns.c
+++ b/net/qrtr/ns.c
@@ -83,7 +83,10 @@ static struct qrtr_node *node_get(unsigned int node_id)
node->id = node_id;
- radix_tree_insert(&nodes, node_id, node);
+ if (radix_tree_insert(&nodes, node_id, node)) {
+ kfree(node);
+ return NULL;
+ }
return node;
}
diff --git a/net/rds/message.c b/net/rds/message.c
index b47e4f0a1639..c19c93561227 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -104,9 +104,9 @@ static void rds_rm_zerocopy_callback(struct rds_sock *rs,
spin_lock_irqsave(&q->lock, flags);
head = &q->zcookie_head;
if (!list_empty(head)) {
- info = list_entry(head, struct rds_msg_zcopy_info,
- rs_zcookie_next);
- if (info && rds_zcookie_add(info, cookie)) {
+ info = list_first_entry(head, struct rds_msg_zcopy_info,
+ rs_zcookie_next);
+ if (rds_zcookie_add(info, cookie)) {
spin_unlock_irqrestore(&q->lock, flags);
kfree(rds_info_from_znotifier(znotif));
/* caller invokes rds_wake_sk_sleep() */
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 36fefc3957d7..ca2b17f32670 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -488,6 +488,12 @@ static int rose_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
+ lock_sock(sk);
+ if (sock->state != SS_UNCONNECTED) {
+ release_sock(sk);
+ return -EINVAL;
+ }
+
if (sk->sk_state != TCP_LISTEN) {
struct rose_sock *rose = rose_sk(sk);
@@ -497,8 +503,10 @@ static int rose_listen(struct socket *sock, int backlog)
memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
+ release_sock(sk);
return 0;
}
+ release_sock(sk);
return -EOPNOTSUPP;
}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index f46643850df8..92f2975b6a82 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -431,7 +431,10 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
while (cl->cmode == HTB_MAY_BORROW && p && mask) {
m = mask;
while (m) {
- int prio = ffz(~m);
+ unsigned int prio = ffz(~m);
+
+ if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
+ break;
m &= ~(1 << prio);
if (p->inner.clprio[prio].feed.rb_node)
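
The htb hunk above adds a defensive bound: derive the index, then refuse to use it if it would step past the fixed-size clprio array. A trivial sketch of the same guard (illustrative names only):

#include <stdio.h>

#define NPRIO 8

static int feeds[NPRIO];

static int activate_prio(unsigned int prio)
{
    if (prio >= NPRIO) {        /* corrupted state: warn and bail out */
        fprintf(stderr, "bogus prio %u\n", prio);
        return -1;
    }
    feeds[prio]++;
    return 0;
}

int main(void)
{
    printf("%d %d\n", activate_prio(3), activate_prio(42));
    return 0;
}
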
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index ca1eba95c293..2f66a2006517 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -196,9 +196,7 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
/* When a data chunk is sent, reset the heartbeat interval. */
expires = jiffies + sctp_transport_timeout(transport);
- if ((time_before(transport->hb_timer.expires, expires) ||
- !timer_pending(&transport->hb_timer)) &&
- !mod_timer(&transport->hb_timer,
+ if (!mod_timer(&transport->hb_timer,
expires + get_random_u32_below(transport->rto)))
sctp_transport_hold(transport);
}
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 9ed978634125..a83d2b4275fa 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2427,7 +2427,7 @@ static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
{
struct tls_rec *rec;
- rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
+ rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
if (!rec)
return false;
diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c
index a0f62fa02e06..8cbf45a8bcdc 100644
--- a/net/xfrm/xfrm_compat.c
+++ b/net/xfrm/xfrm_compat.c
@@ -5,6 +5,7 @@
* Based on code and translator idea by: Florian Westphal <[email protected]>
*/
#include <linux/compat.h>
+#include <linux/nospec.h>
#include <linux/xfrm.h>
#include <net/xfrm.h>
@@ -302,7 +303,7 @@ static int xfrm_xlate64(struct sk_buff *dst, const struct nlmsghdr *nlh_src)
nla_for_each_attr(nla, attrs, len, remaining) {
int err;
- switch (type) {
+ switch (nlh_src->nlmsg_type) {
case XFRM_MSG_NEWSPDINFO:
err = xfrm_nla_cpy(dst, nla, nla_len(nla));
break;
@@ -437,6 +438,7 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla,
NL_SET_ERR_MSG(extack, "Bad attribute");
return -EOPNOTSUPP;
}
+ type = array_index_nospec(type, XFRMA_MAX + 1);
if (nla_len(nla) < compat_policy[type].len) {
NL_SET_ERR_MSG(extack, "Attribute bad length");
return -EOPNOTSUPP;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index c06e54a10540..436d29640ac2 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -279,8 +279,7 @@ static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
goto out;
if (x->props.flags & XFRM_STATE_DECAP_DSCP)
- ipv6_copy_dscp(ipv6_get_dsfield(ipv6_hdr(skb)),
- ipipv6_hdr(skb));
+ ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipipv6_hdr(skb));
if (!(x->props.flags & XFRM_STATE_NOECN))
ipip6_ecn_decapsulate(skb);
diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
index 1f99dc469027..35279c220bd7 100644
--- a/net/xfrm/xfrm_interface_core.c
+++ b/net/xfrm/xfrm_interface_core.c
@@ -310,6 +310,52 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
skb->mark = 0;
}
+static int xfrmi_input(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type, unsigned short family)
+{
+ struct sec_path *sp;
+
+ sp = skb_sec_path(skb);
+ if (sp && (sp->len || sp->olen) &&
+ !xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+ goto discard;
+
+ XFRM_SPI_SKB_CB(skb)->family = family;
+ if (family == AF_INET) {
+ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+ } else {
+ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
+ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
+ }
+
+ return xfrm_input(skb, nexthdr, spi, encap_type);
+discard:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int xfrmi4_rcv(struct sk_buff *skb)
+{
+ return xfrmi_input(skb, ip_hdr(skb)->protocol, 0, 0, AF_INET);
+}
+
+static int xfrmi6_rcv(struct sk_buff *skb)
+{
+ return xfrmi_input(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
+ 0, 0, AF_INET6);
+}
+
+static int xfrmi4_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+{
+ return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET);
+}
+
+static int xfrmi6_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+{
+ return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET6);
+}
+
static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
{
const struct xfrm_mode *inner_mode;
@@ -945,8 +991,8 @@ static struct pernet_operations xfrmi_net_ops = {
};
static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
- .handler = xfrm6_rcv,
- .input_handler = xfrm_input,
+ .handler = xfrmi6_rcv,
+ .input_handler = xfrmi6_input,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi6_err,
.priority = 10,
@@ -996,8 +1042,8 @@ static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = {
#endif
static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
- .handler = xfrm4_rcv,
- .input_handler = xfrm_input,
+ .handler = xfrmi4_rcv,
+ .input_handler = xfrmi4_input,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi4_err,
.priority = 10,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index e9eb82c5457d..5c61ec04b839 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -336,7 +336,7 @@ static void xfrm_policy_timer(struct timer_list *t)
}
if (xp->lft.hard_use_expires_seconds) {
time64_t tmo = xp->lft.hard_use_expires_seconds +
- (xp->curlft.use_time ? : xp->curlft.add_time) - now;
+ (READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
if (tmo <= 0)
goto expired;
if (tmo < next)
@@ -354,7 +354,7 @@ static void xfrm_policy_timer(struct timer_list *t)
}
if (xp->lft.soft_use_expires_seconds) {
time64_t tmo = xp->lft.soft_use_expires_seconds +
- (xp->curlft.use_time ? : xp->curlft.add_time) - now;
+ (READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
if (tmo <= 0) {
warn = 1;
tmo = XFRM_KM_TIMEOUT;
@@ -3661,7 +3661,8 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
return 1;
}
- pol->curlft.use_time = ktime_get_real_seconds();
+ /* This lockless write can happen from different cpus. */
+ WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds());
pols[0] = pol;
npols++;
@@ -3676,7 +3677,9 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
xfrm_pol_put(pols[0]);
return 0;
}
- pols[1]->curlft.use_time = ktime_get_real_seconds();
+ /* This write can happen from different cpus. */
+ WRITE_ONCE(pols[1]->curlft.use_time,
+ ktime_get_real_seconds());
npols++;
}
}
@@ -3742,6 +3745,9 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
goto reject;
}
+ if (if_id)
+ secpath_reset(skb);
+
xfrm_pols_put(pols, npols);
return 1;
}
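
curlft.use_time above is now written locklessly from whichever CPU first sees traffic for the policy, so the accesses are annotated to keep the compiler from tearing or re-loading the field. A plain C11 analogue of that marking (the kernel uses READ_ONCE()/WRITE_ONCE(), not <stdatomic.h>):

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static _Atomic long use_time;   /* 0 means "not used yet" */

/* may be called concurrently from several threads on first use */
static void mark_used(void)
{
    if (atomic_load_explicit(&use_time, memory_order_relaxed) == 0)
        atomic_store_explicit(&use_time, (long)time(NULL),
                              memory_order_relaxed);
}

static long last_used(void)
{
    return atomic_load_explicit(&use_time, memory_order_relaxed);
}

int main(void)
{
    mark_used();
    printf("use_time=%ld\n", last_used());
    return 0;
}
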
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 89c731f4f0c7..00afe831c71c 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -577,7 +577,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
if (x->km.state == XFRM_STATE_EXPIRED)
goto expired;
if (x->lft.hard_add_expires_seconds) {
- long tmo = x->lft.hard_add_expires_seconds +
+ time64_t tmo = x->lft.hard_add_expires_seconds +
x->curlft.add_time - now;
if (tmo <= 0) {
if (x->xflags & XFRM_SOFT_EXPIRE) {
@@ -594,8 +594,8 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
next = tmo;
}
if (x->lft.hard_use_expires_seconds) {
- long tmo = x->lft.hard_use_expires_seconds +
- (x->curlft.use_time ? : now) - now;
+ time64_t tmo = x->lft.hard_use_expires_seconds +
+ (READ_ONCE(x->curlft.use_time) ? : now) - now;
if (tmo <= 0)
goto expired;
if (tmo < next)
@@ -604,7 +604,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
if (x->km.dying)
goto resched;
if (x->lft.soft_add_expires_seconds) {
- long tmo = x->lft.soft_add_expires_seconds +
+ time64_t tmo = x->lft.soft_add_expires_seconds +
x->curlft.add_time - now;
if (tmo <= 0) {
warn = 1;
@@ -616,8 +616,8 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
}
}
if (x->lft.soft_use_expires_seconds) {
- long tmo = x->lft.soft_use_expires_seconds +
- (x->curlft.use_time ? : now) - now;
+ time64_t tmo = x->lft.soft_use_expires_seconds +
+ (READ_ONCE(x->curlft.use_time) ? : now) - now;
if (tmo <= 0)
warn = 1;
else if (tmo < next)
@@ -1906,7 +1906,7 @@ out:
hrtimer_start(&x1->mtimer, ktime_set(1, 0),
HRTIMER_MODE_REL_SOFT);
- if (x1->curlft.use_time)
+ if (READ_ONCE(x1->curlft.use_time))
xfrm_state_check_expire(x1);
if (x->props.smark.m || x->props.smark.v || x->if_id) {
@@ -1940,8 +1940,8 @@ int xfrm_state_check_expire(struct xfrm_state *x)
{
xfrm_dev_state_update_curlft(x);
- if (!x->curlft.use_time)
- x->curlft.use_time = ktime_get_real_seconds();
+ if (!READ_ONCE(x->curlft.use_time))
+ WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
if (x->curlft.bytes >= x->lft.hard_byte_limit ||
x->curlft.packets >= x->lft.hard_packet_limit) {
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index 836391e5d209..4815a8e32227 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -66,9 +66,13 @@ endif
# Don't stop modules_install even if we can't sign external modules.
#
ifeq ($(CONFIG_MODULE_SIG_ALL),y)
+ifeq ($(filter pkcs11:%, $(CONFIG_MODULE_SIG_KEY)),)
sig-key := $(if $(wildcard $(CONFIG_MODULE_SIG_KEY)),,$(srctree)/)$(CONFIG_MODULE_SIG_KEY)
+else
+sig-key := $(CONFIG_MODULE_SIG_KEY)
+endif
quiet_cmd_sign = SIGN $@
- cmd_sign = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) $(sig-key) certs/signing_key.x509 $@ \
+ cmd_sign = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) "$(sig-key)" certs/signing_key.x509 $@ \
$(if $(KBUILD_EXTMOD),|| true)
else
quiet_cmd_sign :=
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 81025f50a542..f901504b5afc 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -541,16 +541,15 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
struct sg_table *sgt;
void *p;
+#ifdef CONFIG_SND_DMA_SGBUF
+ if (cpu_feature_enabled(X86_FEATURE_XENPV))
+ return snd_dma_sg_fallback_alloc(dmab, size);
+#endif
sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
DEFAULT_GFP, 0);
#ifdef CONFIG_SND_DMA_SGBUF
- if (!sgt && !get_dma_ops(dmab->dev.dev)) {
- if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
- dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
- else
- dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
+ if (!sgt && !get_dma_ops(dmab->dev.dev))
return snd_dma_sg_fallback_alloc(dmab, size);
- }
#endif
if (!sgt)
return NULL;
@@ -717,19 +716,38 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
+ bool use_dma_alloc_coherent;
size_t count;
struct page **pages;
+ /* DMA address array; the first page contains #pages in ~PAGE_MASK */
+ dma_addr_t *addrs;
};
static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
struct snd_dma_sg_fallback *sgbuf)
{
- bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
- size_t i;
-
- for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
- do_free_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
+ size_t i, size;
+
+ if (sgbuf->pages && sgbuf->addrs) {
+ i = 0;
+ while (i < sgbuf->count) {
+ if (!sgbuf->pages[i] || !sgbuf->addrs[i])
+ break;
+ size = sgbuf->addrs[i] & ~PAGE_MASK;
+ if (WARN_ON(!size))
+ break;
+ if (sgbuf->use_dma_alloc_coherent)
+ dma_free_coherent(dmab->dev.dev, size << PAGE_SHIFT,
+ page_address(sgbuf->pages[i]),
+ sgbuf->addrs[i] & PAGE_MASK);
+ else
+ do_free_pages(page_address(sgbuf->pages[i]),
+ size << PAGE_SHIFT, false);
+ i += size;
+ }
+ }
kvfree(sgbuf->pages);
+ kvfree(sgbuf->addrs);
kfree(sgbuf);
}
@@ -738,24 +756,36 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
struct snd_dma_sg_fallback *sgbuf;
struct page **pagep, *curp;
size_t chunk, npages;
+ dma_addr_t *addrp;
dma_addr_t addr;
void *p;
- bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
+
+ /* correct the type */
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG)
+ dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
+ else if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
+ dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
if (!sgbuf)
return NULL;
+ sgbuf->use_dma_alloc_coherent = cpu_feature_enabled(X86_FEATURE_XENPV);
size = PAGE_ALIGN(size);
sgbuf->count = size >> PAGE_SHIFT;
sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
- if (!sgbuf->pages)
+ sgbuf->addrs = kvcalloc(sgbuf->count, sizeof(*sgbuf->addrs), GFP_KERNEL);
+ if (!sgbuf->pages || !sgbuf->addrs)
goto error;
pagep = sgbuf->pages;
- chunk = size;
+ addrp = sgbuf->addrs;
+ chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */
while (size > 0) {
chunk = min(size, chunk);
- p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
+ if (sgbuf->use_dma_alloc_coherent)
+ p = dma_alloc_coherent(dmab->dev.dev, chunk, &addr, DEFAULT_GFP);
+ else
+ p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
if (!p) {
if (chunk <= PAGE_SIZE)
goto error;
@@ -767,17 +797,25 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
size -= chunk;
/* fill pages */
npages = chunk >> PAGE_SHIFT;
+ *addrp = npages; /* store in lower bits */
curp = virt_to_page(p);
- while (npages--)
+ while (npages--) {
*pagep++ = curp++;
+ *addrp++ |= addr;
+ addr += PAGE_SIZE;
+ }
}
p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
if (!p)
goto error;
+
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+ set_pages_array_wc(sgbuf->pages, sgbuf->count);
+
dmab->private_data = sgbuf;
/* store the first page address for convenience */
- dmab->addr = snd_sgbuf_get_addr(dmab, 0);
+ dmab->addr = sgbuf->addrs[0] & PAGE_MASK;
return p;
error:
@@ -787,10 +825,23 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
+ struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
+
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+ set_pages_array_wb(sgbuf->pages, sgbuf->count);
vunmap(dmab->area);
__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}
+static dma_addr_t snd_dma_sg_fallback_get_addr(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
+ size_t index = offset >> PAGE_SHIFT;
+
+ return (sgbuf->addrs[index] & PAGE_MASK) | (offset & ~PAGE_MASK);
+}
+
static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
struct vm_area_struct *area)
{
@@ -805,8 +856,8 @@ static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
.alloc = snd_dma_sg_fallback_alloc,
.free = snd_dma_sg_fallback_free,
.mmap = snd_dma_sg_fallback_mmap,
+ .get_addr = snd_dma_sg_fallback_get_addr,
/* reuse vmalloc helpers */
- .get_addr = snd_dma_vmalloc_get_addr,
.get_page = snd_dma_vmalloc_get_page,
.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
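
The fallback SG allocator above stashes each chunk's page count in the low bits of its page-aligned DMA address, so one array carries both pieces of information. A tiny sketch of that packing (the PAGE_SHIFT value is an assumption for the demo):

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1ULL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK  (~(DEMO_PAGE_SIZE - 1))

int main(void)
{
    uint64_t dma_addr = 0x12345000ULL;  /* page aligned by construction */
    uint64_t npages   = 4;              /* this chunk spans 4 pages     */

    /* first entry of the chunk: address in the high bits, count below */
    uint64_t packed = (dma_addr & DEMO_PAGE_MASK) | npages;

    printf("addr=%#llx pages=%llu\n",
           (unsigned long long)(packed & DEMO_PAGE_MASK),
           (unsigned long long)(packed & ~DEMO_PAGE_MASK));
    return 0;
}
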
diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c
index a900fc0e7644..88d1f4b56e4b 100644
--- a/sound/firewire/motu/motu-hwdep.c
+++ b/sound/firewire/motu/motu-hwdep.c
@@ -87,6 +87,10 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
return -EFAULT;
count = consumed;
+ } else {
+ spin_unlock_irq(&motu->lock);
+
+ count = 0;
}
return count;
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index 1a868dd9dc4b..890c2f7c33fc 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -144,6 +144,7 @@ static int hda_codec_driver_probe(struct device *dev)
error:
snd_hda_codec_cleanup_for_unbind(codec);
+ codec->preset = NULL;
return err;
}
@@ -166,6 +167,7 @@ static int hda_codec_driver_remove(struct device *dev)
if (codec->patch_ops.free)
codec->patch_ops.free(codec);
snd_hda_codec_cleanup_for_unbind(codec);
+ codec->preset = NULL;
module_put(dev->driver->owner);
return 0;
}
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index edd653ece70d..ac1cc7c5290e 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -795,7 +795,6 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec)
snd_array_free(&codec->cvt_setups);
snd_array_free(&codec->spdif_out);
snd_array_free(&codec->verbs);
- codec->preset = NULL;
codec->follower_dig_outs = NULL;
codec->spdif_status_reset = 0;
snd_array_free(&codec->mixers);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 6fab7c8fc19a..1134a493d225 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9202,6 +9202,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x142b, "Acer Swift SF314-42", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
+ SND_PCI_QUIRK(0x1025, 0x1534, "Acer Predator PH315-54", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
SND_PCI_QUIRK(0x1028, 0x053c, "Dell Latitude E5430", ALC292_FIXUP_DELL_E7X),
SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
@@ -9422,6 +9423,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x89c3, "Zbook Studio G9", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),
@@ -9432,6 +9434,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b8a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b8b, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
@@ -9478,6 +9486,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
@@ -9521,6 +9530,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_AMP),
SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP),
SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP),
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
@@ -9699,6 +9709,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+ SND_PCI_QUIRK(0x1c6c, 0x1251, "Positivo N14KP6-TG", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
SND_PCI_QUIRK(0x1d05, 0x1096, "TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x1d05, 0x1100, "TongFang GKxNRxx", ALC269_FIXUP_NO_SHUTUP),
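
Each added line binds one more machine, identified by its PCI subsystem vendor/device pair, to an existing Realtek fixup. A simplified, runnable model of how such a table is searched; the struct layout and lookup helper are illustrative, not the HDA driver's code:

    /* Simplified model of a quirk table keyed on PCI subsystem IDs, in the
     * spirit of SND_PCI_QUIRK(vendor, device, name, fixup). */
    #include <stdint.h>
    #include <stdio.h>

    struct quirk {
        uint16_t subvendor;
        uint16_t subdevice;
        const char *name;
        int fixup_id;
    };

    static const struct quirk quirks[] = {
        { 0x103c, 0x8b92, "HP",                 1 },
        { 0x1043, 0x1e12, "ASUS UM3402",        2 },
        { 0x1c6c, 0x1251, "Positivo N14KP6-TG", 3 },
    };

    static const struct quirk *lookup(uint16_t sv, uint16_t sd)
    {
        for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
            if (quirks[i].subvendor == sv && quirks[i].subdevice == sd)
                return &quirks[i];
        return NULL;
    }

    int main(void)
    {
        const struct quirk *q = lookup(0x1043, 0x1e12);

        if (q)
            printf("%s -> fixup %d\n", q->name, q->fixup_id);
        return 0;
    }
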
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index aea7fae2ca4b..2994f85bc1b9 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -819,6 +819,9 @@ static int add_secret_dac_path(struct hda_codec *codec)
return 0;
nums = snd_hda_get_connections(codec, spec->gen.mixer_nid, conn,
ARRAY_SIZE(conn) - 1);
+ if (nums < 0)
+ return nums;
+
for (i = 0; i < nums; i++) {
if (get_wcaps_type(get_wcaps(codec, conn[i])) == AC_WID_AUD_OUT)
return 0;
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index d3f58a3d17fb..b5b0d43bb8dc 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -493,12 +493,11 @@ int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
dev_dbg(chip->card->dev,
"CMD_08_ASK_BUFFERS: needed %d, freed %d\n",
*r_needed, *r_freed);
- for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
- for (i = 0; i != chip->rmh.stat_len; ++i)
- dev_dbg(chip->card->dev,
- " stat[%d]: %x, %x\n", i,
- chip->rmh.stat[i],
- chip->rmh.stat[i] & MASK_DATA_SIZE);
+ for (i = 0; i < MAX_STREAM_BUFFER && i < chip->rmh.stat_len;
+ ++i) {
+ dev_dbg(chip->card->dev, " stat[%d]: %x, %x\n", i,
+ chip->rmh.stat[i],
+ chip->rmh.stat[i] & MASK_DATA_SIZE);
}
}
diff --git a/sound/soc/amd/acp-es8336.c b/sound/soc/amd/acp-es8336.c
index 2fe8df86053a..89499542c803 100644
--- a/sound/soc/amd/acp-es8336.c
+++ b/sound/soc/amd/acp-es8336.c
@@ -198,9 +198,11 @@ static int st_es8336_late_probe(struct snd_soc_card *card)
int ret;
adev = acpi_dev_get_first_match_dev("ESSX8336", NULL, -1);
- if (adev)
- put_device(&adev->dev);
+ if (!adev)
+ return -ENODEV;
+
codec_dev = acpi_get_first_physical_node(adev);
+ acpi_dev_put(adev);
if (!codec_dev)
dev_err(card->dev, "can not find codec dev\n");
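
The probe fix stops dropping the ACPI device reference before it is used: a missing ESSX8336 device now fails early with -ENODEV, the physical node is resolved first, and only then is the reference released. A kernel-flavored sketch of that ordering, assuming the same ACPI helpers; the error codes and function name are illustrative, not a drop-in for the driver:

    #include <linux/acpi.h>
    #include <linux/device.h>

    static int find_codec(struct device *card_dev, struct device **codec_dev)
    {
        struct acpi_device *adev;

        adev = acpi_dev_get_first_match_dev("ESSX8336", NULL, -1);
        if (!adev)
            return -ENODEV;              /* fail early, nothing to put */

        *codec_dev = acpi_get_first_physical_node(adev);
        acpi_dev_put(adev);              /* reference no longer needed */
        if (!*codec_dev)
            return -EPROBE_DEFER;

        return 0;
    }
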
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index 0d283e41f66d..36314753923b 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -230,10 +230,31 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
{
.driver_data = &acp6x_card,
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "TIMI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 15 2022"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Razer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Blade 14 (2022) - RZ09-0427"),
}
},
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "RB"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Swift SFA16-41"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "IRBIS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "15NBC1011"),
+ }
+ },
{}
};
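
The new entries enable the machine driver on additional boards by matching the DMI board vendor and product name, with driver_data pointing at the card to register. A sketch of how such a dmi_system_id table is typically consumed, assuming <linux/dmi.h> and a card symbol provided elsewhere:

    #include <linux/dmi.h>
    #include <sound/soc.h>

    extern struct snd_soc_card acp6x_card;   /* assumption: defined elsewhere */

    static const struct dmi_system_id quirk_table[] = {
        {
            .driver_data = &acp6x_card,
            .matches = {
                DMI_MATCH(DMI_BOARD_VENDOR, "TIMI"),
                DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 15 2022"),
            }
        },
        {}
    };

    static struct snd_soc_card *pick_card(void)
    {
        const struct dmi_system_id *id = dmi_first_match(quirk_table);

        return id ? id->driver_data : NULL;
    }
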
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index 26066682c983..3b0e715549c9 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -1191,18 +1191,12 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client)
if (pdata) {
cs42l56->pdata = *pdata;
} else {
- pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
- GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
if (i2c_client->dev.of_node) {
ret = cs42l56_handle_of_data(i2c_client,
&cs42l56->pdata);
if (ret != 0)
return ret;
}
- cs42l56->pdata = *pdata;
}
if (cs42l56->pdata.gpio_nreset) {
diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
index 9ddf6a35e91c..28a0565c2a95 100644
--- a/sound/soc/codecs/es8326.c
+++ b/sound/soc/codecs/es8326.c
@@ -729,14 +729,16 @@ static int es8326_probe(struct snd_soc_component *component)
}
dev_dbg(component->dev, "jack-pol %x", es8326->jack_pol);
- ret = device_property_read_u8(component->dev, "everest,interrupt-src", &es8326->jack_pol);
+ ret = device_property_read_u8(component->dev, "everest,interrupt-src",
+ &es8326->interrupt_src);
if (ret != 0) {
dev_dbg(component->dev, "interrupt-src return %d", ret);
es8326->interrupt_src = ES8326_HP_DET_SRC_PIN9;
}
dev_dbg(component->dev, "interrupt-src %x", es8326->interrupt_src);
- ret = device_property_read_u8(component->dev, "everest,interrupt-clk", &es8326->jack_pol);
+ ret = device_property_read_u8(component->dev, "everest,interrupt-clk",
+ &es8326->interrupt_clk);
if (ret != 0) {
dev_dbg(component->dev, "interrupt-clk return %d", ret);
es8326->interrupt_clk = 0x45;
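
The bug here was that both properties were read into jack_pol; the fix reads each one into its own field and keeps the documented default when the property is absent. A sketch of that read-with-fallback pattern around device_property_read_u8(); the wrapper is an illustration:

    #include <linux/property.h>
    #include <linux/device.h>

    static u8 read_u8_or(struct device *dev, const char *prop, u8 def)
    {
        u8 val;

        if (device_property_read_u8(dev, prop, &val))
            return def;          /* property absent or unreadable */
        return val;
    }

    /* usage (hypothetical):
     *   es8326->interrupt_src =
     *       read_u8_or(dev, "everest,interrupt-src", ES8326_HP_DET_SRC_PIN9);
     */
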
diff --git a/sound/soc/codecs/rt715-sdca-sdw.c b/sound/soc/codecs/rt715-sdca-sdw.c
index 3f981a9e7fb6..c54ecf3e6987 100644
--- a/sound/soc/codecs/rt715-sdca-sdw.c
+++ b/sound/soc/codecs/rt715-sdca-sdw.c
@@ -167,7 +167,7 @@ static int rt715_sdca_read_prop(struct sdw_slave *slave)
}
/* set the timeout values */
- prop->clk_stop_timeout = 20;
+ prop->clk_stop_timeout = 200;
return 0;
}
diff --git a/sound/soc/codecs/tas5805m.c b/sound/soc/codecs/tas5805m.c
index beb4ec629a03..4e38eb7acea1 100644
--- a/sound/soc/codecs/tas5805m.c
+++ b/sound/soc/codecs/tas5805m.c
@@ -154,6 +154,7 @@ static const uint32_t tas5805m_volume[] = {
#define TAS5805M_VOLUME_MIN 0
struct tas5805m_priv {
+ struct i2c_client *i2c;
struct regulator *pvdd;
struct gpio_desc *gpio_pdn_n;
@@ -165,6 +166,9 @@ struct tas5805m_priv {
int vol[2];
bool is_powered;
bool is_muted;
+
+ struct work_struct work;
+ struct mutex lock;
};
static void set_dsp_scale(struct regmap *rm, int offset, int vol)
@@ -181,13 +185,11 @@ static void set_dsp_scale(struct regmap *rm, int offset, int vol)
regmap_bulk_write(rm, offset, v, ARRAY_SIZE(v));
}
-static void tas5805m_refresh(struct snd_soc_component *component)
+static void tas5805m_refresh(struct tas5805m_priv *tas5805m)
{
- struct tas5805m_priv *tas5805m =
- snd_soc_component_get_drvdata(component);
struct regmap *rm = tas5805m->regmap;
- dev_dbg(component->dev, "refresh: is_muted=%d, vol=%d/%d\n",
+ dev_dbg(&tas5805m->i2c->dev, "refresh: is_muted=%d, vol=%d/%d\n",
tas5805m->is_muted, tas5805m->vol[0], tas5805m->vol[1]);
regmap_write(rm, REG_PAGE, 0x00);
@@ -201,6 +203,9 @@ static void tas5805m_refresh(struct snd_soc_component *component)
set_dsp_scale(rm, 0x24, tas5805m->vol[0]);
set_dsp_scale(rm, 0x28, tas5805m->vol[1]);
+ regmap_write(rm, REG_PAGE, 0x00);
+ regmap_write(rm, REG_BOOK, 0x00);
+
/* Set/clear digital soft-mute */
regmap_write(rm, REG_DEVICE_CTRL_2,
(tas5805m->is_muted ? DCTRL2_MUTE : 0) |
@@ -226,8 +231,11 @@ static int tas5805m_vol_get(struct snd_kcontrol *kcontrol,
struct tas5805m_priv *tas5805m =
snd_soc_component_get_drvdata(component);
+ mutex_lock(&tas5805m->lock);
ucontrol->value.integer.value[0] = tas5805m->vol[0];
ucontrol->value.integer.value[1] = tas5805m->vol[1];
+ mutex_unlock(&tas5805m->lock);
+
return 0;
}
@@ -243,11 +251,13 @@ static int tas5805m_vol_put(struct snd_kcontrol *kcontrol,
snd_soc_kcontrol_component(kcontrol);
struct tas5805m_priv *tas5805m =
snd_soc_component_get_drvdata(component);
+ int ret = 0;
if (!(volume_is_valid(ucontrol->value.integer.value[0]) &&
volume_is_valid(ucontrol->value.integer.value[1])))
return -EINVAL;
+ mutex_lock(&tas5805m->lock);
if (tas5805m->vol[0] != ucontrol->value.integer.value[0] ||
tas5805m->vol[1] != ucontrol->value.integer.value[1]) {
tas5805m->vol[0] = ucontrol->value.integer.value[0];
@@ -256,11 +266,12 @@ static int tas5805m_vol_put(struct snd_kcontrol *kcontrol,
tas5805m->vol[0], tas5805m->vol[1],
tas5805m->is_powered);
if (tas5805m->is_powered)
- tas5805m_refresh(component);
- return 1;
+ tas5805m_refresh(tas5805m);
+ ret = 1;
}
+ mutex_unlock(&tas5805m->lock);
- return 0;
+ return ret;
}
static const struct snd_kcontrol_new tas5805m_snd_controls[] = {
@@ -294,54 +305,83 @@ static int tas5805m_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_component *component = dai->component;
struct tas5805m_priv *tas5805m =
snd_soc_component_get_drvdata(component);
- struct regmap *rm = tas5805m->regmap;
- unsigned int chan, global1, global2;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- dev_dbg(component->dev, "DSP startup\n");
-
- /* We mustn't issue any I2C transactions until the I2S
- * clock is stable. Furthermore, we must allow a 5ms
- * delay after the first set of register writes to
- * allow the DSP to boot before configuring it.
- */
- usleep_range(5000, 10000);
- send_cfg(rm, dsp_cfg_preboot,
- ARRAY_SIZE(dsp_cfg_preboot));
- usleep_range(5000, 15000);
- send_cfg(rm, tas5805m->dsp_cfg_data,
- tas5805m->dsp_cfg_len);
-
- tas5805m->is_powered = true;
- tas5805m_refresh(component);
+ dev_dbg(component->dev, "clock start\n");
+ schedule_work(&tas5805m->work);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- dev_dbg(component->dev, "DSP shutdown\n");
+ break;
- tas5805m->is_powered = false;
+ default:
+ return -EINVAL;
+ }
- regmap_write(rm, REG_PAGE, 0x00);
- regmap_write(rm, REG_BOOK, 0x00);
+ return 0;
+}
- regmap_read(rm, REG_CHAN_FAULT, &chan);
- regmap_read(rm, REG_GLOBAL_FAULT1, &global1);
- regmap_read(rm, REG_GLOBAL_FAULT2, &global2);
+static void do_work(struct work_struct *work)
+{
+ struct tas5805m_priv *tas5805m =
+ container_of(work, struct tas5805m_priv, work);
+ struct regmap *rm = tas5805m->regmap;
- dev_dbg(component->dev,
- "fault regs: CHAN=%02x, GLOBAL1=%02x, GLOBAL2=%02x\n",
- chan, global1, global2);
+ dev_dbg(&tas5805m->i2c->dev, "DSP startup\n");
- regmap_write(rm, REG_DEVICE_CTRL_2, DCTRL2_MODE_HIZ);
- break;
+ mutex_lock(&tas5805m->lock);
+ /* We mustn't issue any I2C transactions until the I2S
+ * clock is stable. Furthermore, we must allow a 5ms
+ * delay after the first set of register writes to
+ * allow the DSP to boot before configuring it.
+ */
+ usleep_range(5000, 10000);
+ send_cfg(rm, dsp_cfg_preboot, ARRAY_SIZE(dsp_cfg_preboot));
+ usleep_range(5000, 15000);
+ send_cfg(rm, tas5805m->dsp_cfg_data, tas5805m->dsp_cfg_len);
+
+ tas5805m->is_powered = true;
+ tas5805m_refresh(tas5805m);
+ mutex_unlock(&tas5805m->lock);
+}
- default:
- return -EINVAL;
+static int tas5805m_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct tas5805m_priv *tas5805m =
+ snd_soc_component_get_drvdata(component);
+ struct regmap *rm = tas5805m->regmap;
+
+ if (event & SND_SOC_DAPM_PRE_PMD) {
+ unsigned int chan, global1, global2;
+
+ dev_dbg(component->dev, "DSP shutdown\n");
+ cancel_work_sync(&tas5805m->work);
+
+ mutex_lock(&tas5805m->lock);
+ if (tas5805m->is_powered) {
+ tas5805m->is_powered = false;
+
+ regmap_write(rm, REG_PAGE, 0x00);
+ regmap_write(rm, REG_BOOK, 0x00);
+
+ regmap_read(rm, REG_CHAN_FAULT, &chan);
+ regmap_read(rm, REG_GLOBAL_FAULT1, &global1);
+ regmap_read(rm, REG_GLOBAL_FAULT2, &global2);
+
+ dev_dbg(component->dev, "fault regs: CHAN=%02x, "
+ "GLOBAL1=%02x, GLOBAL2=%02x\n",
+ chan, global1, global2);
+
+ regmap_write(rm, REG_DEVICE_CTRL_2, DCTRL2_MODE_HIZ);
+ }
+ mutex_unlock(&tas5805m->lock);
}
return 0;
@@ -354,7 +394,8 @@ static const struct snd_soc_dapm_route tas5805m_audio_map[] = {
static const struct snd_soc_dapm_widget tas5805m_dapm_widgets[] = {
SND_SOC_DAPM_AIF_IN("DAC IN", "Playback", 0, SND_SOC_NOPM, 0, 0),
- SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0,
+ tas5805m_dac_event, SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_OUTPUT("OUT")
};
@@ -375,11 +416,14 @@ static int tas5805m_mute(struct snd_soc_dai *dai, int mute, int direction)
struct tas5805m_priv *tas5805m =
snd_soc_component_get_drvdata(component);
+ mutex_lock(&tas5805m->lock);
dev_dbg(component->dev, "set mute=%d (is_powered=%d)\n",
mute, tas5805m->is_powered);
+
tas5805m->is_muted = mute;
if (tas5805m->is_powered)
- tas5805m_refresh(component);
+ tas5805m_refresh(tas5805m);
+ mutex_unlock(&tas5805m->lock);
return 0;
}
@@ -434,6 +478,7 @@ static int tas5805m_i2c_probe(struct i2c_client *i2c)
if (!tas5805m)
return -ENOMEM;
+ tas5805m->i2c = i2c;
tas5805m->pvdd = devm_regulator_get(dev, "pvdd");
if (IS_ERR(tas5805m->pvdd)) {
dev_err(dev, "failed to get pvdd supply: %ld\n",
@@ -507,6 +552,9 @@ static int tas5805m_i2c_probe(struct i2c_client *i2c)
gpiod_set_value(tas5805m->gpio_pdn_n, 1);
usleep_range(10000, 15000);
+ INIT_WORK(&tas5805m->work, do_work);
+ mutex_init(&tas5805m->lock);
+
/* Don't register through devm. We need to be able to unregister
* the component prior to deasserting PDN#
*/
@@ -527,6 +575,7 @@ static void tas5805m_i2c_remove(struct i2c_client *i2c)
struct device *dev = &i2c->dev;
struct tas5805m_priv *tas5805m = dev_get_drvdata(dev);
+ cancel_work_sync(&tas5805m->work);
snd_soc_unregister_component(dev);
gpiod_set_value(tas5805m->gpio_pdn_n, 0);
usleep_range(10000, 15000);
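
The tas5805m rework defers the DSP bring-up, which sleeps and issues long I2C sequences, from the trigger callback to a work item, guards the cached volume/mute/power state with a mutex, and performs shutdown from a DAPM PRE_PMD event after cancelling the work. A minimal sketch of that defer-to-work shape with hypothetical names:

    #include <linux/workqueue.h>
    #include <linux/mutex.h>
    #include <linux/delay.h>

    struct amp {
        struct work_struct work;
        struct mutex lock;
        bool is_powered;
    };

    static void amp_power_up_work(struct work_struct *work)
    {
        struct amp *amp = container_of(work, struct amp, work);

        mutex_lock(&amp->lock);
        usleep_range(5000, 10000);    /* allowed here, not in trigger() */
        /* ... send the DSP configuration over I2C ... */
        amp->is_powered = true;
        mutex_unlock(&amp->lock);
    }

    static int amp_trigger_start(struct amp *amp)
    {
        schedule_work(&amp->work);    /* cheap and non-sleeping */
        return 0;
    }

    static void amp_shutdown(struct amp *amp)
    {
        cancel_work_sync(&amp->work); /* make sure power-up is not racing */
        mutex_lock(&amp->lock);
        amp->is_powered = false;
        mutex_unlock(&amp->lock);
    }

    static void amp_init(struct amp *amp)
    {
        INIT_WORK(&amp->work, amp_power_up_work);
        mutex_init(&amp->lock);
    }
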
diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
index 966ba4909204..58fdb4e9fd97 100644
--- a/sound/soc/codecs/wsa883x.c
+++ b/sound/soc/codecs/wsa883x.c
@@ -1359,8 +1359,8 @@ static struct snd_soc_dai_driver wsa883x_dais[] = {
.stream_name = "SPKR Playback",
.rates = WSA883X_RATES | WSA883X_FRAC_RATES,
.formats = WSA883X_FORMATS,
- .rate_max = 8000,
- .rate_min = 352800,
+ .rate_min = 8000,
+ .rate_max = 352800,
.channels_min = 1,
.channels_max = 1,
},
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 1c9be8a5dcb1..35a52c3a020d 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -1141,6 +1141,7 @@ static int fsl_sai_check_version(struct device *dev)
sai->verid.version = val &
(FSL_SAI_VERID_MAJOR_MASK | FSL_SAI_VERID_MINOR_MASK);
+ sai->verid.version >>= FSL_SAI_VERID_MINOR_SHIFT;
sai->verid.feature = val & FSL_SAI_VERID_FEATURE_MASK;
ret = regmap_read(sai->regmap, FSL_SAI_PARAM, &val);
diff --git a/sound/soc/intel/avs/core.c b/sound/soc/intel/avs/core.c
index 2ca24273c491..637501850728 100644
--- a/sound/soc/intel/avs/core.c
+++ b/sound/soc/intel/avs/core.c
@@ -481,6 +481,29 @@ err_remap_bar0:
return ret;
}
+static void avs_pci_shutdown(struct pci_dev *pci)
+{
+ struct hdac_bus *bus = pci_get_drvdata(pci);
+ struct avs_dev *adev = hdac_to_avs(bus);
+
+ cancel_work_sync(&adev->probe_work);
+ avs_ipc_block(adev->ipc);
+
+ snd_hdac_stop_streams(bus);
+ avs_dsp_op(adev, int_control, false);
+ snd_hdac_ext_bus_ppcap_int_enable(bus, false);
+ snd_hdac_ext_bus_link_power_down_all(bus);
+
+ snd_hdac_bus_stop_chip(bus);
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
+
+ if (avs_platattr_test(adev, CLDMA))
+ pci_free_irq(pci, 0, &code_loader);
+ pci_free_irq(pci, 0, adev);
+ pci_free_irq(pci, 0, bus);
+ pci_free_irq_vectors(pci);
+}
+
static void avs_pci_remove(struct pci_dev *pci)
{
struct hdac_device *hdev, *save;
@@ -739,6 +762,7 @@ static struct pci_driver avs_pci_driver = {
.id_table = avs_ids,
.probe = avs_pci_probe,
.remove = avs_pci_remove,
+ .shutdown = avs_pci_shutdown,
.driver = {
.pm = &avs_dev_pm,
},
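
The new .shutdown callback quiesces the controller at reboot or kexec: it cancels the probe work, blocks IPC, stops streams and interrupts, powers down the links, stops the chip and frees the IRQs. A bare skeleton of where a shutdown hook sits in a pci_driver; the quiesce steps are placeholders for the controller-specific sequence:

    #include <linux/pci.h>
    #include <linux/module.h>

    static int my_probe(struct pci_dev *pci, const struct pci_device_id *id)
    {
        return 0;
    }

    static void my_remove(struct pci_dev *pci)
    {
    }

    static void my_shutdown(struct pci_dev *pci)
    {
        /* stop streams, mask interrupts, power down links, free IRQs */
    }

    static const struct pci_device_id my_ids[] = { { } };

    static struct pci_driver my_driver = {
        .name     = "my-audio",
        .id_table = my_ids,
        .probe    = my_probe,
        .remove   = my_remove,
        .shutdown = my_shutdown,
    };
    module_pci_driver(my_driver);

    MODULE_LICENSE("GPL");
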
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index 09d1f0f6d686..df157b01df8b 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -497,21 +497,28 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
if (adev) {
snprintf(codec_name, sizeof(codec_name),
"i2c-%s", acpi_dev_name(adev));
- put_device(&adev->dev);
byt_cht_es8316_dais[dai_index].codecs->name = codec_name;
} else {
dev_err(dev, "Error cannot find '%s' dev\n", mach->id);
return -ENXIO;
}
+ codec_dev = acpi_get_first_physical_node(adev);
+ acpi_dev_put(adev);
+ if (!codec_dev)
+ return -EPROBE_DEFER;
+ priv->codec_dev = get_device(codec_dev);
+
/* override platform name, if required */
byt_cht_es8316_card.dev = dev;
platform_name = mach->mach_params.platform;
ret = snd_soc_fixup_dai_links_platform_name(&byt_cht_es8316_card,
platform_name);
- if (ret)
+ if (ret) {
+ put_device(codec_dev);
return ret;
+ }
/* Check for BYTCR or other platform and setup quirks */
dmi_id = dmi_first_match(byt_cht_es8316_quirk_table);
@@ -539,13 +546,10 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
/* get the clock */
priv->mclk = devm_clk_get(dev, "pmc_plt_clk_3");
- if (IS_ERR(priv->mclk))
+ if (IS_ERR(priv->mclk)) {
+ put_device(codec_dev);
return dev_err_probe(dev, PTR_ERR(priv->mclk), "clk_get pmc_plt_clk_3 failed\n");
-
- codec_dev = acpi_get_first_physical_node(adev);
- if (!codec_dev)
- return -EPROBE_DEFER;
- priv->codec_dev = get_device(codec_dev);
+ }
if (quirk & BYT_CHT_ES8316_JD_INVERTED)
props[cnt++] = PROPERTY_ENTRY_BOOL("everest,jack-detect-inverted");
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 4699ca79f3ea..79e0039c79a3 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -1636,13 +1636,18 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
if (adev) {
snprintf(byt_rt5640_codec_name, sizeof(byt_rt5640_codec_name),
"i2c-%s", acpi_dev_name(adev));
- put_device(&adev->dev);
byt_rt5640_dais[dai_index].codecs->name = byt_rt5640_codec_name;
} else {
dev_err(dev, "Error cannot find '%s' dev\n", mach->id);
return -ENXIO;
}
+ codec_dev = acpi_get_first_physical_node(adev);
+ acpi_dev_put(adev);
+ if (!codec_dev)
+ return -EPROBE_DEFER;
+ priv->codec_dev = get_device(codec_dev);
+
/*
* swap SSP0 if bytcr is detected
* (will be overridden if DMI quirk is detected)
@@ -1717,11 +1722,6 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
byt_rt5640_quirk = quirk_override;
}
- codec_dev = acpi_get_first_physical_node(adev);
- if (!codec_dev)
- return -EPROBE_DEFER;
- priv->codec_dev = get_device(codec_dev);
-
if (byt_rt5640_quirk & BYT_RT5640_JD_HP_ELITEP_1000G2) {
acpi_dev_add_driver_gpios(ACPI_COMPANION(priv->codec_dev),
byt_rt5640_hp_elitepad_1000g2_gpios);
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 81ac6eeda2e6..8fca9b82d4d0 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -922,7 +922,6 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
if (adev) {
snprintf(byt_rt5651_codec_name, sizeof(byt_rt5651_codec_name),
"i2c-%s", acpi_dev_name(adev));
- put_device(&adev->dev);
byt_rt5651_dais[dai_index].codecs->name = byt_rt5651_codec_name;
} else {
dev_err(dev, "Error cannot find '%s' dev\n", mach->id);
@@ -930,6 +929,7 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
}
codec_dev = acpi_get_first_physical_node(adev);
+ acpi_dev_put(adev);
if (!codec_dev)
return -EPROBE_DEFER;
priv->codec_dev = get_device(codec_dev);
diff --git a/sound/soc/intel/boards/bytcr_wm5102.c b/sound/soc/intel/boards/bytcr_wm5102.c
index 1669eb3bd80f..c0706537f673 100644
--- a/sound/soc/intel/boards/bytcr_wm5102.c
+++ b/sound/soc/intel/boards/bytcr_wm5102.c
@@ -411,9 +411,9 @@ static int snd_byt_wm5102_mc_probe(struct platform_device *pdev)
return -ENOENT;
}
snprintf(codec_name, sizeof(codec_name), "spi-%s", acpi_dev_name(adev));
- put_device(&adev->dev);
codec_dev = bus_find_device_by_name(&spi_bus_type, NULL, codec_name);
+ acpi_dev_put(adev);
if (!codec_dev)
return -EPROBE_DEFER;
diff --git a/sound/soc/intel/boards/sof_cs42l42.c b/sound/soc/intel/boards/sof_cs42l42.c
index e38bd2831e6a..e9d190cb13b0 100644
--- a/sound/soc/intel/boards/sof_cs42l42.c
+++ b/sound/soc/intel/boards/sof_cs42l42.c
@@ -336,6 +336,9 @@ static int create_spk_amp_dai_links(struct device *dev,
links[*id].platforms = platform_component;
links[*id].num_platforms = ARRAY_SIZE(platform_component);
links[*id].dpcm_playback = 1;
+ /* firmware-generated echo reference */
+ links[*id].dpcm_capture = 1;
+
links[*id].no_pcm = 1;
links[*id].cpus = &cpus[*id];
links[*id].num_cpus = 1;
diff --git a/sound/soc/intel/boards/sof_es8336.c b/sound/soc/intel/boards/sof_es8336.c
index 773e5d1d87d4..894b6610b9e2 100644
--- a/sound/soc/intel/boards/sof_es8336.c
+++ b/sound/soc/intel/boards/sof_es8336.c
@@ -681,7 +681,6 @@ static int sof_es8336_probe(struct platform_device *pdev)
if (adev) {
snprintf(codec_name, sizeof(codec_name),
"i2c-%s", acpi_dev_name(adev));
- put_device(&adev->dev);
dai_links[0].codecs->name = codec_name;
/* also fixup codec dai name if relevant */
@@ -692,16 +691,19 @@ static int sof_es8336_probe(struct platform_device *pdev)
return -ENXIO;
}
- ret = snd_soc_fixup_dai_links_platform_name(&sof_es8336_card,
- mach->mach_params.platform);
- if (ret)
- return ret;
-
codec_dev = acpi_get_first_physical_node(adev);
+ acpi_dev_put(adev);
if (!codec_dev)
return -EPROBE_DEFER;
priv->codec_dev = get_device(codec_dev);
+ ret = snd_soc_fixup_dai_links_platform_name(&sof_es8336_card,
+ mach->mach_params.platform);
+ if (ret) {
+ put_device(codec_dev);
+ return ret;
+ }
+
if (quirk & SOF_ES8336_JD_INVERTED)
props[cnt++] = PROPERTY_ENTRY_BOOL("everest,jack-detect-inverted");
diff --git a/sound/soc/intel/boards/sof_nau8825.c b/sound/soc/intel/boards/sof_nau8825.c
index a800854c2831..6794a0249a9a 100644
--- a/sound/soc/intel/boards/sof_nau8825.c
+++ b/sound/soc/intel/boards/sof_nau8825.c
@@ -487,8 +487,6 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
links[id].num_codecs = ARRAY_SIZE(max_98373_components);
links[id].init = max_98373_spk_codec_init;
links[id].ops = &max_98373_ops;
- /* feedback stream */
- links[id].dpcm_capture = 1;
} else if (sof_nau8825_quirk &
SOF_MAX98360A_SPEAKER_AMP_PRESENT) {
max_98360a_dai_link(&links[id]);
@@ -506,6 +504,9 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
links[id].platforms = platform_component;
links[id].num_platforms = ARRAY_SIZE(platform_component);
links[id].dpcm_playback = 1;
+ /* feedback stream or firmware-generated echo reference */
+ links[id].dpcm_capture = 1;
+
links[id].no_pcm = 1;
links[id].cpus = &cpus[id];
links[id].num_cpus = 1;
diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
index 2eabc4b0fafa..71a11d747622 100644
--- a/sound/soc/intel/boards/sof_rt5682.c
+++ b/sound/soc/intel/boards/sof_rt5682.c
@@ -761,8 +761,6 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
links[id].num_codecs = ARRAY_SIZE(max_98373_components);
links[id].init = max_98373_spk_codec_init;
links[id].ops = &max_98373_ops;
- /* feedback stream */
- links[id].dpcm_capture = 1;
} else if (sof_rt5682_quirk &
SOF_MAX98360A_SPEAKER_AMP_PRESENT) {
max_98360a_dai_link(&links[id]);
@@ -789,6 +787,9 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
links[id].platforms = platform_component;
links[id].num_platforms = ARRAY_SIZE(platform_component);
links[id].dpcm_playback = 1;
+ /* feedback stream or firmware-generated echo reference */
+ links[id].dpcm_capture = 1;
+
links[id].no_pcm = 1;
links[id].cpus = &cpus[id];
links[id].num_cpus = 1;
diff --git a/sound/soc/intel/boards/sof_ssp_amp.c b/sound/soc/intel/boards/sof_ssp_amp.c
index 94d25aeb6e7c..7b74f122e340 100644
--- a/sound/soc/intel/boards/sof_ssp_amp.c
+++ b/sound/soc/intel/boards/sof_ssp_amp.c
@@ -258,13 +258,12 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
sof_rt1308_dai_link(&links[id]);
} else if (sof_ssp_amp_quirk & SOF_CS35L41_SPEAKER_AMP_PRESENT) {
cs35l41_set_dai_link(&links[id]);
-
- /* feedback from amplifier */
- links[id].dpcm_capture = 1;
}
links[id].platforms = platform_component;
links[id].num_platforms = ARRAY_SIZE(platform_component);
links[id].dpcm_playback = 1;
+ /* feedback from amplifier or firmware-generated echo reference */
+ links[id].dpcm_capture = 1;
links[id].no_pcm = 1;
links[id].cpus = &cpus[id];
links[id].num_cpus = 1;
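
Across these SOF machine drivers the amplifier link now always declares a capture direction, since newer firmware can generate an echo-reference capture stream even when the amplifier has no hardware feedback. A sketch of the dai_link flags being set, using fields from struct snd_soc_dai_link in <sound/soc.h>:

    #include <sound/soc.h>

    static void setup_amp_link(struct snd_soc_dai_link *link)
    {
        link->dpcm_playback = 1;
        /* feedback stream or firmware-generated echo reference */
        link->dpcm_capture = 1;
        link->no_pcm = 1;
    }
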
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index c3be24b2fac5..a79a2fb260b8 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1401,13 +1401,17 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
template.num_kcontrols = le32_to_cpu(w->num_kcontrols);
kc = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(*kc), GFP_KERNEL);
- if (!kc)
+ if (!kc) {
+ ret = -ENOMEM;
goto hdr_err;
+ }
kcontrol_type = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(unsigned int),
GFP_KERNEL);
- if (!kcontrol_type)
+ if (!kcontrol_type) {
+ ret = -ENOMEM;
goto hdr_err;
+ }
for (i = 0; i < le32_to_cpu(w->num_kcontrols); i++) {
control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos;
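
Both allocation-failure paths previously jumped to the shared error label without setting ret, so the failure could go unreported; the fix sets -ENOMEM before the goto. A small runnable sketch of that set-before-goto cleanup pattern:

    #include <stdlib.h>
    #include <errno.h>

    static int build(void)
    {
        int ret = 0;
        void *a = NULL, *b = NULL;

        a = calloc(4, 16);
        if (!a) {
            ret = -ENOMEM;   /* record the error before jumping */
            goto err;
        }
        b = calloc(4, 16);
        if (!b) {
            ret = -ENOMEM;
            goto err;
        }
        /* ... use a and b ... */
    err:
        free(b);
        free(a);
        return ret;
    }

    int main(void)
    {
        return build() ? 1 : 0;
    }
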
diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
index 6bd2888fbb66..d5ccd4d09278 100644
--- a/sound/soc/sof/amd/acp.c
+++ b/sound/soc/sof/amd/acp.c
@@ -318,7 +318,6 @@ static irqreturn_t acp_irq_thread(int irq, void *context)
{
struct snd_sof_dev *sdev = context;
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
- unsigned int base = desc->dsp_intr_base;
unsigned int val, count = ACP_HW_SEM_RETRY_COUNT;
val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->ext_intr_stat);
@@ -328,28 +327,20 @@ static irqreturn_t acp_irq_thread(int irq, void *context)
return IRQ_HANDLED;
}
- val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
- if (val & ACP_DSP_TO_HOST_IRQ) {
- while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset)) {
- /* Wait until acquired HW Semaphore lock or timeout */
- count--;
- if (!count) {
- dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
- return IRQ_NONE;
- }
+ while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset)) {
+ /* Wait until acquired HW Semaphore lock or timeout */
+ count--;
+ if (!count) {
+ dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
+ return IRQ_NONE;
}
-
- sof_ops(sdev)->irq_thread(irq, sdev);
- val |= ACP_DSP_TO_HOST_IRQ;
- snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET, val);
-
- /* Unlock or Release HW Semaphore */
- snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);
-
- return IRQ_HANDLED;
}
- return IRQ_NONE;
+ sof_ops(sdev)->irq_thread(irq, sdev);
+ /* Unlock or Release HW Semaphore */
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);
+
+ return IRQ_HANDLED;
};
static irqreturn_t acp_irq_handler(int irq, void *dev_id)
@@ -360,8 +351,11 @@ static irqreturn_t acp_irq_handler(int irq, void *dev_id)
unsigned int val;
val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
- if (val)
+ if (val) {
+ val |= ACP_DSP_TO_HOST_IRQ;
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET, val);
return IRQ_WAKE_THREAD;
+ }
return IRQ_NONE;
}
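
The interrupt rework acknowledges ACP_DSP_TO_HOST_IRQ in the hard handler before returning IRQ_WAKE_THREAD, and leaves the threaded handler to take the hardware semaphore and dispatch the message. A kernel-flavored sketch of that ack-then-thread split; the plain register accessors stand in for the ACP helpers:

    #include <linux/interrupt.h>
    #include <linux/io.h>

    struct dsp {
        void __iomem *mmio;
        unsigned int stat_off;
    };

    static irqreturn_t dsp_irq(int irq, void *data)
    {
        struct dsp *dsp = data;
        u32 val = readl(dsp->mmio + dsp->stat_off);

        if (!val)
            return IRQ_NONE;

        writel(val, dsp->mmio + dsp->stat_off);   /* ack before threading */
        return IRQ_WAKE_THREAD;
    }

    static irqreturn_t dsp_irq_thread(int irq, void *data)
    {
        /* take the host/DSP semaphore, process the message, release it */
        return IRQ_HANDLED;
    }

    /* registration (sketch):
     *   request_threaded_irq(irq, dsp_irq, dsp_irq_thread,
     *                        IRQF_SHARED, "acp-dsp", dsp);
     */
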
diff --git a/sound/soc/sof/ipc4-mtrace.c b/sound/soc/sof/ipc4-mtrace.c
index 70dea8ae706e..0ec6ef681012 100644
--- a/sound/soc/sof/ipc4-mtrace.c
+++ b/sound/soc/sof/ipc4-mtrace.c
@@ -344,9 +344,10 @@ static ssize_t sof_ipc4_priority_mask_dfs_write(struct file *file,
size_t count, loff_t *ppos)
{
struct sof_mtrace_priv *priv = file->private_data;
- int id, ret;
+ unsigned int id;
char *buf;
u32 mask;
+ int ret;
/*
* To update Nth mask entry, write:
@@ -357,9 +358,9 @@ static ssize_t sof_ipc4_priority_mask_dfs_write(struct file *file,
if (IS_ERR(buf))
return PTR_ERR(buf);
- ret = sscanf(buf, "%d,0x%x", &id, &mask);
+ ret = sscanf(buf, "%u,0x%x", &id, &mask);
if (ret != 2) {
- ret = sscanf(buf, "%d,%x", &id, &mask);
+ ret = sscanf(buf, "%u,%x", &id, &mask);
if (ret != 2) {
ret = -EINVAL;
goto out;
diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
index 7306a2649857..865c367eb2f2 100644
--- a/sound/soc/sof/sof-audio.c
+++ b/sound/soc/sof/sof-audio.c
@@ -271,9 +271,9 @@ sof_unprepare_widgets_in_path(struct snd_sof_dev *sdev, struct snd_soc_dapm_widg
struct snd_sof_widget *swidget = widget->dobj.private;
struct snd_soc_dapm_path *p;
- /* return if the widget is in use or if it is already unprepared */
- if (!swidget->prepared || swidget->use_count > 1)
- return;
+ /* skip if the widget is in use or if it is already unprepared */
+ if (!swidget || !swidget->prepared || swidget->use_count > 0)
+ goto sink_unprepare;
if (widget_ops[widget->id].ipc_unprepare)
/* unprepare the source widget */
@@ -281,6 +281,7 @@ sof_unprepare_widgets_in_path(struct snd_sof_dev *sdev, struct snd_soc_dapm_widg
swidget->prepared = false;
+sink_unprepare:
/* unprepare all widgets in the sink paths */
snd_soc_dapm_widget_for_each_sink_path(widget, p) {
if (!p->walking && p->sink->dobj.private) {
@@ -303,7 +304,7 @@ sof_prepare_widgets_in_path(struct snd_sof_dev *sdev, struct snd_soc_dapm_widget
struct snd_soc_dapm_path *p;
int ret;
- if (!widget_ops[widget->id].ipc_prepare || swidget->prepared)
+ if (!swidget || !widget_ops[widget->id].ipc_prepare || swidget->prepared)
goto sink_prepare;
/* prepare the source widget */
@@ -326,7 +327,8 @@ sink_prepare:
p->walking = false;
if (ret < 0) {
/* unprepare the source widget */
- if (widget_ops[widget->id].ipc_unprepare && swidget->prepared) {
+ if (widget_ops[widget->id].ipc_unprepare &&
+ swidget && swidget->prepared) {
widget_ops[widget->id].ipc_unprepare(swidget);
swidget->prepared = false;
}
@@ -429,11 +431,11 @@ sof_walk_widgets_in_order(struct snd_sof_dev *sdev, struct snd_soc_dapm_widget_l
for_each_dapm_widgets(list, i, widget) {
/* starting widget for playback is AIF type */
- if (dir == SNDRV_PCM_STREAM_PLAYBACK && !WIDGET_IS_AIF(widget->id))
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK && widget->id != snd_soc_dapm_aif_in)
continue;
/* starting widget for capture is DAI type */
- if (dir == SNDRV_PCM_STREAM_CAPTURE && !WIDGET_IS_DAI(widget->id))
+ if (dir == SNDRV_PCM_STREAM_CAPTURE && widget->id != snd_soc_dapm_dai_out)
continue;
switch (op) {
diff --git a/sound/synth/emux/emux_nrpn.c b/sound/synth/emux/emux_nrpn.c
index 8056422ed7c5..0d6b82ae2955 100644
--- a/sound/synth/emux/emux_nrpn.c
+++ b/sound/synth/emux/emux_nrpn.c
@@ -349,6 +349,9 @@ int
snd_emux_xg_control(struct snd_emux_port *port, struct snd_midi_channel *chan,
int param)
{
+ if (param >= ARRAY_SIZE(chan->control))
+ return -EINVAL;
+
return send_converted_effect(xg_effects, ARRAY_SIZE(xg_effects),
port, chan, param,
chan->control[param],
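
The added check rejects controller numbers beyond chan->control[] before the array is indexed. A generic, runnable illustration of bounds-checking an index against ARRAY_SIZE() first:

    #include <stdio.h>
    #include <errno.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static int control[128];

    static int get_control(int param)
    {
        if (param < 0 || (size_t)param >= ARRAY_SIZE(control))
            return -EINVAL;   /* out of range, never dereferenced */
        return control[param];
    }

    int main(void)
    {
        printf("%d %d\n", get_control(7), get_control(500));
        return 0;
    }
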
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 3d13fdf7590c..3ecd1ba7fd4b 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -2152,6 +2152,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_GENERIC_IMPLICIT_FB),
DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */
QUIRK_FLAG_IFACE_SKIP_CLOSE),
+ DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
+ QUIRK_FLAG_FIXED_RATE),
DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
QUIRK_FLAG_FIXED_RATE),
diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
index 85973e55489e..fdb7f5db7308 100644
--- a/tools/testing/memblock/internal.h
+++ b/tools/testing/memblock/internal.h
@@ -15,10 +15,6 @@ bool mirrored_kernelcore = false;
struct page {};
-void __free_pages_core(struct page *page, unsigned int order)
-{
-}
-
void memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order)
{
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
index 2cf0c7a3fe23..567e07c19ecc 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
@@ -30,6 +30,8 @@
#define MAX_STRERR_LEN 256
#define MAX_TEST_NAME 80
+#define __always_unused __attribute__((__unused__))
+
#define _FAIL(errnum, fmt...) \
({ \
error_at_line(0, (errnum), __func__, __LINE__, fmt); \
@@ -321,7 +323,8 @@ static int socket_loopback(int family, int sotype)
return socket_loopback_reuseport(family, sotype, -1);
}
-static void test_insert_invalid(int family, int sotype, int mapfd)
+static void test_insert_invalid(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
u32 key = 0;
u64 value;
@@ -338,7 +341,8 @@ static void test_insert_invalid(int family, int sotype, int mapfd)
FAIL_ERRNO("map_update: expected EBADF");
}
-static void test_insert_opened(int family, int sotype, int mapfd)
+static void test_insert_opened(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
u32 key = 0;
u64 value;
@@ -359,7 +363,8 @@ static void test_insert_opened(int family, int sotype, int mapfd)
xclose(s);
}
-static void test_insert_bound(int family, int sotype, int mapfd)
+static void test_insert_bound(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
socklen_t len;
@@ -386,7 +391,8 @@ close:
xclose(s);
}
-static void test_insert(int family, int sotype, int mapfd)
+static void test_insert(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
u64 value;
u32 key;
@@ -402,7 +408,8 @@ static void test_insert(int family, int sotype, int mapfd)
xclose(s);
}
-static void test_delete_after_insert(int family, int sotype, int mapfd)
+static void test_delete_after_insert(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
u64 value;
u32 key;
@@ -419,7 +426,8 @@ static void test_delete_after_insert(int family, int sotype, int mapfd)
xclose(s);
}
-static void test_delete_after_close(int family, int sotype, int mapfd)
+static void test_delete_after_close(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
int err, s;
u64 value;
@@ -442,7 +450,8 @@ static void test_delete_after_close(int family, int sotype, int mapfd)
FAIL_ERRNO("map_delete: expected EINVAL/EINVAL");
}
-static void test_lookup_after_insert(int family, int sotype, int mapfd)
+static void test_lookup_after_insert(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
u64 cookie, value;
socklen_t len;
@@ -470,7 +479,8 @@ static void test_lookup_after_insert(int family, int sotype, int mapfd)
xclose(s);
}
-static void test_lookup_after_delete(int family, int sotype, int mapfd)
+static void test_lookup_after_delete(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
int err, s;
u64 value;
@@ -493,7 +503,8 @@ static void test_lookup_after_delete(int family, int sotype, int mapfd)
xclose(s);
}
-static void test_lookup_32_bit_value(int family, int sotype, int mapfd)
+static void test_lookup_32_bit_value(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
u32 key, value32;
int err, s;
@@ -523,7 +534,8 @@ close:
xclose(s);
}
-static void test_update_existing(int family, int sotype, int mapfd)
+static void test_update_existing(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
int s1, s2;
u64 value;
@@ -551,7 +563,7 @@ close_s1:
/* Exercise the code path where we destroy child sockets that never
* got accept()'ed, aka orphans, when parent socket gets closed.
*/
-static void test_destroy_orphan_child(int family, int sotype, int mapfd)
+static void do_destroy_orphan_child(int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
socklen_t len;
@@ -582,10 +594,38 @@ close_srv:
xclose(s);
}
+static void test_destroy_orphan_child(struct test_sockmap_listen *skel,
+ int family, int sotype, int mapfd)
+{
+ int msg_verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
+ int skb_verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+ const struct test {
+ int progfd;
+ enum bpf_attach_type atype;
+ } tests[] = {
+ { -1, -1 },
+ { msg_verdict, BPF_SK_MSG_VERDICT },
+ { skb_verdict, BPF_SK_SKB_VERDICT },
+ };
+ const struct test *t;
+
+ for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+ if (t->progfd != -1 &&
+ xbpf_prog_attach(t->progfd, mapfd, t->atype, 0) != 0)
+ return;
+
+ do_destroy_orphan_child(family, sotype, mapfd);
+
+ if (t->progfd != -1)
+ xbpf_prog_detach2(t->progfd, mapfd, t->atype);
+ }
+}
+
/* Perform a passive open after removing listening socket from SOCKMAP
* to ensure that callbacks get restored properly.
*/
-static void test_clone_after_delete(int family, int sotype, int mapfd)
+static void test_clone_after_delete(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
socklen_t len;
@@ -621,7 +661,8 @@ close_srv:
* SOCKMAP, but got accept()'ed only after the parent has been removed
* from SOCKMAP, gets cloned without parent psock state or callbacks.
*/
-static void test_accept_after_delete(int family, int sotype, int mapfd)
+static void test_accept_after_delete(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
const u32 zero = 0;
@@ -675,7 +716,8 @@ close_srv:
/* Check that child socket that got created and accepted while parent
* was in a SOCKMAP is cloned without parent psock state or callbacks.
*/
-static void test_accept_before_delete(int family, int sotype, int mapfd)
+static void test_accept_before_delete(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
const u32 zero = 0, one = 1;
@@ -784,7 +826,8 @@ done:
return NULL;
}
-static void test_syn_recv_insert_delete(int family, int sotype, int mapfd)
+static void test_syn_recv_insert_delete(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
struct connect_accept_ctx ctx = { 0 };
struct sockaddr_storage addr;
@@ -847,7 +890,8 @@ static void *listen_thread(void *arg)
return NULL;
}
-static void test_race_insert_listen(int family, int socktype, int mapfd)
+static void test_race_insert_listen(struct test_sockmap_listen *skel __always_unused,
+ int family, int socktype, int mapfd)
{
struct connect_accept_ctx ctx = { 0 };
const u32 zero = 0;
@@ -1473,7 +1517,8 @@ static void test_ops(struct test_sockmap_listen *skel, struct bpf_map *map,
int family, int sotype)
{
const struct op_test {
- void (*fn)(int family, int sotype, int mapfd);
+ void (*fn)(struct test_sockmap_listen *skel,
+ int family, int sotype, int mapfd);
const char *name;
int sotype;
} tests[] = {
@@ -1520,7 +1565,7 @@ static void test_ops(struct test_sockmap_listen *skel, struct bpf_map *map,
if (!test__start_subtest(s))
continue;
- t->fn(family, sotype, map_fd);
+ t->fn(skel, family, sotype, map_fd);
test_ops_cleanup(map);
}
}
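
The refactor threads the skeleton through every test callback so that test_destroy_orphan_child() can run its body once per verdict-program configuration, iterating a small {progfd, attach type} table that includes a no-program entry. A runnable userspace sketch of that table-driven parameterization with stubbed attach/detach:

    #include <stdio.h>

    struct cfg {
        int progfd;
        int attach_type;
    };

    static int attach(int progfd, int type)   { (void)progfd; (void)type; return 0; }
    static void detach(int progfd, int type)  { (void)progfd; (void)type; }
    static void run_case(void)                { puts("case"); }

    int main(void)
    {
        const struct cfg cfgs[] = {
            { -1, -1 },   /* no program attached */
            {  3,  1 },   /* e.g. msg verdict */
            {  4,  2 },   /* e.g. skb verdict */
        };

        for (size_t i = 0; i < sizeof(cfgs) / sizeof(cfgs[0]); i++) {
            if (cfgs[i].progfd != -1 &&
                attach(cfgs[i].progfd, cfgs[i].attach_type))
                continue;

            run_case();

            if (cfgs[i].progfd != -1)
                detach(cfgs[i].progfd, cfgs[i].attach_type);
        }
        return 0;
    }
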
diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c
index 68b14fdfebdb..d63fd8991b03 100644
--- a/tools/testing/selftests/bpf/verifier/search_pruning.c
+++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
@@ -225,3 +225,39 @@
.result_unpriv = ACCEPT,
.insn_processed = 15,
},
+/* The test performs a conditional 64-bit write to a stack location
+ * fp[-8], this is followed by an unconditional 8-bit write to fp[-8],
+ * then data is read from fp[-8]. This sequence is unsafe.
+ *
+ * The test would be mistakenly marked as safe w/o dst register parent
+ * preservation in verifier.c:copy_register_state() function.
+ *
+ * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the
+ * checkpoint state after conditional 64-bit assignment.
+ */
+{
+ "write tracking and register parent chain bug",
+ .insns = {
+ /* r6 = ktime_get_ns() */
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ /* r0 = ktime_get_ns() */
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ /* if r0 > r6 goto +1 */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_6, 1),
+ /* *(u64 *)(r10 - 8) = 0xdeadbeef */
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdeadbeef),
+ /* r1 = 42 */
+ BPF_MOV64_IMM(BPF_REG_1, 42),
+ /* *(u8 *)(r10 - 8) = r1 */
+ BPF_STX_MEM(BPF_B, BPF_REG_FP, BPF_REG_1, -8),
+ /* r2 = *(u64 *)(r10 - 8) */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8),
+ /* exit(0) */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .flags = BPF_F_TEST_STATE_FREQ,
+ .errstr = "invalid read from stack off -8+1 size 8",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/cgroup/test_cpuset_prs.sh b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
index 186e1c26867e..75c100de90ff 100755
--- a/tools/testing/selftests/cgroup/test_cpuset_prs.sh
+++ b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
@@ -268,6 +268,7 @@ TEST_MATRIX=(
# Taking away all CPUs from parent or itself if there are tasks
# will make the partition invalid.
" S+ C2-3:P1:S+ C3:P1 . . T C2-3 . . 0 A1:2-3,A2:2-3 A1:P1,A2:P-1"
+ " S+ C3:P1:S+ C3 . . T P1 . . 0 A1:3,A2:3 A1:P1,A2:P-1"
" S+ $SETUP_A123_PARTITIONS . T:C2-3 . . . 0 A1:2-3,A2:2-3,A3:3 A1:P1,A2:P-1,A3:P-1"
" S+ $SETUP_A123_PARTITIONS . T:C2-3:C1-3 . . . 0 A1:1,A2:2,A3:3 A1:P1,A2:P1,A3:P1"
diff --git a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
index 9c79bbcce5a8..aff0a59f92d9 100755
--- a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
+++ b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
@@ -246,7 +246,7 @@ test_vlan_ingress_modify()
bridge vlan add dev $swp2 vid 300
tc filter add dev $swp1 ingress chain $(IS1 2) pref 3 \
- protocol 802.1Q flower skip_sw vlan_id 200 \
+ protocol 802.1Q flower skip_sw vlan_id 200 src_mac $h1_mac \
action vlan modify id 300 \
action goto chain $(IS2 0 0)
diff --git a/tools/testing/selftests/filesystems/fat/run_fat_tests.sh b/tools/testing/selftests/filesystems/fat/run_fat_tests.sh
index 7f35dc3d15df..7f35dc3d15df 100644..100755
--- a/tools/testing/selftests/filesystems/fat/run_fat_tests.sh
+++ b/tools/testing/selftests/filesystems/fat/run_fat_tests.sh
diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c
index beb944fa6fd4..54680dc5887f 100644
--- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c
+++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c
@@ -237,6 +237,11 @@ static void guest_check_s1ptw_wr_in_dirty_log(void)
GUEST_SYNC(CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG);
}
+static void guest_check_no_s1ptw_wr_in_dirty_log(void)
+{
+ GUEST_SYNC(CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG);
+}
+
static void guest_exec(void)
{
int (*code)(void) = (int (*)(void))TEST_EXEC_GVA;
@@ -304,7 +309,7 @@ static struct uffd_args {
/* Returns true to continue the test, and false if it should be skipped. */
static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
- struct uffd_args *args, bool expect_write)
+ struct uffd_args *args)
{
uint64_t addr = msg->arg.pagefault.address;
uint64_t flags = msg->arg.pagefault.flags;
@@ -313,7 +318,6 @@ static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING,
"The only expected UFFD mode is MISSING");
- ASSERT_EQ(!!(flags & UFFD_PAGEFAULT_FLAG_WRITE), expect_write);
ASSERT_EQ(addr, (uint64_t)args->hva);
pr_debug("uffd fault: addr=%p write=%d\n",
@@ -337,19 +341,14 @@ static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
return 0;
}
-static int uffd_pt_write_handler(int mode, int uffd, struct uffd_msg *msg)
-{
- return uffd_generic_handler(mode, uffd, msg, &pt_args, true);
-}
-
-static int uffd_data_write_handler(int mode, int uffd, struct uffd_msg *msg)
+static int uffd_pt_handler(int mode, int uffd, struct uffd_msg *msg)
{
- return uffd_generic_handler(mode, uffd, msg, &data_args, true);
+ return uffd_generic_handler(mode, uffd, msg, &pt_args);
}
-static int uffd_data_read_handler(int mode, int uffd, struct uffd_msg *msg)
+static int uffd_data_handler(int mode, int uffd, struct uffd_msg *msg)
{
- return uffd_generic_handler(mode, uffd, msg, &data_args, false);
+ return uffd_generic_handler(mode, uffd, msg, &data_args);
}
static void setup_uffd_args(struct userspace_mem_region *region,
@@ -471,9 +470,12 @@ static bool handle_cmd(struct kvm_vm *vm, int cmd)
{
struct userspace_mem_region *data_region, *pt_region;
bool continue_test = true;
+ uint64_t pte_gpa, pte_pg;
data_region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
pt_region = vm_get_mem_region(vm, MEM_REGION_PT);
+ pte_gpa = addr_hva2gpa(vm, virt_get_pte_hva(vm, TEST_GVA));
+ pte_pg = (pte_gpa - pt_region->region.guest_phys_addr) / getpagesize();
if (cmd == CMD_SKIP_TEST)
continue_test = false;
@@ -486,13 +488,13 @@ static bool handle_cmd(struct kvm_vm *vm, int cmd)
TEST_ASSERT(check_write_in_dirty_log(vm, data_region, 0),
"Missing write in dirty log");
if (cmd & CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG)
- TEST_ASSERT(check_write_in_dirty_log(vm, pt_region, 0),
+ TEST_ASSERT(check_write_in_dirty_log(vm, pt_region, pte_pg),
"Missing s1ptw write in dirty log");
if (cmd & CMD_CHECK_NO_WRITE_IN_DIRTY_LOG)
TEST_ASSERT(!check_write_in_dirty_log(vm, data_region, 0),
"Unexpected write in dirty log");
if (cmd & CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG)
- TEST_ASSERT(!check_write_in_dirty_log(vm, pt_region, 0),
+ TEST_ASSERT(!check_write_in_dirty_log(vm, pt_region, pte_pg),
"Unexpected s1ptw write in dirty log");
return continue_test;
@@ -797,7 +799,7 @@ static void help(char *name)
.expected_events = { .uffd_faults = _uffd_faults, }, \
}
-#define TEST_DIRTY_LOG(_access, _with_af, _test_check) \
+#define TEST_DIRTY_LOG(_access, _with_af, _test_check, _pt_check) \
{ \
.name = SCAT3(dirty_log, _access, _with_af), \
.data_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
@@ -805,13 +807,12 @@ static void help(char *name)
.guest_prepare = { _PREPARE(_with_af), \
_PREPARE(_access) }, \
.guest_test = _access, \
- .guest_test_check = { _CHECK(_with_af), _test_check, \
- guest_check_s1ptw_wr_in_dirty_log}, \
+ .guest_test_check = { _CHECK(_with_af), _test_check, _pt_check }, \
.expected_events = { 0 }, \
}
#define TEST_UFFD_AND_DIRTY_LOG(_access, _with_af, _uffd_data_handler, \
- _uffd_faults, _test_check) \
+ _uffd_faults, _test_check, _pt_check) \
{ \
.name = SCAT3(uffd_and_dirty_log, _access, _with_af), \
.data_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
@@ -820,16 +821,17 @@ static void help(char *name)
_PREPARE(_access) }, \
.guest_test = _access, \
.mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
- .guest_test_check = { _CHECK(_with_af), _test_check }, \
+ .guest_test_check = { _CHECK(_with_af), _test_check, _pt_check }, \
.uffd_data_handler = _uffd_data_handler, \
- .uffd_pt_handler = uffd_pt_write_handler, \
+ .uffd_pt_handler = uffd_pt_handler, \
.expected_events = { .uffd_faults = _uffd_faults, }, \
}
#define TEST_RO_MEMSLOT(_access, _mmio_handler, _mmio_exits) \
{ \
- .name = SCAT3(ro_memslot, _access, _with_af), \
+ .name = SCAT2(ro_memslot, _access), \
.data_memslot_flags = KVM_MEM_READONLY, \
+ .pt_memslot_flags = KVM_MEM_READONLY, \
.guest_prepare = { _PREPARE(_access) }, \
.guest_test = _access, \
.mmio_handler = _mmio_handler, \
@@ -840,6 +842,7 @@ static void help(char *name)
{ \
.name = SCAT2(ro_memslot_no_syndrome, _access), \
.data_memslot_flags = KVM_MEM_READONLY, \
+ .pt_memslot_flags = KVM_MEM_READONLY, \
.guest_test = _access, \
.fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
.expected_events = { .fail_vcpu_runs = 1 }, \
@@ -848,9 +851,9 @@ static void help(char *name)
#define TEST_RO_MEMSLOT_AND_DIRTY_LOG(_access, _mmio_handler, _mmio_exits, \
_test_check) \
{ \
- .name = SCAT3(ro_memslot, _access, _with_af), \
+ .name = SCAT2(ro_memslot, _access), \
.data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
- .pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
+ .pt_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
.guest_prepare = { _PREPARE(_access) }, \
.guest_test = _access, \
.guest_test_check = { _test_check }, \
@@ -862,7 +865,7 @@ static void help(char *name)
{ \
.name = SCAT2(ro_memslot_no_syn_and_dlog, _access), \
.data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
- .pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
+ .pt_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
.guest_test = _access, \
.guest_test_check = { _test_check }, \
.fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
@@ -874,11 +877,12 @@ static void help(char *name)
{ \
.name = SCAT2(ro_memslot_uffd, _access), \
.data_memslot_flags = KVM_MEM_READONLY, \
+ .pt_memslot_flags = KVM_MEM_READONLY, \
.mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
.guest_prepare = { _PREPARE(_access) }, \
.guest_test = _access, \
.uffd_data_handler = _uffd_data_handler, \
- .uffd_pt_handler = uffd_pt_write_handler, \
+ .uffd_pt_handler = uffd_pt_handler, \
.mmio_handler = _mmio_handler, \
.expected_events = { .mmio_exits = _mmio_exits, \
.uffd_faults = _uffd_faults }, \
@@ -889,10 +893,11 @@ static void help(char *name)
{ \
.name = SCAT2(ro_memslot_no_syndrome, _access), \
.data_memslot_flags = KVM_MEM_READONLY, \
+ .pt_memslot_flags = KVM_MEM_READONLY, \
.mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
.guest_test = _access, \
.uffd_data_handler = _uffd_data_handler, \
- .uffd_pt_handler = uffd_pt_write_handler, \
+ .uffd_pt_handler = uffd_pt_handler, \
.fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
.expected_events = { .fail_vcpu_runs = 1, \
.uffd_faults = _uffd_faults }, \
@@ -933,44 +938,51 @@ static struct test_desc tests[] = {
* (S1PTW).
*/
TEST_UFFD(guest_read64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_read_handler, uffd_pt_write_handler, 2),
- /* no_af should also lead to a PT write. */
+ uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_read64, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_read_handler, uffd_pt_write_handler, 2),
- /* Note how that cas invokes the read handler. */
+ uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_cas, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_read_handler, uffd_pt_write_handler, 2),
+ uffd_data_handler, uffd_pt_handler, 2),
/*
* Can't test guest_at with_af as it's IMPDEF whether the AF is set.
* The S1PTW fault should still be marked as a write.
*/
TEST_UFFD(guest_at, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_read_handler, uffd_pt_write_handler, 1),
+ uffd_no_handler, uffd_pt_handler, 1),
TEST_UFFD(guest_ld_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_read_handler, uffd_pt_write_handler, 2),
+ uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_write64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_write_handler, uffd_pt_write_handler, 2),
+ uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_dc_zva, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_write_handler, uffd_pt_write_handler, 2),
+ uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_st_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_write_handler, uffd_pt_write_handler, 2),
+ uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_exec, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
- uffd_data_read_handler, uffd_pt_write_handler, 2),
+ uffd_data_handler, uffd_pt_handler, 2),
/*
* Try accesses when the data and PT memory regions are both
* tracked for dirty logging.
*/
- TEST_DIRTY_LOG(guest_read64, with_af, guest_check_no_write_in_dirty_log),
- /* no_af should also lead to a PT write. */
- TEST_DIRTY_LOG(guest_read64, no_af, guest_check_no_write_in_dirty_log),
- TEST_DIRTY_LOG(guest_ld_preidx, with_af, guest_check_no_write_in_dirty_log),
- TEST_DIRTY_LOG(guest_at, no_af, guest_check_no_write_in_dirty_log),
- TEST_DIRTY_LOG(guest_exec, with_af, guest_check_no_write_in_dirty_log),
- TEST_DIRTY_LOG(guest_write64, with_af, guest_check_write_in_dirty_log),
- TEST_DIRTY_LOG(guest_cas, with_af, guest_check_write_in_dirty_log),
- TEST_DIRTY_LOG(guest_dc_zva, with_af, guest_check_write_in_dirty_log),
- TEST_DIRTY_LOG(guest_st_preidx, with_af, guest_check_write_in_dirty_log),
+ TEST_DIRTY_LOG(guest_read64, with_af, guest_check_no_write_in_dirty_log,
+ guest_check_s1ptw_wr_in_dirty_log),
+ TEST_DIRTY_LOG(guest_read64, no_af, guest_check_no_write_in_dirty_log,
+ guest_check_no_s1ptw_wr_in_dirty_log),
+ TEST_DIRTY_LOG(guest_ld_preidx, with_af,
+ guest_check_no_write_in_dirty_log,
+ guest_check_s1ptw_wr_in_dirty_log),
+ TEST_DIRTY_LOG(guest_at, no_af, guest_check_no_write_in_dirty_log,
+ guest_check_no_s1ptw_wr_in_dirty_log),
+ TEST_DIRTY_LOG(guest_exec, with_af, guest_check_no_write_in_dirty_log,
+ guest_check_s1ptw_wr_in_dirty_log),
+ TEST_DIRTY_LOG(guest_write64, with_af, guest_check_write_in_dirty_log,
+ guest_check_s1ptw_wr_in_dirty_log),
+ TEST_DIRTY_LOG(guest_cas, with_af, guest_check_write_in_dirty_log,
+ guest_check_s1ptw_wr_in_dirty_log),
+ TEST_DIRTY_LOG(guest_dc_zva, with_af, guest_check_write_in_dirty_log,
+ guest_check_s1ptw_wr_in_dirty_log),
+ TEST_DIRTY_LOG(guest_st_preidx, with_af, guest_check_write_in_dirty_log,
+ guest_check_s1ptw_wr_in_dirty_log),
/*
* Access when the data and PT memory regions are both marked for
@@ -980,29 +992,43 @@ static struct test_desc tests[] = {
* fault, and nothing in the dirty log. Any S1PTW should result in
* a write in the dirty log and a userfaultfd write.
*/
- TEST_UFFD_AND_DIRTY_LOG(guest_read64, with_af, uffd_data_read_handler, 2,
- guest_check_no_write_in_dirty_log),
- /* no_af should also lead to a PT write. */
- TEST_UFFD_AND_DIRTY_LOG(guest_read64, no_af, uffd_data_read_handler, 2,
- guest_check_no_write_in_dirty_log),
- TEST_UFFD_AND_DIRTY_LOG(guest_ld_preidx, with_af, uffd_data_read_handler,
- 2, guest_check_no_write_in_dirty_log),
- TEST_UFFD_AND_DIRTY_LOG(guest_at, with_af, 0, 1,
- guest_check_no_write_in_dirty_log),
- TEST_UFFD_AND_DIRTY_LOG(guest_exec, with_af, uffd_data_read_handler, 2,
- guest_check_no_write_in_dirty_log),
- TEST_UFFD_AND_DIRTY_LOG(guest_write64, with_af, uffd_data_write_handler,
- 2, guest_check_write_in_dirty_log),
- TEST_UFFD_AND_DIRTY_LOG(guest_cas, with_af, uffd_data_read_handler, 2,
- guest_check_write_in_dirty_log),
- TEST_UFFD_AND_DIRTY_LOG(guest_dc_zva, with_af, uffd_data_write_handler,
- 2, guest_check_write_in_dirty_log),
+ TEST_UFFD_AND_DIRTY_LOG(guest_read64, with_af,
+ uffd_data_handler, 2,
+ guest_check_no_write_in_dirty_log,
+ guest_check_s1ptw_wr_in_dirty_log),
+ TEST_UFFD_AND_DIRTY_LOG(guest_read64, no_af,
+ uffd_data_handler, 2,
+ guest_check_no_write_in_dirty_log,
+ guest_check_no_s1ptw_wr_in_dirty_log),
+ TEST_UFFD_AND_DIRTY_LOG(guest_ld_preidx, with_af,
+ uffd_data_handler,
+ 2, guest_check_no_write_in_dirty_log,
+ guest_check_s1ptw_wr_in_dirty_log),
+ TEST_UFFD_AND_DIRTY_LOG(guest_at, with_af, uffd_no_handler, 1,
+ guest_check_no_write_in_dirty_log,
+ guest_check_s1ptw_wr_in_dirty_log),
+ TEST_UFFD_AND_DIRTY_LOG(guest_exec, with_af,
+ uffd_data_handler, 2,
+ guest_check_no_write_in_dirty_log,
+ guest_check_s1ptw_wr_in_dirty_log),
+ TEST_UFFD_AND_DIRTY_LOG(guest_write64, with_af,
+ uffd_data_handler,
+ 2, guest_check_write_in_dirty_log,
+ guest_check_s1ptw_wr_in_dirty_log),
+ TEST_UFFD_AND_DIRTY_LOG(guest_cas, with_af,
+ uffd_data_handler, 2,
+ guest_check_write_in_dirty_log,
+ guest_check_s1ptw_wr_in_dirty_log),
+ TEST_UFFD_AND_DIRTY_LOG(guest_dc_zva, with_af,
+ uffd_data_handler,
+ 2, guest_check_write_in_dirty_log,
+ guest_check_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_st_preidx, with_af,
- uffd_data_write_handler, 2,
- guest_check_write_in_dirty_log),
-
+ uffd_data_handler, 2,
+ guest_check_write_in_dirty_log,
+ guest_check_s1ptw_wr_in_dirty_log),
/*
- * Try accesses when the data memory region is marked read-only
+ * Access when both the PT and data regions are marked read-only
* (with KVM_MEM_READONLY). Writes with a syndrome result in an
* MMIO exit, writes with no syndrome (e.g., CAS) result in a
* failed vcpu run, and reads/execs with and without syndromes do
@@ -1018,7 +1044,7 @@ static struct test_desc tests[] = {
TEST_RO_MEMSLOT_NO_SYNDROME(guest_st_preidx),
/*
- * Access when both the data region is both read-only and marked
+ * The PT and data regions are both read-only and marked
* for dirty logging at the same time. The expected result is that
* for writes there should be no write in the dirty log. The
* readonly handling is the same as if the memslot was not marked
@@ -1043,7 +1069,7 @@ static struct test_desc tests[] = {
guest_check_no_write_in_dirty_log),
/*
- * Access when the data region is both read-only and punched with
+ * The PT and data regions are both read-only and punched with
* holes tracked with userfaultfd. The expected result is the
* union of both userfaultfd and read-only behaviors. For example,
* write accesses result in a userfaultfd write fault and an MMIO
@@ -1051,22 +1077,15 @@ static struct test_desc tests[] = {
* no userfaultfd write fault. Reads result in userfaultfd getting
* triggered.
*/
- TEST_RO_MEMSLOT_AND_UFFD(guest_read64, 0, 0,
- uffd_data_read_handler, 2),
- TEST_RO_MEMSLOT_AND_UFFD(guest_ld_preidx, 0, 0,
- uffd_data_read_handler, 2),
- TEST_RO_MEMSLOT_AND_UFFD(guest_at, 0, 0,
- uffd_no_handler, 1),
- TEST_RO_MEMSLOT_AND_UFFD(guest_exec, 0, 0,
- uffd_data_read_handler, 2),
+ TEST_RO_MEMSLOT_AND_UFFD(guest_read64, 0, 0, uffd_data_handler, 2),
+ TEST_RO_MEMSLOT_AND_UFFD(guest_ld_preidx, 0, 0, uffd_data_handler, 2),
+ TEST_RO_MEMSLOT_AND_UFFD(guest_at, 0, 0, uffd_no_handler, 1),
+ TEST_RO_MEMSLOT_AND_UFFD(guest_exec, 0, 0, uffd_data_handler, 2),
TEST_RO_MEMSLOT_AND_UFFD(guest_write64, mmio_on_test_gpa_handler, 1,
- uffd_data_write_handler, 2),
- TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_cas,
- uffd_data_read_handler, 2),
- TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_dc_zva,
- uffd_no_handler, 1),
- TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_st_preidx,
- uffd_no_handler, 1),
+ uffd_data_handler, 2),
+ TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_cas, uffd_data_handler, 2),
+ TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_dc_zva, uffd_no_handler, 1),
+ TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_st_preidx, uffd_no_handler, 1),
{ 0 }
};
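
The page_fault_test changes above collapse the read- and write-specific userfaultfd callbacks into single uffd_data_handler/uffd_pt_handler entries, and split the dirty-log expectation in two: one check for the data access itself and a separate guest_check_s1ptw_wr_in_dirty_log/guest_check_no_s1ptw_wr_in_dirty_log check for the write generated by the stage-1 page-table walk. The bodies of the consolidated handlers are not part of this hunk; the function below is only a minimal sketch of what such a combined handler could look like, assuming it resolves the fault with UFFDIO_COPY and merely consults UFFD_PAGEFAULT_FLAG_WRITE to tell the access types apart (resolve_uffd_fault, backing and page_size are illustrative names, not identifiers from the patch).

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/* Sketch only: serve a userfaultfd page fault regardless of access type. */
static int resolve_uffd_fault(int uffd, const struct uffd_msg *msg,
			      void *backing, uint64_t page_size)
{
	struct uffdio_copy copy = {
		.dst = msg->arg.pagefault.address & ~(page_size - 1),
		.src = (uint64_t)(uintptr_t)backing,
		.len = page_size,
	};
	bool is_write = msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE;

	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
		return -errno;

	/* The caller can still distinguish the access type if it needs to. */
	return is_write ? 1 : 0;
}

With one handler shape for both access types, each TEST_UFFD() entry only needs to state how many userfaultfd faults are expected (the trailing 1 or 2 above, stored in .uffd_faults), rather than which flavor of handler to install.
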
diff --git a/tools/testing/selftests/net/cmsg_ipv6.sh b/tools/testing/selftests/net/cmsg_ipv6.sh
index 2d89cb0ad288..330d0b1ceced 100755
--- a/tools/testing/selftests/net/cmsg_ipv6.sh
+++ b/tools/testing/selftests/net/cmsg_ipv6.sh
@@ -6,7 +6,7 @@ ksft_skip=4
NS=ns
IP6=2001:db8:1::1/64
TGT6=2001:db8:1::2
-TMPF=`mktemp`
+TMPF=$(mktemp --suffix ".pcap")
cleanup()
{
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 1c4f866de7d7..3d8e4ebda1b6 100755
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -914,14 +914,14 @@ sysctl_set()
local value=$1; shift
SYSCTL_ORIG[$key]=$(sysctl -n $key)
- sysctl -qw $key=$value
+ sysctl -qw $key="$value"
}
sysctl_restore()
{
local key=$1; shift
- sysctl -qw $key=${SYSCTL_ORIG["$key"]}
+ sysctl -qw $key="${SYSCTL_ORIG[$key]}"
}
forwarding_enable()
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index d11d3d566608..079f8f46849d 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -498,6 +498,12 @@ kill_events_pids()
kill_wait $evts_ns2_pid
}
+kill_tests_wait()
+{
+ kill -SIGUSR1 $(ip netns pids $ns2) $(ip netns pids $ns1)
+ wait
+}
+
pm_nl_set_limits()
{
local ns=$1
@@ -1694,6 +1700,7 @@ chk_subflow_nr()
local subflow_nr=$3
local cnt1
local cnt2
+ local dump_stats
if [ -n "${need_title}" ]; then
printf "%03u %-36s %s" "${TEST_COUNT}" "${TEST_NAME}" "${msg}"
@@ -1711,7 +1718,12 @@ chk_subflow_nr()
echo "[ ok ]"
fi
- [ "${dump_stats}" = 1 ] && ( ss -N $ns1 -tOni ; ss -N $ns1 -tOni | grep token; ip -n $ns1 mptcp endpoint )
+ if [ "${dump_stats}" = 1 ]; then
+ ss -N $ns1 -tOni
+ ss -N $ns1 -tOni | grep token
+ ip -n $ns1 mptcp endpoint
+ dump_stats
+ fi
}
chk_link_usage()
@@ -3049,7 +3061,7 @@ endpoint_tests()
pm_nl_set_limits $ns1 2 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
- run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow &
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow 2>/dev/null &
wait_mpj $ns1
pm_nl_check_endpoint 1 "creation" \
@@ -3062,14 +3074,14 @@ endpoint_tests()
pm_nl_add_endpoint $ns2 10.0.2.2 flags signal
pm_nl_check_endpoint 0 "modif is allowed" \
$ns2 10.0.2.2 id 1 flags signal
- wait
+ kill_tests_wait
fi
if reset "delete and re-add"; then
pm_nl_set_limits $ns1 1 1
pm_nl_set_limits $ns2 1 1
pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
- run_tests $ns1 $ns2 10.0.1.1 4 0 0 slow &
+ run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_20 2>/dev/null &
wait_mpj $ns2
pm_nl_del_endpoint $ns2 2 10.0.2.2
@@ -3079,7 +3091,7 @@ endpoint_tests()
pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
wait_mpj $ns2
chk_subflow_nr "" "after re-add" 2
- wait
+ kill_tests_wait
fi
}
diff --git a/tools/testing/selftests/net/test_vxlan_vnifiltering.sh b/tools/testing/selftests/net/test_vxlan_vnifiltering.sh
index 704997ffc244..8c3ac0a72545 100755
--- a/tools/testing/selftests/net/test_vxlan_vnifiltering.sh
+++ b/tools/testing/selftests/net/test_vxlan_vnifiltering.sh
@@ -293,19 +293,11 @@ setup-vm() {
elif [[ -n $vtype && $vtype == "vnifilterg" ]]; then
# Add per vni group config with 'bridge vni' api
if [ -n "$group" ]; then
- if [ "$family" == "v4" ]; then
- if [ $mcast -eq 1 ]; then
- bridge -netns hv-$hvid vni add dev $vxlandev vni $tid group $group
- else
- bridge -netns hv-$hvid vni add dev $vxlandev vni $tid remote $group
- fi
- else
- if [ $mcast -eq 1 ]; then
- bridge -netns hv-$hvid vni add dev $vxlandev vni $tid group6 $group
- else
- bridge -netns hv-$hvid vni add dev $vxlandev vni $tid remote6 $group
- fi
- fi
+ if [ $mcast -eq 1 ]; then
+ bridge -netns hv-$hvid vni add dev $vxlandev vni $tid group $group
+ else
+ bridge -netns hv-$hvid vni add dev $vxlandev vni $tid remote $group
+ fi
fi
fi
done
diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh
index dc932fd65363..640bc43452fa 100755
--- a/tools/testing/selftests/net/udpgso_bench.sh
+++ b/tools/testing/selftests/net/udpgso_bench.sh
@@ -7,6 +7,7 @@ readonly GREEN='\033[0;92m'
readonly YELLOW='\033[0;33m'
readonly RED='\033[0;31m'
readonly NC='\033[0m' # No Color
+readonly TESTPORT=8000
readonly KSFT_PASS=0
readonly KSFT_FAIL=1
@@ -56,11 +57,26 @@ trap wake_children EXIT
run_one() {
local -r args=$@
+ local nr_socks=0
+ local i=0
+ local -r timeout=10
+
+ ./udpgso_bench_rx -p "$TESTPORT" &
+ ./udpgso_bench_rx -p "$TESTPORT" -t &
+
+ # Wait for the above test program to get ready to receive connections.
+ while [ "$i" -lt "$timeout" ]; do
+ nr_socks="$(ss -lnHi | grep -c "\*:${TESTPORT}")"
+ [ "$nr_socks" -eq 2 ] && break
+ i=$((i + 1))
+ sleep 1
+ done
+ if [ "$nr_socks" -ne 2 ]; then
+ echo "timed out while waiting for udpgso_bench_rx"
+ exit 1
+ fi
- ./udpgso_bench_rx &
- ./udpgso_bench_rx -t &
-
- ./udpgso_bench_tx ${args}
+ ./udpgso_bench_tx -p "$TESTPORT" ${args}
}
run_in_netns() {
diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
index 6a193425c367..4058c7451e70 100644
--- a/tools/testing/selftests/net/udpgso_bench_rx.c
+++ b/tools/testing/selftests/net/udpgso_bench_rx.c
@@ -250,7 +250,7 @@ static int recv_msg(int fd, char *buf, int len, int *gso_size)
static void do_flush_udp(int fd)
{
static char rbuf[ETH_MAX_MTU];
- int ret, len, gso_size, budget = 256;
+ int ret, len, gso_size = 0, budget = 256;
len = cfg_read_all ? sizeof(rbuf) : 0;
while (budget--) {
@@ -336,6 +336,8 @@ static void parse_opts(int argc, char **argv)
cfg_verify = true;
cfg_read_all = true;
break;
+ default:
+ exit(1);
}
}
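
Two small hardening changes in udpgso_bench_rx.c: gso_size is now zero-initialized before the receive loop, and an unrecognized option makes parse_opts() exit instead of being silently ignored. The initialization only matters if recv_msg() leaves gso_size untouched when no GRO control message arrives; the helper below is a rough sketch of that cmsg lookup, under the assumption that the receiver uses the UDP_GRO cmsg type (read_gso_size is an illustrative name, not a function from this hunk).

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

#ifndef UDP_GRO
#define UDP_GRO 104	/* cmsg type the kernel uses for UDP GRO segment size */
#endif

/* Sketch only: copy the GRO segment size out of a received msghdr, leaving
 * *gso_size untouched (i.e. the caller's 0) when no such cmsg is present. */
static void read_gso_size(struct msghdr *msg, int *gso_size)
{
	struct cmsghdr *cm;

	for (cm = CMSG_FIRSTHDR(msg); cm; cm = CMSG_NXTHDR(msg, cm)) {
		if (cm->cmsg_level == IPPROTO_UDP && cm->cmsg_type == UDP_GRO)
			memcpy(gso_size, CMSG_DATA(cm), sizeof(*gso_size));
	}
}

If no matching cmsg is found, *gso_size keeps the caller's zero, which is exactly what the diff now guarantees on the first pass through do_flush_udp().
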
diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c
index f1fdaa270291..477392715a9a 100644
--- a/tools/testing/selftests/net/udpgso_bench_tx.c
+++ b/tools/testing/selftests/net/udpgso_bench_tx.c
@@ -62,6 +62,7 @@ static int cfg_payload_len = (1472 * 42);
static int cfg_port = 8000;
static int cfg_runtime_ms = -1;
static bool cfg_poll;
+static int cfg_poll_loop_timeout_ms = 2000;
static bool cfg_segment;
static bool cfg_sendmmsg;
static bool cfg_tcp;
@@ -235,16 +236,17 @@ static void flush_errqueue_recv(int fd)
}
}
-static void flush_errqueue(int fd, const bool do_poll)
+static void flush_errqueue(int fd, const bool do_poll,
+ unsigned long poll_timeout, const bool poll_err)
{
if (do_poll) {
struct pollfd fds = {0};
int ret;
fds.fd = fd;
- ret = poll(&fds, 1, 500);
+ ret = poll(&fds, 1, poll_timeout);
if (ret == 0) {
- if (cfg_verbose)
+ if ((cfg_verbose) && (poll_err))
fprintf(stderr, "poll timeout\n");
} else if (ret < 0) {
error(1, errno, "poll");
@@ -254,6 +256,20 @@ static void flush_errqueue(int fd, const bool do_poll)
flush_errqueue_recv(fd);
}
+static void flush_errqueue_retry(int fd, unsigned long num_sends)
+{
+ unsigned long tnow, tstop;
+ bool first_try = true;
+
+ tnow = gettimeofday_ms();
+ tstop = tnow + cfg_poll_loop_timeout_ms;
+ do {
+ flush_errqueue(fd, true, tstop - tnow, first_try);
+ first_try = false;
+ tnow = gettimeofday_ms();
+ } while ((stat_zcopies != num_sends) && (tnow < tstop));
+}
+
static int send_tcp(int fd, char *data)
{
int ret, done = 0, count = 0;
@@ -413,7 +429,8 @@ static int send_udp_segment(int fd, char *data)
static void usage(const char *filepath)
{
- error(1, 0, "Usage: %s [-46acmHPtTuvz] [-C cpu] [-D dst ip] [-l secs] [-M messagenr] [-p port] [-s sendsize] [-S gsosize]",
+ error(1, 0, "Usage: %s [-46acmHPtTuvz] [-C cpu] [-D dst ip] [-l secs] "
+ "[-L secs] [-M messagenr] [-p port] [-s sendsize] [-S gsosize]",
filepath);
}
@@ -423,7 +440,7 @@ static void parse_opts(int argc, char **argv)
int max_len, hdrlen;
int c;
- while ((c = getopt(argc, argv, "46acC:D:Hl:mM:p:s:PS:tTuvz")) != -1) {
+ while ((c = getopt(argc, argv, "46acC:D:Hl:L:mM:p:s:PS:tTuvz")) != -1) {
switch (c) {
case '4':
if (cfg_family != PF_UNSPEC)
@@ -452,6 +469,9 @@ static void parse_opts(int argc, char **argv)
case 'l':
cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000;
break;
+ case 'L':
+ cfg_poll_loop_timeout_ms = strtoul(optarg, NULL, 10) * 1000;
+ break;
case 'm':
cfg_sendmmsg = true;
break;
@@ -490,6 +510,8 @@ static void parse_opts(int argc, char **argv)
case 'z':
cfg_zerocopy = true;
break;
+ default:
+ exit(1);
}
}
@@ -677,7 +699,7 @@ int main(int argc, char **argv)
num_sends += send_udp(fd, buf[i]);
num_msgs++;
if ((cfg_zerocopy && ((num_msgs & 0xF) == 0)) || cfg_tx_tstamp)
- flush_errqueue(fd, cfg_poll);
+ flush_errqueue(fd, cfg_poll, 500, true);
if (cfg_msg_nr && num_msgs >= cfg_msg_nr)
break;
@@ -696,7 +718,7 @@ int main(int argc, char **argv)
} while (!interrupted && (cfg_runtime_ms == -1 || tnow < tstop));
if (cfg_zerocopy || cfg_tx_tstamp)
- flush_errqueue(fd, true);
+ flush_errqueue_retry(fd, num_sends);
if (close(fd))
error(1, errno, "close");
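
In udpgso_bench_tx.c the final flush no longer issues a single poll() with a fixed 500 ms timeout: flush_errqueue_retry() keeps polling and draining until the zerocopy completion count (stat_zcopies) catches up with num_sends or the deadline set by the new -L option (default 2000 ms) passes, and the "poll timeout" message is only emitted on the first attempt so the retries stay quiet. flush_errqueue_recv() itself is outside this hunk; the sketch below only illustrates the conventional MSG_ERRQUEUE draining pattern it is assumed to implement (drain_errqueue is an illustrative name).

#include <errno.h>
#include <error.h>
#include <sys/socket.h>

/* Sketch only: non-blocking drain of the socket error queue, where
 * MSG_ZEROCOPY completion notifications are delivered. */
static void drain_errqueue(int fd)
{
	char control[1024];
	struct msghdr msg = {
		.msg_control = control,
	};
	int ret;

	for (;;) {
		msg.msg_controllen = sizeof(control);
		ret = recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT);
		if (ret == -1) {
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				break;	/* queue drained */
			error(1, errno, "errqueue");
		}
		/* A real implementation would parse the returned cmsgs for
		 * zerocopy notifications and count them (stat_zcopies). */
	}
}

Raising the per-call poll timeout alone would not be enough, since MSG_ZEROCOPY completions may arrive in several batches; bounding the whole retry loop with -L keeps the benchmark from hanging if completions never show up.
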
diff --git a/tools/testing/selftests/vm/hugetlb-madvise.c b/tools/testing/selftests/vm/hugetlb-madvise.c
index a634f47d1e56..9a127a8fe176 100644
--- a/tools/testing/selftests/vm/hugetlb-madvise.c
+++ b/tools/testing/selftests/vm/hugetlb-madvise.c
@@ -17,7 +17,6 @@
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
-#define __USE_GNU
#include <fcntl.h>
#define MIN_FREE_PAGES 20
diff --git a/tools/virtio/linux/bug.h b/tools/virtio/linux/bug.h
index 813baf13f62a..51a919083d9b 100644
--- a/tools/virtio/linux/bug.h
+++ b/tools/virtio/linux/bug.h
@@ -1,13 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef BUG_H
-#define BUG_H
+#ifndef _LINUX_BUG_H
+#define _LINUX_BUG_H
#include <asm/bug.h>
#define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond))
-#define BUILD_BUG_ON(x)
-
#define BUG() abort()
-#endif /* BUG_H */
+#endif /* _LINUX_BUG_H */
diff --git a/tools/virtio/linux/build_bug.h b/tools/virtio/linux/build_bug.h
new file mode 100644
index 000000000000..cdbb75e28a60
--- /dev/null
+++ b/tools/virtio/linux/build_bug.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BUILD_BUG_H
+#define _LINUX_BUILD_BUG_H
+
+#define BUILD_BUG_ON(x)
+
+#endif /* _LINUX_BUILD_BUG_H */
diff --git a/tools/virtio/linux/cpumask.h b/tools/virtio/linux/cpumask.h
new file mode 100644
index 000000000000..307da69d6b26
--- /dev/null
+++ b/tools/virtio/linux/cpumask.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CPUMASK_H
+#define _LINUX_CPUMASK_H
+
+#include <linux/kernel.h>
+
+#endif /* _LINUX_CPUMASK_H */
diff --git a/tools/virtio/linux/gfp.h b/tools/virtio/linux/gfp.h
new file mode 100644
index 000000000000..43d146f236f1
--- /dev/null
+++ b/tools/virtio/linux/gfp.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GFP_H
+#define __LINUX_GFP_H
+
+#include <linux/topology.h>
+
+#endif
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 21593bf97755..8b877167933d 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -10,6 +10,7 @@
#include <stdarg.h>
#include <linux/compiler.h>
+#include <linux/log2.h>
#include <linux/types.h>
#include <linux/overflow.h>
#include <linux/list.h>
diff --git a/tools/virtio/linux/kmsan.h b/tools/virtio/linux/kmsan.h
new file mode 100644
index 000000000000..272b5aa285d5
--- /dev/null
+++ b/tools/virtio/linux/kmsan.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KMSAN_H
+#define _LINUX_KMSAN_H
+
+#include <linux/gfp.h>
+
+inline void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+#endif /* _LINUX_KMSAN_H */
diff --git a/tools/virtio/linux/scatterlist.h b/tools/virtio/linux/scatterlist.h
index 369ee308b668..74d9e1825748 100644
--- a/tools/virtio/linux/scatterlist.h
+++ b/tools/virtio/linux/scatterlist.h
@@ -2,6 +2,7 @@
#ifndef SCATTERLIST_H
#define SCATTERLIST_H
#include <linux/kernel.h>
+#include <linux/bug.h>
struct scatterlist {
unsigned long page_link;
diff --git a/tools/virtio/linux/topology.h b/tools/virtio/linux/topology.h
new file mode 100644
index 000000000000..910794afb993
--- /dev/null
+++ b/tools/virtio/linux/topology.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TOPOLOGY_H
+#define _LINUX_TOPOLOGY_H
+
+#include <linux/cpumask.h>
+
+#endif /* _LINUX_TOPOLOGY_H */
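
The new tools/virtio headers are intentionally almost empty: build_bug.h takes over the no-op BUILD_BUG_ON() that used to live in bug.h, while cpumask.h, gfp.h, topology.h and kmsan.h mostly just forward to the next header in the chain, so kernel sources compiled by the userspace virtio test harness resolve their includes without dragging in real mm or scheduler code. Purely as an illustration (not a file from the patch, and assuming the include paths are set up the way the tools/virtio Makefile normally arranges them), a test translation unit can then contain:

/* Illustrative only: with the stub headers on the include path, kernel-style
 * includes resolve to these empty forwarders instead of real kernel code. */
#include <linux/gfp.h>		/* -> linux/topology.h -> linux/cpumask.h -> linux/kernel.h */
#include <linux/build_bug.h>

int main(void)
{
	/* BUILD_BUG_ON() is stubbed out above, so this is a compile-time no-op. */
	BUILD_BUG_ON(sizeof(int) > sizeof(long));
	return 0;
}
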